code stringlengths 101 5.91M |
|---|
def velocity_after_collision(n: np.ndarray, velocity: np.ndarray, m: float, j: float) -> np.ndarray:
    """Return the velocity after applying impulse magnitude *j* along normal *n*
    to a body of mass *m*: v' = v + (j * n) / m."""
    delta_v = (j * n) / m
    return velocity + delta_v
def paths_to_tensors(paths, max_path_length, baseline_predictions, discount):
    """Convert a list of rollout dicts into padded, stacked sample tensors.

    Mutates each path in place: attaches 'baselines' (taken from
    *baseline_predictions*, aligned by index) and 'returns' (discounted
    cumulative sums of rewards). Then pads every per-path array out to
    *max_path_length* and stacks across paths.

    Returns a dict with keys: observations, actions, rewards, agent_infos,
    env_infos, and valids (ones over each path's real steps, zeros in the
    padding — pad_tensor_n fills with zeros).
    """
    baselines = []
    returns = []
    for (idx, path) in enumerate(paths):
        path['baselines'] = baseline_predictions[idx]
        baselines.append(path['baselines'])
        # Discounted return-to-go for every timestep of this path.
        path['returns'] = tensor_utils.discount_cumsum(path['rewards'], discount)
        returns.append(path['returns'])
    obs = [path['observations'] for path in paths]
    obs = tensor_utils.pad_tensor_n(obs, max_path_length)
    actions = [path['actions'] for path in paths]
    actions = tensor_utils.pad_tensor_n(actions, max_path_length)
    rewards = [path['rewards'] for path in paths]
    rewards = tensor_utils.pad_tensor_n(rewards, max_path_length)
    # Info dicts: pad each path's dict of arrays, then stack into a single
    # dict of stacked arrays.
    agent_infos = [path['agent_infos'] for path in paths]
    agent_infos = tensor_utils.stack_tensor_dict_list([tensor_utils.pad_tensor_dict(p, max_path_length) for p in agent_infos])
    env_infos = [path['env_infos'] for path in paths]
    env_infos = tensor_utils.stack_tensor_dict_list([tensor_utils.pad_tensor_dict(p, max_path_length) for p in env_infos])
    # Validity mask shaped like 'returns': 1 for real steps, 0 after padding.
    valids = [np.ones_like(path['returns']) for path in paths]
    valids = tensor_utils.pad_tensor_n(valids, max_path_length)
    samples_data = dict(observations=obs, actions=actions, rewards=rewards, agent_infos=agent_infos, env_infos=env_infos, valids=valids)
    return samples_data
def benchmark_shortest_path_image(configuration_space, source):
    """Benchmark computing a shortest-path image over *configuration_space*
    starting from *source*."""
    def run():
        # Graph construction happens inside the timed closure, so its cost
        # is part of the measurement — same as the original.
        grid = shortest_paths.GridGraph(configuration_space)
        grid.shortest_path_image(source)
    benchmark(run)
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
    """Scan *line* from *startpos* for the close of a bracketed expression.

    *depth* is the nesting depth already open before *startpos*; *startchar*
    and *endchar* are the opening/closing delimiters (e.g. '(' and ')').

    Returns:
        (index just past the closing delimiter, 0) when depth reaches zero,
        or (-1, remaining depth) if the line ends first.
    """
    # BUGFIX: `xrange` is Python 2 only; this file uses f-strings elsewhere,
    # so it runs on Python 3 where `xrange` is a NameError. Use `range`.
    for i in range(startpos, len(line)):
        if line[i] == startchar:
            depth += 1
        elif line[i] == endchar:
            depth -= 1
            if depth == 0:
                return (i + 1, 0)
    return (-1, depth)
class FlaxBigBirdForMultipleChoice(metaclass=DummyObject):
    """Import-time placeholder used when the Flax backend is unavailable."""
    # Backend(s) this dummy stands in for; checked on instantiation.
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        # Raises a helpful error if the 'flax' backend is not installed.
        requires_backends(self, ['flax'])
class AssignScoreWithK(Function):
    """Autograd wrapper around the assign_score_withk CUDA kernels.

    Aggregates per-neighbor scores over the K nearest neighbors of each
    point; *aggregate* selects the reduction ('sum', 'avg' or 'max').
    Forward and backward both dispatch to GPU extension kernels.
    """
    def forward(ctx, scores, points, centers, knn_idx, aggregate):
        # Map the aggregation name to the integer code the kernel expects.
        agg = {'sum': 0, 'avg': 1, 'max': 2}
        (B, N, M, O) = points.size()
        K = scores.size(2)
        # Kernel writes into a preallocated (B, O, N) output buffer.
        output = torch.zeros([B, O, N], dtype=points.dtype, device=points.device)
        output = output.contiguous()
        src.gpu.assign_score_withk_forward_cuda(B, N, M, K, O, agg[aggregate], points.contiguous(), centers.contiguous(), scores.contiguous(), knn_idx.contiguous(), output)
        # Tensors go through save_for_backward; the plain int code rides on ctx.
        ctx.save_for_backward(output, points, centers, scores, knn_idx)
        ctx.agg = agg[aggregate]
        return output
    def backward(ctx, grad_out):
        (output, points, centers, scores, knn_idx) = ctx.saved_tensors
        agg = ctx.agg
        (B, N, M, O) = points.size()
        K = scores.size(2)
        # Zero-filled gradient buffers the backward kernel accumulates into.
        grad_points = torch.zeros_like(points, dtype=points.dtype, device=points.device).contiguous()
        grad_centers = torch.zeros_like(centers, dtype=points.dtype, device=points.device).contiguous()
        grad_scores = torch.zeros_like(scores, dtype=scores.dtype, device=scores.device).contiguous()
        src.gpu.assign_score_withk_backward_cuda(B, N, M, K, O, agg, grad_out.contiguous(), points.contiguous(), centers.contiguous(), scores.contiguous(), knn_idx.contiguous(), grad_points, grad_centers, grad_scores)
        # No gradient for knn_idx or aggregate (and one extra None slot).
        return (grad_scores, grad_points, grad_centers, None, None, None)
def list_non_bool_dtypes():
    """Return every supported Open3D-core dtype except Bool:
    floats, signed ints, then unsigned ints."""
    return [
        o3c.float32,
        o3c.float64,
        o3c.int8,
        o3c.int16,
        o3c.int32,
        o3c.int64,
        o3c.uint8,
        o3c.uint16,
        o3c.uint32,
        o3c.uint64,
    ]
def get_hbond_donor_indice(m):
    """Return the atom indices in molecule *m* matching the hydrogen-bond
    donor SMARTS pattern, as a numpy array."""
    # Non-carbon atom carrying at least one hydrogen.
    donor_smarts = ['[!#6;!H0]']
    matched = []
    for pattern in donor_smarts:
        query = Chem.MolFromSmarts(pattern)
        matched.extend(hit[0] for hit in m.GetSubstructMatches(query))
    return np.array(matched)
class FeatureFusionModule(nn.Module):
    """Fuse two feature maps with channel attention.

    The inputs are concatenated on the channel axis, projected by a 1x1
    ConvBNReLU, then re-weighted by a squeeze-and-excitation style channel
    attention vector and added back residually: out = feat * atten + feat.
    """
    def __init__(self, in_chan, out_chan, *args, **kwargs):
        super(FeatureFusionModule, self).__init__()
        # 1x1 projection of the concatenated inputs down to out_chan channels.
        self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0)
        # Bottleneck (out_chan -> out_chan//4 -> out_chan) producing the
        # per-channel attention weights.
        self.conv1 = nn.Conv2d(out_chan, (out_chan // 4), kernel_size=1, stride=1, padding=0, bias=False)
        self.conv2 = nn.Conv2d((out_chan // 4), out_chan, kernel_size=1, stride=1, padding=0, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()
        self.init_weight()
    def forward(self, fsp, fcp):
        # Concatenate the two input feature maps channel-wise.
        fcat = torch.cat([fsp, fcp], dim=1)
        feat = self.convblk(fcat)
        # Global average pool collapses H x W; attention ends in (0, 1).
        atten = F.avg_pool2d(feat, feat.size()[2:])
        atten = self.conv1(atten)
        atten = self.relu(atten)
        atten = self.conv2(atten)
        atten = self.sigmoid(atten)
        # Residual re-weighting: feat * atten + feat.
        feat_atten = torch.mul(feat, atten)
        feat_out = (feat_atten + feat)
        return feat_out
    def init_weight(self):
        # Kaiming init for direct conv children; zero any conv biases.
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if (not (ly.bias is None)):
                    nn.init.constant_(ly.bias, 0)
    def get_params(self):
        """Split parameters into weight-decayed (conv/linear weights) and
        non-decayed (biases and BatchNorm parameters) groups."""
        (wd_params, nowd_params) = ([], [])
        for (name, module) in self.named_modules():
            if (isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d)):
                wd_params.append(module.weight)
                if (not (module.bias is None)):
                    nowd_params.append(module.bias)
            elif isinstance(module, nn.BatchNorm2d):
                nowd_params += list(module.parameters())
        return (wd_params, nowd_params)
class MetaFeature(AbstractMetaFeature):
    """Concrete meta-feature; tags instances with the 'METAFEATURE' type."""
    def __init__(self):
        super(MetaFeature, self).__init__()
        # Type tag read elsewhere to distinguish meta-feature kinds.
        self.type_ = 'METAFEATURE'
def makeplot_bar(experiments, planner, ttl):
    """Bar-plot experiment results, in one of two modes selected by plt_cfg.

    When plt_cfg['qt'] == 1, *experiments* is a sequence of per-run values
    for a single *planner*: one bar per run plus a horizontal mean line,
    colored by planner_stl[planner]. The y-label is picked from *ttl*
    ('dur' -> execution time [s], otherwise path length [m]).

    Otherwise *experiments* is a dict whose keys contain 'dur' or 'len';
    it is split into duration and length groups, each drawn as a grouped
    bar chart, and the figure is saved to quanti/<ttl>.png.
    """
    if (plt_cfg['qt'] == 1):
        N = len(experiments)
        x = np.linspace(1, N, N, endpoint=True)
        plt.figure(ttl)
        plt.xlabel('Test run')
        if ('dur' in ttl):
            plt.ylabel('Execution Time in [s]')
        else:
            plt.ylabel('Path Length in [m]')
        plt.bar(x, experiments, color=planner_stl[planner])
        # Mean across all runs as a reference line in the planner's color.
        plt.axhline(y=np.mean(experiments), color=planner_stl[planner], linewidth=2)
    else:
        data_dur = {}
        data_len = {}
        for key in experiments:
            if ('dur' in key):
                data_dur[key] = experiments[key]
            elif ('len' in key):
                data_len[key] = experiments[key]
        (fig, ax) = plt.subplots()
        ax.set_title(ttl)
        bar_plot(ax, data_dur, total_width=0.8, single_width=0.9)
        # NOTE(review): this second subplots() call rebinds fig/ax, so only
        # the length figure reaches savefig below — confirm the duration
        # figure is meant to be shown/saved elsewhere.
        (fig, ax) = plt.subplots()
        ax.set_title(ttl)
        bar_plot(ax, data_len, total_width=0.8, single_width=0.9)
        plt.savefig((('quanti/' + ttl) + '.png'), bbox_inches='tight')
class AutoregressiveRationalQuadraticSplineBijection(RationalQuadraticSplineBijection):
    """Rational-quadratic spline bijection whose spline parameters come from a
    masked piecewise autoregressive transform (linear tails beyond tail_bound,
    residual blocks, no batch norm, no context conditioning)."""
    def __init__(self, num_input_channels, num_hidden_layers, num_hidden_channels, num_bins, tail_bound, activation, dropout_probability):
        # *activation* is a factory/class: it is instantiated here with no args.
        super().__init__(num_input_channels=num_input_channels, flow=MaskedPiecewiseRationalQuadraticAutoregressiveTransform(features=num_input_channels, hidden_features=num_hidden_channels, context_features=None, num_bins=num_bins, tails='linear', tail_bound=tail_bound, num_blocks=num_hidden_layers, use_residual_blocks=True, random_mask=False, activation=activation(), dropout_probability=dropout_probability, use_batch_norm=False))
def rearrange_tf_to_pt(value, depthwise=False):
    """Permute a TensorFlow-layout weight tensor into PyTorch layout.

    4-D conv kernels go from HWIO ('h w c_in c_out') to PyTorch's OIHW
    (or IOHW when *depthwise*); 2-D dense weights are transposed; 1-D
    tensors (biases, norms) pass through unchanged.

    Raises:
        ValueError: for any other rank. (BUGFIX: the original silently
        returned None for unsupported ranks, deferring the failure to a
        confusing crash at the caller.)
    """
    if value.ndim == 4:
        if depthwise:
            return einops.rearrange(value, 'h w c_in c_out -> c_in c_out h w')
        else:
            return einops.rearrange(value, 'h w c_in c_out -> c_out c_in h w')
    elif value.ndim == 2:
        return einops.rearrange(value, 'c_in c_out -> c_out c_in')
    elif value.ndim == 1:
        return value
    else:
        raise ValueError(f'Unsupported tensor rank for TF->PT conversion: {value.ndim}')
class HPText():
    """Text/dataset hyper-parameters for LJSpeech-based training."""
    # Path to the LJSpeech-1.1 dataset, resolved relative to this file.
    dataset = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'datasets/data/LJSpeech-1.1')
    # NOTE(review): these read like split boundary indices (train up to
    # 13000, validation up to 13099), not counts — confirm in the loader.
    (num_train, num_valid) = (13000, 13099)
    punctuation = list('\'",.:?!')
    # Grapheme vocabulary: specials, lowercase letters + space, punctuation.
    graphemes = ((['<pad>', '<unk>'] + list('abcdefghijklmnopqrstuvwxyz ')) + punctuation)
    # Presumably switches the text frontend to phoneme input — verify usage.
    use_phonemes = True
def test_construct_arguments_without_duplicates_passes():
    """Signature.construct_arguments must accept calls whose positional and
    keyword arguments do not overlap — each call below should not raise."""
    s = Signature(bariza)
    s.construct_arguments([1, 2], {'c': 5}, {})
    s = Signature(complex_function_name)
    s.construct_arguments([1], {'b': 4}, {})
    s = Signature(FunCTIonWithCAPItals)
    # All-keyword call with no positional arguments at all.
    s.construct_arguments([], {'a': 6, 'b': 6, 'c': 6}, {})
def all_metrics(preds, labels):
    """Return accuracy, precision, recall and F1 for *preds* against *labels*
    as a dict keyed 'acc', 'precision', 'recall', 'f1'."""
    return {
        'acc': simple_accuracy(preds, labels),
        'precision': precision_score(y_true=labels, y_pred=preds),
        'recall': recall_score(y_true=labels, y_pred=preds),
        'f1': f1_score(y_true=labels, y_pred=preds),
    }
def apiurl(args: argparse.Namespace, subpath: str, query_args: typing.Optional[typing.Dict]=None) -> str:
    """Build a full API URL from args.url, *subpath* and query parameters.

    String values are percent-encoded with quote_plus; non-string values are
    JSON-encoded first. When args.api_key is set it is appended as the
    'api_key' parameter.

    BUGFIX: the original wrote 'api_key' straight into the caller's
    *query_args* dict, mutating it as a side effect; we now work on a copy.
    """
    query_args = dict(query_args) if query_args else {}
    if args.api_key is not None:
        query_args['api_key'] = args.api_key
    if not query_args:
        return args.url + subpath
    encoded = '&'.join(
        '{x}={y}'.format(x=x, y=quote_plus(y if isinstance(y, str) else json.dumps(y)))
        for (x, y) in query_args.items()
    )
    return args.url + subpath + '?' + encoded
class DRN_A(nn.Module):
    """Dilated residual network (variant A) backbone.

    Standard ResNet-style stem plus four stages; layer3/layer4 keep stride 1
    and use dilations 2 and 4 instead of further downsampling, so spatial
    resolution is preserved relative to a plain ResNet.
    """
    def __init__(self, block, layers, BatchNorm=None):
        # NOTE(review): BatchNorm defaults to None but is called
        # unconditionally below — callers must always pass a norm class.
        self.inplanes = 64
        super(DRN_A, self).__init__()
        self.out_dim = (512 * block.expansion)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = BatchNorm(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], BatchNorm=BatchNorm)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, BatchNorm=BatchNorm)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2, BatchNorm=BatchNorm)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, BatchNorm=BatchNorm)
        self._init_weight()
    def _init_weight(self):
        # He-style init for convs; norm layers start as identity (w=1, b=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
        """Build one residual stage; a 1x1 conv + norm downsample branch is
        added when the stride or channel count changes."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm((planes * block.expansion)))
        layers = []
        # NOTE(review): the first block of a stage is built WITHOUT the
        # dilation argument; only blocks 2..n receive it — confirm intended.
        layers.append(block(self.inplanes, planes, stride, downsample, BatchNorm=BatchNorm))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=(dilation, dilation), BatchNorm=BatchNorm))
        return nn.Sequential(*layers)
    def forward(self, x):
        # Stem, then the four residual stages; returns the final feature map.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
def convert(item):
    """Convert a (question_id, example) pair into VQA-style dicts.

    Returns (question_dict, answer_dict); answer_dict is None when the
    example carries no 'answer' key.
    """
    qid, example = item
    question = {
        'image_id': example['imageId'],
        'question_id': qid,
        'question': example['question'],
    }
    answer = None
    if 'answer' in example:
        answer = {
            'image_id': example['imageId'],
            'question_id': qid,
            'answers': [{'answer': example['answer']}],
        }
    return (question, answer)
class Xception_dilation(nn.Module):
    """Xception-style classifier whose exit flow uses dilated separable
    convolutions instead of further downsampling.

    forward() also records intermediate activations in self.layers (final
    feature map, then the logits), retrievable via get_layers().
    """
    def __init__(self, input_channel=None, num_classes=None):
        super(Xception_dilation, self).__init__()
        self.num_classes = num_classes
        # Entry flow: two plain conv/bn/relu stages.
        self.conv1 = nn.Conv2d(input_channel, 32, 3, 2, 0, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, 3, bias=False)
        self.bn2 = nn.BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=True)
        # Downsampling blocks (stride 2) up to 728 channels ...
        self.block1 = Block(64, 128, 2, 2, start_with_relu=False, grow_first=True)
        self.block2 = Block(128, 256, 2, 2, start_with_relu=True, grow_first=True)
        self.block3 = Block(256, 728, 2, 2, start_with_relu=True, grow_first=True)
        # ... followed by eight stride-1 middle-flow blocks at 728 channels.
        self.block4 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block5 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block6 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block7 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block8 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block9 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block10 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        self.block11 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
        # Exit flow: dilated 1x1 shortcut plus separable conv stack.
        self.residual = nn.Sequential(nn.Conv2d(728, 1024, 1, 1, dilation=2, bias=False), nn.BatchNorm2d(1024))
        self.SepConv1 = nn.Sequential(nn.ReLU(inplace=False), SeparableConv2d(728, 728, 3, stride=1, padding=1, bias=False), nn.BatchNorm2d(728))
        self.SepConv2 = nn.Sequential(nn.ReLU(inplace=False), SeparableConv2d(728, 1024, 3, stride=1, padding=1, bias=False), nn.BatchNorm2d(1024))
        self.SepConv3 = nn.Sequential(SeparableConv2d(1024, 1536, 3, dilation=2, stride=1, padding=2, bias=False), nn.BatchNorm2d(1536), nn.ReLU(inplace=False))
        self.SepConv4 = nn.Sequential(SeparableConv2d(1536, 2048, 3, dilation=2, stride=1, padding=2, bias=False), nn.BatchNorm2d(2048), nn.ReLU(inplace=False))
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.cls = nn.Linear(2048, num_classes)
    def get_layers(self):
        # NOTE: self.layers only exists after the first forward() call.
        return self.layers
    def forward(self, x):
        self.layers = []
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu2(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        # Exit flow with a residual shortcut around SepConv1/SepConv2.
        res = self.residual(x)
        x = self.SepConv1(x)
        x = self.SepConv2(x)
        x += res
        x = self.SepConv3(x)
        x = self.SepConv4(x)
        self.layers.append(x)
        # Global average pool -> flatten -> linear classifier.
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        x = self.cls(x)
        self.layers.append(x)
        return x
class Pinwheel(VAE):
    """VAE over 2-D pinwheel data with a fixed 4-component mixture prior."""
    def __init__(self, params):
        # The mixture prior below is hard-coded in 2-D, so latent_dim must be 2.
        assert (params.latent_dim == 2)
        super(Pinwheel, self).__init__(NormalMixture, dist.Normal, dist.Normal, Enc(params.latent_dim, params.num_hidden_layers, params.hidden_dim), Dec(params.latent_dim, params.num_hidden_layers, params.hidden_dim), params)
        self.modelName = 'pinwheel'
    def init_pz(self, o):
        """Return (means, log-variances) of the 4-component mixture prior.

        Means are fixed at the four unit axis points; log-variances start at
        zero and are trainable only when o.learn_prior_variance is set.
        """
        pz_mu = nn.Parameter(torch.tensor([[0.0, (- 1.0)], [0.0, 1.0], [1.0, 0.0], [(- 1.0), 0.0]]), requires_grad=False)
        pz_logvar = nn.Parameter(torch.zeros(4, o.latent_dim), requires_grad=o.learn_prior_variance)
        return (pz_mu, pz_logvar)
    def getDataLoaders(batch_size, shuffle=True, device='cuda'):
        # NOTE(review): no 'self' parameter and no @staticmethod — presumably
        # called on the class (Pinwheel.getDataLoaders(batch_size)); calling
        # on an instance would misbind batch_size. Confirm call sites.
        kwargs = ({'num_workers': 1, 'pin_memory': True, 'drop_last': True} if (device == 'cuda') else {'drop_last': True})
        variant = 'small'
        train_loader = DataLoader(PinwheelDataset('../data', train=True, variant=variant), batch_size=batch_size, shuffle=shuffle, **kwargs)
        test_loader = DataLoader(PinwheelDataset('../data', train=False, variant=variant), batch_size=batch_size, shuffle=shuffle, **kwargs)
        return (train_loader, test_loader)
    def generate(self, runPath, epoch):
        """Sample from the model and save scatter plots of mean/means/samples."""
        (N, K) = (1000, 100)
        (mean, means, samples) = super(Pinwheel, self).generate(N, K)
        scatter_plot(mean.data.cpu(), '{}/gen_mean_{:03d}.png'.format(runPath, epoch))
        scatter_plot(means.data.cpu(), '{}/gen_means_{:03d}.png'.format(runPath, epoch))
        scatter_plot(samples.data.cpu(), '{}/gen_samples_{:03d}.png'.format(runPath, epoch))
    def reconstruct(self, data, runPath, epoch):
        """Reconstruct up to 400 points and plot originals vs reconstructions."""
        n = min(400, data.size(0))
        recon = super(Pinwheel, self).reconstruct(data[:n])
        scatter_plot([data[:n].data.cpu(), recon.data.cpu()], '{}/recon_{:03d}.png'.format(runPath, epoch))
    def posterior_plot(self, qz_x_mean, qz_x_std, runPath, epoch):
        """Save a plot of the approximate posterior in latent space."""
        posterior_plot_pinwheel(qz_x_mean, qz_x_std, '{}/posterior_{:03d}.png'.format(runPath, epoch))
def np_F1(y_true, y_pred):
    """Mean label-flip-invariant F1 over samples.

    Each prediction map is thresholded at 0.5 and scored against the
    thresholded truth both as-is and inverted; the better score is kept.
    Returns the mean as float32.
    """
    per_sample = []
    for true_map, pred_map in zip(y_true, y_pred):
        truth = (true_map > 0.5).astype('int').ravel()
        direct = f1_score(truth, (pred_map > 0.5).astype('int').ravel())
        flipped = f1_score(truth, ((1 - pred_map) > 0.5).astype('int').ravel())
        per_sample.append(max(direct, flipped))
    return np.mean(per_sample).astype('float32')
def indice_maxpool(features, indice_pairs, indice_pair_num, num_activate_out):
    """Dispatch sparse indice max-pooling to the dtype-specific CUDA kernel.

    Raises NotImplementedError for dtypes other than float32 / half.
    """
    dtype = features.dtype
    if dtype == torch.float32:
        kernel = sparse_conv_ext.indice_maxpool_fp32
    elif dtype == torch.half:
        kernel = sparse_conv_ext.indice_maxpool_half
    else:
        raise NotImplementedError
    return kernel(features, indice_pairs, indice_pair_num, num_activate_out)
def int2bitstr(integer):
    """Render *integer* as its 32-bit big-endian binary string."""
    packed = struct.pack('>I', integer)
    bits = [format(byte, '08b') for byte in packed]
    return ''.join(bits)
class MLPComponentTest(BaseRegressionComponentTest):
    """Expected-results configuration for testing the MLP regression component.

    `res` maps benchmark keys to expected scores / call counts; keys ending
    in `_places` give the decimal precision for approximate comparisons.
    """
    __test__ = True
    res = dict()
    res['default_boston'] = 0.
    res['default_boston_places'] = 4
    res['boston_n_calls'] = 8
    res['boston_iterative_n_iter'] = 161
    res['default_boston_iterative'] = res['default_boston']
    res['default_boston_iterative_places'] = 1
    res['default_boston_sparse'] = (- 0.)
    res['default_boston_sparse_places'] = 6
    res['default_boston_iterative_sparse'] = res['default_boston_sparse']
    res['default_boston_iterative_sparse_places'] = 6
    res['default_diabetes'] = 0.
    res['diabetes_n_calls'] = 9
    res['diabetes_iterative_n_iter'] = 106
    res['default_diabetes_iterative'] = res['default_diabetes']
    res['default_diabetes_sparse'] = 0.
    res['default_diabetes_iterative_sparse'] = res['default_diabetes_sparse']
    # Reference sklearn estimator vs. the wrapped component under test.
    sk_mod = sklearn.neural_network.MLPRegressor
    module = MLPRegressor
    # Hyperparameter advanced by iterative fitting and its expected cap.
    step_hyperparameter = {'name': 'n_iter_', 'value': module.get_max_iter()}
class DukeMTMC(Dataset):
    """DukeMTMC-reID person re-identification dataset wrapper.

    Expects the official zip to be placed manually at <root>/raw (network
    download is not implemented; a RuntimeError points the user there).
    Extraction renames images to 'pid_cam_index.jpg' under <root>/images
    and writes meta.json / splits.json.
    """
    # NOTE(review): the URL string literal is truncated in this copy of the
    # file — restore it from version control before shipping.
    url = '
    md5 = '2f93496f9b516d1ee5ef51c1d5e7d601'
    def __init__(self, root, split_id=0, num_val=100, download=True):
        super(DukeMTMC, self).__init__(root, split_id=split_id)
        if download:
            self.download()
        if (not self._check_integrity()):
            raise RuntimeError(('Dataset not found or corrupted. ' + 'You can use download=True to download it.'))
        self.load(num_val)
    def download(self):
        """Verify/extract the manually-downloaded zip and build the layout."""
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        import re
        import hashlib
        import shutil
        from glob import glob
        from zipfile import ZipFile
        raw_dir = osp.join(self.root, 'raw')
        mkdir_if_missing(raw_dir)
        fpath = osp.join(raw_dir, 'DukeMTMC-reID.zip')
        # Accept the zip only if its md5 matches the expected checksum.
        if (osp.isfile(fpath) and (hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5)):
            print(('Using downloaded file: ' + fpath))
        else:
            raise RuntimeError('Please download the dataset manually from {} to {}'.format(self.url, fpath))
        exdir = osp.join(raw_dir, 'DukeMTMC-reID')
        if (not osp.isdir(exdir)):
            print('Extracting zip file')
            with ZipFile(fpath) as z:
                z.extractall(path=raw_dir)
        images_dir = osp.join(self.root, 'images')
        mkdir_if_missing(images_dir)
        # identities[pid][cam] -> list of renamed file names; all_pids maps
        # original dataset person ids to dense, zero-based ids.
        identities = []
        all_pids = {}
        def register(subdir, pattern=re.compile('([-\\d]+)_c(\\d)')):
            # Copy every jpg in *subdir* into images_dir under the canonical
            # name, registering its (relabeled) pid; returns the pid set.
            fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg')))
            pids = set()
            for fpath in fpaths:
                fname = osp.basename(fpath)
                (pid, cam) = map(int, pattern.search(fname).groups())
                assert (1 <= cam <= 8)
                # Cameras are 1-based in file names; store them 0-based.
                cam -= 1
                if (pid not in all_pids):
                    all_pids[pid] = len(all_pids)
                pid = all_pids[pid]
                pids.add(pid)
                if (pid >= len(identities)):
                    assert (pid == len(identities))
                    identities.append([[] for _ in range(8)])
                fname = '{:08d}_{:02d}_{:04d}.jpg'.format(pid, cam, len(identities[pid][cam]))
                identities[pid][cam].append(fname)
                shutil.copy(fpath, osp.join(images_dir, fname))
            return pids
        trainval_pids = register('bounding_box_train')
        gallery_pids = register('bounding_box_test')
        query_pids = register('query')
        # Every query identity must appear in the gallery; train/test splits
        # must not share identities.
        assert (query_pids <= gallery_pids)
        assert trainval_pids.isdisjoint(gallery_pids)
        meta = {'name': 'DukeMTMC', 'shot': 'multiple', 'num_cameras': 8, 'identities': identities}
        write_json(meta, osp.join(self.root, 'meta.json'))
        splits = [{'trainval': sorted(list(trainval_pids)), 'query': sorted(list(query_pids)), 'gallery': sorted(list(gallery_pids))}]
        write_json(splits, osp.join(self.root, 'splits.json'))
def generate_cross_cols(self, df: pd.DataFrame, crossed_cols):
    """Return a DataFrame of crossed-feature columns.

    For each tuple in *crossed_cols*, the member columns are cast to str and
    joined value-wise with '-', stored under a '_'-joined column name.
    Only the new crossed columns are returned.
    """
    crossed = df.copy()
    new_names = []
    for col_group in crossed_cols:
        for col in col_group:
            crossed[col] = crossed[col].astype('str')
        new_name = '_'.join(col_group)
        crossed[new_name] = crossed[list(col_group)].apply((lambda row: '-'.join(row)), axis=1)
        new_names.append(new_name)
    return crossed[new_names]
def model_opts(parser):
    """Register model-architecture CLI options on *parser*.

    Adds four argument groups: word embeddings (including HAN context
    options), embedding features, encoder/decoder structure, and attention.
    """
    # --- Word embeddings ---
    group = parser.add_argument_group('Model-Embeddings')
    group.add_argument('-src_word_vec_size', type=int, default=500, help='Word embedding size for src.')
    group.add_argument('-tgt_word_vec_size', type=int, default=500, help='Word embedding size for tgt.')
    group.add_argument('-word_vec_size', type=int, default=(- 1), help='Word embedding size for src and tgt.')
    group.add_argument('-share_decoder_embeddings', action='store_true', help='Use a shared weight matrix for the input and\n\t\t\t\t\t output word embeddings in the decoder.')
    group.add_argument('-share_embeddings', action='store_true', help='Share the word embeddings between encoder\n\t\t\t\t\t and decoder. Need to use shared dictionary for this\n\t\t\t\t\t option.')
    group.add_argument('-position_encoding', action='store_true', help='Use a sin to mark relative words positions.\n\t\t\t\t\t Necessary for non-RNN style models.\n\t\t\t\t\t ')
    group.add_argument('-context_type', type=str, default=None, help='Where to consider context: [HAN_enc|HAN_dec|HAN_dec_source|HAN_dec_context|HAN_join]')
    group.add_argument('-context_size', type=int, default=3, help='Number of previous sentences.')
    # --- Embedding features ---
    group = parser.add_argument_group('Model-Embedding Features')
    group.add_argument('-feat_merge', type=str, default='concat', choices=['concat', 'sum', 'mlp'], help='Merge action for incorporating features embeddings.\n\t\t\t\t\t Options [concat|sum|mlp].')
    group.add_argument('-feat_vec_size', type=int, default=(- 1), help='If specified, feature embedding sizes\n\t\t\t\t\t will be set to this. Otherwise, feat_vec_exponent\n\t\t\t\t\t will be used.')
    group.add_argument('-feat_vec_exponent', type=float, default=0.7, help='If -feat_merge_size is not set, feature\n\t\t\t\t\t embedding sizes will be set to N^feat_vec_exponent\n\t\t\t\t\t where N is the number of values the feature takes.')
    # --- Encoder / decoder structure ---
    group = parser.add_argument_group('Model- Encoder-Decoder')
    group.add_argument('-model_type', default='text', help='Type of source model to use. Allows\n\t\t\t\t\t the system to incorporate non-text inputs.\n\t\t\t\t\t Options are [text|img|audio].')
    group.add_argument('-encoder_type', type=str, default='rnn', choices=['rnn', 'brnn', 'mean', 'transformer', 'cnn'], help='Type of encoder layer to use. Non-RNN layers\n\t\t\t\t\t are experimental. Options are\n\t\t\t\t\t [rnn|brnn|mean|transformer|cnn].')
    group.add_argument('-decoder_type', type=str, default='rnn', choices=['rnn', 'transformer', 'cnn'], help='Type of decoder layer to use. Non-RNN layers\n\t\t\t\t\t are experimental. Options are\n\t\t\t\t\t [rnn|transformer|cnn].')
    group.add_argument('-layers', type=int, default=(- 1), help='Number of layers in enc/dec.')
    group.add_argument('-enc_layers', type=int, default=2, help='Number of layers in the encoder')
    group.add_argument('-dec_layers', type=int, default=2, help='Number of layers in the decoder')
    group.add_argument('-rnn_size', type=int, default=500, help='Size of rnn hidden states')
    group.add_argument('-cnn_kernel_width', type=int, default=3, help='Size of windows in the cnn, the kernel_size is\n\t\t\t\t\t (cnn_kernel_width, 1) in conv layer')
    group.add_argument('-input_feed', type=int, default=1, help='Feed the context vector at each time step as\n\t\t\t\t\t additional input (via concatenation with the word\n\t\t\t\t\t embeddings) to the decoder.')
    group.add_argument('-bridge', action='store_true', help='Have an additional layer between the last encoder\n\t\t\t\t\t state and the first decoder state')
    group.add_argument('-rnn_type', type=str, default='LSTM', choices=['LSTM', 'GRU', 'SRU'], action=CheckSRU, help='The gate type to use in the RNNs')
    group.add_argument('-brnn', action=DeprecateAction, help='Deprecated, use `encoder_type`.')
    group.add_argument('-brnn_merge', default='concat', choices=['concat', 'sum'], help='Merge action for the bidir hidden states')
    group.add_argument('-context_gate', type=str, default=None, choices=['source', 'target', 'both'], help='Type of context gate to use.\n\t\t\t\t\t Do not select for no context gate.')
    # --- Attention ---
    group = parser.add_argument_group('Model- Attention')
    group.add_argument('-global_attention', type=str, default='general', choices=['dot', 'general', 'mlp'], help='The attention type to use:\n\t\t\t\t\t dotprod or general (Luong) or MLP (Bahdanau)')
    group.add_argument('-copy_attn', action='store_true', help='Train copy attention layer.')
    group.add_argument('-copy_attn_force', action='store_true', help='When available, train to copy.')
    group.add_argument('-reuse_copy_attn', action='store_true', help='Reuse standard attention for copy')
    group.add_argument('-copy_loss_by_seqlength', action='store_true', help='Divide copy loss by length of sequence')
    group.add_argument('-coverage_attn', action='store_true', help='Train a coverage attention layer.')
    group.add_argument('-lambda_coverage', type=float, default=1, help='Lambda value for coverage.')
def update_context(job_args: JobArgs):
    """Sync the global dlrover context with per-node-type job arguments.

    Copies the auto-scale flags for WORKER and PS node groups and the
    relaunch policy, then refreshes tunables from the brain service and
    logs the resulting configuration.
    """
    for (node_type, node_args) in job_args.node_args.items():
        if (node_type == NodeType.WORKER):
            _dlrover_context.auto_worker_enabled = node_args.auto_scale
        elif (node_type == NodeType.PS):
            _dlrover_context.auto_ps_enabled = node_args.auto_scale
    _dlrover_context.relaunch_always = job_args.relaunch_always
    _dlrover_context.set_params_from_brain()
    _dlrover_context.print_config()
class L2BallProj(L2Ball):
    """Dual object bounding network outputs over an L2 ball around X,
    estimated with k random Gaussian probe vectors."""
    def __init__(self, X, epsilon, k):
        # Deliberately skips L2Ball.__init__ and initializes the DualObject
        # base directly — presumably to avoid the exact-norm setup; confirm.
        DualObject.__init__(self)
        self.epsilon = epsilon
        # NOTE(review): n is computed but never used in this class.
        n = X[0].numel()
        # nu_x propagates the input itself; nu propagates k Gaussian probes
        # with the same trailing shape as a single input.
        self.nu_x = [X]
        self.nu = [X.new(1, k, *X.size()[1:]).normal_()]
    def apply(self, dual_layer):
        # Push both the input track and the probe track through the layer.
        self.nu_x.append(dual_layer(*self.nu_x))
        self.nu.append(dual_layer(*self.nu))
    def bounds(self, network=None):
        """Return (lower, upper) bounds: nu_x -+ epsilon * estimated L2 norm.

        With *network* given, the stored initial tensors are re-propagated
        through it; otherwise the most recent propagated values are used.
        """
        if (network is None):
            nu = self.nu[(- 1)]
            nu_x = self.nu_x[(- 1)]
        else:
            nu = network(self.nu[0])
            nu_x = network(self.nu_x[0])
        k = nu.size(1)
        # Norm over the probe dimension scaled by 1/sqrt(k) estimates the
        # per-output L2 sensitivity.
        l2 = (nu.norm(2, 1) / (k ** 0.5))
        return ((nu_x - (self.epsilon * l2)), (nu_x + (self.epsilon * l2)))
class SmoothQuantCalibrationLLM(SmoothQuantCalibration):
def __init__(self, model_path, dataloader, iterations, op_types, percentile, temp_path, weight_name_mapping):
self.func = None
self.graph_def = None
self.frozen_func = None
self._saved_model = None
self.model = model_path
self.dataloader = dataloader
self.iterations = iterations
self.op_types = op_types
self.percentile = percentile
self.temp_path = temp_path
self.weight_name_mapping = weight_name_mapping
self.print_node_list = []
self._sq_input_node_names = []
self._sq_target_node_names = {}
self._sq_output_tensor_dict = {}
self._sq_weight_tensor_dict = {}
def _parse_calibration_logs(self, tmp_dump_file):
valid_data = []
with open(tmp_dump_file) as file:
for i in file.readlines():
if i.startswith(';'):
valid_data.append(i.strip())
for activation in valid_data:
activation = activation.split(' ')
data = []
activation_name = ''
per_channel = []
for (idx, s) in enumerate(activation):
if (idx == 0):
per_channel.append(float(s.rsplit(':')[(- 1)].strip('[')))
activation_name = s.rsplit(':')[0][1:(- 9)]
elif (s.find('][') != (- 1)):
pairs = [float(i) for i in s.split('][')]
per_channel.append(pairs[0])
data.append(per_channel)
per_channel = [pairs[1]]
elif (s.find(']]') != (- 1)):
per_channel.append(float(s.strip(']')))
data.append(per_channel)
else:
per_channel.append(float(s))
if (activation_name not in self._sq_output_tensor_dict):
self._sq_output_tensor_dict[activation_name] = [np.array(data)]
else:
self._sq_output_tensor_dict[activation_name].append(np.array(data))
def _insert_print_for_activation(self, graph_def):
cur_graph = GraphAnalyzer()
cur_graph.graph = graph_def
graph_info = cur_graph.parse_graph()
for cur_list in self.print_node_list:
pre_node_name = cur_list[0]
post_node_name = cur_list[(- 1)]
insert_node_pairs = []
top_node = graph_info[pre_node_name].node
if (top_node.op == 'ConcatV2'):
for i in range(top_node.attr['N'].i):
insert_node_pairs.append([top_node.input[i], post_node_name])
elif (top_node.op in ('BatchMatMul', 'BatchMatMulV2')):
insert_node_pairs.append([top_node.input[0], post_node_name])
if (graph_info[top_node.input[1]].node.op != 'Const'):
insert_node_pairs.append([top_node.input[1], post_node_name])
elif (top_node.op in ('Conv2DBackpropInput', 'Conv3DBackpropInputV2')):
insert_node_pairs.append([top_node.input[2], post_node_name])
else:
refresh_pre_node_name = graph_info[pre_node_name].node.input[0]
refresh_pre_node = graph_info[Helper.node_name_from_input(refresh_pre_node_name)].node
if ((refresh_pre_node.op == 'Pad') and (top_node.op in ('Conv2D', 'Conv3D'))):
insert_node_pairs.append([refresh_pre_node_name, post_node_name])
refresh_pre_node_name = refresh_pre_node.input[0]
insert_node_pairs.append([refresh_pre_node_name, post_node_name])
output_names = []
for node_pair_names in insert_node_pairs:
for (index, each_node_name) in enumerate(node_pair_names):
name_with_sig = each_node_name
node_name_prefix = name_with_sig.replace(':', '__port__').replace('^', '__hat__')
print_node = Helper.create_node('Print', (node_name_prefix + '_print__{}'.format(index)), [(each_node_name + ':0'), (each_node_name + ':0')])
if (index == 0):
msg = ';{}__print__:'.format(each_node_name)
if ('swish_f32' in graph_info[pre_node_name].node.name):
src_dt = attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)
else:
src_dt = graph_info[pre_node_name].node.attr['T']
else:
break
print_node.attr['T'].CopyFrom(src_dt)
print_node.attr['message'].s = msg.encode()
print_node.attr['first_n'].i = (- 1)
print_node.attr['summarize'].i =
attr_u = [dtypes.as_dtype(src_dt.type).as_datatype_enum]
print_node.attr['U'].list.CopyFrom(attr_value_pb2.AttrValue.ListValue(type=attr_u))
post_node_names = graph_info[Helper.node_name_from_input(each_node_name)].outputs
if post_node_names:
for post_node_name in post_node_names:
post_node = graph_info[post_node_name].node
if (each_node_name not in post_node.input):
continue
if ((post_node.op == 'FusedBatchNormV3') and ('_print_identity' not in graph_info[Helper.node_name_from_input(post_node.name)].node.input[0])):
identity_node = Helper.create_node('Identity', (post_node.name + '_print_identity'), [graph_info[Helper.node_name_from_input(post_node.name)].node.input[0]])
identity_node.attr['T'].CopyFrom(src_dt)
cur_graph.add_node(identity_node, graph_info[Helper.node_name_from_input(post_node.name)].node.input[0], [post_node.name])
identity_node.input.append(('^' + print_node.name))
else:
post_node.input.append(('^' + print_node.name))
cur_graph.add_node(print_node, each_node_name, [])
else:
identity_node1 = Helper.create_node('Identity', (print_node.name + '_identity'), [print_node.name])
identity_node1.attr['T'].CopyFrom(src_dt)
cur_graph.add_node(print_node, each_node_name, [identity_node1.name])
cur_graph.add_node(identity_node1, print_node.name, [])
output_names.append(identity_node1.name)
return cur_graph.dump_graph()
def evaluate(self, model):
input_tensor_names = model.input_tensor_names
auto_trackable = model.model
infer = auto_trackable.signatures['serving_default']
for (idx, (inputs, _)) in enumerate(self.dataloader):
feed_dict = {}
if (len(input_tensor_names) == 1):
feed_dict[input_tensor_names[0]] = inputs
else:
assert (len(input_tensor_names) == len(inputs)), 'inputs len must equal with input_tensor'
for (i, input_tensor_name) in enumerate(input_tensor_names):
feed_dict[input_tensor_name] = inputs[i]
_ = infer(**feed_dict)
if (idx >= self.iterations):
break
def _inference(self, sampling_graph_def):
logger.info('Start sampling on calibration dataset for Smooth Quantization.')
reconstruct_saved_model(sampling_graph_def, self.func, self.frozen_func, self._saved_model, self.temp_path)
model = Model(self.temp_path, modelType='llm_saved_model')
self.evaluate(model)
def _inference_for_calibration(self, model):
sampling_graph_def = self._insert_print_for_activation(model)
tmp_dump_file = tempfile.mkstemp(suffix='.log')[1]
with CaptureOutputToFile(tmp_dump_file):
self._inference(sampling_graph_def)
self._parse_calibration_logs(tmp_dump_file)
del sampling_graph_def
def _get_weight_tensors(self):
model = load.load(self.model, [tag_constants.SERVING])
for weight_tensor in model.variables:
parsed_name = self.weight_name_mapping(weight_tensor.name)
if (parsed_name in self._sq_target_node_names):
self._sq_weight_tensor_dict[parsed_name] = weight_tensor.numpy()
assert (len(self._sq_weight_tensor_dict) == len(self._sq_target_node_names)), 'Failed to get weights for some nodes, please check variables'
def _generate_calibration_data(self, input_node_names, output_node_names):
    """Record activation/weight node names for matching ops, then run one sampling pass."""
    sorted_graph = QuantizeGraphHelper().get_sorted_graph(self.graph_def, input_node_names, output_node_names)
    for node in sorted_graph.node:
        # only target configured op types; skip nodes fed from while-loop bodies
        if ((node.op not in self.op_types) or ('while' in node.input[0])):
            continue
        self._sq_input_node_names.append(node.input[0])
        self.print_node_list.append([node.name])
        # key: weight input name -> op node name
        self._sq_target_node_names[node.input[1]] = node.name
    self._get_weight_tensors()
    sampling_graph_def = copy.deepcopy(self.graph_def)
    self._inference_for_calibration(sampling_graph_def)
def __call__(self, input_node_names, output_node_names):
    """Drive calibration and return per-channel maxima plus target/weight metadata."""
    (self.graph_def, self._saved_model, self.func, self.frozen_func, _, _) = parse_saved_model(self.model)
    self._generate_calibration_data(input_node_names, output_node_names)
    # reduce each recorded activation tensor to its per-channel percentile maximum
    max_vals_per_channel = {name: self._get_maxval_per_channel(tensor, percentile=self.percentile) for (name, tensor) in self._sq_output_tensor_dict.items()}
    return (max_vals_per_channel, self._sq_target_node_names, self._sq_weight_tensor_dict, self.graph_def)
class ImageExtents():
    """Axis-aligned bounding box of an image volume in world coordinates."""

    def __init__(self, minX, minY, minZ, maxX, maxY, maxZ):
        # lower corner of the extent box
        self.minX: float = minX
        self.minY: float = minY
        self.minZ: float = minZ
        # upper corner of the extent box
        self.maxX: float = maxX
        self.maxY: float = maxY
        self.maxZ: float = maxZ

    def get_c_image_extents(self):
        """Return a pointer to the equivalent C extent structure."""
        extent = bpConverterTypesC_ImageExtent(self.minX, self.minY, self.minZ, self.maxX, self.maxY, self.maxZ)
        return bpConverterTypesC_ImageExtentPtr(extent)
def register_box(name: str, box_type: Type=None, force: bool=False) -> Union[(Type, Callable)]:
    """Register a box class under `name`.

    Usable either directly (``register_box('foo', FooBox)``) — registers and
    returns the class — or as a decorator factory (``@register_box('foo')``).

    Raises:
        TypeError: if `force` is not a bool.
    """
    if (not isinstance(force, bool)):
        raise TypeError(f'force must be a boolean, but got {type(force)}')

    def _decorator(cls):
        _register_box(name=name, box_type=cls, force=force)
        return cls

    if (box_type is None):
        # decorator usage: defer registration until the class is defined
        return _decorator
    _register_box(name=name, box_type=box_type, force=force)
    return box_type
def disc(samples1, samples2, kernel, is_parallel=True, *args, **kwargs):
    """Mean pairwise kernel value (discrepancy) between two sample sets.

    Extra *args/**kwargs are forwarded to `kernel`. The parallel path fans
    out one worker task per element of `samples1`.
    """
    total = 0
    if is_parallel:
        with concurrent.futures.ThreadPoolExecutor() as executor:
            tasks = [(s1, samples2, partial(kernel, *args, **kwargs)) for s1 in samples1]
            for partial_sum in executor.map(kernel_parallel_worker, tasks):
                total += partial_sum
    else:
        for a in samples1:
            for b in samples2:
                total += kernel(a, b, *args, **kwargs)
    # normalize by the number of pairs
    total /= (len(samples1) * len(samples2))
    return total
class _CLAM_Base(nn.Module):
    """Shared base for CLAM attention-based multiple-instance learning models.

    Pipeline: tile features -> fc + attention net -> attention-weighted
    pooling -> slide-level logits (via the subclass-provided
    ``self.classifiers``), with an optional instance-level loss computed on
    the top/bottom ``k_sample`` attended tiles per class.
    """

    # preset [input_dim, hidden_dim, attention_dim] configurations
    sizes = {'small': [1024, 512, 256], 'big': [1024, 512, 384], 'multiscale': [2048, 512, 256]}

    def __init__(self, size: Union[(str, List[int])]='small', dropout: bool=False, k_sample: int=8, n_classes: int=2, instance_loss_fn: Optional[Callable]=None, subtyping: bool=False, gate: bool=True, multi_head_attention: bool=False) -> None:
        """Build the attention network and per-class instance classifiers.

        Args:
            size: preset name ('small'/'big'/'multiscale') or an explicit
                [input, hidden, attention] dimension list.
            dropout: add Dropout(0.25) after the first fc layer (and pass
                dropout into the attention net).
            k_sample: number of top/bottom tiles used for the instance loss.
            n_classes: number of slide-level classes.
            instance_loss_fn: loss for instance clustering; defaults to
                CrossEntropyLoss.
            subtyping: also evaluate out-of-class heads and average the
                instance loss over all heads.
            gate: use gated attention (Attn_Net_Gated) instead of plain.
            multi_head_attention: one attention head per class instead of a
                single shared head.
        """
        super().__init__()
        if (instance_loss_fn is None):
            instance_loss_fn = nn.CrossEntropyLoss()
        self.size = (self.sizes[size] if isinstance(size, str) else size)
        fc = [nn.Linear(self.size[0], self.size[1]), nn.ReLU()]
        if dropout:
            fc.append(nn.Dropout(0.25))
        att_fn = (Attn_Net_Gated if gate else Attn_Net)
        n_att = (1 if (not multi_head_attention) else n_classes)
        fc.append(att_fn(L=self.size[1], D=self.size[2], dropout=dropout, n_classes=n_att))
        self.attention_net = nn.Sequential(*fc)
        # one binary (in-class / out-of-class) classifier per slide class
        self.instance_classifiers = nn.ModuleList([nn.Linear(self.size[1], 2) for _ in range(n_classes)])
        self.k_sample = k_sample
        self.instance_loss_fn = instance_loss_fn
        self.n_classes = n_classes
        self.subtyping = subtyping

    def relocate(self):
        """Move submodules to the preferred device.

        NOTE(review): ``self.classifiers`` must be defined by the subclass
        before this is called.
        """
        device = get_device()
        self.attention_net = self.attention_net.to(device)
        self.classifiers = self.classifiers.to(device)
        self.instance_classifiers = self.instance_classifiers.to(device)

    # FIX: decorated as @staticmethod — call sites invoke these as
    # ``self.create_positive_targets(self.k_sample, device)`` with two
    # arguments, which would otherwise bind ``self`` to ``length``.
    @staticmethod
    def create_positive_targets(length, device):
        """Return a LongTensor of ``length`` ones (in-class targets)."""
        return torch.full((length,), 1, device=device).long()

    @staticmethod
    def create_negative_targets(length, device):
        """Return a LongTensor of ``length`` zeros (out-of-class targets)."""
        return torch.full((length,), 0, device=device).long()

    def _inst_eval(self, A, h, classifier, index=None):
        """Instance loss for an in-class head: top-k attended tiles are positives, bottom-k negatives."""
        device = h.device
        if (len(A.shape) == 1):
            A = A.view(1, (- 1))
        try:
            top_p_ids = torch.topk(A, self.k_sample)[1][(- 1)]
        except RuntimeError as e:
            raise RuntimeError(f'Error selecting top_k from sample with shape {A.shape}. Verify that all slides have at least {self.k_sample} tiles (min_tiles={self.k_sample}). Error: {e}')
        top_p = torch.index_select(h, dim=0, index=top_p_ids)
        # negate A to select the k lowest-attention tiles
        top_n_ids = torch.topk((- A), self.k_sample, dim=1)[1][(- 1)]
        top_n = torch.index_select(h, dim=0, index=top_n_ids)
        p_targets = self.create_positive_targets(self.k_sample, device)
        n_targets = self.create_negative_targets(self.k_sample, device)
        all_targets = torch.cat([p_targets, n_targets], dim=0)
        all_instances = torch.cat([top_p, top_n], dim=0)
        logits = classifier(all_instances)
        all_preds = torch.topk(logits, 1, dim=1)[1].squeeze(1)
        instance_loss = self.instance_loss_fn(logits, all_targets)
        return (instance_loss, all_preds, all_targets)

    def _inst_eval_out(self, A, h, classifier, index=None):
        """Instance loss for an out-of-class head (subtyping): top-k tiles get negative targets."""
        device = h.device
        if (len(A.shape) == 1):
            A = A.view(1, (- 1))
        try:
            top_p_ids = torch.topk(A, self.k_sample)[1][(- 1)]
        except RuntimeError as e:
            raise RuntimeError(f'Error selecting top_k from sample with shape {A.shape}. Verify that all slides have at least {self.k_sample} tiles (min_tiles={self.k_sample}). Error: {e}')
        top_p = torch.index_select(h, dim=0, index=top_p_ids)
        p_targets = self.create_negative_targets(self.k_sample, device)
        logits = classifier(top_p)
        p_preds = torch.topk(logits, 1, dim=1)[1].squeeze(1)
        instance_loss = self.instance_loss_fn(logits, p_targets)
        return (instance_loss, p_preds, p_targets)

    def _logits_from_m(self, M):
        # slide-level logits; ``self.classifiers`` is provided by the subclass
        return self.classifiers(M)

    def _process_inputs(self, h, label=None, instance_eval=False):
        """Unpack (h, label[, instance_eval]) tuples and validate the feature width."""
        if (isinstance(h, (list, tuple)) and (len(h) == 2)):
            (h, label) = h
        elif (isinstance(h, (list, tuple)) and (len(h) == 3)):
            (h, label, instance_eval) = h
        if (h.ndim == 3):
            h = h.squeeze()
        if (h.shape[1] != self.size[0]):
            raise RuntimeError(f'Input feature size ({h.shape[1]}) does not match size of model first linear layer ({self.size[0]}). ')
        return (h, label, instance_eval)

    def instance_loss(self, A, h, label):
        """Total instance-level loss over all class heads.

        Returns:
            (total_loss, predictions, targets) — note the order: predictions
            before targets.
        """
        total_inst_loss = 0.0
        all_preds = []
        all_targets = []
        # accept either a class index or an already one-hot label row
        if ((label.ndim < 2) or (label.shape[1] != self.n_classes)):
            inst_labels = F.one_hot(label, num_classes=self.n_classes).squeeze()
        else:
            inst_labels = label[0]
        for i in range(len(self.instance_classifiers)):
            inst_label = inst_labels[i].item()
            classifier = self.instance_classifiers[i]
            if (inst_label == 1):
                (instance_loss, preds, targets) = self._inst_eval(A, h, classifier, i)
                all_preds.extend(preds.cpu().numpy())
                all_targets.extend(targets.cpu().numpy())
            elif self.subtyping:
                (instance_loss, preds, targets) = self._inst_eval_out(A, h, classifier, i)
                all_preds.extend(preds.cpu().numpy())
                all_targets.extend(targets.cpu().numpy())
            else:
                continue
            total_inst_loss += instance_loss
        if self.subtyping:
            total_inst_loss /= len(self.instance_classifiers)
        return (total_inst_loss, np.array(all_preds), np.array(all_targets))

    def forward(self, h, label=None, instance_eval=False, return_attention=False):
        """Slide-level forward pass.

        Returns (logits, inst_loss_dict) or, with ``return_attention``,
        (logits, raw_attention, inst_loss_dict).
        """
        (h, label, instance_eval) = self._process_inputs(h, label, instance_eval)
        (A, h) = self.attention_net(h)
        A = A_raw = torch.transpose(A, 1, 0)
        A = F.softmax(A, dim=1)
        if instance_eval:
            # FIX: instance_loss returns (loss, preds, targets); the original
            # unpacked preds/targets swapped, mislabelling the loss dict.
            (inst_loss, inst_preds, inst_targets) = self.instance_loss(A, h, label)
            inst_loss_dict = {'instance_loss': inst_loss, 'inst_labels': inst_targets, 'inst_preds': inst_preds}
        else:
            inst_loss_dict = {}
        M = torch.mm(A, h)
        logits = self._logits_from_m(M)
        if return_attention:
            return (logits, A_raw, inst_loss_dict)
        else:
            return (logits, inst_loss_dict)

    def calculate_attention(self, h):
        """Raw (pre-softmax) attention scores, transposed to (heads, tiles)."""
        (h, *_) = self._process_inputs(h, None, None)
        (A, h) = self.attention_net(h)
        return torch.transpose(A, 1, 0)

    def get_last_layer_activations(self, h):
        """Return (pooled slide representation M, softmaxed+transposed attention A)."""
        (h, *_) = self._process_inputs(h, None, None)
        (A, h) = self.attention_net(h)
        A = F.softmax(A, dim=1)
        A = torch.transpose(A, 1, 0)
        M = torch.mm(A, h)
        return (M, A)
class _MSDataLoaderIter(_DataLoaderIter):
    """Iterator for a multi-scale data loader.

    Mirrors the worker setup of torch's legacy ``_DataLoaderIter`` but spawns
    worker processes running ``_ms_loop`` instead of the stock worker loop.
    """

    def __init__(self, loader):
        # Copy the loader's configuration onto this iterator.
        self.dataset = loader.dataset
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        # Pinning is only honored when CUDA is actually available.
        self.pin_memory = (loader.pin_memory and torch.cuda.is_available())
        self.timeout = loader.timeout
        self.done_event = threading.Event()
        self.sample_iter = iter(self.batch_sampler)
        if (self.num_workers > 0):
            # Multi-process path: one index queue per worker, one shared
            # result queue, following the legacy torch dataloader design.
            self.worker_init_fn = loader.worker_init_fn
            self.index_queues = [multiprocessing.Queue() for _ in range(self.num_workers)]
            self.worker_queue_idx = 0
            self.worker_result_queue = multiprocessing.SimpleQueue()
            self.batches_outstanding = 0
            self.worker_pids_set = False
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            # Out-of-order results are parked here until their index comes up.
            self.reorder_dict = {}
            # Derive deterministic per-worker seeds from one random base seed.
            base_seed = torch.LongTensor(1).random_()[0]
            self.workers = [multiprocessing.Process(target=_ms_loop, args=(self.dataset, self.index_queues[i], self.worker_result_queue, self.collate_fn, (base_seed + i), self.worker_init_fn, i)) for i in range(self.num_workers)]
            if (self.pin_memory or (self.timeout > 0)):
                # A manager thread shuttles results into a timeout-capable
                # queue (and pins tensors when pin_memory is set).
                self.data_queue = queue.Queue()
                if self.pin_memory:
                    maybe_device_id = torch.cuda.current_device()
                else:
                    maybe_device_id = None
                self.worker_manager_thread = threading.Thread(target=_worker_manager_loop, args=(self.worker_result_queue, self.data_queue, self.done_event, self.pin_memory, maybe_device_id))
                self.worker_manager_thread.daemon = True
                self.worker_manager_thread.start()
            else:
                # No pinning/timeout: consume directly from the result queue.
                self.data_queue = self.worker_result_queue
            for w in self.workers:
                # Daemonize so workers die with the parent process.
                w.daemon = True
                w.start()
            _update_worker_pids(id(self), tuple((w.pid for w in self.workers)))
            _set_SIGCHLD_handler()
            self.worker_pids_set = True
            # Prime the pipeline with two outstanding batches per worker.
            for _ in range((2 * self.num_workers)):
                self._put_indices()
def train(train_loader, model, criterion, optimizer, scheduler, epoch):
    """Run one training epoch: forward, loss, backward, optimizer and scheduler step.

    Logs batch time, loss, top-1 accuracy and LR every ``args.print_freq``
    batches, and writes epoch averages to tensorboard when enabled.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    model.train()
    end = time.time()
    # renamed `input` -> `inputs` to avoid shadowing the builtin
    for (i, (inputs, target)) in enumerate(train_loader):
        output = model(inputs)
        loss = criterion(output, target)
        prec1 = accuracy(output.data, target, topk=(1,))[0]
        losses.update(loss.data.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # per-batch LR schedule step (scheduler is stepped every iteration)
        scheduler.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            # FIX: format string was garbled ('...)\ {top1.val...' — invalid
            # '\ ' escape); restore the conventional '\tPrec@1 ' field.
            print('Epoch: [{0}][{1}/{2}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tPrec@1 {top1.val:.3f} ({top1.avg:.3f})\tLR {scheduler._last_lr[0]:.6f}'.format(epoch, i, len(train_loader), batch_time=batch_time, loss=losses, top1=top1, scheduler=scheduler))
    if args.tensorboard:
        log_value('train_loss', losses.avg, epoch)
        log_value('train_acc', top1.avg, epoch)
        log_value('learning_rate', scheduler._last_lr[0], epoch)
class RayScenario(Scenario, ABC):
    """Abstract scenario executed on a Ray cluster.

    Captures the cluster resource budget (CPUs, GPUs, object-store memory
    cap) and a predicate that selects which Ray result dicts get logged.
    """

    def __init__(self, name: str, ray_cluster_cpus: Union[(int, float)], ray_cluster_gpus: Union[(int, float)], ray_object_store_memory_cap_gigabytes: Union[(int, float)], ray_should_log_result_filter: Callable[([ResultDict], bool)]):
        super().__init__(name=name)
        # Resource budget requested from the Ray cluster.
        self.ray_cluster_cpus = ray_cluster_cpus
        self.ray_cluster_gpus = ray_cluster_gpus
        self.ray_object_store_memory_cap_gigabytes = ray_object_store_memory_cap_gigabytes
        # Predicate: given a ResultDict, return True to log that result.
        self.ray_should_log_result_filter = ray_should_log_result_filter
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 convs with BatchNorm and a residual shortcut."""

    def __init__(self, in_chan, out_chan, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_chan, out_chan, stride)
        self.bn1 = BatchNorm2d(out_chan)
        self.conv2 = conv3x3(out_chan, out_chan)
        self.bn2 = BatchNorm2d(out_chan)
        self.relu = nn.ReLU(inplace=True)
        # 1x1 projection shortcut when channel count or spatial size changes
        self.downsample = None
        if ((in_chan != out_chan) or (stride != 1)):
            self.downsample = nn.Sequential(nn.Conv2d(in_chan, out_chan, kernel_size=1, stride=stride, bias=False), BatchNorm2d(out_chan))

    def forward(self, x):
        branch = self.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        identity = (x if (self.downsample is None) else self.downsample(x))
        return self.relu(identity + branch)
(version='2.0')
def check_model(model):
    """Return True when the ONNX graph contains QLinear-style quantized ops.

    Integer-suffixed ops are noted (and reported as skipped); a quantized
    Gather is recognized by an int8/uint8 initializer on its first input.
    """
    has_integerop = False
    has_qlinearop = False
    for node in model.graph.node:
        op = node.op_type
        if op.endswith('Integer'):
            has_integerop = True
        elif (op.startswith('QLinear') or (op in ('QAttention', 'QGemm', 'QEmbedLayerNormalization'))):
            has_qlinearop = True
        elif (op == 'Gather'):
            input_data = find_by_name(node.input[0], model.graph.initializer)
            if ((input_data is not None) and (numpy_helper.to_array(input_data).dtype in ['int8', 'uint8'])):
                has_qlinearop = True
    if has_integerop:
        logger.info('This model has Integer ops, these ops will be skipped.')
    if has_qlinearop:
        return True
    logger.info('This model has no QLinear ops, save the original model.')
    return False
_operation
def real(a: torch.Tensor):
    """Return the real component of a complex tensor stored as (..., 2).

    Raises:
        ValueError: if `a` is a plain real tensor (no (re, im) pair to index).
    """
    if is_real(a):
        raise ValueError('Last dimension must have length 2.')
    # real part is stored at index 0 of the trailing dimension
    return a[..., 0]
def find_missing(xs, ys, rs):
    """Return (missing_x, missing_y) for expected points lacking any required rotation.

    A pose (x, y, r) covers an expected point when it lies within 0.1 of it
    and its heading is within 0.26 rad of one of the required rotations.
    """
    required_rotations = [0, 3.14, 1.57, (- 1.57), 0.78, (- 0.78), 2.35, (- 2.35)]
    expectedPoints = []
    missing_x = []
    missing_y = []
    for (xd, yd) in zip(*required_points()):
        expectedPoints.append((xd, yd))
        covered = [False] * len(required_rotations)
        for (x, y, r) in zip(xs, ys, rs):
            if (math.sqrt((((x - xd) ** 2) + ((y - yd) ** 2))) <= 0.1):
                for (k, rd) in enumerate(required_rotations):
                    if (abs(angle_difference(r, rd)) <= 0.26):
                        covered[k] = True
        # a point is missing when at least one required rotation is uncovered
        if (not all(covered)):
            missing_x.append(xd)
            missing_y.append(yd)
    return (missing_x, missing_y)
def initialize_hyperparameters(PATHS: dict, load_target: str, config_name: str='default', n_envs: int=1):
    """Load (or create) the hyperparameter json, derive n_steps, persist and print it."""
    if (load_target is None):
        # fresh training run: start from the named config template
        hyperparams = load_hyperparameters_json(PATHS=PATHS, from_scratch=True, config_name=config_name)
        hyperparams['agent_name'] = PATHS['model'].split('/')[(- 1)]
    else:
        # resuming: read the hyperparameters stored next to the model
        hyperparams = load_hyperparameters_json(PATHS=PATHS)
    check_batch_size(n_envs, hyperparams['batch_size'], hyperparams['m_batch_size'])
    # steps per env so that one rollout covers a full batch
    hyperparams['n_steps'] = int((hyperparams['batch_size'] / n_envs))
    write_hyperparameters_json(hyperparams, PATHS)
    print_hyperparameters(hyperparams)
    return hyperparams
_criterion('legacy_masked_lm_loss')
class LegacyMaskedLmLoss(FairseqCriterion):
    """Masked LM criterion: MLM cross-entropy plus an optional weighted
    next-sentence-prediction (NSP) loss.

    NOTE(review): ``add_args``, ``aggregate_logging_outputs`` and
    ``logging_outputs_can_be_summed`` take no ``self``/``cls`` parameter —
    they look like ``@staticmethod``s whose decorators were lost in this
    chunk; confirm against the original source.
    """

    def __init__(self, task, masked_lm_only, nsp_loss_weight):
        super().__init__(task)
        # When True, the sentence-prediction term is skipped entirely.
        self.masked_lm_only = masked_lm_only
        # Scale factor applied to the NSP loss term.
        self.nsp_loss_weight = nsp_loss_weight

    def add_args(parser):
        """Register criterion-specific command-line arguments."""
        parser.add_argument('--masked-lm-only', default=False, action='store_true', help='compute MLM loss only')
        parser.add_argument('--nsp-loss-weight', default=1.0, type=float, help='weight for next sentence prediction loss (default 1)')

    def forward(self, model, sample, reduce=True):
        """Compute the combined loss for one sample.

        Returns (loss, sample_size, logging_output); sample_size is fixed at
        1 because both loss terms are already normalized by their own
        token/sentence counts.
        """
        (lm_logits, output_metadata) = model(**sample['net_input'])
        # flatten to (tokens, vocab) vs (tokens,) for cross-entropy
        lm_logits = lm_logits.view((- 1), lm_logits.size((- 1)))
        lm_targets = sample['lm_target'].view((- 1))
        lm_loss = compute_cross_entropy_loss(lm_logits, lm_targets, self.padding_idx)
        # normalize MLM loss by the number of non-pad target tokens
        ntokens = utils.strip_pad(lm_targets, self.padding_idx).numel()
        loss = (lm_loss / ntokens)
        nsentences = sample['nsentences']
        sentence_loss = None
        if (not self.masked_lm_only):
            sentence_logits = output_metadata['sentence_logits']
            sentence_targets = sample['sentence_target'].view((- 1))
            nsentences = sentence_targets.size(0)
            if (sentence_logits is not None):
                sentence_loss = compute_cross_entropy_loss(sentence_logits, sentence_targets)
                # NSP term normalized per sentence, then weighted
                loss += (self.nsp_loss_weight * (sentence_loss / nsentences))
        sample_size = 1
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'lm_loss': (utils.item(lm_loss.data) if reduce else lm_loss.data), 'sentence_loss': ((utils.item(sentence_loss.data) if reduce else sentence_loss.data) if (sentence_loss is not None) else 0.0), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
        return (loss, sample_size, logging_output)

    def aggregate_logging_outputs(logging_outputs):
        """Aggregate per-worker logging outputs; losses are reported in bits (divided by log 2)."""
        lm_loss_sum = sum((log.get('lm_loss', 0) for log in logging_outputs))
        sentence_loss_sum = sum((log.get('sentence_loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        agg_loss = sum((log.get('loss', 0) for log in logging_outputs))
        agg_output = {'loss': (((agg_loss / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'lm_loss': (((lm_loss_sum / ntokens) / math.log(2)) if (ntokens > 0) else 0.0), 'sentence_loss': (((sentence_loss_sum / nsentences) / math.log(2)) if (nsentences > 0) else 0.0), 'nll_loss': (((lm_loss_sum / ntokens) / math.log(2)) if (ntokens > 0) else 0.0), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
        return agg_output

    def logging_outputs_can_be_summed() -> bool:
        """Whether distributed logging outputs can be summed before aggregation."""
        return True
class DeviceType(object):
    """String constants naming the compute units a model can target."""

    CPU = 'CPU'
    GPU = 'GPU'
    HEXAGON = 'HEXAGON'
    HTA = 'HTA'
    APU = 'APU'
    HTP = 'HTP'
    QUANTIZE = 'QUANTIZE'
class DeResNetBlockGroupNorm(nn.Module):
    """Residual decoder block: two transposed 3x3 convolutions with GroupNorm
    and a projection shortcut when the shape changes."""

    def __init__(self, inplanes, planes, num_groups, stride=1, output_padding=0, activation='relu'):
        super(DeResNetBlockGroupNorm, self).__init__()
        assert (activation in ['relu', 'elu', 'leaky_relu'])
        self.deconv1 = deconv3x3(inplanes, planes, stride, output_padding)
        self.gn1 = nn.GroupNorm(num_groups, planes)
        # select the nonlinearity (leaky_relu is the remaining case)
        if (activation == 'relu'):
            self.activation = nn.ReLU(inplace=True)
        elif (activation == 'elu'):
            self.activation = nn.ELU(inplace=True)
        else:
            self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
        self.deconv2 = deconv3x3(planes, planes)
        self.gn2 = nn.GroupNorm(num_groups, planes)
        # projection shortcut when spatial or channel dimensions change
        if ((stride != 1) or (inplanes != planes)):
            self.downsample = nn.Sequential(nn.ConvTranspose2d(inplanes, planes, kernel_size=1, stride=stride, output_padding=output_padding, bias=False), nn.GroupNorm(num_groups, planes))
        else:
            self.downsample = None
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize every GroupNorm to identity (weight=1, bias=0)."""
        for gn in (self.gn1, self.gn2):
            nn.init.constant_(gn.weight, 1.0)
            nn.init.constant_(gn.bias, 0.0)
        if (self.downsample is not None):
            assert isinstance(self.downsample[1], nn.GroupNorm)
            nn.init.constant_(self.downsample[1].weight, 1.0)
            nn.init.constant_(self.downsample[1].bias, 0.0)

    def init(self, x, init_scale=1.0):
        # data-dependent init hook; this block needs none, so just forward
        with torch.no_grad():
            return self(x)

    def forward(self, x):
        out = self.activation(self.gn1(self.deconv1(x)))
        out = self.gn2(self.deconv2(out))
        shortcut = (x if (self.downsample is None) else self.downsample(x))
        out = out + shortcut
        return self.activation(out)
def distributed_init(cfg: FairseqConfig):
    """Initialize torch.distributed (or XLA on TPU) from a fairseq config.

    Also sets up Megatron model parallelism when requested, quiets logging on
    non-master ranks, and returns the distributed rank of this process.
    """
    if isinstance(cfg, Namespace):
        # legacy argparse path: convert to the omegaconf-based config
        from fairseq.dataclass.utils import convert_namespace_to_omegaconf
        cfg = convert_namespace_to_omegaconf(cfg)
    if (not cfg.common.tpu):
        if (torch.distributed.is_available() and torch.distributed.is_initialized()):
            warnings.warn('Distributed is already initialized, cannot initialize twice!')
        else:
            logger.info('distributed init (rank {}): {}'.format(cfg.distributed_training.distributed_rank, cfg.distributed_training.distributed_init_method))
            dist.init_process_group(backend=cfg.distributed_training.distributed_backend, init_method=cfg.distributed_training.distributed_init_method, world_size=cfg.distributed_training.distributed_world_size, rank=cfg.distributed_training.distributed_rank)
            logger.info('initialized host {} as rank {}'.format(socket.gethostname(), cfg.distributed_training.distributed_rank))
            if torch.cuda.is_available():
                # dummy all-reduce to eagerly initialize the communicator
                dist.all_reduce(torch.zeros(1).cuda())
        cfg.distributed_training.distributed_rank = torch.distributed.get_rank()
    else:
        # TPU path: torch_xla supplies world size and ordinals
        assert (xm.xrt_world_size() == cfg.distributed_training.distributed_world_size)
        global _USE_XLA
        _USE_XLA = True
        cfg.distributed_training.device_id = xm.get_local_ordinal()
        cfg.distributed_training.distributed_rank = xm.get_ordinal()
        xm.rendezvous('distributed_init')
    if is_master(cfg.distributed_training):
        logging.getLogger().setLevel(logging.INFO)
    else:
        # keep non-master ranks quiet
        logging.getLogger().setLevel(logging.WARNING)
    if (cfg.common.model_parallel_size > 1):
        try:
            from fairseq.model_parallel.megatron.mpu import initialize_model_parallel, model_parallel_cuda_manual_seed
        except ImportError:
            raise ImportError('\n\nPlease install the megatron submodule:\n\n git submodule update --init fairseq/model_parallel/megatron')
        global _USE_MEGATRON
        _USE_MEGATRON = True
        initialize_model_parallel(cfg.common.model_parallel_size)
        model_parallel_cuda_manual_seed(cfg.common.seed)
        model_part_number = get_model_parallel_rank()
        cfg.checkpoint.checkpoint_suffix += '-model_part-{0}'.format(model_part_number)
    if (hasattr(cfg, 'model') and (getattr(cfg.model, 'base_layers', 0) > 0)):
        # NOTE(review): this overwrites (not appends to) any model-parallel
        # suffix assigned just above — confirm this is intended.
        cfg.checkpoint.checkpoint_suffix = f'-rank-{cfg.distributed_training.distributed_rank}'
    return cfg.distributed_training.distributed_rank
def evaluate(gold, guess, ks, rank_keys):
    """Score retrieval `guess` against `gold` at cutoffs `ks` and pretty-print the result."""
    printer = pprint.PrettyPrinter(indent=4)
    gold_dataset = kilt_utils.load_data(gold)
    guess_dataset = kilt_utils.load_data(guess)
    # align/validate the two datasets against each other
    (gold_dataset, guess_dataset) = eval_downstream.validate_input(gold_dataset, guess_dataset)
    guess_dataset = filter_answers(guess_dataset)
    result = compute(gold_dataset, guess_dataset, ks, rank_keys)
    printer.pprint(result)
    return result
def compute(inp, outp, settings, force):
    """Compute mel features for `inp` into `outp` (.npz), plus augmented variants.

    Audio is loaded lazily and at most once, shared between the base and the
    augmentation passes. Existing non-empty outputs are kept unless `force`.
    """
    sr = settings['samplerate']
    _lazy_y = None

    def load():
        # decode the audio only on first use, then reuse it
        nonlocal _lazy_y
        if (_lazy_y is None):
            (_lazy_y, _sr) = librosa.load(inp, sr=sr)
            assert (_sr == sr), _sr
        return _lazy_y

    base_exists = os.path.exists(outp)
    base_size = (os.stat(outp).st_size if base_exists else 0)
    base_valid = (base_exists and (base_size > 0))
    if ((not base_valid) or force):
        start_time = time.time()
        y = load()
        loaded_time = time.time()
        mels = features.compute_mels(y, settings)
        computed_time = time.time()
        numpy.savez(outp, mels)
        saved_time = time.time()
    if settings['augmentations']:
        aug_paths = [outp.replace('.npz', '.aug{}.npz'.format(aug)) for aug in range(12)]
        aug_present = [os.path.exists(p) for p in aug_paths]
        if ((not all(aug_present)) or force):
            y = load()
            augmented = augmentations(y, sr).values()
            # the augmentation set is fixed at 12 variants
            assert (settings['augmentations'] == 12)
            assert (len(augmented) == settings['augmentations']), len(augmented)
            for (augdata, path) in zip(augmented, aug_paths):
                numpy.savez(path, features.compute_mels(augdata, settings))
    return outp
class VegaHTML(object):
    """Wraps a renderer's Vega specification and emits it as embeddable HTML."""

    def __init__(self, renderer):
        # snapshot the renderer state into a plain Vega spec dict
        self.specification = dict(width=renderer.figwidth, height=renderer.figheight, data=renderer.data, scales=renderer.scales, axes=renderer.axes, marks=renderer.marks)

    def html(self):
        """Render a uniquely-id'd <div> plus the Vega bootstrap <script>."""
        vis_id = random.randint(0, (2 ** 16))
        parts = [('<div id="vis%d"></div>' % vis_id), '<script>\n', (VEGA_TEMPLATE % (json.dumps(self.specification), vis_id)), '</script>\n']
        return ''.join(parts)

    def _repr_html_(self):
        # IPython rich-display hook
        return self.html()
def build_fake_yaml():
    """Round-trip a canned MSE-tuning config through yaml and write it to 'fake_yaml.yaml'."""
    fake_yaml = '\n model:\n name: fake_yaml\n framework: tensorflow\n inputs: x\n outputs: op_to_store\n device: cpu\n quantization:\n calibration:\n sampling_size: 10\n evaluation:\n accuracy:\n metric:\n topk: 1\n tuning:\n strategy:\n name: mse\n exit_policy:\n max_trials: 1\n accuracy_criterion:\n relative: 0.01\n workspace:\n path: saved\n '
    parsed = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
    with open('fake_yaml.yaml', 'w', encoding='utf-8') as f:
        yaml.dump(parsed, f)
def preprocess_iwslt17(root: str, src: str, tgt: str, bpe_size: Optional[int], need_chars: bool, bbpe_size: Optional[int], need_bytes: bool):
    """Prepare IWSLT'17 data: convert raw files, pretokenize, and emit the
    requested subword / byte / char encodings.

    Args:
        root: output directory; raw data lives in ``{root}/{src}-{tgt}``.
        src, tgt: source / target language codes.
        bpe_size: BPE vocab size, or None to skip BPE.
        need_chars: also produce character-level files.
        bbpe_size: byte-level BPE vocab size, or None to skip.
        need_bytes: also produce byte-level files.
    """
    in_root = op.join(root, f'{src}-{tgt}')
    # 1) convert raw train/dev/test into plain parallel text
    for lang in [src, tgt]:
        _convert_train(op.join(in_root, f'train.tags.{src}-{tgt}.{lang}'), op.join(root, f'train.{lang}'))
        _convert_xml(op.join(in_root, f'IWSLT17.TED.dev2010.{src}-{tgt}.{lang}.xml'), op.join(root, f'valid.{lang}'))
        _convert_xml(op.join(in_root, f'IWSLT17.TED.tst2015.{src}-{tgt}.{lang}.xml'), op.join(root, f'test.{lang}'))
    # 2) Moses pretokenization
    for lang in [src, tgt]:
        for split in SPLITS:
            pretokenize(op.join(root, f'{split}.{lang}'), op.join(root, f'{split}.moses.{lang}'), src, tgt)
    # 3) subword BPE
    if (bpe_size is not None):
        concated_train_path = op.join(root, 'train.all')
        # FIX: the concatenated training files were hard-coded to 'fr'/'en';
        # use the src/tgt parameters so other language pairs work.
        _concat_files([op.join(root, f'train.moses.{src}'), op.join(root, f'train.moses.{tgt}')], concated_train_path)
        bpe_model_prefix = op.join(root, f'spm_bpe{bpe_size}')
        _get_bpe(concated_train_path, bpe_model_prefix, bpe_size)
        os.remove(concated_train_path)
        for lang in [src, tgt]:
            for split in SPLITS:
                _apply_bpe((bpe_model_prefix + '.model'), op.join(root, f'{split}.moses.{lang}'), op.join(root, f'{split}.moses.bpe{bpe_size}.{lang}'))
    # 4) byte-level encoding
    if need_bytes:
        for lang in [src, tgt]:
            for split in SPLITS:
                _get_bytes(op.join(root, f'{split}.moses.{lang}'), op.join(root, f'{split}.moses.bytes.{lang}'))
    # 5) character-level encoding
    if need_chars:
        for lang in [src, tgt]:
            for split in SPLITS:
                _get_chars(op.join(root, f'{split}.moses.{lang}'), op.join(root, f'{split}.moses.chars.{lang}'))
    # 6) byte-level BPE
    if (bbpe_size is not None):
        bchar_path = op.join(root, 'train.bchar')
        _convert_to_bchar(op.join(root, 'train.moses'), src, tgt, bchar_path)
        bbpe_model_prefix = op.join(root, f'spm_bbpe{bbpe_size}')
        _get_bpe(bchar_path, bbpe_model_prefix, bbpe_size)
        os.remove(bchar_path)
        for lang in [src, tgt]:
            for split in SPLITS:
                _apply_bbpe((bbpe_model_prefix + '.model'), op.join(root, f'{split}.moses.{lang}'), op.join(root, f'{split}.moses.bbpe{bbpe_size}.{lang}'))
class AudioFinetuningConfig(AudioPretrainingConfig):
    """Task configuration for fine-tuning audio models.

    Extends the pretraining config with validation-time WER and BLEU
    evaluation options.  NOTE(review): this looks like a dataclass-style
    config whose decorator is not visible in this chunk — confirm.
    """
    # --- WER evaluation (Seq2Seq models) ---
    eval_wer: bool = field(default=False, metadata={'help': 'compute WER for Seq2Seq models'})
    eval_wer_config: GenerationConfig = field(default_factory=(lambda : GenerationConfig()), metadata={'help': 'beam search config for evaluating wer during training'})
    eval_wer_tokenizer: Any = field(default=None, metadata={'help': 'tokenizer config for evaluating wer during training'})
    eval_wer_post_process: str = field(default='letter', metadata={'help': 'remove BPE tokens before scoring (can be sentencepiece, letter, and more)'})
    # --- BLEU evaluation ---
    eval_bleu: bool = field(default=False, metadata={'help': 'evaluation with BLEU scores'})
    eval_bleu_detok: Optional[str] = field(default=None, metadata={'help': "detokenize before computing BLEU (e.g., 'moses'); required if using --eval-bleu; use 'space' to disable detokenization; see fairseq.data.encoders for other options"})
    eval_bleu_detok_args: str = field(default='{}', metadata={'help': 'args for building the tokenizer, if needed'})
    eval_tokenized_bleu: bool = field(default=False, metadata={'help': 'compute tokenized BLEU instead of sacrebleu'})
    eval_bleu_remove_bpe: Optional[str] = field(default=None, metadata={'help': 'remove BPE before computing BLEU'})
    eval_bleu_args: str = field(default='{}', metadata={'help': 'generation args for BLUE scoring, e.g., \'{"beam": 4, "lenpen": 0.6}\''})
    eval_bleu_print_samples: bool = field(default=False, metadata={'help': 'print sample generations during validation'})
    # --- decoding mode ---
    autoregressive: bool = field(default=False, metadata={'help': "required for autoregressive decoders (like seq2seq models); adds 'prev_output_tokens' to input and appends eos to target"})
    rebuild_batches: bool = True
def get_document_ids(source_docs, indexes):
    """Map span indexes onto their source documents.

    `indexes` maps a document position to a (start, end) pair; entries whose
    position falls outside `source_docs` are silently dropped. Results are
    ordered by document position.
    """
    ordered_spans = sorted(((pos, span[0], span[1]) for (pos, span) in indexes.items()), key=(lambda item: item[0]))
    doc_ids = []
    for (pos, span_start, span_end) in ordered_spans:
        try:
            doc_ids.append((source_docs[pos], span_start, span_end))
        except IndexError:
            # position refers to a document we don't have — skip it
            pass
    return doc_ids
def RI(sentence, alpha_ri, n_aug=9):
    """Random-insertion augmentation: return n_aug shuffled variants plus the cleaned original."""
    cleaned = get_only_chars(sentence)
    words = cleaned.split(' ')
    # at least one insertion, proportional to sentence length
    n_ri = max(1, int((alpha_ri * len(words))))
    augmented_sentences = []
    for _ in range(n_aug):
        augmented_sentences.append(' '.join(random_addition(words, n_ri)))
    augmented_sentences = [get_only_chars(s) for s in augmented_sentences]
    shuffle(augmented_sentences)
    # the (cleaned) original always comes last
    augmented_sentences.append(cleaned)
    return augmented_sentences
def ResNet18Compressed(channels):
    """Build a compressed ResNet-18: four stages of two BasicBlockCompressed each."""
    return ResNetCompressed(BasicBlockCompressed, [2, 2, 2, 2], channels)
_registry(operator_type='BinaryAdd')
class BinaryAdd(Operator):
    """Operator node for elementwise binary addition."""

    def __init__(self):
        super().__init__()
def _to_py_obj(x):
if (x.lower() in ['true', 'yes', 'on']):
return True
if (x.lower() in ['false', 'no', 'off']):
return False
try:
obj = eval(x)
if (type(obj).__name__ in ['int', 'float', 'tuple', 'list', 'dict', 'NoneType']):
x = obj
except:
pass
return x |
class Speedometer(object):
    """Batch-end callback that logs throughput, per-stage timings, ETA and
    training metrics every ``frequent`` batches."""

    def __init__(self, batch_size, frequent=50, batches_per_epoch=None, epochs=None):
        self.batch_size = batch_size
        self.frequent = frequent
        self.batches_per_epoch = batches_per_epoch
        self.epochs = epochs
        self.epoch = (- 1)
        self.init = False
        self.tic = 0
        self.last_count = 0
        # accumulated per-stage timings since the last report window
        self.data_in_time = 0.0
        self.data_transfer_time = 0.0
        self.forward_time = 0.0
        self.backward_time = 0.0
        self.optimizer_time = 0.0
        self.metric_time = 0.0

    def __call__(self, param):
        """Handle one batch-end event; ``param`` carries nbatch, epoch, rank,
        per-stage timings and an optional eval_metric."""
        count = param.nbatch
        if (self.last_count > count):
            # batch counter went backwards: a new epoch started
            self.init = False
        self.last_count = count
        self.data_in_time += param.data_in_time
        self.data_transfer_time += param.data_transfer_time
        self.forward_time += param.forward_time
        self.backward_time += param.backward_time
        self.optimizer_time += param.optimizer_time
        self.metric_time += param.metric_time
        if self.init:
            if ((count % self.frequent) == 0):
                speed = ((self.frequent * self.batch_size) / (time.time() - self.tic))
                # average the accumulated stage timings over the window
                data_in_time = (self.data_in_time / self.frequent)
                data_transfer_time = (self.data_transfer_time / self.frequent)
                forward_time = (self.forward_time / self.frequent)
                backward_time = (self.backward_time / self.frequent)
                optimizer_time = (self.optimizer_time / self.frequent)
                metric_time = (self.metric_time / self.frequent)
                # remaining samples / current speed -> ETA, split into d/h/m
                eta = (((((((self.epochs - self.epoch) - 1) * self.batches_per_epoch) + self.batches_per_epoch) - param.nbatch) * self.batch_size) / speed)
                eta = int((eta / 60.0))
                eta_m = (eta % 60)
                eta_h = (int(((eta - eta_m) / 60)) % 24)
                eta_d = int((((eta - eta_m) - (eta_h * 60)) / (24 * 60)))
                s = ''
                if (param.eval_metric is not None):
                    prefix = ('Epoch[%d] Batch [%d]\t' % (param.epoch, count))
                    (name, value) = param.eval_metric.get()
                    s = (prefix + ('Speed: %.2f samples/s ETA: %d d %2d h %2d m\tData: %.3f Tran: %.3f F: %.3f B: %.3f O: %.3f M: %.3f\tTrain-' % (speed, eta_d, eta_h, eta_m, data_in_time, data_transfer_time, forward_time, backward_time, optimizer_time, metric_time)))
                    for (n, v) in zip(name, value):
                        s += ('%s=%f,\t' % (n, v))
                else:
                    prefix = ('Epoch[%d] Batch [%d]\t' % (param.epoch, count))
                    s = (prefix + ('Speed: %.2f ETA: %d d %2d h %2d m samples/s\tData: %.3f Tran: %.3f F: %.3f B: %.3f O: %.3f M: %.3f' % (speed, eta_d, eta_h, eta_m, data_in_time, data_transfer_time, forward_time, backward_time, optimizer_time, metric_time)))
                if (param.rank is not None):
                    s = (('Rank[%3d]' % param.rank) + s)
                logging.info(s)
                print(s)
                # reset the timing window
                self.tic = time.time()
                self.data_in_time = 0.0
                self.data_transfer_time = 0.0
                self.forward_time = 0.0
                self.backward_time = 0.0
                self.optimizer_time = 0.0
                self.metric_time = 0.0
        else:
            # first batch of an epoch: print a header line without speed/ETA
            self.init = True
            self.epoch += 1
            if (param.eval_metric is not None):
                (name, value) = param.eval_metric.get()
                s = ('Epoch[%d] Batch [%d]\tSpeed: - samples/sec ETA: - d - h - m\tTrain-' % (param.epoch, 0))
                for (n, v) in zip(name, value):
                    s += ('%s=%f,\t' % (n, v))
            else:
                s = ('Epoch[%d] Batch [%d]\tSpeed: - samples/sec ETA: - d - h - m' % (param.epoch, 0))
            if (param.rank is not None):
                s = (('Rank[%3d]' % param.rank) + s)
            logging.info(s)
            print(s)
            self.tic = time.time()
def sample(nodeInfor, edgeInfor):
    """Randomly drop 20% of non-bridge edges twice and annotate each edge.

    Builds a graph from node/edge records, keeps a minimum spanning tree
    intact so the graph stays connected, draws two independent 20% deletion
    samples from the remaining edges, and returns edge tuples extended with
    two 0/1 flags (0 = deleted in that sample).
    """
    nodeId = [nodeInfor[i][0] for i in range(len(nodeInfor))]
    # NOTE(review): longitude/latitude are collected but never used here
    longitude = [nodeInfor[i][1] for i in range(len(nodeInfor))]
    latitude = [nodeInfor[i][2] for i in range(len(nodeInfor))]
    n = len(nodeId)
    # empty adjacency matrix -> graph with n isolated nodes
    A1 = np.array(([([0] * n)] * n))
    Graph1 = nx.Graph(A1)
    column = [str(nodeId[i]) for i in range(n)]
    # relabel the integer node indices to their string node ids
    mapping = {0: str(nodeId[0])}
    for i in range(0, (len(column) - 1)):
        mapping.setdefault((i + 1), column[(i + 1)])
    Graph1 = nx.relabel_nodes(Graph1, mapping)
    edgeSet1 = list()
    for i in range(len(edgeInfor)):
        edgeRow = edgeInfor[i]
        edgeSet1.append((str(edgeRow[0]), str(edgeRow[1])))
    # deduplicate edges before adding them to the graph
    edgeSet = list(set(edgeSet1))
    Graph1.add_edges_from(edgeSet)
    deleteNumber = int((len(Graph1.edges) * 0.2))
    # edges outside a spanning tree can be removed without disconnecting the graph
    T = nx.minimum_spanning_tree(Graph1)
    potentialDelete = list((set(Graph1.edges) - set(T.edges)))
    realDelete1 = random.sample(potentialDelete, deleteNumber)
    realDelete2 = random.sample(potentialDelete, deleteNumber)
    print('len(realDelete1)', len(realDelete1), 'len(realDelete2)', len(realDelete2))
    edgeInforNew = list()
    for i in range(len(edgeInfor)):
        edgeRow = edgeInfor[i]
        item = list()
        # flag for sample 1: 0 if this (undirected) edge was deleted
        if (((str(edgeRow[0]), str(edgeRow[1])) in realDelete1) or ((str(edgeRow[1]), str(edgeRow[0])) in realDelete1)):
            item = [edgeRow[0], edgeRow[1], edgeRow[2], 0]
        else:
            item = [edgeRow[0], edgeRow[1], edgeRow[2], 1]
        # flag for sample 2, appended the same way
        if (((str(edgeRow[0]), str(edgeRow[1])) in realDelete2) or ((str(edgeRow[1]), str(edgeRow[0])) in realDelete2)):
            item.append(0)
        else:
            item.append(1)
        edgeInforNew.append(item)
    # convert the annotated rows to 5-tuples for the caller
    returnEdgeInforNew = list()
    for i in range(len(edgeInforNew)):
        returnEdgeInforNew.append((edgeInforNew[i][0], edgeInforNew[i][1], edgeInforNew[i][2], edgeInforNew[i][3], edgeInforNew[i][4]))
    return returnEdgeInforNew
def _relaunch():
    """Re-run the current command line under the same interpreter and exit
    with the child's return code."""
    log.warn('Relaunching...')
    # An in-process installer invocation shows up as ``-c install ...``;
    # substitute a runnable script name before re-executing.
    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
        sys.argv[0] = 'setup.py'
    sys.exit(subprocess.call([sys.executable] + sys.argv))
def find_program(basename):
    """Locate an executable called *basename*.

    On Windows, if the name has no executable extension, the usual
    extensions are tried first (then the bare name). Returns the installed
    path reported by ``is_program_installed`` or ``None`` if not found.
    """
    candidates = [basename]
    if os.name == 'nt':
        extensions = ('.exe', '.bat', '.cmd', '.dll')
        if not basename.endswith(extensions):
            candidates = [basename + ext for ext in extensions]
            candidates.append(basename)
    for candidate in candidates:
        found = is_program_installed(candidate)
        if found:
            return found
    return None
class Dataset(object):
    """In-memory collection of annotation instances.

    Each instance is a dict expected to carry at least the keys
    ``'phrase'``, ``'image'`` and ``'box'``. Convenience properties expose
    per-key column views.
    """

    def __init__(self):
        # Backing store; one dict per instance, in insertion order.
        self._instances = []

    def add_instance(self, propertyDict):
        """Append one instance dict to the collection."""
        self._instances.append(propertyDict)

    def get_phraselist(self):
        """All ``'phrase'`` values, in insertion order."""
        return [entry['phrase'] for entry in self._instances]

    def get_imagelist(self):
        """All ``'image'`` values, in insertion order."""
        return [entry['image'] for entry in self._instances]

    def get_boxlist(self):
        """All ``'box'`` values, in insertion order."""
        return [entry['box'] for entry in self._instances]

    def get_instances(self):
        """The raw list of instance dicts (not a copy)."""
        return self._instances

    def get_count(self):
        """Number of stored instances."""
        return len(self._instances)

    # Attribute-style aliases kept for backward compatibility.
    count = property(get_count)
    size = property(get_count)
    instances = property(get_instances)
    phrases = property(get_phraselist)
    images = property(get_imagelist)
    boxes = property(get_boxlist)
def main(args):
    """Entry point: load and merge the config, then dispatch to the
    feature-extraction ``main`` registered for the configured
    representation.

    SECURITY FIX: the registered function name was previously resolved
    with ``eval()``; it is now looked up among this function's local
    imports (falling back to module globals), which performs the same
    bare-name resolution without arbitrary-code execution.
    """
    from benchmark.models.musichubert_hf.extract_bert_features import main as extract_hubert_features_main
    from benchmark.models.music2vec.extract_music2vec_features import main as extract_data2vec_features_main
    from benchmark.models.data2vec.extract_data2vec_features import main as extract_data2vec_audio_features_main
    from benchmark.models.handcrafted.extract_handcrafted_features import main as extract_handcrafted_features_main
    from benchmark.models.jukebox.extract_jukemir_features import main as extract_jukemir_features_main
    from benchmark.models.musicnn.extract_musicnn_features import main as extract_musicnn_features_main
    from benchmark.models.clmr.extract_clmr_features import main as extract_clmr_features_main
    from benchmark.models.mule.extract_mule_features import main as extract_mule_features_main
    from benchmark.models.hubert.extract_hubert_features import main as extract_speech_hubert_features_main
    config = load_config(args.config, namespace=True)
    if (args.override is not None) and (args.override.lower() != 'none'):
        override_config(args.override, config)
    config = merge_args_to_config(args, config)
    print_config(config)
    representation_name = config.dataset.pre_extract.feature_extractor.pretrain.name
    extract_main_name = bench.NAME_TO_EXTRACT_FEATURES_MAIN[representation_name]
    # The registry stores a bare function name (one of the *_main imports
    # above); resolve it like eval() would, locals first then globals.
    scope = locals()
    extract_main = scope[extract_main_name] if extract_main_name in scope else globals()[extract_main_name]
    extract_main(config)
class SegmentationModuleBase(nn.Module):
    """Base class for segmentation modules providing pixel-accuracy
    computation."""

    def __init__(self):
        super(SegmentationModuleBase, self).__init__()

    def pixel_acc(self, pred, label):
        """Fraction of valid pixels (label >= 0) whose argmax class over
        ``pred``'s channel dimension matches ``label``.

        A tiny epsilon in the denominator avoids division by zero when no
        pixel is valid.
        """
        predicted_classes = torch.max(pred, dim=1)[1]
        valid_mask = (label >= 0).long()
        correct = torch.sum(valid_mask * (predicted_classes == label).long())
        total = torch.sum(valid_mask)
        return correct.float() / (total.float() + 1e-10)
class MultipleChoiceQuestion(NamedTuple):
    """One multiple-choice question: stem text, candidate choices, and an
    optional id and gold answer key."""
    stem: str
    choices: List[Choice]
    id_: str = None
    answerKey: str = None
    # NOTE(review): the two parsers below take no self/cls and are invoked
    # as ``MultipleChoiceQuestion.from_jsonl*(...)``; a ``@staticmethod``
    # decorator appears to have been stripped. As written they only work
    # when called on the class, not on an instance.
    def from_jsonl(line: str) -> 'MultipleChoiceQuestion':
        """Parse one JSONL record with an 'id'/'question'/'answerKey' schema."""
        blob = json.loads(line)
        question = blob['question']
        return MultipleChoiceQuestion(id_=blob['id'], stem=question['stem'], choices=[Choice(c['label'], c['text']) for c in question['choices']], answerKey=blob['answerKey'])
    def from_jsonl_ours(line: str, idx: int) -> 'MultipleChoiceQuestion':
        """Parse one record of the project's own schema; ``idx`` becomes the
        question id.

        NOTE(review): the comprehension variable ``idx`` shadows the
        parameter only inside the list comprehension (Python 3 comprehension
        scoping), so ``id_=idx`` still receives the caller's index.
        """
        blob = json.loads(line)
        return MultipleChoiceQuestion(id_=idx, stem=blob['question'], choices=[Choice(num2char(idx), blob['options'][num2char(idx)]) for idx in range(len(blob['options']))], answerKey=blob['answer_idx'])
class FlaxRobertaModel(metaclass=DummyObject):
    """Import-time placeholder used when Flax is unavailable; any
    instantiation raises a helpful error via ``requires_backends``."""
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
class MaskFromDensePoseSampler():
    """Callable that converts DensePose predictions attached to detection
    ``Instances`` into ``BitMasks``."""

    def __call__(self, instances: Instances) -> BitMasks:
        densepose_predictions = instances.pred_densepose
        boxes = instances.pred_boxes
        return ToMaskConverter.convert(densepose_predictions, boxes, instances.image_size)
def _calc_dynamic_intervals(start_interval, dynamic_interval_list):
    """Expand (milestone, interval) pairs into two parallel lists.

    The milestone list is prefixed with epoch 0 and the interval list with
    ``start_interval`` so the initial interval applies from the start.
    """
    assert mmcv.is_list_of(dynamic_interval_list, tuple)
    milestones = [0] + [pair[0] for pair in dynamic_interval_list]
    intervals = [start_interval] + [pair[1] for pair in dynamic_interval_list]
    return (milestones, intervals)
def read_txt_file(path):
    """Read a text file and return one token list per line.

    Each line is stripped and then split on single spaces.
    """
    with open(path, 'r') as handle:
        rows = [line.strip().split(' ') for line in handle.read().splitlines()]
    return rows
def get_preprocessing(name, is_training=False):
    """Return a ``preprocessing_fn(image, output_height, output_width)``
    bound to the preprocessing module registered under ``name``.

    Raises:
        ValueError: if ``name`` has no registered preprocessing module.
    """
    preprocessing_fn_map = {'resnet_v1_152': resnet_preprocessing, 'mobilenet_v1': mobilenet_preprocessing}
    if name not in preprocessing_fn_map:
        raise ValueError('Preprocessing name [%s] was not recognized' % name)
    module = preprocessing_fn_map[name]

    def preprocessing_fn(image, output_height, output_width):
        # Closure captures the chosen module and the is_training flag.
        return module.preprocess_image(image, output_height, output_width, is_training=is_training)
    return preprocessing_fn
def _data_augmentation(image, label, bbox, data_augmentation_args):
    """TF1 graph-mode augmentation pipeline gated by boolean flags in
    ``data_augmentation_args``.

    Optional steps: bbox-constrained random crop, random resize + crop,
    pad-and-crop, horizontal flip, brightness/saturation jitter, followed
    by mean/std normalization.

    NOTE(review): relies on module-level ``height``/``width``/``depth``
    globals for the output geometry — confirm they are defined before this
    function is traced.

    Returns:
        (image, label, bbox) with ``image`` augmented, cast to float32 and
        statically shaped to [height, width, depth].
    """
    print('Use data_augmentation_args: ', data_augmentation_args)
    if data_augmentation_args['crop_bbox']:
        # Sample a distorted crop window covering >= 10% of the bbox, then
        # crop and resize back to the target geometry.
        sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(tf.shape(image), bounding_boxes=bbox, min_object_covered=0.1, aspect_ratio_range=[0.75, 1.33], area_range=[0.05, 1.0], max_attempts=100, use_image_if_no_bounding_boxes=True)
        (bbox_begin, bbox_size, _) = sample_distorted_bounding_box
        (offset_y, offset_x, _) = tf.unstack(bbox_begin)
        (target_height, target_width, _) = tf.unstack(bbox_size)
        image = tf.image.crop_to_bounding_box(image, offset_y, offset_x, target_height, target_width)
        image = tf.expand_dims(image, 0)
        image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
        image = tf.squeeze(image)
    if data_augmentation_args['resize']:
        # Resize to a random side in [256, 512], then random-crop back.
        new_size = tf.random_uniform([], minval=256, maxval=(512 + 1), dtype=tf.int32)
        image = tf.expand_dims(image, 0)
        image = tf.image.resize_bilinear(image, [new_size, new_size], align_corners=False)
        image = tf.squeeze(image)
        image = tf.image.random_crop(image, [height, width, depth])
    if data_augmentation_args['padding']:
        reshaped_image = tf.image.resize_image_with_crop_or_pad(image, 256, 256)
        image = tf.random_crop(reshaped_image, [height, width, depth])
    image = tf.cast(image, tf.float32)
    if (not (data_augmentation_args['crop_bbox'] or data_augmentation_args['resize'] or data_augmentation_args['padding'])):
        # No geometric augmentation chosen: deterministic resize then
        # center crop/pad to the target geometry.
        image = tf.expand_dims(image, 0)
        image = tf.image.resize_bilinear(image, [256, 256])
        image = tf.squeeze(image)
        image = tf.image.resize_image_with_crop_or_pad(image, height, width)
    if data_augmentation_args['mirroring']:
        image = tf.image.random_flip_left_right(image)
    if data_augmentation_args['bright']:
        # NOTE(review): max_delta=32.0 suggests a 0-255 pixel range — confirm.
        image = tf.image.random_brightness(image, max_delta=32.0)
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
    mean = data_augmentation_args['mean']
    std = data_augmentation_args['std']
    if isinstance(mean, list):
        # Per-channel statistics: broadcast over H and W.
        mean = tf.reshape(mean, [1, 1, 3])
        std = tf.reshape(std, [1, 1, 3])
        image = ((image - mean) / std)
    else:
        image = ((image - mean) / std)
    image.set_shape([height, width, depth])
    return (image, label, bbox)
class LogScheduler(LRScheduler):
    """Learning-rate scheduler stepping through ``epochs`` values that are
    log-spaced from ``start_lr`` down to ``end_lr``."""

    def __init__(self, optimizer, start_lr=0.03, end_lr=0.0005, epochs=50, last_epoch=(- 1), **kwargs):
        self.start_lr = start_lr
        self.end_lr = end_lr
        self.epochs = epochs
        # Precompute the per-epoch learning rates on a log10 grid; the base
        # class may read this during its own __init__, so set it first.
        log_start, log_end = math.log10(start_lr), math.log10(end_lr)
        self.lr_spaces = np.logspace(log_start, log_end, epochs)
        super().__init__(optimizer, last_epoch)
class SepConvOp(nn.Module):
    """Depthwise-separable convolution: a depthwise k x k conv followed by
    a pointwise 1x1 conv, optionally followed by an activation looked up
    from ``ACTIVATION_OPS``."""

    def __init__(self, C_in, C_out, kernel_size, act_op, affine=True):
        super(SepConvOp, self).__init__()
        pad = PADDING_OPS[kernel_size]
        k = KERNEL_SIZE_OPS[kernel_size]
        act = ACTIVATION_OPS[act_op]
        layers = [
            nn.Conv2d(C_in, C_in, kernel_size=k, padding=pad, groups=C_in, bias=False),
            nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
        ]
        # A falsy activation entry means "no activation layer".
        if act:
            layers.append(act)
        self.op = nn.Sequential(*layers)

    def forward(self, x):
        return self.op(x)
class ArgMaxParameter(_message.Message):
    """Protobuf message for ArgMax layer parameters.

    NOTE(review): this looks like protoc-generated code — regenerate from
    the .proto definition rather than editing by hand. ``__metaclass__`` is
    the Python 2 metaclass mechanism and has no effect under Python 3.
    """
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ARGMAXPARAMETER
def discretized_mix_logistic_topk(means, logscales, logit_probs, range, bin_size, lower, upper, topk=1) -> Tuple[(torch.Tensor, torch.LongTensor)]:
    """Score every integer grid point in [-range, range] under a
    discretized mixture of logistics and return the top-k points.

    NOTE(review): the parameter name ``range`` shadows the builtin; it is
    left unchanged because renaming would break keyword callers.

    Args:
        means, logscales, logit_probs: mixture parameters; a broadcast
            dimension is inserted at dim 1 so all grid values are scored
            at once, and the mixture dimension is reduced at dim 2.
        range: half-width of the integer grid (grid values are scaled to
            [-1, 1] by dividing by ``range``).
        bin_size: half-width of one discretization bin, in scaled units.
        lower, upper: scaled bounds; outside them one-sided tail CDFs are
            used instead of the bin CDF difference.
        topk: number of best grid values to return.

    Returns:
        (log_probs, idx): top-k log-probabilities along dim 1 and the
        corresponding grid values (topk indices shifted back by ``range``).
    """
    eps = 1e-12
    means = means.unsqueeze(1)
    logscales = logscales.unsqueeze(1)
    logit_probs = logit_probs.unsqueeze(1)
    # Integer grid [-range, range], scaled into [-1, 1].
    x = torch.arange((- range), (range + 1), 1.0, device=means.device).unsqueeze(0).unsqueeze(2)
    x = x.div(range)
    centered_x = (x - means)
    # NOTE(review): logscales was unsqueezed above (a tensor op), so the
    # float branch below looks unreachable — confirm against callers.
    if isinstance(logscales, float):
        inv_stdv = np.exp((- logscales))
    else:
        inv_stdv = torch.exp((- logscales))
    min_in = (inv_stdv * (centered_x - bin_size))
    plus_in = (inv_stdv * (centered_x + bin_size))
    x_in = (inv_stdv * centered_x)
    cdf_min = torch.sigmoid(min_in)
    cdf_plus = torch.sigmoid(plus_in)
    # Probability mass of the bin = CDF difference across its two edges.
    cdf_delta = (cdf_plus - cdf_min)
    log_cdf_mid = torch.log((cdf_delta + eps))
    # Log-density approximation used when the CDF difference underflows.
    log_cdf_approx = (((x_in - logscales) - (2.0 * F.softplus(x_in))) + np.log((2 * bin_size)))
    # One-sided tails for grid values at/past the clipping bounds.
    log_cdf_low = (plus_in - F.softplus(plus_in))
    log_cdf_up = (- F.softplus(min_in))
    log_cdf = torch.where(cdf_delta.gt(1e-05), log_cdf_mid, log_cdf_approx)
    log_cdf = torch.where(x.ge(lower), log_cdf, log_cdf_low)
    log_cdf = torch.where(x.le(upper), log_cdf, log_cdf_up)
    # Mix over components (dim 2), then pick the best grid values (dim 1).
    log_probs = torch.logsumexp((log_cdf + logit_probs), dim=2)
    (log_probs, idx) = log_probs.topk(topk, dim=1)
    return (log_probs, (idx - range))
def controled_step(short_memory, long_memory, selected_instruction, current_paras, request: gr.Request):
    """Advance the per-session (cookie-keyed) human/writer agent pair by
    one step using the instruction the user selected in the UI.

    The first call for a session builds the ``Human`` and ``AIWriter``
    agents from the cached start inputs; later calls feed the writer's
    previous output (with the user-edited memory and instruction) back
    through human -> writer.

    NOTE(review): the ``long_memory`` argument is never read here; the
    writer's own ``long_memory`` is used instead — confirm intended.

    Returns:
        (output memory, parsed long-memory instructions, accumulated
        paragraphs, *next instruction options) for the Gradio outputs, or
        six empty strings when no paragraphs exist yet.
    """
    if (current_paras == ''):
        return ('', '', '', '', '', '')
    global _CACHE
    # Session key: the cookie header with the Google Analytics suffix
    # stripped off.
    cookie = request.headers['cookie']
    cookie = cookie.split('; _gat_gtag')[0]
    cache = _CACHE[cookie]
    if ('writer' not in cache):
        # First step for this session: build both agents from the cached
        # initialization data and run one human -> writer pass.
        start_input_to_human = cache['start_input_to_human']
        start_input_to_human['output_instruction'] = selected_instruction
        init_paragraphs = cache['init_paragraphs']
        human = Human(input=start_input_to_human, memory=None, embedder=embedder, model=llm_model, tokenizer=llm_tokenizer)
        human.step()
        start_short_memory = init_paragraphs['Summary']
        writer_start_input = human.output
        writer = AIWriter(input=writer_start_input, short_memory=start_short_memory, long_memory=[init_paragraphs['Paragraph 1'], init_paragraphs['Paragraph 2'], init_paragraphs['Paragraph 3']], memory_index=None, embedder=embedder, model=llm_model, tokenizer=llm_tokenizer)
        cache['writer'] = writer
        cache['human'] = human
        writer.step()
    else:
        # Subsequent steps: thread the previous writer output, with the
        # user's edits, through human -> writer.
        human = cache['human']
        writer = cache['writer']
        output = writer.output
        output['output_memory'] = short_memory
        output['output_instruction'] = selected_instruction
        human.input = output
        human.step()
        writer.input = human.output
        writer.step()
    return (writer.output['output_memory'], parse_instructions(writer.long_memory), ((current_paras + '\n\n') + writer.output['input_paragraph']), *writer.output['output_instruction'])
class MishActivation(nn.Module):
    """Mish activation, x * tanh(softplus(x)).

    Uses the native ``nn.functional.mish`` on torch >= 1.9 and a
    pure-Python fallback on older versions.
    """

    def __init__(self):
        super().__init__()
        torch_version = version.parse(version.parse(torch.__version__).base_version)
        if torch_version < version.parse('1.9'):
            self.act = self._mish_python
        else:
            self.act = nn.functional.mish

    def _mish_python(self, input: Tensor) -> Tensor:
        # Fallback used before torch shipped a native mish implementation.
        return input * torch.tanh(nn.functional.softplus(input))

    def forward(self, input: Tensor) -> Tensor:
        return self.act(input)
def rmdir(path: str) -> None:
    """Remove the directory at ``path``.

    s3 paths are unsupported; hdfs:// paths go through ``hdfs dfs -rm -r``;
    anything else (with an optional file:// prefix) uses ``os.rmdir``.
    """
    if path.startswith('s3'):
        invalidOperationError(False, 'not implement')
    elif path.startswith('hdfs://'):
        cmd = 'hdfs dfs -rm -r {}'.format(path)
        status, output = subprocess.getstatusoutput(cmd)
        if status != 0:
            invalidOperationError(False, output)
    else:
        local_path = path[len('file://'):] if path.startswith('file://') else path
        os.rmdir(local_path)
class TFEsmModel(metaclass=DummyObject):
    """Import-time placeholder used when TensorFlow is unavailable; any
    instantiation raises a helpful error via ``requires_backends``."""
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def move(src_path: str, tgt_path, suffix: str, split_name: str, url):
    """Create <tgt>/<split>/{doc,abs} and move this split's url-hashed
    ``.abs.<suffix>`` / ``.doc.<suffix>`` files into them in parallel."""
    split_dir = os.path.join(tgt_path, split_name)
    doc_dir = os.path.join(split_dir, 'doc')
    abs_dir = os.path.join(split_dir, 'abs')
    os.mkdir(split_dir)
    os.mkdir(doc_dir)
    os.mkdir(abs_dir)
    with open(url, 'r', encoding='utf-8') as fd:
        url_lines = fd.read().splitlines()
    # File basenames are derived from the hashes of the split's URLs.
    url_names = get_url_hashes(url_lines)
    print('len of urls {}'.format(len(url_names)))
    abs_files = [u + '.abs.' + suffix for u in url_names]
    doc_files = [u + '.doc.' + suffix for u in url_names]
    count = len(abs_files)
    assert count == len(doc_files)
    workers = multiprocessing.cpu_count()
    # Move abstracts first, then documents, each with its own pool.
    for files, dest in ((abs_files, abs_dir), (doc_files, doc_dir)):
        pool = multiprocessing.Pool(processes=workers)
        pool.starmap(_mv_from_to, zip([src_path] * count, files, [dest] * count))
        pool.close()
        pool.join()
class ResnetBlock(nn.Module):
    """Dilated residual block: two reflect-padded 3x3 convolutions
    (optionally spectrally normalized) with instance norm, added back onto
    the input."""

    def __init__(self, dim, dilation=1, use_spectral_norm=False):
        super(ResnetBlock, self).__init__()
        # Spectral norm and bias are mutually exclusive here.
        use_bias = not use_spectral_norm
        first_conv = spectral_norm(nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=0, dilation=dilation, bias=use_bias), use_spectral_norm)
        second_conv = spectral_norm(nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=0, dilation=1, bias=use_bias), use_spectral_norm)
        self.conv_block = nn.Sequential(
            nn.ReflectionPad2d(dilation),
            first_conv,
            nn.InstanceNorm2d(dim, track_running_stats=False),
            nn.ReLU(True),
            nn.ReflectionPad2d(1),
            second_conv,
            nn.InstanceNorm2d(dim, track_running_stats=False),
        )

    def forward(self, x):
        return x + self.conv_block(x)
def exportfile(newAudio, time1, time2, filename, i):
    """Slice ``newAudio`` to [time1, time2) and export it as a uniquely
    named .wav in the CWD.

    The '_segment' suffix is used when the would-be ``<filename>_<i>.wav``
    already exists in the current directory. Returns the exported name.
    """
    segment = newAudio[time1:time2]
    existing = os.listdir()
    candidate = filename[0:-4] + '_' + str(i) + '.wav'
    if candidate in existing:
        out_name = str(uuid.uuid4()) + '_segment' + '.wav'
    else:
        out_name = str(uuid.uuid4()) + '.wav'
    print('making %s' % out_name)
    segment.export(out_name, format='wav')
    return out_name
def label_file_from_coordinates(nifti_image, coord_list):
    """Build a binary NIfTI label volume with the same shape as
    ``nifti_image`` and a 1 at every (x, y, z) coordinate in
    ``coord_list``; the affine is copied from the input header."""
    shape = np.array(nifti_image.dataobj).shape
    labels = np.zeros(shape)
    for coord in coord_list:
        labels[coord[0], coord[1], coord[2]] = 1
    return nib.Nifti1Image(dataobj=labels, affine=nifti_image.header.get_best_affine())
class BeitImageProcessor(BaseImageProcessor):
    """Image processor for BEiT.

    Preprocessing pipeline (each step gated by its ``do_*`` flag):
    resize -> center crop -> rescale -> normalize for images, plus optional
    label reduction (0 -> 255 ignore index, remaining labels shifted by -1)
    for segmentation maps.

    BUG FIXES vs the previous version: restored the stripped
    ``@classmethod`` on ``from_dict`` (its first parameter is ``cls``, so
    ``BeitImageProcessor.from_dict(cfg)`` previously received the config
    dict as ``cls`` and failed) and the ``@property`` on the deprecated
    ``reduce_labels`` accessor.
    """

    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool=True, size: Dict[(str, int)]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Dict[(str, int)]=None, rescale_factor: Union[(int, float)]=(1 / 255), do_rescale: bool=True, do_normalize: bool=True, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None, do_reduce_labels: bool=False, **kwargs) -> None:
        # Backward compatibility: accept the deprecated `reduce_labels`
        # kwarg as an alias for `do_reduce_labels`.
        if ('reduce_labels' in kwargs):
            warnings.warn('The `reduce_labels` parameter is deprecated and will be removed in a future version. Please use `do_reduce_labels` instead.', FutureWarning)
            do_reduce_labels = kwargs.pop('reduce_labels')
        super().__init__(**kwargs)
        size = (size if (size is not None) else {'height': 256, 'width': 256})
        size = get_size_dict(size)
        crop_size = (crop_size if (crop_size is not None) else {'height': 224, 'width': 224})
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = (image_mean if (image_mean is not None) else IMAGENET_STANDARD_MEAN)
        self.image_std = (image_std if (image_std is not None) else IMAGENET_STANDARD_STD)
        self.do_reduce_labels = do_reduce_labels

    @property
    def reduce_labels(self) -> bool:
        """Deprecated attribute-style alias for ``do_reduce_labels``."""
        warnings.warn('The `reduce_labels` property is deprecated and will be removed in v4.27. Please use `do_reduce_labels` instead.', FutureWarning)
        return self.do_reduce_labels

    @classmethod
    def from_dict(cls, image_processor_dict: Dict[(str, Any)], **kwargs):
        """Build a processor from a config dict, honouring the deprecated
        `reduce_labels` kwarg (e.g. when created via ``from_pretrained``)."""
        image_processor_dict = image_processor_dict.copy()
        if ('reduce_labels' in kwargs):
            image_processor_dict['reduce_labels'] = kwargs.pop('reduce_labels')
        return super().from_dict(image_processor_dict, **kwargs)

    def resize(self, image: np.ndarray, size: Dict[(str, int)], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Resize ``image`` to ``size['height']`` x ``size['width']``.

        Raises:
            ValueError: if ``size`` lacks ``height``/``width`` keys.
        """
        size = get_size_dict(size, default_to_square=True, param_name='size')
        if (('height' not in size) or ('width' not in size)):
            raise ValueError(f'The `size` argument must contain `height` and `width` keys. Got {size.keys()}')
        return resize(image, size=(size['height'], size['width']), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[(str, int)], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Center-crop ``image`` to ``size['height']`` x ``size['width']``."""
        size = get_size_dict(size, default_to_square=True, param_name='size')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[(int, float)], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[(float, List[float])], std: Union[(float, List[float])], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Normalize ``image`` with the given mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def reduce_label(self, label: ImageInput) -> np.ndarray:
        """Shift segmentation labels down by one, mapping the background
        class (0) to the 255 ignore index."""
        label = to_numpy_array(label)
        # 0 (background) becomes the ignore index; everything else shifts
        # down by 1; values that wrapped to 254 were originally 255.
        label[(label == 0)] = 255
        label = (label - 1)
        label[(label == 254)] = 255
        return label

    def _preprocess(self, image: ImageInput, do_reduce_labels: bool=None, do_resize: bool=None, size: Dict[(str, int)]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: Dict[(str, int)]=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None):
        """Apply each enabled transform in pipeline order to one array."""
        if do_reduce_labels:
            image = self.reduce_label(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image=image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        return image

    def _preprocess_image(self, image: ImageInput, do_resize: bool=None, size: Dict[(str, int)]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: Dict[(str, int)]=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None, data_format: Optional[Union[(str, ChannelDimension)]]=None) -> np.ndarray:
        """Preprocess a single image (label reduction never applies here)."""
        image = to_numpy_array(image)
        image = self._preprocess(image, do_reduce_labels=False, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std)
        if (data_format is not None):
            image = to_channel_dimension_format(image, data_format)
        return image

    def _preprocess_segmentation_map(self, segmentation_map: ImageInput, do_resize: bool=None, size: Dict[(str, int)]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: Dict[(str, int)]=None, do_reduce_labels: bool=None):
        """Preprocess a single segmentation map (no rescale/normalize);
        returns an int64 array with the original rank."""
        segmentation_map = to_numpy_array(segmentation_map)
        # Add a channel axis for 2D maps so the shared pipeline applies,
        # and drop it again afterwards.
        if (segmentation_map.ndim == 2):
            segmentation_map = segmentation_map[(None, ...)]
            added_dimension = True
        else:
            added_dimension = False
        segmentation_map = self._preprocess(image=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, resample=resample, size=size, do_center_crop=do_center_crop, crop_size=crop_size, do_normalize=False, do_rescale=False)
        if added_dimension:
            segmentation_map = np.squeeze(segmentation_map, axis=0)
        segmentation_map = segmentation_map.astype(np.int64)
        return segmentation_map

    def __call__(self, images, segmentation_maps=None, **kwargs):
        """Forward positional ``segmentation_maps`` into ``preprocess``."""
        return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)

    def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, do_resize: bool=None, size: Dict[(str, int)]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: Dict[(str, int)]=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        """Preprocess a batch of images (and optional segmentation maps).

        Per-call arguments override the instance defaults. Returns a
        ``BatchFeature`` with ``pixel_values`` (and ``labels`` when
        segmentation maps were given) in the requested tensor type.

        Raises:
            ValueError: on invalid image/segmentation-map types or missing
                parameters required by an enabled step.
        """
        # Per-call overrides fall back to the instance configuration.
        do_resize = (do_resize if (do_resize is not None) else self.do_resize)
        size = (size if (size is not None) else self.size)
        size = get_size_dict(size, default_to_square=True, param_name='size')
        resample = (resample if (resample is not None) else self.resample)
        do_center_crop = (do_center_crop if (do_center_crop is not None) else self.do_center_crop)
        crop_size = (crop_size if (crop_size is not None) else self.crop_size)
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        do_rescale = (do_rescale if (do_rescale is not None) else self.do_rescale)
        rescale_factor = (rescale_factor if (rescale_factor is not None) else self.rescale_factor)
        do_normalize = (do_normalize if (do_normalize is not None) else self.do_normalize)
        image_mean = (image_mean if (image_mean is not None) else self.image_mean)
        image_std = (image_std if (image_std is not None) else self.image_std)
        do_reduce_labels = (do_reduce_labels if (do_reduce_labels is not None) else self.do_reduce_labels)
        images = make_list_of_images(images)
        if (segmentation_maps is not None):
            segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)
        if (not valid_images(images)):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        if ((segmentation_maps is not None) and (not valid_images(segmentation_maps))):
            raise ValueError('Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        # NOTE(review): resample is validated unconditionally here (not
        # only when do_resize is set) — kept as-is.
        if ((do_resize and (size is None)) or (resample is None)):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if (do_center_crop and (crop_size is None)):
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if (do_rescale and (rescale_factor is None)):
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if (do_normalize and ((image_mean is None) or (image_std is None))):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        images = [self._preprocess_image(image=img, do_resize=do_resize, do_center_crop=do_center_crop, do_rescale=do_rescale, do_normalize=do_normalize, resample=resample, size=size, rescale_factor=rescale_factor, crop_size=crop_size, image_mean=image_mean, image_std=image_std, data_format=data_format) for img in images]
        data = {'pixel_values': images}
        if (segmentation_maps is not None):
            segmentation_maps = [self._preprocess_segmentation_map(segmentation_map=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, resample=resample, size=size, do_center_crop=do_center_crop, crop_size=crop_size) for segmentation_map in segmentation_maps]
            data['labels'] = segmentation_maps
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple]=None):
        """Turn model logits into per-image semantic maps.

        When ``target_sizes`` is given, each logits map is bilinearly
        resized to its target size before the channel-wise argmax;
        otherwise the argmax is taken at the logits' native resolution.
        """
        logits = outputs.logits
        if (target_sizes is not None):
            if (len(logits) != len(target_sizes)):
                raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
def test_sanitize_date_range_bad_start_dt() -> None:
    """A non-date start string must raise ValueError carrying the standard
    format message."""
    with pytest.raises(ValueError) as excinfo:
        sanitize_date_range('INVALID', '2020-06-06')
    assert str(excinfo.value) == 'Incorrect data format, should be YYYY-MM-DD'
def extract_answers(solver, data_path):
    """Run ``solver`` over every JSONL question in ``data_path``.

    Prints accuracy against each question's gold ``answerKey``.

    Returns:
        (answers, best_choices): the raw solver answers and the selected
        best choice for each question, in file order.
    """
    answers = []
    best_choices = []
    num_corrects = 0
    with open(data_path, 'r') as f:
        for (idx, line) in enumerate(f):
            question = MultipleChoiceQuestion.from_jsonl_ours(line, idx)
            answer = solver.answer_question(question)
            answers.append(answer)
            best_choice = answer_to_selection(answer)
            # BUG FIX: best_choice was computed but never collected, so the
            # returned best_choices list was always empty.
            best_choices.append(best_choice)
            if (best_choice.choice.label == question.answerKey):
                num_corrects += 1
    print('accuracy is: {} / {} = {:.1f}%'.format(num_corrects, len(answers), ((num_corrects * 100.0) / len(answers))))
    return (answers, best_choices)
def compare_files(gold_file, pred_file, up_ignore_layer=0):
    """Score predicted NER output against gold: match entities between the
    two files and return the final score over the entity counts."""
    gold_entity, pred_entity, match_entity = get_matched_ner_from_file(gold_file, pred_file, up_ignore_layer)
    return get_final_score(len(gold_entity), len(pred_entity), len(match_entity))
def count_model_param_flops(model=None, dataset=None, multiply_adds=True, full=False):
    """Estimate forward-pass FLOPs of ``model`` for one input sized for
    ``dataset``.

    Per-layer forward hooks append FLOP counts to closure lists during a
    single dummy forward pass. With ``full=False`` conv/linear counts use
    only non-zero weights (i.e. the pruned model's FLOPs).

    Args:
        model: the network to measure (must have parameters on a device).
        dataset: one of 'emnist', 'cifar10', 'cifar100', 'tiny'; selects
            input channels and resolution. NOTE(review): any other value
            leaves ``input_channel``/``input_res`` undefined and raises
            NameError at the input-construction line below.
        multiply_adds: count a multiply-add as 2 ops per weight when True.
        full: count all weights instead of only non-zero ones when True.

    Returns:
        Total FLOPs summed over the conv/linear/bn/relu/pooling/upsample
        hook lists (only conv and linear hooks are registered in ``foo``,
        so the other lists stay empty in practice).
    """
    # NOTE(review): prods/save_hook, list_1/simple_hook and
    # list_2/simple_hook2 are defined but never registered anywhere.
    prods = {}
    def save_hook(name):
        def hook_per(self, input, output):
            prods[name] = np.prod(input[0].shape)
        return hook_per
    list_1 = []
    def simple_hook(self, input, output):
        list_1.append(np.prod(input[0].shape))
    list_2 = {}
    def simple_hook2(self, input, output):
        list_2['names'] = np.prod(input[0].shape)
    list_conv = []
    def conv_hook(self, input, output):
        # FLOPs = (weight ops + per-output-channel bias) * spatial output
        # * batch; weight ops use non-zero weights unless ``full``.
        (batch_size, input_channels, input_height, input_width) = input[0].size()
        (output_channels, output_height, output_width) = output[0].size()
        kernel_ops = ((self.kernel_size[0] * self.kernel_size[1]) * (self.in_channels / self.groups))
        bias_ops = (1 if (self.bias is not None) else 0)
        if (not full):
            num_weight_params = (self.weight.data != 0).float().sum()
        else:
            num_weight_params = torch.numel(self.weight.data)
        assert (self.weight.numel() == (kernel_ops * output_channels)), 'Not match'
        flops = (((((num_weight_params * (2 if multiply_adds else 1)) + (bias_ops * output_channels)) * output_height) * output_width) * batch_size)
        list_conv.append(flops)
    list_linear = []
    def linear_hook(self, input, output):
        # Batch size is only meaningful for 2D inputs; otherwise 1.
        batch_size = (input[0].size(0) if (input[0].dim() == 2) else 1)
        if (not full):
            weight_ops = ((self.weight.data != 0).float().sum() * (2 if multiply_adds else 1))
            bias_ops = ((self.bias.data != 0).float().sum() if (self.bias is not None) else 0)
        else:
            weight_ops = (torch.numel(self.weight.data) * (2 if multiply_adds else 1))
            bias_ops = (torch.numel(self.bias.data) if (self.bias is not None) else 0)
        flops = (batch_size * (weight_ops + bias_ops))
        list_linear.append(flops)
    list_bn = []
    def bn_hook(self, input, output):
        list_bn.append((input[0].nelement() * 2))
    list_relu = []
    def relu_hook(self, input, output):
        list_relu.append(input[0].nelement())
    list_pooling = []
    def pooling_hook(self, input, output):
        (batch_size, input_channels, input_height, input_width) = input[0].size()
        (output_channels, output_height, output_width) = output[0].size()
        kernel_ops = (self.kernel_size * self.kernel_size)
        bias_ops = 0
        params = 0
        flops = (((((kernel_ops + bias_ops) * output_channels) * output_height) * output_width) * batch_size)
        list_pooling.append(flops)
    list_upsample = []
    def upsample_hook(self, input, output):
        (batch_size, input_channels, input_height, input_width) = input[0].size()
        (output_channels, output_height, output_width) = output[0].size()
        flops = ((((output_height * output_width) * output_channels) * batch_size) * 12)
        list_upsample.append(flops)
    def foo(handles, net):
        # Recursively register conv/linear hooks on leaf modules only.
        childrens = list(net.children())
        if (not childrens):
            if isinstance(net, torch.nn.Conv2d):
                handles += [net.register_forward_hook(conv_hook)]
            if isinstance(net, torch.nn.Linear):
                handles += [net.register_forward_hook(linear_hook)]
            return
        for c in childrens:
            foo(handles, c)
    handles = []
    foo(handles, model)
    # Input geometry per dataset name.
    if (dataset == 'emnist'):
        input_channel = 1
        input_res = 28
    elif (dataset == 'cifar10'):
        input_channel = 3
        input_res = 32
    elif (dataset == 'cifar100'):
        input_channel = 3
        input_res = 32
    elif (dataset == 'tiny'):
        input_channel = 3
        input_res = 64
    # One dummy forward pass on the model's device triggers the hooks.
    device = next(model.parameters()).device
    input = Variable(torch.rand(input_channel, input_res, input_res).unsqueeze(0), requires_grad=True).to(device)
    out = model(input)
    total_flops = (((((sum(list_conv) + sum(list_linear)) + sum(list_bn)) + sum(list_relu)) + sum(list_pooling)) + sum(list_upsample))
    # Remove hooks so repeated calls don't double-count.
    for handle in handles:
        handle.remove()
    return total_flops
def classify(info, gold, test):
    """Assign ``info['classified_type']`` by running an ordered cascade of
    heuristic rules over a parse-error description dict.

    The first matching rule wins (it sets the type and returns); anything
    unmatched keeps the initial ``'UNSET <type>'`` marker.
    NOTE(review): ``gold`` and ``test`` are not used in this function.
    """
    coord_tags = ['CC']
    # Default marker so unclassified errors stay visible downstream.
    info['classified_type'] = ('UNSET ' + info['type'])
    if value_present(info, ['type'], ['move']):
        # A coordinating conjunction adjacent to the source or target
        # position (or at either end of the movers) => co-ordination error.
        if ('start left siblings' in info):
            if ((len(info['start left siblings']) > 0) and (info['start left siblings'][(- 1)] in coord_tags)):
                info['classified_type'] = 'Co-ordination'
                return
        if ('start right siblings' in info):
            if ((len(info['start right siblings']) > 0) and (info['start right siblings'][0] in coord_tags)):
                info['classified_type'] = 'Co-ordination'
                return
        if ('end left siblings' in info):
            if ((len(info['end left siblings']) > 0) and (info['end left siblings'][(- 1)] in coord_tags)):
                info['classified_type'] = 'Co-ordination'
                return
        if ('end right siblings' in info):
            if ((len(info['end right siblings']) > 0) and (info['end right siblings'][0] in coord_tags)):
                info['classified_type'] = 'Co-ordination'
                return
        if ('movers' in info):
            if ((len(info['movers']) > 0) and ((info['movers'][(- 1)] in coord_tags) or (info['movers'][0] in coord_tags))):
                info['classified_type'] = 'Co-ordination'
                return
        # NOTE(review): multi_case is computed but never read afterwards.
        multi_case = False
        if ('movers' in info):
            if (len(info['movers']) > 1):
                multi_case = True
                for label in info['movers']:
                    if (label not in phrase_labels):
                        multi_case = False
                        break
        # Attachment errors, keyed on the label of the moved constituent.
        if value_present(info, ['movers'], ['PP']):
            info['classified_type'] = 'PP Attachment'
            return
        if value_present(info, ['movers'], ['NP']):
            info['classified_type'] = 'NP Attachment'
            return
        if value_present(info, ['movers'], ['VP']):
            info['classified_type'] = 'VP Attachment'
            return
        if value_present(info, ['movers'], ['S', 'SINV', 'SBAR']):
            info['classified_type'] = 'Clause Attachment'
            return
        if value_present(info, ['movers'], ['RB', 'ADVP', 'ADJP']):
            info['classified_type'] = 'Modifier Attachment'
            return
        if value_present(info, ['old_parent'], ['NP', 'QP']):
            if value_present(info, ['new_parent'], ['NP', 'QP']):
                info['classified_type'] = 'NP Internal Structure'
                return
        if (('over_word' in info) and info['over_word']):
            info['classified_type'] = 'Single Word Phrase'
            return
    if value_present(info, ['type'], ['relabel']):
        info['classified_type'] = 'Different label'
        return
    if (info['type'] == 'add'):
        # Added node with a single child: unary production error, special-
        # cased when parent and child share the same label.
        if (('subtrees' in info) and (len(info['subtrees']) == 1)):
            if (info['subtrees'][0] == info['label']):
                info['classified_type'] = 'XoverX Unary'
                return
            info['classified_type'] = 'Unary'
            return
    if (info['type'] == 'remove'):
        if (('family' in info) and (len(info['family']) == 1)):
            if (info['parent'] == info['label']):
                info['classified_type'] = 'XoverX Unary'
                return
            info['classified_type'] = 'Unary'
            return
    if (('subtrees' in info) and (len(info['subtrees']) == 1)):
        info['classified_type'] = 'Unary'
        return
    if value_present(info, ['label'], ['UCP']):
        info['classified_type'] = 'Co-ordination'
        return
    if ('right siblings' in info):
        if ((len(info['right siblings']) > 0) and (info['right siblings'][0] in coord_tags)):
            info['classified_type'] = 'Co-ordination'
            return
    # Attachment errors keyed on non-initial subtree labels.
    if (('subtrees' in info) and ('PP' in info['subtrees'][1:])):
        info['classified_type'] = 'PP Attachment'
        return
    if ('subtrees' in info):
        if ('S' in info['subtrees'][1:]):
            info['classified_type'] = 'Clause Attachment'
            return
        if ('SBAR' in info['subtrees'][1:]):
            info['classified_type'] = 'Clause Attachment'
            return
        if ('SINV' in info['subtrees'][1:]):
            info['classified_type'] = 'Clause Attachment'
            return
    if value_present(info, ['parent'], ['NP']):
        # All-word (non-phrase) children under an NP: NP-internal error.
        all_words = True
        if ('subtrees' in info):
            for label in info['subtrees']:
                if (label in phrase_labels):
                    all_words = False
                    break
        if all_words:
            info['classified_type'] = 'NP Internal Structure'
            return
    if value_present(info, ['label'], ['ADVP', 'ADJP']):
        info['classified_type'] = 'Modifier Attachment'
        return
    if ('subtrees' in info):
        if (('ADVP' in info['subtrees'][1:]) or ('ADJP' in info['subtrees'][1:])):
            info['classified_type'] = 'Modifier Attachment'
            return
    if ('label' in info):
        # All children share the parent's label: NP-internal for NPs,
        # otherwise treated as co-ordination.
        label = info['label']
        if ('subtrees' in info):
            all_same = True
            for slabel in info['subtrees']:
                if (slabel != label):
                    all_same = False
                    break
            if all_same:
                if (label == 'NP'):
                    info['classified_type'] = 'NP Internal Structure'
                    return
                else:
                    info['classified_type'] = 'Co-ordination'
                    return
('l2_side')
class TFL2SideBoxRegularizer(TFBoxRegularizer):
    """Box regularizer that penalizes squared side lengths (TensorFlow backend)."""

    def __init__(self, weight: float, log_scale: bool = False, reduction: str = 'sum') -> None:
        """Store the regularization weight and options; see ``TFBoxRegularizer``."""
        super().__init__(weight, log_scale=log_scale, reduction=reduction)

    def _forward(self, box_tensor: TFBoxTensor) -> tf.Tensor:
        """Return the l2-side penalty for *box_tensor* (log-space if configured)."""
        penalty = tf_l2_side_regularizer(box_tensor, log_scale=self.log_scale)
        return penalty
def get_optimizer(model):
    """Build the optimizer selected by ``cfg.SOLVER.OPTIMIZER`` for *model*.

    Supported methods are sgd, adam, adadelta and rmsprop; every
    hyper-parameter (lr, momentum, eps, weight decay) is read from the
    global ``cfg.SOLVER`` node.

    Raises:
        ValueError: if the configured optimizer name is not supported.
    """
    params = _get_paramters(model)
    method = cfg.SOLVER.OPTIMIZER.lower()
    # Hoist the hyper-parameters shared by every optimizer.
    lr = cfg.SOLVER.LR
    weight_decay = cfg.SOLVER.WEIGHT_DECAY
    if method == 'sgd':
        return optim.SGD(params, lr=lr, momentum=cfg.SOLVER.MOMENTUM, weight_decay=weight_decay)
    if method == 'adam':
        return optim.Adam(params, lr=lr, eps=cfg.SOLVER.EPSILON, weight_decay=weight_decay)
    if method == 'adadelta':
        return optim.Adadelta(params, lr=lr, eps=cfg.SOLVER.EPSILON, weight_decay=weight_decay)
    if method == 'rmsprop':
        return optim.RMSprop(params, lr=lr, alpha=0.9, eps=cfg.SOLVER.EPSILON, momentum=cfg.SOLVER.MOMENTUM, weight_decay=weight_decay)
    raise ValueError('Expected optimizer method in [sgd, adam, adadelta, rmsprop], but received {}'.format(method))
class ConvBlock(nn.Module):
    """Convolution block in a StyleGAN2-style network.

    Forward pipeline: optional minibatch-stddev concat -> optional FIR
    filtering (for downsampling) -> conv2d with optional equalized learning
    rate -> scaled activation.
    """
    def __init__(self, in_channels, out_channels, kernel_size=3, add_bias=True, scale_factor=1, filtering_kernel=(1, 3, 3, 1), use_wscale=True, wscale_gain=_WSCALE_GAIN, lr_mul=1.0, activation_type='lrelu', minibatch_std_group_size=0, minibatch_std_channels=1):
        super().__init__()
        if (minibatch_std_group_size > 1):
            # The minibatch-stddev layer appends `minibatch_std_channels`
            # statistics channels, so the convolution sees that many extras.
            in_channels = (in_channels + minibatch_std_channels)
            self.mbstd = MiniBatchSTDLayer(group_size=minibatch_std_group_size, new_channels=minibatch_std_channels)
        else:
            self.mbstd = nn.Identity()
        if (scale_factor > 1):
            # Downsampling: a FIR pre-filter (its own scale_factor stays 1;
            # the spatial reduction comes from the conv stride below), with
            # extra padding compensating the kernel/stride geometry.
            extra_padding = (kernel_size - scale_factor)
            self.filter = DownsamplingLayer(scale_factor=1, kernel=filtering_kernel, extra_padding=extra_padding)
            self.stride = scale_factor
            self.padding = 0
        else:
            self.filter = nn.Identity()
            # Odd kernel + half padding keeps the spatial size unchanged.
            assert ((kernel_size % 2) == 1)
            self.stride = 1
            self.padding = (kernel_size // 2)
        weight_shape = (out_channels, in_channels, kernel_size, kernel_size)
        fan_in = ((kernel_size * kernel_size) * in_channels)
        # He-style initialization scale derived from fan-in.
        wscale = (wscale_gain / np.sqrt(fan_in))
        if use_wscale:
            # Equalized learning rate: store near-unit-variance weights and
            # apply the scale at runtime (see forward()).
            self.weight = nn.Parameter((torch.randn(*weight_shape) / lr_mul))
            self.wscale = (wscale * lr_mul)
        else:
            # Bake the init scale into the stored weights instead.
            self.weight = nn.Parameter(((torch.randn(*weight_shape) * wscale) / lr_mul))
            self.wscale = lr_mul
        if add_bias:
            self.bias = nn.Parameter(torch.zeros(out_channels))
        else:
            self.bias = None
        # Bias is also rescaled at runtime by the learning-rate multiplier.
        self.bscale = lr_mul
        if (activation_type == 'linear'):
            self.activate = nn.Identity()
            self.activate_scale = 1.0
        elif (activation_type == 'lrelu'):
            # sqrt(2) keeps the activation output variance roughly constant.
            self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)
            self.activate_scale = np.sqrt(2.0)
        else:
            raise NotImplementedError(f'Not implemented activation function: `{activation_type}`!')
    def forward(self, x):
        x = self.mbstd(x)
        x = self.filter(x)
        # Runtime weight/bias scaling implements the equalized-lr trick.
        weight = (self.weight * self.wscale)
        bias = ((self.bias * self.bscale) if (self.bias is not None) else None)
        x = F.conv2d(x, weight=weight, bias=bias, stride=self.stride, padding=self.padding)
        x = (self.activate(x) * self.activate_scale)
        return x
def test_simple_creation() -> None:
    """TFBoxTensor wraps a tensor unchanged, for both 3-D and 2-D inputs."""
    for shape in ((3, 2, 3), (2, 10)):
        raw = tf.constant(np.random.rand(*shape))
        boxed = TFBoxTensor(raw)
        # The wrapper must expose the exact same data it was given.
        assert (raw.numpy() == boxed.data.numpy()).all()
        assert isinstance(boxed, TFBoxTensor)
def process_images(files):
    """Open each image file and hand it to ``process_one_img``.

    Paths containing '.jpg' are skipped (presumably already converted —
    TODO confirm with the caller). A file that fails to open or process is
    reported by printing its path and the batch continues.
    """
    for file in tqdm(files, mininterval=10):
        if ('.jpg' in file):
            continue
        try:
            im = Image.open(file)
            process_one_img(file, im)
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit and made the loop uninterruptible.
        except Exception:
            print(file)
class StackedConvLayers(nn.Module):
    """A stack of ``num_convs`` conv->(dropout)->norm->nonlin blocks.

    The first block may use a different stride (``first_stride``), e.g. for
    downsampling; the remaining ``num_convs - 1`` blocks reuse the stock
    ``conv_kwargs`` and map output channels to output channels.
    """
    def __init__(self, input_feature_channels, output_feature_channels, num_convs, conv_op=nn.Conv2d, conv_kwargs=None, norm_op=nn.BatchNorm2d, norm_op_kwargs=None, dropout_op=nn.Dropout2d, dropout_op_kwargs=None, nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None, basic_block=ConvDropoutNormNonlin):
        # NOTE(review): plain attributes are assigned before
        # super().__init__(); this works because the values are classes/dicts
        # (not Parameters or Modules), but calling super() first would be safer.
        self.input_channels = input_feature_channels
        self.output_channels = output_feature_channels
        # Fill in default hyper-parameters for any config dict not supplied.
        if (nonlin_kwargs is None):
            nonlin_kwargs = {'negative_slope': 0.01, 'inplace': True}
        if (dropout_op_kwargs is None):
            dropout_op_kwargs = {'p': 0.5, 'inplace': True}
        if (norm_op_kwargs is None):
            norm_op_kwargs = {'eps': 1e-05, 'affine': True, 'momentum': 0.1}
        if (conv_kwargs is None):
            conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
        self.nonlin_kwargs = nonlin_kwargs
        self.nonlin = nonlin
        self.dropout_op = dropout_op
        self.dropout_op_kwargs = dropout_op_kwargs
        self.norm_op_kwargs = norm_op_kwargs
        self.conv_kwargs = conv_kwargs
        self.conv_op = conv_op
        self.norm_op = norm_op
        if (first_stride is not None):
            # Only the first conv gets the custom stride; deepcopy so the
            # shared conv_kwargs dict is not mutated for the other blocks.
            self.conv_kwargs_first_conv = deepcopy(conv_kwargs)
            self.conv_kwargs_first_conv['stride'] = first_stride
        else:
            self.conv_kwargs_first_conv = conv_kwargs
        super(StackedConvLayers, self).__init__()
        # First block: in_channels -> out_channels (possibly strided);
        # remaining blocks: out_channels -> out_channels.
        self.blocks = nn.Sequential(*([basic_block(input_feature_channels, output_feature_channels, self.conv_op, self.conv_kwargs_first_conv, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs)] + [basic_block(output_feature_channels, output_feature_channels, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs) for _ in range((num_convs - 1))]))
    def forward(self, x):
        return self.blocks(x)
_module()
class LCLDataset(MInstrDataset):
    """Base dataset for in-context-learning style instruction data.

    Loads one annotation record per jsonl line; subclasses implement the
    sampling / item-construction hooks.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs, placeholders=(IMAGE_PLACEHOLDER, EXPR_PLACEHOLDER))
        self.data = self._get_annos(self.filename)
        # Per-class sampling state, populated lazily by subclasses.
        self.cls_neg_label = None
        self.cls_idx = None
        self.cls_name = None

    def _get_annos(self, filename):
        """Read every record from the jsonl annotation file."""
        with jsonlines.open(filename) as reader:
            return [metas for metas in reader]

    def get_raw_item(self, index):
        """Return the raw annotation record at *index*."""
        return self.data[index]

    def get_ret(self, image, question, answer, conv_mode=None):
        """Package one (image, question, answer) triple as a conversation dict."""
        conversation = [
            {'from': 'human', 'value': question},
            {'from': 'gpt', 'value': f'{answer}'},
        ]
        ret = {'image': image, 'conversations': conversation}
        if conv_mode is not None:
            ret['mode'] = conv_mode
        return ret

    def get_samples(self, index, mode):
        raise NotImplementedError

    def __getitem__(self, index):
        raise NotImplementedError

    def __get_icl_item__(self, index, shot):
        raise NotImplementedError
def main():
    """Entry point: pre-train a Wav2Vec2 model on a speech dataset.

    Parses model/data/training arguments, loads (and if necessary splits)
    the dataset, decodes audio files to raw speech arrays, filters overlong
    utterances, extracts normalized features, and launches training.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if ('validation' not in datasets.keys()):
        # No dedicated validation split: carve the first
        # `validation_split_percentage` percent off the train split.
        datasets = DatasetDict()
        datasets['validation'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]', cache_dir=model_args.cache_dir)
        datasets['train'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]', cache_dir=model_args.cache_dir)
    else:
        datasets = DatasetDict()
        datasets['validation'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split='validation', cache_dir=model_args.cache_dir)
        datasets['train'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'{data_args.train_split_name}', cache_dir=model_args.cache_dir)
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True)
    def prepare_dataset(batch):
        # Decode the audio file path column into a raw waveform at the
        # extractor's sampling rate.
        (batch['speech'], _) = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch
    vectorized_datasets = datasets.map(prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets['train'].column_names)
    # Drop utterances longer than max_duration_in_seconds.
    vectorized_datasets = vectorized_datasets.filter((lambda data: (len(data['speech']) < int((data_args.max_duration_in_seconds * feature_extractor.sampling_rate)))))
    def normalize(batch):
        # Turn raw speech into normalized model input features.
        return feature_extractor(batch['speech'], sampling_rate=feature_extractor.sampling_rate)
    vectorized_datasets = vectorized_datasets.map(normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache), remove_columns=vectorized_datasets['train'].column_names)
    config = Wav2Vec2Config.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing)
    # Pre-training objective is only defined for the stable-layer-norm
    # architecture variant.
    if ((not config.do_stable_layer_norm) or (config.feat_extract_norm != 'layer')):
        raise ValueError("PreTraining is only supported for ``config.do_stable_layer_norm=True`` and ``config.feat_extract_norm='layer'")
    model = Wav2Vec2ForPreTraining(config)
    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)
    trainer = Wav2Vec2PreTrainer(model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets['train'], eval_dataset=vectorized_datasets['validation'], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay)
    trainer.train()
def eye_like(x: Tensor, /) -> Tensor:
    """Return identity matrices matching *x*'s shape, dtype and device.

    *x* must have shape ``(*, n, n)``; the result has the same shape, with
    each trailing ``n x n`` slice equal to the identity.

    Raises:
        ValueError: if *x* has fewer than two dimensions or its last two
            dimensions are not square.
    """
    if x.ndim < 2:
        ndim = x.ndim
        raise ValueError(f'Input must have at least two dimensions! Got "{ndim}"')
    rows, cols = x.shape[-2:]
    if rows != cols:
        raise ValueError(f'Input last two dimensions must be square (*, n, n)! Got "{x.shape}"')
    # Broadcast a single identity across all leading batch dimensions, then
    # clone so the caller gets writable, independent storage.
    batch_view = ((1,) * (x.ndim - 2)) + (rows, rows)
    eye = torch.eye(rows, dtype=x.dtype, device=x.device)
    return eye.view(batch_view).expand_as(x).clone()
class LibrispeechASR(datasets.GeneratorBasedBuilder):
    """LibriSpeech automatic-speech-recognition dataset builder.

    Provides 'clean', 'other' and combined 'all' configurations; examples
    carry the audio bytes, transcript and speaker/chapter metadata.
    """
    # Audio rows are large, so keep Arrow write batches small.
    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = 'all'
    BUILDER_CONFIGS = [LibrispeechASRConfig(name='clean', description="'Clean' speech."), LibrispeechASRConfig(name='other', description="'Other', more challenging, speech."), LibrispeechASRConfig(name='all', description='Combined clean and other dataset.')]
    def _info(self):
        """Describe the dataset schema and its ASR task template."""
        return datasets.DatasetInfo(description=_DESCRIPTION, features=datasets.Features({'file': datasets.Value('string'), 'audio': datasets.Audio(sampling_rate=16000), 'text': datasets.Value('string'), 'speaker_id': datasets.Value('int64'), 'chapter_id': datasets.Value('int64'), 'id': datasets.Value('string')}), supervised_keys=('file', 'text'), homepage=_URL, citation=_CITATION, task_templates=[AutomaticSpeechRecognition(audio_column='audio', transcription_column='text')])
    def _split_generators(self, dl_manager):
        """Download the archives and build train/dev/test split generators."""
        archive_path = dl_manager.download(_DL_URLS[self.config.name])
        # In streaming mode nothing is extracted locally; generators then read
        # audio bytes straight from the archives.
        local_extracted_archive = (dl_manager.extract(archive_path) if (not dl_manager.is_streaming) else {})
        if (self.config.name == 'clean'):
            train_splits = [datasets.SplitGenerator(name='train.100', gen_kwargs={'local_extracted_archive': local_extracted_archive.get('train.100'), 'files': dl_manager.iter_archive(archive_path['train.100'])}), datasets.SplitGenerator(name='train.360', gen_kwargs={'local_extracted_archive': local_extracted_archive.get('train.360'), 'files': dl_manager.iter_archive(archive_path['train.360'])})]
            dev_splits = [datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'local_extracted_archive': local_extracted_archive.get('dev'), 'files': dl_manager.iter_archive(archive_path['dev'])})]
            test_splits = [datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'local_extracted_archive': local_extracted_archive.get('test'), 'files': dl_manager.iter_archive(archive_path['test'])})]
        elif (self.config.name == 'other'):
            train_splits = [datasets.SplitGenerator(name='train.500', gen_kwargs={'local_extracted_archive': local_extracted_archive.get('train.500'), 'files': dl_manager.iter_archive(archive_path['train.500'])})]
            dev_splits = [datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'local_extracted_archive': local_extracted_archive.get('dev'), 'files': dl_manager.iter_archive(archive_path['dev'])})]
            test_splits = [datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'local_extracted_archive': local_extracted_archive.get('test'), 'files': dl_manager.iter_archive(archive_path['test'])})]
        elif (self.config.name == 'all'):
            # BUG FIX: `train_splits` was never assigned for the 'all' config
            # (the default!), so the final return raised UnboundLocalError.
            # Restore the three train archives as in the official script.
            train_splits = [datasets.SplitGenerator(name='train.clean.100', gen_kwargs={'local_extracted_archive': local_extracted_archive.get('train.clean.100'), 'files': dl_manager.iter_archive(archive_path['train.clean.100'])}), datasets.SplitGenerator(name='train.clean.360', gen_kwargs={'local_extracted_archive': local_extracted_archive.get('train.clean.360'), 'files': dl_manager.iter_archive(archive_path['train.clean.360'])}), datasets.SplitGenerator(name='train.other.500', gen_kwargs={'local_extracted_archive': local_extracted_archive.get('train.other.500'), 'files': dl_manager.iter_archive(archive_path['train.other.500'])})]
            dev_splits = [datasets.SplitGenerator(name='validation.clean', gen_kwargs={'local_extracted_archive': local_extracted_archive.get('dev.clean'), 'files': dl_manager.iter_archive(archive_path['dev.clean'])}), datasets.SplitGenerator(name='validation.other', gen_kwargs={'local_extracted_archive': local_extracted_archive.get('dev.other'), 'files': dl_manager.iter_archive(archive_path['dev.other'])})]
            test_splits = [datasets.SplitGenerator(name='test.clean', gen_kwargs={'local_extracted_archive': local_extracted_archive.get('test.clean'), 'files': dl_manager.iter_archive(archive_path['test.clean'])}), datasets.SplitGenerator(name='test.other', gen_kwargs={'local_extracted_archive': local_extracted_archive.get('test.other'), 'files': dl_manager.iter_archive(archive_path['test.other'])})]
        return ((train_splits + dev_splits) + test_splits)
    def _generate_examples(self, files, local_extracted_archive):
        """Yield (key, example) pairs by pairing .flac audio with .trans.txt lines.

        Archive layout groups each chapter's flac files with one transcript
        file; once the counts match, the buffered chapter is flushed.
        """
        key = 0
        audio_data = {}
        transcripts = []
        for (path, f) in files:
            if path.endswith('.flac'):
                # Buffer raw audio bytes keyed by the utterance id.
                id_ = path.split('/')[(- 1)][:(- len('.flac'))]
                audio_data[id_] = f.read()
            elif path.endswith('.trans.txt'):
                for line in f:
                    if line:
                        line = line.decode('utf-8').strip()
                        (id_, transcript) = line.split(' ', 1)
                        audio_file = f'{id_}.flac'
                        (speaker_id, chapter_id) = [int(el) for el in id_.split('-')[:2]]
                        audio_file = (os.path.join(local_extracted_archive, audio_file) if local_extracted_archive else audio_file)
                        transcripts.append({'id': id_, 'speaker_id': speaker_id, 'chapter_id': chapter_id, 'file': audio_file, 'text': transcript})
                # Flush only when every buffered transcript has its audio.
                if (audio_data and (len(audio_data) == len(transcripts))):
                    for transcript in transcripts:
                        audio = {'path': transcript['file'], 'bytes': audio_data[transcript['id']]}
                        (yield (key, {'audio': audio, **transcript}))
                        key += 1
                    audio_data = {}
                    transcripts = []
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.