def parse_path_value(next):
token = next()
value = token[0]
if value:
if ((value[:1] == "'") or (value[:1] == '"')):
return value[1:(- 1)]
try:
return int(value)
except ValueError:
pass
elif token[1].isdigit():
return int(token[1])
else:
name = token[1].lower()
if (name == 'true'):
return True
elif (name == 'false'):
return False
raise ValueError(("Invalid attribute predicate: '%s'" % value)) |
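# Hedged usage sketch for parse_path_value; the (value, name) token pairs below are made up
# to mirror an ElementPath-style tokenizer, not taken from the original module.
_tokens = iter([("'spam'", ''), ('', 'true'), ('', '7')])
assert parse_path_value(lambda: next(_tokens)) == 'spam'
assert parse_path_value(lambda: next(_tokens)) is True
assert parse_path_value(lambda: next(_tokens)) == 7 |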
def start_of_bilou_slot(tags, i):
if (i == 0):
return (tags[i] != OUTSIDE)
if (tags[i] == OUTSIDE):
return False
if tags[i].startswith(BEGINNING_PREFIX):
return True
if tags[i].startswith(UNIT_PREFIX):
return True
if tags[(i - 1)].startswith(UNIT_PREFIX):
return True
if tags[(i - 1)].startswith(LAST_PREFIX):
return True
if (tags[(i - 1)] != OUTSIDE):
return False
return True |
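# Hedged example for start_of_bilou_slot; the constants below are illustrative stand-ins for
# the module-level BILOU prefixes, not copied from the original source.
OUTSIDE = 'O'
BEGINNING_PREFIX, UNIT_PREFIX, LAST_PREFIX = 'B-', 'U-', 'L-'
tags = ['O', 'B-city', 'I-city', 'L-city', 'U-date']
assert start_of_bilou_slot(tags, 1)       # 'B-' opens a slot
assert not start_of_bilou_slot(tags, 2)   # 'I-' continues the slot opened at index 1
assert start_of_bilou_slot(tags, 4)       # 'U-' is a single-token slot |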
def gdb_function_value_to_unicode(function):
    # Reconstructed decorator: this line was stripped to a bare '(function)' in the dump;
    # functools.wraps(function) is the usual wrapper-preserving decorator here.
    @functools.wraps(function)
def wrapper(self, string, *args, **kwargs):
if isinstance(string, gdb.Value):
string = string.string()
return function(self, string, *args, **kwargs)
return wrapper |
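# Hedged usage sketch: applied to a method, the wrapper converts a gdb.Value argument to a
# Python str before the call (class/method names are made up; this only runs inside GDB,
# where the 'gdb' module referenced by the wrapper is importable).
class _DemoPrinter:
    @gdb_function_value_to_unicode
    def display(self, string):
        return string.upper() |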
def make_read_B(sdfg, state, vtype):
dtype = vtype.base_type
mem_veclen = (64 // dtype.bytes)
mtype = dace.vector(dtype, mem_veclen)
(entry, exit) = state.add_map('read_B', {'n0': '0:N//TN', 'm0': '0:M//TM', 'k': '0:K', 'm1': f'0:TM//{mem_veclen}'}, schedule=dace.ScheduleType.FPGA_Device)
mem = state.add_read('B_device')
to_feeder = state.add_write('B_to_feeder')
tasklet = state.add_tasklet('read_B', {'from_memory'}, {'to_feeder'}, 'to_feeder = from_memory')
state.add_memlet_path(mem, entry, tasklet, dst_conn='from_memory', memlet=dace.Memlet(f'B_device[k, m0 * (TM//{mem_veclen}) + m1]'))
if (mem_veclen > vtype.veclen):
sdfg.add_stream('B_to_converter', dtype=mtype, buffer_size=MINIMUM_CHANNEL_DEPTH, storage=dace.StorageType.FPGA_Local, transient=True)
to_converter_write = state.add_write('B_to_converter')
state.add_memlet_path(tasklet, exit, to_converter_write, src_conn='to_feeder', memlet=dace.Memlet('B_to_converter[0]'))
to_converter_read = state.add_read('B_to_converter')
gearbox = Gearbox(f'(N//TN) * (M//TM) * K * (TM//{mem_veclen})', 'convert_B', dace.ScheduleType.FPGA_Device)
state.add_memlet_path(to_converter_read, gearbox, dst_conn='from_memory', memlet=dace.Memlet(f'B_to_converter[0]', dynamic=True))
state.add_memlet_path(gearbox, to_feeder, src_conn='to_feeder', memlet=dace.Memlet('B_to_feeder[0]', dynamic=True))
else:
state.add_memlet_path(tasklet, exit, to_feeder, src_conn='to_feeder', memlet=dace.Memlet(f'B_to_feeder[0]')) |
def dot_distance_between_points(unit_vector, point, reference_point):
return np.dot(unit_vector, (np.array(point) - np.array(reference_point))) |
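# Worked example: signed distance of point (3, 4) from reference (1, 1) along the x axis.
import numpy as np
assert dot_distance_between_points(np.array([1.0, 0.0]), (3, 4), (1, 1)) == 2.0 |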
def unfreeze_weights(*models):
for model in models:
for k in model.parameters():
k.requires_grad = True |
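# Hedged usage sketch with throwaway torch modules (the encoder/decoder names are illustrative).
import torch.nn as nn
encoder, decoder = nn.Linear(4, 4), nn.Linear(4, 2)
for p in encoder.parameters():
    p.requires_grad = False
unfreeze_weights(encoder, decoder)
assert all(p.requires_grad for p in encoder.parameters()) |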
def getWeight(shape, name=''):
with tf.variable_scope('weights'):
initializer = tf.contrib.layers.xavier_initializer()
W = tf.get_variable(('weight' + name), shape=shape, initializer=initializer)
return W |
def test_case70():
url = (brokerIp + '/ngsi-ld/v1/entityOperations/upsert')
    headers = {'Content-Type': 'application/json', 'Link': '<{{link}}>; rel="http://www.w3.org/ns/json-ld#context"; type="application/ld+json"'}
r = requests.post(url, data=json.dumps(ld_data.subdata60), headers=headers)
print(r.content)
url = (brokerIp + '/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A0101')
headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json'}
r = requests.get(url, headers=headers)
print(r.content)
url = (brokerIp + '/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A9090')
headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json'}
r = requests.get(url, headers=headers)
print(r.content)
assert (r.status_code == 200) |
@pytest.mark.skip
def test_tensordot_22():
    # Reconstructed decorator (the '@dace.program' prefix appears to have been stripped in this dump):
    @dace.program(device=dace.dtypes.DeviceType.GPU)
def tensordot_2a(A: dace.float32[(3, 3, 3, 3, 3, 3)], B: dace.float32[(3, 3, 3, 3, 3, 3)]):
return np.tensordot(A, B, axes=([0, 3], [4, 2]), out_axes=[7, 6, 5, 4, 3, 2, 1, 0])
A = np.arange((3 ** 6), dtype=np.float32).reshape(3, 3, 3, 3, 3, 3)
B = np.arange((3 ** 6), dtype=np.float32).reshape(3, 3, 3, 3, 3, 3)
ref = np.transpose(np.tensordot(A, B, axes=([0, 3], [4, 2])), axes=[7, 6, 5, 4, 3, 2, 1, 0])
with dace.config.set_temporary('library', 'linalg', 'default_implementation', value='cuTENSOR'):
assert np.allclose(tensordot_2a(A.copy(), B.copy()), ref)
    # Reconstructed decorator (the '@dace.program' prefix appears to have been stripped in this dump):
    @dace.program(device=dace.dtypes.DeviceType.GPU)
def tensordot_2b(A: dace.float32[(3, 3, 3, 3, 3, 3)], B: dace.float32[(3, 3, 3, 3, 3, 3)]):
return np.tensordot(A, B, axes=([0, 3], [4, 2]), out_axes=[0, 7, 1, 6, 2, 5, 3, 4])
A = np.arange((3 ** 6), dtype=np.float32).reshape(3, 3, 3, 3, 3, 3)
B = np.arange((3 ** 6), dtype=np.float32).reshape(3, 3, 3, 3, 3, 3)
ref = np.transpose(np.tensordot(A, B, axes=([0, 3], [4, 2])), axes=[0, 7, 1, 6, 2, 5, 3, 4])
with dace.config.set_temporary('library', 'linalg', 'default_implementation', value='cuTENSOR'):
assert np.allclose(tensordot_2b(A.copy(), B.copy()), ref) |
def pythran_type(Ty, ptype='ndarray'):
if Ty.is_buffer:
(ndim, dtype) = (Ty.ndim, Ty.dtype)
if isinstance(dtype, CStructOrUnionType):
ctype = dtype.cname
elif isinstance(dtype, CType):
ctype = dtype.sign_and_name()
elif isinstance(dtype, CTypedefType):
ctype = dtype.typedef_cname
else:
raise ValueError(('unsupported type %s!' % dtype))
if pythran_is_pre_0_9:
return ('pythonic::types::%s<%s,%d>' % (ptype, ctype, ndim))
else:
return ('pythonic::types::%s<%s,pythonic::types::pshape<%s>>' % (ptype, ctype, ','.join((('long',) * ndim))))
if Ty.is_pythran_expr:
return Ty.pythran_type
if Ty.is_numeric:
return Ty.sign_and_name()
raise ValueError(('unsupported pythran type %s (%s)' % (Ty, type(Ty)))) |
def reset_config(cfg, args):
if args.root:
cfg.data.root = args.root
if args.sources:
cfg.data.sources = args.sources
if args.targets:
cfg.data.targets = args.targets
if args.transforms:
cfg.data.transforms = args.transforms |
# NOTE: a registration decorator was truncated to '_module()' here in this dump; it was most
# likely a '...register_module()'-style registry decorator for the class below.
class CBDNet(BaseNet):
def __init__(self, io_channels=3, estimate_channels=32, nlevel_denoise=3, nf_base_denoise=64, nf_gr_denoise=2, nl_base_denoise=1, nl_gr_denoise=2, down_denoise='avepool2d', up_denoise='transpose2d', reduce_denoise='add'):
super().__init__()
estimate_list = nn.ModuleList([nn.Conv2d(in_channels=io_channels, out_channels=estimate_channels, kernel_size=3, padding=(3 // 2)), nn.ReLU(inplace=True)])
for _ in range(3):
estimate_list += nn.ModuleList([nn.Conv2d(in_channels=estimate_channels, out_channels=estimate_channels, kernel_size=3, padding=(3 // 2)), nn.ReLU(inplace=True)])
estimate_list += nn.ModuleList([nn.Conv2d(estimate_channels, io_channels, 3, padding=(3 // 2)), nn.ReLU(inplace=True)])
self.estimate = nn.Sequential(*estimate_list)
self.denoise = UNet(nf_in=(io_channels * 2), nf_out=io_channels, nlevel=nlevel_denoise, nf_base=nf_base_denoise, nf_gr=nf_gr_denoise, nl_base=nl_base_denoise, nl_gr=nl_gr_denoise, down=down_denoise, up=up_denoise, reduce=reduce_denoise, residual=False)
def forward(self, x):
estimated_noise_map = self.estimate(x)
res = self.denoise(torch.cat([x, estimated_noise_map], dim=1))
out = (res + x)
return out |
def middle_sqrt(Y: dace.float32[(3, 3)]):
intermediate = dace.define_local([3, 3], dace.float32)
W = dace.define_local([3, 3], dace.float32)
intermediate[:] = dace.elementwise((lambda x: sqrt(x)), Y)
inner_sdfg(intermediate, W)
Z = np.sum(W)
return Z |
@pytest.mark.parametrize('disable', [True, False])
def test_multi(disable):
split = InvertibleModuleWrapper(SplitChannels(2), disable=disable)
concat = InvertibleModuleWrapper(ConcatenateChannels(2), disable=disable)
assert is_invertible_module(split, test_input_shape=(1, 3, 32, 32))
assert is_invertible_module(concat, test_input_shape=((1, 2, 32, 32), (1, 1, 32, 32)))
conv_a = torch.nn.Conv2d(2, 2, 3)
conv_b = torch.nn.Conv2d(1, 1, 3)
x = torch.rand(1, 3, 32, 32)
x.requires_grad = True
(a, b) = split(x)
(a, b) = (conv_a(a), conv_b(b))
y = concat(a, b)
loss = torch.sum(y)
loss.backward() |
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial) |
def _Psi_coeff(l1, l2, p1, p2, m1, m2, r1, r2):
    return (binom(l1, m1) * binom(l2, m2)
            * falling_factorial((-2) * r1 - p2 / 2, m2)
            * falling_factorial(-p1 / 2, m1)
            * calB(l1 - m1, r1, 2)
            * calB(l2 - m2, r2, -2)) |
def load_audio(path: Union[str, Path], ch_format: str, sample_rate: int=None, downmix_to_mono: bool=False, resample_by: str='ffmpeg', **kwargs) -> Tuple[np.ndarray, int]:
if (ch_format not in (STR_CH_FIRST, STR_CH_LAST)):
raise ValueError(f'ch_format is wrong here -> {ch_format}')
if (os.stat(path).st_size > 8000):
if (resample_by == 'librosa'):
(src, sr) = _resample_load_librosa(path, sample_rate, downmix_to_mono, **kwargs)
elif (resample_by == 'ffmpeg'):
(src, sr) = _resample_load_ffmpeg(path, sample_rate, downmix_to_mono)
else:
            raise NotImplementedError(f'resample_by: "{resample_by}" is not supported yet')
else:
raise ValueError('Given audio is too short!')
return (src, sr) |
def processInputStreamData(obj):
print('receive context entity')
entityId = obj['entityId']
if (entityId['type'] == 'Camera'):
getCameraURL(obj)
elif (entityId['type'] == 'Pushbutton'):
handlePushButton(obj) |
def vit_b_16_c100():
out_base_name = 'ViT_B_16_norm'
out_dir = 'results/figs'
plot_fn = plot.plot_grad_norm
fn_to_contour = {'results/vit/cifar100/fast_dcgn_global_no_nesterov_meanstd05_vit_base_patch16_384_in21k_imagenet_384c384_8p_bw12_gpipe_acyclic_cifar100_384_gpipe_bs_512_se_16_seed_42.json': 'global'}
gen_plot_from_dict(fn_to_contour, plot_fn, out_base_name, out_dir=out_dir) |
@patch('sdv.datasets.demo._get_data_from_bucket')
def test__download(mock_get_data_from_bucket):
mock_get_data_from_bucket.return_value = b''
_download('single_table', 'ring')
mock_get_data_from_bucket.assert_called_once_with('SINGLE_TABLE/ring.zip') |
class QuantEmbedding(nn.Module):
def __init__(self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, weight_bit=8, momentum=0.95, quant_mode=False):
super().__init__()
self.num_ = num_embeddings
self.dim = embedding_dim
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))
self.register_buffer('weight_scaling_factor', torch.zeros(1))
self.register_buffer('weight_integer', torch.zeros_like(self.weight))
self.weight_bit = weight_bit
self.momentum = momentum
self.quant_mode = quant_mode
self.percentile_mode = False
self.weight_function = SymmetricQuantFunction.apply
def forward(self, x, positions=None, incremental_state=None):
if (not self.quant_mode):
return (F.embedding(x, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse), None)
w = self.weight
w_transform = w.data.detach()
w_min = w_transform.min().expand(1)
w_max = w_transform.max().expand(1)
self.weight_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, False)
self.weight_integer = self.weight_function(self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor)
emb_int = F.embedding(x, self.weight_integer, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse)
return ((emb_int * self.weight_scaling_factor), self.weight_scaling_factor) |
def setup_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
parser.add_argument('--bert_model', default=None, type=str, required=True, help='Bert pre-trained model selected in the list: bert-base-uncased, bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, bert-base-multilingual-cased, bert-base-chinese, biobert.')
parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train.')
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
parser.add_argument('--do_test', action='store_true', help='Whether to run eval on the test set.')
parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
parser.add_argument('--train_batch_size', default=32, type=int, help='Total batch size for training.')
parser.add_argument('--eval_batch_size', default=8, type=int, help='Total batch size for eval.')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit float precision instead of 32-bit')
parser.add_argument('--loss_scale', type=float, default=0, help='Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n0 (default value): dynamic loss scaling.\nPositive power of 2: static loss scaling value.\n')
parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
parser.add_argument('--model_loc', type=str, default='', help='Specify the location of the bio or clinical bert model')
return parser |
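# Hedged usage sketch: the argument values below are placeholders, exercising only the
# required flags and one default.
args = setup_parser().parse_args(['--data_dir', './data', '--bert_model', 'bert-base-uncased',
                                  '--task_name', 'ner', '--output_dir', './out', '--do_train'])
assert args.do_train and args.train_batch_size == 32 |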
# Reconstructed decorator (prefix stripped to '_level_function()' in this dump):
@high_level_function()
def validity_error(array, *, exception=False):
(yield (array,))
return _impl(array, exception) |
def fuse_sg(module):
sdfg = module.sdfg
sdfg.apply_transformations_repeated(TrivialMapRangeElimination)
SubgraphFusion.apply_to(sdfg, *sdfg.node(0).nodes()) |
class DBPedia(Task):
def __init__(self):
super().__init__()
self.class_number = 14
self.file_by_split = dict(train='dbpedia_csv/train.train.csv', val='dbpedia_csv/train.dev.csv', test='dbpedia_csv/test.csv')
self.max_length = 400
def read_data(path, max_length):
def label_fn(x):
return (x - 1)
rows = pd.read_csv(path, sep=',', error_bad_lines=False, header=None, skiprows=None, quoting=0, keep_default_na=False, encoding='utf-8')
label_fn = (label_fn if (label_fn is not None) else (lambda x: x))
labels = rows[0].apply((lambda x: label_fn(x)))
sentences = rows[2]
sentences = sentences.apply((lambda x: clean_tokenize_truncate(x, max_length)))
return (sentences.tolist(), labels.tolist()) |
def parse():
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--output', type=str, default='results.pt')
parser.add_argument('--acqf', type=str, default='cucb')
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--fn', type=str, default='sinc')
return parser.parse_args() |
def get_graph_matrix(edge2idx, objects, relations):
triples = []
for (cat, ships) in relations.items():
for (i, js) in enumerate(ships):
for j in js:
triples.append((i, edge2idx[cat], j))
rel_count = {}
for i in range(len(triples)):
pair = (triples[i][0], triples[i][2])
rel_count[pair] = (rel_count.get(pair, 0) + 1)
num_rel = max(list(rel_count.values()))
n = len(objects)
edge_M = np.zeros((n, n, num_rel))
rel_count = {k: 0 for k in rel_count}
for i in range(len(triples)):
(a, b) = (triples[i][0], triples[i][2])
edge_M[a][b][rel_count[(a, b)]] = triples[i][1]
rel_count[(a, b)] += 1
return edge_M |
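# Hedged worked example, assuming `relations` maps a category name to per-object lists of
# related object indices and `edge2idx` maps category -> edge id (toy data, not from the source).
objects = ['cup', 'table', 'chair']
edge2idx = {'on': 1, 'near': 2}
relations = {'on': [[1], [], []], 'near': [[1], [], [1]]}
M = get_graph_matrix(edge2idx, objects, relations)
assert M.shape == (3, 3, 2)            # object 0 relates to object 1 via two relations
assert M[0, 1].tolist() == [1.0, 2.0]  # edge ids for 'on' and 'near' |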
# Reconstructed decorator (prefix stripped to '_BOX_HEADS.register(...)' in this dump):
@ROI_BOX_HEADS.register('roi_xconv1fc_head')
class roi_xconv1fc_head(nn.Module):
def __init__(self, dim_in, spatial_scale):
super().__init__()
self.dim_in = dim_in[(- 1)]
method = cfg.FAST_RCNN.ROI_XFORM_METHOD
resolution = cfg.FAST_RCNN.ROI_XFORM_RESOLUTION
sampling_ratio = cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO
pooler = Pooler(method=method, output_size=resolution, scales=spatial_scale, sampling_ratio=sampling_ratio)
self.pooler = pooler
use_lite = cfg.FAST_RCNN.CONVFC_HEAD.USE_LITE
use_bn = cfg.FAST_RCNN.CONVFC_HEAD.USE_BN
use_gn = cfg.FAST_RCNN.CONVFC_HEAD.USE_GN
conv_dim = cfg.FAST_RCNN.CONVFC_HEAD.CONV_DIM
num_stacked_convs = cfg.FAST_RCNN.CONVFC_HEAD.NUM_STACKED_CONVS
dilation = cfg.FAST_RCNN.CONVFC_HEAD.DILATION
xconvs = []
for ix in range(num_stacked_convs):
xconvs.append(make_conv(self.dim_in, conv_dim, kernel=3, stride=1, dilation=dilation, use_dwconv=use_lite, use_bn=use_bn, use_gn=use_gn, suffix_1x1=use_lite, use_relu=True))
self.dim_in = conv_dim
self.add_module('xconvs', nn.Sequential(*xconvs))
input_size = ((self.dim_in * resolution[0]) * resolution[1])
mlp_dim = cfg.FAST_RCNN.CONVFC_HEAD.MLP_DIM
self.fc6 = make_fc(input_size, mlp_dim, use_bn=False, use_gn=False)
self.dim_out = mlp_dim
if cfg.FAST_RCNN.CONVFC_HEAD.USE_WS:
self = convert_conv2convws_model(self)
def forward(self, x, proposals):
x = self.pooler(x, proposals)
x = self.xconvs(x)
x = x.view(x.size(0), (- 1))
x = F.relu(self.fc6(x), inplace=True)
return x |
def make_vecAdd_sdfg(sdfg_name: str, dtype=dace.float32):
n = dace.symbol('size')
vecAdd_sdfg = dace.SDFG(sdfg_name)
vecAdd_state = vecAdd_sdfg.add_state('vecAdd_nested')
x_name = 'x'
y_name = 'y'
z_name = 'z'
vecAdd_sdfg.add_array(x_name, [n], dtype=dtype)
vecAdd_sdfg.add_array(y_name, [n], dtype=dtype)
vecAdd_sdfg.add_array(z_name, [n], dtype=dtype)
x_in = vecAdd_state.add_read(x_name)
y_in = vecAdd_state.add_read(y_name)
z_out = vecAdd_state.add_write(z_name)
(vecMap_entry, vecMap_exit) = vecAdd_state.add_map('vecAdd_map', dict(i='0:{}'.format(n)))
vecAdd_tasklet = vecAdd_state.add_tasklet('vecAdd_task', ['x_con', 'y_con'], ['z_con'], 'z_con = x_con + y_con')
vecAdd_state.add_memlet_path(x_in, vecMap_entry, vecAdd_tasklet, dst_conn='x_con', memlet=dace.Memlet.simple(x_in.data, 'i'))
vecAdd_state.add_memlet_path(y_in, vecMap_entry, vecAdd_tasklet, dst_conn='y_con', memlet=dace.Memlet.simple(y_in.data, 'i'))
vecAdd_state.add_memlet_path(vecAdd_tasklet, vecMap_exit, z_out, src_conn='z_con', memlet=dace.Memlet.simple(z_out.data, 'i'))
return vecAdd_sdfg |
class TruncateDataset(BaseWrapperDataset):
def __init__(self, dataset, truncation_length):
super().__init__(dataset)
assert (truncation_length is not None)
self.truncation_length = truncation_length
self.dataset = dataset
def __getitem__(self, index):
item = self.dataset[index]
item_len = item.size(0)
if (item_len > self.truncation_length):
item = item[:self.truncation_length]
return item
    @property  # reconstructed: 'sizes' is exposed as an attribute-style property in the wrapped-dataset API
    def sizes(self):
return np.minimum(self.dataset.sizes, self.truncation_length)
def __len__(self):
return len(self.dataset) |
class SegmentationAwareScore(EvaluatorScore):
def __init__(self, weights_path):
super().__init__()
self.segm_network = SegmentationModule(weights_path=weights_path, use_default_normalization=True).eval()
self.target_class_freq_by_image_total = []
self.target_class_freq_by_image_mask = []
self.pred_class_freq_by_image_mask = []
def forward(self, pred_batch, target_batch, mask):
pred_segm_flat = self.segm_network.predict(pred_batch)[0].view(pred_batch.shape[0], (- 1)).long().detach().cpu().numpy()
target_segm_flat = self.segm_network.predict(target_batch)[0].view(pred_batch.shape[0], (- 1)).long().detach().cpu().numpy()
mask_flat = (mask.view(mask.shape[0], (- 1)) > 0.5).detach().cpu().numpy()
batch_target_class_freq_total = []
batch_target_class_freq_mask = []
batch_pred_class_freq_mask = []
for (cur_pred_segm, cur_target_segm, cur_mask) in zip(pred_segm_flat, target_segm_flat, mask_flat):
cur_target_class_freq_total = np.bincount(cur_target_segm, minlength=NUM_CLASS)[(None, ...)]
cur_target_class_freq_mask = np.bincount(cur_target_segm[cur_mask], minlength=NUM_CLASS)[(None, ...)]
cur_pred_class_freq_mask = np.bincount(cur_pred_segm[cur_mask], minlength=NUM_CLASS)[(None, ...)]
self.target_class_freq_by_image_total.append(cur_target_class_freq_total)
self.target_class_freq_by_image_mask.append(cur_target_class_freq_mask)
self.pred_class_freq_by_image_mask.append(cur_pred_class_freq_mask)
batch_target_class_freq_total.append(cur_target_class_freq_total)
batch_target_class_freq_mask.append(cur_target_class_freq_mask)
batch_pred_class_freq_mask.append(cur_pred_class_freq_mask)
batch_target_class_freq_total = np.concatenate(batch_target_class_freq_total, axis=0)
batch_target_class_freq_mask = np.concatenate(batch_target_class_freq_mask, axis=0)
batch_pred_class_freq_mask = np.concatenate(batch_pred_class_freq_mask, axis=0)
return (batch_target_class_freq_total, batch_target_class_freq_mask, batch_pred_class_freq_mask)
def reset(self):
super().reset()
self.target_class_freq_by_image_total = []
self.target_class_freq_by_image_mask = []
self.pred_class_freq_by_image_mask = [] |
class PeriodicWriter(HookBase):
def __init__(self, writers, period=20):
self._writers = writers
for w in writers:
assert isinstance(w, EventWriter), w
self._period = period
def after_step(self):
if ((((self.trainer.iter + 1) % self._period) == 0) or (self.trainer.iter == (self.trainer.max_iter - 1))):
for writer in self._writers:
writer.write()
def after_train(self):
for writer in self._writers:
writer.close() |
class YT8MMusicTextClipsJsonifier(DatasetJsonifier):
def load_raw_data(self):
assert (self.split in ('train', 'test', 'all'))
if (self.split == 'all'):
train_df = pd.read_csv(os.path.join(self.input_dir, 'train.csv'))
test_df = pd.read_csv(os.path.join(self.input_dir, 'test.csv'))
df = pd.concat((train_df, test_df))
elif (self.split == 'train'):
df = pd.read_csv(os.path.join(self.input_dir, 'train.csv'))
elif (self.split == 'test'):
df = pd.read_csv(os.path.join(self.input_dir, 'test.csv'))
self.data = df.to_dict('records') |
# Reconstructed decorator (stripped to a bare '()' in this dump; assumed to be a pytest fixture):
@pytest.fixture()
def stopping_condition() -> StoppingCondition:
return MinimumCoveragePlateauStoppingCondition(50, 1) |
@register_model
def poolformer_m48(pretrained=False, **kwargs):
layers = [8, 8, 24, 8]
embed_dims = [96, 192, 384, 768]
mlp_ratios = [4, 4, 4, 4]
downsamples = [True, True, True, True]
model = PoolFormer(layers, embed_dims=embed_dims, mlp_ratios=mlp_ratios, downsamples=downsamples, layer_scale_init_value=1e-06, **kwargs)
model.default_cfg = default_cfgs['poolformer_m']
if pretrained:
url = model_urls['poolformer_m48']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location='cpu', check_hash=True)
model.load_state_dict(checkpoint)
return model |
def remove_files_in_dir(dir):
if (not osp.isdir(dir)):
return
for file in os.listdir(dir):
file_path = os.path.join(dir, file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception as e:
print(e) |
def ClawGraph():
edge_list = [(0, 1), (0, 2), (0, 3)]
pos_dict = {0: (0, 1), 1: ((- 1), 0), 2: (0, 0), 3: (1, 0)}
return Graph(edge_list, pos=pos_dict, name='Claw graph') |
class LEDTokenizer(BartTokenizer):
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES |
@wrap_experiment(snapshot_mode='last')
def her_ddpg_fetchreach(ctxt=None, seed=1):
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
env = GarageEnv(gym.make('FetchReach-v1'))
policy = ContinuousMLPPolicy(env_spec=env.spec, name='Policy', hidden_sizes=[256, 256, 256], hidden_nonlinearity=tf.nn.relu, output_nonlinearity=tf.nn.tanh)
exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec, policy, sigma=0.2)
qf = ContinuousMLPQFunction(env_spec=env.spec, name='QFunction', hidden_sizes=[256, 256, 256], hidden_nonlinearity=tf.nn.relu)
replay_buffer = HERReplayBuffer(capacity_in_transitions=int(1000000.0), replay_k=4, reward_fn=env.compute_reward, env_spec=env.spec)
ddpg = DDPG(env_spec=env.spec, policy=policy, policy_lr=0.001, qf_lr=0.001, qf=qf, replay_buffer=replay_buffer, target_update_tau=0.01, steps_per_epoch=50, max_path_length=250, n_train_steps=40, discount=0.95, exploration_policy=exploration_policy, policy_optimizer=tf.compat.v1.train.AdamOptimizer, qf_optimizer=tf.compat.v1.train.AdamOptimizer, buffer_batch_size=256)
runner.setup(algo=ddpg, env=env)
runner.train(n_epochs=50, batch_size=256) |
def update_v(critic: Model, value: Model, batch: Batch, alpha: float, alg: str) -> Tuple[(Model, InfoDict)]:
(q1, q2) = critic(batch.observations, batch.actions)
q = jnp.minimum(q1, q2)
def value_loss_fn(value_params: Params) -> Tuple[(jnp.ndarray, InfoDict)]:
v = value.apply({'params': value_params}, batch.observations)
if (alg == 'SQL'):
sp_term = (((q - v) / (2 * alpha)) + 1.0)
sp_weight = jnp.where((sp_term > 0), 1.0, 0.0)
value_loss = ((sp_weight * (sp_term ** 2)) + (v / alpha)).mean()
elif (alg == 'EQL'):
sp_term = ((q - v) / alpha)
sp_term = jnp.minimum(sp_term, 5.0)
max_sp_term = jnp.max(sp_term, axis=0)
max_sp_term = jnp.where((max_sp_term < (- 1.0)), (- 1.0), max_sp_term)
max_sp_term = jax.lax.stop_gradient(max_sp_term)
value_loss = (jnp.exp((sp_term - max_sp_term)) + ((jnp.exp((- max_sp_term)) * v) / alpha)).mean()
else:
raise NotImplementedError('please choose SQL or EQL')
return (value_loss, {'value_loss': value_loss, 'v': v.mean(), 'q-v': (q - v).mean()})
(new_value, info) = value.apply_gradient(value_loss_fn)
return (new_value, info) |
def traverse_depthfirst(finaltree):
if (len(finaltree) == 1):
return (finaltree['id'], '')
strdepthfirst = ''
for child in finaltree['children']:
(child_id, child_shape) = traverse_depthfirst(child)
strdepthfirst += (((finaltree['id'] + '|') + child_id) + ' ')
if (len(child_shape) != 0):
strdepthfirst += child_shape
return (finaltree['id'], strdepthfirst) |
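# Hedged worked example, assuming inner nodes carry an 'id' plus a 'children' list and leaves
# are plain {'id': ...} dicts (the tree below is made up).
tree = {'id': 'A', 'children': [{'id': 'B'}, {'id': 'C', 'children': [{'id': 'D'}]}]}
root_id, edges = traverse_depthfirst(tree)
assert root_id == 'A'
assert edges == 'A|B A|C C|D ' |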
def GenerateSM80_TensorOp_884(manifest, cuda_version):
if (not CudaToolkitVersionSatisfies(cuda_version, 11, 0)):
return
layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor)]
math_inst = MathInstruction([8, 8, 4], DataType.f64, DataType.f64, DataType.f64, OpcodeClass.TensorOp, MathOperation.multiply_add)
min_cc = 80
max_cc = 1024
max_cc_smem_limited = 80
alignment_constraints = [1]
tile_descriptions = [TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([256, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), TileDescription([256, 32, 16], 3, [4, 1, 1], math_inst, min_cc, max_cc), TileDescription([32, 256, 16], 3, [1, 4, 1], math_inst, min_cc, max_cc), TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc), TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc)]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints) |
class TestStartFixer(unittest.TestCase):
def test_init(self):
contigs_fa = os.path.join(data_dir, 'start_fixer_init_contigs.fa')
dna_da = os.path.join(data_dir, 'start_fixer_init_dnaA.fa')
with self.assertRaises(start_fixer.Error):
sfixer = start_fixer.StartFixer('notafile', 'outprefix')
sfixer = start_fixer.StartFixer(contigs_fa, 'outprefix')
expected_contigs = {'contig1': pyfastaq.sequences.Fasta('contig1', 'ACGT'), 'contig2': pyfastaq.sequences.Fasta('contig2', 'AAAA')}
self.assertEqual(expected_contigs, sfixer.input_assembly)
self.assertEqual(set(), sfixer.ignore)
with self.assertRaises(start_fixer.Error):
sfixer = start_fixer.StartFixer(contigs_fa, 'outprefix', genes_fa='notafile')
ignore_file = os.path.join(data_dir, 'start_fixer_init_ignore_ids')
sfixer = start_fixer.StartFixer(contigs_fa, 'outprefix', ignore=ignore_file)
self.assertEqual({'ignore_me1', 'ignore_me2'}, sfixer.ignore)
def test_rename_contigs(self):
contigs_in = {'ctg1': pyfastaq.sequences.Fasta('ctg1', 'ACGT'), 'ctg2 foo bar': pyfastaq.sequences.Fasta('ctg2 foo bar', 'AAA')}
expected_contigs = {'ctg1': pyfastaq.sequences.Fasta('ctg1', 'ACGT'), 'ctg2': pyfastaq.sequences.Fasta('ctg2', 'AAA')}
expected_names = {'ctg1': 'ctg1', 'ctg2': 'ctg2 foo bar'}
(got_contigs, got_names) = start_fixer.StartFixer._rename_contigs(contigs_in)
self.assertEqual(expected_names, got_names)
self.assertEqual(expected_contigs, got_contigs)
contigs_in['ctg2 abc'] = pyfastaq.sequences.Fasta('ctg2 abc', 'AAA')
with self.assertRaises(start_fixer.Error):
start_fixer.StartFixer._rename_contigs(contigs_in)
def test_write_renamed_contigs(self):
contigs_dict = {'ctg1': pyfastaq.sequences.Fasta('ctg1', 'ACGT'), 'ctg2': pyfastaq.sequences.Fasta('ctg2', 'AAA')}
rename_dict = {'ctg1': 'ctg1', 'ctg2': 'ctg2 foo bar'}
tmp_out = 'tmp.test_write_renamed_contigs.fa'
start_fixer.StartFixer._write_renamed_contigs(contigs_dict, rename_dict, tmp_out)
expected = os.path.join(data_dir, 'start_fixer_write_renamed_contigs.fa')
self.assertTrue(filecmp.cmp(expected, tmp_out, shallow=False))
os.unlink(tmp_out)
def test_max_length_from_fasta_file(self):
infile = os.path.join(data_dir, 'start_fixer_max_length_from_fasta_file.fa')
self.assertEqual(11, start_fixer.StartFixer._max_length_from_fasta_file(infile))
def test_write_fasta_plus_circularized_ends(self):
infile = os.path.join(data_dir, 'start_fixer_write_fasta_plus_circularized_ends.in.fa')
expected = os.path.join(data_dir, 'start_fixer_write_fasta_plus_circularized_ends.out.fa')
expected_ignore = os.path.join(data_dir, 'start_fixer_write_fasta_plus_circularized_ends.out.ignore.fa')
tmp_out = 'tmp.test_write_fasta_plus_circularized_ends.fa'
contigs = {}
pyfastaq.tasks.file_to_dict(infile, contigs)
got = start_fixer.StartFixer._write_fasta_plus_circularized_ends(contigs, tmp_out, 5)
self.assertEqual(6, got)
self.assertTrue(filecmp.cmp(expected, tmp_out, shallow=False))
got = start_fixer.StartFixer._write_fasta_plus_circularized_ends(contigs, tmp_out, 5, ignore={'seq1', 'seq4'})
self.assertEqual(3, got)
self.assertTrue(filecmp.cmp(expected_ignore, tmp_out, shallow=False))
os.unlink(tmp_out)
got = start_fixer.StartFixer._write_fasta_plus_circularized_ends(contigs, tmp_out, 5, ignore={'seq1', 'seq2', 'seq3', 'seq4'})
self.assertEqual(0, got)
self.assertFalse(os.path.exists(tmp_out))
def test_find_circular_using_promer(self):
contigs_infile = os.path.join(data_dir, 'start_fixer_find_circular_using_promer.contigs.fa')
ref_genes_fa = os.path.join(data_dir, 'start_fixer_find_circular_using_promer.refs.fa')
tmp_outprefix = 'tmp.start_fixer_find_circular_using_promer'
contigs_dict = {}
pyfastaq.tasks.file_to_dict(contigs_infile, contigs_dict)
end_extend = start_fixer.StartFixer._max_length_from_fasta_file(ref_genes_fa)
expected = {'ctg_DNAA_ECOLI': pymummer.alignment.Alignment('1021\t2424\t1\t1404\t1404\t1404\t100.00\t.\t.\t3404\t1404\t1\t1\tctg_DNAA_ECOLI\tsp|P03004|DNAA_ECOLI\n'), 'ctg_DNAA1_CHLPN': pymummer.alignment.Alignment('685\t2067\t1\t1383\t1383\t1383\t100.00\t.\t.\t2808\t1383\t1\t1\tctg_DNAA1_CHLPN__ends\tsp|Q9Z8M9|DNAA1_CHLPN\n'), 'ctg_DNAA_ECOLI_2': pymummer.alignment.Alignment('901\t2304\t1\t1404\t1404\t1404\t100.00\t.\t.\t3044\t1404\t1\t1\tctg_DNAA_ECOLI_2\tsp|P03004|DNAA_ECOLI_2\n')}
got = start_fixer.StartFixer._find_circular_using_promer(tmp_outprefix, ref_genes_fa, contigs_dict, 70, end_extend, sys.stdout)
self.assertEqual(expected, got)
os.unlink((tmp_outprefix + '.contigs_with_ends.fa'))
os.unlink((tmp_outprefix + '.promer'))
got = start_fixer.StartFixer._find_circular_using_promer(tmp_outprefix, ref_genes_fa, contigs_dict, 70, end_extend, sys.stdout, ignore={x for x in contigs_dict})
self.assertEqual({}, got)
def test_find_circular_using_prodigal(self):
contigs_infile = os.path.join(data_dir, 'start_fixer_find_circular_using_prodigal.ctgs.fa')
outprefix = 'tmp.test_find_circular_using_prodigal'
contigs_dict = {}
pyfastaq.tasks.file_to_dict(contigs_infile, contigs_dict)
circular_from_promer = {'ctg3': 'foo'}
got = start_fixer.StartFixer._find_circular_using_prodigal(outprefix, contigs_dict, circular_from_promer, sys.stdout)
self.assertEqual({'ctg1'}, set(got.keys()))
got_fields = got['ctg1'].split('\t')
self.assertEqual(got_fields[0], 'ctg1')
self.assertTrue(got_fields[1].startswith('Prodigal'))
os.unlink((outprefix + '.for_prodigal.fa'))
os.unlink((outprefix + '.prodigal.gff'))
got = start_fixer.StartFixer._find_circular_using_prodigal(outprefix, contigs_dict, circular_from_promer, sys.stdout, ignore={'ctg1', 'ctg2'})
self.assertEqual({}, got)
def test_rearrange_contigs(self):
contigs_infile = os.path.join(data_dir, 'start_fixer_rearrange_contigs.in.fa')
ref_genes_fa = os.path.join(data_dir, 'start_fixer_rearrange_contigs.refs.fa')
tmp_outprefix = 'tmp.test_rearrange_contigs'
tmp_log = (tmp_outprefix + '.log')
contigs_dict = {}
pyfastaq.tasks.file_to_dict(contigs_infile, contigs_dict)
to_ignore = {'ignore_ctg'}
end_extend = start_fixer.StartFixer._max_length_from_fasta_file(ref_genes_fa)
circ_with_promer = start_fixer.StartFixer._find_circular_using_promer(tmp_outprefix, ref_genes_fa, contigs_dict, 70, end_extend, sys.stdout, ignore=to_ignore)
circ_with_prodigal = start_fixer.StartFixer._find_circular_using_prodigal(tmp_outprefix, contigs_dict, circ_with_promer, sys.stdout, ignore=to_ignore)
start_fixer.StartFixer._rearrange_contigs(contigs_dict, circ_with_promer, circ_with_prodigal, to_ignore, end_extend, tmp_log)
expected_log = os.path.join(data_dir, 'start_fixer_rearrange_contigs.expect.log')
self.assertTrue(filecmp.cmp(expected_log, tmp_log, shallow=False))
expected_dict = {}
expected_fa = os.path.join(data_dir, 'start_fixer_rearrange_contigs.expect.fa')
pyfastaq.tasks.file_to_dict(expected_fa, expected_dict)
self.assertEqual(expected_dict, contigs_dict)
os.unlink((tmp_outprefix + '.contigs_with_ends.fa'))
os.unlink((tmp_outprefix + '.for_prodigal.fa'))
os.unlink((tmp_outprefix + '.prodigal.gff'))
os.unlink((tmp_outprefix + '.promer'))
os.unlink(tmp_log)
def test_run_ignore_all(self):
tmp_prefix = 'tmp.start_fixer.test_run_when_ignoring_all'
input_fa = os.path.join(data_dir, 'start_fixer_run_ignore_all.fa')
to_ignore = os.path.join(data_dir, 'start_fixer_run_ignore_all.to_ignore')
sfixer = start_fixer.StartFixer(input_fa, tmp_prefix, ignore=to_ignore)
sfixer.run()
expected_fa = os.path.join(data_dir, 'start_fixer_run_ignore_all.expect.fa')
self.assertTrue(filecmp.cmp(expected_fa, (tmp_prefix + '.fasta')))
for suffix in ['detailed.log', 'fasta', 'log', 'promer.contigs_with_ends.fa', 'promer.promer']:
try:
os.unlink(((tmp_prefix + '.') + suffix))
except:
pass
def test_run_none_for_prodigal(self):
tmp_prefix = 'tmp.start_fixer.test_run_none_for_prodigal'
input_ctg = os.path.join(data_dir, 'start_fixer_run_none_for_prodigal.ctg.fa')
input_ref = os.path.join(data_dir, 'start_fixer_run_none_for_prodigal.ref.fa')
sfixer = start_fixer.StartFixer(input_ctg, tmp_prefix, genes_fa=input_ref)
sfixer.run()
expected_fa = os.path.join(data_dir, 'start_fixer_run_none_for_prodigal.expect.fa')
self.assertTrue(filecmp.cmp(expected_fa, (tmp_prefix + '.fasta')))
for suffix in ['detailed.log', 'fasta', 'log', 'promer.contigs_with_ends.fa', 'promer.promer']:
try:
os.unlink(((tmp_prefix + '.') + suffix))
except:
pass
def test_run_bit_of_everything(self):
tmp_prefix = 'tmp.start_fixer.test_run_bit_of_everything'
input_ctg = os.path.join(data_dir, 'start_fixer_run_bit_of_everything.ctg.fa')
input_ref = os.path.join(data_dir, 'start_fixer_run_bit_of_everything.ref.fa')
input_ignore = os.path.join(data_dir, 'start_fixer_run_bit_of_everything.ignore')
sfixer = start_fixer.StartFixer(input_ctg, tmp_prefix, genes_fa=input_ref, ignore=input_ignore)
sfixer.run()
expected_fa = os.path.join(data_dir, 'start_fixer_run_bit_of_everything.expect.fa')
self.assertTrue(filecmp.cmp(expected_fa, (tmp_prefix + '.fasta')))
for suffix in ['detailed.log', 'fasta', 'log', 'promer.contigs_with_ends.fa', 'promer.promer', 'prodigal.for_prodigal.fa', 'prodigal.prodigal.gff']:
try:
os.unlink(((tmp_prefix + '.') + suffix))
except:
pass |
def no_duplicates(f):
def wrap_remove_duplicates():
policies = f()
return remove_duplicates(policies)
return wrap_remove_duplicates |
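# Hedged usage sketch; `remove_duplicates` is assumed to live in the original module, so a
# stand-in is defined here purely for illustration.
def remove_duplicates(policies):
    return list(dict.fromkeys(policies))
@no_duplicates
def list_policies():
    return ['allow-all', 'deny-root', 'allow-all']
assert list_policies() == ['allow-all', 'deny-root'] |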
def get_extractor(data_set, system):
if ((system == 'closest') or (system == 'latent')):
return 'cort.coreference.approaches.mention_ranking.extract_substructures'
elif (system == 'tree'):
return 'cort.coreference.approaches.antecedent_trees.extract_substructures'
elif (system == 'pair'):
if (data_set == 'train'):
return 'cort.coreference.approaches.mention_pairs.extract_training_substructures'
else:
return 'cort.coreference.approaches.mention_pairs.extract_testing_substructures' |
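# Usage note: the extractor module depends on both the data set and the system name.
assert get_extractor('train', 'pair').endswith('extract_training_substructures')
assert get_extractor('dev', 'pair').endswith('extract_testing_substructures')
assert get_extractor('train', 'tree').endswith('antecedent_trees.extract_substructures') |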
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train.')
parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
parser.add_argument('--train_batch_size', default=16, type=int, help='Total batch size for training.')
parser.add_argument('--eval_batch_size', default=64, type=int, help='Total batch size for eval.')
parser.add_argument('--learning_rate', default=1e-05, type=float, help='The initial learning rate for Adam.')
parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit float precision instead of 32-bit')
parser.add_argument('--loss_scale', type=float, default=0, help='Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n0 (default value): dynamic loss scaling.\nPositive power of 2: static loss scaling value.\n')
parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
args = parser.parse_args()
processors = {'rte': RteProcessor}
output_modes = {'rte': 'classification'}
if ((args.local_rank == (- 1)) or args.no_cuda):
device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info('device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}'.format(device, n_gpu, bool((args.local_rank != (- 1))), args.fp16))
if (args.gradient_accumulation_steps < 1):
raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps))
args.train_batch_size = (args.train_batch_size // args.gradient_accumulation_steps)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if (n_gpu > 0):
torch.cuda.manual_seed_all(args.seed)
if ((not args.do_train) and (not args.do_eval)):
raise ValueError('At least one of `do_train` or `do_eval` must be True.')
task_name = args.task_name.lower()
if (task_name not in processors):
raise ValueError(('Task not found: %s' % task_name))
processor = processors[task_name]()
output_mode = output_modes[task_name]
train_examples = load_DocNLI('train', hypo_only=False)
label_list = ['entailment', 'not_entailment']
num_labels = len(label_list)
print('num_labels:', num_labels, 'training size:', len(train_examples))
num_train_optimization_steps = None
num_train_optimization_steps = (int(((len(train_examples) / args.train_batch_size) / args.gradient_accumulation_steps)) * args.num_train_epochs)
if (args.local_rank != (- 1)):
num_train_optimization_steps = (num_train_optimization_steps // torch.distributed.get_world_size())
model = RobertaForSequenceClassification(num_labels)
tokenizer = RobertaTokenizer.from_pretrained(pretrain_model_dir, do_lower_case=args.do_lower_case)
model.to(device)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
max_test_acc = 0.0
max_dev_acc = 0.0
if args.do_train:
train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer, output_mode, cls_token_at_end=False, cls_token=tokenizer.cls_token, cls_token_segment_id=0, sep_token=tokenizer.sep_token, sep_token_extra=True, pad_on_left=False, pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=0)
logger.info('***** Running training *****')
logger.info(' Num examples = %d', len(train_examples))
logger.info(' Batch size = %d', args.train_batch_size)
logger.info(' Num steps = %d', num_train_optimization_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
iter_co = 0
final_test_performance = 0.0
for epoch_i in trange(int(args.num_train_epochs), desc='Epoch'):
tr_loss = 0
(nb_tr_examples, nb_tr_steps) = (0, 0)
for (step, batch) in enumerate(tqdm(train_dataloader, desc='Iteration')):
model.train()
batch = tuple((t.to(device) for t in batch))
(input_ids, input_mask, segment_ids, label_ids) = batch
logits = model(input_ids, input_mask)
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view((- 1), num_labels), label_ids.view((- 1)))
if (n_gpu > 1):
loss = loss.mean()
if (args.gradient_accumulation_steps > 1):
loss = (loss / args.gradient_accumulation_steps)
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
optimizer.step()
optimizer.zero_grad()
global_step += 1
iter_co += 1
            # store the model
model.eval()
model_to_save = (model.module if hasattr(model, 'module') else model)
store_transformers_models(model_to_save, tokenizer, '/export/home/Dataset/BERT_pretrained_mine/paragraph_entail/2021', (('docNLI_Longformer_epoch_' + str(epoch_i)) + '.pt')) |
def computeDialogue(greedy, answer):
examples = []
for (idx, (g, a)) in enumerate(zip(greedy, answer)):
examples.append((a[0][0], g, a[0][1], idx))
examples.sort()
turn_request_positives = 0
turn_goal_positives = 0
joint_goal_positives = 0
ldt = None
for ex in examples:
if ((ldt is None) or (ldt.split('_')[:(- 1)] != ex[0].split('_')[:(- 1)])):
(state, answer_state) = ({}, {})
ldt = ex[0]
delta_state = to_delta_state(ex[1])
answer_delta_state = to_delta_state(ex[2])
state = update_state(state, delta_state['inform'])
answer_state = update_state(answer_state, answer_delta_state['inform'])
if dict_cmp(state, answer_state):
joint_goal_positives += 1
if (delta_state['request'] == answer_delta_state['request']):
turn_request_positives += 1
if dict_cmp(delta_state['inform'], answer_delta_state['inform']):
turn_goal_positives += 1
joint_goal_em = ((joint_goal_positives / len(examples)) * 100)
turn_request_em = ((turn_request_positives / len(examples)) * 100)
turn_goal_em = ((turn_goal_positives / len(examples)) * 100)
answer = [(x[(- 1)], x[(- 2)]) for x in examples]
answer.sort()
answer = [[x[1]] for x in answer]
return (joint_goal_em, turn_request_em, turn_goal_em, answer) |
def make_union():
if os.path.exists('../korean_learner/korean_learner_train.txt'):
cd = '../'
elif os.path.exists('extract_data/korean_learner/korean_learner_train.txt'):
cd = 'extract_data/'
for mode in ['train', 'test', 'val']:
os.system(f'echo > {cd}union/union_{mode}.txt')
os.system(f'echo > {cd}union/union_{mode}.m2')
for data in ['lang8', 'korean_learner', 'native']:
syscommand = f'cat {cd}{data}/{data}_{mode}.txt >> {cd}union/union_{mode}.txt'
os.system(syscommand)
os.system(f'echo >> {cd}union/union_{mode}.txt')
os.system(f'cat {cd}{data}/{data}_{mode}.m2 >> {cd}union/union_{mode}.m2')
for mode in ['train', 'test', 'val']:
os.system(f'cat {cd}/union/union_train.txt {cd}/union/union_test.txt {cd}/union/union_val.txt > {cd}/union/union.txt')
os.system(f'cat {cd}/union/union_train.m2 {cd}/union/union_test.m2 {cd}/union/union_val.m2 > {cd}/union/union.m2')
split_pairs(f'{cd}/union/union.txt')
split_pairs(f'{cd}/union/union_val.txt')
print('make union done') |
def test_render():
env = MetaMazeEnv()
with pytest.raises(NotImplementedError):
env.render() |
def init_embedding(hparams):
    vocab = []
    with open('data/vocab_20000', 'r', encoding='utf-8') as f:
        for line in f:
            vocab.append(line.rstrip('\n'))
word_vectors = KeyedVectors.load_word2vec_format('data/roc_vector.txt')
emb = []
num = 0
for i in range(0, len(vocab)):
word = vocab[i]
if (word in word_vectors):
num += 1
emb.append(word_vectors[word])
else:
emb.append(((0.1 * np.random.random([hparams.emb_dim])) - 0.05).astype(np.float32))
print(' init embedding finished')
emb = np.array(emb)
print(num)
print(emb.shape)
return emb |
class SemistandardSkewTableaux_size(SemistandardSkewTableaux):
def __init__(self, n, max_entry):
self.n = n
if (max_entry is None):
self.max_entry = n
else:
self.max_entry = max_entry
SemistandardSkewTableaux.__init__(self, category=FiniteEnumeratedSets())
def _repr_(self):
return ('Semistandard skew tableaux of size %s and maximum entry %s' % (repr(self.n), repr(self.max_entry)))
def cardinality(self):
count = 0
for p in SkewPartitions(self.n):
count += SemistandardSkewTableaux_shape(p, self.max_entry).cardinality()
return count
def __iter__(self):
for p in SkewPartitions(self.n):
for ssst in SemistandardSkewTableaux_shape(p, self.max_entry):
(yield self.element_class(self, ssst)) |
@require_tf
class TFDataCollatorIntegrationTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.tmpdirname = tempfile.mkdtemp()
vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]']
self.vocab_file = os.path.join(self.tmpdirname, 'vocab.txt')
with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_default_with_dict(self):
features = [{'label': i, 'inputs': [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors='tf')
self.assertEqual(batch['labels'].numpy().tolist(), list(range(8)))
self.assertEqual(batch['labels'].dtype, tf.int64)
self.assertEqual(batch['inputs'].shape.as_list(), [8, 6])
features = [{'label_ids': [0, 1, 2], 'inputs': [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors='tf')
self.assertEqual(batch['labels'].numpy().tolist(), ([[0, 1, 2]] * 8))
self.assertEqual(batch['labels'].dtype, tf.int64)
self.assertEqual(batch['inputs'].shape.as_list(), [8, 6])
features = [{'label': i, 'inputs': np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors='tf')
self.assertEqual(batch['labels'].numpy().tolist(), list(range(8)))
self.assertEqual(batch['labels'].dtype, tf.int64)
self.assertEqual(batch['inputs'].shape.as_list(), [8, 10])
features = [{'label': np.array(i), 'inputs': np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors='tf')
self.assertEqual(batch['labels'].dtype, tf.int64)
self.assertEqual(batch['labels'].numpy().tolist(), list(range(8)))
self.assertEqual(batch['labels'].dtype, tf.int64)
self.assertEqual(batch['inputs'].shape.as_list(), [8, 10])
def test_numpy_dtype_preservation(self):
data_collator = default_data_collator
features = [{'input_ids': np.array([0, 1, 2, 3, 4]), 'label': np.int64(i)} for i in range(4)]
batch = data_collator(features, return_tensors='tf')
self.assertEqual(batch['labels'].dtype, tf.int64)
def test_default_classification_and_regression(self):
data_collator = default_data_collator
features = [{'input_ids': [0, 1, 2, 3, 4], 'label': i} for i in range(4)]
batch = data_collator(features, return_tensors='tf')
self.assertEqual(batch['labels'].dtype, tf.int64)
features = [{'input_ids': [0, 1, 2, 3, 4], 'label': float(i)} for i in range(4)]
batch = data_collator(features, return_tensors='tf')
self.assertEqual(batch['labels'].dtype, tf.float32)
def test_default_with_no_labels(self):
features = [{'label': None, 'inputs': [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors='tf')
self.assertTrue(('labels' not in batch))
self.assertEqual(batch['inputs'].shape.as_list(), [8, 6])
features = [{'label_ids': None, 'inputs': [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors='tf')
self.assertTrue(('labels' not in batch))
self.assertEqual(batch['inputs'].shape.as_list(), [8, 6])
def test_data_collator_with_padding(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{'input_ids': [0, 1, 2]}, {'input_ids': [0, 1, 2, 3, 4, 5]}]
data_collator = DataCollatorWithPadding(tokenizer, return_tensors='tf')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 6])
self.assertEqual(batch['input_ids'][0].numpy().tolist(), ([0, 1, 2] + ([tokenizer.pad_token_id] * 3)))
data_collator = DataCollatorWithPadding(tokenizer, padding='max_length', max_length=10, return_tensors='tf')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 10])
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8, return_tensors='tf')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, [2, 8])
def test_data_collator_for_token_classification(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{'input_ids': [0, 1, 2], 'labels': [0, 1, 2]}, {'input_ids': [0, 1, 2, 3, 4, 5], 'labels': [0, 1, 2, 3, 4, 5]}]
data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors='tf')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 6])
self.assertEqual(batch['input_ids'][0].numpy().tolist(), ([0, 1, 2] + ([tokenizer.pad_token_id] * 3)))
self.assertEqual(batch['labels'].shape.as_list(), [2, 6])
self.assertEqual(batch['labels'][0].numpy().tolist(), ([0, 1, 2] + ([(- 100)] * 3)))
data_collator = DataCollatorForTokenClassification(tokenizer, padding='max_length', max_length=10, return_tensors='tf')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 10])
self.assertEqual(batch['labels'].shape.as_list(), [2, 10])
data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8, return_tensors='tf')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 8])
self.assertEqual(batch['labels'].shape.as_list(), [2, 8])
data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=(- 1), return_tensors='tf')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 6])
self.assertEqual(batch['input_ids'][0].numpy().tolist(), ([0, 1, 2] + ([tokenizer.pad_token_id] * 3)))
self.assertEqual(batch['labels'].shape.as_list(), [2, 6])
self.assertEqual(batch['labels'][0].numpy().tolist(), ([0, 1, 2] + ([(- 1)] * 3)))
def _test_no_pad_and_pad(self, no_pad_features, pad_features):
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors='tf')
batch = data_collator(no_pad_features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 10])
self.assertEqual(batch['labels'].shape.as_list(), [2, 10])
batch = data_collator(pad_features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 10])
self.assertEqual(batch['labels'].shape.as_list(), [2, 10])
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, pad_to_multiple_of=8, return_tensors='tf')
batch = data_collator(no_pad_features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 16])
self.assertEqual(batch['labels'].shape.as_list(), [2, 16])
batch = data_collator(pad_features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 16])
self.assertEqual(batch['labels'].shape.as_list(), [2, 16])
tokenizer._pad_token = None
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors='tf')
with self.assertRaises(ValueError):
data_collator(pad_features)
set_seed(42)
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors='tf')
batch = data_collator(no_pad_features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 10])
self.assertEqual(batch['labels'].shape.as_list(), [2, 10])
masked_tokens = (batch['input_ids'] == tokenizer.mask_token_id)
self.assertTrue(tf.reduce_any(masked_tokens))
batch = data_collator(pad_features, return_tensors='tf')
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 10])
self.assertEqual(batch['labels'].shape.as_list(), [2, 10])
masked_tokens = (batch['input_ids'] == tokenizer.mask_token_id)
self.assertTrue(tf.reduce_any(masked_tokens))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors='tf')
batch = data_collator(no_pad_features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 16])
self.assertEqual(batch['labels'].shape.as_list(), [2, 16])
masked_tokens = (batch['input_ids'] == tokenizer.mask_token_id)
self.assertTrue(tf.reduce_any(masked_tokens))
batch = data_collator(pad_features, return_tensors='tf')
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 16])
self.assertEqual(batch['labels'].shape.as_list(), [2, 16])
masked_tokens = (batch['input_ids'] == tokenizer.mask_token_id)
self.assertTrue(tf.reduce_any(masked_tokens))
def test_data_collator_for_language_modeling(self):
no_pad_features = [{'input_ids': list(range(10))}, {'input_ids': list(range(10))}]
pad_features = [{'input_ids': list(range(5))}, {'input_ids': list(range(10))}]
self._test_no_pad_and_pad(no_pad_features, pad_features)
no_pad_features = [list(range(10)), list(range(10))]
pad_features = [list(range(5)), list(range(10))]
self._test_no_pad_and_pad(no_pad_features, pad_features)
def test_data_collator_for_whole_word_mask(self):
features = [{'input_ids': list(range(10))}, {'input_ids': list(range(10))}]
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors='tf')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 10])
self.assertEqual(batch['labels'].shape.as_list(), [2, 10])
def test_plm(self):
tokenizer = BertTokenizer(self.vocab_file)
no_pad_features = [{'input_ids': list(range(10))}, {'input_ids': list(range(10))}]
pad_features = [{'input_ids': list(range(5))}, {'input_ids': list(range(10))}]
data_collator = DataCollatorForPermutationLanguageModeling(tokenizer, return_tensors='tf')
batch = data_collator(pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 10])
self.assertEqual(batch['perm_mask'].shape.as_list(), [2, 10, 10])
self.assertEqual(batch['target_mapping'].shape.as_list(), [2, 10, 10])
self.assertEqual(batch['labels'].shape.as_list(), [2, 10])
batch = data_collator(no_pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 10])
self.assertEqual(batch['perm_mask'].shape.as_list(), [2, 10, 10])
self.assertEqual(batch['target_mapping'].shape.as_list(), [2, 10, 10])
self.assertEqual(batch['labels'].shape.as_list(), [2, 10])
example = [np.random.randint(0, 5, [5])]
with self.assertRaises(ValueError):
data_collator(example)
def test_nsp(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{'input_ids': [0, 1, 2, 3, 4], 'token_type_ids': [0, 1, 2, 3, 4], 'next_sentence_label': i} for i in range(2)]
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors='tf')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 5])
self.assertEqual(batch['token_type_ids'].shape.as_list(), [2, 5])
self.assertEqual(batch['labels'].shape.as_list(), [2, 5])
self.assertEqual(batch['next_sentence_label'].shape.as_list(), [2])
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors='tf')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 8])
self.assertEqual(batch['token_type_ids'].shape.as_list(), [2, 8])
self.assertEqual(batch['labels'].shape.as_list(), [2, 8])
self.assertEqual(batch['next_sentence_label'].shape.as_list(), [2])
def test_sop(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{'input_ids': tf.convert_to_tensor([0, 1, 2, 3, 4]), 'token_type_ids': tf.convert_to_tensor([0, 1, 2, 3, 4]), 'sentence_order_label': i} for i in range(2)]
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors='tf')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 5])
self.assertEqual(batch['token_type_ids'].shape.as_list(), [2, 5])
self.assertEqual(batch['labels'].shape.as_list(), [2, 5])
self.assertEqual(batch['sentence_order_label'].shape.as_list(), [2])
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors='tf')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape.as_list(), [2, 8])
self.assertEqual(batch['token_type_ids'].shape.as_list(), [2, 8])
self.assertEqual(batch['labels'].shape.as_list(), [2, 8])
self.assertEqual(batch['sentence_order_label'].shape.as_list(), [2]) |
class DistilBertConfig(PretrainedConfig):
model_type = 'distilbert'
def __init__(self, vocab_size=30522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=(4 * 768), dropout=0.1, attention_dropout=0.1, activation='gelu', initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs):
super().__init__(**kwargs, pad_token_id=pad_token_id)
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.sinusoidal_pos_embds = sinusoidal_pos_embds
self.n_layers = n_layers
self.n_heads = n_heads
self.dim = dim
self.hidden_dim = hidden_dim
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation = activation
self.initializer_range = initializer_range
self.qa_dropout = qa_dropout
self.seq_classif_dropout = seq_classif_dropout
@property
def hidden_size(self):
return self.dim
@property
def num_attention_heads(self):
return self.n_heads
@property
def num_hidden_layers(self):
return self.n_layers |
def merge_single_set_jsons(set_name: str, ORIGINAL_CATEGORIES: List[str], save_dir: str, jsons_reid_per_category_cropped):
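# Merge the per-category cropped re-id jsons of one split into a single COCO-style json: re-number annotation ids, keep only the referenced image entries, and write <set_name>_coco_reid.json.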
global_json = {}
anno_id = 0
all_annos = []
all_image_ids = []
all_images_info = []
for category_name in ORIGINAL_CATEGORIES:
set_single_category_json = jsons_reid_per_category_cropped[f'{set_name}_{category_name}_cropped']
set_single_category_json_images_ids = np.array([item['id'] for item in set_single_category_json['images']])
set_single_category_all_images_infos = np.array(copy.deepcopy(set_single_category_json['images']))
for item in set_single_category_json['annotations']:
image_id = item.get('image_id')
all_image_ids.append(image_id)
anno_id += 1
item['id'] = anno_id
all_annos.append(item)
all_json_image_info_to_take_inds = np.isin(set_single_category_json_images_ids, all_image_ids)
all_json_image_info_to_take = list(set_single_category_all_images_infos[all_json_image_info_to_take_inds])
all_images_info.extend(all_json_image_info_to_take)
all_image_ids = list(np.unique(all_image_ids))
all_image_ids = [int(item) for item in all_image_ids]
global_json = copy.deepcopy(set_single_category_json)
global_json['images'] = list(all_images_info)
global_json['annotations'] = list(all_annos)
with open((save_dir / f'{set_name}_coco_reid.json'), 'w') as f:
json.dump(global_json, f) |
def compute_graph_max_cut(memory_graph: MemoryGraph, n_iter: int=50, astar_n_iter: int=500, eps: float=0.01) -> Tuple[(List[BaseNode], float, List[Cut])]:
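# Bisection on the memory estimate between the single-op lower bound and an upper bound derived from total tensor sizes; each iteration re-runs the A* max-cut solver and keeps the last feasible schedule.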
max_cut_astar = MaxCutAstar(memory_graph=memory_graph)
last_result = (None, 0, None)
l_bound = memory_graph.memory_lbound_single_op
u_bound = ((2 * sum([t.total_size for t in memory_graph.b_nodes])) - l_bound)
it = 0
while (it < n_iter):
estimate = ((u_bound + l_bound) / 2)
(schedule, max_cut_size, cuts) = max_cut_astar.solve(estimate_factor=estimate, iter_limit=astar_n_iter)
if (schedule is None):
return last_result
next_u_bound = min(estimate, max_cut_size)
last_result = (schedule, max_cut_size, cuts)
if ((l_bound * (1 + eps)) >= next_u_bound):
return last_result
u_bound = next_u_bound
it += 1
return last_result |
def forSecond(frame_number, output_arrays, count_arrays, average_count, returned_frame):
plt.clf()
plt.show()
this_colors = []
labels = []
sizes = []
counter = 0
for eachItem in average_count:
counter += 1
labels.append(((eachItem + ' = ') + str(average_count[eachItem])))
sizes.append(average_count[eachItem])
this_colors.append(color_index[eachItem])
plt.subplot(1, 2, 1)
plt.title(('Second : ' + str(frame_number)))
plt.axis('off')
plt.imshow(returned_frame, interpolation='none')
plt.subplot(1, 2, 2)
plt.title(('Analysis: ' + str(frame_number)))
plt.pie(sizes, labels=labels, colors=this_colors, shadow=True, startangle=140, autopct='%1.1f%%')
plt.pause(0.01) |
def cityscapes_to_coco_all_random(cityscapes_id):
lookup = {0: (- 1), 1: (- 1), 2: (- 1), 3: (- 1), 4: (- 1), 5: (- 1), 6: (- 1), 7: (- 1), 8: (- 1)}
return lookup[cityscapes_id] |
def read_relational_attribute(ofile, relational_attribute, i):
r_end_relational = re.compile((('^[Ee][Nn][Dd]\\s*' + relational_attribute.name) + '\\s*$'))
while (not r_end_relational.match(i)):
m = r_headerline.match(i)
if m:
isattr = r_attribute.match(i)
if isattr:
(attr, i) = tokenize_attribute(ofile, i)
relational_attribute.attributes.append(attr)
else:
raise ValueError(('Error parsing line %s' % i))
else:
i = next(ofile)
i = next(ofile)
return i |
def try_index(scalar_or_list, i):
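# Index into the argument when it is a sequence; a scalar raises TypeError and is returned unchanged.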
try:
return scalar_or_list[i]
except TypeError:
return scalar_or_list |
class RandomFeatureEnsemble(Ensemble):
def __init__(self, M, N, f):
self.M = M
self.N = N
self.f = ACTIVATIONS[f]
self.repr_init()
def generate(self):
Z = (np.random.randn(self.N, self.N) / np.sqrt(self.N))
W = np.random.randn(self.M, self.N)
X = (self.f(W @ Z) / np.sqrt(self.N))
return X |
class UAS(Metric):
def __init__(self, eps=1e-08):
super(UAS, self).__init__()
self.eps = eps
self.total = 0.0
self.direct_correct = 0.0
self.undirect_correct = 0.0
self.total_sentence = 0.0
self.correct_root = 0.0
def score(self):
return (self.direct_correct / self.total)
def __call__(self, predicted_arcs, gold_arcs):
for (pred, gold) in zip(predicted_arcs, gold_arcs):
assert (len(pred) == len(gold))
if (len(pred) > 0):
self.total_sentence += 1.0
for (head, child) in pred:
if (gold[int(child)] == (int(head) + 1)):
self.direct_correct += 1.0
self.undirect_correct += 1.0
if ((int(head) + 1) == 0):
self.correct_root += 1.0
elif (gold[int(head)] == (int(child) + 1)):
self.undirect_correct += 1.0
self.total += 1.0
def __repr__(self):
return 'UDAS: {}, UUAS:{}, root:{} '.format(self.score(), (self.undirect_correct / self.total), (self.correct_root / self.total_sentence)) |
def _get_training_devices_dump() -> str:
out = subprocess.check_output(['nvidia-smi', '--query-gpu=gpu_name,gpu_bus_id,vbios_version', '--format=csv'])
return out.decode('utf-8').strip() |
def construct_outletDF(outlet_avg_dict, topics):
outlet_topicsDF = pd.DataFrame.from_dict(outlet_avg_dict, orient='index').transpose()
outlet_topicsDF['sum'] = outlet_topicsDF[outlet_topicsDF.columns].sum(axis=1)
outlet_topicsDF = outlet_topicsDF.sort_values('sum', ascending=False).drop('sum', axis=1)
ordered_topics_dict = {idx: topics['topics'][idx] for idx in outlet_topicsDF.index}
return (outlet_topicsDF, ordered_topics_dict) |
def sin_transformer(period):
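# sklearn FunctionTransformer mapping x to sin(2*pi*x/period), a common encoding for cyclic features.
# Illustrative usage (assumes sklearn.preprocessing.FunctionTransformer): sin_transformer(12).fit_transform(month_column).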
return FunctionTransformer((lambda x: np.sin((((x / period) * 2) * np.pi)))) |
class HTMLPage(object):
def __init__(self, content, encoding, url, cache_link_parsing=True):
self.content = content
self.encoding = encoding
self.url = url
self.cache_link_parsing = cache_link_parsing
def __str__(self):
return redact_auth_from_url(self.url) |
def save_pickle(pickle_path, data):
with open(pickle_path, 'wb') as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL) |
class SubGoalObserver(WaypointObserver):
def __init__(self, config, vehicle, traffic_manager):
super().__init__(config, vehicle, traffic_manager)
self._num_wps = self.config.observations.sub_goal_num
sub_goal_dist_max = self.config.observations.sub_goal_dist_max
if ((sub_goal_dist_max is not None) and self.config.observations.relative_to_ego):
(x_low, y_low, x_high, y_high) = ((- sub_goal_dist_max), (- sub_goal_dist_max), sub_goal_dist_max, sub_goal_dist_max)
else:
(x_low, y_low, x_high, y_high) = ((- np.inf), (- np.inf), np.inf, np.inf)
dist_low = 0.0
dist_high = self.config.observations.sub_goal_dist_max
self.low = np.array(([x_low, y_low, dist_low, (- np.pi)] * self._num_wps), dtype=np.float64)
self.high = np.array(([x_high, y_high, dist_high, np.pi] * self._num_wps), dtype=np.float64)
def step(self, **kwargs):
sub_goal_manager = self.vehicle.get_attachment(SubGoalAttachment)
sub_goals = sub_goal_manager.sub_goals[:self.config.observations.sub_goal_num]
sub_goal_dist_max = self.config.observations.sub_goal_dist_max
if (sub_goal_dist_max is not None):
ego_pos = self.vehicle.location[:2]
sub_goals = [s for s in sub_goals if (np.linalg.norm((ego_pos - np.array([s.location]))) <= sub_goal_dist_max)]
if (len(sub_goals) == 0):
return np.zeros_like(self.low)
xs = [s.location[0] for s in sub_goals]
ys = [s.location[1] for s in sub_goals]
return super().step(xs=xs, ys=ys) |
def eval_dist_at_powseries(phi, f):
nmoments = phi.parent().precision_cap()
K = f.parent().base_ring()
if K.is_exact():
K = phi.parent().base_ring()
return sum(((a * K(phi.moment(i))) for (a, i) in zip(f.coefficients(), f.exponents()) if ((i >= 0) and (i < nmoments)))) |
def _get_wrn_spec(num_layers, width_factor):
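# WideResNet spec: n = (num_layers - 4) / 6 blocks per stage, with the three residual stages widened by width_factor.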
assert (((num_layers - 4) % 6) == 0)
n = ((num_layers - 4) // 6)
layers = ([n] * 3)
channels = [16, (16 * width_factor), (32 * width_factor), (64 * width_factor)]
return (layers, channels) |
@chex.dataclass
class MessagePassingStateChunked():
inputs: chex.Array
hints: chex.Array
is_first: chex.Array
hint_preds: chex.Array
hiddens: chex.Array
lstm_state: Optional[hk.LSTMState] |
def get_text_video_audio_data(data_path, part='train'):
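# Load the text/video/audio feature .npy files for the requested split and drop samples whose video sequence length is zero.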
if (part == 'train'):
x_txt = np.load(((data_path + '/') + 'train_text.npy'))
x_vid = np.load(((data_path + '/') + 'train_video.npy'))
vid_seqN = np.load(((data_path + '/') + 'train_video_seqN.npy'))
x_mfcc = np.load(((data_path + '/') + 'train_audio_mfcc.npy'))
x_pros = np.load(((data_path + '/') + 'train_audio_prosody.npy'))
aud_seqN = np.load(((data_path + '/') + 'train_audio_seqN.npy'))
labels = np.load(((data_path + '/') + 'train_label.npy'))
if np.where((vid_seqN == 0))[0].any():
tr_inds = np.where((vid_seqN == 0))
vid_seqN = np.delete(vid_seqN, tr_inds, 0)
x_vid = np.delete(x_vid, tr_inds, 0)
x_txt = np.delete(x_txt, tr_inds, 0)
x_mfcc = np.delete(x_mfcc, tr_inds, 0)
x_pros = np.delete(x_pros, tr_inds, 0)
aud_seqN = np.delete(aud_seqN, tr_inds, 0)
labels = np.delete(labels, tr_inds, 0)
elif (part == 'dev'):
x_txt = np.load(((data_path + '/') + 'dev_text.npy'))
x_vid = np.load(((data_path + '/') + 'dev_video.npy'))
vid_seqN = np.load(((data_path + '/') + 'dev_video_seqN.npy'))
x_mfcc = np.load(((data_path + '/') + 'dev_audio_mfcc.npy'))
x_pros = np.load(((data_path + '/') + 'dev_audio_prosody.npy'))
aud_seqN = np.load(((data_path + '/') + 'dev_audio_seqN.npy'))
labels = np.load(((data_path + '/') + 'dev_label.npy'))
if np.where((vid_seqN == 0))[0].any():
inds = np.where((vid_seqN == 0))
vid_seqN = np.delete(vid_seqN, inds)
x_vid = np.delete(x_vid, inds, 0)
x_txt = np.delete(x_txt, inds, 0)
x_mfcc = np.delete(x_mfcc, inds, 0)
x_pros = np.delete(x_pros, inds, 0)
aud_seqN = np.delete(aud_seqN, inds, 0)
labels = np.delete(labels, inds)
elif (part == 'test'):
x_txt = np.load(((data_path + '/') + 'test_text.npy'))
x_vid = np.load(((data_path + '/') + 'test_video.npy'))
vid_seqN = np.load(((data_path + '/') + 'test_video_seqN.npy'))
x_mfcc = np.load(((data_path + '/') + 'test_audio_mfcc.npy'))
x_pros = np.load(((data_path + '/') + 'test_audio_prosody.npy'))
aud_seqN = np.load(((data_path + '/') + 'test_audio_seqN.npy'))
labels = np.load(((data_path + '/') + 'test_label.npy'))
if np.where((vid_seqN == 0))[0].any():
inds = np.where((vid_seqN == 0))
vid_seqN = np.delete(vid_seqN, inds)
x_vid = np.delete(x_vid, inds, 0)
x_txt = np.delete(x_txt, inds, 0)
x_mfcc = np.delete(x_mfcc, inds, 0)
x_pros = np.delete(x_pros, inds, 0)
aud_seqN = np.delete(aud_seqN, inds, 0)
labels = np.delete(labels, inds)
else:
x_txt = []
x_vid = []
vid_seqN = []
x_mfcc = []
x_pros = []
aud_seqN = []
labels = []
return (x_txt, x_vid, vid_seqN, x_mfcc, x_pros, aud_seqN, labels) |
class FacadesDataset(Pix2pixDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
parser.set_defaults(dataroot='./dataset/facades/')
parser.set_defaults(preprocess_mode='resize_and_crop')
load_size = (286 if is_train else 256)
parser.set_defaults(load_size=load_size)
parser.set_defaults(crop_size=256)
parser.set_defaults(display_winsize=256)
parser.set_defaults(label_nc=13)
parser.set_defaults(contain_dontcare_label=False)
parser.set_defaults(no_instance_edge=True)
parser.set_defaults(no_instance_dist=True)
parser.set_defaults(lr_instance=False)
return parser
def get_paths(self, opt):
root = opt.dataroot
phase = ('val' if (opt.phase == 'test') else opt.phase)
label_dir = os.path.join(root, ('%s_label' % phase))
label_paths = make_dataset(label_dir, recursive=False, read_cache=True)
image_dir = os.path.join(root, ('%s_img' % phase))
image_paths = make_dataset(image_dir, recursive=False, read_cache=True)
instance_paths = []
return (label_paths, image_paths, instance_paths) |
@torch.no_grad()
def inspect_lora(model):
moved = {}
for (name, _module) in model.named_modules():
if (_module.__class__.__name__ in ['LoraInjectedLinear', 'LoraInjectedConv2d', 'LoraInjectedConv3d']):
ups = _module.lora_up.weight.data.clone()
downs = _module.lora_down.weight.data.clone()
wght: torch.Tensor = (ups.flatten(1) @ downs.flatten(1))
dist = wght.flatten().abs().mean().item()
if (name in moved):
moved[name].append(dist)
else:
moved[name] = [dist]
return moved |
def text2():
error_sentence_2 = ',,!'
correct_sent = m.correct(error_sentence_2)
print('original sentence:{} => correct sentence:{}'.format(error_sentence_2, correct_sent)) |
def masked_logit_cross_entropy(preds, labels, mask):
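# Per-example sigmoid cross-entropy summed over labels, weighted by a mask normalized by its sum (at least 1), then averaged over the batch.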
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)
loss = tf.reduce_sum(loss, axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.maximum(tf.reduce_sum(mask), tf.constant([1.0]))
loss *= mask
return tf.reduce_mean(loss) |
class Modeler(object):
hashtagSegmentor = None
t = 0
totals = 0
totalh = 0
p = 0
r = 0
n = 0
modelerParams = {}
def __init__(self):
pass
def loadParameters(self, args):
leftoverArgs = []
for arg in args:
if (self.loadParameter(arg) == False):
leftoverArgs.append(arg)
return leftoverArgs
def loadParameter(self, param):
pass
def getRunCode(self):
return ''
def train(self, featureFile):
pass
def segmentHashtag(self, hashtag):
pass
def segmentFile(self, fileToSegment, featureFileName, params):
pass
def calculateScore(self, testFile, params):
pass
def loadModelerParams(self, params):
pass
def test(self, testFile, featureFileName, params):
(acc, precision, recall, fscore) = self.calculateScore(testFile, featureFileName, params)
print(('MAXENT ACC %f PRE %f REC %f F1 %f\n' % (acc, precision, recall, fscore)))
def isFeatureOn(self, feature):
return False
def reset(self):
self.t = 0
self.totals = 0
self.totalh = 0
self.p = 0
self.r = 0
self.n = 0
def countEntry(self, segmented, trueSegmentation):
sw = segmented.split(' ')
hw = trueSegmentation.split(' ')
for s in sw:
for h in hw:
if (s == h):
self.p = (self.p + 1)
break
for h in hw:
for s in sw:
if (s == h):
self.r = (self.r + 1)
break
self.totals = (self.totals + len(sw))
self.totalh = (self.totalh + len(hw))
self.n += 1
if (segmented == trueSegmentation):
self.t += 1
def calculatePrecision(self):
if (self.totals > 0):
return (float((self.p * 100)) / float(self.totals))
return 0
def calculateRecall(self):
if (self.totalh > 0):
return (float((self.r * 100)) / float(self.totalh))
return 0
def calculateFScore(self):
precision = self.calculatePrecision()
recall = self.calculateRecall()
if ((precision + recall) > 0):
return (((2 * precision) * recall) / (precision + recall))
return 0
def calculateAccuracy(self):
if (self.n > 0):
return (float((100 * self.t)) / float(self.n))
return 0 |
def get_args_from_command_line():
parser = ArgumentParser(description='Parser of Runner of Network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [cuda]', default=cfg.CONST.DEVICE, type=str)
parser.add_argument('--phase', dest='phase', help='phase of CNN', default=cfg.NETWORK.PHASE, type=str)
parser.add_argument('--weights', dest='weights', help='Initialize network from the weights file', default=cfg.CONST.WEIGHTS, type=str)
parser.add_argument('--data', dest='data_path', help='Set dataset root_path', default=cfg.DIR.DATASET_ROOT, type=str)
parser.add_argument('--out', dest='out_path', help='Set output path', default=cfg.DIR.OUT_PATH)
args = parser.parse_args()
return args |
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, noactivation=False):
super(BasicBlock, self).__init__()
self.basicblock_sub = BasicBlockSub(inplanes, planes, stride, noactivation)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.basicblock_sub(x)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
return out |
def test_LayerNorm(device):
from speechbrain.nnet.normalization import LayerNorm
input = (torch.randn(4, 101, 256, device=device) + 2.0)
norm = LayerNorm(input_shape=input.shape).to(device)
output = norm(input)
assert (input.shape == output.shape)
current_mean = output.mean(dim=2).mean()
assert (torch.abs(current_mean) < 1e-06)
current_std = output.std(dim=2).mean()
assert (torch.abs((1.0 - current_std)) < 0.01)
input = (torch.randn(100, 101, 16, 32, device=device) + 2.0)
norm = LayerNorm(input_shape=input.shape).to(device)
output = norm(input)
assert (input.shape == output.shape)
current_mean = output.mean(dim=[2, 3]).mean()
assert (torch.abs(current_mean) < 1e-06)
current_std = output.std(dim=[2, 3]).mean()
assert (torch.abs((1.0 - current_std)) < 0.01)
assert torch.jit.trace(norm, input) |
class Upsampling(Layer):
def __init__(self, new_size, **kwargs):
self.new_size = new_size
super(Upsampling, self).__init__(**kwargs)
def build(self, input_shape):
super(Upsampling, self).build(input_shape)
def call(self, inputs, **kwargs):
(new_height, new_width) = self.new_size
resized = ktf.image.resize_images(inputs, [new_height, new_width], align_corners=True)
return resized
def compute_output_shape(self, input_shape):
return tuple([None, self.new_size[0], self.new_size[1], input_shape[3]])
def get_config(self):
config = super(Upsampling, self).get_config()
config['new_size'] = self.new_size
return config |
def test_IndexedOptionArray_NumpyArray():
v2a = ak.contents.indexedoptionarray.IndexedOptionArray(ak.index.Index(np.array([2, 2, (- 1), 1, (- 1), 5, 4], np.int64)), ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])))
def f(out, obj):
out[0] = len(obj)
out[1] = (obj[0] if (obj[0] is not None) else 999.0)
out[2] = (obj[1] if (obj[1] is not None) else 999.0)
out[3] = (obj[2] if (obj[2] is not None) else 999.0)
out[4] = (obj[3] if (obj[3] is not None) else 999.0)
out[5] = (obj[4] if (obj[4] is not None) else 999.0)
out[6] = (obj[5] if (obj[5] is not None) else 999.0)
out[7] = (obj[6] if (obj[6] is not None) else 999.0)
out = np.zeros(8, dtype=np.float64)
f(out, ak.highlevel.Array(v2a))
assert (out.tolist() == [7.0, 2.2, 2.2, 999.0, 1.1, 999.0, 5.5, 4.4]) |
def get_gptq_trainable_parameters(fxp_model: Model, fw_info: FrameworkInfo, add_bias: bool=False) -> Tuple[List[tf.Variable], List[tf.Variable], List[tf.Variable]]:
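# Collect, for each quantization-wrapped layer, the kernel quantizer's trainable weight and threshold variables (and optionally the layer bias) used during GPTQ fine-tuning.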
trainable_weights: List[tf.Tensor] = []
trainable_threshold: List[tf.Tensor] = []
bias_weights: List[List[tf.Tensor]] = []
for layer in fxp_model.layers:
if isinstance(layer, KerasTrainableQuantizationWrapper):
kernel_attribute = get_kernel_attribute_name_for_gptq(layer_type=type(layer.layer), fw_info=DEFAULT_KERAS_INFO)
if (kernel_attribute not in layer.weights_quantizers):
Logger.error(f'{kernel_attribute} was not found in weight quantizers of layer {layer.layer}')
quantizer_trainable_weights = layer.weights_quantizers[kernel_attribute].get_trainable_variables(VariableGroup.WEIGHTS)
quantizer_trainable_threshold = layer.weights_quantizers[kernel_attribute].get_trainable_variables(VariableGroup.QPARAMS)
trainable_weights.append(quantizer_trainable_weights)
trainable_threshold.extend(quantizer_trainable_threshold)
if add_bias:
kernel_ops_attrs = fw_info.kernel_ops_attributes_mapping.get(type(layer.layer))
use_bias = ((kernel_ops_attrs is not None) and (kernel_ops_attrs[0] is not None) and layer.layer.get_config().get(USE_BIAS))
if ((use_bias is not None) and use_bias):
bias_weights.append([layer.layer.bias])
return (trainable_weights, bias_weights, trainable_threshold) |
def test_property_selection_function(mosa_strategy):
selection_function = MagicMock(SelectionFunction())
mosa_strategy.selection_function = selection_function
assert (mosa_strategy.selection_function == selection_function) |
class Retry(object):
DEFAULT_METHOD_WHITELIST = frozenset(['HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(['Authorization'])
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None, status=None, method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None, backoff_factor=0, raise_on_redirect=True, raise_on_status=True, history=None, respect_retry_after_header=True, remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST):
self.total = total
self.connect = connect
self.read = read
self.status = status
if ((redirect is False) or (total is False)):
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = (status_forcelist or set())
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self.raise_on_status = raise_on_status
self.history = (history or tuple())
self.respect_retry_after_header = respect_retry_after_header
self.remove_headers_on_redirect = frozenset([h.lower() for h in remove_headers_on_redirect])
def new(self, **kw):
params = dict(total=self.total, connect=self.connect, read=self.read, redirect=self.redirect, status=self.status, method_whitelist=self.method_whitelist, status_forcelist=self.status_forcelist, backoff_factor=self.backoff_factor, raise_on_redirect=self.raise_on_redirect, raise_on_status=self.raise_on_status, history=self.history, remove_headers_on_redirect=self.remove_headers_on_redirect, respect_retry_after_header=self.respect_retry_after_header)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
if (retries is None):
retries = (default if (default is not None) else cls.DEFAULT)
if isinstance(retries, Retry):
return retries
redirect = (bool(redirect) and None)
new_retries = cls(retries, redirect=redirect)
log.debug('Converted retries value: %r -> %r', retries, new_retries)
return new_retries
def get_backoff_time(self):
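# Exponential backoff: backoff_factor * 2**(n - 1), where n counts consecutive history entries with no redirect location, capped at BACKOFF_MAX.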
consecutive_errors_len = len(list(takewhile((lambda x: (x.redirect_location is None)), reversed(self.history))))
if (consecutive_errors_len <= 1):
return 0
backoff_value = (self.backoff_factor * (2 ** (consecutive_errors_len - 1)))
return min(self.BACKOFF_MAX, backoff_value)
def parse_retry_after(self, retry_after):
if re.match('^\\s*[0-9]+\\s*$', retry_after):
seconds = int(retry_after)
else:
retry_date_tuple = email.utils.parsedate(retry_after)
if (retry_date_tuple is None):
raise InvalidHeader(('Invalid Retry-After header: %s' % retry_after))
retry_date = time.mktime(retry_date_tuple)
seconds = (retry_date - time.time())
if (seconds < 0):
seconds = 0
return seconds
def get_retry_after(self, response):
retry_after = response.getheader('Retry-After')
if (retry_after is None):
return None
return self.parse_retry_after(retry_after)
def sleep_for_retry(self, response=None):
retry_after = self.get_retry_after(response)
if retry_after:
time.sleep(retry_after)
return True
return False
def _sleep_backoff(self):
backoff = self.get_backoff_time()
if (backoff <= 0):
return
time.sleep(backoff)
def sleep(self, response=None):
if (self.respect_retry_after_header and response):
slept = self.sleep_for_retry(response)
if slept:
return
self._sleep_backoff()
def _is_connection_error(self, err):
if isinstance(err, ProxyError):
err = err.original_error
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
return isinstance(err, (ReadTimeoutError, ProtocolError))
def _is_method_retryable(self, method):
if (self.method_whitelist and (method.upper() not in self.method_whitelist)):
return False
return True
def is_retry(self, method, status_code, has_retry_after=False):
if (not self._is_method_retryable(method)):
return False
if (self.status_forcelist and (status_code in self.status_forcelist)):
return True
return (self.total and self.respect_retry_after_header and has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
def is_exhausted(self):
retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
retry_counts = list(filter(None, retry_counts))
if (not retry_counts):
return False
return (min(retry_counts) < 0)
def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
if ((self.total is False) and error):
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if (total is not None):
total -= 1
connect = self.connect
read = self.read
redirect = self.redirect
status_count = self.status
cause = 'unknown'
status = None
redirect_location = None
if (error and self._is_connection_error(error)):
if (connect is False):
raise six.reraise(type(error), error, _stacktrace)
elif (connect is not None):
connect -= 1
elif (error and self._is_read_error(error)):
if ((read is False) or (not self._is_method_retryable(method))):
raise six.reraise(type(error), error, _stacktrace)
elif (read is not None):
read -= 1
elif (response and response.get_redirect_location()):
if (redirect is not None):
redirect -= 1
cause = 'too many redirects'
redirect_location = response.get_redirect_location()
status = response.status
else:
cause = ResponseError.GENERIC_ERROR
if (response and response.status):
if (status_count is not None):
status_count -= 1
cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
status = response.status
history = (self.history + (RequestHistory(method, url, error, status, redirect_location),))
new_retry = self.new(total=total, connect=connect, read=read, redirect=redirect, status=status_count, history=history)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, (error or ResponseError(cause)))
log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
return new_retry
def __repr__(self):
return '{cls.__name__}(total={self.total}, connect={self.connect}, read={self.read}, redirect={self.redirect}, status={self.status})'.format(cls=type(self), self=self) |
def run_on_one_sequence(sess, model, batch_img):
with sess.as_default():
prob = sess.run(model.preds, feed_dict={images: batch_img, K.learning_phase(): 0})
print(prob)
(gb_grad_value, target_conv_layer_value, target_conv_layer_grad_value) = sess.run([gb_grad, target_conv_layer, target_conv_layer_grad], feed_dict={images: batch_img, labels: batch_label, K.learning_phase(): 0})
utils.visualize_overlays_4D(batch_img, target_conv_layer_value, target_conv_layer_grad_value) |
def test_fix_span_text():
test_cases = [('The kilogram-force is not a part', ['the', 'kilogram', '-', 'force', 'is', 'not'], 'the kilogram-force is not'), ('The kilogram-force is not a part', ['kilogram', '-', 'force'], 'kilogram-force'), ('In the 1910s, New Yorkbased filmmakers were attracted to', ['new', 'york', '', 'based', 'filmmakers'], 'new yorkbased filmmakers'), ("offered a US$10 a week raise over Tesla's US$18 per week salary; Tesla refused", ['us', '$10', 'a', 'week', 'raise', 'over', 'tesla', "'s", 'us', '$18', 'per', 'week', 'salary'], "us$10 a week raise over tesla's us$18 per week salary"), ("offered a US$10 a week raise over Tesla's US$18 per week salary; Tesla refused", ['us$', '10', 'a', 'week', 'raise', 'over', 'tesla', "'s", 'us$', '18', 'per', 'week', 'salary'], "us$10 a week raise over tesla's us$18 per week salary"), ('while BSkyB paying 304m for the Premier League rights', ['304', 'm'], '304m'), ('the cameras were upgraded to 5K resolution', ['5', 'k'], '5k')]
for (passage, tokens, expected_text) in test_cases:
assert (fix_span_text(tokens, passage) == expected_text), expected_text |
def has_exact_match(ground_truths, candidates):
for ground_truth in ground_truths:
if (ground_truth in candidates):
return True
return False |
class EntropyRegularisationLoss(nn.Module):
def __init__(self):
super(EntropyRegularisationLoss, self).__init__()
pass
def forward(self, policies, tformat):
(_policies, policies_params, policies_tformat) = _to_batch(policies, tformat)
entropy = th.bmm(th.log(_policies).unsqueeze(1), _policies.unsqueeze(2)).squeeze(2)
ret = _from_batch(entropy, policies_params, policies_tformat)
return ret |
class Wrapper():
@staticmethod
def get_args(parser):
parser.add('--gan_type', type=str, default='gan', help='gan|rgan|ragan')
@staticmethod
def get_net(args):
criterion = Criterion(args.gan_type)
return criterion.to(args.device) |
def resize(image, new_width_height=1920, convert_RGB=True):
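# Resize so the longer side equals new_width_height (keeping aspect ratio) when an int is given, otherwise resize to the given (width, height) tuple; optionally flatten RGBA onto white and convert to RGB.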
image = (Image.open(image) if isinstance(image, (str, BytesIO)) else image)
(w, h) = image.size
fixed_size = (new_width_height if isinstance(new_width_height, int) else False)
if fixed_size:
if (h > w):
fixed_height = fixed_size
height_percent = (fixed_height / float(h))
width_size = int((float(w) * float(height_percent)))
image = image.resize((width_size, fixed_height), Image.NEAREST)
else:
fixed_width = fixed_size
width_percent = (fixed_width / float(w))
height_size = int((float(h) * float(width_percent)))
image = image.resize((fixed_width, height_size), Image.NEAREST)
else:
image = image.resize(new_width_height)
if ((image.mode == 'RGBA') and convert_RGB):
new = Image.new('RGBA', image.size, 'WHITE')
new.paste(image, (0, 0), image)
image = new.convert('RGB')
return image |
def is_source_code_missing_brackets(source_code, prioritize_missing_open=False):
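# Token-level bracket balance check: returns 1 if a closing bracket is missing, -1 if an opening bracket is missing, 0 if balanced; with prioritize_missing_open, an unmatched close returns -1 immediately.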
open_brackets = '[{('
close_brackets = ']})'
last_bracket = [(- 1)]
counters = ([0] * len(open_brackets))
missing_open = False
for (t_type, t_content) in list(parse_py_statements(source_code)):
if (t_type != 'op'):
continue
if (t_content in open_brackets):
idx = open_brackets.index(t_content)
counters[idx] += 1
last_bracket.append(idx)
elif (t_content in close_brackets):
idx = close_brackets.index(t_content)
if (last_bracket[(- 1)] == idx):
counters[idx] -= 1
del last_bracket[(- 1)]
else:
if prioritize_missing_open:
return (- 1)
missing_open = True
missing_close = (not all([(c == 0) for c in counters]))
if missing_close:
return 1
if missing_open:
return (- 1)
return 0 |
class CustomAgentExecutor(AgentExecutor):
def _take_next_step(self, name_to_tool_map: Dict[(str, BaseTool)], color_mapping: Dict[(str, str)], inputs: Dict[(str, str)], intermediate_steps: List[Tuple[(AgentAction, str)]]) -> Union[(AgentFinish, List[Tuple[(AgentAction, str)]])]:
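# Plan with the agent; if it finishes, return directly, otherwise run each proposed tool (or an invalid-tool handler) and collect (action, observation) pairs.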
output = self.agent.plan(intermediate_steps, **inputs)
if isinstance(output, AgentFinish):
return output
actions: List[AgentAction]
if isinstance(output, AgentAction):
actions = [output]
else:
actions = output
result = []
for agent_action in actions:
self.callback_manager.on_agent_action(agent_action, verbose=self.verbose, color='green')
if (agent_action.tool in name_to_tool_map):
tool = name_to_tool_map[agent_action.tool]
return_direct = tool.return_direct
color = color_mapping[agent_action.tool]
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
tool_run_kwargs['inputs'] = inputs
if return_direct:
tool_run_kwargs['llm_prefix'] = ''
observation = tool.run(agent_action.tool_input, verbose=self.verbose, color=color, **tool_run_kwargs)
else:
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
observation = CustomInvalidTool().run(agent_action.tool, all_tools=list(name_to_tool_map.keys()), verbose=self.verbose, color=None, **tool_run_kwargs)
result.append((agent_action, observation))
return result |
def set_flags(_enabled):
orig_flags = (torch._C._get_mkldnn_enabled(),)
torch._C._set_mkldnn_enabled(_enabled)
return orig_flags |
def test_line_coverage_fully_covered(subject_properties_mock, trace_mock):
subject_properties_mock.existing_lines = {0: LineMetaData(0, 'foo', 0), 1: LineMetaData(0, 'foo', 1)}
trace_mock.covered_line_ids = {0, 1}
assert (ff.compute_line_coverage(trace_mock, subject_properties_mock) == 1.0) |
class MultiResolutionSTFTLoss(torch.nn.Module):
def __init__(self, fft_sizes=[1024, 2048, 512], hop_sizes=[120, 240, 50], win_lengths=[600, 1200, 240], window='hann_window', factor_sc=0.1, factor_mag=0.1):
super(MultiResolutionSTFTLoss, self).__init__()
assert (len(fft_sizes) == len(hop_sizes) == len(win_lengths))
self.stft_losses = torch.nn.ModuleList()
for (fs, ss, wl) in zip(fft_sizes, hop_sizes, win_lengths):
self.stft_losses += [STFTLoss(fs, ss, wl, window)]
self.factor_sc = factor_sc
self.factor_mag = factor_mag
def forward(self, x, y):
sc_loss = 0.0
mag_loss = 0.0
for f in self.stft_losses:
(sc_l, mag_l) = f(x, y)
sc_loss += sc_l
mag_loss += mag_l
sc_loss /= len(self.stft_losses)
mag_loss /= len(self.stft_losses)
return ((self.factor_sc * sc_loss), (self.factor_mag * mag_loss)) |
def deriv_df_coefficient(coeff):
dcoeff = defaultdict(float)
for (key, val) in coeff.items():
if (key[0] == 'indirect'):
pwer = key[1]
dcoeff[('indirect', (pwer + 2))] = (((- 0.5) * pwer) * val)
else:
(p, sjn) = key
(s, j, n) = sjn
if (p > 0):
dcoeff[((p - 1), sjn)] += (p * val)
dcoeff[(p, (s, j, (n + 1)))] += val
return dict(dcoeff) |
def load_langpair_dataset(data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos=False, load_alignments=False, truncate_source=False, append_source_id=False, num_buckets=0, shuffle=True, pad_to_multiple=1):
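# Fairseq-style loader: gather all shards of a parallel split, optionally truncate the source, prepend BOS or append language-id tokens, attach alignments if present, and wrap everything in a LanguagePairDataset.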
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = (split + (str(k) if (k > 0) else ''))
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
elif (k > 0):
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
src_dataset = data_utils.load_indexed_dataset((prefix + src), src_dict, dataset_impl)
if truncate_source:
src_dataset = AppendTokenDataset(TruncateDataset(StripTokenDataset(src_dataset, src_dict.eos()), (max_source_positions - 1)), src_dict.eos())
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset((prefix + tgt), tgt_dict, dataset_impl)
if (tgt_dataset is not None):
tgt_datasets.append(tgt_dataset)
logger.info('{} {} {}-{} {} examples'.format(data_path, split_k, src, tgt, len(src_datasets[(- 1)])))
if (not combine):
break
assert ((len(src_datasets) == len(tgt_datasets)) or (len(tgt_datasets) == 0))
if (len(src_datasets) == 1):
src_dataset = src_datasets[0]
tgt_dataset = (tgt_datasets[0] if (len(tgt_datasets) > 0) else None)
else:
sample_ratios = ([1] * len(src_datasets))
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if (len(tgt_datasets) > 0):
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert (hasattr(src_dict, 'bos_index') and hasattr(tgt_dict, 'bos_index'))
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if (tgt_dataset is not None):
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(src_dataset, src_dict.index('[{}]'.format(src)))
if (tgt_dataset is not None):
tgt_dataset = AppendTokenDataset(tgt_dataset, tgt_dict.index('[{}]'.format(tgt)))
eos = tgt_dict.index('[{}]'.format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)
tgt_dataset_sizes = (tgt_dataset.sizes if (tgt_dataset is not None) else None)
return LanguagePairDataset(src_dataset, src_dataset.sizes, src_dict, tgt_dataset, tgt_dataset_sizes, tgt_dict, left_pad_source=left_pad_source, left_pad_target=left_pad_target, align_dataset=align_dataset, eos=eos, num_buckets=num_buckets, shuffle=shuffle, pad_to_multiple=pad_to_multiple) |
@dc.dataclass
class ExtensionTable():
baseURL: list = dc.field(default_factory=list)
id: list = dc.field(default_factory=list)
name: list = dc.field(default_factory=list)
length: int = dc.field(default_factory=list) |
def unwrap_model(model: torch.nn.Module) -> torch.nn.Module:
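# Recursively unwrap containers (e.g. DataParallel / DistributedDataParallel) that expose the underlying model via .module.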
if hasattr(model, 'module'):
return unwrap_model(model.module)
else:
return model |
def pointnet_sa_module_msg(xyz, points, npoint, radius_list, nsample_list, mlp_list, is_training, bn_decay, scope, bn=True, use_xyz=True, use_nchw=False):
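# PointNet++ set abstraction with multi-scale grouping: sample npoint centroids via farthest-point sampling, group neighbours at each radius, apply the per-scale MLPs with max-pooling, and concatenate the scales.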
data_format = ('NCHW' if use_nchw else 'NHWC')
with tf.variable_scope(scope) as sc:
new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
new_points_list = []
for i in range(len(radius_list)):
radius = radius_list[i]
nsample = nsample_list[i]
(idx, pts_cnt) = query_ball_point(radius, nsample, xyz, new_xyz)
grouped_xyz = group_point(xyz, idx)
grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])
if (points is not None):
grouped_points = group_point(points, idx)
if use_xyz:
grouped_points = tf.concat([grouped_points, grouped_xyz], axis=(- 1))
else:
grouped_points = grouped_xyz
if use_nchw:
grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
for (j, num_out_channel) in enumerate(mlp_list[i]):
grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope=('conv%d_%d' % (i, j)), bn_decay=bn_decay)
if use_nchw:
grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
new_points = tf.reduce_max(grouped_points, axis=[2])
new_points_list.append(new_points)
new_points_concat = tf.concat(new_points_list, axis=(- 1))
return (new_xyz, new_points_concat) |