code stringlengths 101 5.91M |
|---|
class FileDataset():
    """Iterable over a text file: yields each line lowercased and tokenized.

    Re-opens the file on every iteration, so the dataset can be scanned
    multiple times (e.g. by gensim-style streaming consumers).
    """

    def __init__(self, path, tokenizer=nltk.RegexpTokenizer('\\b[a-zA-Z]{2,}\\b')):
        # Default tokenizer keeps alphabetic tokens of length >= 2.
        self.path = path
        self.tokenizer = tokenizer

    def __iter__(self):
        # BUG FIX: the original used `list(open(self.path))`, leaking the
        # file handle until garbage collection; close it deterministically.
        with open(self.path) as f:
            lines = f.readlines()
        lines_tok = self.tokenizer.tokenize_sents(map(str.lower, lines))
        return iter(lines_tok)
def IntSort(ctx=None):
    """Return the Z3 integer sort bound to *ctx* (the default context if None)."""
    resolved = _get_ctx(ctx)
    return ArithSortRef(Z3_mk_int_sort(resolved.ref()), resolved)
class SNodeHostAccessor():
    """Host-side accessor for a Taichi SNode: builds `getter`/`setter`
    closures whose read/write primitives are chosen from the SNode's
    data type."""
    def __init__(self, snode):
        if _ti_core.is_real(snode.data_type()):
            # Floating-point fields read/write through the float primitives.
            write_func = snode.write_float
            read_func = snode.read_float
        else:
            # Integer fields: non-negative values go through the unsigned
            # writer, negatives through the signed writer.
            def write_func(key, value):
                if (value >= 0):
                    snode.write_uint(key, value)
                else:
                    snode.write_int(key, value)
            if _ti_core.is_signed(snode.data_type()):
                read_func = snode.read_int
            else:
                read_func = snode.read_uint
        def getter(*key):
            # Keys must always be fully specified (padded to the maximum
            # number of indices).
            assert (len(key) == _ti_core.get_max_num_indices())
            return read_func(key)
        def setter(value, *key):
            assert (len(key) == _ti_core.get_max_num_indices())
            write_func(key, value)
            # While a gradient check is being recorded (and this write was
            # not already replaced by a grad-replaced kernel), overwriting a
            # checked SNode would invalidate the gradient check.
            if (impl.get_runtime().target_tape and impl.get_runtime().target_tape.grad_checker and (not impl.get_runtime().grad_replaced)):
                for x in impl.get_runtime().target_tape.grad_checker.to_check:
                    assert (snode != x.snode.ptr), 'Overwritten is prohibitive when doing grad check.'
                # Record the host write on the tape so it can be replayed.
                impl.get_runtime().target_tape.insert(write_func, (key, value))
        # Expose the closures as the public accessor interface.
        self.getter = getter
        self.setter = setter
class _SplitDataset(torch.utils.data.Dataset):
def __init__(self, underlying_dataset, keys):
super(_SplitDataset, self).__init__()
self.underlying_dataset = underlying_dataset
self.keys = keys
def __getitem__(self, key):
return self.underlying_dataset[self.keys[key]]
def __len__(self):
return len(self.keys) |
def retrive_var(scopes):
    """Collect every TF trainable variable registered under each given scope.

    (Name kept as-is for backward compatibility despite the typo.)
    """
    collected = []
    for scope in scopes:
        collected.extend(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope))
    return collected
def sdfg_with_children(A: dp.float32[(N, N)], B: dp.float32[(N, N)]):
    # NOTE(review): this looks like a DaCe program whose `@dp.program` /
    # `@dp.map` / `@dp.tasklet` decorators were stripped during extraction —
    # TODO confirm against the original source.
    def elements(i: _[0:N], j: _[0:N]):
        # Tasklet: initialize B[i, j] from A[i, j].
        def init():
            (inp << A[(i, j)])
            (out >> B[(i, j)])
            out = inp
        for k in range(4):
            # Tasklet: multiply B[i, j] by A[i, j], repeated four times.
            def do():
                (inp << A[(i, j)])
                (oin << B[(i, j)])
                (out >> B[(i, j)])
                out = (oin * inp)
def init_test_mot16():
    """Populate the module-level `config` with the MOT16 test settings."""
    config.update({
        'resume': '/home/ssm/ssj/weights/MOT17/weights0326-I50k-M80-G30/ssj300_0712_80000.pth',
        'mot_root': '/home/ssm/ssj/dataset/MOT16',
        'batch_size': 1,
        'write_file': True,
        'tensorboard': True,
        'save_combine': False,
        'type': 'test',
    })
def export_onnx(pretrained):
    """Load SYEISPNetS weights, slim the network, reorder the grouped conv
    channels, and export two ONNX files (fixed-size and dynamic-axes)."""
    net = SYEISPNetS(channels=12)
    checkpoint = torch.load(pretrained)
    net.load_state_dict(checkpoint)
    net.eval()
    net = net.slim().eval()
    # Re-interleave the 12 channels from (3, 4) group-major order to (4, 3)
    # by reshaping, permuting the two leading group axes, and flattening back.
    # NOTE(review): assumes the slimmed convs keep 12 in/out channels — confirm.
    net.body.block1.weight = nn.Parameter(net.body.block1.weight.reshape((3, 4, 12, 3, 3)).permute([1, 0, 2, 3, 4]).reshape((12, 12, 3, 3)))
    net.body.block2.weight = nn.Parameter(net.body.block2.weight.reshape((3, 4, 12, 1, 1)).permute([1, 0, 2, 3, 4]).reshape((12, 12, 1, 1)))
    net.body.block1.bias = nn.Parameter(net.body.block1.bias.reshape((3, 4)).permute([1, 0]).reshape(12))
    net.body.block2.bias = nn.Parameter(net.body.block2.bias.reshape((3, 4)).permute([1, 0]).reshape(12))
    net.body.bias = nn.Parameter(net.body.bias.reshape((3, 4)).permute([1, 0]).reshape(1, 12, 1, 1))
    # att[1] permutes the input-channel groups (axes 1 and 2) instead of the
    # output-channel groups.
    net.att[1].weight = nn.Parameter(net.att[1].weight.reshape((12, 3, 4, 1, 1)).permute([0, 2, 1, 3, 4]).reshape((12, 12, 1, 1)))
    net.att[3].weight = nn.Parameter(net.att[3].weight.reshape((3, 4, 12, 1, 1)).permute([1, 0, 2, 3, 4]).reshape((12, 12, 1, 1)))
    net.att[3].bias = nn.Parameter(net.att[3].bias.reshape((3, 4)).permute([1, 0]).reshape(12))
    # Dummy 4-channel input at the expected resolution, used for tracing.
    x = torch.rand(1, 4, 544, 960)
    torch.onnx.export(net, x, 'model.onnx', opset_version=11)
    # Second export with height/width (axes 2 and 3) marked dynamic.
    torch.onnx.export(net, x, 'model_none.onnx', opset_version=11, dynamic_axes={'input': [2, 3], 'output': [2, 3]})
# NOTE(review): `_level_function()` looks like a decorator for the following
# definition whose leading '@' was lost during extraction — TODO confirm.
_level_function()
def from_avro_file(file, limit_entries=None, *, debug_forth=False, highlevel=True, behavior=None, attrs=None):
    """Read an Avro file into an Awkward Array.

    `file` may be a path (str/bytes/PathLike) or an open file-like object
    supporting `read` and `seek`. `limit_entries` caps how many entries are
    decoded; `debug_forth` enables debugging of the internal Forth reader.
    """
    import awkward._connect.avro
    if isinstance(file, (str, bytes, PathLike)):
        # Path-like input: open it ourselves (binary) and parse.
        file = fsdecode(file)
        with open(file, 'rb') as opened_file:
            (form, length, container) = awkward._connect.avro.ReadAvroFT(opened_file, limit_entries, debug_forth).outcontents
            return _impl(form, length, container, highlevel, behavior, attrs)
    elif ((not hasattr(file, 'read')) or (not hasattr(file, 'seek'))):
        raise TypeError("'file' must either be a filename string or be a file-like object with 'read' and 'seek' methods")
    else:
        # NOTE(review): the path branch reads `.outcontents` while this branch
        # reads `.outarr` — confirm the asymmetry is intentional upstream.
        (form, length, container) = awkward._connect.avro.ReadAvroFT(file, limit_entries, debug_forth).outarr
        return _impl(form, length, container, highlevel, behavior, attrs)
class Transition(nn.Module):
    """DenseNet-style transition: BN -> PAU activation -> 1x1 conv -> 2x2 avg-pool."""

    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
        self.pau = PAU()

    def forward(self, x):
        normed = self.bn(x)
        activated = self.pau(normed)
        reduced = self.conv(activated)
        # Halve the spatial resolution after the channel reduction.
        return F.avg_pool2d(reduced, 2)
class ProcessGroupRpcAgentTestFixture(RpcAgentTestFixture):
    """Test fixture selecting the PROCESS_GROUP RPC backend.

    NOTE(review): `rpc_backend` / `rpc_backend_options` are used like
    properties elsewhere, and the bare `_backend_options.setter` expression
    below looks like a mangled `@rpc_backend_options.setter` decorator — the
    '@' decorators appear to have been stripped during extraction; TODO
    confirm against the original file.
    """
    def rpc_backend(self):
        return rpc.backend_registry.BackendType['PROCESS_GROUP']
    def rpc_backend_options(self):
        try:
            # Prefer options previously installed via the setter.
            return self._rpc_backend_options
        except AttributeError:
            # Fall back to freshly constructed defaults.
            return rpc.backend_registry.construct_rpc_backend_options(self.rpc_backend, init_method=self.init_method, num_send_recv_threads=8)
    _backend_options.setter
    def rpc_backend_options(self, new_rpc_backend_options):
        self._rpc_backend_options = new_rpc_backend_options
    def get_shutdown_error_regex(self):
        """Regex matching any error message expected during agent shutdown."""
        error_regexes = ['Encountered exception in ProcessGroupAgent::enqueueSend', 'Encountered exception in ProcessGroupAgent::listenLoop()', 'Exception in thread pool task', 'Connection reset by peer', 'Connection closed by peer']
        return '|'.join(['({})'.format(error_str) for error_str in error_regexes])
    def get_timeout_error_regex(self):
        return 'RPC ran for more than'
def train(args, model, classifier, train_loader, criterion, optimizer, epoch):
    """Run one training epoch; returns (avg loss, all features, all targets).

    `model` extracts features, `classifier` maps them to a scalar output
    scored by `criterion`. Per-batch features/targets are accumulated and
    returned concatenated (detached) for downstream use.

    NOTE(review): `data_time` is created but never updated, so the printed
    DT values stay at their initial state — possibly leftover; confirm.
    """
    model.train()
    classifier.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    total_feats = []
    total_targets = []
    end = time.time()
    for (batch_idx, (input1, target)) in enumerate(tqdm(train_loader, disable=False)):
        (input1, target) = (input1.float(), target.float())
        # Flatten any leading grouping dims: inputs to (N, 3, H, W), targets to (N,).
        (input1, target) = (input1.reshape((- 1), 3, args.image_size, args.image_size), target.reshape((- 1)))
        (input1, target) = (input1.cuda(), target.cuda())
        feats = model(input1)
        output = classifier(feats)
        # Squeeze the classifier output down to a flat (N,) vector.
        output = output.view((- 1), 1).reshape((- 1))
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_size = target.size(0)
        losses.update(loss.item(), batch_size)
        # Keep per-batch features/targets (still attached/on GPU here) for the
        # epoch-level return values.
        total_feats.append(feats)
        total_targets.append(target)
        batch_time.update((time.time() - end))
        end = time.time()
        if (((batch_idx + 1) % args.print_freq) == 0):
            print('Train: [{0}][{1}/{2}]\tBT {batch_time.val:.3f} ({batch_time.avg:.3f})\tDT {data_time.val:.3f} ({data_time.avg:.3f})\tloss {loss.val:.3f} ({loss.avg:.3f})'.format(epoch, (batch_idx + 1), len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses))
    final_feats = torch.cat(total_feats).detach()
    final_targets = torch.cat(total_targets).detach()
    return (losses.avg, final_feats, final_targets)
class TFLxmertMainLayer(metaclass=DummyObject):
    """Import-time placeholder: instantiating it raises unless the
    TensorFlow backend is installed (enforced by `requires_backends`)."""
    # Backends that must be present for the real class to be usable.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def _add_category_id_to_contiguous_id_maps_to_metadata(merged_categories: _MergedCategoriesT):
    """Assign contiguous category IDs (ordered by merged category ID) and
    store the per-dataset ID mappings on each dataset's metadata."""
    # Group (contiguous_id, category) pairs by source dataset, keyed by the
    # merged category id.
    per_dataset = {}
    for contiguous_cat_id, cat_id in enumerate(sorted(merged_categories)):
        for cat in merged_categories[cat_id]:
            bucket = per_dataset.setdefault(cat.dataset_name, defaultdict(list))
            bucket[cat_id].append((contiguous_cat_id, cat))
    logger = logging.getLogger(__name__)
    for dataset_name, dataset_categories in per_dataset.items():
        meta = MetadataCatalog.get(dataset_name)
        if hasattr(meta, 'thing_classes'):
            # Re-registering: reset the previous mappings in place.
            meta.thing_classes.clear()
            meta.thing_dataset_id_to_contiguous_id.clear()
            meta.thing_dataset_id_to_merged_id.clear()
        else:
            meta.thing_classes = []
            meta.thing_dataset_id_to_contiguous_id = {}
            meta.thing_dataset_id_to_merged_id = {}
        logger.info(f'Dataset {dataset_name}: category ID to contiguous ID mapping:')
        for _cat_id, categories in sorted(dataset_categories.items()):
            added_to_thing_classes = False
            for contiguous_cat_id, cat in categories:
                # Only the first category of each group contributes the class name.
                if not added_to_thing_classes:
                    meta.thing_classes.append(cat.mapped_name)
                    added_to_thing_classes = True
                meta.thing_dataset_id_to_contiguous_id[cat.id] = contiguous_cat_id
                meta.thing_dataset_id_to_merged_id[cat.id] = cat.mapped_id
                logger.info(f'{cat.id} ({cat.name}) -> {contiguous_cat_id}')
def run_ddp(rank, world_size, prepared):
    """Train `prepared` for one epoch under DistributedDataParallel on `rank`.

    Relies on module-level `criterion` and `dataset` (defined elsewhere in
    the original file) and on `ddp_setup`/`ddp_cleanup` helpers.
    """
    ddp_setup(rank, world_size)
    prepared.cuda()
    prepared = torch.nn.parallel.DistributedDataParallel(prepared, device_ids=[rank])
    # NOTE(review): `.to(rank)` after `.cuda()` and DDP-wrapping looks
    # redundant — confirm intent before removing.
    prepared.to(rank)
    model_with_ddp = prepared
    optimizer = torch.optim.SGD(model_with_ddp.parameters(), lr=0.0001)
    train_one_epoch(model_with_ddp, criterion, optimizer, dataset, rank, 1)
    ddp_cleanup()
def write_embeddings(filename, dict, embeddings):
    """Write embeddings as text, one line per row: "<label> v1 v2 ...".

    `dict` (name kept for backward compatibility, even though it shadows the
    builtin) must expose `idxToLabel`, mapping row index -> label string.
    """
    with open(filename, 'w', encoding='utf-8') as file:
        for i in range(len(embeddings)):
            # Build each row with join instead of repeated `+=` string
            # concatenation. This also fixes the Python 3 bytes+str TypeError
            # the original hit by concatenating an `.encode('utf-8')` label
            # with str pieces.
            values = ''.join((' %5f' % v) for v in embeddings[i])
            file.write(dict.idxToLabel[i] + values + '\n')
class MovingAverageActionWrapperActorPolicy(BaseActorPolicy):
    """Wraps a policy and smooths its actions with a moving average over the
    last `widow_size` (sic) actions; smoothing kicks in only once the buffer
    has been filled with real actions."""

    def __init__(self, policy, widow_size=8, initial_value=0):
        # BUG FIX: the original called super(BaseActorPolicy, self).__init__(),
        # which skips BaseActorPolicy's own __init__ in the MRO.
        super(MovingAverageActionWrapperActorPolicy, self).__init__()
        self.__widow_size = widow_size
        # Ring buffer of per-slot contributions (action / window size) so the
        # running average can be updated incrementally.
        self.__buffer = ([(initial_value / widow_size)] * widow_size)
        self.__avg = initial_value
        self.__p = 0
        self.__start_smoothing = False
        self.__initial_counter = 0
        self.__policy = policy

    def avg(self):
        # NOTE(review): likely intended as @property — decorators appear
        # stripped in this file; left as a plain method to keep the interface.
        return self.__avg

    def policy(self):
        return self.__policy

    def act(self, obs):
        """Return the smoothed action once warmed up, else the raw action."""
        unsmoothed_action = self.__policy.act(obs)
        # Replace the oldest contribution in the ring buffer.
        self.__avg -= self.__buffer[self.__p]
        self.__buffer[self.__p] = (unsmoothed_action / self.__widow_size)
        self.__avg += self.__buffer[self.__p]
        self.__p = ((self.__p + 1) % self.__widow_size)
        if (not self.__start_smoothing):
            # Count warm-up steps; smooth only after a full window of
            # real actions has been observed.
            self.__initial_counter += 1
            if (self.__initial_counter >= self.__widow_size):
                self.__start_smoothing = True
        if self.__start_smoothing:
            return self.__avg
        else:
            return unsmoothed_action
def average_time_of_func(func, num_iter=10000, ms=False):
    """Time `func` via timeit and return the mean per-iteration duration.

    The result (and the logged value) is in milliseconds when `ms` is True,
    otherwise in seconds.
    """
    total = timeit.timeit(func, number=num_iter)
    avg_time = total / num_iter
    if ms:
        avg_time *= 1000
        logger.info('{} {} ms/iter'.format(func.__name__, avg_time))
    else:
        logger.info('{} {} s/iter'.format(func.__name__, avg_time))
    return avg_time
class MapTilingWithOverlapTest(unittest.TestCase):
    """Checks that applying MapTilingWithOverlap leaves SDFG results unchanged."""
    def semantic_eq(self, tile_sizes):
        # Run the program once untransformed and once tiled; outputs must match.
        A = np.random.rand(16, 16).astype(np.float32)
        B1 = np.zeros((16, 16), dtype=np.float32)
        B2 = np.zeros((16, 16), dtype=np.float32)
        # NOTE(review): `copy` here is presumably a module-level DaCe program
        # (shadowing the stdlib `copy` module) — confirm against the full file.
        sdfg = copy.to_sdfg()
        sdfg(inp=A, out=B1, I=A.shape[0], J=A.shape[1])
        count = sdfg.apply_transformations(MapTilingWithOverlap, options={'tile_sizes': tile_sizes, 'lower_overlap': (1, 2), 'upper_overlap': (1, 2)})
        # At least one map must have been tiled for the test to be meaningful.
        self.assertGreater(count, 0)
        sdfg(inp=A, out=B2, I=A.shape[0], J=A.shape[1])
        self.assertTrue(np.allclose(B1, B2))
    def test_semantic_eq(self):
        self.semantic_eq([3, 3])
    def test_semantic_eq_tile_size_1(self):
        self.semantic_eq([1, 1])
def get_utterances_from_file(dialog_csv_file, dialog_csv_filename):
    """Parse a dialog CSV and convert every row to a dialog utterance.

    Only the last component of the (backslash-separated) filename is passed
    along with each row.
    """
    basename = dialog_csv_filename.split('\\')[-1]
    rows = csv.DictReader(dialog_csv_file)
    return [_dict_to_dialog_utterance(row, basename) for row in rows]
def test_weka(filename):
    """Load an ARFF file and print each attribute's metadata and column."""
    data, meta = loadarff(filename)
    print(len(data.dtype))
    print(data.size)
    for attr_name in meta:
        print_attribute(attr_name, meta[attr_name], data[attr_name])
def check_in_repo():
    """Return an error message when the CWD is not the PyTorch repo root.

    Returns None when a `setup.py` containing the string 'PyTorch' exists
    in the current directory.
    """
    if not os.path.isfile('setup.py'):
        return 'Not in root-level PyTorch repo, no setup.py found'
    with open('setup.py') as f:
        contents = f.read()
    if 'PyTorch' not in contents:
        return "Not in PyTorch repo, 'PyTorch' not found in setup.py"
    return None
class ImageNetDataset(Dataset):
    """ImageNet dataset backed by a GCS bucket.

    When `bucket_name` is None the dataset produces random RGB images and
    random labels (useful for smoke tests). Otherwise it lists all blobs
    under `imagenet/imagenet/<split>` and derives the class label from each
    blob's parent directory name (classes numbered in order of first
    appearance). With `streaming=True` images are fetched on demand; with
    `streaming=False` they are cached under `data_download_dir`.
    """
    def __init__(self, split, bucket_name, streaming=True, data_download_dir=None, transform=None):
        assert (split in ['train', 'validation']), 'split {} not in (train, validation)'.format(split)
        self._split = split
        self._bucket_name = bucket_name
        self._target_dir = data_download_dir
        self._source_dir = os.path.join('imagenet/imagenet', self._split)
        self._transform = transform
        self._streaming = streaming
        if (self._bucket_name is not None):
            self._storage_client = storage.Client()
            self._bucket = self._storage_client.bucket(bucket_name)
            self._imgs_paths = []
            self._labels = []
            # Map each blob's parent-directory name to a dense class index,
            # assigned in order of first appearance.
            self._subdir_to_class = {}
            class_count = 0
            blobs = list_blobs(self._storage_client, self._bucket_name, prefix=self._source_dir)
            for b in blobs:
                path = b.name
                self._imgs_paths.append(path)
                sub_dir = path.split('/')[(- 2)]
                if (sub_dir not in self._subdir_to_class):
                    self._subdir_to_class[sub_dir] = class_count
                    class_count += 1
                self._labels.append(self._subdir_to_class[sub_dir])
            print('There are {} records in dataset.'.format(len(self._imgs_paths)))
    def __len__(self):
        if (self._bucket_name is None):
            # Synthetic mode: report a fixed size of 102400 samples.
            return (1024 * 100)
        return len(self._imgs_paths)
    def __getitem__(self, idx):
        if (self._bucket_name is None):
            # Synthetic mode: random image and random label in [0, 1000).
            array = np.random.rand(256, 256, 3)
            img = Image.fromarray(array, mode='RGB')
            return (transformfn.to_tensor(img), np.random.choice(1000))
        img_path = self._imgs_paths[idx]
        blob = self._bucket.blob(img_path)
        if self._streaming:
            img_str = download_gcs_blob_with_backoff(blob)
        else:
            # Download-once mode: cache the blob on local disk, then read
            # from the cache on subsequent accesses.
            target_path = os.path.join(self._target_dir, img_path)
            if (not os.path.exists(target_path)):
                create_file_dirs(target_path)
                print('downloading...')
                img_str = download_gcs_blob_with_backoff(blob)
                with open(target_path, 'wb') as f:
                    f.write(img_str)
            else:
                with open(target_path, 'rb') as f:
                    img_str = f.read()
        img_bytes = BytesIO(img_str)
        img = Image.open(img_bytes)
        img = img.convert('RGB')
        if (self._transform is not None):
            img = self._transform(img)
        return (img, self._labels[idx])
def test_reduce_1d_strided():
    """Summing a non-contiguous NumPy view through Awkward must match np.sum."""
    strided_view = np.arange(64, dtype=np.int64)[::3]
    layout = ak.contents.NumpyArray(strided_view)
    assert not layout.is_contiguous
    assert ak.sum(layout, axis=-1) == np.sum(strided_view, axis=-1)
def read_concode_examples(filename, data_num):
    """Read up to `data_num` CONCODE examples (one JSON object per line).

    Each line must contain 'nl' (natural language) and 'code' fields; a
    non-positive `data_num` reads the whole file.
    """
    examples = []
    with open(filename) as f:
        for count, line in enumerate(f, start=1):
            record = json.loads(line)
            examples.append(Example(idx=count - 1, source=record['nl'].strip(), target=record['code'].strip()))
            if count == data_num:
                break
    return examples
def test_module_parameter_path():
    """Parameters copied between two Example modules must compare equal and
    be registered under the same '/conv/W' path."""
    x = nn.Variable((4, 3, 32, 32))
    e = Example()
    # Calling the module creates its parameters; the output itself is unused.
    h = e(x)
    e2 = Example()
    assert (not e2.get_parameters()), "It doesn't have any parameters so far."
    e2.set_parameters(e.get_parameters())
    assert (e.get_parameters() == e2.get_parameters()), 'They have the same parameters.'
    assert ('/conv/W' in e.get_parameters())
    assert ('/conv/W' in e2.get_parameters())
def numpy_set_unused(v):
    """Release `v`'s backing SharedNumpyArray buffer, if any; no-op for None."""
    if v is None:
        return
    assert isinstance(v, numpy.ndarray)
    base = v.base
    if isinstance(base, SharedNumpyArray):
        # The shared buffer must still be marked in-use before release.
        assert base.is_in_use()
        base.set_unused()
class Decoder(DecoderBase):
    """Decodes raw TIU / DMA command buffers into BaseTpuCmd objects, using
    each command's head fields to look up the concrete register class."""
    # Head sizes used to parse the fixed-size command headers.
    # NOTE(review): units (bits vs bytes) not evident from this chunk — confirm.
    tiu_head_length = 50
    dma_head_length = 39
    def decode_tiu_cmd(self, reg_buf: memoryview, *, offset, subnet_id) -> BaseTpuCmd:
        """Decode a single TIU command starting at `offset` in `reg_buf`."""
        head = TiuHead.from_buffer(reg_buf, offset)
        # (cmd_short, tsk_typ, tsk_eu_typ) uniquely selects the op descriptor.
        op_info = tiu_index.get((bool(head.cmd_short), head.tsk_typ, head.tsk_eu_typ), None)
        assert (op_info is not None), f'Unable to decode TIU code at offset {offset} out of {len(reg_buf)} total. Potential head identified as {head}'
        op_clazz = op_class_dic[op_info.name]
        reg = self.decode_reg(op_clazz, reg_buf, offset=offset)
        # op_clazz.length is in bits; slice out the raw command bytes.
        buf = reg_buf[offset:(offset + (op_clazz.length // 8))]
        cmd = op_info(reg, buf=buf, subnet_id=subnet_id)
        return cmd
    def decode_dma_cmd(self, reg_buf: memoryview, *, offset, subnet_id) -> BaseTpuCmd:
        """Decode a single DMA command starting at `offset` in `reg_buf`."""
        head = DmaHead.from_buffer(reg_buf, offset)
        # (cmd_short, cmd_type, cmd_sp_func) uniquely selects the op descriptor.
        op_info = dma_index.get((bool(head.cmd_short), head.cmd_type, head.cmd_sp_func), None)
        assert (op_info is not None), f'Unable to decode DMA code at offset {offset} out of {len(reg_buf)} total. Potential head identified as {head}'
        op_clazz = op_class_dic[op_info.name]
        reg = self.decode_reg(op_clazz, reg_buf, offset=offset)
        buf = reg_buf[offset:(offset + (op_clazz.length // 8))]
        cmd = op_info(reg, buf=buf, subnet_id=subnet_id)
        return cmd
    def decode_dma_cmds(self, reg_buf: memoryview, subnet_id=0, **_) -> List[BaseTpuCmd]:
        """Decode consecutive DMA commands until the buffer (or a sys/end
        command followed by padding) ends."""
        offset = 0
        res = []
        while (offset < len(reg_buf)):
            cmd = self.decode_dma_cmd(reg_buf, offset=offset, subnet_id=subnet_id)
            # Advance by the decoded command's size (reg.length is in bits).
            offset += (cmd.reg.length // 8)
            res.append(cmd)
            if self.buf_is_end(reg_buf[offset:], cmd, dma_sys):
                break
        return res
    def decode_tiu_cmds(self, reg_buf: memoryview, subnet_id=0, **_) -> List[BaseTpuCmd]:
        """Decode consecutive TIU commands until the buffer (or a sys/end
        command followed by padding) ends."""
        offset = 0
        res = []
        while (offset < len(reg_buf)):
            cmd = self.decode_tiu_cmd(reg_buf, offset=offset, subnet_id=subnet_id)
            offset += (cmd.reg.length // 8)
            res.append(cmd)
            if self.buf_is_end(reg_buf[offset:], cmd, tiu_sys):
                break
        return res
    def buf_is_end(self, reg_buf, operation: BaseTpuCmd, end_op):
        """True when `operation` is a sys/end command and the remaining buffer
        is short (< 1025 bits) and entirely zero (i.e. just padding)."""
        is_sys = isinstance(operation.reg, end_op)
        is_less_1024 = ((len(reg_buf) * 8) < 1025)
        if (is_sys and is_less_1024 and (not np.any(np.frombuffer(reg_buf, np.uint8)))):
            return True
        return False
def test_mrmr_regression_with_scores():
    """mRMR regression on the polars frame should select the expected
    features and return pandas relevance/redundancy score objects."""
    selected_features, relevance, redundancy = mrmr.polars.mrmr_regression(
        df=df_polars,
        K=4,
        target_column=target_column_regression,
        features=features,
        denominator='mean',
        only_same_domain=False,
        return_scores=True,
        show_progress=True,
    )
    assert set(selected_features) == {'some_null', 'feature_a'}
    assert isinstance(relevance, pd.Series)
    assert isinstance(redundancy, pd.DataFrame)
def validate(val_loader, model, criterion, args, epoch, tb_logger):
    """Evaluate `model` over `val_loader`; returns the top-1 accuracy average.

    Logs top-1/top-5 to tensorboard only on GPU rank 0.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('', ':6.2f')
    top5 = AverageMeter('', ':6.2f')
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5], prefix='Test: ')
    model.eval()
    # No gradients needed during evaluation.
    with torch.no_grad():
        end = time.time()
        for (i, (images, target)) in enumerate(val_loader):
            if (args.gpu is not None):
                images = images.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            output = model(images)
            loss = criterion(output, target)
            (acc1, acc5) = accuracy(output, target, topk=(1, 5))
            # Weight each meter update by the batch size.
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            batch_time.update((time.time() - end))
            end = time.time()
            if ((i % args.print_freq) == 0):
                progress.display(i)
        print(' * {top1.avg:.3f} {top5.avg:.3f}'.format(top1=top1, top5=top5))
        # Only the rank-0 GPU writes tensorboard scalars.
        if (args.gpu == 0):
            tb_logger.log_value('Top1 Acc', top1.avg, epoch)
            tb_logger.log_value('Top5 Acc', top5.avg, epoch)
    return top1.avg
def test_gc_head():
    """GCHead must expose two convs, own a gc_block, and keep spatial size."""
    head = GCHead(in_channels=4, channels=4, num_classes=19)
    assert len(head.convs) == 2
    assert hasattr(head, 'gc_block')
    inputs = [torch.randn(1, 4, 23, 23)]
    if torch.cuda.is_available():
        head, inputs = to_cuda(head, inputs)
    outputs = head(inputs)
    expected_shape = (1, head.num_classes, 23, 23)
    assert outputs.shape == expected_shape
def _evalcode_python(executor, code, input_type):
    """Evaluate `code` in the globals/locals of the most recent Python frame
    (relative to the gdb frame currently selected)."""
    global_dict = gdb.parse_and_eval('PyEval_GetGlobals()')
    local_dict = gdb.parse_and_eval('PyEval_GetLocals()')
    # A NULL pointer from either call means no Python frame is available.
    if ((pointervalue(global_dict) == 0) or (pointervalue(local_dict) == 0)):
        raise gdb.GdbError('Unable to find the locals or globals of the most recent Python function (relative to the selected frame).')
    return executor.evalcode(code, input_type, global_dict, local_dict)
class WSGenerator(dataset.Generator):
    """Generates connected Watts-Strogatz graphs whose neighbor count and
    rewiring probability are sampled from beta distributions."""

    def __init__(self, sizes, density_alpha=1.3, rewire_alpha=2, rewire_beta=2, **kwargs):
        super(WSGenerator, self).__init__(sizes, **kwargs)
        self.density_alpha = density_alpha
        self.rewire_alpha = rewire_alpha
        self.rewire_beta = rewire_beta

    def generate(self, size=None):
        """Sample one connected W-S graph with `size` (or a default) nodes."""
        num_nodes = self._get_size(size)
        curr_num_graphs = 0
        density_alpha = self.density_alpha
        # Choose the second beta parameter so the expected density (k / n)
        # is log2(n) / n.
        density_mean = (np.log2(num_nodes) / num_nodes)
        density_beta = ((density_alpha / density_mean) - density_alpha)
        rewire_alpha = self.rewire_alpha
        rewire_beta = self.rewire_beta
        # Retry until networkx succeeds in producing a connected graph.
        while (curr_num_graphs < 1):
            k = int((np.random.beta(density_alpha, density_beta) * num_nodes))
            k = max(k, 2)
            p = np.random.beta(rewire_alpha, rewire_beta)
            try:
                graph = nx.connected_watts_strogatz_graph(num_nodes, k, p)
                curr_num_graphs += 1
            except Exception:
                # BUG FIX: was a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit; keep the retry-on-failure
                # behavior but only for ordinary exceptions (e.g. nx could
                # not build a connected graph for the sampled k/p).
                pass
        logging.debug('Generated {}-node W-S graph with average density: {}'.format(num_nodes, density_mean))
        return graph
def _get_repo(repo):
    """Return a cached _Repo for `repo`, creating and caching it on first use."""
    assert isinstance(repo, str)
    cached = _repo_cache.get(repo)
    if cached:
        return cached
    fresh = _Repo(repo)
    _repo_cache[repo] = fresh
    return fresh
# NOTE(review): `_numpy_output(...)` looks like a decorator for the following
# test whose leading '@' was lost during extraction — TODO confirm.
_numpy_output(check_dtype=True)
def test_ufunc_square_f(A: dace.float32[10]):
    """Element-wise square of a float32 vector via the NumPy ufunc."""
    return np.square(A)
# NOTE(review): stray `_paths` — probably the tail of a stripped decorator or
# a truncated statement from extraction; verify against the original file.
_paths
def parse_args(args=None, namespace=None):
    """Build the audio-extraction CLI parser and parse `args` with it."""
    arg_parser = argparse.ArgumentParser(description='Extract audio from videos.')
    arg_parser.add_argument(
        '-i', '--in_dir',
        default=pathlib.Path('data/vggsound/vggsound/'),
        type=pathlib.Path,
        help='input directory')
    arg_parser.add_argument(
        '-o', '--out_dir',
        default=pathlib.Path('data/vggsound/audio/'),
        type=pathlib.Path,
        help='output directory')
    arg_parser.add_argument(
        '-r', '--rate', default=16000, type=int, help='sampling rate')
    arg_parser.add_argument(
        '-s', '--skip_existing', action='store_true',
        help='whether to skip existing outputs')
    arg_parser.add_argument(
        '-e', '--ignore_exceptions', action='store_true',
        help='whether to ignore all exceptions')
    arg_parser.add_argument(
        '-j', '--jobs', default=1, type=int, help='number of jobs')
    arg_parser.add_argument(
        '-q', '--quiet', action='store_true', help='show warnings only')
    return arg_parser.parse_args(args=args, namespace=namespace)
class XLMRobertaConverter(SpmConverter):
    """Converts an XLM-RoBERTa SentencePiece tokenizer to the fast format."""
    def vocab(self, proto):
        # Fairseq-style layout: the four specials come first, then the SPM
        # pieces minus its own first three entries, with <mask> appended last.
        vocab = [('<s>', 0.0), ('<pad>', 0.0), ('</s>', 0.0), ('<unk>', 0.0)]
        vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
        vocab += [('<mask>', 0.0)]
        return vocab
    def unk_id(self, proto):
        # <unk> sits at index 3 in the fairseq-aligned vocabulary above.
        unk_id = 3
        return unk_id
    def post_processor(self):
        # Wrap singles as "<s> A </s>" and pairs as "<s> A </s> </s> B </s>".
        return processors.TemplateProcessing(single='<s> $A </s>', pair='<s> $A </s> </s> $B </s>', special_tokens=[('<s>', self.original_tokenizer.convert_tokens_to_ids('<s>')), ('</s>', self.original_tokenizer.convert_tokens_to_ids('</s>'))])
def converId(img_id):
    """Map a split-prefixed image id ('train-N' / 'val-N' / 'test-N') to a
    single numeric id: train ids are kept as-is, val ids are offset by
    1,000,000 and test ids by 2,000,000 so the ranges cannot collide.

    Raises ValueError for an unrecognized prefix (the original dropped into
    pdb.set_trace() and then crashed with UnboundLocalError).
    """
    parts = img_id.split('-')
    if ('train' in parts[0]):
        return int(parts[1])
    if ('val' in parts[0]):
        return (int(parts[1]) + 1000000)
    if ('test' in parts[0]):
        return (int(parts[1]) + 2000000)
    raise ValueError('Unrecognized image id prefix: {!r}'.format(img_id))
def layout():
    """Build the Dash layout for the 'daily proportion of women quoted' page:
    a date-range picker plus one loading-wrapped graph per news outlet
    (graphs are populated elsewhere via callbacks keyed on the ids here)."""
    children_list = [html.Div([html.H2('Daily proportion of women quoted'), html.Div(dcc.Markdown('\n The below charts showcase a 7-day moving average of the daily\n proportion of women quoted for each outlet since October 2018.\n The pink line indicates the linear trendline over this period.\n ')), html.Br(), dcc.Markdown('Select a start and end date from the widget below.'), html.Div(dcc.DatePickerRange(id='date-picker-range', min_date_allowed=date(2018, 10, 1), max_date_allowed=(datetime.today().date() - timedelta(days=1)), start_date=date(2018, 10, 1), end_date=(datetime.today().date() - timedelta(days=1)), initial_visible_month=datetime.today().date())), dcc.Store(id='stored-df-data'), html.Br(), html.H5('CBC News'), html.Div(dcc.Loading(id='loading-progress-1', children=[html.Div(dcc.Graph(id='cbc-news-graph'), className='chart')])), html.H5('CTV News'), html.Div(dcc.Loading(id='loading-progress-2', children=[html.Div(dcc.Graph(id='ctv-news-graph'), className='chart')])), html.H5('Global News'), html.Div(dcc.Loading(id='loading-progress-3', children=[html.Div(dcc.Graph(id='global-news-graph'), className='chart')])), html.H5('Huffington Post'), dcc.Markdown('\n HuffPost Canada stopped publishing as of March 2021, so we\n see the total number of articles for HuffPost drop to zero after\n this period.\n '), html.Div(dcc.Loading(id='loading-progress-4', children=[html.Div(dcc.Graph(id='huffington-post-graph'), className='chart')])), html.H5('National Post'), html.Div(dcc.Loading(id='loading-progress-5', children=[html.Div(dcc.Graph(id='national-post-graph'), className='chart')])), html.H5('The Globe And Mail'), html.Div(dcc.Loading(id='loading-progress-6', children=[html.Div(dcc.Graph(id='globe-and-mail-graph'), className='chart')])), html.H5('The Toronto Star'), html.Div(dcc.Loading(id='loading-progress-7', children=[html.Div(dcc.Graph(id='the-toronto-star-graph'), className='chart')]))])]
    return children_list
def convert_examples_to_features(examples, label_list, label_list_tagging, max_seq_length, tokenizer, sel_prob, train_type='train'):
    """Convert raw examples into padded BERT `InputFeatures`.

    Two modes per example:
      * sentence pair (text_b != 'NONE'): classification — `label_id` is set
        from `label_list` and every tagging label is -1 (ignored);
      * single sentence: disfluency tagging — per-token `disf_label` ids are
        produced and `label_id` is -1.

    NOTE(review): `sel_prob` and `train_type` are unused in this function —
    possibly consumed by a sibling variant; confirm before removing.
    """
    label_map = {label: i for (i, label) in enumerate(label_list)}
    label_tagging_map = {label: i for (i, label) in enumerate(label_list_tagging)}
    features = []
    for (ex_index, example) in enumerate(examples):
        tokens_a = None
        tokens_b = None
        if (example.text_b != 'NONE'):
            tokens_a = tokenizer.tokenize(example.text_a)
            tokens_b = tokenizer.tokenize(example.text_b)
            # Reserve room for [CLS], [SEP], [SEP].
            _truncate_seq_pair(tokens_a, tokens_b, (max_seq_length - 3))
        else:
            label_disf_id = label_to_map(example.disf_label, label_tagging_map)
            (tokens_a, disf_label) = random_word_no_prob(example.text_a, example.disf_label.strip().split(' '), label_disf_id, tokenizer)
            if (len(tokens_a) > (max_seq_length - 2)):
                # Reserve room for [CLS] and [SEP]; clip labels in lockstep.
                tokens_a = tokens_a[:(max_seq_length - 2)]
                disf_label = disf_label[:(max_seq_length - 2)]
        if tokens_b:
            tokens = ((['[CLS]'] + tokens_a) + ['[SEP]'])
            segment_ids = ([0] * len(tokens))
            tokens += (tokens_b + ['[SEP]'])
            # Segment 1 covers tokens_b and its trailing [SEP].
            segment_ids += ([1] * (len(tokens_b) + 1))
            label_id = label_map[example.label]
            # Tagging labels are ignored (-1) in pair/classification mode.
            disf_label_id = ([(- 1)] * len(tokens))
        else:
            tokens = ((['[CLS]'] + tokens_a) + ['[SEP]'])
            segment_ids = ([0] * len(tokens))
            label_id = (- 1)
            # -1 on [CLS]/[SEP] so the special tokens are masked from the loss.
            disf_label_id = (([(- 1)] + disf_label) + [(- 1)])
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = ([1] * len(input_ids))
        # Zero-pad inputs (and -1-pad tagging labels) up to max_seq_length.
        padding = ([0] * (max_seq_length - len(input_ids)))
        padding_disf = ([(- 1)] * (max_seq_length - len(input_ids)))
        input_ids += padding
        input_mask += padding
        segment_ids += padding
        disf_label_id += padding_disf
        assert (len(input_ids) == max_seq_length)
        assert (len(input_mask) == max_seq_length)
        assert (len(segment_ids) == max_seq_length)
        assert (len(disf_label_id) == max_seq_length)
        features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, label_disf_id=disf_label_id))
    return features
class _PackageResourceReader():
    """importlib.resources-style reader backed by a package importer's
    zip/directory record reader."""
    def __init__(self, importer, fullname):
        self.importer = importer
        self.fullname = fullname
    def open_resource(self, resource):
        """Return the resource's bytes wrapped in a BytesIO stream."""
        from io import BytesIO
        return BytesIO(self.importer.load_binary(self.fullname, resource))
    def resource_path(self, resource):
        # Only a real on-disk directory can provide a filesystem path;
        # otherwise FileNotFoundError per the importlib.resources contract.
        if (isinstance(self.importer.zip_reader, DirectoryReader) and self.importer.zip_reader.has_record(os.path.join(self.fullname, resource))):
            return os.path.join(self.importer.zip_reader.directory, self.fullname, resource)
        raise FileNotFoundError
    def is_resource(self, name):
        """True when `name` exists as a record inside this package."""
        path = self.importer._zipfile_path(self.fullname, name)
        return self.importer.zip_reader.has_record(path)
    def contents(self):
        """Yield the names of files and (once each) subdirectories directly
        under this package."""
        from pathlib import Path
        # NOTE(review): this assignment is dead — `filename` is immediately
        # rebound by the loop below; kept as-is to preserve the code exactly.
        filename = self.fullname.replace('.', '/')
        fullname_path = Path(self.importer._zipfile_path(self.fullname))
        files = self.importer.zip_reader.get_all_records()
        subdirs_seen = set()
        for filename in files:
            try:
                relative = Path(filename).relative_to(fullname_path)
            except ValueError:
                # Record lives outside this package; skip it.
                continue
            parent_name = relative.parent.name
            if (len(parent_name) == 0):
                # Direct child of the package: yield the file name itself.
                (yield relative.name)
            elif (parent_name not in subdirs_seen):
                # First record seen in a subdirectory: yield the subdir once.
                subdirs_seen.add(parent_name)
                (yield parent_name)
class MLPMergeModel(Model):
    """MLP that merges two inputs (e.g. state and action) by concatenating
    the second input at `concat_layer` within the hidden stack."""
    def __init__(self, output_dim, name='MLPMergeModel', hidden_sizes=(32, 32), concat_layer=(- 2), hidden_nonlinearity=tf.nn.relu, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), output_nonlinearity=None, output_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), output_b_init=tf.zeros_initializer(), layer_normalization=False):
        super().__init__(name)
        self._output_dim = output_dim
        self._hidden_sizes = hidden_sizes
        # Index of the hidden layer at which input_var2 is concatenated
        # (negative indices count from the output side).
        self._concat_layer = concat_layer
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        self._layer_normalization = layer_normalization
    def network_input_spec(self):
        """Names of the two network inputs, in order."""
        return ['input_var1', 'input_var2']
    def _build(self, state_input, action_input, name=None):
        # `name` is unused; the mlp helper supplies its own scope name.
        del name
        return mlp(input_var=state_input, output_dim=self._output_dim, hidden_sizes=self._hidden_sizes, input_var2=action_input, concat_layer=self._concat_layer, name='mlp_concat', hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self._hidden_w_init, hidden_b_init=self._hidden_b_init, output_nonlinearity=self._output_nonlinearity, output_w_init=self._output_w_init, output_b_init=self._output_b_init, layer_normalization=self._layer_normalization)
class LnStructured(BasePruningMethod):
    """Structured pruning: zeroes out entire slices along `dim`, keeping the
    slices with the largest L_n norm."""
    PRUNING_TYPE = 'structured'
    def __init__(self, amount, n, dim=(- 1)):
        # `amount` may be a fraction in [0, 1] or an absolute count;
        # validated here before being stored.
        _validate_pruning_amount_init(amount)
        self.amount = amount
        self.n = n
        self.dim = dim
    def compute_mask(self, t, default_mask):
        """Return a mask zeroing the lowest-norm slices of `t` along
        `self.dim`, combined (multiplied) with `default_mask`."""
        _validate_structured_pruning(t)
        _validate_pruning_dim(t, self.dim)
        tensor_size = t.shape[self.dim]
        nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
        nparams_tokeep = (tensor_size - nparams_toprune)
        _validate_pruning_amount(nparams_toprune, tensor_size)
        norm = _compute_norm(t, self.n, self.dim)
        # Select the highest-norm slices to keep.
        topk = torch.topk(norm, k=nparams_tokeep, largest=True)
        def make_mask(t, dim, indices):
            # Build a mask with ones only on the kept indices along `dim`.
            mask = torch.zeros_like(t)
            slc = ([slice(None)] * len(t.shape))
            slc[dim] = indices
            mask[slc] = 1
            return mask
        if (nparams_toprune == 0):
            mask = default_mask
        else:
            mask = make_mask(t, self.dim, topk.indices)
            # Respect entries already pruned by default_mask.
            mask *= default_mask.to(dtype=mask.dtype)
        return mask
    def apply(cls, module, name, amount, n, dim, importance_scores=None):
        # NOTE(review): takes `cls` — almost certainly a @classmethod whose
        # decorator was stripped during extraction; TODO confirm.
        return super(LnStructured, cls).apply(module, name, amount=amount, n=n, dim=dim, importance_scores=importance_scores)
def get_logger(logpath, filepath, package_files=[], displaying=True, saving=True, debug=False):
    """Configure and return the root logger.

    Logs at DEBUG when `debug`, else INFO. Optionally appends to `logpath`
    (`saving`) and echoes to the console (`displaying`); then logs the path
    and contents of `filepath` and of every file in `package_files`.

    NOTE: `package_files=[]` is a mutable default; it is never mutated here,
    but callers should still prefer passing their own list.
    """
    level = logging.DEBUG if debug else logging.INFO
    logger = logging.getLogger()
    logger.setLevel(level)
    if saving:
        file_handler = logging.FileHandler(logpath, mode='a')
        file_handler.setLevel(level)
        logger.addHandler(file_handler)
    if displaying:
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(level)
        logger.addHandler(stream_handler)
    # Record the source file's path and full contents for reproducibility.
    logger.info(filepath)
    with open(filepath, 'r') as f:
        logger.info(f.read())
    for package_file in package_files:
        logger.info(package_file)
        with open(package_file, 'r') as pf:
            logger.info(pf.read())
    return logger
class DirectoryLocator(Locator):
    """Locator that scans a local directory tree for downloadable archives."""
    def __init__(self, path, **kwargs):
        # `recursive` is popped before delegating so the base class never
        # sees it; defaults to walking the whole tree.
        self.recursive = kwargs.pop('recursive', True)
        super(DirectoryLocator, self).__init__(**kwargs)
        path = os.path.abspath(path)
        if (not os.path.isdir(path)):
            raise DistlibException(('Not a directory: %r' % path))
        self.base_dir = path
    def should_include(self, filename, parent):
        """Filter hook: keep only files with a downloadable extension."""
        return filename.endswith(self.downloadable_extensions)
    def _get_project(self, name):
        """Collect version metadata for `name` from matching files on disk."""
        result = {'urls': {}, 'digests': {}}
        for (root, dirs, files) in os.walk(self.base_dir):
            for fn in files:
                if self.should_include(fn, root):
                    fn = os.path.join(root, fn)
                    # Convert the local path into a file:// URL so the usual
                    # URL-based metadata extraction applies.
                    url = urlunparse(('file', '', pathname2url(os.path.abspath(fn)), '', '', ''))
                    info = self.convert_url_to_download_info(url, name)
                    if info:
                        self._update_version_data(result, info)
            if (not self.recursive):
                # Non-recursive: stop os.walk after the top-level directory.
                break
        return result
    def get_distribution_names(self):
        """Return the set of project names found anywhere under base_dir."""
        result = set()
        for (root, dirs, files) in os.walk(self.base_dir):
            for fn in files:
                if self.should_include(fn, root):
                    fn = os.path.join(root, fn)
                    url = urlunparse(('file', '', pathname2url(os.path.abspath(fn)), '', '', ''))
                    # name=None extracts the project name from the file itself.
                    info = self.convert_url_to_download_info(url, None)
                    if info:
                        result.add(info['name'])
            if (not self.recursive):
                break
        return result
def get_scores_for_imputer(imputer, X_missing, y_missing):
    """Cross-validate `imputer` + the module-level regressor on data with
    missing values; returns the per-fold negated MSE scores."""
    pipeline = make_pipeline(imputer, regressor)
    return cross_val_score(pipeline, X_missing, y_missing, scoring='neg_mean_squared_error', cv=N_SPLITS)
def fit_str(string, colwidth=16):
    """Right-align `string` within `colwidth` characters, truncating when
    it is longer than the column (equal-length strings pass through)."""
    if len(string) < colwidth:
        return string.rjust(colwidth)
    return string[:colwidth]
def boomerang_force_calculator(location, orientation):
    """Total external force (gravity plus wall repulsion) on a boomerang body.

    Only the first entries of `location` / `orientation` are used.  Returns a
    length-3 force vector (numpy array).
    NOTE(review): relies on module globals M, A, REPULSION_STRENGTH and
    DEBYE_LENGTH plus the helper get_boomerang_r_vectors_15 — assumed to be
    blob masses, blob radius and screened-repulsion parameters; confirm in
    the module setup.
    """
    # Gravity acts along -z with magnitude equal to the summed blob masses.
    gravity = np.array([0.0, 0.0, ((- 1.0) * sum(M))])
    r_vectors = get_boomerang_r_vectors_15(location[0], orientation[0])
    repulsion = 0.0
    for k in range(len(r_vectors)):
        # h: height of blob k above the wall (z component of its position).
        h = r_vectors[k][2]
        # Screened (Yukawa-like) wall repulsion evaluated at gap (h - A);
        # the scalar 0.0 is promoted to a length-3 array on first addition.
        repulsion += np.array([0.0, 0.0, (((REPULSION_STRENGTH * (((h - A) / DEBYE_LENGTH) + 1)) * np.exp((((- 1.0) * (h - A)) / DEBYE_LENGTH))) / ((h - A) ** 2))])
    return (repulsion + gravity)
def test_nested_default_arg_reuse_2():
    """Regression test: calling the same callable twice — first overriding a
    default argument, then relying on it — must not reuse the overridden
    value on the second call."""
    class MyClass():

        def __call__(self, arr: dace.float64[20], qmin: float=0.0):
            # Forwards to `nested`; qmin falls back to 0.0 when omitted.
            self.nested(arr, qmin)

        def nested(self, arr: dace.float64[20], qmin: float):
            # Fill the whole array with qmin.
            arr[:] = qmin
    a = MyClass()

    def tester(arr: dace.float64[20], arr2: dace.float64[20], qmin: float):
        # First call overrides the default (1.0); the second must use the
        # declared default 0.0, not the value from the previous call.
        a(arr, qmin=1.0)
        a(arr2)
    myarr = np.random.rand(20)
    myarr2 = np.random.rand(20)
    tester(myarr, myarr2, 2.0)
    assert np.allclose(myarr, 1.0)
    assert np.allclose(myarr2, 0.0)
class AdainResBlk(nn.Module):
    """Residual block with AdaIN style conditioning and optional upsampling.

    When `w_hpf` is zero the residual and shortcut paths are summed and
    scaled by 1/sqrt(2) to preserve unit variance; otherwise only the
    residual path is returned.
    """

    def __init__(self, dim_in, dim_out, style_dim=64, w_hpf=0, actv=nn.LeakyReLU(0.2), upsample='none'):
        super().__init__()
        self.w_hpf = w_hpf
        self.actv = actv
        self.upsample = UpSample(upsample)
        # A learned 1x1 shortcut is only needed when channels change.
        self.learned_sc = (dim_in != dim_out)
        self._build_weights(dim_in, dim_out, style_dim)

    def _build_weights(self, dim_in, dim_out, style_dim=64):
        # Two 3x3 convs; each preceded (in _residual) by style-driven AdaIN.
        self.conv1 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
        self.conv2 = nn.Conv2d(dim_out, dim_out, 3, 1, 1)
        self.norm1 = AdaIN(style_dim, dim_in)
        self.norm2 = AdaIN(style_dim, dim_out)
        if self.learned_sc:
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)

    def _shortcut(self, x):
        # Identity (after upsampling) unless the channel count changed.
        out = self.upsample(x)
        return self.conv1x1(out) if self.learned_sc else out

    def _residual(self, x, s):
        # norm -> act -> upsample -> conv, twice (second stage not upsampled).
        h = self.actv(self.norm1(x, s))
        h = self.conv1(self.upsample(h))
        h = self.actv(self.norm2(h, s))
        return self.conv2(h)

    def forward(self, x, s):
        out = self._residual(x, s)
        if self.w_hpf != 0:
            return out
        return (out + self._shortcut(x)) / math.sqrt(2)
class SELayer(nn.Module):
    """Squeeze-and-Excitation layer: global-pool, channel bottleneck, rescale.

    `act_cfg` may be a single dict (used for both conv stages) or a pair of
    dicts, one per stage.
    """

    def __init__(self, channels, ratio=16, conv_cfg=None, act_cfg=(dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, divisor=6.0))):
        super(SELayer, self).__init__()
        # A single activation config is duplicated across both stages.
        if isinstance(act_cfg, dict):
            act_cfg = (act_cfg, act_cfg)
        assert len(act_cfg) == 2
        assert mmcv.is_tuple_of(act_cfg, dict)
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        # Bottleneck width: channels/ratio rounded to a multiple of 8.
        squeezed = make_divisible(channels // ratio, 8)
        self.conv1 = ConvModule(in_channels=channels, out_channels=squeezed, kernel_size=1, stride=1, conv_cfg=conv_cfg, act_cfg=act_cfg[0])
        self.conv2 = ConvModule(in_channels=squeezed, out_channels=channels, kernel_size=1, stride=1, conv_cfg=conv_cfg, act_cfg=act_cfg[1])

    def forward(self, x):
        # Squeeze to 1x1, excite through the bottleneck, rescale the input.
        weight = self.conv2(self.conv1(self.global_avgpool(x)))
        return x * weight
def get_padded_batch(file_list, batch_size, input_size, output_size, num_enqueuing_threads=4, num_epochs=1, shuffle=True):
    """Build a TF1 queue pipeline reading SequenceExamples from TFRecords.

    Returns the dequeue op of a PaddingFIFOQueue yielding
    (inputs, labels, genders, lengths); variable-length sequences are
    zero-padded to the longest in the batch.
    NOTE(review): 'genders' is parsed as a FixedLenSequenceFeature of shape
    [2] but enqueued with static shape (1, 2) — this assumes exactly one
    gender frame per example; confirm against the data writer.
    """
    file_queue = tf.train.string_input_producer(file_list, num_epochs=num_epochs, shuffle=shuffle)
    reader = tf.TFRecordReader()
    (_, serialized_example) = reader.read(file_queue)
    sequence_features = {'inputs': tf.FixedLenSequenceFeature(shape=[input_size], dtype=tf.float32), 'labels': tf.FixedLenSequenceFeature(shape=[output_size], dtype=tf.float32), 'genders': tf.FixedLenSequenceFeature(shape=[2], dtype=tf.float32)}
    (_, sequence) = tf.parse_single_sequence_example(serialized_example, sequence_features=sequence_features)
    # Unpadded sequence length; batched alongside the data for masking.
    length = tf.shape(sequence['inputs'])[0]
    # Capacity sized so enqueuing threads can stay ahead of consumers.
    capacity = (1000 + ((num_enqueuing_threads + 1) * batch_size))
    # dtypes/shapes order must match the enqueue tuple below.
    queue = tf.PaddingFIFOQueue(capacity=capacity, dtypes=[tf.float32, tf.float32, tf.float32, tf.int32], shapes=[(None, input_size), (None, output_size), (1, 2), ()])
    # One enqueue op per thread; the QueueRunner starts them with the session.
    enqueue_ops = ([queue.enqueue([sequence['inputs'], sequence['labels'], sequence['genders'], length])] * num_enqueuing_threads)
    tf.train.add_queue_runner(tf.train.QueueRunner(queue, enqueue_ops))
    return queue.dequeue_many(batch_size)
class _grid_encode(Function):
    """Autograd Function wrapping the compiled multi-resolution grid encoder.

    forward/backward delegate the heavy lifting to the native `_backend`
    extension; this class only arranges tensors and saved state.
    """

    # NOTE(review): the original source had a bare `_fwd` statement here —
    # almost certainly a mangled decorator.  Restored as @staticmethod +
    # torch.cuda.amp.custom_fwd (the body does its own half casting below);
    # confirm against the upstream gridencoder source.
    @staticmethod
    @torch.cuda.amp.custom_fwd
    def forward(ctx, inputs, embeddings, offsets, per_level_scale, base_resolution, calc_grad_inputs=False, gridtype=0, align_corners=False, interpolation=0):
        # inputs: [B, D] coordinates; embeddings: [offsets[-1], C] table.
        inputs = inputs.contiguous()
        (B, D) = inputs.shape
        L = (offsets.shape[0] - 1)   # number of resolution levels
        C = embeddings.shape[1]      # channels per level
        S = np.log2(per_level_scale) # log2 of the per-level scale factor
        H = base_resolution
        # Under autocast, run the kernel in half precision when C is even.
        if (torch.is_autocast_enabled() and ((C % 2) == 0)):
            embeddings = embeddings.to(torch.half)
        outputs = torch.empty(L, B, C, device=inputs.device, dtype=embeddings.dtype)
        if calc_grad_inputs:
            # dy_dx caches d(output)/d(input) for the backward pass.
            dy_dx = torch.empty(B, ((L * D) * C), device=inputs.device, dtype=embeddings.dtype)
        else:
            dy_dx = None
        _backend.grid_encode_forward(inputs, embeddings, offsets, outputs, B, D, C, L, S, H, dy_dx, gridtype, align_corners, interpolation)
        # [L, B, C] -> [B, L * C]
        outputs = outputs.permute(1, 0, 2).reshape(B, (L * C))
        ctx.save_for_backward(inputs, embeddings, offsets, dy_dx)
        ctx.dims = [B, D, C, L, S, H, gridtype, interpolation]
        ctx.align_corners = align_corners
        return outputs

    # NOTE(review): original had a bare `_bwd` line; restored as custom_bwd.
    @staticmethod
    @torch.cuda.amp.custom_bwd
    def backward(ctx, grad):
        (inputs, embeddings, offsets, dy_dx) = ctx.saved_tensors
        (B, D, C, L, S, H, gridtype, interpolation) = ctx.dims
        align_corners = ctx.align_corners
        # [B, L * C] -> [L, B, C], matching the forward kernel's layout.
        grad = grad.view(B, L, C).permute(1, 0, 2).contiguous()
        grad_embeddings = torch.zeros_like(embeddings)
        if (dy_dx is not None):
            grad_inputs = torch.zeros_like(inputs, dtype=embeddings.dtype)
        else:
            grad_inputs = None
        _backend.grid_encode_backward(grad, inputs, embeddings, offsets, grad_embeddings, B, D, C, L, S, H, dy_dx, grad_inputs, gridtype, align_corners, interpolation)
        if (dy_dx is not None):
            grad_inputs = grad_inputs.to(inputs.dtype)
        # One gradient slot per forward argument; non-tensor args get None.
        return (grad_inputs, grad_embeddings, None, None, None, None, None, None, None)
class UnetSkipConnectionBlock(nn.Module):
    """One recursive level of a U-Net: |down -- submodule -- up| + skip.

    Outermost blocks return the up-path output directly; every inner block
    concatenates its own input onto its output along the channel axis,
    forming the skip connection.
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm carries no affine bias, so the convs need their own.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)
        if outermost:
            # Top level: no activation before downconv, no norm at the end.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
            layers = [downconv, submodule, uprelu, upconv]
        elif innermost:
            # Bottom level: no submodule; down path skips normalization.
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            layers = [downrelu, downconv, uprelu, upconv, upnorm]
        else:
            # Intermediate level: concatenated skip doubles upconv input.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            layers = [downrelu, downconv, downnorm, submodule, uprelu, upconv, upnorm]
            if use_dropout:
                layers = layers + [nn.Dropout(0.5)]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        # Skip connection: stack the input channels with the block's output.
        return torch.cat([x, self.model(x)], 1)
class BoundingBox():
    """A single 2D bounding box tied to an image.

    Coordinates are stored internally in absolute pixels, in both
    (x, y, w, h) and (x1, y1, x2, y2) form.  Boxes can be constructed from
    absolute or relative (normalized) coordinates, in XYWH or XYX2Y2 format.

    Fixes over the original: `compare` and `clone` called nonexistent
    camelCase accessors; `compare` also compared an image size against
    itself; `clone` passed four positional coordinates where a tuple is
    expected.  Class-level utilities are now explicit @staticmethods.
    """

    def __init__(self, image_name, class_id=None, coordinates=None, type_coordinates=CoordinatesType.ABSOLUTE, img_size=None, bb_type=BBType.GROUND_TRUTH, confidence=None, format=BBFormat.XYWH):
        """Create a bounding box.

        coordinates: 4-tuple in `format`, absolute or relative according to
            `type_coordinates`.
        img_size: (width, height); required when coordinates are relative.
        bb_type: ground truth or detection; detections must carry confidence.
        """
        self._image_name = image_name
        self._type_coordinates = type_coordinates
        self._confidence = confidence
        self._class_id = class_id
        self._format = format
        if (bb_type == BBType.DETECTED) and (confidence is None):
            raise IOError("For bb_type='Detected', it is necessary to inform the confidence value.")
        self._bb_type = bb_type
        if img_size is None:
            self._width_img = None
            self._height_img = None
        else:
            self._width_img = img_size[0]
            self._height_img = img_size[1]
        self.set_coordinates(coordinates, img_size=img_size, type_coordinates=self._type_coordinates)

    def set_coordinates(self, coordinates, type_coordinates, img_size=None):
        """Set the box coordinates, converting relative values to absolute."""
        self._type_coordinates = type_coordinates
        if (type_coordinates == CoordinatesType.RELATIVE) and (img_size is None):
            raise IOError("Parameter 'img_size' is required. It is necessary to inform the image size.")
        if type_coordinates == CoordinatesType.RELATIVE:
            self._width_img = img_size[0]
            self._height_img = img_size[1]
            if self._format == BBFormat.XYWH:
                # convert_to_absolute_values yields corner coordinates here:
                # _w/_h temporarily hold x2/y2 and are recomputed below.
                (self._x, self._y, self._w, self._h) = convert_to_absolute_values(img_size, coordinates)
                self._x2 = self._w
                self._y2 = self._h
                self._w = self._x2 - self._x
                self._h = self._y2 - self._y
            elif self._format == BBFormat.XYX2Y2:
                (x1, y1, x2, y2) = coordinates
                # Scale normalized corners back to pixel coordinates.
                self._x = round(x1 * self._width_img)
                self._x2 = round(x2 * self._width_img)
                self._y = round(y1 * self._height_img)
                self._y2 = round(y2 * self._height_img)
                self._w = self._x2 - self._x
                self._h = self._y2 - self._y
            else:
                raise IOError('For relative coordinates, the format must be XYWH (x,y,width,height)')
        else:
            # Absolute coordinates: derive the missing representation.
            self._x = coordinates[0]
            self._y = coordinates[1]
            if self._format == BBFormat.XYWH:
                self._w = coordinates[2]
                self._h = coordinates[3]
                self._x2 = self._x + self._w
                self._y2 = self._y + self._h
            else:
                self._x2 = coordinates[2]
                self._y2 = coordinates[3]
                self._w = self._x2 - self._x
                self._h = self._y2 - self._y
        # Store everything as float for consistent arithmetic downstream.
        self._x = float(self._x)
        self._y = float(self._y)
        self._w = float(self._w)
        self._h = float(self._h)
        self._x2 = float(self._x2)
        self._y2 = float(self._y2)

    def get_absolute_bounding_box(self, format=BBFormat.XYWH):
        """Return absolute coords as (x, y, w, h) or (x1, y1, x2, y2)."""
        if format == BBFormat.XYWH:
            return (self._x, self._y, self._w, self._h)
        elif format == BBFormat.XYX2Y2:
            return (self._x, self._y, self._x2, self._y2)

    def get_relative_bounding_box(self, img_size=None):
        """Return coordinates normalized by `img_size` (or the stored size)."""
        if (img_size is None) and (self._width_img is None) and (self._height_img is None):
            raise IOError("Parameter 'img_size' is required. It is necessary to inform the image size.")
        if img_size is not None:
            return convert_to_relative_values((img_size[0], img_size[1]), (self._x, self._x2, self._y, self._y2))
        else:
            return convert_to_relative_values((self._width_img, self._height_img), (self._x, self._x2, self._y, self._y2))

    def get_image_name(self):
        """Name of the image this box belongs to."""
        return self._image_name

    def get_confidence(self):
        """Detection confidence (None for ground truth)."""
        return self._confidence

    def get_format(self):
        """Coordinate format the box was created with."""
        return self._format

    def set_class_id(self, class_id):
        self._class_id = class_id

    def set_bb_type(self, bb_type):
        self._bb_type = bb_type

    def get_class_id(self):
        return self._class_id

    def get_image_size(self):
        """(width, height) of the image, or (None, None) if unknown."""
        return (self._width_img, self._height_img)

    def get_area(self):
        """Pixel area using inclusive (+1) bounds on each side."""
        assert isclose(self._w * self._h, (self._x2 - self._x) * (self._y2 - self._y))
        assert (self._x2 > self._x)
        assert (self._y2 > self._y)
        return ((self._x2 - self._x) + 1) * ((self._y2 - self._y) + 1)

    def get_coordinates_type(self):
        return self._type_coordinates

    def get_bb_type(self):
        return self._bb_type

    def __str__(self):
        abs_bb_xywh = self.get_absolute_bounding_box(format=BBFormat.XYWH)
        abs_bb_xyx2y2 = self.get_absolute_bounding_box(format=BBFormat.XYX2Y2)
        area = self.get_area()
        return f'''image name: {self._image_name}
image size: {self.get_image_size()}
class: {self._class_id}
bb (XYWH): {abs_bb_xywh}
bb (X1Y1X2Y2): {abs_bb_xyx2y2}
area: {area}
bb_type: {self._bb_type}'''

    def __eq__(self, other):
        # Equality is defined through the string representation above.
        if not isinstance(other, BoundingBox):
            return False
        return str(self) == str(other)

    @staticmethod
    def compare(det1, det2):
        """True if both boxes agree on class, confidence, coordinates and
        image size.  (Original called nonexistent camelCase accessors and
        compared an image size against itself.)"""
        det1BB = det1.get_absolute_bounding_box()
        det1img_size = det1.get_image_size()
        det2BB = det2.get_absolute_bounding_box()
        det2img_size = det2.get_image_size()
        if ((det1.get_class_id() == det2.get_class_id()) and (det1.get_confidence() == det2.get_confidence()) and (det1BB[0] == det2BB[0]) and (det1BB[1] == det2BB[1]) and (det1BB[2] == det2BB[2]) and (det1BB[3] == det2BB[3]) and (det1img_size[0] == det2img_size[0]) and (det1img_size[1] == det2img_size[1])):
            return True
        return False

    @staticmethod
    def clone(bounding_box):
        """Return a copy of `bounding_box` built from its absolute XYWH
        coordinates (hence declared ABSOLUTE on the copy)."""
        absBB = bounding_box.get_absolute_bounding_box(format=BBFormat.XYWH)
        return BoundingBox(bounding_box.get_image_name(), class_id=bounding_box.get_class_id(), coordinates=absBB, type_coordinates=CoordinatesType.ABSOLUTE, img_size=bounding_box.get_image_size(), bb_type=bounding_box.get_bb_type(), confidence=bounding_box.get_confidence(), format=BBFormat.XYWH)

    @staticmethod
    def iou(boxA, boxB):
        """Intersection-over-union of two boxes, in [0, 1]."""
        coords_A = boxA.get_absolute_bounding_box(format=BBFormat.XYX2Y2)
        coords_B = boxB.get_absolute_bounding_box(format=BBFormat.XYX2Y2)
        if BoundingBox.have_intersection(coords_A, coords_B) is False:
            return 0
        interArea = BoundingBox.get_intersection_area(coords_A, coords_B)
        union = BoundingBox.get_union_areas(boxA, boxB, interArea=interArea)
        iou = interArea / union
        assert iou >= 0
        return iou

    @staticmethod
    def have_intersection(boxA, boxB):
        """True if the two (x1, y1, x2, y2) boxes overlap at all."""
        if isinstance(boxA, BoundingBox):
            boxA = boxA.get_absolute_bounding_box(BBFormat.XYX2Y2)
        if isinstance(boxB, BoundingBox):
            boxB = boxB.get_absolute_bounding_box(BBFormat.XYX2Y2)
        if boxA[0] > boxB[2]:
            return False
        if boxB[0] > boxA[2]:
            return False
        if boxA[3] < boxB[1]:
            return False
        if boxA[1] > boxB[3]:
            return False
        return True

    @staticmethod
    def get_intersection_area(boxA, boxB):
        """Inclusive-pixel area of the overlap region (assumes overlap)."""
        if isinstance(boxA, BoundingBox):
            boxA = boxA.get_absolute_bounding_box(BBFormat.XYX2Y2)
        if isinstance(boxB, BoundingBox):
            boxB = boxB.get_absolute_bounding_box(BBFormat.XYX2Y2)
        xA = max(boxA[0], boxB[0])
        yA = max(boxA[1], boxB[1])
        xB = min(boxA[2], boxB[2])
        yB = min(boxA[3], boxB[3])
        return ((xB - xA) + 1) * ((yB - yA) + 1)

    @staticmethod
    def get_union_areas(boxA, boxB, interArea=None):
        """Area of the union of two boxes."""
        area_A = boxA.get_area()
        area_B = boxB.get_area()
        if interArea is None:
            interArea = BoundingBox.get_intersection_area(boxA, boxB)
        return float((area_A + area_B) - interArea)

    @staticmethod
    def get_amount_bounding_box_all_classes(bounding_boxes, reverse=False):
        """Map class id -> number of boxes, sorted by count."""
        classes = list(set([bb._class_id for bb in bounding_boxes]))
        ret = {}
        for c in classes:
            ret[c] = len(BoundingBox.get_bounding_box_by_class(bounding_boxes, c))
        ret = {k: v for (k, v) in sorted(ret.items(), key=(lambda item: item[1]), reverse=reverse)}
        return ret

    @staticmethod
    def get_bounding_box_by_class(bounding_boxes, class_id):
        """All boxes of a given class."""
        return [bb for bb in bounding_boxes if (bb.get_class_id() == class_id)]

    @staticmethod
    def get_bounding_boxes_by_image_name(bounding_boxes, image_name):
        """All boxes belonging to a given image."""
        return [bb for bb in bounding_boxes if (bb.get_image_name() == image_name)]

    @staticmethod
    def get_total_images(bounding_boxes):
        """Number of distinct images referenced by the boxes."""
        return len(list(set([bb.get_image_name() for bb in bounding_boxes])))

    @staticmethod
    def get_average_area(bounding_boxes):
        """Mean box area (raises ZeroDivisionError on an empty list)."""
        areas = [bb.get_area() for bb in bounding_boxes]
        return sum(areas) / len(areas)
def set_separate_embeddings(model, mapping):
    """Replace the model's input embeddings with a MyEmbedding2 wrapper around
    the existing token embedding table and the given mapping."""
    wrapped = MyEmbedding2(model.transformer.wte, mapping)
    model.set_input_embeddings(wrapped)
class SemEvalHook(Hook):
    """Evaluation hook: every `at_every_epoch` epochs (at iteration 0) runs the
    model over `batcher` and prints a SemEval-style classification report.

    Fixes over the original: the softmax/argmax ops were built inside the
    batch loop, growing the TF graph on every batch; they are now built once
    per evaluation.  The deprecated alias tf.arg_max is replaced by tf.argmax.
    """

    def __init__(self, batcher, placeholders, at_every_epoch):
        self.batcher = batcher            # iterable of feed-value tuples
        self.placeholders = placeholders  # placeholders matching each tuple
        self.at_every_epoch = at_every_epoch

    def __call__(self, sess, epoch, iteration, model, loss):
        if (iteration == 0) and ((epoch % self.at_every_epoch) == 0):
            total = 0
            correct = 0
            truth_all = []
            pred_all = []
            # Build the prediction op ONCE: creating it per batch leaks
            # graph nodes in TF1.
            predict_op = tf.argmax(tf.nn.softmax(model), 1)
            for values in self.batcher:
                # Last element of each tuple holds the one-hot gold labels.
                total += len(values[(- 1)])
                feed_dict = {}
                for i in range(0, len(self.placeholders)):
                    feed_dict[self.placeholders[i]] = values[i]
                truth = np.argmax(values[(- 1)], 1)
                predicted = sess.run(predict_op, feed_dict=feed_dict)
                correct += sum((truth == predicted))
                truth_all.extend(truth)
                pred_all.extend(predicted)
            print(classification_report(truth_all, pred_all, target_names=['NONE', 'AGAINST', 'FAVOR'], digits=4))
def test_ngrams_for_evaluation():
    """ngrams_for_evaluation yields (token, context) pairs; predict_first
    additionally yields the first token with an empty context."""
    from speechbrain.lm.counting import ngrams_for_evaluation
    tokens = ['a', 'b', 'c']
    expected_tail = [('b', ('a',)), ('c', ('a', 'b'))]
    assert list(ngrams_for_evaluation(tokens, max_n=3)) == expected_tail
    assert list(ngrams_for_evaluation(tokens, max_n=3, predict_first=True)) == [('a', ())] + expected_tail
def test_hist():
    """Exactly one training-history file must have been written."""
    matches = glob.glob('test_trainer_outputs/history.npy')
    assert len(matches) == 1
def execute_onnx(model, input_dict, return_full_exec_context=False, start_node=None, end_node=None):
    """Execute the ONNX graph of `model` (a ModelWrapper) on `input_dict`.

    Depending on the model's 'exec_mode' metadata, the graph is either
    interpreted node-by-node in Python (default), or dispatched whole to
    remote PYNQ execution / RTL simulation.  Returns a dict of graph outputs,
    or the full execution context when `return_full_exec_context` is True.
    `start_node`/`end_node` restrict node-by-node execution to an inclusive
    subgraph.
    """
    if (not model.check_all_tensor_shapes_specified()):
        raise Exception('Found unspecified tensor shapes, try infer_shapes')
    ret = model.analysis(ta.nodes_topologically_sorted)
    assert (ret['nodes_topologically_sorted'] is True), 'Nodes must be\n topologically sorted.'
    graph = model.graph
    # Execution context maps tensor name -> value; seed it with the inputs.
    execution_context = model.make_empty_exec_context()
    for inp_name in input_dict.keys():
        # NOTE(review): input names not present in the context are silently
        # ignored here.
        if (inp_name in execution_context):
            if (execution_context[inp_name].shape == input_dict[inp_name].shape):
                execution_context[inp_name] = input_dict[inp_name]
            else:
                raise Exception(('Shape mismatch for provided input %s: found %s expected %s ' % (inp_name, str(execution_context[inp_name].shape), str(input_dict[inp_name].shape))))
    model_exec_mode = model.get_metadata_prop('exec_mode')
    if ((model_exec_mode is None) or (model_exec_mode == '')):
        # Default mode: interpret the graph node by node in Python.
        opset_version = model.model.opset_import[0].version
        subgraph = []
        if (start_node is None):
            start_node = model.graph.node[0]
        if (end_node is None):
            end_node = model.graph.node[(- 1)]
        start_ind = model.get_node_index(start_node)
        end_ind = (model.get_node_index(end_node) + 1)
        assert (end_ind >= start_ind), 'Start/end nodes must define valid subgraph'
        subgraph = graph.node[start_ind:end_ind]
        for node in subgraph:
            if (get_sanitize_quant_tensors() != 0):
                # Optionally sanitize quantized values around each node.
                execution_context = sanitize_quant_values(model, node.input, execution_context)
            execute_node(node, execution_context, graph, return_full_exec_context, opset_version)
            if (get_sanitize_quant_tensors() != 0):
                execution_context = sanitize_quant_values(model, node.output, execution_context)
    elif (model_exec_mode == 'remote_pynq'):
        # Hand the whole model to remote execution on a PYNQ board.
        remote_exec(model, execution_context)
    elif (model_exec_mode == 'rtlsim'):
        # Hand the whole model to RTL simulation via pyverilator.
        rtlsim_exec(model, execution_context)
    else:
        raise Exception('Metadata property "exec_mode" is set to an unknown value.\n Can be left unset or has to be set to "remote_pynq" for remote execution\n on PYNQ board or "rtlsim" for execution using pyverilator!')
    if return_full_exec_context:
        return execution_context
    else:
        # Collect only the declared graph outputs.
        output_dict = dict()
        for out_tensor in graph.output:
            out_name = out_tensor.name
            output_dict[out_name] = execution_context[out_name]
        return output_dict
def normalize_args_vectorspace(*args, **kwds):
    """Normalize arguments to a (degree, ring) pair.

    Accepts either a single vector space / free module, or a (degree, ring)
    pair.  An integer second argument is interpreted as the order of a
    finite field, whose variable name is taken from kwds['var'] (default
    'a').  The returned degree is coerced into ZZ.
    """
    from sage.rings.integer_ring import ZZ
    if len(args) == 1:
        space = args[0]
        try:
            degree = space.dimension_relative()
        except AttributeError:
            degree = space.dimension()
        ring = space.base_ring()
    if len(args) == 2:
        (degree, ring) = args
        try:
            # An integral ring argument means "the field GF(ring)".
            ring = ZZ(ring)
            from sage.rings.finite_rings.finite_field_constructor import FiniteField
            var = kwds.get('var', 'a')
            ring = FiniteField(ring, var)
        except (ValueError, TypeError):
            # Not an integer: keep the ring as given.
            pass
    return (ZZ(degree), ring)
def test_conv1d_same_out():
    """Conv1d with padding='same' must keep the time dim and, with equal
    in/out dims here, the feature dim of its input."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            # Kernel size 4 with 'same' padding preserves the spatial length.
            self.conv = rf.Conv1d(in_dim, in_dim, 4, padding='same')
        def __call__(self, x: rf.Tensor) -> Tensor:
            (x, _) = self.conv(x, in_spatial_dim=time_dim)
            return x
    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        # Output must still be [batch, time, in] — same dims as the input.
        out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step)
def make_cc_vector(sequence_list, lag, phyche_value, k):
phyche_values = list(phyche_value.values())
len_phyche_value = len(phyche_values[0])
vec_cc = []
for sequence in sequence_list:
len_seq = len(sequence)
each_vec = []
for temp_lag in range(1, (lag + 1)):
for i1 in range(len_phyche_value):
for i2 in range(len_phyche_value):
if (i1 != i2):
ave_phyche_value1 = 0.0
ave_phyche_value2 = 0.0
for j in range((((len_seq - temp_lag) - k) + 1)):
nucleotide = sequence[j:(j + k)]
ave_phyche_value1 += float(phyche_value[nucleotide][i1])
ave_phyche_value2 += float(phyche_value[nucleotide][i2])
ave_phyche_value1 /= len_seq
ave_phyche_value2 /= len_seq
temp_sum = 0.0
for j in range((((len_seq - temp_lag) - k) + 1)):
nucleotide1 = sequence[j:(j + k)]
nucleotide2 = sequence[(j + temp_lag):((j + temp_lag) + k)]
temp_sum += ((float(phyche_value[nucleotide1][i1]) - ave_phyche_value1) * (float(phyche_value[nucleotide2][i2]) - ave_phyche_value2))
each_vec.append(round((temp_sum / (((len_seq - temp_lag) - k) + 1)), 3))
vec_cc.append(each_vec)
return vec_cc |
def _default_template_ctx_processor():
    """Default template context processor: injects `g` from the app context
    and `request`/`session` from the request context, when each is active."""
    ctx = {}
    app_ctx = _app_ctx_stack.top
    req_ctx = _request_ctx_stack.top
    if app_ctx is not None:
        ctx['g'] = app_ctx.g
    if req_ctx is not None:
        ctx['request'] = req_ctx.request
        ctx['session'] = req_ctx.session
    return ctx
def parse_args():
    """Parse command-line options for training / testing runs."""
    parser = argparse.ArgumentParser(description='None')
    parser.add_argument('--config', help='config file path')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--phase', choices=['test', 'train'], default='test')
    parser.add_argument('--work_dir', help='the dir to save logs and models')
    parser.add_argument('--load_from', default=None, help='the checkpoint file to load from')
    parser.add_argument('--resume_from', default=None, help='the checkpoint file to resume from')
    parser.add_argument('--gpus', type=int, default=1, help='number of gpus(only applicable to non-distributed training)')
    # Boolean switches, all off by default.
    for flag in ('--random_conns', '--distributed', '--eval_interim', '--save_output', '--no_cuda', '--force'):
        parser.add_argument(flag, action='store_true', default=False)
    parser.add_argument('--k_num2', type=str, default='60')
    parser.add_argument('--k_num3', type=str, default='100')
    return parser.parse_args()
class Runner():
    """Training / validation / testing driver for the generator model.

    All settings come from the module-level config `c`; helpers from `u`.
    Fixes over the original: (1) model, optimizer, loss and iteration state
    are now created in BOTH modes — the original built them only in the
    training branch, so TEST_ONLY mode crashed with AttributeError; (2)
    string comparisons use `==` instead of the identity operator `is`.
    """

    def __init__(self):
        self.t0 = time()
        self.hours_pretrained = 0
        if c.TEST_ONLY:
            print('TESTING ONLY')
            # Collect every .wav file under the test directories.
            self.tst_file_list = []
            for data_dir in c.TEST_DIRS:
                self.tst_file_list += sorted(glob.glob((data_dir + '/**/*.wav'), recursive=True))
        else:
            print('Generator: {}'.format(c.MODEL))
            print('Training filter(s): {}'.format(str(c.FILTERS_TRAIN)))
            print('Unseen validation filter(s): {}'.format(str(c.FILTERS_VALID)))
            trn_file_list = []
            for data_dir in c.TRAIN_DIRS:
                trn_file_list += sorted(glob.glob((data_dir + '/**/*.wav'), recursive=True))
            # Hold out the last songs for validation; keep >= 1 training file.
            n_songs_valid = min(c.N_SONGS_VALID, (len(trn_file_list) - 1))
            self.val_file_list = trn_file_list[(- n_songs_valid):]
            trn_file_list = trn_file_list[:(- n_songs_valid)]
            trn_dataset = DatasetAudio(trn_file_list, c.SAMPLE_LEN, c.CUTOFF, c.FILTERS_TRAIN)
            self.trn_loader = DataLoader(trn_dataset, batch_size=c.BATCH_SIZE, shuffle=True, num_workers=c.NUM_WORKERS, collate_fn=filter_collate)
        # Model / optimizer state is needed in both training and test modes.
        self.gen_model = GenModel(batchnorm=c.BATCHNORM, dropout=c.DROPOUT).to(c.DEVICE)
        self.gen_optimizer = torch.optim.Adam(self.gen_model.parameters(), lr=c.LEARNING_RATE)
        if c.ADAPTIVE_LR:
            # Scheduler direction depends on whether the tracked metric is a
            # loss (minimize) or an SNR (maximize).
            if 'loss' in c.METRIC_TRAIN:
                scheduler_mode = 'min'
            elif 'snr' in c.METRIC_TRAIN:
                scheduler_mode = 'max'
            self.gen_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.gen_optimizer, mode=scheduler_mode, factor=c.LR_FACTOR, threshold=1e-06, verbose=False, patience=c.PATIENCE, cooldown=c.PATIENCE)
        self.mse_loss = torch.nn.MSELoss().to(c.DEVICE)
        self.iter_total = 0
        self.iter_val = c.ITER_VAL
        self.first_epoch = True
        if c.LOAD_MODEL:
            self.load_model()
        else:
            self.gen_model.apply(u.weights_init_normal)

    def train(self):
        """Optimize until MAX_ITER is reached or the LR falls to MIN_LR."""
        self.gen_model.train()
        train_averager = u.MovingAverages()
        while (self.iter_total < c.MAX_ITER) and (self.gen_optimizer.param_groups[0]['lr'] > c.MIN_LR):
            for (x, t) in self.trn_loader:
                x = x.to(c.DEVICE)
                t = t.to(c.DEVICE)
                self.gen_optimizer.zero_grad()
                y = self.gen_model(x)
                gen_loss = self.mse_loss(y, t)
                gen_loss.backward()
                self.gen_optimizer.step()
                with torch.no_grad():
                    train_snr = u.snr_torch(y, t)
                    train_averager({'gen_loss': gen_loss, 'snr': train_snr})
                self.iter_total += 1
                if (self.iter_total % self.iter_val) == 0:
                    # Periodic report: training metrics plus (optionally)
                    # validation on seen and unseen filters.
                    train_performance = train_averager.get()
                    train_averager.reset()
                    if c.VALID:
                        val_seen_performance = self.run_songs(c.FILTERS_TRAIN, 'valid')
                        val_unseen_performance = self.run_songs(c.FILTERS_VALID, 'valid')
                        self.print_performance(train_performance, val_seen_performance, val_unseen_performance)
                    if c.SAVE_MODEL:
                        self.save_model()
                    lr_old = self.gen_optimizer.param_groups[0]['lr']
                    if c.ADAPTIVE_LR:
                        self.gen_scheduler.step(train_performance['gen_loss'])
                    lr_new = self.gen_optimizer.param_groups[0]['lr']
                    if lr_new != lr_old:
                        print('New learning rate: {:.1e}'.format(lr_new))
                    self.first_epoch = False

    def run_songs(self, filters, mode):
        """Evaluate whole songs in 'valid' or 'test' mode; returns averaged
        metrics over the song list."""
        self.gen_model.eval()
        averager = u.MovingAverages()
        output = True
        overwrite = True
        # Fixed: original used `mode is 'valid'` (string identity, fragile).
        if mode == 'valid':
            song_list = self.val_file_list
            duration = c.DURATION_VALID
            start = c.START_VALID
        elif mode == 'test':
            song_list = self.tst_file_list
            duration = c.DURATION_TEST
            start = 0
        else:
            raise ValueError('Mode can be valid or test')
        with torch.no_grad():
            for (i, song_path) in enumerate(song_list):
                # A single filter applies to all songs, otherwise one each.
                if len(filters) == 1:
                    filter_ = filters[0]
                else:
                    filter_ = filters[i]
                results = self.run_single_song(song_path, filter_, c.CUTOFF, duration=duration, start=start, save=output, overwrite=overwrite)
                averager(results)
        performance = averager.get()
        averager.reset()
        return performance

    def save_model(self):
        """Checkpoint model + optimizer state to MODEL_DIR/SAVE_NAME.pt."""
        data = {'iteration': self.iter_total, 'hours': self.hours_pretrained, 'gen_model_state_dict': self.gen_model.state_dict(), 'gen_optimizer_state_dict': self.gen_optimizer.state_dict()}
        torch.save(data, os.path.join(c.MODEL_DIR, (c.SAVE_NAME + '.pt')))

    def load_model(self):
        """Restore a checkpoint (optionally resetting the learning rate)."""
        checkpoint = torch.load(os.path.join(c.MODEL_DIR, c.LOAD_MODEL), map_location=c.DEVICE)
        self.iter_init = checkpoint['iteration']
        self.iter_total = checkpoint['iteration']
        self.hours_pretrained = checkpoint['hours']
        self.gen_model.load_state_dict(checkpoint['gen_model_state_dict'])
        self.gen_optimizer.load_state_dict(checkpoint['gen_optimizer_state_dict'])
        print('Model loaded from {}'.format(os.path.join(c.MODEL_DIR, c.LOAD_MODEL)))
        if c.OVERWRITE_LR:
            for param_group in self.gen_optimizer.param_groups:
                param_group['lr'] = c.LEARNING_RATE
            message = 'Learning rate set to {:.2e}'.format(c.LEARNING_RATE)
            print(message)

    def run_single_song(self, hq_path, filter_, cutoff, duration=None, start=0, save=True, overwrite=True):
        """Run one song through the generator; return its metrics and
        optionally write the enhanced audio to GENERATION_DIR."""
        self.gen_model.eval()
        with torch.no_grad():
            song_data = SingleSong(c.WAV_SAMPLE_LEN, filter_, hq_path, cutoff=cutoff, duration=duration, start=start)
            song_loader = DataLoader(song_data, batch_size=c.WAV_BATCH_SIZE, shuffle=False, num_workers=c.NUM_WORKERS)
            y_full = song_data.preallocate()
            song_averager = u.MovingAverages()
            idx_start_chunk = 0
            for (x, t) in song_loader:
                x = x.to(c.DEVICE)
                t = t.to(c.DEVICE)
                y = self.gen_model(x)
                loss = F.mse_loss(y, t)
                song_averager({'loss': loss})
                # Copy this batch of chunks into the preallocated output.
                idx_end_chunk = (idx_start_chunk + y.shape[0])
                y_full[idx_start_chunk:idx_end_chunk] = y
                idx_start_chunk = idx_end_chunk
            y_full = u.torch2np(y_full)
            y_full = np.concatenate(y_full, axis=(- 1))
            (x_full, t_full) = song_data.get_full_signals()
            # Clip into the valid wav range to avoid overflow at +1.0.
            y_full = np.clip(y_full, (- 1), (1 - np.finfo(np.float32).eps))
            performance = song_averager.get()
            song_averager.reset()
            snr_ = u.snr(y_full, t_full)
            performance.update({'snr': snr_})
            if self.first_epoch:
                # Report the degraded input's SNR once, as a baseline.
                snr_input = u.snr(x_full, t_full)
                performance.update({'input_snr': snr_input})
            if save:
                song_name = hq_path.split('/')[(- 1)].split('.')[0]
                if 'mixture' in song_name:
                    song_name = hq_path.split('/')[(- 2)]
                # Sanitize characters that are awkward in filenames.
                problem_str = [' - ', ' & ', ' &', "'", ' ']
                for s in problem_str:
                    song_name = song_name.replace(s, '_')
                if not overwrite:
                    song_name = ((u.pad_str_zeros(str(self.iter_total), 7) + '_') + song_name)
                wavfile.write(os.path.join(c.GENERATION_DIR, (((song_name + '_') + filter_[0]) + '.wav')), c.SAMPLE_RATE, y_full.T)
        return performance

    def print_performance(self, train_performance, val_seen_performance, val_unseen_performance):
        """Pretty-print training/validation metrics and elapsed time."""
        seconds = int((time() - self.t0))
        hours = (seconds / 3600.0)
        hours += self.hours_pretrained
        msg = ''
        if self.first_epoch:
            msg += 'Validation input SNRs | Seen filter: {:6.2f} | Unseen filter: {:6.2f}\n'.format(val_seen_performance['input_snr'], val_unseen_performance['input_snr'])
        msg += '{:24s} | SNR: {:6.2f} | Gen loss: {:7.2e}'.format('Training', train_performance['snr'], train_performance['gen_loss'])
        msg += '\n{:24s} | SNR: {:6.2f} | Gen loss: {:7.2e}'.format('Validation | Seen filter', val_seen_performance['snr'], val_seen_performance['loss'])
        msg += ' | Unseen filter | SNR: {:6.2f} | Gen loss: {:7.2e}'.format(val_unseen_performance['snr'], val_unseen_performance['loss'])
        msg += ' | Iterations:{:7d} | Elapsed:{:3.1f}h\n'.format(self.iter_total, hours)
        print(msg)

    def run(self):
        """Entry point: train, or evaluate each test filter in TEST_ONLY mode."""
        if not c.TEST_ONLY:
            self.train()
        else:
            for filter_ in c.FILTERS_TEST:
                tst_performance = self.run_songs([filter_], 'test')
                minutes = ((time() - self.t0) / 60.0)
                print('Filter: {:2.0f}th order {:8s} | SNRs: | Input: {:6.2f} | Output: {:6.2f} | Elapsed: {:4.1f}m'.format(filter_[1], filter_[0], tst_performance['input_snr'], tst_performance['snr'], minutes))
def edit_distance(str1, str2):
    """Normalized edit distance in [0, 1] between two strings.

    Uses python-Levenshtein when available (distance / longest length);
    otherwise falls back to difflib's SequenceMatcher ratio (spaces treated
    as junk).  Fixes over the original: the bare `except:` that swallowed
    every error is narrowed to ImportError, and the two-empty-strings case
    (which previously raised ZeroDivisionError into that bare except) is
    handled explicitly.
    """
    try:
        import Levenshtein
    except ImportError:
        Levenshtein = None
    if Levenshtein is not None:
        longest = max(len(str1), len(str2))
        if longest == 0:
            # Two empty strings are identical.
            return 0.0
        return Levenshtein.distance(str1, str2) / float(longest)
    from difflib import SequenceMatcher
    return 1.0 - SequenceMatcher((lambda x: (x == ' ')), str1, str2).ratio()
def normal_prior(prior_std):
    """Return a prior-building function for tfp layers: an Independent
    Normal(0, prior_std) over the variable's full shape."""
    def _prior(dtype, shape, name, trainable, add_variable_fn):
        distributions = tfp.distributions
        base = distributions.Normal(loc=tf.zeros(shape, dtype), scale=dtype.as_numpy_dtype(prior_std))
        # Reinterpret every batch dimension as an event dimension.
        ndims = tf.size(input=base.batch_shape_tensor())
        return distributions.Independent(base, reinterpreted_batch_ndims=ndims)
    return _prior
class RecencyWeightedVariance(Variance):
    """Variance estimator whose mean tracking is exponentially recency-weighted.

    Fixes over the original: the source contained bare `recency_weight` /
    `_weight.setter` lines — mangled decorators that made the class raise
    NameError at definition time; restored as @property and
    @recency_weight.setter.
    """

    # Mean estimates use an exponential moving average.
    mean_class = ExponentialMovingAverage

    def __init__(self, recency_weight: float, **kwargs):
        super().__init__(**kwargs)
        # Routed through the property setter below, which also propagates
        # the weight to the underlying estimators.
        self.recency_weight = recency_weight

    @property
    def recency_weight(self):
        return self._recency_weight

    @recency_weight.setter
    def recency_weight(self, weight: float):
        # Weight must be in (0, 1]; propagate to the E[x] and E[x^2] trackers.
        assert (0.0 < weight <= 1.0)
        self._recency_weight = weight
        self.ex.recency_weight = weight
        self.ex2.recency_weight = weight

    def drop(self, x):
        # Recency weighting decays old samples implicitly; nothing to drop.
        pass
def Cyclic(R, n=None, homog=False, singular=None):
    """Ideal of the cyclic n-roots polynomial system in the ring R.

    Uses Singular's polylib library over a rational-coefficient copy of R,
    then maps the resulting ideal back to R.  When `homog` is True the
    ideal is homogenized with respect to generator n-1 of the QQ copy.
    NOTE(review): `if n:` treats n=0 like "not given" (falls back to
    R.ngens()); presumably intentional — confirm whether n=0 is valid input.
    """
    from .rational_field import RationalField
    if n:
        if (n > R.ngens()):
            raise ArithmeticError('n must be <= R.ngens()')
    else:
        n = R.ngens()
    if (singular is None):
        # Lazily fall back to the default Singular interface.
        from sage.interfaces.singular import singular as singular_default
        singular = singular_default
    singular.lib('polylib')
    # Work over QQ so Singular's polylib computation applies cleanly.
    R2 = R.change_ring(RationalField())
    R2._singular_().set_ring()
    if (not homog):
        I = singular.cyclic(n)
    else:
        # Homogenize using the last generator involved in the system.
        I = singular.cyclic(n).homog(R2.gen((n - 1)))
    return R2.ideal(I).change_ring(R)
class AdvContrastiveNMT(nn.Module):
    """T5-based NMT model trained with an adversarial contrastive loss.

    Combines the usual LM cross-entropy with a contrastive loss between
    mean-pooled encoder (source) and decoder (target) representations;
    a gradient-ascent perturbation of the decoder states serves as an
    extra hard negative, and a KL-constrained perturbation serves as an
    alternative positive.
    """

    def __init__(self, args):
        super(AdvContrastiveNMT, self).__init__()
        # Contrastive temperature and positive/negative perturbation sizes.
        self.tau = args.tau
        self.pos_eps = args.pos_eps
        self.neg_eps = args.neg_eps
        self.t5_model = T5ForConditionalGeneration.from_pretrained(args.t5_model)
        # Shared projection head applied before pooling for the contrast.
        self.projection = nn.Sequential(nn.Linear(args.hidden_size, args.hidden_size), nn.ReLU())

    def forward(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, lm_labels, adv=False):
        """Return the NLL loss, plus the contrastive loss when ``adv``.

        Runs encoder and decoder separately (older transformers API with
        ``past_key_value_states``) so the hidden states are available for
        the contrastive branch.
        """
        encoder = self.t5_model.get_encoder()
        decoder = self.t5_model.get_decoder()
        encoder_outputs = encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=None, head_mask=None)
        hidden_states = encoder_outputs[0]
        decoder_outputs = decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=None, past_key_value_states=None, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=None, use_cache=None)
        sequence_output = decoder_outputs[0]
        # T5 rescales decoder outputs before the (tied) LM head.
        sequence_output = (sequence_output * (self.t5_model.model_dim ** (- 0.5)))
        lm_logits = self.t5_model.lm_head(sequence_output)
        decoder_outputs = ((lm_logits,) + decoder_outputs[1:])
        vocab_size = lm_logits.size((- 1))
        criterion = nn.CrossEntropyLoss(ignore_index=(- 100))
        nll = criterion(lm_logits.view((- 1), vocab_size), lm_labels.view((- 1)))
        if adv:
            proj_enc_h = self.projection(hidden_states)
            proj_dec_h = self.projection(sequence_output)
            avg_doc = self.avg_pool(proj_enc_h, attention_mask)
            avg_abs = self.avg_pool(proj_dec_h, decoder_attention_mask)
            cos = nn.CosineSimilarity(dim=(- 1))
            cont_crit = nn.CrossEntropyLoss()
            # Pairwise source/target similarity; the diagonal holds positives.
            sim_matrix = cos(avg_doc.unsqueeze(1), avg_abs.unsqueeze(0))
            perturbed_dec = self.generate_adv(sequence_output, lm_labels)
            batch_size = input_ids.size(0)
            proj_pert_dec_h = self.projection(perturbed_dec)
            avg_pert = self.avg_pool(proj_pert_dec_h, decoder_attention_mask)
            # Hard-negative similarity column appended to the logits.
            adv_sim = cos(avg_doc, avg_pert).unsqueeze(1)
            pos_dec_hidden = self.generate_cont_adv(hidden_states, attention_mask, sequence_output, decoder_attention_mask, lm_logits, self.tau, self.pos_eps)
            avg_pos_dec = self.avg_pool(self.projection(pos_dec_hidden), decoder_attention_mask)
            pos_sim = cos(avg_doc, avg_pos_dec).unsqueeze((- 1))
            logits = (torch.cat([sim_matrix, adv_sim], 1) / self.tau)
            identity = torch.eye(batch_size, device=input_ids.device)
            # Swap the diagonal for the perturbed-positive similarity.
            pos_sim = (identity * pos_sim)
            neg_sim = sim_matrix.masked_fill((identity == 1), 0)
            new_sim_matrix = (pos_sim + neg_sim)
            # NOTE(review): unlike ``logits``, ``new_logits`` is not divided
            # by tau — confirm the asymmetry is intentional.
            new_logits = torch.cat([new_sim_matrix, adv_sim], 1)
            labels = torch.arange(batch_size, device=input_ids.device)
            cont_loss = cont_crit(logits, labels)
            new_cont_loss = cont_crit(new_logits, labels)
            cont_loss = (0.5 * (cont_loss + new_cont_loss))
            return (nll, cont_loss)
        else:
            return nll

    def generate_adv(self, dec_hiddens, lm_labels):
        """Gradient-ascent perturbation of decoder states (hard negative)."""
        dec_hiddens = dec_hiddens.detach()
        dec_hiddens.requires_grad = True
        lm_logits = self.t5_model.lm_head(dec_hiddens)
        criterion = nn.CrossEntropyLoss(ignore_index=(- 100))
        loss = criterion(lm_logits.view((- 1), lm_logits.size((- 1))), lm_labels.view((- 1)))
        loss.backward()
        dec_grad = dec_hiddens.grad.detach()
        l2_norm = torch.norm(dec_grad, dim=(- 1))
        # Normalize per position; epsilon guards zero-norm gradients.
        dec_grad /= (l2_norm.unsqueeze((- 1)) + 1e-12)
        perturbed_dec = (dec_hiddens + (self.neg_eps * dec_grad.detach()))
        # NOTE(review): self-assignment is a no-op — possibly a lost
        # ``.detach()``; confirm against the original implementation.
        perturbed_dec = perturbed_dec
        self.zero_grad()
        return perturbed_dec

    def generate_cont_adv(self, enc_hiddens, enc_mask, dec_hiddens, dec_mask, lm_logits, tau, eps):
        """Two-step perturbation producing a *positive* decoder state.

        First ascends the contrastive loss, then descends the KL between
        the perturbed predictions and the original ones, so the result is
        adversarial to the contrast but faithful to the LM distribution.
        """
        enc_hiddens = enc_hiddens.detach()
        dec_hiddens = dec_hiddens.detach()
        lm_logits = lm_logits.detach()
        dec_hiddens.requires_grad = True
        avg_enc = self.avg_pool(self.projection(enc_hiddens), enc_mask)
        avg_dec = self.avg_pool(self.projection(dec_hiddens), dec_mask)
        cos = nn.CosineSimilarity(dim=(- 1))
        logits = (cos(avg_enc.unsqueeze(1), avg_dec.unsqueeze(0)) / tau)
        cont_crit = nn.CrossEntropyLoss()
        labels = torch.arange(avg_enc.size(0), device=enc_hiddens.device)
        loss = cont_crit(logits, labels)
        loss.backward()
        dec_grad = dec_hiddens.grad.detach()
        l2_norm = torch.norm(dec_grad, dim=(- 1))
        dec_grad /= (l2_norm.unsqueeze((- 1)) + 1e-12)
        # Step 1: ascend the contrastive loss.
        perturb_dec_hidden = (dec_hiddens + (eps * dec_grad))
        perturb_dec_hidden = perturb_dec_hidden.detach()
        perturb_dec_hidden.requires_grad = True
        perturb_logits = self.t5_model.lm_head(perturb_dec_hidden)
        true_probs = F.softmax(lm_logits, (- 1))
        # Zero out padded positions before the KL term.
        true_probs = (true_probs * dec_mask.unsqueeze((- 1)).float())
        perturb_log_probs = F.log_softmax(perturb_logits, (- 1))
        kl_crit = nn.KLDivLoss(reduction='sum')
        vocab_size = lm_logits.size((- 1))
        kl = kl_crit(perturb_log_probs.view((- 1), vocab_size), true_probs.view((- 1), vocab_size))
        kl = (kl / torch.sum(dec_mask).float())
        kl.backward()
        kl_grad = perturb_dec_hidden.grad.detach()
        l2_norm = torch.norm(kl_grad, dim=(- 1))
        kl_grad /= (l2_norm.unsqueeze((- 1)) + 1e-12)
        # Step 2: descend the KL to stay close to the LM distribution.
        perturb_dec_hidden = (perturb_dec_hidden - (eps * kl_grad))
        return perturb_dec_hidden

    def avg_pool(self, hidden_states, mask):
        """Mean-pool ``hidden_states`` over time, ignoring masked positions."""
        length = torch.sum(mask, 1, keepdim=True).float()
        mask = mask.unsqueeze(2)
        hidden = hidden_states.masked_fill((mask == 0), 0.0)
        avg_hidden = (torch.sum(hidden, 1) / length)
        return avg_hidden
def main():
    """Evaluate a trained probe/classifier checkpoint on the TEST split
    and write metrics to ``results.json`` in the experiment directory.

    Relies on the module-level ``args`` namespace and the project helpers
    ``get_model``, ``get_cls_config``, ``get_dataloader``, ``get_evaluation``.

    FIX: the model was placed on the GPU twice (once before and once after
    loading the checkpoint); a single placement after loading suffices.
    The redundant f-string around 'results.json' is also dropped.
    """
    save_dir = f'exp/{args.probe_type}/{args.eval_dataset}/{args.framework}_{args.text_type}_{args.text_rep}/{args.lr}_{args.batch_size}'
    (pretrain_model, _, config) = get_model(args)
    (task_type, output_dim, loss_fn) = get_cls_config(args)
    model = CLSLayer(audio_encoder=pretrain_model.audio_encoder, audio_dim=256, output_dim=output_dim, task_type=task_type, probe_type=args.probe_type, dropout=args.dropout, loss_fn=loss_fn)
    # Load the best checkpoint (CPU-mapped) before moving to the GPU.
    pretrained_object = torch.load(f'{save_dir}/best.pth', map_location='cpu')
    model.load_state_dict(pretrained_object['state_dict'])
    test_loader = get_dataloader(args=args, split='TEST')
    torch.cuda.set_device(args.gpu)
    model = model.cuda(args.gpu)
    cudnn.benchmark = True
    model.eval()
    (predictions, groundtruths) = ([], [])
    for batch in tqdm(test_loader):
        x = batch['audio']
        y = batch['binary']
        if args.gpu is not None:
            x = x.cuda(args.gpu, non_blocking=True)
            y = y.cuda(args.gpu, non_blocking=True)
        with torch.no_grad():
            predict = model.test_forward(x)
            # Average chunk-level predictions into one track-level vector.
            predictions.append(predict.mean(0, True).detach().cpu())
            groundtruths.append(y.detach().cpu())
    logits = torch.cat(predictions, dim=0)
    targets = torch.cat(groundtruths, dim=0)
    # fma/gtzan/emotify are single-label datasets; the rest are multi-label.
    if args.eval_dataset in ['fma', 'gtzan', 'emotify']:
        results = get_evaluation(targets.numpy(), logits.numpy(), test_loader.dataset.list_of_label, 'multiclass')
    else:
        results = get_evaluation(targets.numpy(), logits.numpy(), test_loader.dataset.list_of_label, 'multilabel')
    with open(os.path.join(save_dir, 'results.json'), mode='w') as io:
        json.dump(results, io, indent=4)
# NOTE(review): bare top-level call to ``_module`` — in auto-generated
# harnesses this is usually leftover scaffolding or a mangled decorator;
# confirm it is an intentional import-time side effect.
_module()
class mit_b1(MixVisionTransformer):
    """SegFormer MiT-B1 backbone: the B1 hyper-parameter preset
    (embed dims 64/128/320/512, two blocks per stage) applied to
    MixVisionTransformer."""

    def __init__(self, **kwargs):
        super().__init__(
            patch_size=4,
            embed_dims=[64, 128, 320, 512],
            num_heads=[1, 2, 5, 8],
            mlp_ratios=[4, 4, 4, 4],
            qkv_bias=True,
            norm_layer=partial(nn.LayerNorm, eps=1e-06),
            depths=[2, 2, 2, 2],
            sr_ratios=[8, 4, 2, 1],
            **kwargs,
        )
def main(file_name, starting_value):
    """Record (screen, key-press) training pairs from a fixed window region.

    Captures frames in an endless loop, pairing each with the current key
    state; every 500 samples the batch is saved to ``file_name`` and a new
    numbered file is started. Pressing 'T' toggles pause.
    """
    file_name = file_name
    starting_value = starting_value
    training_data = []
    # Countdown so the user can focus the game window first.
    for i in list(range(4))[::(- 1)]:
        print((i + 1))
        time.sleep(1)
    last_time = time.time()
    paused = False
    print('STARTING!!!')
    while True:
        if (not paused):
            # Fixed capture region (left, top, right, bottom) of the window.
            screen = grab_screen(region=(44, 852, (44 + 278), 1050))
            keys = key_check()
            output = keys_to_output(keys)
            training_data.append([screen, output])
            if ((len(training_data) % 100) == 0):
                print(len(training_data))
            if (len(training_data) == 500):
                np.save(file_name, training_data)
                print('SAVED')
                training_data = []
                starting_value += 1
                # NOTE(review): ``training_dataset`` is not defined in this
                # function — presumably a module-level global; otherwise this
                # line raises NameError on the first save rollover. Verify.
                file_name = (('training_data/' + training_dataset) + '/training_data-{}.npy'.format(starting_value))
        # Poll keys even while paused so 'T' can resume recording.
        keys = key_check()
        if ('T' in keys):
            if paused:
                paused = False
                print('unpaused!')
                time.sleep(1)
            else:
                print('Pausing!')
                paused = True
                time.sleep(1)
class NystromformerForMultipleChoice(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable.

    Standard transformers dummy-object pattern: instantiation defers to
    ``requires_backends``, which presumably raises an informative error
    about the missing backend (confirm against transformers.utils).
    """

    # Backends required by the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def hop(entities, constraints, top_predicates, verbose=False, max_triples=500000, bl_p=[68655]):
    """Propagate entity activations one hop through the knowledge graph.

    Parameters
    ----------
    entities, constraints : lists of {entity_id: score} dicts seeding the hop.
    top_predicates : list of {predicate_id: score} dicts restricting relations.
    verbose : unused; kept for interface compatibility.
    max_triples : page size for ``kg.compute_hops``.
    bl_p : blacklisted predicate ids (mutable default, but only read here).

    Returns ``(answers, na)``: one {entity_id: activation} dict per answer
    and the total number of adjacency entries traversed.

    FIXES: the source dropped the matrix-product operators in two places
    ("x _A" and "x sum(A)" were syntax errors); restored as sparse
    matrix products.
    """
    n_constraints = len(constraints)
    if entities:
        n_constraints += 1
    top_entities = (entities + constraints)
    all_entities_ids = [_id for e in top_entities for _id in e]
    # Drop blacklisted predicates from the hop configuration.
    top_predicates_ids = [_id for p in top_predicates for _id in p if (_id not in bl_p)]
    activations = defaultdict(int)
    offset = 0
    na = 0
    while True:
        # Page through the 1-hop neighbourhood until the KG is exhausted.
        kg.configure_hops(1, top_predicates_ids, namespace, True)
        (entities, predicate_ids, adjacencies) = kg.compute_hops(all_entities_ids, max_triples, offset)
        na += sum(len(a) for a in adjacencies)
        if not entities:
            answers = [{a_id: a_score} for (a_id, a_score) in activations.items()]
            return (answers, na)
        offset += max_triples
        entities_dict = {k: v for (v, k) in enumerate(entities)}
        A = generate_adj_sp(adjacencies, len(entities), include_inverse=True)
        # Seed activation matrix x: one row per seed concept, local entity ids.
        (row, col, data) = ([], [], [])
        for (i, concept_ids) in enumerate(top_entities):
            for (entity_id, score) in concept_ids.items():
                if entity_id in entities_dict:
                    local_id = entities_dict[entity_id]
                    row.append(i)
                    col.append(local_id)
                    data.append(score)
        x = sp.csr_matrix((data, (row, col)), shape=(len(top_entities), len(entities)))
        ye = sp.csr_matrix((len(top_entities), len(entities)))
        if top_predicates_ids:
            yp = sp.csr_matrix((len(top_predicates), len(entities)))
            for (i, concept_ids) in enumerate(top_predicates):
                # Predicate score vector aligned with this page's predicate ids.
                p = np.zeros([len(predicate_ids)])
                for (p_id, score) in concept_ids.items():
                    if p_id in predicate_ids:
                        local_id = predicate_ids.index(p_id)
                        p[local_id] = score
                _A = sum((p * A))
                # FIX: restore the sparse matrix product ("x _A" in source).
                _y = x @ _A
                _y[(_y > 1)] = 1
                yp[i] = _y.sum(0)
                ye += _y
            y = sp.vstack([ye, yp])
        else:
            # FIX: restore the sparse matrix product ("x sum(A)" in source).
            y = x @ sum(A)
        # Aggregate row activations, normalized by the number of evidence
        # sources, and combine with per-entity support counts.
        sum_a = sum(y)
        sum_a_norm = (sum_a.toarray()[0] / (len(top_predicates) + n_constraints))
        sum_a_norm[(sum_a_norm > 1)] = 1
        y_counts = binarize(y, threshold=0.0)
        count_a = sum(y_counts).toarray()[0]
        y = ((sum_a_norm + count_a) / ((len(top_predicates) + n_constraints) + 1))
        assert (y.shape[0] == len(entities))
        top = np.argwhere((y > 0)).T.tolist()[0]
        if len(top) > 0:
            activations1 = np.asarray(entities)[top]
            for (i, e) in enumerate(entities):
                if e in activations1:
                    activations[e] += y[i]
        else:
            # No positive activation: fall back to the arg-max entities.
            y_p = np.argmax(y)
            max_cs = y[y_p]
            if max_cs != 0:
                top = np.argwhere((y == max_cs)).T.tolist()[0]
                activations1 = np.asarray(entities)[top]
                for (i, e) in enumerate(entities):
                    if e in activations1:
                        activations[e] += y[i]
def _reject_cdef_modifier_in_py(s, name):
    """Reject cdef modifiers used in a Python function signature.

    If ``name`` is a cdef modifier followed by another identifier, report
    a non-fatal error and consume the next identifier instead; otherwise
    return ``name`` unchanged.
    """
    # Fast path: not a modifier position — keep the name as-is.
    if s.sy != 'IDENT' or name not in _CDEF_MODIFIERS:
        return name
    s.error("Cannot use cdef modifier '%s' in Python function signature. Use a decorator instead." % name, fatal=False)
    # Skip the modifier and read the actual identifier.
    return p_ident(s)
class OneOf:
    """An unordered choice among several alternatives.

    Two ``OneOf`` objects are equal when they hold the same set of
    alternatives, regardless of order.
    """

    def __init__(self, contents):
        self._contents = contents

    def contents(self):
        """Return the stored alternatives as given at construction."""
        return self._contents

    def __eq__(self, other):
        # Only comparable to other OneOf instances; order is ignored.
        if not isinstance(other, OneOf):
            return False
        return set(self._contents) == set(other._contents)

    def __repr__(self):
        return f'OneOf({self._contents!r})'

    def __str__(self):
        # Human-readable tag, stripping any 'unknown-' prefixes.
        parts = (str(item).replace('unknown-', '') for item in self._contents)
        return 'oneof-' + '-'.join(parts)
def run_ml_pipeline(nlpPipelineDF, num_topics, max_iterations, vocabSize, minDF, maxDF):
    """Fit a CountVectorizer -> IDF -> LDA Spark ML pipeline on the
    tokenized DataFrame and return (fitted pipeline model, LDA stage)."""
    vectorizer = CountVectorizer(
        inputCol='allTokens', outputCol='features',
        vocabSize=vocabSize, minDF=minDF, maxDF=maxDF, minTF=1.0)
    idf_stage = IDF(inputCol='features', outputCol='idf')
    # Online variational LDA with a fixed seed for reproducibility.
    lda_stage = LDA(k=num_topics, maxIter=max_iterations, optimizer='online',
                    seed=1, learningOffset=100.0, learningDecay=0.51)
    pipeline = Pipeline(stages=[vectorizer, idf_stage, lda_stage])
    fitted = pipeline.fit(nlpPipelineDF)
    # The LDA model is the third (last) fitted stage.
    return (fitted, fitted.stages[2])
def main():
    """Bin per-spectrum formula prediction files and pickle the result.

    Reads every ``*.json`` in ``--form-folder``, bins each with
    ``bin_form_file`` (optionally in parallel), stacks the binned spectra
    and dumps a single pickle next to the input folder.
    """
    args = get_args()
    out = args.out
    (max_peaks, min_inten) = (args.max_peaks, args.min_inten)
    (num_bins, upper_limit) = (args.num_bins, args.upper_limit)
    num_workers = args.num_workers
    form_folder = Path(args.form_folder)
    form_files = list(form_folder.glob('*.json'))
    if out is None:
        # Default output: sibling pickle named after the input folder.
        out = form_folder.parent / f'{form_folder.stem}_binned.p'
    spec_names = [f.stem.replace('pred_', '') for f in form_files]
    read_dag_file = partial(
        bin_form_file,
        max_peaks=max_peaks,
        upper_limit=upper_limit,
        num_bins=num_bins,
        min_inten=min_inten,
    )
    if num_workers > 0:
        outs = common.chunked_parallel(form_files, read_dag_file, max_cpu=num_workers)
    else:
        outs = [read_dag_file(f) for f in form_files]
    (binned, smis) = zip(*outs)
    binned_stack = np.concatenate(binned, 0)
    output = {
        'preds': binned_stack,
        'smiles': smis,
        'spec_names': spec_names,
        'num_bins': num_bins,
        'upper_limit': upper_limit,
    }
    with open(out, 'wb') as fp:
        pickle.dump(output, fp)
def meta_training(train_dataset, valid_dataset, model, classifier, lr=None, optimizer=None, epochs=100, episodes=1000, ways=5, shots=5, query_num=15, report_epoch=1, lr_step_epoch=10, save_model_epoch=20, save_model_root='~/trained_models'):
    """Episodic few-shot (prototype-based) training loop in PaddlePaddle.

    Trains ``model`` + ``classifier`` on N-way K-shot tasks sampled from
    ``train_dataset``, validates on ``valid_dataset`` every
    ``report_epoch`` epochs, steps the LR scheduler every
    ``lr_step_epoch`` epochs, and checkpoints every ``save_model_epoch``
    epochs. Returns ``(train_dir, model, classifier)``.
    """
    # Default learning rate / optimizer over both networks' parameters.
    lr = (0.001 if (lr is None) else lr)
    if (optimizer is None):
        optimizer = paddle.optimizer.Adam(learning_rate=lr, parameters=(model.parameters() + classifier.parameters()))
    # Directory layout: <root>/<module_info>/<train_info>/.
    module_info = utils.get_info_str('baseline', train_dataset, 'conv', (str(ways) + 'ways'), (str(shots) + 'shots'))
    if (type(lr) is float):
        train_info = utils.get_info_str(('lr' + str(lr)), ('episodes' + str(episodes)))
    else:
        # lr is a scheduler object; record its base LR and the schedule.
        train_info = utils.get_info_str(('lr' + str(lr.base_lr)), lr, ('episodes' + str(episodes)))
    module_dir = utils.process_root(save_model_root, module_info)
    train_dir = utils.process_root(module_dir, train_info)
    report_file = (train_dir + '/training_report.txt')
    utils.clear_file(report_file)
    for epoch in range(epochs):
        (train_loss, train_acc, valid_loss, valid_acc) = (0.0, 0.0, 0.0, 0.0)
        for _ in tqdm(range(episodes), desc=('epoch ' + str((epoch + 1)))):
            optimizer.clear_grad()
            task = train_dataset.sample_task_set(ways=ways, shots=shots, query_num=query_num)
            task.transfer_backend('tensor')
            model.train()
            classifier.train()
            (support_embeddings, query_embeddings) = (model(task.support_data), model(task.query_data))
            (support_score, query_score) = (classifier(support_embeddings), classifier(query_embeddings))
            # Class prototypes from the support scores; loss/acc on queries.
            prototypes = protonet.get_prototypes(support_score, task.support_labels, ways, shots)
            (loss, acc) = _get_prediction(prototypes, query_score, task.query_labels)
            train_loss += loss.numpy()[0]
            train_acc += acc
            loss.backward()
            optimizer.step()
        if ((((epoch + 1) % report_epoch) == 0) or ((epoch + 1) == epochs)):
            # Single validation episode per reporting epoch.
            model.eval()
            classifier.eval()
            task = valid_dataset.sample_task_set(ways=ways, shots=shots, query_num=query_num)
            task.transfer_backend('tensor')
            (support_embeddings, query_embeddings) = (model(task.support_data), model(task.query_data))
            (support_score, query_score) = (classifier(support_embeddings), classifier(query_embeddings))
            prototypes = protonet.get_prototypes(support_score, task.support_labels, ways, shots)
            (loss, acc) = _get_prediction(prototypes, query_score, task.query_labels)
            valid_loss += loss.numpy()[0]
            valid_acc += acc
        (train_loss, train_acc) = ((train_loss / episodes), (train_acc / episodes))
        # NOTE(review): validation runs once per reporting epoch but is also
        # divided by ``episodes`` — confirm this scaling is intentional.
        (valid_loss, valid_acc) = ((valid_loss / episodes), (valid_acc / episodes))
        if ((type(lr) is not float) and (((epoch + 1) % lr_step_epoch) == 0)):
            lr.step()
        if ((((epoch + 1) % report_epoch) == 0) or ((epoch + 1) == epochs)):
            utils.print_training_info((epoch + 1), train_loss, train_acc, valid_loss, valid_acc, report_file=report_file, info=[module_info, train_info])
        if ((((epoch + 1) % save_model_epoch) == 0) or ((epoch + 1) == epochs)):
            paddle.save(model.state_dict(), (((train_dir + '/epoch') + str((epoch + 1))) + '.params'))
            paddle.save(classifier.state_dict(), (((train_dir + '/epoch') + str((epoch + 1))) + '_classifier.params'))
    return (train_dir, model, classifier)
class AdjacencyField(Field[torch.Tensor]):
    """A ``Field`` storing labelled edges over a sequence.

    Holds a list of (i, j) index pairs into ``sequence_field``, optionally
    with one string label per pair, and tensorizes them as a square
    (seq_len x seq_len) matrix filled with ``padding_value`` everywhere
    except the given indices.
    """

    # Namespaces we have already warned about (shared across instances).
    _already_warned_namespaces: Set[str] = set()

    def __init__(self, indices: List[Tuple[(int, int)]], sequence_field: SequenceField, labels: List[str]=None, label_namespace: str='labels', padding_value: int=(- 1)) -> None:
        self.indices = indices
        self.labels = labels
        self.sequence_field = sequence_field
        self._label_namespace = label_namespace
        self._padding_value = padding_value
        # Filled by index(); None until a vocabulary is applied.
        self._indexed_labels: List[int] = None
        self._maybe_warn_for_namespace(label_namespace)
        field_length = sequence_field.sequence_length()
        # Validate uniqueness, bounds, and label/index alignment up front.
        if (len(set(indices)) != len(indices)):
            raise ConfigurationError(f'Indices must be unique, but found {indices}')
        if (not all([((0 <= index[1] < field_length) and (0 <= index[0] < field_length)) for index in indices])):
            raise ConfigurationError(f'Label indices and sequence length are incompatible: {indices} and {field_length}')
        if ((labels is not None) and (len(indices) != len(labels))):
            raise ConfigurationError(f'Labelled indices were passed, but their lengths do not match: {labels}, {indices}')

    def _maybe_warn_for_namespace(self, label_namespace: str) -> None:
        """Warn once per namespace that does not end in 'labels'/'tags'."""
        if (not (self._label_namespace.endswith('labels') or self._label_namespace.endswith('tags'))):
            if (label_namespace not in self._already_warned_namespaces):
                logger.warning("Your label namespace was '%s'. We recommend you use a namespace ending with 'labels' or 'tags', so we don't add UNK and PAD tokens by default to your vocabulary. See documentation for `non_padded_namespaces` parameter in Vocabulary.", self._label_namespace)
                self._already_warned_namespaces.add(label_namespace)

    def count_vocab_items(self, counter: Dict[(str, Dict[(str, int)])]):
        """Count label occurrences into ``counter`` (pre-indexing only)."""
        if ((self._indexed_labels is None) and (self.labels is not None)):
            for label in self.labels:
                counter[self._label_namespace][label] += 1

    def index(self, vocab: Vocabulary):
        """Convert string labels to integer ids using ``vocab`` (idempotent)."""
        if ((self._indexed_labels is None) and (self.labels is not None)):
            self._indexed_labels = [vocab.get_token_index(label, self._label_namespace) for label in self.labels]

    def get_padding_lengths(self) -> Dict[(str, int)]:
        return {'num_tokens': self.sequence_field.sequence_length()}

    def as_tensor(self, padding_lengths: Dict[(str, int)]) -> torch.Tensor:
        """Build the square adjacency tensor, padded with ``padding_value``."""
        desired_num_tokens = padding_lengths['num_tokens']
        tensor = (torch.ones(desired_num_tokens, desired_num_tokens) * self._padding_value)
        # Unlabelled edges are marked with 1.
        labels = (self._indexed_labels or [1 for _ in range(len(self.indices))])
        for (index, label) in zip(self.indices, labels):
            tensor[index] = label
        return tensor

    def empty_field(self) -> 'AdjacencyField':
        """Return an empty field compatible with this one for padding."""
        empty_list: List[Tuple[(int, int)]] = []
        adjacency_field = AdjacencyField(empty_list, self.sequence_field.empty_field(), padding_value=self._padding_value)
        return adjacency_field

    def __str__(self) -> str:
        length = self.sequence_field.sequence_length()
        formatted_labels = ''.join([(('\t\t' + labels) + '\n') for labels in textwrap.wrap(repr(self.labels), 100)])
        formatted_indices = ''.join([(('\t\t' + index) + '\n') for index in textwrap.wrap(repr(self.indices), 100)])
        return f'''AdjacencyField of length {length}
with indices:
{formatted_indices}
and labels:
{formatted_labels} in namespace: '{self._label_namespace}'.'''
class TestThresholdedRelu(serial.SerializedTestCase):
    """Hypothesis tests for the caffe2 ThresholdedRelu operator.

    FIX: the decorators had been corrupted into bare keyword-argument
    tuples — a SyntaxError — and are restored here as ``@given`` /
    ``@settings`` hypothesis decorators. ``fix_input`` is also marked
    ``@staticmethod`` since it is called through the class and takes no
    ``self``.
    """

    # Test 1: default alpha (1.0) — device and reference checks.
    @given(input=hu.tensor(), engine=st.sampled_from(['', 'CUDNN']), **hu.gcs)
    def test_thresholded_relu_1(self, input, gc, dc, engine):
        X = input
        op = core.CreateOperator('ThresholdedRelu', ['X'], ['Y'], engine=engine)

        def defaultRef(X):
            # y = x where x > 1.0, else 0.
            Y = np.copy(X)
            Y[(Y <= 1.0)] = 0.0
            return (Y,)
        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertReferenceChecks(gc, op, [X], defaultRef)

    # Test 2: explicit alpha over a range — device and reference checks.
    @given(input=hu.tensor(), alpha=st.floats(min_value=1.0, max_value=5.0), engine=st.sampled_from(['', 'CUDNN']), **hu.gcs)
    def test_thresholded_relu_2(self, input, alpha, gc, dc, engine):
        X = input
        op = core.CreateOperator('ThresholdedRelu', ['X'], ['Y'], alpha=alpha, engine=engine)

        def ref(X):
            Y = np.copy(X)
            Y[(Y <= alpha)] = 0.0
            return (Y,)
        self.assertDeviceChecks(dc, op, [X], [0])
        self.assertReferenceChecks(gc, op, [X], ref)

    # Test 3: gradient check with inputs nudged away from the threshold.
    @given(input=hu.tensor(), alpha=st.floats(min_value=1.1, max_value=5.0), engine=st.sampled_from(['', 'CUDNN']), **hu.gcs)
    @settings(deadline=10000)
    def test_thresholded_relu_3(self, input, alpha, gc, dc, engine):
        X = TestThresholdedRelu.fix_input(input)
        op = core.CreateOperator('ThresholdedRelu', ['X'], ['Y'], alpha=float(alpha), engine=engine)
        self.assertGradientChecks(gc, op, [X], 0, [0])

    @staticmethod
    def fix_input(input):
        # Push values away from zero so the gradient check does not land
        # on the non-differentiable kink.
        input += (0.02 * np.sign(input))
        return input
class ArrayStreamer(BaseStreamer):
    """Streams samples from in-memory arrays one at a time.

    With ``shuffle=True`` the visiting order is randomized (via
    ``np.random.shuffle``); otherwise samples come in array order.
    """

    def __init__(self, shuffle=False):
        self.shuffle = shuffle

    def iter(self, X, y=None):
        """Yield ``X[i]`` (or ``(X[i], y[i])`` when ``y`` is given)."""
        order = list(range(len(X)))
        if self.shuffle:
            np.random.shuffle(order)
        if y is not None:
            # Labels must align one-to-one with samples.
            assert len(X) == len(y)
            for idx in order:
                yield (X[idx], y[idx])
        else:
            for idx in order:
                yield X[idx]
def do_naive_bayes_prediction(X, observed_class_distribution: dict, attribute_observers: dict):
    """Naive Bayes class votes for one instance ``X``.

    Starts each class's vote at its prior (class count / total) and
    multiplies in the per-attribute likelihoods from
    ``attribute_observers`` (a NaN product zeroes the vote).

    Returns ``{0: 0.0}`` when the class distribution is empty or sums
    to zero.

    FIX: the attribute-observer block had been dedented out of the
    per-class loop, so likelihoods were applied only to the class left
    in ``class_index`` after the loop; it now runs for every class, as
    in the standard Hoeffding-tree naive Bayes prediction.
    """
    observed_class_sum = sum(observed_class_distribution.values())
    if (observed_class_distribution == {}) or (observed_class_sum == 0.0):
        # No observations yet: no meaningful vote.
        return {0: 0.0}
    votes = {}
    for (class_index, observed_class_val) in observed_class_distribution.items():
        # Class prior.
        votes[class_index] = observed_class_val / observed_class_sum
        if attribute_observers:
            for att_idx in range(len(X)):
                if att_idx in attribute_observers:
                    obs = attribute_observers[att_idx]
                    tmp = (votes[class_index] * obs.probability_of_attribute_value_given_class(X[att_idx], class_index))
                    # Guard against NaN likelihoods.
                    votes[class_index] = tmp if not math.isnan(tmp) else 0
    return votes
def main():
    """Train a PLUS model (RNN or Transformer) on the fluorescence
    regression task.

    Builds configs from CLI args, loads train/dev datasets, initializes
    the model stack (optionally a pre-trained LM and a projection MLP for
    the RNN variant), then runs the epoch loop with periodic evaluation,
    checkpointing, and early stopping via the trainer's patience.
    """
    set_seeds(2020)
    args = vars(parser.parse_args())
    alphabet = Protein()
    # --- configuration -------------------------------------------------
    cfgs = []
    data_cfg = config.DataConfig(args['data_config'])
    cfgs.append(data_cfg)
    if (args['lm_model_config'] is None):
        model_cfg = config.ModelConfig(args['model_config'], input_dim=len(alphabet), num_classes=1)
        cfgs += [model_cfg]
    else:
        # With a language model, the main model consumes the LM states
        # (num_layers * hidden_dim * 2, for both directions).
        lm_model_cfg = config.ModelConfig(args['lm_model_config'], idx='lm_model_config', input_dim=len(alphabet))
        model_cfg = config.ModelConfig(args['model_config'], input_dim=len(alphabet), lm_dim=((lm_model_cfg.num_layers * lm_model_cfg.hidden_dim) * 2), num_classes=1)
        cfgs += [model_cfg, lm_model_cfg]
    if (model_cfg.model_type == 'RNN'):
        # RNN variant adds an MLP prediction head on top of the encoder.
        pr_model_cfg = config.ModelConfig(args['pr_model_config'], idx='pr_model_config', model_type='MLP', num_classes=1)
        if pr_model_cfg.projection:
            pr_model_cfg.set_input_dim(model_cfg.embedding_dim)
        else:
            pr_model_cfg.set_input_dim((model_cfg.hidden_dim * 2))
        cfgs.append(pr_model_cfg)
    run_cfg = config.RunConfig(args['run_config'], sanity_check=args['sanity_check'])
    cfgs.append(run_cfg)
    (output, save_prefix) = set_output(args, 'train_fluorescence_log')
    os.environ['CUDA_VISIBLE_DEVICES'] = (args['device'] if (args['device'] is not None) else '')
    (device, data_parallel) = (torch.device(('cuda' if torch.cuda.is_available() else 'cpu')), (torch.cuda.device_count() > 1))
    config.print_configs(args, cfgs, device, output)
    # Feature flags driving the model/loss wiring below.
    flag_rnn = (model_cfg.model_type == 'RNN')
    flag_lm_model = (args['lm_model_config'] is not None)
    flag_lm_loss = (run_cfg.lm_loss_lambda != (- 1))
    # --- datasets ------------------------------------------------------
    start = Print(' '.join(['start loading a train dataset:', data_cfg.path['train']]), output)
    dataset_train = fluorescence.load_fluorescence(data_cfg, 'train', alphabet, args['sanity_check'])
    dataset_train = dataset.Seq_dataset(*dataset_train, alphabet, run_cfg, flag_rnn, model_cfg.max_len)
    collate_fn = (dataset.collate_sequences if flag_rnn else None)
    iterator_train = torch.utils.data.DataLoader(dataset_train, run_cfg.batch_size_train, collate_fn=collate_fn, shuffle=True)
    end = Print(' '.join(['loaded', str(len(dataset_train)), 'sequences']), output)
    Print(' '.join(['elapsed time:', str((end - start))]), output, newline=True)
    start = Print(' '.join(['start loading a dev dataset:', data_cfg.path['dev']]), output)
    dataset_dev = fluorescence.load_fluorescence(data_cfg, 'dev', alphabet, args['sanity_check'])
    dataset_dev = dataset.Seq_dataset(*dataset_dev, alphabet, run_cfg, flag_rnn, model_cfg.max_len)
    iterator_dev = torch.utils.data.DataLoader(dataset_dev, run_cfg.batch_size_eval, collate_fn=collate_fn)
    end = Print(' '.join(['loaded', str(len(dataset_dev)), 'sequences']), output)
    Print(' '.join(['elapsed time:', str((end - start))]), output, newline=True)
    # --- model stack ---------------------------------------------------
    start = Print('start initializing a model', output)
    models_list = []
    if (not flag_rnn):
        model = plus_tfm.PLUS_TFM(model_cfg)
    elif (not flag_lm_model):
        model = plus_rnn.PLUS_RNN(model_cfg)
    else:
        model = p_elmo.P_ELMo(model_cfg)
    # Entries: [module, tag, frozen, clip_grad, clip_weight] (presumably;
    # confirm against load_models / Trainer).
    models_list.append([model, '', flag_lm_model, flag_rnn, False])
    if flag_lm_model:
        lm_model = p_elmo.P_ELMo_lm(lm_model_cfg)
        models_list.append([lm_model, 'lm', True, False, False])
    if flag_rnn:
        pr_model = mlp.MLP(pr_model_cfg, per_seq=True)
        models_list.append([pr_model, 'pr', False, True, False])
    # Separate parameter groups: backbone vs prediction head.
    (params, pr_params) = ([], [])
    for (model, idx, frz, _, _) in models_list:
        if frz:
            continue
        elif (idx != 'pr'):
            params += [p for p in model.parameters() if p.requires_grad]
        else:
            pr_params += [p for p in model.parameters() if p.requires_grad]
    load_models(args, models_list, device, data_parallel, output, tfm_cls=flag_rnn)
    get_loss = (plus_rnn.get_loss if flag_rnn else plus_tfm.get_loss)
    end = Print('end initializing a model', output)
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)
    # --- trainer setup -------------------------------------------------
    start = Print('start setting trainer configurations', output)
    optim = torch.optim.Adam([{'params': params, 'lr': run_cfg.learning_rate}, {'params': pr_params, 'lr': run_cfg.pr_learning_rate}])
    tasks_list = []
    tasks_list.append(['cls', [], ['rho', 'r']])
    if flag_lm_loss:
        tasks_list.append(['lm', [], ['acc']])
    trainer = Trainer(models_list, get_loss, run_cfg, tasks_list, optim)
    trainer_args = {}
    trainer_args['data_parallel'] = data_parallel
    trainer_args['paired'] = False
    if flag_rnn:
        trainer_args['projection'] = pr_model_cfg.projection
    # Fluorescence is a regression target.
    trainer_args['regression'] = True
    if flag_rnn:
        trainer_args['evaluate_cls'] = plus_rnn.evaluate_cls_protein
    else:
        trainer_args['evaluate_cls'] = plus_tfm.evaluate_cls_protein
    end = Print('end setting trainer configurations', output)
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)
    # --- training loop -------------------------------------------------
    start = Print('start training a model', output)
    Print(trainer.get_headline(), output)
    for epoch in range(run_cfg.num_epochs):
        # LM augmentation is only needed when the LM loss is active.
        dataset_train.set_augment(flag_lm_loss)
        for (B, batch) in enumerate(iterator_train):
            batch = [(t.to(device) if (type(t) is torch.Tensor) else t) for t in batch]
            trainer.train(batch, trainer_args)
            if ((B % 10) == 0):
                print('# epoch [{}/{}] train {:.1%} loss={:.4f}'.format((epoch + 1), run_cfg.num_epochs, (B / len(iterator_train)), trainer.loss_train), end='\r', file=sys.stderr)
        print((' ' * 150), end='\r', file=sys.stderr)
        # Evaluate the classification task on dev (LM augmentation off).
        dataset_dev.set_augment(False)
        trainer.set_exec_flags(['cls', 'lm'], [True, False])
        for (b, batch) in enumerate(iterator_dev):
            batch = [(t.to(device) if (type(t) is torch.Tensor) else t) for t in batch]
            trainer.evaluate(batch, trainer_args)
            if ((b % 10) == 0):
                print('# cls {:.1%} loss={:.4f}'.format((b / len(iterator_dev)), trainer.loss_eval), end='\r', file=sys.stderr)
        print((' ' * 150), end='\r', file=sys.stderr)
        if flag_lm_loss:
            # Separate pass evaluating only the LM task.
            dataset_dev.set_augment(True)
            trainer.set_exec_flags(['cls', 'lm'], [False, True])
            for (b, batch) in enumerate(iterator_dev):
                batch = [(t.to(device) if (type(t) is torch.Tensor) else t) for t in batch]
                trainer.evaluate(batch, trainer_args)
                if ((b % 10) == 0):
                    print('# lm {:.1%} loss={:.4f}'.format((b / len(iterator_dev)), trainer.loss_eval), end='\r', file=sys.stderr)
            print((' ' * 150), end='\r', file=sys.stderr)
        trainer.save(save_prefix)
        Print(trainer.get_log((epoch + 1), args=trainer_args), output)
        trainer.set_exec_flags(['cls', 'lm'], [True, True])
        trainer.reset()
        # Early stopping once patience is exhausted.
        if (trainer.patience == 0):
            break
    end = Print('end training a model', output)
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)
class VectorField(MultivectorField):
def __init__(self, vector_field_module, name=None, latex_name=None):
MultivectorField.__init__(self, vector_field_module, 1, name=name, latex_name=latex_name)
MultivectorField._init_derived(self)
self._init_dependencies()
def _repr_(self):
description = 'Vector field '
if (self._name is not None):
description += (self._name + ' ')
return self._final_repr(description)
def _new_instance(self):
return type(self)(self._vmodule)
def _init_dependencies(self):
self._lie_der_along_self = {}
def _del_dependencies(self):
if (self._lie_der_along_self != {}):
for (idtens, tens) in self._lie_der_along_self.items():
del tens._lie_derivatives[id(self)]
self._lie_der_along_self.clear()
def __call__(self, scalar):
if (scalar._tensor_type == (0, 1)):
return scalar(self)
if (scalar._tensor_type != (0, 0)):
raise TypeError('the argument must be a scalar field')
resu = scalar.differential()(self)
if (not resu.is_immutable()):
if ((self._name is not None) and (scalar._name is not None)):
name = f'{self._name}({scalar._name})'
else:
name = None
if ((self._latex_name is not None) and (scalar._latex_name is not None)):
latex_name = f'''{self._latex_name}\left({scalar._latex_name}
ight)'''
else:
latex_name = None
resu.set_name(name=name, latex_name=latex_name)
return resu
(max_range=8, scale=1, color='blue')
def plot(self, chart=None, ambient_coords=None, mapping=None, chart_domain=None, fixed_coords=None, ranges=None, number_values=None, steps=None, parameters=None, label_axes=True, **extra_options):
from sage.rings.infinity import Infinity
from sage.misc.functional import numerical_approx
from sage.misc.latex import latex
from sage.plot.graphics import Graphics
from sage.manifolds.chart import RealChart
from sage.manifolds.utilities import set_axes_labels
from sage.parallel.decorate import parallel
from sage.parallel.parallelism import Parallelism
max_range = extra_options.pop('max_range')
scale = extra_options.pop('scale')
color = extra_options.pop('color')
if (chart is None):
chart = self._domain.default_chart()
elif (not isinstance(chart, RealChart)):
raise TypeError(('{} is not a chart on a real '.format(chart) + 'manifold'))
if (chart_domain is None):
chart_domain = self._domain.default_chart()
elif (not isinstance(chart_domain, RealChart)):
raise TypeError(('{} is not a chart on a '.format(chart_domain) + 'real manifold'))
elif (not chart_domain.domain().is_subset(self._domain)):
raise ValueError(('the domain of {} is not '.format(chart_domain) + 'included in the domain of {}'.format(self)))
coords_full = tuple(chart_domain[:])
if (fixed_coords is None):
coords = coords_full
else:
fixed_coord_list = fixed_coords.keys()
coords = []
for coord in coords_full:
if (coord not in fixed_coord_list):
coords.append(coord)
coords = tuple(coords)
if (ambient_coords is None):
ambient_coords = chart[:]
elif (not isinstance(ambient_coords, tuple)):
ambient_coords = tuple(ambient_coords)
nca = len(ambient_coords)
if ((nca != 2) and (nca != 3)):
raise ValueError(('the number of ambient coordinates must be ' + 'either 2 or 3, not {}'.format(nca)))
if (ranges is None):
ranges = {}
ranges0 = {}
for coord in coords:
if (coord in ranges):
ranges0[coord] = (numerical_approx(ranges[coord][0]), numerical_approx(ranges[coord][1]))
else:
bounds = chart_domain._bounds[coords_full.index(coord)]
xmin0 = bounds[0][0]
xmax0 = bounds[1][0]
if (xmin0 == (- Infinity)):
xmin = numerical_approx((- max_range))
elif bounds[0][1]:
xmin = numerical_approx(xmin0)
else:
xmin = numerical_approx((xmin0 + 0.001))
if (xmax0 == Infinity):
xmax = numerical_approx(max_range)
elif bounds[1][1]:
xmax = numerical_approx(xmax0)
else:
xmax = numerical_approx((xmax0 - 0.001))
ranges0[coord] = (xmin, xmax)
ranges = ranges0
if (number_values is None):
if (nca == 2):
number_values = 9
else:
number_values = 5
if (not isinstance(number_values, dict)):
number_values0 = {}
for coord in coords:
number_values0[coord] = number_values
number_values = number_values0
if (steps is None):
steps = {}
for coord in coords:
if (coord not in steps):
steps[coord] = ((ranges[coord][1] - ranges[coord][0]) / (number_values[coord] - 1))
else:
number_values[coord] = (1 + int(((ranges[coord][1] - ranges[coord][0]) / steps[coord])))
dom = chart_domain.domain()
vector = self.restrict(dom)
if (vector.parent().destination_map() is dom.identity_map()):
if (mapping is not None):
vector = mapping.pushforward(vector)
mapping = None
nc = len(coords_full)
ncp = len(coords)
xx = ([0] * nc)
if (fixed_coords is not None):
if (len(fixed_coords) != (nc - ncp)):
raise ValueError('bad number of fixed coordinates')
for (fc, val) in fixed_coords.items():
xx[coords_full.index(fc)] = val
ind_coord = []
for coord in coords:
ind_coord.append(coords_full.index(coord))
resu = Graphics()
ind = ([0] * ncp)
ind_max = ([0] * ncp)
ind_max[0] = number_values[coords[0]]
xmin = [ranges[cd][0] for cd in coords]
step_tab = [steps[cd] for cd in coords]
nproc = Parallelism().get('tensor')
if ((nproc != 1) and (nca == 2)):
list_xx = []
while (ind != ind_max):
for i in range(ncp):
xx[ind_coord[i]] = (xmin[i] + (ind[i] * step_tab[i]))
if chart_domain.valid_coordinates(*xx, tolerance=1e-13, parameters=parameters):
list_xx.append((xx * 1))
ret = 1
for pos in range((ncp - 1), (- 1), (- 1)):
imax = (number_values[coords[pos]] - 1)
if (ind[pos] != imax):
ind[pos] += ret
ret = 0
elif (ret == 1):
if (pos == 0):
ind[pos] = (imax + 1)
else:
ind[pos] = 0
ret = 1
lol = (lambda lst, sz: [lst[i:(i + sz)] for i in range(0, len(lst), sz)])
ind_step = max(1, int(((len(list_xx) / nproc) / 2)))
local_list = lol(list_xx, ind_step)
listParalInput = [(vector, dom, ind_part, chart_domain, chart, ambient_coords, mapping, scale, color, parameters, extra_options) for ind_part in local_list]
(p_iter='multiprocessing', ncpus=nproc)
def add_point_plot(vector, dom, xx_list, chart_domain, chart, ambient_coords, mapping, scale, color, parameters, extra_options):
count = 0
for xx in xx_list:
point = dom(xx, chart=chart_domain)
part = vector.at(point).plot(chart=chart, ambient_coords=ambient_coords, mapping=mapping, scale=scale, color=color, print_label=False, parameters=parameters, **extra_options)
if (count == 0):
local_resu = part
else:
local_resu += part
count += 1
return local_resu
for (ii, val) in add_point_plot(listParalInput):
resu += val
else:
while (ind != ind_max):
for i in range(ncp):
xx[ind_coord[i]] = (xmin[i] + (ind[i] * step_tab[i]))
if chart_domain.valid_coordinates(*xx, tolerance=1e-13, parameters=parameters):
point = dom(xx, chart=chart_domain)
resu += vector.at(point).plot(chart=chart, ambient_coords=ambient_coords, mapping=mapping, scale=scale, color=color, print_label=False, parameters=parameters, **extra_options)
ret = 1
for pos in range((ncp - 1), (- 1), (- 1)):
imax = (number_values[coords[pos]] - 1)
if (ind[pos] != imax):
ind[pos] += ret
ret = 0
elif (ret == 1):
if (pos == 0):
ind[pos] = (imax + 1)
else:
ind[pos] = 0
ret = 1
if label_axes:
if (nca == 2):
resu._extra_kwds['axes_labels'] = [(('$' + latex(ac)) + '$') for ac in ambient_coords]
else:
labels = [str(ac) for ac in ambient_coords]
resu = set_axes_labels(resu, *labels)
return resu
def bracket(self, other):
return MultivectorField.bracket(self, other)
def curl(self, metric=None):
if (self._domain.dim() < 3):
raise ValueError(('the curl is not defined in dimension lower ' + 'than 3'))
default_metric = (metric is None)
if default_metric:
metric = self._domain.metric()
der = self.down(metric).exterior_derivative()
resu = der.hodge_dual(metric).up(metric)
if (self._name is not None):
if default_metric:
resu._name = 'curl({})'.format(self._name)
resu._latex_name = (('\\mathrm{curl}\\left(' + self._latex_name) + '\\right)')
else:
resu._name = 'curl_{}({})'.format(metric._name, self._name)
resu._latex_name = (((('\\mathrm{curl}_{' + metric._latex_name) + '}\\left(') + self._latex_name) + '\\right)')
for restrict in resu._restrictions.values():
restrict.set_name(resu._name, latex_name=resu._latex_name)
return resu
def dot_product(self, other, metric=None):
default_metric = (metric is None)
if default_metric:
metric = self._ambient_domain.metric()
dest_map = self.parent().destination_map()
if (dest_map != metric.parent().base_module().destination_map()):
metric = metric.along(dest_map)
if (dest_map != other.parent().destination_map()):
other = other.along(dest_map)
resu = metric(self, other)
if (default_metric and (self._name is not None) and (other._name is not None)):
resu._name = '{}.{}'.format(self._name, other._name)
resu._latex_name = (((('{' + self._latex_name) + '}\\cdot{') + other._latex_name) + '}')
for restrict in resu._restrictions.values():
restrict.set_name(resu._name, latex_name=resu._latex_name)
return resu
dot = dot_product
def norm(self, metric=None):
default_metric = (metric is None)
if default_metric:
metric = self._ambient_domain.metric()
dest_map = self.parent().destination_map()
if (dest_map != metric.parent().base_module().destination_map()):
metric = metric.along(dest_map)
resu = metric(self, self).sqrt()
if (self._name is not None):
if default_metric:
resu._name = '|{}|'.format(self._name)
resu._latex_name = (('\\left\\|' + self._latex_name) + '\\right\\|')
else:
resu._name = '|{}|_{}'.format(self._name, metric._name)
resu._latex_name = (((('\\left\\|' + self._latex_name) + '\\right\\| _{') + metric._latex_name) + '}')
for restrict in resu._restrictions.values():
restrict.set_name(resu._name, latex_name=resu._latex_name)
return resu
def cross_product(self, other, metric=None):
if (self._ambient_domain.dim() != 3):
raise ValueError(('the cross product is not defined in dimension ' + 'different from 3'))
default_metric = (metric is None)
if default_metric:
metric = self._ambient_domain.metric()
dest_map = self.parent().destination_map()
if (dest_map == metric.parent().base_module().destination_map()):
eps = metric.volume_form(1)
else:
eps = metric.volume_form(1).along(dest_map)
if (dest_map != other.parent().destination_map()):
other = other.along(dest_map)
resu = (eps.contract(1, 2, self.wedge(other), 0, 1) / 2)
if (default_metric and (self._name is not None) and (other._name is not None)):
resu._name = '{} x {}'.format(self._name, other._name)
resu._latex_name = (((('{' + self._latex_name) + '}\\times{') + other._latex_name) + '}')
for restrict in resu._restrictions.values():
restrict.set_name(resu._name, latex_name=resu._latex_name)
return resu
cross = cross_product |
def latent_optimise(zs, fake_labels, gen_model, dis_model, conditional_strategy, latent_op_step, latent_op_rate, latent_op_alpha, latent_op_beta, trans_cost, default_device):
    """Refine latent codes ``zs`` by gradient-based latent optimisation.

    For ``latent_op_step`` iterations, a random subset of batch entries
    (selected per-entry with probability ``latent_op_rate``) is nudged along
    the discriminator gradient returned by ``calc_derv`` and the result is
    clamped to [-1, 1].

    Returns:
        ``zs`` alone when ``trans_cost`` is falsy, otherwise the tuple
        ``(zs, transport_cost)`` where ``transport_cost`` is the accumulated
        mean squared L2 norm of the per-step updates.
    """
    batch_size = zs.shape[0]
    transport_cost = 0.0  # accumulated only when trans_cost is truthy
    for step in range(latent_op_step):
        # Bernoulli mask: each entry is updated with prob. latent_op_rate.
        drop_mask = (torch.FloatTensor(batch_size, 1).uniform_() > (1 - latent_op_rate)).to(default_device)
        z_gradients, z_gradients_norm = calc_derv(zs, fake_labels, dis_model, conditional_strategy, default_device, gen_model)
        delta_z = (latent_op_alpha * z_gradients) / (latent_op_beta + z_gradients_norm)
        zs = torch.clamp(zs + drop_mask * delta_z, -1.0, 1.0)
        if trans_cost:
            transport_cost += (delta_z.norm(2, dim=1) ** 2).mean()
    if trans_cost:
        # BUG FIX: the original returned the flag `trans_cost` instead of
        # the accumulated `transport_cost`.
        return zs, transport_cost
    return zs
def test_estimate_bandwidth():
    """estimate_bandwidth on X with 200 samples must stay in a sane range."""
    bw = estimate_bandwidth(X, n_samples=200)
    assert 0.9 <= bw <= 1.5
class ExecutorBase():
    """Base class for job executors managing a fixed pool of workers.

    Tracks queued, running, and completed tasks.  Subclasses must implement
    ``age``, ``run_until_n_free``, ``run_until_empty`` and
    ``_update_internal_state``.
    """

    def __init__(self, n_workers: int, verbose: bool=False):
        self.verbose = verbose
        self.n_workers = n_workers
        self.n_free_workers = n_workers
        self.n_busy_workers = 0
        self._queue = []            # jobs waiting to start
        self._running_tasks = []    # jobs currently executing
        self._completed_tasks = []  # finished jobs not yet collected

    def age(self) -> float:
        """Return the executor's elapsed time; implemented by subclasses."""
        raise NotImplementedError

    def is_running(self) -> bool:
        """Return True while any job is queued or still executing."""
        return (len(self._queue) + len(self._running_tasks)) > 0

    def status(self) -> Dict:
        """Return a snapshot of worker/task counts and the running state."""
        # BUG FIX: the original stored the bound methods `self.age` and
        # `self.is_running` in the dict instead of calling them.
        status = {'n_free_workers': self.n_free_workers,
                  'n_busy_workers': self.n_busy_workers,
                  'n_completed_tasks': len(self._completed_tasks),
                  'n_queue': len(self._queue),
                  't': self.age(),
                  'is_running': self.is_running()}
        if self.verbose:
            print(f'''{self.__class__.__name__}.status:
{status}''')
        return status

    def _validate_job(self, job: dict) -> None:
        """Check that a job dict has an input 'x' and a callable 'f'."""
        assert ('x' in job.keys())
        assert ('f' in job.keys())
        assert callable(job['f'])

    def run_until_n_free(self, n_desired_free_workers) -> None:
        """Block until the given number of workers is free; subclass hook."""
        raise NotImplementedError

    def run_until_empty(self) -> None:
        """Block until queue and running tasks are empty; subclass hook."""
        raise NotImplementedError

    def add_job_to_queue(self, job: Union[(Dict, List)]) -> None:
        """Append a job (or a list of jobs) to the queue and refresh state."""
        if self.verbose:
            print(f'''{self.__class__.__name__}.queue_job: queuing job:
{job}''')
        if isinstance(job, list):
            for j in job:
                self._queue.append(j)
        else:
            self._queue.append(job)
        self._update_internal_state()

    def _update_internal_state(self) -> None:
        """Reconcile worker/task bookkeeping; implemented by subclasses."""
        raise NotImplementedError

    def get_completed_jobs(self) -> List:
        """Return completed jobs and clear the internal completed list."""
        if self.verbose:
            print(f'{self.__class__.__name__}.get_completed_jobs: Getting completed jobs')
        out = self._completed_tasks
        self._completed_tasks = []
        return out

    def get_array_of_running_jobs(self) -> np.ndarray:
        """Stack the 'x' values of running jobs into an array (None if idle)."""
        list_of_jobs = self.get_list_of_running_jobs()
        if len(list_of_jobs) > 0:
            x_busy = np.vstack([job['x'] for job in list_of_jobs])
        else:
            x_busy = None
        return x_busy

    def get_list_of_running_jobs(self) -> List:
        """Return the list of currently running job dicts."""
        if self.verbose:
            print(f'{self.__class__.__name__}.get_running_jobs')
        return self._running_tasks
def parameter_parser():
    """Build and parse the GETNext command-line arguments.

    Returns the populated ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(description='Run GETNext.')
    parser.add_argument('--seed', type=int, default=42, help='Random seed')
    # NOTE(review): relies on a module-level `device` — confirm it is defined.
    parser.add_argument('--device', type=str, default=device, help='')
    # Data paths
    parser.add_argument('--data-adj-mtx', type=str, default='dataset/NYC/graph_A.csv', help='Graph adjacent path')
    parser.add_argument('--data-node-feats', type=str, default='dataset/NYC/graph_X.csv', help='Graph node features path')
    parser.add_argument('--data-train', type=str, default='dataset/NYC/NYC_train.csv', help='Training data path')
    parser.add_argument('--data-val', type=str, default='dataset/NYC/NYC_val.csv', help='Validation data path')
    parser.add_argument('--short-traj-thres', type=int, default=2, help='Remove over-short trajectory')
    parser.add_argument('--time-units', type=int, default=48, help='Time unit is 0.5 hour, 24/0.5=48')
    parser.add_argument('--time-feature', type=str, default='norm_in_day_time', help='The name of time feature in the data')
    # Model hyper-parameters
    parser.add_argument('--poi-embed-dim', type=int, default=128, help='POI embedding dimensions')
    parser.add_argument('--user-embed-dim', type=int, default=128, help='User embedding dimensions')
    parser.add_argument('--gcn-dropout', type=float, default=0.3, help='Dropout rate for gcn')
    # BUG FIX: `type=list` split the argument string into characters;
    # use nargs='+' with type=int so `--gcn-nhid 32 64` yields [32, 64].
    parser.add_argument('--gcn-nhid', type=int, nargs='+', default=[32, 64], help='List of hidden dims for gcn layers')
    parser.add_argument('--transformer-nhid', type=int, default=1024, help='Hid dim in TransformerEncoder')
    parser.add_argument('--transformer-nlayers', type=int, default=2, help='Num of TransformerEncoderLayer')
    parser.add_argument('--transformer-nhead', type=int, default=2, help='Num of heads in multiheadattention')
    parser.add_argument('--transformer-dropout', type=float, default=0.3, help='Dropout rate for transformer')
    parser.add_argument('--time-embed-dim', type=int, default=32, help='Time embedding dimensions')
    parser.add_argument('--cat-embed-dim', type=int, default=32, help='Category embedding dimensions')
    parser.add_argument('--time-loss-weight', type=int, default=10, help='Scale factor for the time loss term')
    parser.add_argument('--node-attn-nhid', type=int, default=128, help='Node attn map hidden dimensions')
    # Training settings
    parser.add_argument('--batch', type=int, default=20, help='Batch size.')
    parser.add_argument('--epochs', type=int, default=200, help='Number of epochs to train.')
    parser.add_argument('--lr', type=float, default=0.001, help='Initial learning rate.')
    parser.add_argument('--lr-scheduler-factor', type=float, default=0.1, help='Learning rate scheduler factor')
    parser.add_argument('--weight_decay', type=float, default=0.0005, help='Weight decay (L2 loss on parameters).')
    # Experiment/bookkeeping settings
    parser.add_argument('--save-weights', action='store_true', default=True, help='whether save the model')
    parser.add_argument('--save-embeds', action='store_true', default=False, help='whether save the embeddings')
    parser.add_argument('--workers', type=int, default=0, help='Num of workers for dataloader.')
    parser.add_argument('--project', default='runs/train', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.')
    parser.add_argument('--mode', type=str, default='client', help='python console use only')
    parser.add_argument('--port', type=int, default=64973, help='python console use only')
    return parser.parse_args()
# NOTE(review): the route decorator had lost its callee during extraction
# (`('/list_sessions', methods=['GET'])` alone is a syntax error);
# `app.route` is assumed from the Flask-style `request` usage — confirm
# the actual application object's name.
@app.route('/list_sessions', methods=['GET'])
def list_sessions():
    """Return all stored sessions, optionally paginated.

    Query string parameters: ``limit`` and ``skip`` (both optional;
    passed through as None when absent).
    """
    limit = request.args.get('limit', None)
    skip = request.args.get('skip', None)
    return api.get_all_sessions(limit, skip)
def test_nothing_on_after_test_case_execution():
    """The dummy's after-test-case hook must accept None args without error."""
    dummy = DummyStopping()
    dummy.after_test_case_execution_inside_thread(None, None)
class Discriminator(nn.Module):
    """Autoencoder-shaped discriminator: encode the input, then decode it."""

    def __init__(self, nc):
        super(Discriminator, self).__init__()
        self.enc = Encoder(nc)
        self.dec = Decoder(nc, True)

    def forward(self, input):
        encoded = self.enc(input)
        return self.dec(encoded)
def test_chrono_duration_subtraction_equivalence():
    """C++ chrono subtraction must agree with Python datetime subtraction."""
    earlier = datetime.datetime.today()
    later = datetime.datetime.today()
    py_delta = later - earlier
    cpp_delta = m.test_chrono4(later, earlier)
    for attr in ('days', 'seconds', 'microseconds'):
        assert getattr(cpp_delta, attr) == getattr(py_delta, attr)
def test_isinstance():
    """check_instances must classify each element of a mixed object list."""
    objects = [tuple(), dict(), m.Pet('Polly', 'parrot')]
    objects += [m.Dog('Molly')] * 4  # four references to one Dog instance
    expected = (True, True, True, True, True, False, False)
    assert m.check_instances(objects) == expected
def ascii_art(*obj, **kwds):
    """Return ASCII art for the given object(s).

    Keywords (consumed by the factory): ``separator``, ``baseline``,
    ``sep_baseline``.  Any leftover keyword raises ``ValueError``.
    A single object is built directly; several objects are concatenated
    with the separator.
    """
    separator, baseline, sep_baseline = _ascii_art_factory.parse_keywords(kwds)
    if kwds:
        raise ValueError('unknown keyword arguments: {0}'.format(list(kwds)))
    if len(obj) == 1:
        return _ascii_art_factory.build(obj[0], baseline=baseline)
    if isinstance(separator, AsciiArt):
        if sep_baseline is not None:
            # Copy before mutating so the caller's separator is untouched.
            from copy import copy
            separator = copy(separator)
            separator._baseline = sep_baseline
    else:
        separator = _ascii_art_factory.build(separator, baseline=sep_baseline)
    return _ascii_art_factory.concatenate(obj, separator, empty_ascii_art, baseline=baseline)
def collate(samples, pad_idx, eos_idx, left_pad_source=False, left_pad_target=False):
    """Collate a list of sample dicts into a padded mini-batch dict.

    Each sample provides 'id', 'source', 'code_image', 'code_mask' and
    optionally 'target' / 'prev_output_tokens'.  Returns {} for an empty
    list.
    """
    if not samples:
        return {}

    def merge(key, left_pad, move_eos_to_beginning=False):
        # Pad the per-sample tensors stored under `key` into one batch tensor.
        return data_utils.collate_tokens([s[key] for s in samples], pad_idx, eos_idx, left_pad, move_eos_to_beginning)

    sample_ids = np.array([s['id'] for s in samples])
    src_tokens = merge('source', left_pad=left_pad_source)
    src_lengths = torch.LongTensor([s['source'].ne(pad_idx).long().sum() for s in samples])
    code_images = np.array([s['code_image'] for s in samples])
    code_masks = torch.cat([s['code_mask'] for s in samples])

    target = None
    prev_output_tokens = None
    if samples[0].get('target', None) is None:
        # No targets: count source tokens instead.
        ntokens = src_lengths.sum().item()
    else:
        target = merge('target', left_pad=left_pad_target)
        tgt_lengths = torch.LongTensor([s['target'].ne(pad_idx).long().sum() for s in samples])
        ntokens = tgt_lengths.sum().item()
        if samples[0].get('prev_output_tokens', None) is not None:
            prev_output_tokens = merge('prev_output_tokens', left_pad=left_pad_target)

    return {
        'id': sample_ids,
        'nsentences': len(samples),
        'ntokens': ntokens,
        'net_input': {
            'src_tokens': src_tokens,
            'src_lengths': src_lengths,
            'code_masks': code_masks,
            'prev_output_tokens': prev_output_tokens,
        },
        'code_images': code_images,
        'target': target,
    }
def qa_for_label_file(dict_paragraphs: dict, label_file2: str, output_dir: str, separate=True):
    """Build QA files for just the paragraphs referenced by a label file.

    Reads the JSON label file, restricts ``dict_paragraphs`` to the keys it
    mentions (missing keys map to None), and forwards the result to
    ``base_case_to_qa_file``.
    """
    with open(label_file2, 'r') as fp:
        labels = json.load(fp)
    selected = {key: dict_paragraphs.get(key) for key in labels}
    base_case_to_qa_file(selected, output_dir, separate=separate)
class OracleSelectionMethod(SelectionMethod):
    """Model selection using the test-domain validation set (oracle)."""
    name = 'test-domain validation set (oracle)'

    def run_acc(self, run_records):
        """Return val/test accuracy from the final step of a run.

        Only records with exactly one test environment are considered;
        returns None when none remain.
        """
        run_records = run_records.filter(lambda r: len(r['args']['test_envs']) == 1)
        if not len(run_records):
            return None
        test_env = run_records[0]['args']['test_envs'][0]
        out_key = 'env{}_out_acc'.format(test_env)
        in_key = 'env{}_in_acc'.format(test_env)
        # Pick the record with the largest training step.
        last_record = run_records.sorted(lambda r: r['step'])[-1]
        return {'val_acc': last_record[out_key], 'test_acc': last_record[in_key]}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.