code stringlengths 101 5.91M |
|---|
class RoFormerConverter(Converter):
    """Convert a slow RoFormer tokenizer into a fast ``tokenizers.Tokenizer``
    backed by WordPiece and a custom Jieba-based pre-tokenizer."""

    def converted(self) -> Tokenizer:
        from .models.roformer.tokenization_utils import JiebaPreTokenizer

        source = self.original_tokenizer
        vocab = source.vocab
        fast_tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(source.unk_token)))

        # Mirror the slow tokenizer's basic-tokenizer options when available.
        lowercase = False
        strip_accents = False
        if hasattr(source, 'basic_tokenizer'):
            strip_accents = source.basic_tokenizer.strip_accents
            lowercase = source.basic_tokenizer.do_lower_case

        # Chinese chars are handled by Jieba, so the normalizer leaves them alone.
        fast_tokenizer.normalizer = normalizers.BertNormalizer(clean_text=True, handle_chinese_chars=False, strip_accents=strip_accents, lowercase=lowercase)
        fast_tokenizer.pre_tokenizer = pre_tokenizers.PreTokenizer.custom(JiebaPreTokenizer(vocab))

        cls_tok = str(source.cls_token)
        sep_tok = str(source.sep_token)
        fast_tokenizer.post_processor = processors.TemplateProcessing(
            single=f'{cls_tok}:0 $A:0 {sep_tok}:0',
            pair=f'{cls_tok}:0 $A:0 {sep_tok}:0 $B:1 {sep_tok}:1',
            special_tokens=[(cls_tok, source.cls_token_id), (sep_tok, source.sep_token_id)],
        )
        fast_tokenizer.decoder = decoders.WordPiece(prefix='##')
        return fast_tokenizer
class Writer():
    """PNG encoder.

    All encoding options are validated in the constructor; ``write()`` (or
    the lower-level ``write_*`` helpers) then streams rows of pixel values
    out as a PNG file.  Helper names (``check_sizes``, ``is_natural``,
    ``ProtocolError``, ``Default``, ``array``, ``adam7_generate``, ...) come
    from the enclosing module.
    """

    def __init__(self, width=None, height=None, size=None, greyscale=Default, alpha=False, bitdepth=8, palette=None, transparent=None, background=None, gamma=None, compression=None, interlace=False, planes=None, colormap=None, maxval=None, chunk_limit=(2 ** 20), x_pixels_per_unit=None, y_pixels_per_unit=None, unit_is_meter=False):
        """Validate and normalise every PNG encoding option.

        Raises ProtocolError for any inconsistent combination (bad sizes,
        alpha together with `transparent`, out-of-range bitdepths, ...).
        """
        # `size` is an alternative spelling of (width, height); after this
        # call only width/height are used.
        (width, height) = check_sizes(size, width, height)
        del size
        if ((not is_natural(width)) or (not is_natural(height))):
            raise ProtocolError('width and height must be integers')
        if ((width <= 0) or (height <= 0)):
            raise ProtocolError('width and height must be greater than zero')
        # PNG stores dimensions as 4-byte ints; the spec caps them at 2**31-1.
        if ((width > ((2 ** 31) - 1)) or (height > ((2 ** 31) - 1))):
            raise ProtocolError('width and height cannot exceed 2**31-1')
        if (alpha and (transparent is not None)):
            raise ProtocolError('transparent colour not allowed with alpha channel')
        # Accept either a single bitdepth or one per channel.
        try:
            len(bitdepth)
        except TypeError:
            bitdepth = (bitdepth,)
        for b in bitdepth:
            valid = (is_natural(b) and (1 <= b <= 16))
            if (not valid):
                raise ProtocolError(('each bitdepth %r must be a positive integer <= 16' % (bitdepth,)))
        palette = check_palette(palette)
        alpha = bool(alpha)
        colormap = bool(palette)
        # Default greyscale to off when a palette is supplied.
        if ((greyscale is Default) and palette):
            greyscale = False
        greyscale = bool(greyscale)
        if colormap:
            # Palette images always have exactly one (index) plane.
            color_planes = 1
            planes = 1
        else:
            color_planes = (3, 1)[greyscale]
            planes = (color_planes + alpha)
        # Broadcast a single bitdepth across all planes.
        if (len(bitdepth) == 1):
            bitdepth *= planes
        # May produce a rescale spec when the requested depths are not
        # directly representable in PNG.
        (bitdepth, self.rescale) = check_bitdepth_rescale(palette, bitdepth, transparent, alpha, greyscale)
        # Invariants guaranteed by check_bitdepth_rescale.
        if (bitdepth < 8):
            assert (greyscale or palette)
            assert (not alpha)
        if (bitdepth > 8):
            assert (not palette)
        transparent = check_color(transparent, greyscale, 'transparent')
        background = check_color(background, greyscale, 'background')
        self.width = width
        self.height = height
        self.transparent = transparent
        self.background = background
        self.gamma = gamma
        self.greyscale = greyscale
        self.alpha = alpha
        self.colormap = colormap
        self.bitdepth = int(bitdepth)
        self.compression = compression
        self.chunk_limit = chunk_limit
        self.interlace = bool(interlace)
        self.palette = palette
        self.x_pixels_per_unit = x_pixels_per_unit
        self.y_pixels_per_unit = y_pixels_per_unit
        self.unit_is_meter = bool(unit_is_meter)
        # PNG colour-type bit flags: 4 = alpha, 2 = colour, 1 = palette.
        self.color_type = (((4 * self.alpha) + (2 * (not greyscale))) + (1 * self.colormap))
        assert (self.color_type in (0, 2, 3, 4, 6))
        self.color_planes = color_planes
        self.planes = planes
        # Bytes per pixel; may be fractional for sub-byte bitdepths.
        self.psize = ((self.bitdepth / 8) * self.planes)

    def write(self, outfile, rows):
        """Write a PNG image to `outfile` from an iterable of rows, each row
        being a flat sequence of width*planes values.

        Returns the number of rows written; raises ProtocolError when a row
        has the wrong length or the row count mismatches the height.
        """
        # values per row
        vpr = (self.width * self.planes)

        def check_rows(rows):
            # Pass rows through, validating length where len() is available.
            for (i, row) in enumerate(rows):
                try:
                    wrong_length = (len(row) != vpr)
                except TypeError:
                    # row has no len() (an iterator); cannot check here.
                    wrong_length = False
                if wrong_length:
                    raise ProtocolError(('Expected %d values but got %d values, in row %d' % (vpr, len(row), i)))
                (yield row)
        if self.interlace:
            # Adam7 interlacing needs random access, so materialise every
            # value into one flat array first ('B' bytes or 'H' 16-bit).
            fmt = 'BH'[(self.bitdepth > 8)]
            a = array(fmt, itertools.chain(*check_rows(rows)))
            return self.write_array(outfile, a)
        nrows = self.write_passes(outfile, check_rows(rows))
        if (nrows != self.height):
            raise ProtocolError(('rows supplied (%d) does not match height (%d)' % (nrows, self.height)))
        return nrows

    def write_passes(self, outfile, rows):
        """Prepare rows (rescale, bit-pack, or 16-bit byte-split as needed)
        and hand them to write_packed; returns the number of rows written."""
        if self.rescale:
            rows = rescale_rows(rows, self.rescale)
        if (self.bitdepth < 8):
            # pack several sub-byte samples into each byte
            rows = pack_rows(rows, self.bitdepth)
        elif (self.bitdepth == 16):
            # split 16-bit samples into big-endian byte pairs
            rows = unpack_rows(rows)
        return self.write_packed(outfile, rows)

    def write_packed(self, outfile, rows):
        """Compress already byte-packed rows into IDAT chunks and finish the
        stream with IEND; returns the number of rows consumed."""
        self.write_preamble(outfile)
        if (self.compression is not None):
            compressor = zlib.compressobj(self.compression)
        else:
            compressor = zlib.compressobj()
        data = bytearray()
        # i stays -1 when `rows` is empty, so (i + 1) == 0 is returned.
        i = (- 1)
        for (i, row) in enumerate(rows):
            # each scanline is prefixed with filter type 0 (None)
            data.append(0)
            data.extend(row)
            if (len(data) > self.chunk_limit):
                # flush a chunk-sized slab through the compressor
                compressed = compressor.compress(data)
                if len(compressed):
                    write_chunk(outfile, b'IDAT', compressed)
                data = bytearray()
        # compress the tail and flush whatever zlib still buffers
        compressed = compressor.compress(bytes(data))
        flushed = compressor.flush()
        if (len(compressed) or len(flushed)):
            write_chunk(outfile, b'IDAT', (compressed + flushed))
        write_chunk(outfile, b'IEND')
        return (i + 1)

    def write_preamble(self, outfile):
        """Write the PNG signature and all pre-IDAT chunks (IHDR, and the
        optional gAMA/sBIT/PLTE/tRNS/bKGD/pHYs chunks)."""
        outfile.write(signature)
        write_chunk(outfile, b'IHDR', struct.pack('!2I5B', self.width, self.height, self.bitdepth, self.color_type, 0, 0, self.interlace))
        if (self.gamma is not None):
            # gAMA stores gamma * 100000 as an integer
            write_chunk(outfile, b'gAMA', struct.pack('!L', int(round((self.gamma * 100000.0)))))
        if self.rescale:
            # sBIT records the significant bits per plane before rescaling
            write_chunk(outfile, b'sBIT', struct.pack(('%dB' % self.planes), *[s[0] for s in self.rescale]))
        if self.palette:
            (p, t) = make_palette_chunks(self.palette)
            write_chunk(outfile, b'PLTE', p)
            if t:
                # palette alpha entries go into tRNS
                write_chunk(outfile, b'tRNS', t)
        if (self.transparent is not None):
            # one 16-bit sample for greyscale, three (RGB) otherwise
            if self.greyscale:
                fmt = '!1H'
            else:
                fmt = '!3H'
            write_chunk(outfile, b'tRNS', struct.pack(fmt, *self.transparent))
        if (self.background is not None):
            if self.greyscale:
                fmt = '!1H'
            else:
                fmt = '!3H'
            write_chunk(outfile, b'bKGD', struct.pack(fmt, *self.background))
        if ((self.x_pixels_per_unit is not None) and (self.y_pixels_per_unit is not None)):
            tup = (self.x_pixels_per_unit, self.y_pixels_per_unit, int(self.unit_is_meter))
            write_chunk(outfile, b'pHYs', struct.pack('!LLB', *tup))

    def write_array(self, outfile, pixels):
        """Write the image from one flat sequence of values (all rows
        concatenated), choosing interlaced or linear scanline extraction."""
        if self.interlace:
            if (type(pixels) != array):
                # interlace extraction uses extended slice assignment, which
                # needs a real array.array
                fmt = 'BH'[(self.bitdepth > 8)]
                pixels = array(fmt, pixels)
            return self.write_passes(outfile, self.array_scanlines_interlace(pixels))
        else:
            return self.write_passes(outfile, self.array_scanlines(pixels))

    def array_scanlines(self, pixels):
        """Generate rows (width*planes values each) from the flat array."""
        # values per row
        vpr = (self.width * self.planes)
        stop = 0
        for y in range(self.height):
            start = stop
            stop = (start + vpr)
            (yield pixels[start:stop])

    def array_scanlines_interlace(self, pixels):
        """Generate reduced scanlines in Adam7 interlace order from the flat
        pixel array."""
        # 'B' for bytes, 'H' for 16-bit values
        fmt = 'BH'[(self.bitdepth > 8)]
        # values per (full) row
        vpr = (self.width * self.planes)
        for lines in adam7_generate(self.width, self.height):
            for (x, y, xstep) in lines:
                # pixels per reduced row at this x offset and step
                ppr = int(math.ceil(((self.width - x) / float(xstep))))
                reduced_row_len = (ppr * self.planes)
                if (xstep == 1):
                    # full row: slice it out directly
                    offset = (y * vpr)
                    (yield pixels[offset:(offset + vpr)])
                    continue
                # Build the reduced row plane-by-plane using extended
                # slicing; initial extend just sizes the array.
                row = array(fmt)
                row.extend(pixels[0:reduced_row_len])
                offset = ((y * vpr) + (x * self.planes))
                end_offset = ((y + 1) * vpr)
                skip = (self.planes * xstep)
                for i in range(self.planes):
                    row[i::self.planes] = pixels[(offset + i):end_offset:skip]
                (yield row)
def add_T_label(img, label, bbox, draw_bg=True, text_bg_color=(255, 255, 255), text_color=(0, 0, 0)):
    """Draw a 'T'-shaped label above a bounding box: a vertical stem rising
    from the top edge of `bbox`, topped by the centred `label` text
    (optionally on a filled background rectangle).

    Args:
        img: image to annotate (modified in place and returned).
        label: text to render.
        bbox: (x1, y1, x2, y2) box the label belongs to.
        draw_bg: whether to fill a background rectangle behind the text.
        text_bg_color: colour of the stem and background.
        text_color: colour of the text.

    Returns:
        The annotated image.
    """
    # Measure once: the original called cv2.getTextSize twice with identical
    # arguments just to read width and height separately.
    (text_width, text_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
    x_center = ((bbox[0] + bbox[2]) // 2)
    y_top = (bbox[1] - 50)
    # vertical stem of the 'T'
    cv2.line(img, (x_center, bbox[1]), (x_center, y_top), text_bg_color, 3)
    # text sits just above the stem, centred horizontally with 5px margins
    y_bottom = y_top
    y_top = ((y_bottom - text_height) - 5)
    x_left = ((x_center - (text_width // 2)) - 5)
    x_right = ((x_center + (text_width // 2)) + 5)
    if draw_bg:
        cv2.rectangle(img, (x_left, (y_top - 3)), (x_right, y_bottom), text_bg_color, (- 1))
    cv2.putText(img, label, ((x_left + 5), (y_bottom - 7)), cv2.FONT_HERSHEY_SIMPLEX, 1, text_color, 2)
    return img
def get_word_idx(context, wordss, idx):
    """Map a (sentence, word) index pair to the character offset at which
    that word starts in `context`, using the 2-D span table."""
    spans_2d = get_2d_spans(context, wordss)
    sent_i, word_i = idx
    # span is (start, stop); return the start character position
    return spans_2d[sent_i][word_i][0]
def main(argv=None, **kw):
    """Command-line entry point: run ``easy_install`` through ``setup()``.

    `argv` defaults to ``sys.argv[1:]``; extra keyword args are forwarded to
    ``setup()``.  ``_patch_usage`` (module-level) rewrites the usage text
    while setup runs.
    """
    from setuptools import setup
    from setuptools.dist import Distribution

    class DistributionWithoutHelpCommands(Distribution):
        # Suppress the generic distutils "common commands" section in --help.
        common_usage = ''

        def _show_help(self, *args, **kw):
            # Keep the patched usage text active while help is printed.
            with _patch_usage():
                Distribution._show_help(self, *args, **kw)
    if (argv is None):
        argv = sys.argv[1:]
    with _patch_usage():
        # -q quiets setup itself; -v keeps easy_install verbose.
        setup(script_args=(['-q', 'easy_install', '-v'] + argv), script_name=(sys.argv[0] or 'easy_install'), distclass=DistributionWithoutHelpCommands, **kw)
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('seed', [[[(- 0.5), 0, 0.5], [1, (- 2), 1]], [[3, 0, 1, 2, 0], [0, (- 1), 0]], [2, 3]])
def test_compatibility(device, seed, T=100, D=2):
    """Check MaximumLikelihoodParameterGeneration against the SPTK `mlpg`
    command-line tool, for both explicit delta-window coefficients and
    regression widths.

    NOTE(review): the decorators had lost their ``@pytest.mark`` prefix
    (bare ``.parametrize(...)`` is a syntax error); restored here.
    """
    mlpg = diffsptk.MaximumLikelihoodParameterGeneration(T, seed=seed)
    if U.is_array(seed[0]):
        # seed holds window-coefficient vectors -> pass each via -d
        opt = ' '.join([('-d ' + ' '.join([str(w) for w in window])) for window in seed])
    else:
        # seed holds regression widths -> pass via -r
        opt = ('-r ' + ' '.join([str(width) for width in seed]))
    H = (len(seed) + 1)
    tmp1 = 'mlpg.tmp1'
    tmp2 = 'mlpg.tmp2'
    tmp3 = 'mlpg.tmp3'
    U.check_compatibility(device, mlpg, [f'nrand -s 1 -l {((T * D) * H)} > {tmp1}', f'step -l {((T * D) * H)} > {tmp2}', f'merge -l {(D * H)} -L {(D * H)} {tmp1} {tmp2} > {tmp3}'], f'cat {tmp1}', f'mlpg -l {D} {opt} -R 1 {tmp3}', [f'rm {tmp1} {tmp2} {tmp3}'], dx=(D * H), dy=D)
@pytest.mark.parametrize('indices', [None, [1, 3]])
def test_check_method_params(indices):
    """_check_method_params must index array-like params by `indices` and
    pass everything else (sparse rows, scalars, None) through unchanged.

    NOTE(review): restored the ``@pytest.mark`` prefix that had been
    stripped from the parametrize decorator (bare ``.parametrize`` is a
    syntax error).
    """
    X = np.random.randn(4, 2)
    _params = {'list': [1, 2, 3, 4], 'array': np.array([1, 2, 3, 4]), 'sparse-col': sp.csc_matrix([1, 2, 3, 4]).T, 'sparse-row': sp.csc_matrix([1, 2, 3, 4]), 'scalar-int': 1, 'scalar-str': 'xxx', 'None': None}
    result = _check_method_params(X, params=_params, indices=indices)
    # indices=None means "all rows"
    indices_ = (indices if (indices is not None) else list(range(X.shape[0])))
    # these kinds must be returned as the very same objects
    for key in ['sparse-row', 'scalar-int', 'scalar-str', 'None']:
        assert (result[key] is _params[key])
    # indexable kinds must have been subset with _safe_indexing
    assert (result['list'] == _safe_indexing(_params['list'], indices_))
    assert_array_equal(result['array'], _safe_indexing(_params['array'], indices_))
    assert_allclose_dense_sparse(result['sparse-col'], _safe_indexing(_params['sparse-col'], indices_))
class TwoAlignedDataset():
    """Pairs two AlignedDataset instances (one per phase) that share the same
    random crop offsets and flip decision per item."""

    def initialize(self, opt):
        """Build the two underlying aligned datasets from `opt`.

        BUG FIX: the original bound ``opt1``/``opt2`` to the SAME `opt`
        object, so ``opt2.phase = opt.phase2`` clobbered the phase seen by
        ``dataset1`` (which may hold a reference to its options). Shallow
        copies give each dataset an independent options object.
        """
        assert (opt.isTrain == True)
        import copy
        opt1 = copy.copy(opt)
        opt1.phase = opt.phase1
        opt1.dataset_model = 'aligned'
        self.dataset1 = AlignedDataset()
        self.dataset1.initialize(opt1)
        opt2 = copy.copy(opt)
        opt2.phase = opt.phase2
        opt2.dataset_model = 'aligned'
        self.dataset2 = AlignedDataset()
        self.dataset2.initialize(opt2)

    def __getitem__(self, index):
        """Fetch item `index` from both datasets with identical augmentation
        (same crop offsets, same flip) so the pair stays aligned."""
        w = self.dataset1.opt.loadSize
        h = self.dataset1.opt.loadSize
        w_offset = random.randint(0, max(0, ((w - self.dataset1.opt.fineSize) - 1)))
        h_offset = random.randint(0, max(0, ((h - self.dataset1.opt.fineSize) - 1)))
        is_flip = (random.random() < 0.5)
        item1 = self.dataset1.get_item(index, w_offset, h_offset, is_flip)
        item2 = self.dataset2.get_item(index, w_offset, h_offset, is_flip)
        return {'dataset1_input': item1, 'dataset2_input': item2}

    def __len__(self):
        # both datasets must stay in one-to-one correspondence
        assert (len(self.dataset1) == len(self.dataset2))
        return len(self.dataset1)

    def name(self):
        return 'TwoAlignedDataset'
def test_pixel_size_setter():
    """Setting pixel_size must consistently rescale the ranges, the derived
    extents, the resolution, and the internal grid points."""
    imgr = PersistenceImager(birth_range=(0, 1), pers_range=(0, 2), pixel_size=1)
    imgr.pixel_size = 0.75
    # each public attribute and its backing private field must both update
    expected = [
        ('pixel_size', 0.75),
        ('birth_range', ((- 0.25), 1.25)),
        ('pers_range', ((- 0.125), 2.125)),
        ('height', 2.25),
        ('width', 1.5),
        ('resolution', (2, 3)),
    ]
    for attr, value in expected:
        np.testing.assert_equal(getattr(imgr, attr), value)
        np.testing.assert_equal(getattr(imgr, '_' + attr), value)
    # grid point arrays are recomputed from the new ranges
    np.testing.assert_array_equal(imgr._ppnts, [(- 0.125), 0.625, 1.375, 2.125])
    np.testing.assert_array_equal(imgr._bpnts, [(- 0.25), 0.5, 1.25])
def minimal_grid(x, y, tol=1e-06, error_scale=1.0, y_reference=None):
    """Greedily select a minimal subset of the samples (x, y) such that a
    cubic spline through the selected points reproduces `y_reference`
    (default: `y` itself) within `tol`.

    Returns a boolean mask over `x` marking the selected samples.
    `error_scale` may be a scalar or an array weighting the error pointwise.
    """
    import numpy as np
    from scipy.interpolate import CubicSpline as spline
    from scipy.signal import find_peaks
    # cubic spline: seed the greedy search with deg + 1 points
    deg = 3
    if (y_reference is None):
        y_reference = y
    error_scale = np.asarray(error_scale)
    if (np.ndim(error_scale) == 0):
        # Scalar scale: fold it into the tolerance once instead of scaling
        # every error evaluation.
        if (error_scale != 1.0):
            tol /= error_scale[()]

        def error(ydiff):
            return ydiff
    else:

        def error(ydiff):
            return (error_scale * ydiff)

    def next_sample(y, y_greedy, sign):
        # Find peaks of the signed fit error exceeding tol, alternating the
        # sign between calls so over- and under-shoot are both chased.
        # Returns indices of new samples to include, or None when the fit is
        # within tolerance everywhere.
        if (sign[0] == 1):
            errors = error((y - y_greedy))
        else:
            errors = error((y_greedy - y))
        peaks = find_peaks(errors)[0]
        peaks = peaks[(np.abs(errors[peaks]) > tol)]
        sign[0] *= (- 1)
        if (not peaks.size):
            # no offending peaks with this sign: retry with the opposite sign
            peaks = find_peaks((- errors))[0]
            peaks = peaks[(np.abs(errors[peaks]) > tol)]
            sign[0] *= (- 1)
        if (not peaks.size):
            return None
        return peaks
    include_sample = np.zeros(len(x), dtype=bool)
    # start from deg+1 evenly spaced samples, endpoints included
    include_sample[np.linspace(0, (len(x) - 1), num=(deg + 1), dtype=int)] = True
    # sign is a one-element list so next_sample can mutate it across calls
    sign = [1]
    for _ in range(len(x)):
        s = spline(x[include_sample], y[include_sample])
        i_next = next_sample(y_reference, s(x), sign)
        if (i_next is None):
            break
        include_sample[i_next] = True
    return include_sample
def do_learning(extractor, model, optimizer, loader, k_step, device):
    """Run up to `k_step` optimisation steps of the two-head label `model` on
    features produced by `extractor`.

    Batches whose mask sums to zero are skipped.  Returns
    ``(acc_list, loss_list)``: per-batch masked joint accuracies and losses.
    NOTE(review): reads ``args.n_class`` from an enclosing/global scope.
    """
    model.train()
    acc_list = []
    loss_list = []
    for (idx, (img, mask, lbl_1, lbl_2, lbl_1_oh, lbl_2_oh)) in enumerate(loader):
        if (idx >= k_step):
            break
        if (mask.sum() == 0):
            # nothing labelled in this batch
            continue
        img = img.to(device)
        mask = mask.to(device)
        lbl_1 = lbl_1.to(device)
        lbl_2 = lbl_2.to(device)
        lbl_1_oh = lbl_1_oh.to(device)
        lbl_2_oh = lbl_2_oh.to(device)
        feature_map = extractor(img)
        # model returns predictions in (head-2, head-1) order
        (lbl_2_pred, lbl_1_pred) = model(feature_map, lbl_1_oh, lbl_2_oh)
        lbl_1_loss = get_loss(lbl_1_pred, lbl_1, mask)
        lbl_2_loss = get_loss(lbl_2_pred, lbl_2, mask)
        loss = (lbl_1_loss + lbl_2_loss)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Joint accuracy: both heads must be right, so compare combined
        # (lbl_1 * n_class + lbl_2) codes under the mask.
        lbl_1_p = torch.argmax(lbl_1_pred, dim=1)
        lbl_2_p = torch.argmax(lbl_2_pred, dim=1)
        lbl_p = ((lbl_1_p * args.n_class) + lbl_2_p)
        lbl_gt = ((lbl_1 * args.n_class) + lbl_2)
        # NOTE(review): `idx` is reused here, shadowing the batch index; the
        # loop resets it next iteration so it is harmless, but worth renaming.
        idx = torch.eq(lbl_p, lbl_gt)
        mask_new = mask.mul(idx)
        accuracy = (torch.sum(mask_new) / torch.sum(mask))
        acc_list.append(accuracy.item())
        loss_list.append(loss.item())
    return (acc_list, loss_list)
class TestConsumeOp(unittest.TestCase):
    """Checks that `operator_benchmark._consume` keeps benchmarked ops alive
    through torch.jit tracing (i.e. they are not dead-code-eliminated)."""

    def test_jit_consume_op(self):
        iters = 6

        def foo(x):
            # _consume forces torch.sum to stay in the traced graph even
            # though only the final result is returned.
            for i in range(iters):
                result = torch.ops.operator_benchmark._consume(torch.sum(x))
            return result
        r = torch.jit.trace(foo, torch.rand(2, 2))
        graph = str(r.graph)
        # tracing unrolls the Python loop, so aten::sum must appear once per
        # iteration
        occurance = graph.count('aten::sum')
        x = torch.rand(2, 2)
        value = r(x)
        self.assertEqual(value, torch.sum(x))
        self.assertEqual(occurance, iters)
def to_float(x, name='ToFloat'):
    """Cast `x` to float32, supporting both TF1 (``tf.to_float``) and TF2,
    where the op only exists under ``tf.compat.v1``."""
    try:
        cast = tf.to_float
    except AttributeError:
        # TF2: the op was moved into the compat.v1 namespace
        cast = tf.compat.v1.to_float
    return cast(x, name)
def test_grad_add_check_numerics_ops():
    """add_check_numerics_ops must turn a non-finite gradient (the -inf from
    d(1/x)/dx at x=0) into an InvalidArgumentError at session.run time."""
    with make_scope() as session:
        x = tf.Variable(initial_value=0.0, name='x')
        session.run(x.initializer)
        y = (1.0 / x)
        grad_x = tf.gradients(y, x)[0]
        print('grad_x:', grad_x.eval())
        # sanity check on Python's -inf string form, then on the gradient:
        # d(1/x)/dx = -1/x**2, which is -inf at x = 0
        assert_equal(str(float('-inf')), '-inf')
        assert_equal(str(grad_x.eval()), '-inf')
        # with x = 1 everything is finite, so the checked train op succeeds
        session.run(x.assign(1.0))
        opt = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=1.0)
        train_op = opt.minimize(y, var_list=[x])
        check = add_check_numerics_ops([train_op])
        session.run(check)
        # back at x = 0 the numeric check must raise
        session.run(x.assign(0.0))
        try:
            session.run(check)
        except tf.errors.InvalidArgumentError as exc:
            print(('Expected exception: %r' % exc))
        else:
            assert False, 'should have raised an exception'
def get_aircraft_datasets(train_transform, test_transform, train_classes=range(60), open_set_classes=range(60, 100), balance_open_set_eval=False, split_train_val=True, seed=0):
    """Build the FGVC-Aircraft splits for open-set evaluation.

    Returns a dict with 'train', 'val', 'test_known' (closed-set classes)
    and 'test_unknown' (open-set classes) datasets. With
    ``split_train_val=False``, training uses the whole trainval split and
    validation falls back to the known test set.
    """
    np.random.seed(seed)

    def _load_subset(split, transform, classes):
        # load one raw split and keep only the requested classes
        dataset = FGVCAircraft(root=aircraft_root, transform=transform, split=split)
        return subsample_classes(dataset, include_classes=classes)

    whole_train = _load_subset('trainval', train_transform, train_classes)
    (split_train, split_val) = get_train_val_split(whole_train)
    # validation images must use eval-time preprocessing
    split_val.transform = test_transform
    known_test = _load_subset('test', test_transform, train_classes)
    unknown_test = _load_subset('test', test_transform, open_set_classes)
    if balance_open_set_eval:
        (known_test, unknown_test) = get_equal_len_datasets(known_test, unknown_test)
    if split_train_val:
        train_dataset, val_dataset = split_train, split_val
    else:
        train_dataset, val_dataset = whole_train, known_test
    return {'train': train_dataset, 'val': val_dataset, 'test_known': known_test, 'test_unknown': unknown_test}
def import_module_error_class(module_name):
    """Class decorator for optional dependencies: the decorated class stays
    importable, but instantiating it raises ImportError telling the user to
    install `module_name`.

    Returns the decorator; the decorator returns the class with its
    ``__init__`` replaced.
    """
    def decorate(cls):
        def import_error_init(*args, **kwargs):
            raise ImportError(f'Please install {module_name} to use {cls.__name__}.')
        # Assign the plain function: it raises unconditionally, so binding
        # semantics are irrelevant and the original MethodType(cls) wrapper
        # was unnecessary indirection.
        cls.__init__ = import_error_init
        return cls
    return decorate
class HybridQA_Dataset():
    """Loads the HybridQA corpus: per-table JSON files for the table itself
    and its linked passages, keyed by table id."""

    def __init__(self, config):
        """Scan `config.table_dir` for table ids and eagerly load every
        (table, text) JSON pair into ``self.dataset``."""
        print('Loading HybridQA ')
        self.table_dir = config.table_dir
        self.text_dir = config.text_dir
        self.table_id_list = []
        # os.walk yields (dirpath, dirnames, filenames); collect ids from
        # every file found under table_dir, stripping the .json extension
        g = os.walk(self.table_dir)
        for (_, _, file_list) in g:
            for file_name in file_list:
                self.table_id_list.append(file_name.replace('.json', ''))
        self.table_num = len(self.table_id_list)
        self.dataset = {}
        for table_id in tqdm(self.table_id_list):
            self.dataset[table_id] = self.load_data_sample(table_id)
        print('Data loaded. Number of tables: {}'.format(self.table_num))

    def load_data_sample(self, table_id):
        """Load one table's JSON and its companion text JSON.

        Uses os.path.join so the configured directories work with or without
        a trailing separator (the original ``dir + id + '.json'``
        concatenation silently produced bogus paths without one).
        """
        with open(os.path.join(self.table_dir, (table_id + '.json')), 'r') as f:
            table_sample = json.load(f)
        with open(os.path.join(self.text_dir, (table_id + '.json')), 'r') as f:
            text_sample = json.load(f)
        return {'table_id': table_id, 'table': table_sample, 'text': text_sample}
def generate_ChangePoint(inter_prob, intra_prob, alpha):
    """Generate a 150-step dynamic stochastic-block-model graph sequence with
    community-structure change points, and write it as an edge list.

    At each time step in `cps` the block configuration advances (wrapping)
    through 2, 4, and 10 equally-populated communities of 500 nodes total;
    `alpha` controls how strongly each snapshot follows the new SBM.
    The output file name encodes the three parameters.
    """
    # time steps at which the community structure switches
    cps = [15, 30, 60, 75, 90, 105, 135]
    fname = f'ChangePoint_{inter_prob}_{intra_prob}_{alpha}.txt'
    # candidate block configurations: 2, 4 and 10 equal communities
    list_sizes = [[250, 250], [125, 125, 125, 125], ([50] * 10)]
    list_probs = [construct_SBM_block(sizes, inter_prob, intra_prob) for sizes in list_sizes]
    # start from the 4-community configuration
    list_idx = 1
    sizes = list_sizes[list_idx]
    probs = list_probs[list_idx]
    maxt = 150
    G_0 = nx.stochastic_block_model(sizes, probs)
    G_0 = nx.Graph(G_0)
    G_t = G_0
    G_times = []
    G_times.append(G_t)
    for t in range(maxt):
        if (t in cps):
            # advance to the next configuration, wrapping around the list
            if ((list_idx + 1) > (len(list_sizes) - 1)):
                list_idx = 0
            else:
                list_idx = (list_idx + 1)
            sizes = list_sizes[list_idx]
            probs = list_probs[list_idx]
        # the snapshot step is identical on change-point and ordinary steps,
        # so it is done once here (the original duplicated it in both
        # branches)
        G_t = SBM_snapshot(G_t, alpha, sizes, probs)
        G_times.append(G_t)
        print(('generating ' + str(t)), end='\r')
    to_edgelist(G_times, fname)
class DefaultDict(dict):
    """Autovivifying dict: looking up a missing key stores and returns a new
    empty DefaultDict, so nested assignment like ``d['a']['b'] = 1`` works
    without pre-creating intermediate levels."""

    def __getitem__(self, item):
        if item not in self:
            # store the fresh child so repeated lookups yield the same object
            self[item] = type(self)()
        return dict.__getitem__(self, item)
def _linear_normalize(weights):
weights = torch.max(weights, torch.zeros_like(weights))
if (torch.sum(weights) > 1e-08):
return (weights / torch.sum(weights))
return torch.zeros_like(weights) |
class Trie():
    """Character/token trie used to enumerate the legal continuations of a
    prefix; unknown prefixes only permit the end-of-sequence symbol."""

    def __init__(self, eos):
        # TreeNode.child is assumed to autovivify missing keys (see insert)
        self.root = TreeNode()
        self.eos = eos

    def insert(self, word):
        """Add `word` to the trie, creating nodes along the way."""
        node = self.root
        for token in word:
            # plain indexing creates the child when it is absent
            node = node.child[token]

    def get_next_layer(self, word):
        """Return the tokens that may follow the prefix `word`; if the prefix
        is not in the trie, only EOS is allowed."""
        node = self.root
        for token in word:
            node = node.child.get(token)
            if node is None:
                return [self.eos]
        return list(node.child.keys())
@hydra.main(config_path='../hydra_config', config_name='black_box_opt')
def main(config):
    """Hydra entry point for a black-box sequence-optimisation run logged to
    Weights & Biases.

    Returns the relative hypervolume metric of the run, or NaN when the run
    raised.  NOTE(review): the decorator had been garbled to a bare
    ``(config_path=..., config_name=...)`` tuple (a syntax error); restored
    as ``@hydra.main``.
    """
    random.seed(None)
    # flatten the hydra config into 'config/...' keys for wandb
    log_config = flatten_config(OmegaConf.to_container(config, resolve=True), sep='/')
    log_config = {'/'.join(('config', key)): val for (key, val) in log_config.items()}
    wandb.login(host=config.wandb_host)
    wandb.init(project='lambo', config=log_config, mode=config.wandb_mode, group=config.exp_name)
    config['job_name'] = wandb.run.name
    (config, _) = startup(config)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        try:
            tokenizer = hydra.utils.instantiate(config.tokenizer)
            bb_task = hydra.utils.instantiate(config.task, tokenizer=tokenizer, candidate_pool=[])
            project_root = Path(os.getcwd()).parents[2]
            (base_candidates, base_targets, all_seqs, all_targets) = bb_task.task_setup(config, project_root=project_root)
            # cap GPyTorch's Cholesky size while the optimiser runs
            max_chol_sz = config.surrogate.get('max_cholesky_size', int(100000.0))
            with max_cholesky_size(max_chol_sz):
                optimizer = hydra.utils.instantiate(config.optimizer, bb_task=config.task, surrogate=config.surrogate, acquisition=config.acquisition, encoder=config.encoder, tokenizer=tokenizer)
                metrics = optimizer.optimize(base_candidates, base_targets, all_seqs, all_targets, log_prefix=config.task.log_prefix)
            # keep only the leaf metric names
            metrics = {key.split('/')[(- 1)]: val for (key, val) in metrics.items()}
            ret_val = metrics['hypervol_rel']
        except Exception as err:
            logging.exception(err)
            ret_val = float('NaN')
    wandb.finish()
    return ret_val
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution whose padding equals the dilation, so spatial size is
    preserved at stride 1; bias is omitted (a norm layer usually follows)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
class OrderedEnqueuer(SequenceEnqueuer):
    """Enqueuer that yields Sequence batches in deterministic index order
    (optionally shuffled once per epoch), using a thread or process pool."""

    def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
        self.sequence = sequence
        self.use_multiprocessing = use_multiprocessing
        self.shuffle = shuffle
        self.workers = 0
        # pool created in start()
        self.executor = None
        # bounded queue of in-flight async results
        self.queue = None
        # producer thread feeding the pool (see _run)
        self.run_thread = None
        # threading.Event set by stop()
        self.stop_signal = None

    def is_running(self):
        return ((self.stop_signal is not None) and (not self.stop_signal.is_set()))

    def start(self, workers=1, max_queue_size=10):
        """Spin up the worker pool and the producer thread; `max_queue_size`
        bounds how many batches may be in flight at once."""
        if self.use_multiprocessing:
            self.executor = multiprocessing.Pool(workers)
        else:
            self.executor = ThreadPool(workers)
        self.queue = queue.Queue(max_queue_size)
        self.stop_signal = threading.Event()
        self.run_thread = threading.Thread(target=self._run)
        self.run_thread.daemon = True
        self.run_thread.start()

    def _run(self):
        """Producer loop: submit every sequence index to the pool, epoch
        after epoch, until stop() is signalled."""
        sequence = list(range(len(self.sequence)))
        while True:
            if self.shuffle:
                random.shuffle(sequence)
            for i in sequence:
                if self.stop_signal.is_set():
                    return
                # put() blocks when the queue is full, throttling submission
                self.queue.put(self.executor.apply_async(get_index, (self.sequence, i)), block=True)
            self.sequence.on_epoch_end()

    def get(self):
        """Generator over batches, in submission order.

        Stops the enqueuer and re-raises (wrapped in StopIteration) on any
        worker error; None results are silently skipped.
        """
        try:
            while self.is_running():
                inputs = self.queue.get(block=True).get()
                if (inputs is not None):
                    (yield inputs)
        except Exception as e:
            self.stop()
            raise StopIteration(e)

    def stop(self, timeout=None):
        """Signal shutdown, drain the queue, and join the pool and thread."""
        self.stop_signal.set()
        # Clearing the internal deque directly requires holding the queue's
        # mutex, resetting the unfinished-task count, and waking any producer
        # blocked on a full queue.
        with self.queue.mutex:
            self.queue.queue.clear()
            self.queue.unfinished_tasks = 0
            self.queue.not_full.notify()
        self.executor.close()
        self.executor.join()
        self.run_thread.join(timeout)
@pytest.fixture(scope='module')
def nlp_pipeline():
    """Module-scoped English Stanza pipeline, built once and shared across
    the module's tests because construction is expensive.

    NOTE(review): the decorator had been garbled to a bare ``(scope='module')``
    (a syntax error); restored as ``@pytest.fixture``.
    """
    nlp = stanza.Pipeline(dir=TEST_MODELS_DIR, lang='en')
    return nlp
class ActionNormWrapper(gym.Wrapper):
    """Gym wrapper that rescales every Box sub-space of a Dict action space
    to [-1, 1]; ``step`` maps normalised actions back to the original bounds
    before forwarding them to the wrapped env."""

    def __init__(self, env):
        super().__init__(env)
        assert isinstance(env.action_space, gym.spaces.Dict), env.action_space
        ac_space = []
        # original per-key bounds, remembered for denormalisation in step()
        self._low = {}
        self._high = {}
        for (k, space) in env.action_space.spaces.items():
            if isinstance(space, gym.spaces.Box):
                self._low[k] = low = space.low
                self._high[k] = high = space.high
                # replace with a unit box of the same shape
                space = gym.spaces.Box((- np.ones_like(low)), np.ones_like(high), dtype=np.float32)
            ac_space.append((k, space))
        self.action_space = gym.spaces.Dict(ac_space)

    def step(self, action):
        """Denormalise the Box entries of `action` from [-1, 1] back to their
        original [low, high] ranges (with clipping) and step the env."""
        action = action.copy()
        for k in self._low:
            # [-1, 1] -> [low, high], then clip for safety
            action[k] = ((((action[k] + 1) / 2) * (self._high[k] - self._low[k])) + self._low[k])
            action[k] = np.clip(action[k], self._low[k], self._high[k])
        return self.env.step(action)
def load_data():
    """Load the MSBB RNA expression matrix and AD labels and return
    stratified train/validation/test splits.

    Returns ``(X_train_total, y_train_total, X_train, y_train, X_vald,
    y_vald, X_test, y_test)``.  Paths are hard-coded to the lab's shared
    storage.
    """
    X_df = pd.read_csv(('/projects/leelab2/data/AD_DATA/Nicasia/processed' + '/PCG_normalized/no_covar_correction/MSBB_RNA.tsv'), sep='\t')
    y_df = pd.read_csv(('/projects/leelab2/data/AD_DATA/Nicasia/processed' + '/samples_neuropath_prenorm/MSBB_RNA.tsv'), sep='\t')
    # Expression file is genes x samples; transpose to samples x genes and
    # promote the first row (the 'PCG' gene-id row) to column names.
    X_df = X_df.T
    X_df.columns = X_df.iloc[0]
    X_df.drop('PCG', axis=0, inplace=True)
    X_df.dropna(how='any', axis=1, inplace=True)
    X_df.index = X_df.index.astype(int)
    # Align labels to the expression samples, drop samples without an AD
    # label, then re-align the expression matrix to the surviving samples.
    y_df.set_index('sample_name', inplace=True)
    y_df = y_df.loc[X_df.index]
    y_df.dropna(how='any', subset=['AD'], inplace=True)
    X_df = X_df.loc[y_df.index]
    y = y_df['AD'].values.astype(int)
    X_df = X_df.astype(float)
    # 15% held-out test, then 15% of the remainder as validation; both
    # splits stratified on the AD label with a fixed seed.
    (X_train_total, X_test, y_train_total, y_test) = train_test_split(X_df, y, test_size=0.15, random_state=0, stratify=y)
    (X_train, X_vald, y_train, y_vald) = train_test_split(X_train_total, y_train_total, test_size=0.15, random_state=0, stratify=y_train_total)
    return (X_train_total, y_train_total, X_train, y_train, X_vald, y_vald, X_test, y_test)
class NumericTestCase(TorchTestCase):
    """Numeric check that ``nn.BatchNorm2d`` (momentum=1, affine=False)
    matches a hand-computed batch normalisation on 2D input."""

    def testNumericBatchNorm(self):
        a = torch.rand(16, 10)
        # momentum=1 makes the running stats equal the last batch's stats
        bn = nn.BatchNorm2d(10, momentum=1, eps=1e-05, affine=False)
        bn.train()
        a_var1 = Variable(a, requires_grad=True)
        b_var1 = bn(a_var1)
        loss1 = b_var1.sum()
        loss1.backward()
        # Reference: normalise with the biased std, clamped at eps.
        a_var2 = Variable(a, requires_grad=True)
        a_mean2 = a_var2.mean(dim=0, keepdim=True)
        a_std2 = torch.sqrt(handy_var(a_var2, unbias=False).clamp(min=1e-05))
        b_var2 = ((a_var2 - a_mean2) / a_std2)
        loss2 = b_var2.sum()
        loss2.backward()
        # running stats, outputs, and input gradients must all agree
        self.assertTensorClose(bn.running_mean, a.mean(dim=0))
        self.assertTensorClose(bn.running_var, handy_var(a))
        self.assertTensorClose(a_var1.data, a_var2.data)
        self.assertTensorClose(b_var1.data, b_var2.data)
        self.assertTensorClose(a_var1.grad, a_var2.grad)
class TestCharSvm(unittest.TestCase):
    """End-to-end smoke test of the character-SVM predictor on a tiny run."""

    def test_charsvm(self):
        with tempfile.TemporaryDirectory() as workdir:
            workdir_path = Path(workdir)
            # one trial on 4 training / 10 test examples of the gnad10 set
            run_experiment(
                predictor_fn=(lambda: CharSvmPredictor()),
                output_dir=workdir_path,
                n_examples=4,
                n_trials=1,
                dataset='gnad10',
                config=None,
                n_test_examples=10,
            )
            all_results = get_results(workdir_path)
            self.assertEqual(1, len(all_results))
            first = all_results[0]
            # loose lower bounds: accuracy and macro-F1 above chance
            self.assertGreater(first['acc'], 35)
            self.assertGreater(first['mf1'], 36)
class Triples():
    """Builds training triples from a Ranking by sampling positives and
    negatives per query, then sub-sampling and shuffling the result."""

    def __init__(self, ranking, seed=12345):
        # seed the module RNG so sampling/shuffling is reproducible
        random.seed(seed)
        self.seed = seed
        ranking = Ranking.cast(ranking)
        self.ranking_provenance = ranking.provenance()
        self.qid2rankings = ranking.todict()

    def create(self, positives, depth):
        """Sample triples for every query.

        `positives` is a list of (maxBest, maxDepth) pairs; `depth` bounds
        negative sampling.  Stores the result as ``self.Triples`` and
        returns the raw list.
        """
        assert all(((len(x) == 2) for x in positives))
        assert all(((maxBest <= maxDepth) for (maxBest, maxDepth) in positives)), positives
        self.positives = positives
        self.depth = depth
        # local renamed from `Triples` to stop shadowing the class name
        triples = []
        non_empty_qids = 0
        for (processing_idx, qid) in enumerate(self.qid2rankings):
            l = sample_for_query(qid, self.qid2rankings[qid], positives, depth, False, None)
            non_empty_qids += (len(l) > 0)
            triples.extend(l)
            if ((processing_idx % 10000) == 0):
                # fixed "unqiue" -> "unique" typo in the progress message
                print_message(f'#> Done with {(processing_idx + 1)} questions! {str((len(triples) / 1000))}k triples for {non_empty_qids} unique QIDs.')
        print_message(f'#> Sub-sample the triples (if > {MAX_NUM_TRIPLES})..')
        print_message(f'#> len(Triples) = {len(triples)}')
        if (len(triples) > MAX_NUM_TRIPLES):
            triples = random.sample(triples, MAX_NUM_TRIPLES)
        print_message('#> Shuffling the triples...')
        random.shuffle(triples)
        self.Triples = Examples(data=triples)
        return triples

    def save(self, new_path):
        """Save the triples with full provenance (seed, sampling params, and
        the source ranking's provenance)."""
        provenance = Provenance()
        provenance.source = 'Triples::create'
        provenance.seed = self.seed
        provenance.positives = self.positives
        provenance.depth = self.depth
        provenance.ranking = self.ranking_provenance
        Examples(data=self.Triples, provenance=provenance).save(new_path)
def generate_jsfile(dirpath, name, out_path):
    """Parse TIU/GDMA/SDMA/CDMA profiling dumps under `dirpath` and emit
    ``<out_path>/profile_data.js``, the data file consumed by the PerfAI web
    viewer.  Also sets the module-global CHIP_ARCH as a side effect.
    """
    tiuProcessor = TIU(dirpath)
    tiu_instance = tiuProcessor.process_file()
    gdmaProcessor = DMA(dirpath, 'GDMA')
    gdma_instances = gdmaProcessor.process_file()
    sdmaProcessor = DMA(dirpath, 'SDMA')
    sdma_instances = sdmaProcessor.process_file()
    cdmaProcessor = DMA(dirpath, 'CDMA')
    cdma_instances = cdmaProcessor.process_file()
    # take the chip-architecture parameters from whichever processor found them
    processors = [tiuProcessor, gdmaProcessor, sdmaProcessor, cdmaProcessor]
    chipArchArgs = None
    for processor in processors:
        if processor.chipArgs:
            chipArchArgs = processor.chipArgs
            break
    global CHIP_ARCH
    CHIP_ARCH = chipArchArgs['Chip Arch']
    print('CHIP_ARCH:', CHIP_ARCH)
    core_num = int(chipArchArgs['Core Num'])
    ddrBw = pd.to_numeric(chipArchArgs['DDR Max BW(GB/s)'])
    L2Bw = pd.to_numeric(chipArchArgs['L2 Max BW(GB/s)'])
    # SDMA/CDMA rows are optional; only add their chart categories when present
    categories = ['TPU_BD', 'TPU_GDMA']
    if (len(sdma_instances) > 0):
        categories.append('TPU_SDMA')
    if (len(cdma_instances) > 0):
        categories.append('TPU_CDMA')
    lmem_size = int(chipArchArgs['Tpu Lmem Size'])
    lane_num = int(chipArchArgs['NPU Num'])
    lane_size = (lmem_size // lane_num)
    lmem_partition = generate_partition(lmem_size, lane_num, 'BANK')
    # per-core accumulators filled in by process_data()
    cycle_data_dict = {f'time_data{i}': [] for i in range(0, core_num)}
    lmem_op_dict = {f'lmem_op_record{i}': [] for i in range(0, core_num)}
    max_corenum = max(len(tiu_instance), len(gdma_instances), len(sdma_instances), len(cdma_instances))
    # iterate per core; each engine contributes only for cores it has data on
    for idx in range(max_corenum):
        if (idx < len(tiu_instance)):
            tiudf = tiu_instance[idx]
            process_data('TIU', tiudf, idx, 0, [ddrBw, L2Bw], lane_num, cycle_data_dict, lmem_op_dict, lane_size)
        if (idx < len(gdma_instances)):
            gdmadf = gdma_instances[idx]
            process_data('GDMA', gdmadf, idx, 1, [ddrBw, L2Bw], lane_num, cycle_data_dict, lmem_op_dict, lane_size)
        if (idx < len(sdma_instances)):
            sdmadf = sdma_instances[idx]
            process_data('SDMA', sdmadf, idx, categories.index('TPU_SDMA'), [ddrBw, L2Bw], lane_num, cycle_data_dict, lmem_op_dict, lane_size)
        if (idx < len(cdma_instances)):
            cdmadf = cdma_instances[idx]
            process_data('CDMA', cdmadf, idx, categories.index('TPU_CDMA'), [ddrBw, L2Bw], lane_num, cycle_data_dict, lmem_op_dict, lane_size)
    summary = SummaryProcessor(tiuProcessor, gdmaProcessor, sdmaProcessor, cdmaProcessor)
    summarydf = summary.make_summary()
    # Decimal is not JS-serialisable via str(list); stringify those cells
    summary_data = [[(str(x) if isinstance(x, Decimal) else x) for x in lst] for lst in summarydf.values.tolist()]
    if (CHIP_ARCH == 'sg2260'):
        (ddr_ratios, l2m_ratios) = calculate_ratios(cycle_data_dict)
    else:
        (ddr_ratios, l2m_ratios) = ([], [])
    data_filepath = f'{out_path}/profile_data.js'
    # Each write below emits one `let <name> = <value>` JS statement (the
    # triple-quoted f-strings end with a literal newline).
    with open(data_filepath, 'w') as js:
        js.write(f'''let page_caption = "PerfAI: {name}"
''')
        js.write(f'''let platform = "Platform: {CHIP_ARCH}"
''')
        js.write(f'''let configs = {chipArchArgs}
''')
        js.write('let summary_caption= "Summary Table"\n')
        js.write(f'''let summary_header = {summarydf.columns.tolist()}
''')
        js.write(f'''let summary_data = {summary_data}
''')
        js.write(f'''let ddr_bandwidth = {ddrBw}
''')
        js.write(f'''let l2_bandwidth = {L2Bw}
''')
        js.write(f'''let ddr_ratios = {ddr_ratios}
''')
        js.write(f'''let l2m_ratios = {l2m_ratios}
''')
        js.write(f'''let categories = {categories}
''')
        # column layout of the per-event time records
        time_header = ['category', 'begin_time', 'end_time', 'Duration', 'func_type', 'height', 'cmd', 'func_name', 'uArchRate/BW', 'Data Type', 'Info', 'Msg_Id', 'Sd/Wt_Count']
        # columns the viewer offers as filters
        filter_cols = [time_header.index(c) for c in ['category', 'func_type']]
        js.write(f'''let filter_cols = {filter_cols}
''')
        js.write(f'''let lmem_partition = {lmem_partition}
''')
        js.write(f'''let time_header = {time_header}
''')
        # emit each per-core record list as a window-scoped JS array
        for lmem_op in lmem_op_dict.keys():
            js_content = ''
            for (i, sublist) in enumerate(lmem_op_dict[lmem_op]):
                js_content += f'''{sublist},
'''
            js.write(f'''window.{lmem_op} = [{js_content}]
''')
        for keyname in cycle_data_dict.keys():
            js_content = ''
            for (i, sublist) in enumerate(cycle_data_dict[keyname]):
                js_content += f'''{sublist},
'''
            js.write(f'''window.{keyname} = [{js_content}]
''')
def _recalculateCenters(y, balancedCluster, k):
Centers = []
kAux = 0
while (kAux < k):
vectorAux = np.zeros(len(y))
for i in range(0, len(balancedCluster)):
if (int(kAux) == int(balancedCluster[i])):
for j in range(0, len(y)):
vectorAux[j] += y[(j, i)]
vectorAux /= k
Centers.append(vectorAux)
kAux += 1
return Centers |
def convert_vi_vsfc(paths, dataset_name, *args):
    """Convert the Vietnamese UIT-VSFC sentiment corpus into the project's
    sentiment-data format.

    `paths` supplies SENTIMENT_BASE (input root) and SENTIMENT_DATA_DIR
    (output); extra positional args are accepted for dispatcher-signature
    compatibility and ignored.
    """
    in_directory = os.path.join(paths['SENTIMENT_BASE'], 'vietnamese', '_UIT-VSFC')
    out_directory = paths['SENTIMENT_DATA_DIR']
    process_vsfc_vietnamese.main(in_directory, out_directory, dataset_name)
@validate_params({'y_true': ['array-like'], 'y_pred': ['array-like'], 'sample_weight': ['array-like', None]}, prefer_skip_nested_validation=True)
def macro_averaged_mean_absolute_error(y_true, y_pred, *, sample_weight=None):
    """Macro-averaged mean absolute error for ordinal/imbalanced targets.

    The MAE is computed separately over the samples of each true class and
    the per-class values are averaged, so every class counts equally
    regardless of its support.

    NOTE(review): the decorator name had been truncated to ``_params(...)``
    (a syntax error at module level); restored as ``@validate_params``.
    """
    (_, y_true, y_pred) = _check_targets(y_true, y_pred)
    if (sample_weight is not None):
        sample_weight = column_or_1d(sample_weight)
    else:
        # uniform weights when none are supplied
        sample_weight = np.ones(y_true.shape)
    check_consistent_length(y_true, y_pred, sample_weight)
    labels = unique_labels(y_true, y_pred)
    mae = []
    for possible_class in labels:
        # per-class MAE over the samples whose true label is this class
        indices = np.flatnonzero((y_true == possible_class))
        mae.append(mean_absolute_error(y_true[indices], y_pred[indices], sample_weight=sample_weight[indices]))
    return (np.sum(mae) / len(mae))
def _get_boolean_value(value):
    """Return True iff ``value`` equals the module-level ``TRUE`` constant, case-insensitively.

    Parameters
    ----------
    value : str
        Textual boolean flag; lower-cased before comparison.
    """
    # The comparison already yields a bool — no if/else ladder needed.
    return value.lower() == TRUE
def thwart_lemma_3_5(k, n, m, a, b, c, d=0, complement=False, explain_construction=False):
    """Build an orthogonal array via Lemma 3.5 of Colbourn-Dinitz-Wojtas.

    Constructs an OA over GF(n) (n a prime power), truncates its last three
    columns to sizes a, b, c (and optionally a fourth to size d), then hands
    the truncated OA to ``wilson_construction`` for final assembly.

    With ``complement=True`` the sizes a, b, c are replaced by n-a, n-b, n-c.
    With ``explain_construction=True`` only a citation string is returned.
    """
    from sage.arith.misc import is_prime_power
    from sage.rings.finite_rings.finite_field_constructor import FiniteField as GF
    if complement:
        (a, b, c) = ((n - a), (n - b), (n - c))
    if explain_construction:
        return ((('Lemma 3.5 with n={},m={},a={},b={},c={},d={} from:\n' + ' Charles J.Colbourn, Jeffrey H. Dinitz, Mieczyslaw Wojtas,\n') + ' Thwarts in transversal designs,\n') + ' Designs, Codes and Cryptography 5, no. 3 (1995): 189-197.').format(n, m, a, b, c, d)
    # Preconditions taken directly from the lemma's hypotheses.
    assert is_prime_power(n), 'n(={}) must be a prime power'.format(n)
    assert ((a <= n) and (b <= n) and (c <= n) and (d <= n)), 'a,b,c,d (={},{},{},{}) must be <=n(={})'.format(a, b, c, d, n)
    assert (((a + b) + c) <= (n + 1)), '{}={}+{}+{}=a+b+c>n+1={}+1 violates the assumptions'.format(((a + b) + c), a, b, c, n)
    assert (((k + 3) + bool(d)) <= (n + 1)), 'There exists no OA({},{}).'.format(((k + 3) + bool(d)), n)
    G = GF(n, prefix='x')
    G_set = sorted(G)
    # The integer relabelling below assumes 0 and 1 sort first in the field.
    assert ((G_set[0] == G.zero()) and (G_set[1] == G.one())), 'problem with the ordering of {}'.format(G)
    G_to_int = {v: i for (i, v) in enumerate(G_set)}
    # Columns of the OA are the maps (i, j) -> i + x*j for each nonzero x;
    # the two inserted columns are the raw coordinates j and i themselves.
    OA = [[G_to_int[(i + (x * j))] for i in G_set for j in G_set] for x in G_set[1:((k + 2) + bool(d))]]
    OA.insert(0, [j for i in range(n) for j in range(n)])
    OA.insert(0, [i for i in range(n) for j in range(n)])
    OA = sorted(zip(*OA))
    # Rotate each block so the three columns to be truncated sit at the end.
    OA = [list((B[3:] + B[:3])) for B in OA]
    # Points of the third column that co-occur with small first/second values
    # must be avoided by the third truncated set.
    third_complement = set((B[(- 1)] for B in OA if ((B[(- 3)] < a) and (B[(- 2)] < b))))
    assert ((n - len(third_complement)) >= c)
    first_set = list(range(a))
    second_set = list(range(b))
    third_set = [x for x in range(n) if (x not in third_complement)][:c]
    last_sets = [first_set, second_set, third_set]
    if complement:
        last_sets = [set(range(n)).difference(s) for s in last_sets]
    sizes = [len(_) for _ in last_sets]
    last_sets_dict = [{v: i for (i, v) in enumerate(s)} for s in last_sets]
    # Relabel the kept points 0..size-1 in each truncated column; points
    # outside the kept set become None (i.e. truncated away).
    for (i, D) in enumerate(last_sets_dict):
        kk = ((len(OA[0]) - 3) + i)
        for R in OA:
            R[kk] = (D[R[kk]] if (R[kk] in D) else None)
    if d:
        # Optional fourth truncated column of size d.
        for R in OA:
            if (R[(- 4)] >= d):
                R[(- 4)] = None
        sizes.insert(0, d)
    return wilson_construction(OA, k, n, m, sizes, check=False)
def _called_with_cfg(*args, **kwargs):
    """Return True when a ``_CfgNode`` was supplied first positionally or via ``cfg=``."""
    if args and isinstance(args[0], _CfgNode):
        return True
    # pop() mirrors the original: the 'cfg' key is consumed from the local kwargs.
    return isinstance(kwargs.pop('cfg', None), _CfgNode)
def test_wrap_experiment_invalid_options():
    """Passing a dict of options (here ``logdir``) to a wrapped experiment must raise ValueError."""
    prefix = 'wrap_exp_invalid_options'
    exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
    # Start from a clean experiment directory.
    _hard_rmtree(exp_path)
    logdir = 'data/local/wrap_exp_invalid_options/test_exp'
    # NOTE(review): the next line looks like a decorator for test_exp whose
    # '@'-prefix (likely '@wrap_experiment') was lost in extraction — confirm.
    _experiment(prefix=prefix)
    def test_exp(ctxt):
        del ctxt
    with pytest.raises(ValueError):
        # dict options are not a valid first argument for the wrapped function.
        test_exp(dict(logdir=logdir))
class ProcessContext():
    """Tracks a set of spawned worker processes and surfaces the first failure.

    Holds one error queue per process; ``join`` waits on process sentinels and
    re-raises the failing worker's traceback (or its signal/exit-code when no
    traceback was queued).
    """
    def __init__(self, processes, error_queues):
        _python_version_check()
        self.error_queues = error_queues
        self.processes = processes
        # Map each process's wait-sentinel back to its index in `processes`.
        self.sentinels = {process.sentinel: index for (index, process) in enumerate(processes)}
    def pids(self):
        """Return the OS pids of all tracked processes."""
        return [int(process.pid) for process in self.processes]
    def join(self, timeout=None):
        """Join processes; return True once all have exited cleanly.

        Returns False if some processes are still running when ``timeout``
        elapses. If any process exits with a nonzero code, terminates the
        remaining processes and raises an Exception describing the failure.
        """
        if (len(self.sentinels) == 0):
            # Every process already joined in a previous call.
            return True
        ready = multiprocessing.connection.wait(self.sentinels.keys(), timeout=timeout)
        error_index = None
        for sentinel in ready:
            index = self.sentinels.pop(sentinel)
            process = self.processes[index]
            process.join()
            if (process.exitcode != 0):
                # Stop at the first failure; remaining exited processes are
                # picked up by later join() calls if no error occurred.
                error_index = index
                break
        if (error_index is None):
            return (len(self.sentinels) == 0)
        # A worker failed: tear down every surviving process before raising.
        for process in self.processes:
            if process.is_alive():
                process.terminate()
            process.join()
        if self.error_queues[error_index].empty():
            # No traceback was queued — report the raw signal / exit code.
            exitcode = self.processes[error_index].exitcode
            if (exitcode < 0):
                name = signal.Signals((- exitcode)).name
                raise Exception(('process %d terminated with signal %s' % (error_index, name)))
            else:
                raise Exception(('process %d terminated with exit code %d' % (error_index, exitcode)))
        original_trace = self.error_queues[error_index].get()
        msg = ('\n\n-- Process %d terminated with the following error:\n' % error_index)
        msg += original_trace
        raise Exception(msg)
def yiq_to_rgb(y, i, q):
    """Convert a YIQ color to RGB, clamping each channel to [0.0, 1.0].

    Uses the classic FCC NTSC conversion matrix (the constants historically
    used by the standard library's ``colorsys.yiq_to_rgb``). The original
    body multiplied the I/Q terms by 0.0 / 1.0, which silently discarded the
    chroma information — a mangled coefficient table.

    Returns the tuple ``(r, g, b)`` with each component in [0.0, 1.0].
    """
    r = y + 0.956 * i + 0.621 * q
    g = y - 0.272 * i - 0.647 * q
    b = y - 1.106 * i + 1.703 * q
    # Clamp out-of-gamut results into the valid RGB range.
    r = min(max(r, 0.0), 1.0)
    g = min(max(g, 0.0), 1.0)
    b = min(max(b, 0.0), 1.0)
    return (r, g, b)
def get_project_path(ExpID):
    """Return the unique ``Experiments/`` folder whose name contains ``ExpID``."""
    matches = glob.glob(('Experiments/*%s*' % ExpID))
    # Exactly one experiment folder may carry this id.
    assert (len(matches) == 1), 'There should be only ONE folder with <ExpID> in its name'
    return matches[0]
def to_type(handle: int) -> Object:
    """Wrap a scene-object handle in the concrete Object subclass for its type.

    Raises ValueError when the handle's type has no known wrapper class.
    """
    wrappers = {
        sim.sim_object_shape_type: Shape,
        sim.sim_object_dummy_type: Dummy,
        sim.sim_object_path_type: CartesianPath,
        sim.sim_object_joint_type: Joint,
        sim.sim_object_visionsensor_type: VisionSensor,
        sim.sim_object_forcesensor_type: ForceSensor,
        sim.sim_object_proximitysensor_type: ProximitySensor,
        sim.sim_object_camera_type: Camera,
        sim.sim_object_octree_type: Octree,
    }
    object_type = sim.simGetObjectType(handle)
    wrapper = wrappers.get(object_type)
    if wrapper is None:
        raise ValueError
    return wrapper(handle)
class BUDUDrp1mat(SpectralMatrix):
    """Banded spectral matrix between UD test and trial bases (Legendre 'LG' quadrature).

    ``assemble`` returns the matrix as a dict mapping diagonal offset to the
    array of entries on that diagonal; the matrix is symmetric (the -1/-2
    diagonals copy the +1/+2 ones).
    """
    def assemble(self, method):
        """Compute and return the diagonal entries {offset: values}."""
        (test, trial) = (self.testfunction, self.trialfunction)
        assert isinstance(test[0], UD)
        assert isinstance(trial[0], UD)
        assert (test[0].quad == 'LG')
        k = np.arange((test[0].N - 1))
        # Fix: the original assigned a throwaway dict here ({0: 2k+2}) that was
        # immediately overwritten on the next line; the dead assignment is removed.
        d = {0: (((4 * (k + 1)) / ((2 * k) + 1)) / ((2 * k) + 3)), 1: (((4 / ((2 * k[:(- 1)]) + 1)) / ((2 * k[:(- 1)]) + 3)) / ((2 * k[:(- 1)]) + 5)), 2: ((((- 2) * (k[:(- 2)] + 2)) / ((2 * k[:(- 2)]) + 3)) / ((2 * k[:(- 2)]) + 5))}
        # Symmetric lower diagonals.
        d[(- 1)] = d[1].copy()
        d[(- 2)] = d[2].copy()
        return d
def main(args=None):
    """Entry point: parse CLI args, seed RNGs, then dispatch to train() or evaluate()."""
    opts = parse_args(args=args)
    utils.set_random_seed(opts['seed'])
    logger.info('Running tagger in {} mode'.format(opts['mode']))
    if opts['mode'] == 'train':
        train(opts)
    else:
        evaluate(opts)
# NOTE(review): the decorator's '@pytest.fixture' prefix appears to have been
# lost during extraction (the bare keyword tuple was a syntax error); restored
# here — the ``params``/``request.param`` pattern is pytest's. Confirm upstream.
@pytest.fixture(name='random_basis_func_cls', params=[RandomFourierFeatures, RandomFourierFeaturesCosine])
def _random_basis_func_cls_fixture(request):
    """Parametrized fixture yielding each random-Fourier-feature class in turn."""
    return request.param
def create_model_single_conv2d(input_shape):
    """Build a minimal Keras model: a single Conv2D(2 filters, 3x3 kernel) layer."""
    model_input = Input(shape=input_shape)
    model_output = Conv2D(2, 3)(model_input)
    return keras.Model(inputs=model_input, outputs=model_output)
class Registry(Printable):
    """Central store of Registrable objects keyed by (scope, type, name) triples."""
    __objects: Dict[(Tuple[(str, str, str)], Registrable)]

    def __init__(self):
        self.__objects = {}

    def register(self, scope: str, type: str, name: str, obj: Registrable) -> Registrable:
        """Register ``obj`` under (scope, type, name); the key must be unused."""
        key = (scope, type, name)
        assert key not in self.__objects, 'object with name {} already exist.'.format(name)
        obj.doRegister(scope, type, name)
        self.__objects[key] = obj
        return self.__objects[key]

    def get(self, scope: str, type: str, name: str) -> Registrable:
        """Look up a registered object; the key must exist."""
        key = (scope, type, name)
        assert key in self.__objects, 'object with name {} does not exist.'.format(name)
        return self.__objects[key]

    def has(self, scope: str, type: str, name: str) -> bool:
        """Return True when an object is registered under the triple."""
        return (scope, type, name) in self.__objects

    def getByType(self, scope: str, type: str) -> List[Registrable]:
        """All objects registered under the given scope and type, in insertion order."""
        return [obj for (s, t, _), obj in self.__objects.items() if s == scope and t == type]

    def getAll(self) -> Dict[(Tuple[(str, str, str)], Registrable)]:
        """Return the live internal mapping (not a copy)."""
        return self.__objects

    def getByScope(self, scope: str) -> List[Registrable]:
        """All objects registered under the given scope, in insertion order."""
        return [obj for (s, _, _), obj in self.__objects.items() if s == scope]

    def print(self, indent: int):
        """Render the registry (and each object) as an indented multi-line string."""
        out = (' ' * indent) + 'Registry:\n'
        indent += 4
        for (scope, type, name), val in self.__objects.items():
            out += (' ' * indent) + 'Object {}/{}/{}:\n'.format(scope, type, name)
            out += val.print(indent + 4)
        return out
# NOTE(review): the decorator's '@pytest.mark' prefix appears lost in extraction
# (a leading '.parametrize(...)' is a syntax error); restored — confirm upstream.
@pytest.mark.parametrize('valid_index', [[[0, 1]]])
def test_find_lambda_control_star_output(valid_index: List[List[int]]) -> None:
    """find_lambda_control_star must return a truthy result for the given valid index."""
    assert find_lambda_control_star(r_hat, valid_index, lambdas)
class CC3MDataset(BaseDataset):
    """Conceptual Captions 3M image/caption dataset.

    Reads split metadata from TSV files under ``./meta_data/cc3m`` (column 0 is
    the caption, column 1 the relative image path) and serves image tensors
    plus tokenized captions, with optional negative ("false") samples.
    """
    def __init__(self, *args, split='', **kwargs):
        # Only the three canonical splits are accepted; 'val' and 'test' share
        # the same underlying metadata file.
        assert (split in ['train', 'val', 'test'])
        self.split = split
        self.metadata = None
        self._load_metadata()
        if (split == 'train'):
            names = ['cc3m_train']
        elif (split == 'val'):
            names = ['cc3m_val']
        elif (split == 'test'):
            names = ['cc3m_val']
        print(names, ': ', len(self.metadata), 'samples in total.')
        super().__init__(*args, **kwargs, names=names, text_column_name='caption')
    def _load_metadata(self):
        """Load the split's TSV metadata into ``self.metadata`` (a DataFrame)."""
        metadata_dir = './meta_data/cc3m'
        split_files = {'train': 'cc3m_training_success_full.tsv', 'val': 'cc3m_validation_success_full.tsv', 'test': 'cc3m_validation_success_full.tsv'}
        target_split_fp = split_files[self.split]
        metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t')
        self.metadata = metadata
    def _get_image_path(self, sample):
        """Return (absolute path, relative path) of the sample's image file."""
        rel_dir = 'training'
        if (self.split != 'train'):
            rel_dir = 'validation'
        # sample[1] holds the image's file name within the split directory.
        rel_fp = os.path.join(rel_dir, sample[1])
        return (os.path.join(self.data_dir, rel_fp), rel_fp)
    def _get_caption(self, sample):
        """Return the sample's caption text (column 0 of the metadata row)."""
        return sample[0]
    def get_raw_image(self, sample):
        """Open and return the sample's image as an RGB PIL image."""
        (abs_fp, rel_fp) = self._get_image_path(sample)
        img = Image.open(abs_fp).convert('RGB')
        if (img is None):
            raise Exception('Invalid img!', rel_fp)
        else:
            return img
    def get_image(self, index, sample, image_key='image'):
        """Return the augmented image tensor dict for this sample."""
        image = self.get_raw_image(sample)
        image_tensor = self.image_aug(image, self.transforms)
        return {'video': image_tensor, 'vid_index': sample[1], 'cap_index': index, 'raw_index': index}
    def get_false_image(self, rep, image_key='image'):
        """Return a randomly drawn negative image tensor (for contrastive training)."""
        random_index = random.randint(0, (len(self.metadata) - 1))
        sample = self.metadata.iloc[random_index]
        image = self.get_raw_image(sample)
        image_tensor = self.image_aug(image, self.transforms)
        return {f'false_video_{rep}': image_tensor}
    def get_text(self, raw_index, sample):
        """Tokenize and return the sample's caption with padding to max_text_len."""
        text = sample[0]
        encoding = self.tokenizer(text, padding='max_length', truncation=True, max_length=self.max_text_len, return_special_tokens_mask=True)
        return {'text': (text, encoding), 'vid_index': sample[1], 'cap_index': raw_index, 'raw_index': raw_index}
    def get_false_text(self, rep):
        """Return a randomly drawn negative caption (note: no padding here, unlike get_text)."""
        random_index = random.randint(0, (len(self.metadata) - 1))
        sample = self.metadata.iloc[random_index]
        text = sample[0]
        encoding = self.tokenizer(text, truncation=True, max_length=self.max_text_len, return_special_tokens_mask=True)
        return {f'false_text_{rep}': (text, encoding)}
    def get_suite(self, index):
        """Assemble the full training dict: image, caption, and any negatives.

        NOTE(review): the while-loop is vestigial — `result` is always set to
        True on the first pass and there is no exception handling to retry on
        bad samples; presumably a retry loop in the original. Confirm.
        """
        result = None
        while (result is None):
            sample = self.metadata.iloc[index]
            ret = dict()
            ret.update(self.get_image(index, sample))
            if (not self.image_only):
                txt = self.get_text(index, sample)
                ret.update({'replica': (True if (txt['cap_index'] > 0) else False)})
                ret.update(txt)
            for i in range(self.draw_false_image):
                ret.update(self.get_false_image(i))
            for i in range(self.draw_false_text):
                ret.update(self.get_false_text(i))
            result = True
        return ret
    def __len__(self):
        return len(self.metadata)
    def __getitem__(self, index):
        return self.get_suite(index)
def register_Ns3EpcUeNas_methods(root_module, cls):
    """Register the ns3::EpcUeNas bindings on ``cls``.

    Generated-style PyBindGen registration code: declares the constructors and
    each public method (name, return type, parameter list, and flags such as
    is_virtual/is_const/is_static). Do not hand-edit individual signatures
    without checking the corresponding C++ header.
    """
    cls.add_constructor([param('ns3::EpcUeNas const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('ActivateEpsBearer', 'void', [param('ns3::EpsBearer', 'bearer'), param('ns3::Ptr< ns3::EpcTft >', 'tft')])
    cls.add_method('Connect', 'void', [])
    cls.add_method('Connect', 'void', [param('uint16_t', 'cellId'), param('uint16_t', 'dlEarfcn')])
    cls.add_method('ConnectMc', 'void', [param('uint16_t', 'cellId'), param('uint16_t', 'dlEarfcn'), param('uint16_t', 'mmWaveCellId')])
    cls.add_method('Disconnect', 'void', [])
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    cls.add_method('GetAsSapUser', 'ns3::LteAsSapUser *', [])
    cls.add_method('GetCsgId', 'uint32_t', [], is_const=True)
    cls.add_method('GetState', 'ns3::EpcUeNas::State', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')])
    cls.add_method('SetAsSapProvider', 'void', [param('ns3::LteAsSapProvider *', 's')])
    cls.add_method('SetCsgId', 'void', [param('uint32_t', 'csgId')])
    cls.add_method('SetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'dev')])
    cls.add_method('SetForwardUpCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')])
    cls.add_method('SetImsi', 'void', [param('uint64_t', 'imsi')])
    cls.add_method('SetMmWaveAsSapProvider', 'void', [param('ns3::LteAsSapProvider *', 's')])
    cls.add_method('StartCellSelection', 'void', [param('uint16_t', 'dlEarfcn')])
    return
class FullyShardedDataParallel(FSDP):
    """Thin wrapper over fairscale's FSDP adding a sharded-state-dict mode.

    With ``use_sharded_state=True``, each rank saves/loads only its local
    shard; otherwise rank 0 gathers the full state dict and broadcasts it on
    load.
    """
    def __init__(self, *args, use_sharded_state: bool=False, **kwargs):
        if (not has_FSDP):
            raise ImportError('Cannot find FullyShardedDataParallel. Please install fairscale with: pip install fairscale')
        super().__init__(*args, **kwargs)
        self.use_sharded_state = use_sharded_state
    def unwrapped_module(self) -> torch.nn.Module:
        """Return the wrapped module, stripping the FlattenParamsWrapper layer when present.

        NOTE(review): upstream defines this as a ``@property``; the decorator
        may have been lost in extraction — confirm before calling.
        """
        if self.flatten_parameters:
            return self.module.module
        else:
            return self.module
    def state_dict(self, destination=None, prefix='', keep_vars=False):
        """Return the local shard, or (non-sharded mode) the full dict on rank 0 only.

        Non-zero ranks still invoke super().state_dict() for its gather side
        effects but return an empty dict (or ``destination``).
        """
        if self.use_sharded_state:
            return super().local_state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
        elif (self.rank == 0):
            return super().state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
        else:
            super().state_dict()
            return (destination or {})
    def load_state_dict(self, state_dict, strict=True, model_cfg=None):
        """Load either the local shard or a rank-0 state dict broadcast to all ranks.

        ``model_cfg`` is accepted for interface compatibility but unused here.
        """
        if self.use_sharded_state:
            return super().load_local_state_dict(state_dict, strict=strict)
        else:
            # Every rank must load identical weights; rank 0's dict is broadcast.
            state_dict = dist_utils.broadcast_object(state_dict, src_rank=0, group=self.process_group)
            return super().load_state_dict(state_dict, strict=strict)
def get_chinese_rouge_function(rouge_type: str) -> Callable[([str, str], float)]:
    """Return a (prediction, reference) -> score function for one ROUGE variant,
    tokenizing at the Chinese-character level."""
    tokenizer = ChineseTokenizer()
    variant_scorer = rouge_scorer.RougeScorer([rouge_type], use_stemmer=True, tokenizer=tokenizer)
    return partial(rouge_score, scorer=variant_scorer, rouge_type=rouge_type)
def _check_lat_long(val: Any, clean: bool) -> Any:
    """Validate (and optionally decompose) a latitude/longitude string.

    With ``clean=True`` returns a 9-tuple
    ``(dds, mins, secs, hem, dds2, mins2, secs2, hem2, code)`` where the last
    element is a status code (0 = null input, 1 = invalid, 2 = parsed);
    invalid/null results carry None in the eight value slots. With
    ``clean=False`` simply returns True/False validity.
    """
    if (val in NULL_VALUES):
        # Null-like input: not an error, but nothing to parse (code 0).
        return ((((None,) * 8) + (0,)) if clean else False)
    # Normalize doubled single-quotes to a double-quote before matching.
    mch = re.match(LAT_LONG_PATTERN, re.sub("''", '"', str(val)))
    if (not mch):
        return ((((None,) * 8) + (1,)) if clean else False)
    if ((not mch.group('deg')) or (not mch.group('deg2'))):
        # Both the latitude and longitude degree fields are mandatory.
        return ((((None,) * 8) + (1,)) if clean else False)
    # First coordinate: decimal degrees = deg + min/60 + sec/3600.
    mins = (float(mch.group('min')) if mch.group('min') else 0)
    secs = (float(mch.group('sec')) if mch.group('sec') else 0)
    dds = ((float(mch.group('deg')) + (mins / 60)) + (secs / 3600))
    hem = (mch.group('dir_back') or mch.group('dir_front'))
    # Second coordinate, same decomposition.
    mins2 = (float(mch.group('min2')) if mch.group('min2') else 0)
    secs2 = (float(mch.group('sec2')) if mch.group('sec2') else 0)
    dds2 = ((float(mch.group('deg2')) + (mins2 / 60)) + (secs2 / 3600))
    hem2 = (mch.group('dir_back2') or mch.group('dir_front2'))
    # Range checks: minutes/seconds in [0, 60); degrees within +/-90 (lat) and
    # +/-180 (long) — absolute bounds when a hemisphere letter is present,
    # signed bounds otherwise; at most one hemisphere marker per coordinate.
    if ((not (0 <= mins < 60)) or (not (0 <= mins2 < 60)) or (not (0 <= secs < 60)) or (not (0 <= secs2 < 60)) or (hem and (not (0 <= float(mch.group('deg')) <= 90))) or (hem2 and (not (0 <= float(mch.group('deg2')) <= 180))) or ((not hem) and (abs(float(mch.group('deg'))) > 90)) or ((not hem2) and (abs(float(mch.group('deg2'))) > 180)) or (abs(dds) > 90) or (abs(dds2) > 180) or (sum([(mch.group('dir_back') is not None), (mch.group('dir_front') is not None)]) > 1) or (sum([(mch.group('dir_back2') is not None), (mch.group('dir_front2') is not None)]) > 1)):
        return ((((None,) * 8) + (1,)) if clean else False)
    return ((dds, mins, secs, hem, dds2, mins2, secs2, hem2, 2) if clean else True)
def remove_output_labels(s) -> str:
    """Strip a uniform leading label (e.g. 'o1 = ', 'o2 : ') or indent from every line.

    The number of characters removed is the minimum label-match width over all
    non-empty lines, so relative alignment is preserved. An all-empty input is
    returned unchanged.
    """
    label = re.compile('^o+[0-9]+ (=|:) |^ *')
    lines = s.split('\n')
    spans = [label.match(line) for line in lines if line]
    if not spans:
        return s
    width = min(m.end() - m.start() for m in spans)
    return '\n'.join(line[width:] for line in lines)
class WidthSelFunc(Protocol):
    """Structural type for width-selection callables.

    Any callable taking (table, attrs, centers, params) and returning a Query
    satisfies this protocol; no runtime behavior is defined here.
    """
    def __call__(self, table: Table, attrs: List[str], centers: List[Any], params: Dict[(str, Any)]) -> Query:
        ...
class DefaultWorker(Worker):
    """Default rollout worker: steps an agent in an environment and batches trajectories.

    Accumulates per-step data (observations, actions, rewards, terminals and
    info dicts) into internal buffers, which ``collect_rollout`` drains into a
    TrajectoryBatch.
    """
    def __init__(self, *, seed, max_path_length, worker_number):
        super().__init__(seed=seed, max_path_length=max_path_length, worker_number=worker_number)
        self.agent = None
        self.env = None
        # Per-rollout accumulation buffers; reset by collect_rollout().
        self._observations = []
        self._last_observations = []
        self._actions = []
        self._rewards = []
        self._terminals = []
        self._lengths = []
        self._agent_infos = defaultdict(list)
        self._env_infos = defaultdict(list)
        self._prev_obs = None
        self._path_length = 0
        self.worker_init()
    def worker_init(self):
        """Seed this worker deterministically but distinctly from its siblings."""
        deterministic.set_seed((self._seed + self._worker_number))
    def update_agent(self, agent_update):
        """Install new agent parameters (dict/tuple/ndarray) or replace the agent object."""
        if isinstance(agent_update, (dict, tuple, np.ndarray)):
            self.agent.set_param_values(agent_update)
        elif (agent_update is not None):
            self.agent = agent_update
    def update_env(self, env_update):
        """Apply an EnvUpdate, or swap in a new gym.Env (closing the old one)."""
        if (env_update is not None):
            if isinstance(env_update, EnvUpdate):
                self.env = env_update(self.env)
            elif isinstance(env_update, gym.Env):
                if (self.env is not None):
                    self.env.close()
                self.env = env_update
            else:
                raise TypeError('Uknown environment update type.')
    def start_rollout(self):
        """Reset env and agent, beginning a fresh trajectory."""
        self._path_length = 0
        self._prev_obs = self.env.reset()
        self.agent.reset()
    def step_rollout(self):
        """Take one environment step; return True when the rollout is finished."""
        if (self._path_length < self._max_path_length):
            (a, agent_info) = self.agent.get_action(self._prev_obs)
            (next_o, r, d, env_info) = self.env.step(a)
            self._observations.append(self._prev_obs)
            self._rewards.append(r)
            self._actions.append(a)
            for (k, v) in agent_info.items():
                self._agent_infos[k].append(v)
            for (k, v) in env_info.items():
                self._env_infos[k].append(v)
            self._path_length += 1
            self._terminals.append(d)
            if (not d):
                self._prev_obs = next_o
                return False
        # Terminal reached or max length hit: record final state and length.
        self._lengths.append(self._path_length)
        self._last_observations.append(self._prev_obs)
        return True
    def collect_rollout(self):
        """Drain all buffers and return them packaged as a TrajectoryBatch."""
        observations = self._observations
        self._observations = []
        last_observations = self._last_observations
        self._last_observations = []
        actions = self._actions
        self._actions = []
        rewards = self._rewards
        self._rewards = []
        terminals = self._terminals
        self._terminals = []
        env_infos = self._env_infos
        self._env_infos = defaultdict(list)
        agent_infos = self._agent_infos
        self._agent_infos = defaultdict(list)
        # Convert per-key lists to arrays in place before building the batch.
        for (k, v) in agent_infos.items():
            agent_infos[k] = np.asarray(v)
        for (k, v) in env_infos.items():
            env_infos[k] = np.asarray(v)
        lengths = self._lengths
        self._lengths = []
        return TrajectoryBatch(self.env.spec, np.asarray(observations), np.asarray(last_observations), np.asarray(actions), np.asarray(rewards), np.asarray(terminals), dict(env_infos), dict(agent_infos), np.asarray(lengths, dtype='i'))
    def rollout(self):
        """Run one complete rollout and return its TrajectoryBatch."""
        self.start_rollout()
        while (not self.step_rollout()):
            pass
        return self.collect_rollout()
    def shutdown(self):
        """Release the environment's resources."""
        self.env.close()
def _save(im, fp, filename):
    """Encode ``im`` as JPEG and write it to ``fp``.

    Reads encoder options from ``im.encoderinfo`` (quality, subsampling,
    quantization tables, dpi, icc_profile, progressive, optimize, exif, ...),
    normalizes them, then delegates to ``ImageFile._save`` with the assembled
    encoder configuration. ``filename`` is unused here but part of the
    save-handler signature.
    """
    try:
        rawmode = RAWMODE[im.mode]
    except KeyError:
        raise OSError(('cannot write mode %s as JPEG' % im.mode))
    info = im.encoderinfo
    dpi = [round(x) for x in info.get('dpi', (0, 0))]
    quality = info.get('quality', (- 1))
    subsampling = info.get('subsampling', (- 1))
    qtables = info.get('qtables')
    # 'keep' and named presets expand into concrete subsampling/qtable choices.
    if (quality == 'keep'):
        quality = (- 1)
        subsampling = 'keep'
        qtables = 'keep'
    elif (quality in presets):
        preset = presets[quality]
        quality = (- 1)
        subsampling = preset.get('subsampling', (- 1))
        qtables = preset.get('quantization')
    elif (not isinstance(quality, int)):
        raise ValueError('Invalid quality setting')
    else:
        if (subsampling in presets):
            subsampling = presets[subsampling].get('subsampling', (- 1))
        if (isinstance(qtables, str) and (qtables in presets)):
            qtables = presets[qtables].get('quantization')
    # Map textual subsampling names to the encoder's integer codes.
    if (subsampling == '4:4:4'):
        subsampling = 0
    elif (subsampling == '4:2:2'):
        subsampling = 1
    elif (subsampling == '4:2:0'):
        subsampling = 2
    elif (subsampling == '4:1:1'):
        # NOTE(review): mapped to the same code as 4:2:0 here — confirm this
        # aliasing matches the encoder's expectations.
        subsampling = 2
    elif (subsampling == 'keep'):
        if (im.format != 'JPEG'):
            raise ValueError("Cannot use 'keep' when original image is not a JPEG")
        subsampling = get_sampling(im)
    def validate_qtables(qtables):
        """Normalize qtables (string / tuple / list / dict) into a list of 64-entry lists."""
        if (qtables is None):
            return qtables
        if isinstance(qtables, str):
            try:
                # Parse whitespace-separated ints, ignoring '#' comments per line.
                lines = [int(num) for line in qtables.splitlines() for num in line.split('#', 1)[0].split()]
            except ValueError:
                raise ValueError('Invalid quantization table')
            else:
                qtables = [lines[s:(s + 64)] for s in range(0, len(lines), 64)]
        if isinstance(qtables, (tuple, list, dict)):
            if isinstance(qtables, dict):
                qtables = convert_dict_qtables(qtables)
            elif isinstance(qtables, tuple):
                qtables = list(qtables)
            if (not (0 < len(qtables) < 5)):
                raise ValueError('None or too many quantization tables')
            for (idx, table) in enumerate(qtables):
                try:
                    if (len(table) != 64):
                        raise TypeError
                    # array('B') also range-checks every entry (0..255).
                    table = array.array('B', table)
                except TypeError:
                    raise ValueError('Invalid quantization table')
                else:
                    qtables[idx] = list(table)
            return qtables
    if (qtables == 'keep'):
        if (im.format != 'JPEG'):
            raise ValueError("Cannot use 'keep' when original image is not a JPEG")
        qtables = getattr(im, 'quantization', None)
    qtables = validate_qtables(qtables)
    extra = b''
    icc_profile = info.get('icc_profile')
    if icc_profile:
        # An ICC profile larger than one APP2 marker is split across numbered
        # ICC_PROFILE chunks (i of len(markers)).
        ICC_OVERHEAD_LEN = 14
        MAX_BYTES_IN_MARKER = 65533
        MAX_DATA_BYTES_IN_MARKER = (MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN)
        markers = []
        while icc_profile:
            markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER])
            icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:]
        i = 1
        for marker in markers:
            size = struct.pack('>H', ((2 + ICC_OVERHEAD_LEN) + len(marker)))
            extra += (((((b'\xff\xe2' + size) + b'ICC_PROFILE\x00') + o8(i)) + o8(len(markers))) + marker)
            i += 1
    progressive = (info.get('progressive', False) or info.get('progression', False))
    optimize = info.get('optimize', False)
    exif = info.get('exif', b'')
    if isinstance(exif, Image.Exif):
        exif = exif.tobytes()
    im.encoderconfig = (quality, progressive, info.get('smooth', 0), optimize, info.get('streamtype', 0), dpi[0], dpi[1], subsampling, qtables, extra, exif)
    # optimize/progressive disable the encoder's streaming, so the output
    # buffer must be large enough for a whole pass.
    bufsize = 0
    if (optimize or progressive):
        if (im.mode == 'CMYK'):
            bufsize = ((4 * im.size[0]) * im.size[1])
        elif ((quality >= 95) or (quality == (- 1))):
            bufsize = ((2 * im.size[0]) * im.size[1])
        else:
            bufsize = (im.size[0] * im.size[1])
    # The buffer must also fit the EXIF payload and any extra markers.
    bufsize = max(ImageFile.MAXBLOCK, bufsize, (len(exif) + 5), (len(extra) + 1))
    ImageFile._save(im, fp, [('jpeg', ((0, 0) + im.size), 0, rawmode)], bufsize)
def test():
    """Typetracer touch-tracking: unzipping a nested cartesian product of two
    list fields must touch only the two list-offset buffers, not the leaf data."""
    form = ak.forms.from_dict({'class': 'RecordArray', 'fields': ['muon', 'jet'], 'contents': [{'class': 'ListOffsetArray', 'offsets': 'i64', 'content': {'class': 'RecordArray', 'fields': ['pt', 'eta', 'phi', 'crossref'], 'contents': [{'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': 'muon_pt!'}, {'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': 'muon_eta!'}, {'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': 'muon_phi!'}, {'class': 'ListOffsetArray', 'offsets': 'i64', 'content': {'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': 'muon_crossref_content!'}, 'parameters': {}, 'form_key': 'muon_crossref_index!'}], 'parameters': {}, 'form_key': 'muon_record!'}, 'parameters': {}, 'form_key': 'muon_list!'}, {'class': 'ListOffsetArray', 'offsets': 'i64', 'content': {'class': 'RecordArray', 'fields': ['pt', 'eta', 'phi', 'crossref', 'thing1'], 'contents': [{'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': 'jet_pt!'}, {'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': 'jet_eta!'}, {'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': 'jet_phi!'}, {'class': 'ListOffsetArray', 'offsets': 'i64', 'content': {'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': 'jet_crossref_content!'}, 'parameters': {}, 'form_key': 'jet_crossref_index!'}, {'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': 'jet_thing1!'}], 'parameters': {}, 'form_key': 'jet_record!'}, 'parameters': {}, 'form_key': 'jet_list!'}], 'parameters': {}, 'form_key': 'outer!'})
    (ttlayout, report) = ak.typetracer.typetracer_with_report(form)
    ttarray = ak.Array(ttlayout)
    # Build nested (muon, jet) pairs per event, then split back into fields.
    pairs = ak.cartesian([ttarray.muon, ttarray.jet], axis=1, nested=True)
    (a, b) = ak.unzip(pairs)
    # Only the outer list structures should have been materialized.
    assert (report.data_touched == ['muon_list!', 'jet_list!'])
class SpectralNormalization(tf.keras.layers.Wrapper):
    """Keras layer wrapper applying spectral normalization to the wrapped layer's kernel.

    Estimates the kernel's largest singular value by power iteration and
    divides the kernel by it before each training-mode call.
    """
    def __init__(self, layer, power_iterations=1, **kwargs):
        super(SpectralNormalization, self).__init__(layer, **kwargs)
        if (power_iterations <= 0):
            raise ValueError('`power_iterations` should be greater than zero, got `power_iterations={}`'.format(power_iterations))
        self.power_iterations = power_iterations
        # NOTE(review): set but not read anywhere in this class — confirm purpose.
        self._initialized = False
    def build(self, input_shape):
        """Locate the wrapped layer's weight and create the persistent `u` vector."""
        super().build(input_shape)
        if hasattr(self.layer, 'kernel'):
            self.w = self.layer.kernel
        elif hasattr(self.layer, 'embeddings'):
            self.w = self.layer.embeddings
        else:
            raise AttributeError("{} object has no attribute 'kernel' nor 'embeddings'".format(type(self.layer).__name__))
        self.w_shape = self.w.shape.as_list()
        # `u` is the running left-singular-vector estimate; non-trainable state.
        self.u = self.add_weight(shape=(1, self.w_shape[(- 1)]), initializer=tf.initializers.TruncatedNormal(stddev=0.02), trainable=False, name='sn_u', dtype=self.w.dtype)
    def call(self, inputs, training=False):
        """Normalize weights (training mode only), then run the wrapped layer."""
        if training:
            self.normalize_weights()
        output = self.layer(inputs)
        return output
    def normalize_weights(self):
        """One (or more) power-iteration steps, then divide the kernel by sigma in place."""
        w = tf.reshape(self.w, [(- 1), self.w_shape[(- 1)]])
        u = self.u
        with tf.name_scope('spectral_normalize'):
            for _ in range(self.power_iterations):
                v = tf.math.l2_normalize(tf.matmul(u, w, transpose_b=True))
                u = tf.math.l2_normalize(tf.matmul(v, w))
            # Gradients must not flow through the iteration estimates.
            u = tf.stop_gradient(u)
            v = tf.stop_gradient(v)
            # sigma approximates the spectral norm (largest singular value) of w.
            sigma = tf.matmul(tf.matmul(v, w), u, transpose_b=True)
            self.u.assign(tf.cast(u, self.u.dtype))
            self.w.assign(tf.cast(tf.reshape((self.w / sigma), self.w_shape), self.w.dtype))
    def get_config(self):
        config = {'power_iterations': self.power_iterations}
        base_config = super().get_config()
        return {**base_config, **config}
def vgg_a(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.5, spatial_squeeze=True, scope='vgg_a', fc_conv_padding='VALID', global_pool=False):
    """VGG-A (11-layer) network in TF-Slim style.

    Returns ``(net, end_points)`` where ``net`` is the logits (or the last
    feature map when ``num_classes`` is falsy) and ``end_points`` maps layer
    scope names to their outputs. The fully-connected layers are expressed as
    convolutions (fc6/fc7/fc8); ``spatial_squeeze`` drops the 1x1 spatial
    dims from the logits.
    """
    with tf.variable_scope(scope, 'vgg_a', [inputs]) as sc:
        end_points_collection = (sc.original_name_scope + '_end_points')
        # Record every conv/pool output into the end-points collection.
        with slim.arg_scope([slim.conv2d, slim.max_pool2d], outputs_collections=end_points_collection):
            net = slim.repeat(inputs, 1, slim.conv2d, 64, [3, 3], scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')
            net = slim.max_pool2d(net, [2, 2], scope='pool5')
            # fc6/fc7: fully-connected layers implemented as convolutions.
            net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
            net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout6')
            net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if global_pool:
                net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
                end_points['global_pool'] = net
            if num_classes:
                net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout7')
                # fc8: linear classifier head (no activation / normalization).
                net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='fc8')
                if spatial_squeeze:
                    net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
                end_points[(sc.name + '/fc8')] = net
            return (net, end_points)
class RefBox():
    """Ray doesn't dereference ObjectRefs if they're nested in another object.

    So we use this box to take advantage of that: wrapping an ObjectRef in a
    RefBox lets it be passed through Ray APIs without being auto-resolved.
    (Fix: the original docstring literal was unterminated — a syntax error,
    presumably mangled during extraction.)
    """
    # The wrapped reference; resolve explicitly with ray.get(box.ref).
    ref: ray.ObjectRef
# NOTE(review): the decorator prefixes below ('@click.command()', '@click.argument',
# '@click.option') appear to have been stripped during extraction, leaving bare
# tuples that are syntax errors; restored from the argument shapes ('--' options
# with is_flag/default, bare positional arguments). Confirm against upstream.
@click.command()
@click.argument('model_path', type=str)
@click.argument('dataset_name', type=str)
@click.option('--im-size', default=None, type=int)
@click.option('--multiscale/--singlescale', default=False, is_flag=True)
@click.option('--blend/--no-blend', default=True, is_flag=True)
@click.option('--window-size', default=None, type=int)
@click.option('--window-stride', default=None, type=int)
@click.option('--window-batch-size', default=4, type=int)
@click.option('--save-images/--no-save-images', default=False, is_flag=True)
@click.option('-frac-dataset', '--frac-dataset', default=1.0, type=float)
def main(model_path, dataset_name, im_size, multiscale, blend, window_size, window_stride, window_batch_size, save_images, frac_dataset):
    """Evaluate a saved segmentation model on a dataset's validation split.

    Missing size/window options fall back first to the dataset config, then to
    the values stored in the checkpoint's training variant.
    """
    model_dir = Path(model_path).parent
    ptu.set_gpu_mode(True)
    distributed.init_process()
    (model, variant) = load_model(model_path)
    patch_size = model.patch_size
    model.eval()
    model.to(ptu.device)
    if ptu.distributed:
        model = DDP(model, device_ids=[ptu.device], find_unused_parameters=True)
    cfg = config.load_config()
    dataset_cfg = cfg['dataset'][dataset_name]
    normalization = variant['dataset_kwargs']['normalization']
    if (im_size is None):
        im_size = dataset_cfg.get('im_size', variant['dataset_kwargs']['image_size'])
    if (window_size is None):
        window_size = dataset_cfg.get('window_size', variant['dataset_kwargs']['crop_size'])
    if (window_stride is None):
        window_stride = dataset_cfg.get('window_stride', variant['dataset_kwargs']['crop_size'])
    dataset_kwargs = dict(dataset=dataset_name, image_size=im_size, crop_size=im_size, patch_size=patch_size, batch_size=1, num_workers=10, split='val', normalization=normalization, crop=False, rep_aug=False)
    eval_dataset(model, multiscale, model_dir, blend, window_size, window_stride, window_batch_size, save_images, frac_dataset, dataset_kwargs)
    distributed.barrier()
    distributed.destroy_process()
    # NOTE(review): exits with status 1 even on success — confirm this is intended.
    sys.exit(1)
# NOTE(review): the decorator prefix appears lost in extraction (a bare
# "(name='versions')" is a syntax error); restored as a pytest fixture since
# the file uses pytest elsewhere — confirm against upstream.
@pytest.fixture(name='versions')
def _versions() -> list[dict[(str, Any)]]:
    """Load and return the parsed versions manifest from the VERSIONS file."""
    with open(VERSIONS) as f:
        return json.load(f)
def test_bbox_mask():
    """random_bbox/bbox2mask: valid configs yield a binary uint8 mask; invalid
    shape/delta/margin combinations must raise ValueError."""
    cfg = dict(img_shape=(256, 256), max_bbox_shape=100, max_bbox_delta=10, min_margin=10)
    bbox = random_bbox(**cfg)
    mask_bbox = bbox2mask(cfg['img_shape'], bbox)
    assert (mask_bbox.shape == (256, 256, 1))
    # The mask must be strictly binary: zeros and ones cover every pixel.
    zero_area = np.sum((mask_bbox == 0).astype(np.uint8))
    ones_area = np.sum((mask_bbox == 1).astype(np.uint8))
    assert ((zero_area + ones_area) == (256 * 256))
    assert (mask_bbox.dtype == np.uint8)
    with pytest.raises(ValueError):
        # bbox larger than the image.
        cfg_ = cfg.copy()
        cfg_['max_bbox_shape'] = 300
        bbox = random_bbox(**cfg_)
    with pytest.raises(ValueError):
        # delta larger than the image.
        cfg_ = cfg.copy()
        cfg_['max_bbox_delta'] = 300
        bbox = random_bbox(**cfg_)
    with pytest.raises(ValueError):
        # bbox shape leaves less than min_margin of border.
        cfg_ = cfg.copy()
        cfg_['max_bbox_shape'] = 254
        bbox = random_bbox(**cfg_)
    # A tiny delta is still a valid configuration.
    cfg_ = cfg.copy()
    cfg_['max_bbox_delta'] = 1
    bbox = random_bbox(**cfg_)
    mask_bbox = bbox2mask(cfg['img_shape'], bbox)
    assert (mask_bbox.shape == (256, 256, 1))
class KLDivTeacherList(nn.Module):
    """Batch-mean KL-divergence distillation loss between score distributions.

    Fix: ``nn.KLDivLoss`` requires the *input* (student scores) to be
    log-probabilities; the original passed plain ``softmax`` output, which
    computes an incorrect (even sign-flipped) divergence. The target remains
    a probability distribution, as the KLDivLoss contract specifies.
    """
    def __init__(self):
        super(KLDivTeacherList, self).__init__()
        self.kl = torch.nn.KLDivLoss(reduction='batchmean')
    def forward(self, scores, labels):
        """Return KL(softmax(labels) || softmax(scores)), averaged over the batch."""
        # Student distribution in log-space, teacher distribution in prob-space.
        loss = self.kl(scores.log_softmax((- 1)), labels.softmax((- 1)))
        return loss
def test_NumpyArray():
    """to_typetracer(forget_length=True) must preserve the form while dropping lengths."""
    flat = ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3], dtype=np.float64))
    assert flat.to_typetracer().form == flat.to_typetracer(forget_length=True).form
    assert is_unknown_length(flat.to_typetracer(forget_length=True).length)
    cube = ak.contents.numpyarray.NumpyArray(np.arange(2 * 3 * 5, dtype=np.int64).reshape(2, 3, 5))
    assert cube.to_typetracer().form == cube.to_typetracer(forget_length=True).form
    assert is_unknown_length(cube.to_typetracer(forget_length=True).length)
    # Only the leading (length) axis is forgotten; inner shape survives.
    assert cube.to_typetracer(forget_length=True).data.shape[1:] == (3, 5)
# NOTE(review): the decorator's '@pytest.fixture' prefix appears lost in
# extraction (a bare "(scope='module')" is a syntax error); restored — confirm.
@pytest.fixture(scope='module')
def test_data_xy_dict(test_data_xy):
    """Repackage the (x, y) test-data tuple fixture as a dict."""
    return {'x': test_data_xy[0], 'y': test_data_xy[1]}
def add_bootstrap_config(cfg: CN):
    """Register the default bootstrap-training keys on a config node, in place."""
    cfg.BOOTSTRAP_DATASETS = []   # datasets used for bootstrapping (empty by default)
    cfg.BOOTSTRAP_MODEL = CN()
    cfg.BOOTSTRAP_MODEL.WEIGHTS = ''      # path to the bootstrap model weights
    cfg.BOOTSTRAP_MODEL.DEVICE = 'cuda'   # device the bootstrap model runs on
class SPC(Model):
    """Worker that wraps a minion head and pairs its outputs with generated labels."""

    def __init__(self, cfg, emb_dim):
        super().__init__(name=cfg['name'])
        # the minion consumes the frontend embedding dimension
        cfg['num_inputs'] = emb_dim
        self.minion = minion_maker(cfg)
        self.loss = self.minion.loss
        self.loss_weight = self.minion.loss_weight

    def forward(self, x, alpha=1, device=None):
        """Run the minion on ``x`` and return (prediction, matching labels on device)."""
        prediction = self.minion(x, alpha)
        target = make_labels(prediction).to(device)
        return (prediction, target)
def get_ann_ids(anno_path):
    """Collect annotation ids from a directory of annotation files.

    Args:
        anno_path: pathlib.Path of the annotation directory; each entry's id
            is its file name up to the first '.' (so 'x.tar.gz' yields 'x').

    Returns:
        list[str]: one id per directory entry, in ``iterdir`` (arbitrary) order.
    """
    # partition('.') keeps the original split('.')[0] semantics (first dot wins)
    return [p.name.partition('.')[0] for p in anno_path.iterdir()]
def matrix_centralizer_cardinalities_length_two(n, q=None, selftranspose=False, invertible=False):
    """Yield (centralizer cardinality, multiplicity) pairs over similarity class
    types of n x n matrices.

    If ``q`` is not given it defaults to the generator of Frac(QQ[q]), so the
    yielded quantities are rational functions in q rather than numbers.

    NOTE(review): depends on Sage's SimilarityClassTypes / ext_orbit_centralizers;
    the precise combinatorial meaning of the pairs should be confirmed against
    sage.combinat.similarity_class_type.
    """
    if (q is None):
        q = FractionField(QQ['q']).gen()
    for tau in SimilarityClassTypes(n):
        for pair in ext_orbit_centralizers(tau, q=q, selftranspose=selftranspose):
            # scale by q^(dim of tau's centralizer algebra) and weight by the
            # number of classes of type tau (optionally invertible-only)
            (yield (((q ** tau.centralizer_algebra_dim()) * pair[0]), (tau.number_of_classes(invertible=invertible, q=q) * pair[1]))) |
def annotate_hop_ids(hop):
    """Annotate non-train sample documents with entity/predicate catalog ids.

    For each sampled document, looks up every URI in the given ``hop`` field in
    the entity and predicate indices and stores the resulting id lists back
    into MongoDB under '<hop>_ids'.

    Relies on module-level ``mongo``, ``limit``, ``e_index`` and ``p_index``.

    Fix: the original used bare ``except:`` clauses, which also swallow
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    """
    samples = mongo.get_sample(train=False, limit=limit)
    count = 0
    for doc in samples:
        (e, p) = doc[hop]
        e_ids = []
        for uri in e:
            # best-effort lookup: a missing URI is reported, not fatal
            try:
                e_ids.append(e_index.look_up_by_uri(uri)[0]['_source']['id'])
            except Exception:
                print(('%s not found in the entity catalog' % uri))
        p_ids = []
        for uri in p:
            try:
                p_ids.append(p_index.look_up_by_uri(uri)[0]['_source']['id'])
            except Exception:
                print(('%s not found in the predicate catalog' % uri))
        doc[(hop + '_ids')] = (e_ids, p_ids)
        mongo.col.update_one({'_id': doc['_id']}, {'$set': doc}, upsert=True)
        count += 1
    print(('%d documents annotated with ids' % count))
class DivisorGroup_curve(DivisorGroup_generic):
    # Divisor group specialized to curves; only the conversion logic differs.
    def _element_constructor_(self, x, check=True, reduce=True):
        """Coerce ``x`` into a Divisor_curve belonging to this group.

        Accepted inputs: an existing Divisor_curve (reparented if necessary),
        a list of (coefficient, point) pairs, the integer 0 (empty divisor),
        or a single point (taken with coefficient one).
        """
        if isinstance(x, Divisor_curve):
            P = x.parent()
            if (P is self):
                # already an element of this exact parent: return as-is
                return x
            elif (P == self):
                # equal-but-not-identical parent: rebuild with this parent,
                # skipping validation since the data was already checked
                return Divisor_curve(x._data, check=False, reduce=False, parent=self)
            else:
                # different parent: fall through and re-validate the raw data
                x = x._data
        if isinstance(x, list):
            return Divisor_curve(x, check=check, reduce=reduce, parent=self)
        if (x == 0):
            return Divisor_curve([], check=False, reduce=False, parent=self)
        else:
            # treat x as a single point with coefficient 1 from the base ring
            return Divisor_curve([(self.base_ring()(1), x)], check=False, reduce=False, parent=self) |
class VenmoAddMoney(VirtualFunctionTool):
    """Virtual tool spec: add money to the user's Venmo balance from a linked bank account.

    Purely declarative metadata — the name, a human-readable summary, and the
    parameter/return/exception schemas consumed by the tool framework.
    """
    name = 'VenmoAddMoney'
    summary = "Add money to the User's Venmo balance from a linked bank account."
    parameters: List[ArgParameter] = [{'name': 'amount', 'type': 'number', 'description': 'The amount of money to add, must be positive.', 'required': True}, {'name': 'account_id', 'type': 'string', 'description': 'The unique identifier of the linked bank account.', 'required': True}]
    returns: List[ArgReturn] = [{'name': 'result', 'type': 'object', 'description': "An object containing 'success' (boolean, indicates whether the transaction was successful), 'transaction_id' (string, the unique identifier of the transaction, if successful), and 'error_message' (string, if unsuccessful)."}]
    exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': "The 'amount' is negative."}, {'name': 'NotFoundException', 'description': "The 'account_id' does not exist."}] |
class ClassGroup(AbelianGroupWithValues_class):
    """The ideal class group of a number field, as an abelian group whose
    elements carry representative fractional ideals as values."""
    Element = FractionalIdealClass
    def __init__(self, gens_orders, names, number_field, gens, proof=True):
        """Set up the abelian group structure; ``gens`` are representative
        ideals generating the group, with orders ``gens_orders``."""
        AbelianGroupWithValues_class.__init__(self, gens_orders, names, gens, values_group=number_field.ideal_monoid())
        self._proof_flag = proof
        self._number_field = number_field
    def _element_constructor_(self, *args, **kwds):
        """Coerce an ideal (or an existing ideal class) into this class group."""
        if isinstance(args[0], FractionalIdealClass):
            # re-wrap the underlying ideal as an element of this parent
            return self.element_class(self, None, self._number_field.ideal(args[0].ideal()))
        else:
            I = self._number_field.ideal(*args, **kwds)
            if I.is_zero():
                raise TypeError('The zero ideal is not a fractional ideal')
            return self.element_class(self, None, I)
    def _ideal_log(self, ideal):
        """Exponent vector of ``ideal`` with respect to the group generators."""
        return tuple((ZZ(order) for order in ideal.ideal_class_log(proof=self._proof_flag)))
    def gens_ideals(self):
        """Representative ideals of the group generators."""
        return self.gens_values()
    def __iter__(self):
        # enumerate every class as a product of generator powers
        return self._iter_inner(self.one(), 0)
    def _iter_inner(self, i0, k):
        """Recursively yield i0 times all products of powers of generators k, k+1, ..."""
        if (k == self.ngens()):
            (yield i0)
            return
        gk = self.gen(k)
        for _ in range(self._gens_orders[k]):
            (yield from self._iter_inner(i0, (k + 1)))
            i0 = (i0 * gk)
        return
    def _repr_(self):
        """Readable description including the order and (if non-trivial) the structure."""
        s = ('Class group of order %s ' % self.order())
        if (self.order() > 1):
            s += ('with structure %s ' % self._group_notation(self.gens_orders()))
        s += ('of %s' % self.number_field())
        return s
    def number_field(self):
        """The number field whose class group this is."""
        return self._number_field |
class AttrDict(dict):
    """Dict with case-insensitive (lower-cased) keys and attribute/call access.

    Keys are normalized to lower case on every read and write, so
    ``d['Foo']``, ``d.FOO``, ``d('foo')`` and ``d['foo']`` all address the
    same entry.
    """

    def __init__(self, init=None):
        # Bug fix: the original stored ``init``'s keys verbatim, so any
        # mixed-case key supplied at construction time was unreachable
        # through the lower-casing __getitem__/__getattr__. Normalize here.
        # (Using None instead of ``init={}`` also avoids the
        # mutable-default-argument pitfall.)
        items = {} if init is None else dict(init)
        dict.__init__(self, {key.lower(): value for (key, value) in items.items()})

    def __getitem__(self, name):
        return super(AttrDict, self).__getitem__(name.lower())

    def __setitem__(self, key, value):
        return super(AttrDict, self).__setitem__(key.lower(), value)

    __getattr__ = __getitem__
    __setattr__ = __setitem__
    __call__ = __getitem__
def parse_config():
    """Parse the demo's CLI arguments and load the YAML config into ``cfg``.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated config.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    # (flag, default, help) triples for the four string-typed options
    options = (
        ('--cfg_file', 'cfgs/kitti_models/ptt_best.yaml', 'specify the config for demo'),
        ('--data_path', None, 'specify the point cloud data file or directory'),
        ('--ckpt', None, 'specify the pretrained model'),
        ('--ext', '.bin', 'specify the extension of your point cloud data file'),
    )
    for flag, default, help_text in options:
        parser.add_argument(flag, type=str, default=default, help=help_text)
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    return (args, cfg)
class HubregtsenEncodingCircuit(EncodingCircuitBase):
    """Layered data-encoding circuit: per layer, alternating RZ/RX feature
    encodings, parameterized RY rotations, and an (optionally ring-closed)
    chain of controlled-RZ entanglers.
    """
    def __init__(self, num_qubits: int, num_features: int, num_layers: int=1, closed: bool=True, final_encoding=False) -> None:
        """num_layers: repetitions of the encode/rotate/entangle pattern.
        closed: also entangle the last qubit back to qubit 0 (ring topology).
        final_encoding: append one more feature-encoding pass after all layers."""
        super().__init__(num_qubits, num_features)
        self.num_layers = num_layers
        self.closed = closed
        self.final_encoding = final_encoding
    def num_parameters(self) -> int:
        """Total trainable parameters: one RY angle per qubit per layer, plus
        one CRZ angle per entangling pair per layer (only when num_qubits > 2)."""
        num_param = (self.num_qubits * self.num_layers)
        if (self.num_qubits > 2):
            # ring closure adds one extra CRZ per layer
            if self.closed:
                num_param += (self.num_qubits * self.num_layers)
            else:
                num_param += ((self.num_qubits - 1) * self.num_layers)
        return num_param
    def parameter_bounds(self) -> np.ndarray:
        """Per-parameter bounds: [-pi, pi] for RY angles, [-2pi, 2pi] for CRZ angles.

        NOTE(review): ``self.num_parameters`` is used without calling it below,
        which only works if the base class exposes it as a property — confirm.
        """
        bound_array = np.zeros((self.num_parameters, 2))
        ioff = 0
        for ilayer in range(self.num_layers):
            # RY rotation angles
            for i in range(self.num_qubits):
                bound_array[ioff] = [(- np.pi), np.pi]
                ioff = (ioff + 1)
            if (self.num_qubits > 2):
                if self.closed:
                    istop = self.num_qubits
                else:
                    istop = (self.num_qubits - 1)
                # CRZ entangler angles
                for i in range(istop):
                    bound_array[ioff] = [((- 2.0) * np.pi), (2.0 * np.pi)]
                    ioff = (ioff + 1)
        return bound_array
    def feature_bounds(self) -> np.ndarray:
        """Every feature is bounded to [-pi, pi] (one angle per feature)."""
        return np.array(([[(- np.pi), np.pi]] * self.num_features))
    def get_params(self, deep: bool=True) -> dict:
        """Hyper-parameters of this circuit, extending the base-class dict."""
        params = super().get_params()
        params['num_layers'] = self.num_layers
        params['closed'] = self.closed
        params['final_encoding'] = self.final_encoding
        return params
    def get_circuit(self, features: Union[(ParameterVector, np.ndarray)], parameters: Union[(ParameterVector, np.ndarray)]) -> QuantumCircuit:
        """Build the Qiskit circuit for the given feature and parameter vectors.

        Feature and parameter indices wrap around (modulo their lengths), so
        fewer features than qubits are re-encoded cyclically.
        """
        nfeatures = len(features)
        nparam = len(parameters)
        QC = QuantumCircuit(self.num_qubits)
        ioff = 0
        QC.h(range(self.num_qubits))
        for ilayer in range(self.num_layers):
            # feature encoding: RZ on even passes over the register, RX on odd
            n_feature_loop = int(np.ceil((self.num_features / self.num_qubits)))
            for i in range((n_feature_loop * self.num_qubits)):
                if (((i // self.num_qubits) % 2) == 0):
                    QC.rz(features[(i % nfeatures)], (i % self.num_qubits))
                else:
                    QC.rx(features[(i % nfeatures)], (i % self.num_qubits))
            # trainable single-qubit RY layer
            for i in range(self.num_qubits):
                QC.ry(parameters[(ioff % nparam)], i)
                ioff = (ioff + 1)
            # trainable CRZ entangling chain (ring if closed)
            if (self.num_qubits > 2):
                if self.closed:
                    istop = self.num_qubits
                else:
                    istop = (self.num_qubits - 1)
                for i in range(istop):
                    QC.crz(parameters[(ioff % nparam)], i, ((i + 1) % self.num_qubits))
                    ioff = (ioff + 1)
        if self.final_encoding:
            # one last feature-encoding pass after the final layer
            n_feature_loop = int(np.ceil((self.num_features / self.num_qubits)))
            for i in range((n_feature_loop * self.num_qubits)):
                if ((int(np.ceil((i / self.num_qubits))) % 2) == 0):
                    QC.rz(features[(i % nfeatures)], (i % self.num_qubits))
                else:
                    QC.rx(features[(i % nfeatures)], (i % self.num_qubits))
        return QC |
def adjust_learning_rate(optimizer, epoch, gammas, schedule):
    """Step-decay the learning rate and write it into every param group.

    Starting from the global ``args.learning_rate``, multiply by each gamma
    whose schedule milestone has been reached by ``epoch``; milestones beyond
    the current epoch are ignored.

    Returns:
        the learning rate that was applied.
    """
    assert len(gammas) == len(schedule), 'length of gammas and schedule should be equal'
    lr = args.learning_rate
    for gamma, milestone in zip(gammas, schedule):
        if epoch < milestone:
            break
        lr *= gamma
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
def test_mmhash3_bytes():
    """Check murmurhash3_32 on bytes input against known reference values.

    The expected constants had been stripped by an earlier transformation
    (the asserts compared against empty literals); they are restored here
    from scikit-learn's reference murmurhash test values. Note that each
    positive value equals its signed counterpart modulo 2**32.
    """
    assert (murmurhash3_32(b'foo', 0) == (-156908512))
    assert (murmurhash3_32(b'foo', 42) == (-1322301282))
    assert (murmurhash3_32(b'foo', 0, positive=True) == 4138058784)
    assert (murmurhash3_32(b'foo', 42, positive=True) == 2972666014)
class Swish_DenseNet(nn.Module):
    """DenseNet-style backbone that applies a Swish activation before the classifier head."""

    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=100):
        super(Swish_DenseNet, self).__init__()
        self.growth_rate = growth_rate
        planes = 2 * growth_rate
        self.conv1 = nn.Conv2d(3, planes, kernel_size=3, padding=1, bias=False)
        # three dense blocks, each followed by a channel-compressing transition
        for stage in (1, 2, 3):
            setattr(self, 'dense%d' % stage, self._make_dense_layers(block, planes, nblocks[stage - 1]))
            planes += nblocks[stage - 1] * growth_rate
            compressed = int(math.floor(planes * reduction))
            setattr(self, 'trans%d' % stage, Transition(planes, compressed))
            planes = compressed
        # the final dense block has no transition; BN + Swish + linear head follow
        self.dense4 = self._make_dense_layers(block, planes, nblocks[3])
        planes += nblocks[3] * growth_rate
        self.bn = nn.BatchNorm2d(planes)
        self.linear = nn.Linear(planes, num_classes)
        self.swish = Swish()

    def _make_dense_layers(self, block, in_planes, nblock):
        """Chain ``nblock`` dense units; each unit widens the channels by growth_rate."""
        units = []
        for _ in range(nblock):
            units.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*units)

    def forward(self, x):
        out = self.conv1(x)
        for stage in (1, 2, 3):
            out = getattr(self, 'trans%d' % stage)(getattr(self, 'dense%d' % stage)(out))
        out = self.dense4(out)
        out = F.avg_pool2d(self.swish(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def short_path(path, cwd=None):
    """Return whichever spelling of ``path`` — absolute or cwd-relative — is shorter.

    Non-string inputs are passed through untouched; ties favor the absolute
    form. ``cwd`` only affects the relative spelling (defaults to the real
    working directory).
    """
    if not isinstance(path, str):
        return path
    base = os.getcwd() if cwd is None else cwd
    absolute = os.path.abspath(path)
    relative = os.path.relpath(path, base)
    return absolute if len(absolute) <= len(relative) else relative
def test_orthogonal_procrustes_ndim_too_small():
    """1-D inputs must be rejected: the solver requires 2-D matrices."""
    np.random.seed(1234)
    vec_a = np.random.randn(3)
    vec_b = np.random.randn(3)
    assert_raises(ValueError, orthogonal_procrustes, vec_a, vec_b)
def resnet152_csn_ir(**kwargs):
    """Build the ResNet-152 CSN 'ir' variant (depthwise interaction-reduced bottlenecks)."""
    net = ResNet(Bottleneck_depthwise_ir, [3, 8, 36, 3], **kwargs)
    # swap the stem for a 3x7x7 conv: spatial stride 2, temporal stride 1
    net.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2), padding=(1, 3, 3), bias=False)
    return net
(autouse=True)
def add_dataset(doctest_namespace):
    # NOTE(review): the decorator above lost its callable during extraction;
    # the ``doctest_namespace`` argument implies @pytest.fixture(autouse=True) — confirm.
    """Inject a small (query_id, item_id, timestamp) interactions DataFrame
    into every doctest's namespace under the name ``dataset``."""
    columns = ['query_id', 'item_id', 'timestamp']
    data = [(1, 1, '01-01-2020'), (1, 2, '02-01-2020'), (1, 3, '03-01-2020'), (1, 4, '04-01-2020'), (1, 5, '05-01-2020'), (2, 1, '06-01-2020'), (2, 2, '07-01-2020'), (2, 3, '08-01-2020'), (2, 9, '09-01-2020'), (2, 10, '10-01-2020'), (3, 1, '01-01-2020'), (3, 5, '02-01-2020'), (3, 3, '03-01-2020'), (3, 1, '04-01-2020'), (3, 2, '05-01-2020')]
    interactions = pd.DataFrame(data, columns=columns)
    # timestamps above are day-month-year strings; parse accordingly
    interactions['timestamp'] = pd.to_datetime(interactions['timestamp'], format='%d-%m-%Y')
    doctest_namespace['dataset'] = interactions |
class Queue(deque, object):
    """Deque with a non-destructive peek at the left end."""

    def seeleft(self):
        """Return the leftmost element without removing it, or None when empty."""
        return self[0] if self else None
def batch_step(pbar, net, image, optimizers, label, criterion, gamma, gamma_target, gamma_rate, amp_flag, working_device):
    """One training step: forward, optional compression penalty, backward, step.

    ``optimizers`` is a list; every optimizer is stepped and then zeroed.
    Returns (correct, total, loss_value) for accuracy/loss bookkeeping.
    """
    pbar.update(1)
    image = image.to(working_device)
    label = label.to(working_device)
    prediction = net(image)
    (correct, total) = common.accuracy_factor(prediction, label)
    l = criterion(prediction, label)
    # expected weight-compression ratio of the current network state
    r = calculate_expected_weight_compression(net)
    if (gamma > 0.0):
        # penalize falling short of the compression target: relu zeroes the
        # penalty once the target is met, pow sharpens it by gamma_rate
        l = (l + (gamma * torch.pow(torch.relu(((gamma_target - r) / gamma_target)), gamma_rate)))
    if amp_flag:
        # apex-style mixed precision: scale the loss before backward
        with amp.scale_loss(l, optimizers) as scaled_loss:
            scaled_loss.backward()
    else:
        l.backward()
    [op.step() for op in optimizers]
    [op.zero_grad() for op in optimizers]
    return (correct, total, l.item()) |
def parse_package(line: str) -> Tuple[(str, Optional[str])]:
    """Split a requirements line into (module, version_spec).

    'numpy>=1.2' -> ('numpy', '>=1.2'); a bare name yields ('numpy', '').

    Bug fix: the version spec is now taken positionally (everything after
    the module name) rather than via ``str.replace``, which corrupted the
    result whenever the module name happened to reappear inside the
    version text (replace substitutes *all* occurrences).
    """
    module = re.split('(==|>=|<=|>|<)', line)[0]
    return (module, line[len(module):])
def build_model(model_opt, opt, fields, checkpoint):
    """Construct the base NMT model, wrapping it in DataParallel for multi-GPU runs."""
    print('Building model...')
    model = onmt.ModelConstructor.make_base_model(model_opt, fields, use_gpu(opt), checkpoint)
    if len(opt.gpuid) > 1:
        print('Multi gpu training: ', opt.gpuid)
        # dim=1: scatter inputs along their second tensor dimension
        model = nn.DataParallel(model, device_ids=opt.gpuid, dim=1)
    print(model)
    return model
def test_QSDetectorPolarization_set_basis_list():
    """set_basis_list must forward basis list, start time and frequency to the splitter."""
    timeline = Timeline()
    detector = QSDetectorPolarization('qsd', timeline)
    bases = []
    start = 0
    freq = 1000000.0
    detector.set_basis_list(bases, start, freq)
    splitter = detector.splitter
    assert splitter.basis_list == bases
    assert splitter.start_time == start
    assert splitter.frequency == freq
class FailedBuilding(Exception):
    """Raised when building a target fails; records the target name and build command."""

    def __init__(self, name, build_command):
        super(FailedBuilding, self).__init__()
        # kept for later reporting by whoever catches the exception
        self._name = name
        self._build_command = build_command
def test_sbottom_regionC_1600_850_60(get_json_from_tarfile):
    """Validate observed/expected CLs for the sbottom (1600, 850, 60) signal patch in Region C.

    NOTE(review): the expected literals below ('0.') look truncated by an
    earlier transformation — the upstream pyhf validation suite pins
    non-trivial CLs values here; restore them from the original tests
    before trusting this comparison.
    """
    sbottom_archive = data_path('pyhf-ins1748602-probability-models.tar.gz')
    sbottom_regionC_bkgonly_json = get_json_from_tarfile(sbottom_archive, 'RegionC/BkgOnly.json')
    sbottom_regionC_1600_850_60_patch_json = get_json_from_tarfile(sbottom_archive, 'RegionC/patch.sbottom_1600_850_60.json')
    (CLs_obs, CLs_exp) = calculate_CLs(sbottom_regionC_bkgonly_json, sbottom_regionC_1600_850_60_patch_json)
    assert (CLs_obs == pytest.approx(0., rel=1e-05))
    assert np.all(np.isclose(np.array(CLs_exp), np.array([0., 0., 0., 0., 0.]), rtol=1e-05)) |
.parametrize('task_name', [tn for tn in (all_tasks - julia_tasks)])
def test_obtain_prior_samples_from_task(task_name):
    # NOTE(review): the decorator above lost its '@pytest.mark' prefix during extraction.
    """Every non-Julia task's prior must return exactly the requested number of samples."""
    task = get_task(task_name)
    prior = task.get_prior()
    nsamples = 10
    thetas = prior(num_samples=nsamples)
    assert (thetas.shape[0] == nsamples) |
def reset():
    """Re-initialize all particle state in place.

    Mutates the module-level (Taichi) fields: positions x, velocities v,
    deformation gradients F, plastic deformation Jp, affine velocity C and
    per-particle material ids. Particles are laid out in ``group_size``
    blocks, each group's starting square offset diagonally from the last.
    NOTE(review): in the taichi MPM examples this function carries
    @ti.kernel — the decorator may have been stripped here; confirm.
    """
    for i in range(n_particles):
        # random point in a 0.2 x 0.2 square, offset per group index
        x[i] = [(((ti.random() * 0.2) + 0.3) + (0.1 * (i // group_size))), (((ti.random() * 0.2) + 0.05) + (0.32 * (i // group_size)))]
        material[i] = (i // group_size)
        v[i] = [0, 0]
        F[i] = ti.Matrix([[1, 0], [0, 1]])  # identity: undeformed
        Jp[i] = 1
        C[i] = ti.Matrix.zero(float, 2, 2) |
def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):
    """Assemble the image preprocessing pipeline selected by ``opt.preprocess``.

    The pipeline is order-sensitive: optional grayscale, resize/scale variant,
    optional zoom, crop variant, patch/trim, power-of-2 padding, random flip,
    then tensor conversion and normalization. ``params``, when given, pins the
    random choices (crop position, zoom factor, patch index) so paired images
    can share identical augmentation.
    """
    transform_list = []
    if grayscale:
        transform_list.append(transforms.Grayscale(1))
    if ('fixsize' in opt.preprocess):
        transform_list.append(transforms.Resize((opt.crop_size, opt.load_size), method))
    if ('resize' in opt.preprocess):
        osize = [opt.load_size, opt.load_size]
        if ('gta2cityscapes' in opt.dataroot):
            # gta2cityscapes keeps a 2:1 aspect ratio: halve the height
            osize[0] = (opt.load_size // 2)
        transform_list.append(transforms.Resize(osize, method))
    elif ('scale_width' in opt.preprocess):
        transform_list.append(transforms.Lambda((lambda img: __scale_width(img, opt.load_size, opt.crop_size, method))))
    elif ('scale_shortside' in opt.preprocess):
        transform_list.append(transforms.Lambda((lambda img: __scale_shortside(img, opt.load_size, opt.crop_size, method))))
    elif ('scale_longside' in opt.preprocess):
        transform_list.append(transforms.Lambda((lambda img: __scale_longside(img, opt.load_size, opt.crop_size, method))))
    if ('zoom' in opt.preprocess):
        # without params the zoom factor is drawn at random per call
        if (params is None):
            transform_list.append(transforms.Lambda((lambda img: __random_zoom(img, opt.load_size, opt.crop_size, method))))
        else:
            transform_list.append(transforms.Lambda((lambda img: __random_zoom(img, opt.load_size, opt.crop_size, method, factor=params['scale_factor']))))
    if ('centercrop' in opt.preprocess):
        transform_list.append(transforms.Lambda((lambda img: __centercrop(img))))
    elif ('crop' in opt.preprocess):
        # random crop unless a fixed crop position is supplied in params
        if ((params is None) or ('crop_pos' not in params)):
            transform_list.append(transforms.RandomCrop(opt.crop_size, padding=opt.preprocess_crop_padding))
        else:
            transform_list.append(transforms.Lambda((lambda img: __crop(img, params['crop_pos'], opt.crop_size))))
    if ('patch' in opt.preprocess):
        transform_list.append(transforms.Lambda((lambda img: __patch(img, params['patch_index'], opt.crop_size))))
    if ('trim' in opt.preprocess):
        transform_list.append(transforms.Lambda((lambda img: __trim(img, opt.crop_size))))
    # always pad/adjust to a multiple of 16 so downstream convolutions line up
    transform_list.append(transforms.Lambda((lambda img: __make_power_2(img, base=16, method=method))))
    random_flip = (opt.isTrain and (not opt.no_flip))
    if random_flip:
        transform_list.append(transforms.RandomHorizontalFlip())
    if convert:
        transform_list += [transforms.ToTensor()]
        # normalize to [-1, 1]; channel count depends on grayscale
        if grayscale:
            transform_list += [transforms.Normalize((0.5,), (0.5,))]
        else:
            transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list) |
def main(_config):
    """Train (or test) the AllinoneTransformerSS model with PyTorch Lightning DDP.

    Builds the datamodule and model from ``_config``, wires up checkpointing,
    logging and LR monitoring, then runs ``fit`` or — when 'test_only' is set —
    ``test``.
    """
    _config = copy.deepcopy(_config)
    pl.seed_everything(_config['seed'])
    dm = MTDataModule(_config, dist=True)
    model = AllinoneTransformerSS(_config)
    exp_name = f"{_config['exp_name']}"
    os.makedirs(_config['log_dir'], exist_ok=True)
    # keep only the best checkpoint by the aggregated validation metric, plus the last
    checkpoint_callback = pl.callbacks.ModelCheckpoint(save_top_k=1, verbose=True, monitor='val/the_metric', mode='max', save_last=True)
    now = datetime.datetime.now()
    instance_name = f"{exp_name}_seed{_config['seed']}_from_{_config['load_path'].split('/')[(- 1)][:(- 5)]}{now.year}_{now.month}_{now.day}"
    logger = pl.loggers.TensorBoardLogger(_config['log_dir'], name=instance_name)
    lr_callback = pl.callbacks.LearningRateMonitor(logging_interval='step')
    callbacks = [checkpoint_callback, lr_callback]
    # 'num_gpus' may be an int count or an explicit device list
    num_gpus = (_config['num_gpus'] if isinstance(_config['num_gpus'], int) else len(_config['num_gpus']))
    print(((('=' * 70) + 'Config: ') + ('=' * 70)))
    print(instance_name)
    print(_config)
    print(('=' * 150))
    # accumulate gradients so the effective batch size matches 'batch_size'
    grad_steps = (_config['batch_size'] // ((_config['per_gpu_batchsize'] * num_gpus) * _config['num_nodes']))
    max_steps = (_config['max_steps'] if (_config['max_steps'] is not None) else None)
    trainer = pl.Trainer(gpus=_config['num_gpus'], num_nodes=_config['num_nodes'], precision=_config['precision'], accelerator='ddp', benchmark=True, deterministic=True, max_epochs=(_config['max_epoch'] if (max_steps is None) else 1000), max_steps=max_steps, callbacks=callbacks, logger=logger, replace_sampler_ddp=False, accumulate_grad_batches=grad_steps, log_every_n_steps=10, flush_logs_every_n_steps=10, resume_from_checkpoint=_config['resume_from'], weights_summary='top', fast_dev_run=_config['fast_dev_run'], val_check_interval=_config['val_check_interval'])
    print('accumulate grad batches is: ', trainer.accumulate_grad_batches)
    if (not _config['test_only']):
        trainer.fit(model, datamodule=dm)
    else:
        trainer.test(model, datamodule=dm) |
def get_amr_match(cur_amr1, cur_amr2, sent_num=1, justinstance=False, justattribute=False, justrelation=False):
    """Compute smatch-style triple matching between two single-line AMRs.

    Parses both AMR strings, renames their nodes to disjoint prefixes, extracts
    (instance, attribute, relation) triples, and finds the best variable
    mapping. The just* flags restrict matching (and the returned totals) to a
    single triple category. Relies on module-level ``verbose``/``DEBUG_LOG``/
    ``ERROR_LOG`` for diagnostics.

    Returns:
        (best_match_num, test_triple_num, gold_triple_num)

    NOTE(review): if parsing one AMR fails, the error is printed but
    ``amr_pair`` ends up with fewer than two entries and the unpack below
    raises — confirm whether the caller relies on that behavior.
    """
    amr_pair = []
    for (i, cur_amr) in ((1, cur_amr1), (2, cur_amr2)):
        try:
            amr_pair.append(amr.AMR.parse_AMR_line(cur_amr))
        except Exception as e:
            print(('Error in parsing amr %d: %s' % (i, cur_amr)), file=ERROR_LOG)
            print('Please check if the AMR is ill-formatted. Ignoring remaining AMRs', file=ERROR_LOG)
            print(('Error message: %s' % e), file=ERROR_LOG)
    (amr1, amr2) = amr_pair
    # disjoint node-name prefixes keep the two AMRs' variables apart
    prefix1 = 'a'
    prefix2 = 'b'
    amr1.rename_node(prefix1)
    amr2.rename_node(prefix2)
    (instance1, attributes1, relation1) = amr1.get_triples()
    (instance2, attributes2, relation2) = amr2.get_triples()
    if verbose:
        print('AMR pair', sent_num, file=DEBUG_LOG)
        print('', file=DEBUG_LOG)
        print('AMR 1 (one-line):', cur_amr1, file=DEBUG_LOG)
        print('AMR 2 (one-line):', cur_amr2, file=DEBUG_LOG)
        print('Instance triples of AMR 1:', len(instance1), file=DEBUG_LOG)
        print(instance1, file=DEBUG_LOG)
        print('Attribute triples of AMR 1:', len(attributes1), file=DEBUG_LOG)
        print(attributes1, file=DEBUG_LOG)
        print('Relation triples of AMR 1:', len(relation1), file=DEBUG_LOG)
        print(relation1, file=DEBUG_LOG)
        print('Instance triples of AMR 2:', len(instance2), file=DEBUG_LOG)
        print(instance2, file=DEBUG_LOG)
        print('Attribute triples of AMR 2:', len(attributes2), file=DEBUG_LOG)
        print(attributes2, file=DEBUG_LOG)
        print('Relation triples of AMR 2:', len(relation2), file=DEBUG_LOG)
        print(relation2, file=DEBUG_LOG)
    # by default match all three categories; each just* flag narrows to one
    doinstance = doattribute = dorelation = True
    if justinstance:
        doattribute = dorelation = False
    if justattribute:
        doinstance = dorelation = False
    if justrelation:
        doinstance = doattribute = False
    (best_mapping, best_match_num) = get_best_match(instance1, attributes1, relation1, instance2, attributes2, relation2, prefix1, prefix2, doinstance=doinstance, doattribute=doattribute, dorelation=dorelation)
    if verbose:
        print('best match number', best_match_num, file=DEBUG_LOG)
        print('best node mapping', best_mapping, file=DEBUG_LOG)
        print('Best node mapping alignment:', print_alignment(best_mapping, instance1, instance2), file=DEBUG_LOG)
    # totals mirror the selected categories so precision/recall stay consistent
    if justinstance:
        test_triple_num = len(instance1)
        gold_triple_num = len(instance2)
    elif justattribute:
        test_triple_num = len(attributes1)
        gold_triple_num = len(attributes2)
    elif justrelation:
        test_triple_num = len(relation1)
        gold_triple_num = len(relation2)
    else:
        test_triple_num = ((len(instance1) + len(attributes1)) + len(relation1))
        gold_triple_num = ((len(instance2) + len(attributes2)) + len(relation2))
    return (best_match_num, test_triple_num, gold_triple_num) |
def test_image_to_tensor():
    """ImageToTensor must CHW-ify images, cast to float32, and expose a stable repr."""
    keys = ['img']

    # 3-channel float image -> (3, H, W) float32 tensor
    src = dict(img=np.random.randn(256, 256, 3))
    image_to_tensor = ImageToTensor(keys)
    out = image_to_tensor(src)
    assert out['img'].shape == torch.Size([3, 256, 256])
    assert isinstance(out['img'], torch.Tensor)
    assert torch.equal(out['img'].data, src['img'])
    assert out['img'].dtype == torch.float32

    # single-channel integer image -> (1, H, W) float32 tensor
    src = dict(img=np.random.randint(256, size=(256, 256)))
    image_to_tensor = ImageToTensor(keys)
    out = image_to_tensor(src)
    assert out['img'].shape == torch.Size([1, 256, 256])
    assert isinstance(out['img'], torch.Tensor)
    assert torch.equal(out['img'].data, src['img'])
    assert out['img'].dtype == torch.float32

    to_float32 = True
    assert repr(image_to_tensor) == image_to_tensor.__class__.__name__ + f'(keys={keys}, to_float32={to_float32})'
class Object3dCaptionDataset(BaseDataset, __DisplMixin):
    """Multi-modal 3D-object captioning dataset: resolves, per modality
    (images/depth/pc/...), the sample ids that actually exist on disk and
    keeps only annotations present in every requested modality.

    Subclasses must implement the get_*_path / get_existing_pc_annotations
    hooks for their on-disk layout.
    """
    def __init__(self, **kwargs):
        super().__init__(kwargs['vis_processor'], kwargs['text_processor'], kwargs['vis_root'], kwargs['ann_paths'])
        self.modalities = kwargs['modalities']
        # fixed point-cloud sampling budget
        self.npoints = 8192
        self.sample_points_num = self.npoints
        for modality in self.modalities:
            if ('image' in modality):
                # images use the shared vis_root/vis_processor from the base class
                setattr(self, f'existing_{modality}_annotation', getattr(self, f'get_existing_{modality}_annotations')())
                continue
            # every other modality brings its own root dir and processor
            setattr(self, f'{modality}_root', kwargs[f'{modality}_root'])
            setattr(self, f'{modality}_processor', kwargs[f'{modality}_processor'])
            setattr(self, f'existing_{modality}_annotation', getattr(self, f'get_existing_{modality}_annotations')())
        # keep only samples available in ALL modalities
        self.sample_ids = set.intersection(*[set(getattr(self, f'existing_{modality}_annotation')) for modality in self.modalities])
        self.annotation = [ann for ann in self.annotation if (ann['sample_id'] in self.sample_ids)]
    def get_existing_depth_annotations(self):
        """Depth annotations are simply the files under depth_root."""
        return os.listdir(self.depth_root)
    def get_existing_images_annotations(self):
        """Image annotations are simply the files under vis_root."""
        return os.listdir(self.vis_root)
    def get_existing_pc_annotations(self):
        raise NotImplementedError('Subclasses should implement this!')
    def get_pc_path(self, sample_key):
        raise NotImplementedError('Subclasses should implement this!')
    def get_images_path(self, sample_key):
        raise NotImplementedError('Subclasses should implement this!')
    def get_depth_path(self, sample_key):
        raise NotImplementedError('Subclasses should implement this!')
    def __getitem__(self, index):
        """Load one annotation with every requested modality materialized."""
        ann = copy.deepcopy(self.annotation[index])
        ann['captions'] = ann['data']
        del ann['data']
        for modality in self.modalities:
            ann[f'{modality}_path'] = getattr(self, f'get_{modality}_path')(ann['sample_id'])
            # a path list means multiple candidates: pick one at random
            if (type(ann[f'{modality}_path']) == list):
                ann[f'{modality}_path'] = random.choice(ann[f'{modality}_path'])
            if ('image' in modality):
                ann['image'] = self.vis_processor(Image.open(ann[f'images_path']))
            else:
                ann[modality] = getattr(self, f'{modality}_processor')(ann[f'{modality}_path']).to(torch.float32)
        return ann
    def __len__(self):
        return len(self.annotation)
    def _build_templates(self, templates_path):
        """Load caption templates from JSON, or disable templating when no path is given."""
        if (templates_path is None):
            self.templates = None
        else:
            with open(templates_path) as f:
                self.templates = json.load(f) |
.spark
def test_refit(fitted_model, log_ucb, log_ucb2):
    # NOTE(review): the decorator above lost its prefix during extraction;
    # only the trailing '.spark' attribute (likely a pytest mark) survives.
    """refit() on the second log must yield the same predictions as a full
    fit() on the union of both logs (exact equality: the seed is fixed)."""
    fitted_model.seed = 123
    fitted_model.sample = True
    # with seed set, the seed-is-None branch cannot trigger, so the strict
    # dataframe-equality check is selected
    equality_check = (sparkDataFrameNotEqual if (fitted_model.sample and (fitted_model.seed is None)) else sparkDataFrameEqual)
    dataset = create_dataset(log_ucb)
    dataset2 = create_dataset(log_ucb2)
    fitted_model.refit(dataset2)
    pred_after_refit = fitted_model.predict(dataset, items=list(range(10)), k=1)
    united_dataset = create_dataset(log_ucb.union(log_ucb2))
    fitted_model.fit(united_dataset)
    pred_after_full_fit = fitted_model.predict(dataset, items=list(range(10)), k=1)
    equality_check(pred_after_full_fit, pred_after_refit) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.