code stringlengths 101 5.91M |
|---|
def test_option_ignore_between():
    """JSON fields absent from the schema's ``properties`` must be skipped,
    whatever (arbitrarily nested) value they hold between required fields."""
    schema = {
        'type': 'array',
        'items': {
            'type': ['object', 'null'],
            'properties': {'z': {'type': 'boolean'}, 'x': {'type': 'integer'}},
            'required': ['z', 'x'],
        },
    }
    ignored_values = ['null', 'true', '2', '2.2', '[]', '[2]', '[2, 2.2]', '{}', '{"z": 2.2}', '{"z": []}', '{"z": [2]}', '{"z": [2, 2.2]}']
    for what in ignored_values:
        source = '[{"x": 1, "y": ' + what + ', "z": true}, {"x": 3, "z": false}]'
        array = ak.from_json(source, schema=schema)
        assert array.to_list() == [{'x': 1, 'z': True}, {'x': 3, 'z': False}]
        assert str(array.type) == '2 * ?{z: bool, x: int64}'
class Encoder(object):
    """VAE-style image encoder producing a latent sample.

    Builds either a plain strided-conv stack or a residual network
    (``use_resnet``) and returns ``(z, mu, log_sigma)`` where
    ``z = mu + eps * exp(log_sigma)`` with ``eps ~ N(0, I)``.
    """

    def __init__(self, name, is_train, norm='batch', activation='relu', image_size=128, latent_dim=8, use_resnet=True):
        # BUG FIX: print() is not printf-style; the original
        # print(' [*] Init Encoder %s', name) emitted the literal '%s'
        # followed by the name as a second value.
        print(' [*] Init Encoder %s' % name)
        self.name = name
        self._is_train = is_train
        self._norm = norm
        self._activation = activation
        self._image_size = image_size
        self._latent_dim = latent_dim
        self._use_resnet = use_resnet
        self._reuse = False  # flipped to True after the first graph build

    def __call__(self, input):
        """Build (or reuse) the encoder graph for a batch of images."""
        if self._use_resnet:
            return self._resnet(input)
        return self._convnet(input)

    def _sample(self, E):
        """Map flattened features ``E`` to ``(z, mu, log_sigma)`` via two FC heads."""
        mu = nn.mlp(E, self._latent_dim, 'FC8_mu', self._is_train, self._reuse, norm=None, activation=None)
        log_sigma = nn.mlp(E, self._latent_dim, 'FC8_sigma', self._is_train, self._reuse, norm=None, activation=None)
        # BUG FIX: the noise must match the shape of mu (batch, latent_dim).
        # The original used tf.shape(self._latent_dim) — the shape of a scalar
        # python int, i.e. [] — which drew a single scalar noise value
        # broadcast over the whole batch.
        z = mu + tf.random_normal(shape=tf.shape(mu)) * tf.exp(log_sigma)
        return (z, mu, log_sigma)

    def _convnet(self, input):
        """Plain strided-conv encoder (7 blocks; 8 for 256px inputs)."""
        with tf.variable_scope(self.name, reuse=self._reuse):
            num_filters = [64, 128, 256, 512, 512, 512, 512]
            if self._image_size == 256:
                num_filters.append(512)
            E = input
            for i, n in enumerate(num_filters):
                # No normalization on the very first conv block (i == 0).
                E = nn.conv_block(E, n, 'e_convnet_{}_{}'.format(n, i), 4, 2, self._is_train, self._reuse, norm=(self._norm if i else None), activation=self._activation)
            E = F.flatten(E)
            (z, mu, log_sigma) = self._sample(E)
            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
            return (z, mu, log_sigma)

    def _resnet(self, input):
        """Residual encoder: stem conv, residual blocks with avg-pool downsampling."""
        with tf.variable_scope(self.name, reuse=self._reuse):
            num_filters = [128, 256, 512, 512]
            if self._image_size == 256:
                num_filters.append(512)
            E = input
            E = nn.conv_block(E, 64, 'e_resnet_{}_{}'.format(64, 0), 4, 2, self._is_train, self._reuse, norm=None, activation=self._activation, bias=True)
            for i, n in enumerate(num_filters):
                E = nn.residual(E, n, 'e_resnet_{}_{}'.format(n, i + 1), self._is_train, self._reuse, norm=self._norm, bias=True)
                E = tf.nn.avg_pool(E, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
            E = nn.activation_fn(E, 'relu')
            # Global 8x8 average pooling before flattening.
            E = tf.nn.avg_pool(E, [1, 8, 8, 1], [1, 8, 8, 1], 'SAME')
            E = F.flatten(E)
            (z, mu, log_sigma) = self._sample(E)
            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
            return (z, mu, log_sigma)
def in_plane_mobility_trans_times_force_pycuda(r_vectors, force, eta, a, *args, **kwargs):
    """Compute blob velocities u = M @ f with the CUDA kernel
    'velocity_from_force_in_plane' (in-plane mobility).

    Parameters
    ----------
    r_vectors : blob coordinates, reshaped here to a flat (3*N,) array.
    force : blob forces, reshaped here to a flat (3*N,) array.
    eta : fluid viscosity.
    a : blob radius.
    kwargs : optional 'periodic_length' 3-vector; defaults to a
        non-periodic box [0, 0, 0].

    Returns
    -------
    u : flat (3*N,) array of velocity components (dtype set by `real`).
    """
    number_of_blobs = np.int32(len(r_vectors))
    # Kernel launch configuration is delegated to a module-level helper.
    (threads_per_block, num_blocks) = set_number_of_threads_and_blocks(number_of_blobs)
    L = kwargs.get('periodic_length', np.array([0.0, 0.0, 0.0]))
    # `real` casts to the float precision the CUDA module was compiled with.
    x = real(np.reshape(r_vectors, (number_of_blobs * 3)))
    f = real(np.reshape(force, (number_of_blobs * 3)))
    # Device buffers: positions, forces, and the velocity output.
    x_gpu = cuda.mem_alloc(x.nbytes)
    f_gpu = cuda.mem_alloc(f.nbytes)
    u_gpu = cuda.mem_alloc(f.nbytes)
    # NOTE(review): this buffer is allocated but never written to or passed
    # to the kernel — the scalar `number_of_blobs` is passed by value below.
    number_of_blobs_gpu = cuda.mem_alloc(number_of_blobs.nbytes)
    cuda.memcpy_htod(x_gpu, x)
    cuda.memcpy_htod(f_gpu, f)
    mobility = mod.get_function('velocity_from_force_in_plane')
    mobility(x_gpu, f_gpu, u_gpu, number_of_blobs, real(eta), real(a), real(L[0]), real(L[1]), real(L[2]), block=(threads_per_block, 1, 1), grid=(num_blocks, 1))
    # Copy velocities back to the host.
    u = np.empty_like(f)
    cuda.memcpy_dtoh(u, u_gpu)
    return u
@unittest.mock.patch('trieste.logging.tf.summary.scalar')
def test_scalar(mocked_summary_scalar: unittest.mock.MagicMock) -> None:
    """Only names without a leading underscore are logged; callable values are
    evaluated lazily and ones that raise are skipped silently.

    NOTE(review): the decorator line was garbled in the original source
    (".patch(...)" with the "@unittest.mock" prefix missing — a syntax
    error); restored here.
    """
    scalar('this', 1, step=1)
    scalar('_that', 2, step=2)
    with tf.name_scope('foo'):
        scalar('this', (lambda : 3), step=3)
        scalar('_that', (lambda : 4), step=4)
        # A callable that raises must not propagate nor be recorded.
        scalar('broken', (lambda : (1 / 0)), step=5)
    assert len(mocked_summary_scalar.call_args_list) == 2
    for (i, j) in enumerate([1, 3]):
        assert mocked_summary_scalar.call_args_list[i][0] == ('this', j)
        assert mocked_summary_scalar.call_args_list[i][1] == {'step': j}
class DumpSeqPlayDialog(QtWidgets.QDialog):
    """Dialog that exports logged qOut joint trajectories to a seqplay
    position file, optionally resampling time by an integer scale factor."""

    def __init__(self, parent):
        # BUG FIX: QDialog.__init__ was never called, leaving the underlying
        # Qt object uninitialized before setWindowTitle().
        super(DumpSeqPlayDialog, self).__init__(parent)
        self.setWindowTitle('Dump qOut to seqplay')
        # BUG FIX: the original contained the garbled (syntactically invalid)
        # line "(Layout=QtWidgets.QGridLayout, apply_=False)" and never
        # created self.layout, which every addWidget call below relies on.
        # A grid layout attached to this dialog restores the evident intent.
        self.layout = QtWidgets.QGridLayout(self)
        row = 0
        row += 1
        self.layout.addWidget(QtWidgets.QLabel('Timestep'), row, 0)
        self.timestepLineEdit = QtWidgets.QLineEdit('0.005')
        validator = QtGui.QDoubleValidator()
        validator.setBottom(1e-06)  # timestep must be strictly positive
        self.timestepLineEdit.setValidator(validator)
        self.layout.addWidget(self.timestepLineEdit, row, 1)
        row += 1
        self.layout.addWidget(QtWidgets.QLabel('Time scale'), row, 0)
        self.timeScaleSpinBox = QtWidgets.QSpinBox()
        self.timeScaleSpinBox.setMinimum(1)
        self.timeScaleSpinBox.setPrefix('x')
        self.layout.addWidget(self.timeScaleSpinBox, row, 1)
        row += 1
        filedialogButton = QtWidgets.QPushButton('Browse...')
        filedialogButton.clicked.connect(self.filedialogButton)
        self.layout.addWidget(filedialogButton, row, 0)
        self.fileLineEdit = QtWidgets.QLineEdit('out.pos')
        self.layout.addWidget(self.fileLineEdit)

    def accept(self):
        """Write the (time-scaled, linearly interpolated) qOut data to the
        chosen file, then close the dialog."""
        fout = self.fileLineEdit.text()
        tScale = self.timeScaleSpinBox.value()
        dt = float(self.timestepLineEdit.text())
        rm = self.parent().rm
        data = self.parent().data
        if os.path.exists(fout):
            overwrite = QtWidgets.QMessageBox.question(self, 'Overwrite existing file', '{} already exists, do you want to overwrite it?'.format(fout), QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
            if overwrite == QtWidgets.QMessageBox.No:
                return
        with open(fout, 'w') as fd:
            rjo_range = range(len(rm.ref_joint_order()))
            i_range = range(len(data['t']))
            t = 0
            for i in i_range:
                q = np.array([data['qOut_{}'.format(jIdx)][i] for jIdx in rjo_range])
                # Hold the last sample; otherwise interpolate towards the next.
                if i == i_range[-1]:
                    next_q = q
                else:
                    next_q = np.array([data['qOut_{}'.format(jIdx)][i + 1] for jIdx in rjo_range])
                for j in range(tScale):
                    # Linear interpolation between consecutive samples.
                    qOut = map(str, q + (j / float(tScale)) * (next_q - q))
                    fd.write('{} {}\n'.format(t, ' '.join(qOut)))
                    t += dt
        super(DumpSeqPlayDialog, self).accept()

    def filedialogButton(self):
        """Open a save-file dialog and store the chosen path."""
        fpath = QtWidgets.QFileDialog.getSaveFileName(self, 'Output file')[0]
        if len(fpath):
            self.fileLineEdit.setText(fpath)
def __scale_shortside(img, target_width, crop_width, method=Image.BICUBIC):
    """Upscale ``img`` so its shorter side reaches ``target_width``, keeping
    the aspect ratio; images already large enough are returned untouched.

    ``crop_width`` is accepted for interface compatibility but unused here.
    """
    width, height = img.size
    shorter = min(width, height)
    if shorter >= target_width:
        return img
    factor = target_width / shorter
    new_size = (round(width * factor), round(height * factor))
    return img.resize(new_size, method)
def progress_bar(current, total, msg=None):
    """Render a Keras-style terminal progress bar for step `current` of `total`.

    Relies on module globals: TOTAL_BAR_LENGTH, term_width, format_time, and
    the mutable timers last_time / begin_time.
    """
    global last_time, begin_time
    if current == 0:
        begin_time = time.time()  # reset cumulative timer at the first step

    filled = int(TOTAL_BAR_LENGTH * current / total)
    rest = int(TOTAL_BAR_LENGTH - filled) - 1
    sys.stdout.write(' [' + '=' * filled + '>' + '.' * rest + ']')

    now = time.time()
    step_time = now - last_time
    last_time = now
    tot_time = now - begin_time

    pieces = [' Step: %s' % format_time(step_time),
              ' | Tot: %s' % format_time(tot_time)]
    if msg:
        pieces.append(' | ' + msg)
    msg = ''.join(pieces)
    sys.stdout.write(msg)

    # Pad to the terminal width, then backspace to position the counter.
    sys.stdout.write(' ' * (term_width - int(TOTAL_BAR_LENGTH) - len(msg) - 3))
    sys.stdout.write('\x08' * (term_width - int(TOTAL_BAR_LENGTH / 2) + 2))
    sys.stdout.write(' %d/%d ' % (current + 1, total))

    # Carriage-return between steps; newline after the final one.
    sys.stdout.write('\r' if current < total - 1 else '\n')
    sys.stdout.flush()
def objects_counter_percentile(scan_ids, all_scans, prc):
    """Return the `prc`-th percentile of per-scan 3D-object counts,
    restricted to the scans whose id appears in `scan_ids`."""
    counts = [
        len(scan.three_d_objects)
        for scan_id, scan in all_scans.items()
        if scan_id in scan_ids
    ]
    return np.percentile(counts, prc)
class AffinePermutationTypeB(AffinePermutationTypeC):
    """Affine permutations of type B in window notation, implemented as a
    special case of type C (``k`` and ``N`` come from the type-C parent)."""

    def check(self):
        """Validate the window entries of this type-B affine permutation.

        Checks that the window has length ``k``, that no entry has residue 0
        mod ``N``, that residues are pairwise distinct even up to negation
        mod ``N``, and the type-B parity condition on negative entries.
        """
        if (not self):
            return
        k = self.parent().k
        if (len(self) != k):
            raise ValueError(('length of list must be k=' + str(k)))
        reslist = []
        for i in self:
            r = (i % self.N)
            if (r == 0):
                raise ValueError('entries may not have residue 0 mod 2k+1')
            # r and N - r represent the same residue class up to sign.
            if (not ((r not in reslist) and ((self.N - r) not in reslist))):
                raise ValueError('entries must have distinct residues')
            reslist.append(r)
        # Parity count: each negative value contributes 1 + how many full
        # periods it lies below zero; type B requires this sum to be even.
        s = sum(((((- i) // self.N) + 1) for i in (self.value(j) for j in range(1, (self.N + 1))) if (i < 0)))
        if (s % 2):
            raise ValueError('type B affine permutations have an even number of entries less than 0 to the right of the 0th position')

    def apply_simple_reflection_right(self, i):
        """Return ``self * s_i`` (the simple reflection acting on positions)."""
        if (i not in self.parent().index_set()):
            raise ValueError('index not in index set')
        j = i
        l = self[:]
        if ((j != 0) and (j != self.k)):
            # Generic generator: swap adjacent window entries.
            (l[(j - 1)], l[j]) = (l[j], l[(j - 1)])
        elif (j == 0):
            # s_0: negate and swap the first two entries.
            l[0] = (- self(2))
            l[1] = (- self(1))
        elif (j == self.k):
            # s_k: the last window entry is replaced through the boundary;
            # NOTE(review): only l[k-1] is rewritten here — presumably
            # self(k+1) already folds in the reflection, confirm upstream.
            l[(self.k - 1)] = self((self.k + 1))
        return type(self)(self.parent(), l, check=False)

    def apply_simple_reflection_left(self, i):
        """Return ``s_i * self`` (the simple reflection acting on values).

        Entries are shifted according to their residue class mod ``N``; the
        end-node generators (``i == 0`` and ``i == self.k``) have special
        residue rules.
        """
        if (i not in self.parent().index_set()):
            raise ValueError('index not in index set')
        j = (self.N - i)
        l = []
        if ((i != self.k) and (i != 0)):
            # Generic generator: swap values in residue classes i <-> i+1
            # and their mirrors j-1 <-> j.
            for m in range(self.k):
                res = (self[m] % self.N)
                if (res == i):
                    l.append((self[m] + 1))
                elif (res == (i + 1)):
                    l.append((self[m] - 1))
                elif (res == j):
                    l.append((self[m] - 1))
                elif (res == (j - 1)):
                    l.append((self[m] + 1))
                else:
                    l.append(self[m])
        elif (i == 0):
            # End node 0: values move by 3 between residue classes
            # 1 <-> N-2 and 2 <-> N-1.
            for m in range(self.k):
                res = (self[m] % self.N)
                if (res == 1):
                    l.append((self[m] - 3))
                elif (res == (self.N - 2)):
                    l.append((self[m] + 3))
                elif (res == 2):
                    l.append((self[m] - 3))
                elif (res == (self.N - 1)):
                    l.append((self[m] + 3))
                else:
                    l.append(self[m])
        elif (i == self.k):
            # End node k: swap residue classes k <-> N-k (= j).
            for m in range(self.k):
                res = (self[m] % self.N)
                if (res == i):
                    l.append((self[m] + 1))
                elif (res == j):
                    l.append((self[m] - 1))
                else:
                    l.append(self[m])
        return type(self)(self.parent(), l, check=False)

    def has_right_descent(self, i) -> bool:
        """Whether ``i`` is a right descent (length decreases by s_i)."""
        if (i == 0):
            # Descent at the end node compares across the 0th position.
            return (self.value((- 2)) > self.value(1))
        return (self.value(i) > self.value((i + 1)))

    def has_left_descent(self, i) -> bool:
        """Whether ``i`` is a left descent (compares positions, not values)."""
        if (i == 0):
            return (self.position((- 2)) > self.position(1))
        return (self.position(i) > self.position((i + 1)))
def main():
    """Sort the tgbl-coin edge list CSV into a new, sorted CSV file."""
    sort_edgelist('tgbl-coin_edgelist.csv', 'tgbl-coin_edgelist_sorted.csv')
def main(args):
    """Train a (dual-loss) UNet segmentation model on the AC17 dataset.

    Builds the model, 2D train/val loaders with augmentation, optionally
    wraps for multi-GPU, then runs the epoch loop tracking the best per-class
    and mean IoU and checkpointing on improvement (after a 15-epoch warmup)
    and every 50 epochs.
    """
    builder = ModelBuilder()
    unet = builder.build_unet(num_class=args.num_class, arch=args.unet_arch, weights=args.weights_unet)
    print('Froze the following layers: ')
    for (name, p) in unet.named_parameters():
        if (p.requires_grad == False):
            print(name)
    print()
    crit = DualLoss(mode='train')
    segmentation_module = SegmentationModule(crit, unet, args.num_class)
    # Train-time augmentation; validation only center-crops.
    train_augs = Compose([PaddingCenterCrop(256), RandomHorizontallyFlip(), RandomVerticallyFlip(), RandomRotate(180)])
    test_augs = Compose([PaddingCenterCrop(256)])
    dataset_train = AC17(root=args.data_root, split='train', k_split=args.k_split, augmentations=train_augs)
    ac17_train = load2D(dataset_train, split='train', deform=True)
    loader_train = data.DataLoader(ac17_train, batch_size=args.batch_size_per_gpu, shuffle=True, num_workers=int(args.workers), drop_last=True, pin_memory=True)
    dataset_val = AC17(root=args.data_root, split='val', k_split=args.k_split, augmentations=test_augs)
    ac17_val = load2D(dataset_val, split='val', deform=False)
    loader_val = data.DataLoader(ac17_val, batch_size=1, shuffle=False, collate_fn=user_scattered_collate, num_workers=5, drop_last=True)
    if (len(args.gpus) > 1):
        segmentation_module = UserScatteredDataParallel(segmentation_module, device_ids=args.gpus)
        patch_replication_callback(segmentation_module)
    segmentation_module.cuda()
    # NOTE(review): net_encoder / net_decoder are not defined anywhere in
    # this function — the args.unet == False branch would raise NameError.
    nets = ((net_encoder, net_decoder, crit) if (args.unet == False) else (unet, crit))
    optimizers = create_optimizers(nets, args)
    history = {'train': {'epoch': [], 'loss': [], 'acc': [], 'jaccard': []}}
    # Best per-class (1..3) and mean IoU seen so far, with their epochs.
    best_val = {'epoch_1': 0, 'mIoU_1': 0, 'epoch_2': 0, 'mIoU_2': 0, 'epoch_3': 0, 'mIoU_3': 0, 'epoch': 0, 'mIoU': 0}
    for epoch in range(args.start_epoch, (args.num_epoch + 1)):
        train(segmentation_module, loader_train, optimizers, history, epoch, args)
        (iou, loss) = eval(loader_val, segmentation_module, args, crit)
        ckpted = False
        if (iou[0] > best_val['mIoU_1']):
            best_val['epoch_1'] = epoch
            best_val['mIoU_1'] = iou[0]
            ckpted = True
        if (iou[1] > best_val['mIoU_2']):
            best_val['epoch_2'] = epoch
            best_val['mIoU_2'] = iou[1]
            ckpted = True
        if (iou[2] > best_val['mIoU_3']):
            best_val['epoch_3'] = epoch
            best_val['mIoU_3'] = iou[2]
            ckpted = True
        if ((((iou[0] + iou[1]) + iou[2]) / 3) > best_val['mIoU']):
            best_val['epoch'] = epoch
            best_val['mIoU'] = (((iou[0] + iou[1]) + iou[2]) / 3)
            ckpted = True
        # Unconditional checkpoints every 50 epochs and at the final epoch.
        if ((epoch % 50) == 0):
            checkpoint(nets, history, args, epoch)
            continue
        if (epoch == args.num_epoch):
            checkpoint(nets, history, args, epoch)
            continue
        # Warmup: never checkpoint on metric improvement before epoch 15.
        if (epoch < 15):
            ckpted = False
        if (ckpted == False):
            continue
        else:
            checkpoint(nets, history, args, epoch)
            continue
    print()
    print('Training Done!')
def jacobi(M):
    """Perform Jacobi-style symmetric Gaussian elimination on the square
    matrix ``M`` and return the resulting upper-triangular matrix.

    The working copy ``q`` uses its strict lower triangle as scratch space
    for the pre-division row entries; that triangle is zeroed out before the
    result is assembled.

    NOTE(review): the pivots ``q[i][i]`` are assumed nonzero and ``M`` is
    assumed symmetric (only the upper triangle is read) — neither is checked.
    """
    if (not M.is_square()):
        raise ValueError('the matrix must be square')
    dim = M.nrows()
    q = [list(row) for row in M]
    for i in range((dim - 1)):
        for j in range((i + 1), dim):
            # Stash the original entry below the diagonal, then normalize
            # the upper entry by the pivot.
            q[j][i] = q[i][j]
            q[i][j] = (q[i][j] / q[i][i])
        # Eliminate the i-th row/column from the trailing submatrix.
        for k in range((i + 1), dim):
            for l in range(k, dim):
                q[k][l] -= (q[k][i] * q[i][l])
    # Discard the scratch entries: the result is upper triangular.
    for i in range(1, dim):
        for j in range(i):
            q[i][j] = 0
    return matrix(q)
def enforce_concatenated_form(layout, form):
    """Convert ``layout`` (an Awkward content node) so that it has exactly
    ``form``, the form expected from a concatenation/merge result.

    Walks ``layout`` and ``form`` in lockstep, handling unknown, union,
    option, indexed, NumPy, regular, list, and record nodes; raises
    ``AssertionError`` whenever ``form`` could not legitimately have arisen
    from a merge involving ``layout``.

    NOTE(review): relies on Awkward-internal APIs (``length_zero_array``,
    ``to_backend``, ``Index8``/``Index64``, ``_parameters``); the comments
    below describe only the visible branch structure.
    """
    if ((not layout.is_unknown) and form.is_unknown):
        raise AssertionError('merge result should never be of an unknown type unless the layout is unknown')
    elif (layout.is_unknown and (not form.is_unknown)):
        # An unknown layout contributes nothing: a length-zero array of the
        # target form on the layout's backend suffices.
        return form.length_zero_array(highlevel=False).to_backend(layout.backend)
    elif (layout.is_union and (not form.is_union)):
        raise AssertionError('merge result should be a union if layout is a union')
    elif ((not layout.is_union) and form.is_union):
        # Wrap the non-union layout as one arm of the target union.
        if (not ((form.tags == 'i8') and (form.index == 'i64'))):
            raise AssertionError('merge result that forms a union should have i8 tags and i64 index')
        if (layout.is_indexed and (not layout.is_option) and (layout.parameter('__array__') != 'categorical')):
            # Reuse an existing (non-categorical, non-option) index.
            index = layout.index.to64()
            layout_to_merge = layout.content
        else:
            index = ak.index.Index64(layout.backend.index_nplike.arange(layout.length, dtype=np.int64))
            layout_to_merge = layout
        type_ = layout_to_merge.form.type
        union_has_exact_type = False
        contents = []
        # First pass: place the exactly-matching content (if any) at the
        # front; all other contents become empty placeholder arrays.
        for content_form in form.contents:
            if _form_has_type(content_form, type_):
                contents.insert(0, enforce_concatenated_form(layout_to_merge, content_form))
                union_has_exact_type = True
            else:
                contents.append(content_form.length_zero_array(highlevel=False).to_backend(layout.backend))
        # Second pass (fallback): accept the first merge-compatible content.
        if (not union_has_exact_type):
            contents.clear()
            for content_form in form.contents:
                content_layout = content_form.length_zero_array(highlevel=False).to_backend(layout.backend)
                if mergeable(content_layout, layout_to_merge):
                    contents.insert(0, enforce_concatenated_form(layout_to_merge, content_form))
                else:
                    contents.append(content_form.length_zero_array(highlevel=False).to_backend(layout.backend))
        # All entries tagged 0: the layout occupies the first union arm.
        return ak.contents.UnionArray(ak.index.Index8(layout.backend.index_nplike.zeros(layout.length, dtype=np.int8)), index, contents, parameters=form._parameters)
    elif (layout.is_union and form.is_union):
        if (not ((form.tags == 'i8') and (form.index == 'i64'))):
            raise AssertionError('merge result that forms a union should have i8 tags and i64 index')
        if (len(form.contents) < len(layout.contents)):
            raise AssertionError("merge result should only grow or preserve a union's cardinality")
        form_contents = [f.length_zero_array(highlevel=False).to_backend(layout.backend) for f in form.contents]
        # Find a permutation of the form's contents such that each layout
        # content is pairwise mergeable with its assigned form content.
        form_indices = range(len(form_contents))
        for form_projection_indices in permutations(form_indices, len(layout.contents)):
            if all((mergeable(c, form_contents[i]) for (c, i) in zip(layout.contents, form_projection_indices))):
                break
        else:
            raise AssertionError('merge result should be mergeable against some permutation of the layout')
        next_contents = [enforce_concatenated_form(c, form.contents[i]) for (c, i) in zip(layout.contents, form_projection_indices)]
        # Unmatched form contents are appended as empty placeholder arrays.
        next_contents.extend([form_contents[i] for i in (set(form_indices) - set(form_projection_indices))])
        return ak.contents.UnionArray(ak.index.Index8(layout.backend.index_nplike.astype(layout.tags.data, np.int8)), layout.index.to64(), next_contents, parameters=form._parameters)
    elif (layout.is_option and (not form.is_option)):
        raise AssertionError('merge result should be an option if layout is an option')
    elif ((not layout.is_option) and form.is_option):
        # Lift the layout into an (all-valid) option, then recurse.
        return enforce_concatenated_form(ak.contents.UnmaskedArray.simplified(layout), form)
    elif (layout.is_option and form.is_option):
        if isinstance(form, ak.forms.IndexedOptionForm):
            if (form.index != 'i64'):
                raise AssertionError('IndexedOptionForm should have i64 for merge results')
            return layout.to_IndexedOptionArray64().copy(content=enforce_concatenated_form(layout.content, form.content), parameters=form._parameters)
        elif isinstance(form, (ak.forms.ByteMaskedForm, ak.forms.BitMaskedForm, ak.forms.UnmaskedForm)):
            return layout.copy(content=enforce_concatenated_form(layout.content, form.content), parameters=form._parameters)
        else:
            raise AssertionError
    elif (layout.is_indexed and (not form.is_indexed)):
        raise AssertionError('merge result must be indexed if layout is indexed')
    elif ((not layout.is_indexed) and form.is_indexed):
        # Add a trivial (identity) index over the layout.
        return ak.contents.IndexedArray(ak.index.Index64(layout.backend.index_nplike.arange(layout.length)), enforce_concatenated_form(layout, form.content), parameters=form._parameters)
    elif (layout.is_indexed and form.is_indexed):
        if (form.index != 'i64'):
            raise AssertionError('merge result must be i64')
        return ak.contents.IndexedArray(layout.index.to64(), content=enforce_concatenated_form(layout.content, form.content), parameters=form._parameters)
    elif (layout.is_numpy and form.is_numpy):
        if (layout.inner_shape != form.inner_shape):
            raise AssertionError('layout must have same inner_shape as merge result')
        # Cast the leaf dtype to match the target primitive.
        return ak.values_astype(layout.copy(parameters=None), to=primitive_to_dtype(form.primitive), highlevel=False).copy(parameters=form._parameters)
    elif (layout.is_regular and form.is_numpy):
        raise AssertionError('layout cannot be regular for NumpyForm merge result')
    elif ((not (layout.is_regular or layout.is_numpy)) and form.is_regular):
        raise AssertionError('merge result should be ragged if any input is ragged')
    elif (layout.is_numpy and form.is_list):
        if (len(layout.inner_shape) == 0):
            raise AssertionError('layout must be at least 2D if merge result is a list')
        # Convert the multidimensional NumPy node into a RegularArray first.
        return enforce_concatenated_form(layout.to_RegularArray(), form)
    elif (layout.is_regular and form.is_regular):
        if (layout.size != form.size):
            raise AssertionError('RegularForm must have same size as layout for merge result')
        return layout.copy(content=enforce_concatenated_form(layout.content, form.content), parameters=form._parameters)
    elif (layout.is_regular and form.is_list):
        if isinstance(form, (ak.forms.ListOffsetForm, ak.forms.ListForm)):
            return enforce_concatenated_form(layout.to_ListOffsetArray64(False), form)
        else:
            raise AssertionError
    elif (layout.is_list and form.is_list):
        if isinstance(form, ak.forms.ListOffsetForm):
            layout = layout.to_ListOffsetArray64(False)
            return layout.copy(content=enforce_concatenated_form(layout.content, form.content), parameters=form._parameters)
        elif isinstance(form, ak.forms.ListForm):
            if (not ((form.starts == 'i64') and (form.stops == 'i64'))):
                raise TypeError('ListForm should have i64 for merge results')
            return ak.contents.ListArray(layout.starts.to64(), layout.stops.to64(), enforce_concatenated_form(layout.content, form.content), parameters=form._parameters)
        else:
            raise AssertionError
    elif (layout.is_record and (not form.is_record)):
        raise AssertionError('merge result should be a record if layout is a record')
    elif ((not layout.is_record) and form.is_record):
        raise AssertionError('layout result should be a record if merge result is a record')
    elif (layout.is_record and form.is_record):
        if (frozenset(layout.fields) != frozenset(form.fields)):
            raise AssertionError('merge result and form must have matching fields')
        elif (layout.is_tuple != form.is_tuple):
            raise AssertionError('merge result and form must both be tuple or record-like')
        # Recurse field-by-field, preserving the layout's field order.
        return ak.contents.RecordArray([enforce_concatenated_form(layout.content(f), form.content(f)) for f in layout.fields], layout._fields, length=layout.length, parameters=form._parameters, backend=layout.backend)
    else:
        raise NotImplementedError
def extract_N_frames_from_single_video(data_dir, video_name, save_dir, resize, scale=224, num_frames=64):
    """Extract `num_frames` evenly spaced frames from one video as PNGs.

    Frames are written to `<save_dir>/<num_frames>/<video_name>/` named by
    their 0-padded frame index. When `resize` is truthy, frames are scaled so
    the shorter side equals `scale` (aspect ratio preserved, bicubic).

    Returns the total frame count reported by the container (as int).
    """
    video_dir = os.path.join(data_dir, video_name)
    frame_dir = os.path.join(save_dir, str(num_frames), video_name)
    # exist_ok avoids the exists()/makedirs race of the original code.
    os.makedirs(frame_dir, exist_ok=True)
    vidcap = cv2.VideoCapture(video_dir)
    if resize:
        width = vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        if width < height:
            size = (scale, round(scale * height / width))
        else:
            size = (round(scale * width / height), scale)
    total_frames = vidcap.get(cv2.CAP_PROP_FRAME_COUNT)
    first_frame = 0
    last_frame = total_frames - 1
    # BUG FIX: the original divided by (num_frames - 1) unconditionally,
    # raising ZeroDivisionError for num_frames == 1.
    if num_frames > 1:
        skip_step = last_frame / (num_frames - 1)
        idxs_to_extract = np.arange(first_frame, last_frame + skip_step, skip_step).round()
    else:
        idxs_to_extract = np.array([first_frame])
    # A set makes the per-frame membership test O(1) instead of O(num_frames).
    idxs_to_extract = set(idxs_to_extract.astype('int').tolist())
    frame_idx = 0
    (ret, frame) = vidcap.read()
    if not ret:
        print(f'No valid frames exist in video: {video_dir}, skipping the process')
    while ret:
        if frame_idx in idxs_to_extract:
            if resize:
                frame = cv2.resize(frame, dsize=size, interpolation=cv2.INTER_CUBIC)
            cv2.imwrite(os.path.join(frame_dir, f'{frame_idx:05d}.png'), frame)
        frame_idx += 1
        (ret, frame) = vidcap.read()
    vidcap.release()
    return int(total_frames)
class Stage2Config():
    # Configuration bundle for the stage-2 (prior) model.
    # NOTE(review): annotated attributes with defaults suggest this was meant
    # to carry a @dataclass decorator (cf. Stage2Hparams); as written these
    # are plain shared class attributes — confirm at the definition site.
    type: str = 'transformer1d'  # architecture identifier
    vocab_size_txt: int = 16384  # text token vocabulary size
    vocab_size_img: int = 16384  # image token vocabulary size
    use_cls_cond: Optional[bool] = None  # condition on class labels if set
    hparams: Stage2Hparams = Stage2Hparams()  # nested hyper-parameter bundle
def build_mobilenetv2():
    """Load a pretrained MobileNetV2 and return its convolutional trunk
    (classifier head removed) on the GPU in eval mode."""
    backbone = torch.hub.load('pytorch/vision', 'mobilenet_v2', pretrained=True)
    children = list(backbone.children())
    feature_extractor = torch.nn.Sequential(*children[:-1])
    feature_extractor.cuda()
    feature_extractor.eval()
    return feature_extractor
def test_highlevel():
    """repr() abbreviates long arrays to one line while str() shows the
    (possibly value-abbreviated) full content."""
    jagged = ak.highlevel.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5], [6.6], [7.7, 8.8, 9.9]], check_valid=True)
    assert repr(jagged) == "<Array [[1.1, 2.2, 3.3], [], ..., [7.7, 8.8, 9.9]] type='5 * var * float64'>"
    assert str(jagged) == '[[1.1, 2.2, 3.3], [], [4.4, 5.5], [6.6], [7.7, 8.8, 9.9]]'

    flat = ak.highlevel.Array(np.arange(100, dtype=np.int32), check_valid=True)
    assert repr(flat) == "<Array [0, 1, 2, 3, 4, 5, 6, ..., 94, 95, 96, 97, 98, 99] type='100 * int32'>"
    assert str(flat) == '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ..., 90, 91, 92, 93, 94, 95, 96, 97, 98, 99]'

    records = ak.highlevel.Array('[{"one": 3.14, "two": [1.1, 2.2]}, {"one": 99.9, "two": [-3.1415926]}]', check_valid=True)
    assert repr(records) == "<Array [{one: 3.14, two: [...]}, {...}] type='2 * {one: float64, two: var *...'>"
    assert str(records) == '[{one: 3.14, two: [1.1, 2.2]}, {one: 99.9, two: [-3.14]}]'
def test_post():
    """Check AdaptiveLinearNet's Hebbian weight updates driven by a 'post' leaf.

    Builds a delta-w CPPN node over (x_in, x_out, post) with weights
    [1, 2, 3], constructs a network on three input / two output coordinates,
    and verifies (1) the initial weights match a hand-computed tanh
    expression and (2) after each activation the *expressed* weights grow by
    the delta-w rule and stay clamped to [-3, 3].

    NOTE(review): the constants 0.2 (weight threshold/shrink) and 3.0 (clamp)
    mirror AdaptiveLinearNet defaults — confirm against its implementation.
    """
    leaves = {name: Leaf(name=name) for name in ['x_in', 'y_in', 'x_out', 'y_out', 'pre', 'post', 'w']}
    delta_w_node = Node([leaves['x_in'], leaves['x_out'], leaves['post']], [1.0, 2.0, 3.0], 1.0, 0.0, identity, sum_ag, name='delta_w', leaves=leaves)
    input_coords = [[(- 1.0), 0.0], [1.0, 0.0], [0.0, (- 1.0)]]
    output_coords = [[(- 1.0), 0.0], [1.0, 0.0]]
    net = AdaptiveLinearNet(delta_w_node, input_coords, output_coords, activation=slow_tanh, cppn_activation=slow_tanh, device='cpu')
    # Expected initial weights: tanh(x_in + 2 * x_out) per coordinate pair.
    w = np_tanh(np.array([[((- 1.0) + (2 * (- 1.0))), (1.0 + (2 * (- 1.0))), (2 * (- 1.0))], [((- 1.0) + (2 * 1.0)), (1.0 + (2 * 1.0)), (2 * 1.0)]], dtype=np.float32))
    # Threshold small weights to zero, shrink the rest towards zero by 0.2,
    # then clamp to [-3, 3].
    w[(np.abs(w) < 0.2)] = 0
    w[(w < 0)] += 0.2
    w[(w > 0)] -= 0.2
    w[(w > 3.0)] = 3.0
    w[(w < (- 3.0))] = (- 3.0)
    # Only weights expressed (nonzero) at init keep receiving updates.
    w_expressed = (w != 0)
    assert np.allclose(net.input_to_output.numpy(), w)
    for _ in range(3):
        inputs = np.array([[(- 1.0), 2.0, 3.0]])
        outputs = net.activate(inputs)[0]
        # Network activation: tanh(0.5 * W @ x).
        activs = np.tanh((0.5 * w.dot(inputs[0])))
        assert np.allclose(outputs, activs)
        # delta_w = tanh(x_in + 2*x_out + 3*post) for each connection.
        delta_w = np_tanh((np.array([[((- 1.0) + (2 * (- 1.0))), (1.0 + (2 * (- 1.0))), (2 * (- 1.0))], [((- 1.0) + (2 * 1.0)), (1.0 + (2 * 1.0)), (2 * 1.0)]], dtype=np.float32) + (3 * np.expand_dims(activs, 1))))
        w[w_expressed] += delta_w[w_expressed]
        w[(w > 3.0)] = 3.0
        w[(w < (- 3.0))] = (- 3.0)
        assert np.allclose(net.input_to_output.numpy(), w)
def test_regulartype_numpytype():
    """A RegularType over a NumpyType must round-trip through the parser."""
    regular = RegularType(NumpyType('int32'), 5)
    rendered = str(regular)
    assert str(parser.parse(rendered)) == rendered
def train_fixed_split(loggers, loaders, model, optimizer, scheduler, datasets, **kwargs):
    """Train on a fixed split: split 0 is trained each epoch, the remaining
    splits are evaluated on eval epochs; checkpoints on ckpt epochs."""
    start_epoch = 0
    if cfg.train.auto_resume:
        start_epoch = load_ckpt(model, optimizer, scheduler)
    if start_epoch == cfg.optim.max_epoch:
        logging.info('Checkpoint found, Task already done')
    else:
        logging.info('Start from epoch {}'.format(start_epoch))

    num_splits = len(loggers)
    for cur_epoch in range(start_epoch, cfg.optim.max_epoch):
        # Split 0 is the training split.
        train_epoch(loggers[0], model, optimizer, scheduler, datasets[0],
                    train=True, report_rank_based_metric=False)
        loggers[0].write_epoch(cur_epoch)
        if is_eval_epoch(cur_epoch):
            # Remaining splits are evaluation-only (with ranking metrics).
            for split in range(1, num_splits):
                train_epoch(loggers[split], model, optimizer, scheduler,
                            datasets[split], train=False,
                            report_rank_based_metric=True)
                loggers[split].write_epoch(cur_epoch)
        if is_ckpt_epoch(cur_epoch):
            save_ckpt(model, optimizer, scheduler, cur_epoch)

    for logger in loggers:
        logger.close()
    if cfg.train.ckpt_clean:
        clean_ckpt()
    logging.info('Task done, results saved in {}'.format(cfg.out_dir))
class BiotETHTerm(BiotTerm, ETHTerm):
    """Biot coupling term with exponential time-history (fading-memory)
    behavior, combining BiotTerm's grad/div modes with ETHTerm's stored
    history data.

    Per sfepy term conventions: 'material_0' is the Biot coefficient array
    (shape 'S, 1'), 'material_1' the (1, 1) decay parameter.
    """
    name = 'dw_biot_eth'
    arg_types = (('ts', 'material_0', 'material_1', 'virtual', 'state'), ('ts', 'material_0', 'material_1', 'state', 'virtual'))
    arg_shapes = {'material_0': 'S, 1', 'material_1': '1, 1', 'virtual/grad': ('D', None), 'state/grad': 1, 'virtual/div': (1, None), 'state/div': 'D'}
    modes = ('grad', 'div')

    def get_fargs(self, ts, mat0, mat1, vvar, svar, mode=None, term_mode=None, diff_var=None, **kwargs):
        """Assemble the function arguments for 'weak' evaluation.

        'grad' mode quadrature-evaluates the scalar state value; 'div' mode
        uses the Cauchy strain of the vector variable. In the non-diff case
        the current values are augmented with the exponential history.

        Raises ValueError for any evaluation mode other than 'weak'.
        """
        if (self.mode == 'grad'):
            (qp_var, qp_name, iv) = (svar, 'val', 4)
        else:
            (qp_var, qp_name, iv) = (vvar, 'cauchy_strain', 3)
        if (mode == 'weak'):
            (vvg, _, key) = self.get_mapping(vvar, return_key=True)
            (svg, _) = self.get_mapping(svar)
            if (diff_var is None):
                val_qp = self.get(qp_var, qp_name)
                # History-cache key includes the material and variable names.
                key += tuple((self.arg_names[ii] for ii in [1, 2, iv]))
                data = self.get_eth_data(key, qp_var, mat1, val_qp)
                # Fading-memory contribution + current values.
                val = (data.history + data.values)
                fargs = (ts.dt, val, mat0, svg, vvg, 0)
            else:
                # Tangent (diff) mode: dummy 4-d value array, flag 1.
                val_qp = nm.array([0], ndmin=4, dtype=nm.float64)
                fargs = (ts.dt, val_qp, mat0, svg, vvg, 1)
            return fargs
        else:
            raise ValueError(('unsupported evaluation mode in %s! (%s)' % (self.name, mode)))
class roi_2mlp_head_prd(nn.Module):
    """Two-layer MLP head for the predicate branch: RoI-transform the feature
    map, add a mask tensor, then pass through two fully connected layers."""

    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__()
        self.dim_in = dim_in
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        hidden_dim = cfg.FAST_RCNN.MLP_HEAD_DIM
        self.dim_out = hidden_dim
        roi_size = cfg.FAST_RCNN.ROI_XFORM_RESOLUTION
        # Pooled RoI of shape (dim_in, roi_size, roi_size) is flattened into fc1.
        self.fc1 = nn.Linear(dim_in * roi_size ** 2, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self._init_weights()

    def _init_weights(self):
        """Xavier-initialize both FC weights; zero both biases."""
        for fc in (self.fc1, self.fc2):
            mynn.init.XavierFill(fc.weight)
            init.constant_(fc.bias, 0)

    def detectron_weight_mapping(self):
        """Map this module's parameter names onto Detectron blob names."""
        mapping = {'fc1.weight': 'fc6_w', 'fc1.bias': 'fc6_b', 'fc2.weight': 'fc7_w', 'fc2.bias': 'fc7_b'}
        return (mapping, [])

    def forward(self, x, rpn_ret, mask, rois_name, use_relu=True):
        pooled = self.roi_xform(
            x, rpn_ret,
            blob_rois=rois_name,
            method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
            resolution=cfg.FAST_RCNN.ROI_XFORM_RESOLUTION,
            spatial_scale=self.spatial_scale,
            sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO,
        )
        pooled = pooled + mask
        batch_size = pooled.size(0)
        hidden = F.relu(self.fc1(pooled.view(batch_size, -1)), inplace=True)
        if use_relu:
            return F.relu(self.fc2(hidden), inplace=True)
        return self.fc2(hidden)
def fullsubnet_validate(model, validation_loader, writer, dir_to_save, epoch, DEVICE):
    """Run one validation pass of a FullSubNet model.

    Accumulates the cIRM loss over `validation_loader`, reconstructs enhanced
    waveforms from the predicted complex ratio mask, and scores them with
    PESQ/STOI (written per-utterance to '<dir_to_save>/Epoch_<epoch>_SCORES').
    Every 10th epoch the first example of each batch is logged to `writer`.

    Returns
    -------
    (validation_loss, avg_pesq, avg_stoi) : batch-averaged loss and metrics.
    """
    validation_loss = 0
    batch_num = 0
    avg_pesq = 0
    avg_stoi = 0
    model.eval()
    # BUG FIX: the score file was opened but never closed; the context manager
    # guarantees it is flushed and closed even if scoring raises.
    with open((dir_to_save + '/Epoch_') + ('%d_SCORES' % epoch), 'a') as f_score, torch.no_grad():
        for (inputs, targets) in tools.Bar(validation_loader):
            batch_num += 1
            inputs = inputs.float().to(DEVICE)
            targets = targets.float().to(DEVICE)
            noisy_complex = tools.stft(inputs)
            clean_complex = tools.stft(targets)
            (noisy_mag, _) = tools.mag_phase(noisy_complex)
            # Loss is computed against the compressed complex ideal ratio mask.
            cIRM = tools.build_complex_ideal_ratio_mask(noisy_complex, clean_complex)
            cRM = model(noisy_mag)
            loss = model.loss(cIRM, cRM)
            validation_loss += loss
            # Undo mask compression, then apply the complex mask:
            # (a+bi)(c+di) = (ac - bd) + (bc + ad)i.
            cRM = tools.decompress_cIRM(cRM)
            enhanced_real = (cRM[..., 0] * noisy_complex.real) - (cRM[..., 1] * noisy_complex.imag)
            enhanced_imag = (cRM[..., 1] * noisy_complex.real) + (cRM[..., 0] * noisy_complex.imag)
            enhanced_complex = torch.stack((enhanced_real, enhanced_imag), dim=-1)
            enhanced_outputs = tools.istft(enhanced_complex, length=inputs.size(-1))
            estimated_wavs = enhanced_outputs.cpu().detach().numpy()
            clean_wavs = targets.cpu().detach().numpy()
            pesq = cal_pesq(estimated_wavs, clean_wavs)
            stoi = cal_stoi(estimated_wavs, clean_wavs)
            for i in range(len(pesq)):
                f_score.write('PESQ {:.6f} | STOI {:.6f}\n'.format(pesq[i], stoi[i]))
            pesq = np.reshape(pesq, (1, -1))
            stoi = np.reshape(stoi, (1, -1))
            avg_pesq += sum(pesq[0]) / len(inputs)
            avg_stoi += sum(stoi[0]) / len(inputs)
            # Log an example utterance every 10th epoch.
            if (epoch % 10) == 0:
                writer.log_wav(inputs[0], targets[0], enhanced_outputs[0], epoch)
    validation_loss /= batch_num
    avg_pesq /= batch_num
    avg_stoi /= batch_num
    return (validation_loss, avg_pesq, avg_stoi)
class LieAlgebrasWithBasis(CategoryWithAxiom_over_base_ring):
    """The category of Lie algebras with a distinguished basis."""
    _base_category_class_and_axiom = (LieAlgebras, 'WithBasis')

    def example(self, gens=None):
        """Return an example Lie algebra with basis (by default the abelian
        Lie algebra indexed by all partitions)."""
        if gens is None:
            from sage.combinat.partition import Partitions
            gens = Partitions()
        from sage.categories.examples.lie_algebras_with_basis import Example
        return Example(self.base_ring(), gens)

    Graded = LazyImport('sage.categories.graded_lie_algebras_with_basis', 'GradedLieAlgebrasWithBasis', as_name='Graded')

    class ParentMethods():

        def _basis_key(self, x):
            """Return the key used to compare basis indices (default: identity)."""
            return x

        # BUG FIX: the original read "_method(optional=True)" followed by a
        # body-less def (a syntax error) — evidently a mangled
        # @abstract_method(optional=True) decorator (provided by
        # sage.misc.abstract_method), restored here.
        @abstract_method(optional=True)
        def bracket_on_basis(self, x, y):
            """Return the bracket of the basis elements indexed by ``x`` and
            ``y``, where ``x < y`` with respect to ``_basis_key``."""

        def module(self):
            """Return an R-module isomorphic to ``self`` as a free module on
            the same basis keys."""
            from sage.combinat.free_module import CombinatorialFreeModule
            try:
                return CombinatorialFreeModule(self.base_ring(), self.basis().keys())
            except AttributeError:
                return CombinatorialFreeModule(self.base_ring(), self.basis())

        def from_vector(self, v, order=None, coerce=False):
            """Reconstruct an element of ``self`` from the vector ``v`` in
            ``self.module()`` (``order``/``coerce`` accepted for interface
            compatibility)."""
            B = self.basis()
            return self.sum(((v[i] * B[i]) for i in v.support()))

        def dimension(self):
            """Return the dimension of ``self`` (cardinality of the basis)."""
            return self.basis().cardinality()

        def pbw_basis(self, basis_key=None, **kwds):
            """Return the Poincare-Birkhoff-Witt basis of the universal
            enveloping algebra of ``self``."""
            from sage.algebras.lie_algebras.poincare_birkhoff_witt import PoincareBirkhoffWittBasis
            return PoincareBirkhoffWittBasis(self, basis_key, **kwds)

        poincare_birkhoff_witt_basis = pbw_basis
        _construct_UEA = pbw_basis

    class ElementMethods():

        def _bracket_(self, y):
            """Return the bracket [self, y], expanded bilinearly over the
            basis using ``bracket_on_basis`` (with antisymmetry for the
            reversed key order)."""
            P = self.parent()

            def term(ml, mr):
                key_ml = P._basis_key(ml)
                key_mr = P._basis_key(mr)
                if (key_ml == key_mr):
                    return P.zero()
                if (key_ml < key_mr):
                    return P.bracket_on_basis(ml, mr)
                return (- P.bracket_on_basis(mr, ml))
            return P.sum((((cl * cr) * term(ml, mr)) for (ml, cl) in self for (mr, cr) in y))

        def to_vector(self, order=None):
            """Return ``self`` as a vector in ``self.parent().module()``."""
            M = self.parent().module()
            B = M.basis()
            return M.sum(((self[i] * B[i]) for i in self.support()))

        def lift(self):
            """Lift ``self`` into the universal enveloping algebra, mapping
            basis indices through ``_UEA_names_map`` when present."""
            P = self.parent()
            UEA = P.universal_enveloping_algebra()
            try:
                gen_dict = UEA.algebra_generators()
            except (TypeError, AttributeError):
                gen_dict = UEA.gens_dict()
            s = UEA.zero()
            if (not self):
                return s
            if hasattr(P, '_UEA_names_map'):
                names_map = P._UEA_names_map
                for (t, c) in self.monomial_coefficients(copy=False).items():
                    s += (c * gen_dict[names_map[t]])
            else:
                for (t, c) in self.monomial_coefficients(copy=False).items():
                    s += (c * gen_dict[t])
            return s
class FairseqOptimizer(object):
    """Base wrapper around a ``torch.optim.Optimizer``.

    Subclasses set ``self._optimizer`` and may override ``optimizer_config``.

    BUG FIX: the original had lost its decorators — ``optimizer``,
    ``optimizer_config``, ``params``, ``param_groups`` and the
    ``supports_*`` capability flags were plain methods, yet the class's own
    code uses them as attributes (``self.params`` in ``multiply_grads``,
    ``self.supports_step_with_scale`` in ``step``, ``self.param_groups[0]``
    in ``get_lr``), and two same-named ``optimizer`` defs shadowed each
    other. Restored as properties / setter / staticmethod.
    """

    def __init__(self, args):
        super().__init__()
        self.args = args

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        pass

    @property
    def optimizer(self):
        """Return the wrapped ``torch.optim.Optimizer`` instance."""
        if not hasattr(self, '_optimizer'):
            raise NotImplementedError
        if not isinstance(self._optimizer, torch.optim.Optimizer):
            raise ValueError('_optimizer must be an instance of torch.optim.Optimizer')
        return self._optimizer

    @optimizer.setter
    def optimizer(self, optimizer):
        """Reset the wrapped optimizer instance."""
        if not hasattr(self, '_optimizer'):
            raise NotImplementedError
        if not isinstance(self._optimizer, torch.optim.Optimizer):
            raise ValueError('_optimizer must be an instance of torch.optim.Optimizer')
        self._optimizer = optimizer

    @property
    def optimizer_config(self):
        """Return a kwargs dict used to override stored optimizer args
        (e.g. when loading a checkpoint); must be provided by subclasses."""
        raise NotImplementedError

    @property
    def params(self):
        """Yield every parameter held by the optimizer."""
        for param_group in self.param_groups:
            for p in param_group['params']:
                yield p

    @property
    def param_groups(self):
        return self.optimizer.param_groups

    def __getstate__(self):
        return self._optimizer.__getstate__()

    def get_lr(self):
        """Return the current learning rate (first param group)."""
        return self.param_groups[0]['lr']

    def set_lr(self, lr):
        """Set the learning rate on all param groups."""
        for param_group in self.param_groups:
            param_group['lr'] = lr

    def state_dict(self):
        """Return the optimizer's state dict."""
        return self.optimizer.state_dict()

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load an optimizer state dict, optionally overriding per-group
        hyperparameters (lr etc.) with `optimizer_overrides`."""
        self.optimizer.load_state_dict(state_dict)
        if optimizer_overrides is not None and len(optimizer_overrides) > 0:
            for group in self.param_groups:
                group.update(optimizer_overrides)

    def backward(self, loss):
        """Compute the sum of gradients of the loss w.r.t. the parameters."""
        loss.backward()

    def multiply_grads(self, c):
        """Multiply all existing gradients by the constant ``c``."""
        for p in self.params:
            if p.grad is not None:
                p.grad.data.mul_(c)

    def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
        """Clip the gradient norm and return it."""
        return utils.clip_grad_norm_(self.params, max_norm, aggregate_norm_fn)

    def step(self, closure=None, scale=1.0):
        """Perform a single optimization step, passing ``scale`` through when
        the wrapped optimizer supports it."""
        if self.supports_step_with_scale:
            self.optimizer.step(closure, scale=scale)
        else:
            self.optimizer.step(closure)

    def zero_grad(self):
        """Clear all gradients."""
        for p in self.params:
            p.grad = None
        self.optimizer.zero_grad()

    @property
    def supports_memory_efficient_fp16(self):
        if hasattr(self.optimizer, 'supports_memory_efficient_fp16'):
            return self.optimizer.supports_memory_efficient_fp16
        return False

    @property
    def supports_step_with_scale(self):
        if hasattr(self.optimizer, 'supports_step_with_scale'):
            return self.optimizer.supports_step_with_scale
        return False

    @property
    def supports_flat_params(self):
        """Whether the optimizer supports collapsing all parameters into a
        single contiguous view; False by default."""
        if hasattr(self.optimizer, 'supports_flat_params'):
            return self.optimizer.supports_flat_params
        return False

    def average_params(self):
        """Hook for parameter averaging; no-op in the base class."""
        pass
def convert_processed_lines(processed_lines):
    """Group tokenized lines into paragraphs of sentences with spacing flags.

    Each element of ``processed_lines`` is a list of word strings for one
    line.  A leading ``' '`` sentinel on a line is dropped; a line that is
    empty (or held only the sentinel) closes the current paragraph.  Words
    become ``(word, space_follows)`` tuples: an empty token marks the
    preceding word as space-followed, and the final word of every sentence
    is always flagged ``True``.

    Returns a list of paragraphs, each a one-element list wrapping the list
    of sentences for that paragraph.

    Raises ``ValueError`` if a space marker appears before any word of a
    sentence.  (BUG FIX: the original error path referenced an undefined
    name ``filename``, raising NameError instead of ValueError, and printed
    a known-empty string; both removed.)
    """
    paragraphs = []
    sentences = []
    for words in processed_lines:
        # Drop the leading space sentinel; a line holding only it is empty.
        if (len(words) > 1) and (' ' == words[0]):
            words = words[1:]
        elif (len(words) == 1) and (' ' == words[0]):
            words = []
        sentence = []
        for word in words:
            word = word.strip()
            if not word:
                if len(sentence) == 0:
                    raise ValueError('Unexpected space at start of sentence')
                # Empty token: previous word is followed by whitespace.
                sentence[-1] = (sentence[-1][0], True)
            else:
                sentence.append((word, False))
        if len(sentence) == 0:
            # Blank line: close the current paragraph.
            paragraphs.append([sentences])
            sentences = []
            continue
        # The sentence-final word always counts as space-followed.
        sentence[-1] = (sentence[-1][0], True)
        sentences.append(sentence)
    paragraphs.append([sentences])
    return paragraphs
# NOTE(review): the three bare expressions below appear to be the stripped
# remains of CLI decorators (command / argument / option) -- confirm against
# the original script; as written they are not valid standalone statements.
()
('input_path')
('--encoding', default='ISO-8859-1')
def convert_io_to_bio_format(input_path: str, encoding: str):
    """Convert a 5-column IO-tagged file to BIO tagging.

    Reads ``input_path`` and writes ``input_path + '.bio'``: whenever an
    ``I-<type>`` label starts a new entity (first label of a sentence, or
    preceded by ``O``), it is rewritten to ``B-<type>``.  Blank lines reset
    the per-sentence label history.
    """
    label_history = []
    with open(input_path, 'r', encoding=encoding) as in_f, open((input_path + '.bio'), 'w') as out_f:
        for original_line in in_f:
            line = original_line.strip()
            if (not line):
                # Sentence boundary: forget the label history.
                label_history = []
            else:
                # Columns: token + 3 ignored fields + IO label.
                (_, _, _, _, label) = line.split(' ')
                if (label == 'O'):
                    label_type = 'O'
                else:
                    (label_type, ent_type) = label.split('-')
                    # Entity start: convert I- to B- in the raw line.
                    if ((len(label_history) == 0) or (label_history[(- 1)] == 'O')):
                        converted_label = f'B-{ent_type}'
                        original_line = original_line.replace(label, converted_label)
                label_history.append(label_type)
            out_f.write(original_line)
class PBWBasisOfFreeAlgebra(CombinatorialFreeModule):
    """The Poincare-Birkhoff-Witt basis of a free algebra, with elements
    expanded in terms of Lyndon words of the underlying free monoid."""
    def __classcall_private__(cls, R, n=None, names=None):
        """Normalize the input so equal bases are identical objects: accept
        either a free algebra directly, or base ring + rank/names data."""
        if ((n is None) and (names is None)):
            if (not isinstance(R, FreeAlgebra_generic)):
                raise ValueError('{} is not a free algebra'.format(R))
            alg = R
        else:
            if (n is None):
                n = len(names)
            alg = FreeAlgebra(R, n, names)
        return super().__classcall__(cls, alg)
    def __init__(self, alg):
        """Initialize the PBW basis module over ``alg``'s monoid of words."""
        R = alg.base_ring()
        self._alg = alg
        category = AlgebrasWithBasis(R)
        CombinatorialFreeModule.__init__(self, R, alg.monoid(), prefix='PBW', category=category)
        self._assign_names(alg.variable_names())
    def _repr_(self):
        return 'The Poincare-Birkhoff-Witt basis of {}'.format(self._alg)
    def _repr_term(self, w):
        """Pretty-print a basis word ``w`` via its Lyndon factorization,
        collapsing repeated factors into powers (e.g. ``x^3*y``)."""
        if (len(w) == 0):
            return super()._repr_term(w)
        ret = ''
        p = 1
        cur = None
        for x in w.to_word().lyndon_factorization():
            if (x == cur):
                # Same Lyndon factor repeated: just bump the exponent.
                p += 1
            else:
                if (len(ret) != 0):
                    if (p != 1):
                        ret += '^{}'.format(p)
                    ret += '*'
                ret += super()._repr_term(x.to_monoid_element())
                cur = x
                p = 1
        # Flush the exponent of the trailing factor.
        if (p != 1):
            ret += '^{}'.format(p)
        return ret
    def _element_constructor_(self, x):
        """Convert ``x`` into this basis; free-algebra elements go through
        ``pbw_element``."""
        if isinstance(x, FreeAlgebraElement):
            return self._alg.pbw_element(self._alg(x))
        return CombinatorialFreeModule._element_constructor_(self, x)
    def _coerce_map_from_(self, R):
        # Coerce from anything the underlying free algebra coerces from.
        return self._alg.has_coerce_map_from(R)
    def one_basis(self):
        """Return the index of the unit element (the empty word)."""
        return self._indices.one()
    def algebra_generators(self):
        """Return the monomial generators of this basis."""
        return tuple((self.monomial(x) for x in self._indices.gens()))
    gens = algebra_generators
    def gen(self, i):
        """Return the ``i``-th generator."""
        return self.algebra_generators()[i]
    def free_algebra(self):
        """Return the underlying free algebra."""
        return self._alg
    def product(self, u, v):
        """Multiply by expanding both factors in the free algebra and
        converting the product back to the PBW basis."""
        return self((self.expansion(u) * self.expansion(v)))
    def expansion(self, t):
        """Expand a PBW element ``t`` into the free algebra via Lie
        polynomials of its index words."""
        return sum([(i[1] * self._alg.lie_polynomial(i[0])) for i in list(t)], self._alg.zero())
    class Element(CombinatorialFreeModule.Element):
        def expand(self):
            """Expand ``self`` in the underlying free algebra."""
            return self.parent().expansion(self)
def trans_net(net, input_var, name='TransferedPytorchModel'):
    """Translate a PyTorch ``net`` into the module-global caffe net builder.

    Initializes the global ``log`` recorder with ``input_var``, registers
    every submodule's name in the global ``layer_names`` map, and runs a
    forward pass -- presumably the forward pass triggers patched ops that
    record the corresponding caffe layers (TODO confirm; the machinery lives
    outside this block).
    """
    print('Starting Transform, This will take a while')
    log.init([input_var])
    log.cnet.net.name = name
    log.cnet.net.input.extend([log.blobs(input_var)])
    log.cnet.net.input_dim.extend(input_var.size())
    global NET_INITTED
    NET_INITTED = True
    # NOTE(review): this loop rebinds the ``name`` parameter to each module's
    # qualified name -- the function argument is no longer accessible after it.
    for (name, layer) in net.named_modules():
        layer_names[layer] = name
    print('torch ops name:', layer_names)
    # Forward pass for its side effects; the returned output is unused.
    out = net.forward(input_var)
    print('Transform Completed')
def getTensorView(tensor, tensor_layout, conv_kind, problem_size, operand):
    """Wrap ``tensor`` in the cutlass TensorView matching its role and dtype.

    ``operand`` selects which implicit-GEMM extent to use ('a', 'b', or
    'c'/'d'); the numpy dtype (plus, for int8, the layout) selects the
    concrete TensorView type.  Raises ValueError for unknown operands or
    unsupported dtypes.
    """
    ref = getTensorRef(tensor, tensor_layout, conv_kind, problem_size, operand)
    if operand == 'a':
        extent = cutlass.conv.implicit_gemm_tensor_a_extent(conv_kind, problem_size)
    elif operand == 'b':
        extent = cutlass.conv.implicit_gemm_tensor_b_extent(conv_kind, problem_size)
    elif operand in ('c', 'd'):
        extent = cutlass.conv.implicit_gemm_tensor_c_extent(conv_kind, problem_size)
    else:
        raise ValueError('unknown operand: ' + operand)
    dtype = tensor.dtype
    # int8 is the only dtype whose view type also depends on the layout.
    if dtype == np.int8:
        if tensor_layout == cutlass.TensorNC32HW32:
            return cutlass.TensorViewS8NC32HW32(ref, extent)
        if tensor_layout == cutlass.TensorC32RSK32:
            return cutlass.TensorViewS8C32RSK32(ref, extent)
        return cutlass.TensorViewS8NHWC(ref, extent)
    if dtype == np.float64:
        return cutlass.TensorViewF64NHWC(ref, extent)
    if dtype == np.float32:
        return cutlass.TensorViewF32NHWC(ref, extent)
    if dtype == np.float16:
        return cutlass.TensorViewF16NHWC(ref, extent)
    if dtype == bfloat16:
        return cutlass.TensorViewBF16NHWC(ref, extent)
    if dtype == np.int32:
        return cutlass.TensorViewS32NHWC(ref, extent)
    raise ValueError('unsupported data type')
class TFltPrV(object):
    """SWIG-generated proxy for the SNAP vector of (TFlt, TFlt) pairs
    (``TVec<TFltPr>``).  Every method delegates to the native ``_snap``
    extension; this wrapper adds no logic of its own."""
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr
    __swig_destroy__ = _snap.delete_TFltPrV
    # --- construction / (de)serialization ---
    def __init__(self, *args):
        _snap.TFltPrV_swiginit(self, _snap.new_TFltPrV(*args))
    def Load(self, SIn):
        return _snap.TFltPrV_Load(self, SIn)
    def Save(self, SOut):
        return _snap.TFltPrV_Save(self, SOut)
    # --- operators and hashing ---
    def __add__(self, Val):
        return _snap.TFltPrV___add__(self, Val)
    def __eq__(self, Vec):
        return _snap.TFltPrV___eq__(self, Vec)
    def __lt__(self, Vec):
        return _snap.TFltPrV___lt__(self, Vec)
    def GetMemUsed(self):
        return _snap.TFltPrV_GetMemUsed(self)
    def GetMemSize(self):
        return _snap.TFltPrV_GetMemSize(self)
    def GetPrimHashCd(self):
        return _snap.TFltPrV_GetPrimHashCd(self)
    def GetSecHashCd(self):
        return _snap.TFltPrV_GetSecHashCd(self)
    # --- capacity management ---
    def Gen(self, *args):
        return _snap.TFltPrV_Gen(self, *args)
    def GenExt(self, _ValT, _Vals):
        return _snap.TFltPrV_GenExt(self, _ValT, _Vals)
    def IsExt(self):
        return _snap.TFltPrV_IsExt(self)
    def Reserve(self, *args):
        return _snap.TFltPrV_Reserve(self, *args)
    def Clr(self, DoDel=True, NoDelLim=(- 1)):
        return _snap.TFltPrV_Clr(self, DoDel, NoDelLim)
    def Trunc(self, _Vals=(- 1)):
        return _snap.TFltPrV_Trunc(self, _Vals)
    def Reduce(self, _Vals=(- 1)):
        return _snap.TFltPrV_Reduce(self, _Vals)
    def Pack(self):
        return _snap.TFltPrV_Pack(self)
    def MoveFrom(self, Vec):
        return _snap.TFltPrV_MoveFrom(self, Vec)
    def CopyUniqueFrom(self, Vec, Offset, Sz):
        return _snap.TFltPrV_CopyUniqueFrom(self, Vec, Offset, Sz)
    def Empty(self):
        return _snap.TFltPrV_Empty(self)
    def Len(self):
        return _snap.TFltPrV_Len(self)
    def Reserved(self):
        return _snap.TFltPrV_Reserved(self)
    # --- element access / iteration ---
    def Last(self, *args):
        return _snap.TFltPrV_Last(self, *args)
    def LastValN(self):
        return _snap.TFltPrV_LastValN(self)
    def LastLast(self, *args):
        return _snap.TFltPrV_LastLast(self, *args)
    def GetRndVal(self, *args):
        return _snap.TFltPrV_GetRndVal(self, *args)
    def BegI(self):
        return _snap.TFltPrV_BegI(self)
    def EndI(self):
        return _snap.TFltPrV_EndI(self)
    def GetI(self, ValN):
        return _snap.TFltPrV_GetI(self, ValN)
    # --- adding / inserting / deleting elements ---
    def Add(self, *args):
        return _snap.TFltPrV_Add(self, *args)
    def AddMP(self, Val):
        return _snap.TFltPrV_AddMP(self, Val)
    def MoveLastMP(self, Val, Inc):
        return _snap.TFltPrV_MoveLastMP(self, Val, Inc)
    def AddV(self, ValV):
        return _snap.TFltPrV_AddV(self, ValV)
    def AddSorted(self, Val, Asc=True, _MxVals=(- 1)):
        return _snap.TFltPrV_AddSorted(self, Val, Asc, _MxVals)
    def AddBackSorted(self, Val, Asc):
        return _snap.TFltPrV_AddBackSorted(self, Val, Asc)
    def AddMerged(self, Val):
        return _snap.TFltPrV_AddMerged(self, Val)
    def AddVMerged(self, ValV):
        return _snap.TFltPrV_AddVMerged(self, ValV)
    def AddUnique(self, Val):
        return _snap.TFltPrV_AddUnique(self, Val)
    def GetVal(self, *args):
        return _snap.TFltPrV_GetVal(self, *args)
    def SetVal(self, ValN, Val):
        return _snap.TFltPrV_SetVal(self, ValN, Val)
    def GetSubValV(self, BValN, EValN, ValV):
        return _snap.TFltPrV_GetSubValV(self, BValN, EValN, ValV)
    def Ins(self, ValN, Val):
        return _snap.TFltPrV_Ins(self, ValN, Val)
    def Del(self, *args):
        return _snap.TFltPrV_Del(self, *args)
    def DelLast(self):
        return _snap.TFltPrV_DelLast(self)
    def DelIfIn(self, Val):
        return _snap.TFltPrV_DelIfIn(self, Val)
    def DelAll(self, Val):
        return _snap.TFltPrV_DelAll(self, Val)
    def PutAll(self, Val):
        return _snap.TFltPrV_PutAll(self, Val)
    def Swap(self, *args):
        return _snap.TFltPrV_Swap(self, *args)
    def SwapI(LVal, RVal):
        return _snap.TFltPrV_SwapI(LVal, RVal)
    SwapI = staticmethod(SwapI)
    # --- permutations / sorting ---
    def NextPerm(self):
        return _snap.TFltPrV_NextPerm(self)
    def PrevPerm(self):
        return _snap.TFltPrV_PrevPerm(self)
    def GetPivotValN(self, LValN, RValN):
        return _snap.TFltPrV_GetPivotValN(self, LValN, RValN)
    def BSort(self, MnLValN, MxRValN, Asc):
        return _snap.TFltPrV_BSort(self, MnLValN, MxRValN, Asc)
    def ISort(self, MnLValN, MxRValN, Asc):
        return _snap.TFltPrV_ISort(self, MnLValN, MxRValN, Asc)
    def Partition(self, MnLValN, MxRValN, Asc):
        return _snap.TFltPrV_Partition(self, MnLValN, MxRValN, Asc)
    def QSort(self, MnLValN, MxRValN, Asc):
        return _snap.TFltPrV_QSort(self, MnLValN, MxRValN, Asc)
    def Sort(self, Asc=True):
        return _snap.TFltPrV_Sort(self, Asc)
    def IsSorted(self, Asc=True):
        return _snap.TFltPrV_IsSorted(self, Asc)
    def Shuffle(self, Rnd):
        return _snap.TFltPrV_Shuffle(self, Rnd)
    def Reverse(self, *args):
        return _snap.TFltPrV_Reverse(self, *args)
    # --- set-like operations ---
    def Merge(self):
        return _snap.TFltPrV_Merge(self)
    def Intrs(self, *args):
        return _snap.TFltPrV_Intrs(self, *args)
    def Union(self, *args):
        return _snap.TFltPrV_Union(self, *args)
    def Diff(self, *args):
        return _snap.TFltPrV_Diff(self, *args)
    def IntrsLen(self, ValV):
        return _snap.TFltPrV_IntrsLen(self, ValV)
    def UnionLen(self, ValV):
        return _snap.TFltPrV_UnionLen(self, ValV)
    def Count(self, Val):
        return _snap.TFltPrV_Count(self, Val)
    # --- searching ---
    def SearchBin(self, *args):
        return _snap.TFltPrV_SearchBin(self, *args)
    def SearchBinLeft(self, Val, InsValN):
        return _snap.TFltPrV_SearchBinLeft(self, Val, InsValN)
    def SearchForw(self, Val, BValN=0):
        return _snap.TFltPrV_SearchForw(self, Val, BValN)
    def SearchBack(self, Val):
        return _snap.TFltPrV_SearchBack(self, Val)
    def SearchVForw(self, ValV, BValN=0):
        return _snap.TFltPrV_SearchVForw(self, ValV, BValN)
    def IsIn(self, *args):
        return _snap.TFltPrV_IsIn(self, *args)
    def IsInBin(self, Val):
        return _snap.TFltPrV_IsInBin(self, Val)
    def GetDat(self, Val):
        return _snap.TFltPrV_GetDat(self, Val)
    def GetAddDat(self, Val):
        return _snap.TFltPrV_GetAddDat(self, Val)
    def GetMxValN(self):
        return _snap.TFltPrV_GetMxValN(self)
    def GetV(*args):
        return _snap.TFltPrV_GetV(*args)
    GetV = staticmethod(GetV)
class MaxMinFairnessWaterFillingPolicy(Policy, WaterFillingAlgorithm):
    """Max-min-fairness water-filling policy that ignores per-worker speed.

    Delegates to the throughput-aware water-filling policy after replacing
    every throughput with 1.0, so the allocation depends only on fairness
    weights and scale factors, not on measured performance.
    """

    def __init__(self, priority_reweighting_policies=None):
        self._name = 'MaxMinFairnessWaterFilling'
        self._max_min_fairness_perf_policy = MaxMinFairnessWaterFillingPolicyWithPerf(priority_reweighting_policies)

    def get_allocation(self, unflattened_throughputs, scale_factors, unflattened_priority_weights, cluster_spec, entity_weights=None, entity_to_job_mapping=None, verbose=False, return_effective_throughputs=False):
        """Compute the per-job/per-worker allocation.

        Returns the unflattened allocation dict, or — when
        ``return_effective_throughputs`` is set — a tuple of the
        proportionally-normalized effective throughputs and the job ids.
        Returns None when there is nothing to allocate.
        """
        (throughputs, index) = super().flatten(unflattened_throughputs, cluster_spec)
        if throughputs is None:
            return None
        (job_ids, worker_types) = index
        (m, n) = (len(job_ids), len(worker_types))
        # Mask out real throughputs: every (job, worker) pair counts as 1.0.
        new_unflattened_throughputs = {}
        for job_id in unflattened_throughputs:
            new_unflattened_throughputs[job_id] = {worker_type: 1.0 for worker_type in unflattened_throughputs[job_id]}
        unflattened_x = self._max_min_fairness_perf_policy.get_allocation(new_unflattened_throughputs, scale_factors, unflattened_priority_weights, cluster_spec, entity_weights=entity_weights, entity_to_job_mapping=entity_to_job_mapping, verbose=verbose, return_effective_throughputs=False)
        if return_effective_throughputs:
            # Only materialize the dense m x n allocation matrix when it is
            # actually needed (the original built it unconditionally).
            x = np.zeros((m, n))
            for (i, job_id) in enumerate(job_ids):
                for (j, worker_type) in enumerate(worker_types):
                    x[i, j] = unflattened_x[job_id][worker_type]
            effective_throughputs = np.sum(np.multiply(throughputs, x), axis=1)
            proportional_throughputs = self._max_min_fairness_perf_policy._proportional_policy.get_throughputs(throughputs, index, cluster_spec)
            normalized_effective_throughputs = np.multiply(effective_throughputs, (1.0 / proportional_throughputs.reshape(m)))
            return (normalized_effective_throughputs, job_ids)
        return unflattened_x
# NOTE(review): the expression below appears to be the stripped remains of a
# Dash ``@app.callback(...)`` decorator -- confirm against the original app.
(Output('page-content', 'children'), [Input('url', 'pathname')])
def display_page(pathname):
    """Route a URL pathname to the layout of the matching sub-application.

    Unknown paths fall back to the home page rendered without a footer.
    """
    routes = {
        '/apps/textanalyzer': textanalyzer,
        '/apps/topicmodel': topicmodel,
        '/apps/topsources': topsources,
        '/apps/topsourcetrends': topsourcetrends,
        '/apps/dailywomenenglish': dailywomenenglish,
        '/apps/articlecounts': articlecounts,
    }
    app_module = routes.get(pathname)
    if app_module is None:
        return get_page_divs(home_page, enable_footer=False)
    return get_page_divs(app_module.layout())
class PrintTree(TreeVisitor):
    """Tree visitor that dumps the parse tree to stdout, optionally limited
    to nodes whose source line falls inside a given range."""
    def __init__(self, start=None, end=None):
        TreeVisitor.__init__(self)
        self._indent = ''
        if ((start is not None) or (end is not None)):
            # Open-ended bounds default to 0 / a very large line number.
            self._line_range = ((start or 0), (end or (2 ** 30)))
        else:
            self._line_range = None
    def indent(self):
        # NOTE(review): pairs with unindent() stripping 2 characters --
        # confirm the literal here is two spaces.
        self._indent += '  '
    def unindent(self):
        self._indent = self._indent[:(- 2)]
    def __call__(self, tree, phase=None):
        """Entry point for use as a pipeline stage: print and pass through."""
        print(("Parse tree dump at phase '%s'" % phase))
        self.visit(tree)
        return tree
    def visit_Node(self, node):
        # Generic fallback: print the node, then recurse one level deeper.
        self._print_node(node)
        self.indent()
        self.visitchildren(node)
        self.unindent()
        return node
    def visit_CloneNode(self, node):
        # CloneNode wraps another node in ``arg``; print it explicitly since
        # visitchildren would not label it.
        self._print_node(node)
        self.indent()
        line = node.pos[1]
        if ((self._line_range is None) or (self._line_range[0] <= line <= self._line_range[1])):
            print(('%s- %s: %s' % (self._indent, 'arg', self.repr_of(node.arg))))
        self.indent()
        self.visitchildren(node.arg)
        self.unindent()
        self.unindent()
        return node
    def _print_node(self, node):
        """Print one node, labeled by its position in the parent's access
        path, if it is inside the configured line range."""
        line = node.pos[1]
        if ((self._line_range is None) or (self._line_range[0] <= line <= self._line_range[1])):
            if (len(self.access_path) == 0):
                name = '(root)'
            else:
                (parent, attr, idx) = self.access_path[(- 1)]
                if (idx is not None):
                    name = ('%s[%d]' % (attr, idx))
                else:
                    name = attr
            print(('%s- %s: %s' % (self._indent, name, self.repr_of(node))))
    def repr_of(self, node):
        """Return a short, type-dependent description of ``node``."""
        if (node is None):
            return '(none)'
        else:
            result = node.__class__.__name__
            if isinstance(node, ExprNodes.NameNode):
                result += ('(type=%s, name="%s")' % (repr(node.type), node.name))
            elif isinstance(node, Nodes.DefNode):
                result += ('(name="%s")' % node.name)
            elif isinstance(node, ExprNodes.ExprNode):
                t = node.type
                result += ('(type=%s)' % repr(t))
            elif node.pos:
                pos = node.pos
                path = pos[0].get_description()
                # Keep only the basename of the source path (either separator).
                if ('/' in path):
                    path = path.split('/')[(- 1)]
                if ('\\' in path):
                    path = path.split('\\')[(- 1)]
                result += ('(pos=(%s:%s:%s))' % (path, pos[1], pos[2]))
            return result
def get_baseline(training_args, model_args, data_args, model):
    """Run a baseline evaluation of ``model`` and persist its metrics.

    Builds a trainer against a sibling output directory
    (``<output_dir>_baseline``), runs it, prefixes every metric key with
    ``baseline/`` and dumps the dict to ``metrics.json`` in that directory.
    """
    baseline_output_dir = (training_args.output_dir + '_baseline')
    eval_args = dataclasses.replace(training_args, output_dir=baseline_output_dir)
    (trainer, lm_datasets, _, last_checkpoint) = run_clm.get_trainer_and_dataset(model_args, data_args, eval_args, model)
    # NOTE(review): the compiled module is bound to the local ``model`` but
    # never passed to run_trainer -- presumably the trainer already holds the
    # model; confirm this is intentional.
    model = torch.compile(model)
    # NOTE(review): run_trainer receives ``training_args`` although the
    # trainer was built with ``eval_args`` -- confirm the mismatch is deliberate.
    baseline_metrics = run_clm.run_trainer(model_args, data_args, training_args, trainer, lm_datasets, last_checkpoint)
    baseline_metrics = {('baseline/' + k): v for (k, v) in baseline_metrics.items()}
    with open((baseline_output_dir + '/metrics.json'), 'w') as f:
        json.dump(baseline_metrics, f)
    return baseline_metrics
def main():
    """Process every ``*.mp4`` under ``in_dir``, serially or in parallel.

    With ``--jobs 1`` runs a tqdm-wrapped serial loop; otherwise fans out to
    a joblib process pool with the same per-file arguments.
    """
    args = parse_args()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s')
    args.out_dir.mkdir(exist_ok=True)
    # Recursive glob: picks up mp4 files in all subdirectories.
    filenames = list(args.in_dir.rglob('*.mp4'))
    if (args.jobs == 1):
        pbar = tqdm.tqdm(filenames, ncols=80)
        for filename in pbar:
            pbar.set_postfix_str(filename.stem)
            process(filename, args.out_dir, args.fps, args.skip_existing, args.ignore_exceptions, args.quiet)
    else:
        joblib.Parallel(n_jobs=args.jobs, verbose=5)((joblib.delayed(process)(filename, args.out_dir, args.fps, args.skip_existing, args.ignore_exceptions, args.quiet) for filename in filenames))
def read_jsonl(file_path: str) -> List[Any]:
    """Parse a JSON-Lines file into a list of Python objects.

    Blank lines are skipped; every other line must be a valid JSON document.
    """
    records: List[Any] = []
    with open(file_path, 'r') as fh:
        for raw_line in fh:
            stripped = raw_line.strip()
            if not stripped:
                continue
            records.append(json.loads(stripped))
    return records
# NOTE(review): the bare call below looks like the stripped remains of a
# registry decorator (e.g. ``@DETECTORS.register_module()``) -- confirm.
_module()
class CenterNet(SingleStageDetector):
    """CenterNet single-stage detector with flip-based test-time augmentation."""
    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        super(CenterNet, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg)
    def merge_aug_results(self, aug_results, with_nms):
        """Concatenate per-augmentation detections; optionally run NMS."""
        (recovered_bboxes, aug_labels) = ([], [])
        for single_result in aug_results:
            recovered_bboxes.append(single_result[0][0])
            aug_labels.append(single_result[0][1])
        bboxes = torch.cat(recovered_bboxes, dim=0).contiguous()
        labels = torch.cat(aug_labels).contiguous()
        if with_nms:
            (out_bboxes, out_labels) = self.bbox_head._bboxes_nms(bboxes, labels, self.bbox_head.test_cfg)
        else:
            (out_bboxes, out_labels) = (bboxes, labels)
        return (out_bboxes, out_labels)
    def aug_test(self, imgs, img_metas, rescale=True):
        """Test-time augmentation over (original, flipped) image pairs.

        Heatmap and wh predictions of each pair are averaged after
        un-flipping; the offset prediction of the unflipped image is used
        as-is.
        """
        img_inds = list(range(len(imgs)))
        # Truthy if at least one of the pair is flipped -- presumably the
        # inputs alternate original/flipped; confirm against the test pipeline.
        assert (img_metas[0][0]['flip'] + img_metas[1][0]['flip']), 'aug test must have flipped image pair'
        aug_results = []
        for (ind, flip_ind) in zip(img_inds[0::2], img_inds[1::2]):
            flip_direction = img_metas[flip_ind][0]['flip_direction']
            # Batch the original and flipped image through the network together.
            img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
            x = self.extract_feat(img_pair)
            (center_heatmap_preds, wh_preds, offset_preds) = self.bbox_head(x)
            assert (len(center_heatmap_preds) == len(wh_preds) == len(offset_preds) == 1)
            # Average the original prediction with the un-flipped flipped one.
            center_heatmap_preds[0] = ((center_heatmap_preds[0][0:1] + flip_tensor(center_heatmap_preds[0][1:2], flip_direction)) / 2)
            wh_preds[0] = ((wh_preds[0][0:1] + flip_tensor(wh_preds[0][1:2], flip_direction)) / 2)
            bbox_list = self.bbox_head.get_bboxes(center_heatmap_preds, wh_preds, [offset_preds[0][0:1]], img_metas[ind], rescale=rescale, with_nms=False)
            aug_results.append(bbox_list)
        nms_cfg = self.bbox_head.test_cfg.get('nms_cfg', None)
        if (nms_cfg is None):
            with_nms = False
        else:
            with_nms = True
        bbox_list = [self.merge_aug_results(aug_results, with_nms)]
        bbox_results = [bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) for (det_bboxes, det_labels) in bbox_list]
        return bbox_results
class LambdaToFunction(ast.NodeTransformer):
    """AST transformer that rewrites ``lambda`` expressions into named functions.

    Every ``ast.Lambda`` node is replaced by an ``ast.FunctionDef`` called
    ``_anonymous`` whose body is a single ``return`` of the lambda's expression.
    """

    def visit_Lambda(self, node: ast.Lambda):
        # A lambda body is a bare expression; a function needs a Return stmt.
        return_stmt = ast.Return(value=node.body)
        func_def = ast.FunctionDef(name='_anonymous', args=node.args, body=[return_stmt], decorator_list=[])
        located = ast.copy_location(func_def, node)
        # Fill in lineno/col_offset for the freshly created child nodes.
        return ast.fix_missing_locations(located)
class Project():
    """A checked-out benchmark project backed by a ``project.yml`` file.

    Metadata (yaml, versions, repository) is loaded lazily and cached in the
    ``_YAML`` / ``_VERSIONS`` / ``_REPOSITORY`` attributes.

    The ``@property`` / ``@staticmethod`` decorators had been stripped:
    ``name`` and ``repository`` read ``self._yaml.get(...)``, which only
    works when ``_yaml`` is a property (otherwise it is a bound method and
    ``.get`` raises AttributeError).  They are restored here.
    """
    PROJECT_FILE = 'project.yml'
    VERSIONS_DIR = 'versions'
    MISUSES_DIR = 'misuses'

    def __init__(self, base_path: str, id: str):
        self._base_path = base_path
        self.id = id.lower()
        self.path = join(base_path, id)
        self._versions_path = join(self.path, Project.VERSIONS_DIR)
        self._project_file = join(self.path, Project.PROJECT_FILE)
        # Lazy caches, filled on first property access.
        self._YAML = None
        self._VERSIONS = []
        self._REPOSITORY = None

    @staticmethod
    def is_project(path: str) -> bool:
        """Return True if ``path`` contains a project.yml file."""
        return exists(join(path, Project.PROJECT_FILE))

    @property
    def _yaml(self) -> Dict[str, Any]:
        """Parsed content of project.yml, loaded once and cached."""
        if self._YAML is None:
            with open(self._project_file) as project_file:
                # NOTE(review): yaml.load without an explicit Loader is unsafe
                # on untrusted input and errors on recent PyYAML -- consider
                # yaml.safe_load if custom tags are not needed.
                project_yml = yaml.load(project_file)
            self._YAML = project_yml
        return self._YAML

    @property
    def name(self) -> Optional[str]:
        """The project's display name, or None if not declared."""
        return self._yaml.get('name', None)

    @property
    def repository(self) -> Repository:
        """The project's source repository; raises ValueError if undeclared."""
        if not self._REPOSITORY:
            repository = self._yaml.get('repository', None)
            if repository is None:
                raise ValueError('Repository not defined')
            repository_type = repository.get('type', None)
            repository_url = repository.get('url', None)
            self._REPOSITORY = Repository(repository_type, repository_url)
        return self._REPOSITORY

    @property
    def versions(self) -> List[ProjectVersion]:
        """All project versions found under the versions directory."""
        if not self._VERSIONS:
            if exists(self._versions_path):
                self._VERSIONS = [ProjectVersion(self._base_path, self.id, subdir) for subdir in listdir(self._versions_path) if ProjectVersion.is_project_version(join(self._versions_path, subdir))]
        return self._VERSIONS

    def __str__(self):
        return "project '{}'".format(self.id)

    def __eq__(self, other):
        return (isinstance(other, Project) and (self.path == other.path))
class ConcatDataset(Dataset):
    """Dataset that is the concatenation of multiple datasets.

    The stripped decorators are restored: ``cumsum`` must be a
    ``@staticmethod`` (``self.cumsum(self.datasets)`` would otherwise pass
    ``self`` as ``sequence`` and raise TypeError), and the deprecated
    ``cummulative_sizes`` is accessed as an attribute per its own warning
    message, so it must be a ``@property``.
    """

    @staticmethod
    def cumsum(sequence):
        # Running cumulative lengths, e.g. [3, 5] for datasets of len 3 and 2.
        (r, s) = ([], 0)
        for e in sequence:
            l = len(e)
            r.append(l + s)
            s += l
        return r

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__()
        assert len(datasets) > 0, 'datasets should not be an empty iterable'
        self.datasets = list(datasets)
        self.cumulative_sizes = self.cumsum(self.datasets)

    def __len__(self):
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        # Locate the dataset containing idx, then the offset inside it.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx][sample_idx]

    @property
    def cummulative_sizes(self):
        # Deprecated misspelled alias kept for backward compatibility.
        warnings.warn('cummulative_sizes attribute is renamed to cumulative_sizes', DeprecationWarning, stacklevel=2)
        return self.cumulative_sizes
def build_sn_patch_gan_discriminator(x, reuse=False, training=True):
    """Build the SN-PatchGAN discriminator: six spectral-norm conv layers
    followed by a flatten, inside the ``sn_patch_gan`` variable scope."""
    with tf.variable_scope('sn_patch_gan', reuse=reuse):
        cnum = 64
        # Channel multipliers for conv1..conv6 (64, 128, then 256 four times).
        multipliers = (1, 2, 4, 4, 4, 4)
        for layer_idx, mult in enumerate(multipliers, start=1):
            x = dis_spectralconv(x, cnum * mult, name='conv{}'.format(layer_idx), training=training)
        return flatten(x, name='flatten')
class _DropoutNd(Module):
    """Shared base for dropout modules: validates and stores the drop
    probability ``p`` and the ``inplace`` flag."""
    __constants__ = ['p', 'inplace']
    p: float
    inplace: bool

    def __init__(self, p: float = 0.5, inplace: bool = False) -> None:
        super(_DropoutNd, self).__init__()
        # A probability outside [0, 1] is meaningless -- reject it early.
        if p < 0 or p > 1:
            raise ValueError(f'dropout probability has to be between 0 and 1, but got {p}')
        self.p = p
        self.inplace = inplace

    def extra_repr(self) -> str:
        return f'p={self.p}, inplace={self.inplace}'
# NOTE(review): the bare name below looks like the stripped remains of a
# decorator (e.g. ``@requires_duration``) -- confirm against the original.
_duration
def ffmpeg_audiowrite(clip, filename, fps, nbytes, buffersize, codec='libvorbis', bitrate=None, write_logfile=False, verbose=True, ffmpeg_params=None, logger='bar'):
    """Write an audio clip to ``filename`` through an FFMPEG writer.

    The clip is streamed in chunks of ``buffersize`` frames at ``fps`` with
    ``nbytes`` bytes per sample.  When ``write_logfile`` is set, ffmpeg's
    output is captured in ``<filename>.log``.

    NOTE(review): if the writer raises mid-stream, neither the writer nor
    the logfile is closed -- consider a try/finally.
    """
    if write_logfile:
        logfile = open((filename + '.log'), 'w+')
    else:
        logfile = None
    logger = proglog.default_bar_logger(logger)
    logger(message=('MoviePy - Writing audio in %s' % filename))
    writer = FFMPEG_AudioWriter(filename, fps, nbytes, clip.nchannels, codec=codec, bitrate=bitrate, logfile=logfile, ffmpeg_params=ffmpeg_params)
    # Stream quantized chunks straight into the ffmpeg process.
    for chunk in clip.iter_chunks(chunksize=buffersize, quantize=True, nbytes=nbytes, fps=fps, logger=logger):
        writer.write_frames(chunk)
    writer.close()
    if write_logfile:
        logfile.close()
    logger(message='MoviePy - Done.')
class CanonicalHFIndex(HFIndexBase):
    """HF-datasets-backed retrieval index over a canonical passage dataset.

    Exactly one of ``index_name`` (a prebuilt index shipped with the dataset)
    or ``index_path`` (a serialized faiss index on disk) must be given.
    """
    def __init__(self, vector_size: int, dataset_name: str='wiki_dpr', dataset_split: str='train', index_name: Optional[str]=None, index_path: Optional[str]=None, use_dummy_dataset=False):
        # Exactly-one check: the two int() terms must sum to 1.
        if ((int((index_path is None)) + int((index_name is None))) != 1):
            raise ValueError('Please provide `index_name` or `index_path`.')
        self.dataset_name = dataset_name
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        logger.info(f'Loading passages from {self.dataset_name}')
        # Load passages only; the faiss index is attached later in init_index().
        dataset = load_dataset(self.dataset_name, with_index=False, split=self.dataset_split, dummy=self.use_dummy_dataset)
        super().__init__(vector_size, dataset, index_initialized=False)
    def init_index(self):
        """Attach the faiss index, either from disk or from the dataset hub."""
        if (self.index_path is not None):
            logger.info(f'Loading index from {self.index_path}')
            self.dataset.load_faiss_index('embeddings', file=self.index_path)
        else:
            logger.info(f'Loading index from {self.dataset_name} with index name {self.index_name}')
            # Reload the dataset with embeddings and the named prebuilt index.
            self.dataset = load_dataset(self.dataset_name, with_embeddings=True, with_index=True, split=self.dataset_split, index_name=self.index_name, dummy=self.use_dummy_dataset)
            self.dataset.set_format('numpy', columns=['embeddings'], output_all_columns=True)
        self._index_initialized = True
class DefaultDomainNameServiceMerger(ServiceMerger):
    """Merger that combines the DNS layers of two emulators by recursively
    unioning their zone trees and master-server lists."""
    def __mergeZone(self, a: Zone, b: Zone, dst: Zone, position: str=''):
        """Recursively merge zones ``a`` and ``b`` into ``dst``.

        Records, glue records and pending vnode records are unioned
        (duplicates skipped); sub-zones are merged recursively.  ``position``
        is the dotted zone path used for logging and error messages.
        """
        names = set()
        self._log('merging zone: {}'.format(('(root)' if (position == '') else position)))
        for r in a.getRecords():
            if (r not in dst.getRecords()):
                dst.addRecord(r)
        for r in b.getRecords():
            if (r not in dst.getRecords()):
                dst.addRecord(r)
        # NOTE(review): glue records from ``a`` are added unconditionally while
        # those from ``b`` are de-duplicated -- confirm this asymmetry is
        # intentional (e.g. dst starts empty so a's records cannot collide).
        for r in a.getGuleRecords():
            dst.addGuleRecord(r)
        for r in b.getGuleRecords():
            if (r not in dst.getGuleRecords()):
                dst.addGuleRecord(r)
        for (n, v) in a.getPendingRecords().items():
            dst.resolveToVnode(n, v)
        for (n, v) in b.getPendingRecords().items():
            # The same name must not be pointed at a vnode by both emulators.
            assert (n not in dst.getPendingRecords()), 'found conflict: {} already points to a vnode'.format(n)
            dst.resolveToVnode(n, v)
        for k in a.getSubZones().keys():
            self._log('{}.{} zone found in first emulator.'.format(k, position))
            names.add(k)
        for k in b.getSubZones().keys():
            self._log('{}.{} zone found in second emulator.'.format(k, position))
            names.add(k)
        for name in names:
            # A name cannot simultaneously be a record and a delegated sub-zone.
            assert (len([r for r in dst.getRecords() if match('{}\\s+'.format(name), r)]) == 0), 'found conflict: {}.{} is both a record and a standalone zone.'.format(name, position)
            self.__mergeZone(a.getSubZone(name), b.getSubZone(name), dst.getSubZone(name), '{}.{}'.format(name, position))
    def __mergeMaster(self, objectA: DomainNameService, objectB: DomainNameService, merged: DomainNameService):
        """Concatenate the per-key master IP lists of both services.

        NOTE(review): keys present only in ``objectB`` are dropped -- confirm
        both services always share the same key set.
        """
        masterA = objectA.getMasterIp()
        masterB = objectB.getMasterIp()
        new_master = {key: (value + masterB[key]) for (key, value) in masterA.items()}
        merged.setAllMasterIp(new_master)
    def _createService(self) -> DomainNameService:
        return DomainNameService()
    def getName(self) -> str:
        return 'DefaultDomainNameServiceMerger'
    def getTargetType(self) -> str:
        return 'DomainNameServiceLayer'
    def doMerge(self, objectA: DomainNameService, objectB: DomainNameService) -> DomainNameService:
        """Merge two DNS services: base merge, then zones, then masters."""
        merged: DomainNameService = super().doMerge(objectA, objectB)
        self.__mergeZone(objectA.getRootZone(), objectB.getRootZone(), merged.getRootZone())
        self.__mergeMaster(objectA, objectB, merged)
        return merged
def sharp_invoke(module, function, args):
    """Invoke a registered function and return its result as text.

    Looks up ``module`` in the global ``modules`` registry, then
    ``function`` within it; returns '' when either lookup fails.
    """
    function_table = modules.get(module)
    if not function_table:
        return ''
    target = function_table.get(function)
    if not target:
        return ''
    return text_type(target(args))
def huber_loss(x, delta=1.0):
    """Elementwise Huber loss: quadratic for |x| < delta, linear beyond.

    (BUG FIX: the original docstring was an unterminated string literal
    -- ``'Reference:`` -- which is a syntax error; restored here.)

    Reference: https://en.wikipedia.org/wiki/Huber_loss

    Args:
        x: tensor of residuals.
        delta: threshold where the loss switches from quadratic to linear.

    Returns:
        A tensor of the same shape as ``x``.
    """
    return tf.compat.v1.where((tf.abs(x) < delta), (tf.square(x) * 0.5), (delta * (tf.abs(x) - (0.5 * delta))))
def _listify(obj):
if (obj is None):
return []
elif isinstance(obj, str):
return [obj]
else:
return obj |
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4) with a
    residual connection.

    Note: the spatial stride is applied on the first 1x1 convolution (the
    original "v1" placement), not on the 3x3 convolution.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 conv reducing channels; carries the spatial stride.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 conv at unit stride.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 conv expanding channels by the ``expansion`` factor.
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the input when channel/stride shapes would not match.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
def _get_base_mp_nbits_candidates():
return [(4, 8), (4, 4), (4, 2), (8, 8), (8, 4), (8, 2), (2, 8), (2, 4), (2, 2)] |
def replace_oovs(word_list, vocab, oov_handling, vocab_vectors=None, all_vectors=None):
    """Replace out-of-vocabulary words in ``word_list`` in place.

    ``oov_handling`` selects the substitution strategy ('wordnet' or
    'wordvec'); any other value logs a warning and returns the list
    unchanged.  Progress messages go to stderr.  Returns ``word_list``.
    """
    if not check_for_oov(word_list, vocab):
        return word_list
    for idx, token in enumerate(word_list):
        if token in vocab:
            continue
        if oov_handling == 'wordnet':
            replacement = find_wordnet_substitute(token, vocab)
        elif oov_handling == 'wordvec':
            assert len(vocab_vectors) > 0, 'Need to provide preloaded word vectors to use wordvec for OOV'
            replacement = find_vector_substitute(token, vocab, vocab_vectors, all_vectors)
        else:
            print('invalid choice for oov: {}, returning original string'.format(oov_handling), file=sys.stderr)
            return word_list
        if replacement:
            print('subbing {} instead of original {}'.format(replacement, token), file=sys.stderr)
            word_list[idx] = replacement
        else:
            print("couldn't find a substitute for {}".format(token), file=sys.stderr)
    return word_list
def test_banded_ode_solvers():
    """Exercise banded-Jacobian handling of the vode/zvode/lsoda ODE solvers
    against analytically known solutions of linear systems y' = A y."""
    t_exact = np.linspace(0, 1.0, 5)
    # A real banded matrix plus its upper-, lower-, and diagonal-only variants.
    a_real = np.array([[(- 0.6), 0.1, 0.0, 0.0, 0.0], [0.2, (- 0.5), 0.9, 0.0, 0.0], [0.1, 0.1, (- 0.4), 0.1, 0.0], [0.0, 0.3, (- 0.1), (- 0.9), (- 0.3)], [0.0, 0.0, 0.1, 0.1, (- 0.7)]])
    a_real_upper = np.triu(a_real)
    a_real_lower = np.tril(a_real)
    # triu of the lower-triangular part leaves only the diagonal.
    a_real_diag = np.triu(a_real_lower)
    real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag]
    real_solutions = []
    for a in real_matrices:
        y0 = np.arange(1, (a.shape[0] + 1))
        y_exact = _analytical_solution(a, y0, t_exact)
        real_solutions.append((y0, t_exact, y_exact))
    def check_real(idx, solver, meth, use_jac, with_jac, banded):
        # Solve the idx-th real system with one solver configuration and
        # compare against the analytical solution.
        a = real_matrices[idx]
        (y0, t_exact, y_exact) = real_solutions[idx]
        (t, y) = _solve_linear_sys(a, y0, tend=t_exact[(- 1)], dt=(t_exact[1] - t_exact[0]), solver=solver, method=meth, use_jac=use_jac, with_jacobian=with_jac, banded=banded)
        assert_allclose(t, t_exact)
        assert_allclose(y, y_exact)
    # Real case: all combinations of solver, method and jacobian flags.
    for idx in range(len(real_matrices)):
        p = [['vode', 'lsoda'], ['bdf', 'adams'], [False, True], [False, True], [False, True]]
        for (solver, meth, use_jac, with_jac, banded) in itertools.product(*p):
            check_real(idx, solver, meth, use_jac, with_jac, banded)
    # Complex case: a complex matrix and its diagonal-only variant.
    a_complex = (a_real - (0.5j * a_real))
    a_complex_diag = np.diag(np.diag(a_complex))
    complex_matrices = [a_complex, a_complex_diag]
    complex_solutions = []
    for a in complex_matrices:
        y0 = (np.arange(1, (a.shape[0] + 1)) + 1j)
        y_exact = _analytical_solution(a, y0, t_exact)
        complex_solutions.append((y0, t_exact, y_exact))
    def check_complex(idx, solver, meth, use_jac, with_jac, banded):
        # Same as check_real but for the complex systems.
        a = complex_matrices[idx]
        (y0, t_exact, y_exact) = complex_solutions[idx]
        (t, y) = _solve_linear_sys(a, y0, tend=t_exact[(- 1)], dt=(t_exact[1] - t_exact[0]), solver=solver, method=meth, use_jac=use_jac, with_jacobian=with_jac, banded=banded)
        assert_allclose(t, t_exact)
        assert_allclose(y, y_exact)
    # Only zvode supports complex systems.
    for idx in range(len(complex_matrices)):
        p = [['bdf', 'adams'], [False, True], [False, True], [False, True]]
        for (meth, use_jac, with_jac, banded) in itertools.product(*p):
            check_complex(idx, 'zvode', meth, use_jac, with_jac, banded)
class FlaxRobertaModelTester(unittest.TestCase):
    """Helper that prepares tiny RobertaConfig objects and random inputs
    for the Flax Roberta model test suite.

    Despite subclassing TestCase, it is driven by a ``parent`` tester
    and only builds configs/inputs; the default sizes are deliberately
    small so tests run fast.
    """

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        # Store every knob verbatim; they are read by the prepare_* methods
        # and by the parent test case.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) with
        randomly generated ids/masks matching the tester's sizes; the
        optional tensors are None when their use_* flag is off."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return (config, input_ids, token_type_ids, attention_mask)

    def prepare_config_and_inputs_for_common(self):
        """Same data as prepare_config_and_inputs, repackaged as
        (config, inputs_dict) for the shared model-test mixins."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, attention_mask) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return (config, inputs_dict)
class Up(nn.Module):
    """Upsampling block: 1x1 conv that shrinks channels by ``chan_factor``
    followed by 2x bilinear upsampling.

    Args:
        in_channels: number of input channels.
        chan_factor: channel reduction factor; output has
            ``int(in_channels // chan_factor)`` channels.
        bias: whether the 1x1 convolution has a bias term.
        align_corners: ``align_corners`` flag for the bilinear upsample.
            Defaults to the value of ``bias`` to preserve the original
            (accidentally coupled) behaviour; pass it explicitly to
            decouple the two options.
    """

    def __init__(self, in_channels, chan_factor, bias=False, align_corners=None):
        super(Up, self).__init__()
        # FIX: the original passed ``bias`` directly as ``align_corners``,
        # silently coupling two unrelated options.  Keep that default for
        # backward compatibility, but let callers set it independently.
        if align_corners is None:
            align_corners = bias
        self.bot = nn.Sequential(
            nn.Conv2d(in_channels, int(in_channels // chan_factor), 1,
                      stride=1, padding=0, bias=bias),
            nn.Upsample(scale_factor=2, mode='bilinear',
                        align_corners=align_corners),
        )

    def forward(self, x):
        """Reduce channels and upsample ``x`` spatially by a factor of 2."""
        return self.bot(x)
def is_array_param(p):
    """Return True if parameter ``p`` is any kind of array parameter.

    A parameter counts as an array when its kind is IN_ARRAY,
    INOUT_ARRAY or OUT_ARRAY.
    """
    # Call param_kind once and use a membership test instead of three
    # separate comparisons plus an if/else returning True/False.
    return param_kind(p) in (IN_ARRAY, INOUT_ARRAY, OUT_ARRAY)
_utils.test(arch=supported_archs_cgraph)
def test_arg_mismatched_ndim():
    """Dispatching through a GraphBuilder with a graph Arg whose ndim (2)
    differs from the kernel annotation's ndim (1) must raise a
    TaichiCompilationError.

    NOTE(review): decorator markers appear stripped in this copy of the
    file — the line above is presumably ``@_utils.test(...)`` and the
    inner ``test`` function presumably carries ``@ti.kernel``.
    """
    n = 4
    def test(pos: ti.types.ndarray(ndim=1)):
        for i in range(n):
            pos[i] = 2.5
    # Arg declared with ndim=2, deliberately mismatching the kernel.
    sym_pos = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'pos', ti.f32, ndim=2)
    g_init = ti.graph.GraphBuilder()
    with pytest.raises(TaichiCompilationError, match="doesn't match kernel's annotated ndim"):
        g_init.dispatch(test, sym_pos)
def test_light_tokenizer():
    """Contrast LightTokenizer with DefaultTokenizer on the same input.

    The expected values show LightTokenizer splitting on whitespace only
    (case and punctuation preserved) while DefaultTokenizer lowercases
    and also splits on punctuation.
    """
    light_tokenizer = LightTokenizer()
    default_tokenizer = DefaultTokenizer()
    assert (light_tokenizer.tokenize(TEST_DOCUMENT) == TEST_TOKENS_SPLIT_BY_SPACE)
    assert (default_tokenizer.tokenize(TEST_DOCUMENT) == TEST_TOKENS_BY_DEFAULT_TOKENIZER)
    simple_tokenization_test_case: str = 'THis ,,iS a SiMPlE t-EsT cAsE'
    # Light: whitespace split only, original casing kept.
    assert (light_tokenizer.tokenize(simple_tokenization_test_case) == ['THis', ',,iS', 'a', 'SiMPlE', 't-EsT', 'cAsE'])
    # Default: lowercased and split on punctuation as well.
    assert (default_tokenizer.tokenize(simple_tokenization_test_case) == ['this', 'is', 'a', 'simple', 't', 'est', 'case'])
def jacobi_1d_shared(TSTEPS: dc.int64, A: dc.float64[N], B: dc.float64[N]):
    """One-dimensional Jacobi stencil (Polybench ``jacobi-1d`` style).

    Alternately updates B from A and A from B for ``TSTEPS - 1`` sweeps;
    each interior point becomes roughly the average of itself and its two
    neighbours.  Both arrays are modified in place.
    """
    for t in range(1, TSTEPS):
        # 0.33333 approximates 1/3, kept as in the reference benchmark.
        B[1:(- 1)] = (0.33333 * ((A[:(- 2)] + A[1:(- 1)]) + A[2:]))
        A[1:(- 1)] = (0.33333 * ((B[:(- 2)] + B[1:(- 1)]) + B[2:]))
def apply_weight_norm(m):
    """Apply weight normalization to ``m`` if it is a Conv-like module.

    Intended for use with ``Module.apply``; matches any module whose
    class name contains 'Conv' (Conv1d, Conv2d, ConvTranspose1d, ...).
    """
    # ``in`` is the idiomatic form of ``classname.find('Conv') != -1``.
    if 'Conv' in m.__class__.__name__:
        weight_norm(m)
class RealmTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BERT-style tokenizer for REALM.

    Adds ``batch_encode_candidates`` for encoding batches of candidate
    texts padded to a common max length, on top of the usual fast
    tokenizer behaviour.
    """

    # Class-level resources shared by all instances.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """Build the fast tokenizer, then make sure the backend
        normalizer matches the requested lowercase/accents/Chinese-chars
        options (the serialized tokenizer may have been saved with
        different settings)."""
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        # Rebuild the normalizer only if any option disagrees with what
        # the loaded tokenizer was serialized with.
        if ((normalizer_state.get('lowercase', do_lower_case) != do_lower_case) or (normalizer_state.get('strip_accents', strip_accents) != strip_accents) or (normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars)):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate texts (optionally paired via a
        ``text_pair`` kwarg), always padding to max length, and return a
        BatchEncoding; tensor conversion happens once over the whole
        batch."""
        # Force max-length padding so candidates stack into tensors.
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('text_pair', None)
        return_tensors = kwargs.pop('return_tensors', None)
        output_data = {'input_ids': [], 'attention_mask': [], 'token_type_ids': []}
        for (idx, candidate_text) in enumerate(batch_text):
            if (batch_text_pair is not None):
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            # Encode one candidate at a time; defer tensor creation.
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get('input_ids')
            encoded_attention_mask = encoded_candidates.get('attention_mask')
            encoded_token_type_ids = encoded_candidates.get('token_type_ids')
            if (encoded_input_ids is not None):
                output_data['input_ids'].append(encoded_input_ids)
            if (encoded_attention_mask is not None):
                output_data['attention_mask'].append(encoded_attention_mask)
            if (encoded_token_type_ids is not None):
                output_data['token_type_ids'].append(encoded_token_type_ids)
        # Drop keys the underlying tokenizer did not produce at all.
        output_data = {key: item for (key, item) in output_data.items() if (len(item) != 0)}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Return ``[CLS] ids_0 [SEP]`` (plus ``ids_1 [SEP]`` for pairs).

        NOTE(review): the truthiness test means an *empty* token_ids_1
        list is treated like None (no second segment) — confirm intended.
        """
        output = (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        if token_ids_1:
            output += (token_ids_1 + [self.sep_token_id])
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Return segment ids: 0 over ``[CLS] ids_0 [SEP]`` and 1 over
        ``ids_1 [SEP]`` when a second sequence is given."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files to
        ``save_directory`` and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
def _seg_58():
    # Machine-generated segment of a codepoint mapping table (IDNA-style):
    # each tuple appears to be (codepoint, status[, mapping]), with 'M'
    # presumably mapped, 'V' valid, 'X' disallowed — verify against the
    # generator before relying on the letter meanings.  Do not edit by hand.
    return [(93763, 'M', u''), (93764, 'M', u''), (93765, 'M', u''), (93766, 'M', u''), (93767, 'M', u''), (93768, 'M', u''), (93769, 'M', u''), (93770, 'M', u''), (93771, 'M', u''), (93772, 'M', u''), (93773, 'M', u''), (93774, 'M', u''), (93775, 'M', u''), (93776, 'M', u''), (93777, 'M', u''), (93778, 'M', u''), (93779, 'M', u''), (93780, 'M', u''), (93781, 'M', u''), (93782, 'M', u''), (93783, 'M', u''), (93784, 'M', u''), (93785, 'M', u''), (93786, 'M', u''), (93787, 'M', u''), (93788, 'M', u''), (93789, 'M', u''), (93790, 'M', u''), (93791, 'M', u''), (93792, 'V'), (93851, 'X'), (93952, 'V'), (94027, 'X'), (94031, 'V'), (94088, 'X'), (94095, 'V'), (94112, 'X'), (94176, 'V'), (94180, 'X'), (94208, 'V'), (100344, 'X'), (100352, 'V'), (101107, 'X'), (110592, 'V'), (110879, 'X'), (110928, 'V'), (110931, 'X'), (110948, 'V'), (110952, 'X'), (110960, 'V'), (111356, 'X'), (113664, 'V'), (113771, 'X'), (113776, 'V'), (113789, 'X'), (113792, 'V'), (113801, 'X'), (113808, 'V'), (113818, 'X'), (113820, 'V'), (113824, 'I'), (113828, 'X'), (118784, 'V'), (119030, 'X'), (119040, 'V'), (119079, 'X'), (119081, 'V'), (119134, 'M', u''), (119135, 'M', u''), (119136, 'M', u''), (119137, 'M', u''), (119138, 'M', u''), (119139, 'M', u''), (119140, 'M', u''), (119141, 'V'), (119155, 'X'), (119163, 'V'), (119227, 'M', u''), (119228, 'M', u''), (119229, 'M', u''), (119230, 'M', u''), (119231, 'M', u''), (119232, 'M', u''), (119233, 'V'), (119273, 'X'), (119296, 'V'), (119366, 'X'), (119520, 'V'), (119540, 'X'), (119552, 'V'), (119639, 'X'), (119648, 'V'), (119673, 'X'), (119808, 'M', u'a'), (119809, 'M', u'b'), (119810, 'M', u'c'), (119811, 'M', u'd'), (119812, 'M', u'e'), (119813, 'M', u'f'), (119814, 'M', u'g')]
def worker(kwargs) -> Tuple[(Dict[(int, int)], float, float)]:
    """Partitioning worker: repeatedly run single- or multi-level graph
    partitioning within a time budget and keep the best solution found.

    ``kwargs`` carries the serialized graph state plus the partitioner
    configuration; node/edge weights are precomputed once here so every
    attempt reuses them.

    Returns:
        (best_solution, edge_cut, worst_case) of the best attempt;
        best_solution is None if no attempt finished inside the budget.
    """
    # Rebuild the graph from its picklable state — this presumably runs
    # in a separate process, so the graph object itself was not sent.
    graph = Graph.from_state(kwargs.pop('graph'))
    kwargs['graph'] = graph
    meta_algorithm = kwargs.pop('meta_algorithm')
    # NOTE(review): 'algorithm' is bound but unused here; presumably it is
    # consumed via **kwargs by the partitioning functions below.
    algorithm = kwargs['algorithm']
    allocated_seconds = kwargs.pop('allocated_seconds')
    objective = kwargs['objective']
    (best_solution, edge_cut, worst_case) = (None, np.inf, np.inf)
    nwf = kwargs.pop('node_weight_function')
    ewf = kwargs.pop('edge_weight_function')
    # Precompute weights once; re-key params_per_node by node object
    # instead of node id, to match the rebuilt graph's nodes.
    node_weights = dict()
    edge_weights = dict()
    params_per_node = dict(kwargs['params_per_node'])
    for u in graph.nodes:
        node_weights[u] = nwf(u)
        params_per_node[u] = params_per_node.pop(u.id)
        for o in u.out_edges:
            edge_weights[(u, o)] = ewf(u, o)
    kwargs['params_per_node'] = params_per_node
    kwargs['node_weights'] = node_weights
    kwargs['edge_weights'] = edge_weights
    start = time.time()
    steps = 0
    # Random-restart loop: keep trying fresh seeds until time runs out.
    while ((time.time() - start) < allocated_seconds):
        # Seed from OS entropy so restarts (and sibling workers) differ.
        seed = int.from_bytes(os.urandom(4), byteorder='little')
        random.seed(seed)
        if (meta_algorithm is META_ALGORITH.SINGLE_LEVEL):
            (solution, solution_edge_cut, solution_worst_case) = single_level_partitioning(**kwargs)
        else:
            (solution, solution_edge_cut, solution_worst_case) = multilevel_partitioning(**kwargs)
        if is_better_solution((solution_edge_cut, solution_worst_case), (edge_cut, worst_case), objective):
            best_solution = solution
            edge_cut = solution_edge_cut
            worst_case = solution_worst_case
        steps += 1
    return (best_solution, edge_cut, worst_case)
class EmbeddingsAlreadyPackagedAsTriplets(BaseTupleMiner):
    """Miner for batches already laid out as consecutive triplets:
    row 3k is the anchor, row 3k+1 the positive, row 3k+2 the negative."""

    def mine(self, embeddings, labels, ref_emb, ref_labels):
        """Return (anchor, positive, negative) index tensors for a batch
        packed as consecutive triplets; the label arguments are ignored."""
        num_rows = embeddings.size(0)
        row_indices = torch.arange(num_rows)
        # Every third row, starting at offsets 0 / 1 / 2.
        return (row_indices[0::3], row_indices[1::3], row_indices[2::3])
def default_str_type(env):
    """Map the ``c_string_type`` directive of ``env`` to the matching
    string type object, or None for an unrecognized directive value."""
    type_by_directive = {
        'bytes': bytes_type,
        'bytearray': bytearray_type,
        'str': str_type,
        'unicode': unicode_type,
    }
    return type_by_directive.get(env.directives['c_string_type'])
_module()
class ConcatDataset(_ConcatDataset):
    """Thin wrapper around the base ConcatDataset.

    Args:
        datasets: datasets to concatenate, in order.
    """

    def __init__(self, datasets: list):
        # Use Python 3's zero-argument super() instead of the legacy
        # super(ConcatDataset, self) form.
        super().__init__(datasets)
_module()
class PointNet2Encoder(nn.Module):
    """PointNet++-style set-abstraction encoder.

    Builds a stack of multi-scale-grouping SA modules either from an
    explicit ``mlps`` channel spec or derived from (``width``, ``layers``,
    ``strides``) with per-stage width/radius/nsample scaling.  Provides a
    classification head (``forward_cls_feat``) and a segmentation head
    (``forward_seg_feat``); plain ``forward`` delegates to the latter.
    """

    def __init__(self, in_channels: int, radius: (List[float] or float), num_samples: (List[int] or int), aggr_args: dict, group_args: dict, conv_args: dict, norm_args: dict, act_args: dict, blocks: Optional[List]=None, mlps=None, width: Optional[int]=None, strides: List[int]=[4, 4, 4, 4], layers: int=3, width_scaling: int=2, radius_scaling: int=2, block_radius_scaling: int=1, nsample_scaling: int=1, sampler: str='fps', use_res=False, stem_conv=False, stem_aggr=False, double_last_channel=True, query_as_support=False, **kwargs):
        super().__init__()
        if kwargs:
            logging.warning(f'kwargs: {kwargs} are not used in {__class__.__name__}')
        stages = len(strides)
        self.strides = strides
        # Blocks per stage: taken from ``blocks`` unless mlps pins them.
        self.blocks = (blocks if (mlps is None) else [len(mlp) for mlp in mlps])
        # Expand scalar radius / nsample into per-stage, per-block lists.
        radius = self._to_full_list(radius, blocks=self.blocks, param_scaling=radius_scaling, block_param_scaling=block_radius_scaling)
        num_samples = self._to_full_list(num_samples, blocks=self.blocks, param_scaling=nsample_scaling)
        self.radius = radius
        self.num_samples = num_samples
        logging.info(f'radius is modified to {radius}')
        logging.info(f'num_samples is modified to {num_samples}')
        self.stem_conv = stem_conv
        self.stem_aggr = stem_aggr
        if stem_conv:
            # Optional pointwise conv stem lifting inputs to ``width`` channels.
            width = (width if (width is not None) else mlps[0][0][0])
            self.conv1 = create_convblock1d(in_channels, width, norm_args=None, act_args=None)
            if stem_aggr:
                # Optional local-aggregation stem using the first stage's
                # radius/nsample.  NOTE(review): attribute assignment on
                # group_args suggests it is an attr-dict — confirm.
                channels = ([width] * (layers + 1))
                group_args.radius = radius[0][0]
                group_args.nsample = num_samples[0][0]
                self.stem = LocalAggregation(channels, aggr_args, conv_args, norm_args, act_args, group_args, use_res)
            in_channels = width
        if (mlps is None):
            # Derive the per-stage MLP spec from width/layers/strides.
            assert (width is not None)
            assert (layers is not None)
            assert (strides is not None)
            mlps = []
            for i in range(stages):
                if (not double_last_channel):
                    mlps.append(([([width] * layers)] * self.blocks[i]))
                    width = ((width * width_scaling) if (strides[i] > 1) else width)
                else:
                    # Scale the width on the *last* layer of the first block,
                    # then use the scaled width for the remaining blocks.
                    mlps_temp = ([width] * (layers - 1))
                    width = ((width * width_scaling) if (strides[i] > 1) else width)
                    mlps_temp += [width]
                    mlps.append(([mlps_temp] + ([([width] * layers)] * (self.blocks[i] - 1))))
            logging.info(f'channels is modified to {mlps}')
        self.mlps = mlps
        self.SA_modules = nn.ModuleList()
        skip_channel_list = [in_channels]
        for k in range(stages):
            channel_list = mlps[k].copy()
            channel_out = 0
            # Prepend the incoming channel count to every scale's MLP and
            # sum the scales' output channels (MSG concatenates them).
            for idx in range(channel_list.__len__()):
                channel_list[idx] = ([in_channels] + channel_list[idx])
                channel_out += channel_list[idx][(- 1)]
            self.SA_modules.append(PointNetSAModuleMSG(stride=strides[k], radii=radius[k], nsamples=num_samples[k], channel_list=channel_list, aggr_args=aggr_args, group_args=group_args, conv_args=conv_args, norm_args=norm_args, act_args=act_args, sampler=sampler, use_res=use_res, query_as_support=query_as_support))
            skip_channel_list.append(channel_out)
            in_channels = channel_out
        self.out_channels = channel_out
        self.channel_list = skip_channel_list

    def _to_full_list(self, param, blocks, param_scaling=1, block_param_scaling=1):
        """Expand ``param`` (scalar or list) into a per-stage list of
        per-block lists.  Scalars grow by ``param_scaling`` after each
        stage with stride > 1 and by ``block_param_scaling`` within a
        stage; explicit lists are padded to each stage's block count."""
        param_list = []
        if isinstance(param, List):
            # Per-stage values given: pad each stage to its block count
            # by repeating the last value.
            for (i, value) in enumerate(param):
                value = ([value] if (not isinstance(value, List)) else value)
                if (len(value) != blocks[i]):
                    value += ([value[(- 1)]] * (blocks[i] - len(value)))
                param_list.append(value)
        else:
            # Scalar given: derive per-stage values from the strides.
            for (i, stride) in enumerate(self.strides):
                if (stride == 1):
                    param_list.append(([param] * blocks[i]))
                else:
                    param_list.append(([param] + ([(param * block_param_scaling)] * (blocks[i] - 1))))
                    param *= param_scaling
        return param_list

    def forward_cls_feat(self, xyz, features=None):
        """Global feature for classification: run all SA modules and
        squeeze the final single-point feature.  ``xyz`` may also be a
        dict with 'pos'/'x' entries."""
        if hasattr(xyz, 'keys'):
            (xyz, features) = (xyz['pos'], xyz['x'])
        if (features is None):
            # Fall back to coordinates as features (transposed to B,C,N).
            features = xyz.clone().transpose(1, 2).contiguous()
        if self.stem_conv:
            features = self.conv1(features)
        if self.stem_aggr:
            features = self.stem(xyz, xyz, features)
        for i in range(len(self.SA_modules)):
            (xyz, features) = self.SA_modules[i](xyz, features)
        return features.squeeze((- 1))

    def forward_seg_feat(self, xyz, features=None):
        """Hierarchical features for segmentation: return the per-stage
        lists of coordinates and features (including the inputs)."""
        if hasattr(xyz, 'keys'):
            (xyz, features) = (xyz['pos'], xyz['x'])
        if (features is None):
            features = xyz.clone().transpose(1, 2).contiguous()
        xyz = xyz.contiguous()
        if self.stem_conv:
            features = self.conv1(features)
        if self.stem_aggr:
            features = self.stem(xyz, xyz, features)
        (l_xyz, l_features) = ([xyz], [features])
        for i in range(len(self.SA_modules)):
            (li_xyz, li_features) = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)
        return (l_xyz, l_features)

    def forward(self, xyz, features=None):
        """Default forward pass: the segmentation feature hierarchy."""
        if hasattr(xyz, 'keys'):
            (xyz, features) = (xyz['pos'], xyz['x'])
        return self.forward_seg_feat(xyz, features)
def load_blender_data(basedir, half_res=False, testskip=1):
    """Load a Blender-rendered NeRF dataset from ``basedir``.

    Reads transforms_{train,val,test}.json and the referenced PNGs.  For
    non-train splits only every ``testskip``-th frame is kept (all frames
    when testskip is 0 or the split is 'train').

    Returns:
        (imgs, poses, render_poses, [H, W, focal], i_split): float32
        images in [0, 1] (RGBA), 4x4 camera-to-world matrices, a ring of
        spherical render poses, camera intrinsics, and per-split index
        arrays into the concatenated image stack.
    """
    splits = ['train', 'val', 'test']
    metas = {}
    for s in splits:
        with open(os.path.join(basedir, 'transforms_{}.json'.format(s)), 'r') as fp:
            metas[s] = json.load(fp)
    all_imgs = []
    all_poses = []
    counts = [0]  # cumulative frame counts; used to build i_split
    for s in splits:
        meta = metas[s]
        imgs = []
        poses = []
        if ((s == 'train') or (testskip == 0)):
            skip = 1
        else:
            skip = testskip
        for frame in meta['frames'][::skip]:
            fname = os.path.join(basedir, (frame['file_path'] + '.png'))
            imgs.append(imageio.imread(fname))
            poses.append(np.array(frame['transform_matrix']))
        # Normalize pixels to [0, 1]; the alpha channel is kept.
        imgs = (np.array(imgs) / 255.0).astype(np.float32)
        poses = np.array(poses).astype(np.float32)
        counts.append((counts[(- 1)] + imgs.shape[0]))
        all_imgs.append(imgs)
        all_poses.append(poses)
    i_split = [np.arange(counts[i], counts[(i + 1)]) for i in range(3)]
    imgs = np.concatenate(all_imgs, 0)
    poses = np.concatenate(all_poses, 0)
    (H, W) = imgs[0].shape[:2]
    # NOTE: ``meta`` is whichever split was iterated last ('test');
    # camera_angle_x is assumed identical across splits.
    camera_angle_x = float(meta['camera_angle_x'])
    focal = ((0.5 * W) / np.tan((0.5 * camera_angle_x)))
    # 40 poses on a circle at -30 degrees elevation, radius 4 (the
    # np.linspace endpoint is dropped to avoid duplicating -180/180).
    render_poses = torch.stack([pose_spherical(angle, (- 30.0), 4.0) for angle in np.linspace((- 180), 180, (40 + 1))[:(- 1)]], 0)
    if half_res:
        # Halve resolution (and focal length) with area resampling.
        H = (H // 2)
        W = (W // 2)
        focal = (focal / 2.0)
        imgs_half_res = np.zeros((imgs.shape[0], H, W, 4))
        for (i, img) in enumerate(imgs):
            imgs_half_res[i] = cv2.resize(img, (W, H), interpolation=cv2.INTER_AREA)
        imgs = imgs_half_res
    return (imgs, poses, render_poses, [H, W, focal], i_split)
def prepare_paws_qqp():
    """Convert the PAWS-QQP TSV dumps into tab-separated paraphrase files.

    For each of the 'dev_and_test' and 'train' splits, keeps only the
    positively-labelled (paraphrase) pairs and writes one
    ``sentence1<TAB>sentence2`` line per pair.

    Returns:
        (train_path, dev_path): paths of the written train and dev files.
    """
    (train_path, dev_path) = ('', '')
    for mode in ['dev_and_test', 'train']:
        os.makedirs('../paraphraser/data/processed_datasets/paws_qqp_{}'.format(mode), exist_ok=True)
        save_path = '../paraphraser/data/processed_datasets/paws_qqp_{}/paws_qqp_{}.txt'.format(mode, mode)
        if (mode == 'train'):
            train_path = save_path
        else:
            dev_path = save_path
        paws_qqp = pd.read_csv('/export/home/projects/pegasus/paws_qqp/output/{}.tsv'.format(mode), sep='\t')
        # Keep only pairs labelled as paraphrases.
        paws_qqp = paws_qqp[(paws_qqp['label'] == 1)]
        # FIX: use a context manager so the output file is closed even if
        # a write raises (the original left the handle open on failure).
        with open(save_path, 'w') as processed_paws_qqp:
            for (a, b) in zip(paws_qqp['sentence1'], paws_qqp['sentence2']):
                processed_paws_qqp.write((((a + '\t') + b) + '\n'))
    return (train_path, dev_path)
class BaseColBERT(torch.nn.Module):
    """Shallow module wrapping a HuggingFace-based ColBERT encoder.

    Loads the ColBERT configuration plus the underlying HF model and
    tokenizer from ``name_or_path`` and exposes typed accessors and
    checkpoint-style saving.
    """

    def __init__(self, name_or_path, colbert_config=None):
        super().__init__()
        self.colbert_config = ColBERTConfig.from_existing(ColBERTConfig.load_from_checkpoint(name_or_path), colbert_config)
        self.name = (self.colbert_config.model_name or name_or_path)
        try:
            HF_ColBERT = class_factory(self.name)
        except Exception:
            # FIX: was a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt.  Unrecognized model names fall
            # back to the default BERT base class.
            self.name = 'bert-base-uncased'
            HF_ColBERT = class_factory(self.name)
        self.model = HF_ColBERT.from_pretrained(name_or_path, colbert_config=self.colbert_config)
        self.model.to(DEVICE)
        self.raw_tokenizer = AutoTokenizer.from_pretrained(name_or_path)
        self.eval()

    def device(self):
        """Device the wrapped model currently lives on."""
        return self.model.device

    def bert(self):
        """Underlying language-model backbone."""
        return self.model.LM

    def linear(self):
        """Projection layer producing the ColBERT embeddings."""
        return self.model.linear

    def score_scaler(self):
        """Score-scaling head of the wrapped model."""
        return self.model.score_scaler

    def save(self, path):
        """Save model weights, tokenizer and ColBERT config under ``path``.

        Refuses legacy ``*.dnn`` checkpoint names.
        """
        assert (not path.endswith('.dnn')), f'{path}: We reserve *.dnn names for the deprecated checkpoint format.'
        self.model.save_pretrained(path)
        self.raw_tokenizer.save_pretrained(path)
        self.colbert_config.save_for_checkpoint(path)
_utils.test(arch=archs_support_ndarray_ad)
def test_ad_vector_arg():
    """Autodiff through a kernel taking vector ndarrays plus a vec2 value
    argument: forward computes p[i] = a[i] * z[0]; with unit output
    gradients, each component of a.grad should equal z[0] == 2.

    NOTE(review): decorator markers appear stripped in this copy — the
    line above is presumably ``@_utils.test(...)`` and ``compute_sum``
    presumably carries ``@ti.kernel``.
    """
    N = 10
    def compute_sum(a: ti.types.ndarray(), p: ti.types.ndarray(), z: ti.math.vec2):
        for i in p:
            p[i] = (a[i] * z[0])
    a = ti.ndarray(ti.math.vec2, shape=N, needs_grad=True)
    p = ti.ndarray(ti.math.vec2, shape=N, needs_grad=True)
    z = ti.math.vec2([2.0, 3.0])
    for i in range(N):
        a[i] = [3, 3]
    compute_sum(a, p, z)
    for i in range(N):
        # NOTE(review): comparing p[i] (a vec2) against a list built from
        # a[i] looks odd — confirm the intended elementwise comparison.
        assert (p[i] == [(a[i] * 2), (a[i] * 2)])
        # Seed the backward pass with unit gradients.
        p.grad[i] = [1, 1]
    compute_sum.grad(a, p, z)
    for i in range(N):
        for j in range(2):
            assert (a.grad[i][j] == 2)
class STL10(CIFAR10):
    """STL-10 dataset (96x96 RGB images, 10 classes plus an unlabeled set).

    Reuses CIFAR10's download/integrity machinery with STL-10's binary
    file layout.  ``split`` selects train / train+unlabeled / unlabeled /
    test; images from the unlabeled set get label -1.
    """

    base_folder = 'stl10_binary'
    # NOTE(review): the URL string is truncated in this copy of the file.
    url = '
    filename = 'stl10_binary.tar.gz'
    tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'
    class_names_file = 'class_names.txt'
    # [filename, md5] pairs.  NOTE(review): some checksums are shorter
    # than 32 hex chars — they appear truncated in this copy.
    train_list = [['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'], ['train_y.bin', '5a34089d4802c674881badbb'], ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4']]
    test_list = [['test_X.bin', '7f263ba9f9e0b06bf721ac82'], ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e']]
    splits = ('train', 'train+unlabeled', 'unlabeled', 'test')

    def __init__(self, root, split='train', transform=None, target_transform=None, download=False):
        """Load the requested split into memory (optionally downloading
        first) and read the class-name list if present.

        Raises:
            ValueError: if ``split`` is not one of ``self.splits``.
            RuntimeError: if the dataset files are missing or corrupted.
        """
        if (split not in self.splits):
            raise ValueError('Split "{}" not found. Valid splits are: {}'.format(split, ', '.join(self.splits)))
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.split = split
        if download:
            self.download()
        if (not self._check_integrity()):
            raise RuntimeError('Dataset not found or corrupted. You can use download=True to download it')
        if (self.split == 'train'):
            (self.data, self.labels) = self.__loadfile(self.train_list[0][0], self.train_list[1][0])
        elif (self.split == 'train+unlabeled'):
            # Labeled training data followed by unlabeled data (label -1).
            (self.data, self.labels) = self.__loadfile(self.train_list[0][0], self.train_list[1][0])
            (unlabeled_data, _) = self.__loadfile(self.train_list[2][0])
            self.data = np.concatenate((self.data, unlabeled_data))
            self.labels = np.concatenate((self.labels, np.asarray(([(- 1)] * unlabeled_data.shape[0]))))
        elif (self.split == 'unlabeled'):
            (self.data, _) = self.__loadfile(self.train_list[2][0])
            self.labels = np.asarray(([(- 1)] * self.data.shape[0]))
        else:
            (self.data, self.labels) = self.__loadfile(self.test_list[0][0], self.test_list[1][0])
        class_file = os.path.join(self.root, self.base_folder, self.class_names_file)
        if os.path.isfile(class_file):
            with open(class_file) as f:
                self.classes = f.read().splitlines()

    def __getitem__(self, index):
        """Return (image, target) at ``index`` after applying transforms;
        target is None when the split has no labels."""
        if (self.labels is not None):
            (img, target) = (self.data[index], int(self.labels[index]))
        else:
            (img, target) = (self.data[index], None)
        # Stored as CHW; PIL wants HWC.
        img = Image.fromarray(np.transpose(img, (1, 2, 0)))
        if (self.transform is not None):
            img = self.transform(img)
        if (self.target_transform is not None):
            target = self.target_transform(target)
        return (img, target)

    def __len__(self):
        """Number of images in the loaded split."""
        return self.data.shape[0]

    def __loadfile(self, data_file, labels_file=None):
        """Read an STL-10 binary image file (and optional label file).

        Returns (images, labels) where images is (N, 3, 96, 96) uint8 and
        labels is a 0-based uint8 array or None.
        """
        labels = None
        if labels_file:
            path_to_labels = os.path.join(self.root, self.base_folder, labels_file)
            with open(path_to_labels, 'rb') as f:
                # STL-10 labels are 1-based on disk; shift to 0-based.
                labels = (np.fromfile(f, dtype=np.uint8) - 1)
        path_to_data = os.path.join(self.root, self.base_folder, data_file)
        with open(path_to_data, 'rb') as f:
            everything = np.fromfile(f, dtype=np.uint8)
            images = np.reshape(everything, ((- 1), 3, 96, 96))
            # On-disk layout is column-major per channel; swap H and W.
            images = np.transpose(images, (0, 1, 3, 2))
        return (images, labels)

    def __repr__(self):
        """Human-readable summary of split, size, root and transforms."""
        fmt_str = (('Dataset ' + self.__class__.__name__) + '\n')
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Split: {}\n'.format(self.split)
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp)))))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', ('\n' + (' ' * len(tmp)))))
        return fmt_str
def iterate_over_frames(frequency):
    """Walk every subject's frame CSV and compute optical flow for
    selected consecutive frame pairs.

    Keeps a sliding window of the last two frames per clip and emits flow
    whenever the within-clip counter modulo ``frequency`` equals 2 or 14.

    NOTE(review): relies on module globals (lps_subjects, frames_dir,
    output_root_dir, width/height/channels, optical_flow, find_between,
    process_image) — confirm they are defined before calling.
    """
    for (_, subject_id) in lps_subjects.items():
        print(subject_id)
        csv_path = ((frames_dir + subject_id) + '.csv')
        print(csv_path)
        subject_frames_df = pd.read_csv(csv_path, sep=',')
        counter = 0  # frames seen for this subject overall
        per_video_counter = 0  # frames seen within the current clip
        for row in subject_frames_df.iterrows():
            if (counter == 0):
                ims = []  # sliding window of the last two frames
            if (counter >= 2):
                # ``vid_seq_name`` was set on the previous iteration; a
                # change means a new clip started.
                if (old_vid_seq_name != vid_seq_name):
                    print('New clip!', counter, per_video_counter)
                    print(vid_seq_name)
                    per_video_counter = 0
                    old_vid_seq_name = vid_seq_name
                counter_format = ('%06d' % (per_video_counter - 1))
                frequency_counter = (per_video_counter % frequency)
                if ((frequency_counter == 2) or (frequency_counter == 14)):
                    flow_output_path_stem = (((((output_root_dir + subject_id) + '/') + vid_seq_name) + '/flow_') + counter_format)
                    optical_flow.compute_optical_flow(ims, flow_output_path_stem, magnitude=True)
                # Slide the two-frame window: keep only the newest frame.
                ims[0] = ims[1]
                ims.pop()
            frame_path = row[1]['path']
            vid_seq_name = find_between(frame_path, (subject_id + '/'), '/frame')
            im = process_image(('../' + frame_path), (width, height, channels), standardize=False, mean=None, std=None, normalize=False)
            ims.append(im)
            counter += 1
            per_video_counter += 1
            if (counter == 1):
                old_vid_seq_name = vid_seq_name
def rescale_img(img_in, new_size_in):
    """Return ``img_in`` resized to ``new_size_in`` with bicubic resampling."""
    return img_in.resize(new_size_in, resample=Image.BICUBIC)
class EmitSparseGemmInstance():
    """Emit the C++ instantiation of a CUTLASS device-level SparseGemm
    operator from an operation description."""

    def __init__(self):
        # C++ instantiation template; ${...} placeholders are filled by
        # SubstituteTemplate in emit().
        self.gemm_template = '\n  // Gemm operator ${operation_name}\n  using Operation_${operation_name} = cutlass::gemm::device::SparseGemm<\n    ${element_a}, ${layout_a},\n    ${element_b}, ${layout_b},\n    ${element_c}, ${layout_c},\n    ${element_accumulator},\n    ${opcode_class},\n    ${arch},\n    cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,\n    cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,\n    cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,\n    ${epilogue_functor}<\n      ${element_c},\n      ${epilogue_vector_length},\n      ${element_accumulator},\n      ${element_epilogue}\n    >,\n    ${swizzling_functor},\n    ${stages},\n    ${align_a},\n    ${align_b},\n    false,\n    ${math_operation}\n    ${residual}\n  >;\n'

    def emit(self, operation):
        """Return the C++ source instantiating ``operation``.

        Derives the warp shape from threadblock shape / warp count and
        clamps the epilogue vector length to a 128-bit access.
        """
        warp_shape = [(operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx]) for idx in range(3)]
        # Widest epilogue vector that still fits a 128-bit access.
        epilogue_vector_length = int((min((operation.C.alignment * DataTypeSize[operation.C.element]), 128) / DataTypeSize[operation.C.element]))
        residual = ''
        # NOTE(review): 'transform_a'/'transform_b' are computed but this
        # template has no matching placeholders — presumably kept for
        # parity with the other emitters.
        values = {'operation_name': operation.procedural_name(), 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[operation.A.layout], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[operation.B.layout], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[operation.C.layout], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': ('cutlass::arch::Sm%d' % operation.arch), 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'warp_shape_m': str(warp_shape[0]), 'warp_shape_n': str(warp_shape[1]), 'warp_shape_k': str(warp_shape[2]), 'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]), 'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]), 'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]), 'epilogue_vector_length': str(epilogue_vector_length), 'element_epilogue': str(DataTypeTag[operation.element_epilogue]), 'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor], 'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor], 'stages': str(operation.tile_description.stages), 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment), 'transform_a': ComplexTransformTag[operation.A.complex_transform], 'transform_b': ComplexTransformTag[operation.B.complex_transform], 'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation], 'residual': residual}
        template = self.gemm_template
        return SubstituteTemplate(template, values)
def Draw(im, mode=None):
    """Create a drawing interface for image ``im``.

    Prefers the image object's own ``getdraw`` hook when it provides one;
    otherwise falls back to a plain ImageDraw instance.
    """
    try:
        draw = im.getdraw(mode)
    except AttributeError:
        draw = ImageDraw(im, mode)
    return draw
def fixup(dir):
    """Recursively apply ``fix_hdr`` to every ``.h`` file under ``dir``.

    Args:
        dir: root directory to walk.
    """
    for (root, dirs, files) in os.walk(dir):
        for f in files:
            if f.endswith('.h'):
                # FIX: build the path with os.path.join instead of a
                # hard-coded Windows '\\' separator, so this also works
                # on POSIX systems.
                path = os.path.join(root, f)
                fix_hdr(path)
_task('translation')
class TranslationTask(LegacyFairseqTask):
def add_args(parser):
    """Register all translation-task command-line options on ``parser``.

    NOTE(review): decorator markers appear stripped in this copy of the
    file — this is presumably a ``@staticmethod`` (no self/cls argument).
    """
    parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner; however, valid and test data are always in the first directory to avoid the need for repeating them in all directories')
    parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language')
    parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language')
    parser.add_argument('--load-alignments', action='store_true', help='load the binarized alignments')
    parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left')
    parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left')
    parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence')
    parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence')
    parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset')
    parser.add_argument('--truncate-source', action='store_true', default=False, help='truncate source to max-source-positions')
    parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N', help='if >0, then bucket source and target lengths into N buckets and pad accordingly; this is useful on TPUs to minimize the number of compilations')
    # BLEU-evaluation options used by build_model / valid_step.
    parser.add_argument('--eval-bleu', action='store_true', help='evaluation with BLEU scores')
    parser.add_argument('--eval-bleu-detok', type=str, default='space', help='detokenize before computing BLEU (e.g., "moses"); required if using --eval-bleu; use "space" to disable detokenization; see fairseq.data.encoders for other options')
    parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON', help='args for building the tokenizer, if needed')
    parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False, help='compute tokenized BLEU instead of sacrebleu')
    parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const=' ', default=None, help='remove BPE before computing BLEU')
    parser.add_argument('--eval-bleu-args', type=str, metavar='JSON', help='generation args for BLUE scoring, e.g., \'{"beam": 4, "lenpen": 0.6}\'')
    parser.add_argument('--eval-bleu-print-samples', action='store_true', help='print sample generations during validation')
def __init__(self, args, src_dict, tgt_dict):
    """Store the parsed args and the source/target dictionaries."""
    super().__init__(args)
    self.src_dict = src_dict
    self.tgt_dict = tgt_dict
def setup_task(cls, args, **kwargs):
    """Construct the task: normalize padding flags, infer the language
    pair from the first data directory if unset, and load the matching
    source/target dictionaries.

    NOTE(review): decorator markers appear stripped in this copy of the
    file — this is presumably a ``@classmethod``.

    Raises:
        Exception: if the language pair cannot be inferred from the data.
    """
    # CLI passes booleans as strings; convert them here.
    args.left_pad_source = utils.eval_bool(args.left_pad_source)
    args.left_pad_target = utils.eval_bool(args.left_pad_target)
    paths = utils.split_paths(args.data)
    assert (len(paths) > 0)
    if ((args.source_lang is None) or (args.target_lang is None)):
        (args.source_lang, args.target_lang) = data_utils.infer_language_pair(paths[0])
    if ((args.source_lang is None) or (args.target_lang is None)):
        raise Exception('Could not infer language pair, please provide it explicitly')
    # Dictionaries always come from the first data directory.
    src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)))
    tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang)))
    # Special symbols must agree between the two dictionaries.
    assert (src_dict.pad() == tgt_dict.pad())
    assert (src_dict.eos() == tgt_dict.eos())
    assert (src_dict.unk() == tgt_dict.unk())
    logger.info('[{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
    logger.info('[{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))
    return cls(args, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
    """Load a train/valid/test split as a language-pair dataset.

    Only the training split is sharded across multiple data paths; other
    splits always come from the first path.  Training shards rotate by epoch.
    """
    paths = utils.split_paths(self.args.data)
    assert (len(paths) > 0)
    if (split != getattr(self.args, 'train_subset', None)):
        # Non-train splits are never sharded.
        paths = paths[:1]
    data_path = paths[((epoch - 1) % len(paths))]
    (src, tgt) = (self.args.source_lang, self.args.target_lang)
    self.datasets[split] = load_langpair_dataset(data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions, load_alignments=self.args.load_alignments, truncate_source=self.args.truncate_source, num_buckets=self.args.num_batch_buckets, shuffle=(split != 'test'), pad_to_multiple=self.args.required_seq_len_multiple)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
    """Wrap raw source tensors in a LanguagePairDataset for generation-time use."""
    dataset = LanguagePairDataset(
        src_tokens,
        src_lengths,
        self.source_dictionary,
        tgt_dict=self.target_dictionary,
        constraints=constraints,
    )
    return dataset
def build_model(self, args):
    """Build the model; when --eval-bleu is set, also build the detokenizer
    and the sequence generator used for BLEU scoring at validation time."""
    model = super().build_model(args)
    if getattr(args, 'eval_bleu', False):
        assert (getattr(args, 'eval_bleu_detok', None) is not None), '--eval-bleu-detok is required if using --eval-bleu; try --eval-bleu-detok=moses (or --eval-bleu-detok=space to disable detokenization, e.g., when using sentencepiece)'
        # Both *_args options are JSON strings; fall back to '{}' when unset/empty.
        detok_args = json.loads((getattr(args, 'eval_bleu_detok_args', '{}') or '{}'))
        self.tokenizer = encoders.build_tokenizer(Namespace(tokenizer=getattr(args, 'eval_bleu_detok', None), **detok_args))
        gen_args = json.loads((getattr(args, 'eval_bleu_args', '{}') or '{}'))
        self.sequence_generator = self.build_generator([model], Namespace(**gen_args))
    return model
def valid_step(self, sample, model, criterion):
    """Run the criterion's validation step; optionally add per-order BLEU
    sufficient statistics to the logging output for later reduction."""
    (loss, sample_size, logging_output) = super().valid_step(sample, model, criterion)
    if self.args.eval_bleu:
        bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
        logging_output['_bleu_sys_len'] = bleu.sys_len
        logging_output['_bleu_ref_len'] = bleu.ref_len
        # One count/total pair per n-gram order.
        assert (len(bleu.counts) == EVAL_BLEU_ORDER)
        for i in range(EVAL_BLEU_ORDER):
            logging_output[('_bleu_counts_' + str(i))] = bleu.counts[i]
            logging_output[('_bleu_totals_' + str(i))] = bleu.totals[i]
    return (loss, sample_size, logging_output)
def reduce_metrics(self, logging_outputs, criterion):
    """Aggregate logged metrics; when --eval-bleu is on, also reduce the
    BLEU sufficient statistics and register a derived 'bleu' metric."""
    super().reduce_metrics(logging_outputs, criterion)
    if self.args.eval_bleu:

        def sum_logs(key):
            # Sum one key across all workers' logging outputs.
            return sum((log.get(key, 0) for log in logging_outputs))
        (counts, totals) = ([], [])
        for i in range(EVAL_BLEU_ORDER):
            counts.append(sum_logs(('_bleu_counts_' + str(i))))
            totals.append(sum_logs(('_bleu_totals_' + str(i))))
        if (max(totals) > 0):
            # Only log BLEU stats when at least one n-gram total was recorded.
            metrics.log_scalar('_bleu_counts', np.array(counts))
            metrics.log_scalar('_bleu_totals', np.array(totals))
            metrics.log_scalar('_bleu_sys_len', sum_logs('_bleu_sys_len'))
            metrics.log_scalar('_bleu_ref_len', sum_logs('_bleu_ref_len'))

            def compute_bleu(meters):
                # Recompute corpus BLEU from the reduced statistics.
                import inspect
                import sacrebleu
                fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
                # sacrebleu renamed 'smooth' -> 'smooth_method'; support both APIs.
                if ('smooth_method' in fn_sig):
                    smooth = {'smooth_method': 'exp'}
                else:
                    smooth = {'smooth': 'exp'}
                bleu = sacrebleu.compute_bleu(correct=meters['_bleu_counts'].sum, total=meters['_bleu_totals'].sum, sys_len=meters['_bleu_sys_len'].sum, ref_len=meters['_bleu_ref_len'].sum, **smooth)
                return round(bleu.score, 2)
            metrics.log_derived('bleu', compute_bleu)
def max_positions(self):
    """Return the (max source length, max target length) pair for this task."""
    cfg = self.args
    return (cfg.max_source_positions, cfg.max_target_positions)
@property
def source_dictionary(self):
    """Source-language dictionary.

    NOTE(review): restored the ``@property`` decorator that was lost in this
    copy — other code in this file accesses ``self.source_dictionary``
    without calling it (see build_dataset_for_inference), which would pass a
    bound method instead of the dictionary.
    """
    return self.src_dict
@property
def target_dictionary(self):
    """Target-language dictionary.

    NOTE(review): restored the ``@property`` decorator that was lost in this
    copy — ``self.target_dictionary`` is read as an attribute elsewhere in
    this file (see build_dataset_for_inference).
    """
    return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
    """Generate hypotheses for *sample* and score them against the references
    with sacrebleu, returning the BLEU result object."""
    import sacrebleu

    def decode(toks, escape_unk=False):
        # Convert token ids to a (detokenized) string.  Unknown tokens get
        # different marker strings in hyp vs ref, presumably so an <unk> can
        # never match between the two — confirm against fairseq upstream.
        s = self.tgt_dict.string(toks.int().cpu(), self.args.eval_bleu_remove_bpe, unk_string=('UNKNOWNTOKENINREF' if escape_unk else 'UNKNOWNTOKENINHYP'))
        if self.tokenizer:
            s = self.tokenizer.decode(s)
        return s
    gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
    (hyps, refs) = ([], [])
    for i in range(len(gen_out)):
        # gen_out[i][0] is the best hypothesis for sentence i.
        hyps.append(decode(gen_out[i][0]['tokens']))
        refs.append(decode(utils.strip_pad(sample['target'][i], self.tgt_dict.pad()), escape_unk=True))
    if self.args.eval_bleu_print_samples:
        logger.info(('example hypothesis: ' + hyps[0]))
        logger.info(('example reference: ' + refs[0]))
    if self.args.eval_tokenized_bleu:
        # Score raw tokens, disabling sacrebleu's own tokenization.
        return sacrebleu.corpus_bleu(hyps, [refs], tokenize='none')
    else:
        return sacrebleu.corpus_bleu(hyps, [refs])
def save_corpus_segments_to_file(output_filename, input_dict):
    """Write *input_dict* to *output_filename* as valid JSON.

    Values are left-stripped of leading whitespace, matching the original
    intent.  Uses ``json.dump`` instead of the previous hand-rolled writer,
    which produced invalid JSON: it emitted a trailing comma before the
    closing brace and did not escape quotes/newlines inside values.

    Args:
        output_filename: path of the file to (over)write.
        input_dict: mapping of segment keys to string values.
    """
    cleaned = {key: value.lstrip() for (key, value) in input_dict.items()}
    with open(output_filename, 'w') as file:
        json.dump(cleaned, file, ensure_ascii=False, indent=0)
        file.write('\n')
class TestGeneral(unittest.TestCase):
    """Determinism tests for the CausalWorld environment: identical action
    sequences must reproduce identical observations and rewards."""

    def setUp(self):
        # Fresh 'general' task and headless environment per test.
        self.task = generate_task(task_generator_id='general')
        self.env = CausalWorld(task=self.task, enable_visualization=False)
        self.env.reset()
        return

    def tearDown(self):
        self.env.close()
        return

    def test_determinism(self):
        """Replaying the same actions after reset() yields identical rollouts."""
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        obs = self.env.reset()
        observations_1.append(obs)
        for i in range(horizon):
            (obs, reward, done, info) = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            obs = self.env.reset()
            observations_2.append(obs)
            for i in range(horizon):
                (obs, reward, done, info) = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                # Print the diff before failing, for easier debugging.
                if (not np.array_equal(observations_1[i], observations_2[i])):
                    print((np.array(observations_1[i]) - np.array(observations_2[i])))
                assert np.array_equal(observations_1[i], observations_2[i])
            assert (rewards_1 == rewards_2)

    def test_determinism_w_interventions(self):
        """Determinism must hold after sampling a new goal and setting it as
        the starting state."""
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        new_goal = self.env.sample_new_goal()
        self.env.set_starting_state(interventions_dict=new_goal)
        for i in range(horizon):
            (obs, reward, done, info) = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            self.env.reset()
            for i in range(horizon):
                (obs, reward, done, info) = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                assert np.array_equal(observations_1[i], observations_2[i])
            assert (rewards_1 == rewards_2)

    def test_determinism_w_in_episode_interventions(self):
        """An in-episode intervention must not affect determinism of later,
        reset episodes."""
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        self.env.reset()
        for i in range(horizon):
            (obs, reward, done, info) = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        # Second episode: intervene on a tool object mid-episode.
        self.env.reset()
        for i in range(horizon):
            (obs, reward, done, info) = self.env.step(actions[i])
            if (i == 50):
                success_signal = self.env.do_intervention({'tool_0': {'cylindrical_position': [0, 0, 0.2]}})
        # Third episode: must match the first episode exactly.
        observations_2 = []
        rewards_2 = []
        self.env.reset()
        for i in range(horizon):
            (obs, reward, done, info) = self.env.step(actions[i])
            observations_2.append(obs)
            rewards_2.append(reward)
            assert np.array_equal(observations_1[i], observations_2[i])
        assert (rewards_1 == rewards_2)
class All2All(torch.autograd.Function):
    """Autograd-aware wrapper around ``torch.distributed.all_to_all_single``.

    The backward pass is itself an all-to-all with the input/output split
    sizes swapped, which routes each gradient chunk back to the rank it came
    from.

    NOTE(review): ``torch.autograd.Function`` requires ``forward``/``backward``
    to be static methods (invoking a non-static Function is an error on
    modern PyTorch); the ``@staticmethod`` decorators appear to have been
    stripped in this copy and are restored here.
    """

    @staticmethod
    def forward(ctx, xs, input_splits=None, output_splits=None):
        # Remember the splits so backward can invert the exchange.
        ctx.input_splits = input_splits
        ctx.output_splits = output_splits
        # With explicit output splits the receive buffer's first dim is the
        # total number of received rows; otherwise it mirrors the input.
        ys = (torch.empty_like(xs) if (output_splits is None) else xs.new_empty(size=([sum(output_splits)] + list(xs.size()[1:]))))
        torch.distributed.all_to_all_single(ys, xs, output_split_sizes=output_splits, input_split_sizes=input_splits)
        return ys

    @staticmethod
    def backward(ctx, grad_output):
        result = (torch.empty_like(grad_output) if (ctx.input_splits is None) else grad_output.new_empty(size=([sum(ctx.input_splits)] + list(grad_output.size()[1:]))))
        # Reverse exchange: swap the split roles relative to forward.
        torch.distributed.all_to_all_single(result, grad_output, output_split_sizes=ctx.input_splits, input_split_sizes=ctx.output_splits)
        # No gradients for the split-size arguments.
        return (result, None, None)
class contdist5():
    """Continuous test distribution with density
    f(x) = 0.2 * (0.05 + 0.45 * (1 + sin(2*pi*x))) on the support [-5, 5]."""

    def __init__(self):
        # Mode of the density.
        self.mode = 0

    def pdf(self, x):
        """Density at x."""
        two_pi = 2 * np.pi
        return 0.2 * (0.05 + (0.45 * (1 + np.sin(two_pi * x))))

    def dpdf(self, x):
        """First derivative of the density at x."""
        two_pi = 2 * np.pi
        return ((0.2 * 0.45) * two_pi) * np.cos(two_pi * x)

    def cdf(self, x):
        """Cumulative distribution function at x."""
        two_pi = 2 * np.pi
        return ((x / 10.0) + 0.5) + ((0.09 / two_pi) * (np.cos(10 * np.pi) - np.cos(two_pi * x)))

    def support(self):
        """Support interval of the distribution."""
        return ((- 5), 5)

    def __repr__(self):
        return 'sin10'
def test_close(model=None):
    """Exercise Open/Shift/Close transitions and check the resulting tree.

    Builds (VP Mox (NP Mox Opal))-style structure: open VP, shift one word,
    open NP, shift two words, then close twice, verifying legality and
    open-constituent counts at every step.
    """
    if (model is None):
        model = SimpleModel()
    state = build_initial_state(model)[0]
    open_transition_vp = parse_transitions.OpenConstituent('VP')
    assert open_transition_vp.is_legal(state, model)
    state = open_transition_vp.apply(state, model)
    assert (state.num_opens == 1)
    shift = parse_transitions.Shift()
    assert shift.is_legal(state, model)
    state = shift.apply(state, model)
    open_transition_np = parse_transitions.OpenConstituent('NP')
    assert open_transition_np.is_legal(state, model)
    state = open_transition_np.apply(state, model)
    assert (state.num_opens == 2)
    assert shift.is_legal(state, model)
    state = shift.apply(state, model)
    assert shift.is_legal(state, model)
    state = shift.apply(state, model)
    # All words consumed: no further shift is possible.
    assert (not shift.is_legal(state, model))
    assert (state.num_opens == 2)
    close_transition = parse_transitions.CloseConstituent()
    assert close_transition.is_legal(state, model)
    state = close_transition.apply(state, model)
    assert (state.num_opens == 1)
    assert close_transition.is_legal(state, model)
    state = close_transition.apply(state, model)
    assert (state.num_opens == 0)
    # Nothing left open: closing again must be illegal.
    assert (not close_transition.is_legal(state, model))
    # Verify the constructed tree top-down.
    tree = model.get_top_constituent(state.constituents)
    assert (tree.label == 'VP')
    assert (len(tree.children) == 2)
    tree = tree.children[1]
    assert (tree.label == 'NP')
    assert (len(tree.children) == 2)
    assert tree.children[0].is_preterminal()
    assert tree.children[1].is_preterminal()
    assert (tree.children[0].children[0].label == 'Mox')
    assert (tree.children[1].children[0].label == 'Opal')
    assert (len(state.constituents) == 2)
    assert (state.all_transitions(model) == [open_transition_vp, shift, open_transition_np, shift, shift, close_transition, close_transition])
def sort_specializations(keystring):
    """Build a sort key for a specialization name from the dtype names in it.

    Scans *keystring* (case-insensitively) for dtype names, recording a
    (position-in-string, rank-in-canonical-order) pair for each hit and
    removing it before rescanning.  Returns a 2-tuple of the ranks of the
    first one or two hits ordered by string position, or the lowered string
    unchanged when no dtype name occurs.
    """
    type_order = ['bool', 'int8', 'int16', 'int32', 'int64', 'u8', 'uint8', 'u16', 'uint16', 'u32', 'uint32', 'u64', 'uint64', 'float16', 'float32', 'float64', 'float128', 'complex64', 'complex128', 'complex256']
    found = []
    remaining = keystring.lower()
    # Repeatedly strip recognised dtype names until none remain.
    while any((name in remaining) for name in type_order):
        for (rank, name) in enumerate(type_order):
            # Skip 'intN' matches that are really the tail of a 'uintN' token.
            if (name in remaining) and not (name.startswith('int') and (remaining[remaining.find(name) - 1] == 'u')):
                found.append((remaining.find(name), rank))
                remaining = remaining.replace(name, '', 1)
    found.sort()
    if not found:
        return remaining
    if len(found) == 1:
        return (found[0][1], 0)
    return (found[0][1], found[1][1])
def _v(m1, m2, hue):
    """Compute one colour channel from hue and the two HLS helper values.

    Same piecewise-linear contract as colorsys's internal ``_v`` helper:
    rises from m1 to m2 over the first sixth of the hue circle, holds at m2
    until one half, falls back to m1 by two thirds, then stays at m1.
    """
    hue %= 1.0
    if hue < ONE_SIXTH:
        return m1 + (m2 - m1) * hue * 6.0
    if hue < 0.5:
        return m2
    if hue < TWO_THIRD:
        return m1 + (m2 - m1) * (TWO_THIRD - hue) * 6.0
    return m1
def line_stats(example):
    """Compute per-line length statistics for a text sample.

    Args:
        example: mapping with a 'content' string field.

    Returns:
        dict with 'line_mean' (mean line length) and 'line_max' (length of
        the longest line).  Empty content yields zeros instead of raising
        (``max()`` on an empty sequence) or producing a NaN mean.
    """
    line_lengths = [len(line) for line in example['content'].splitlines()]
    if not line_lengths:
        # ''.splitlines() == [] — guard the empty-content edge case.
        return {'line_mean': 0.0, 'line_max': 0}
    return {'line_mean': np.mean(line_lengths), 'line_max': max(line_lengths)}
def main(cfg):
    """Entry point: build data loaders, model and optimizer, then either
    train (cfg.train == 1) or evaluate the best saved checkpoint."""
    (train_loader, train_loader_ca, train_loader_cb, val_loader_c, val_loader_b, num_query_c, num_query_b, num_classes) = make_data_loader(cfg, use_eraser=True)
    model = build_model(num_classes, 'base', pretrain_choice=True)
    # Wrap in DataParallel and move to GPU only when CUDA is available.
    model = (torch.nn.DataParallel(model).cuda() if torch.cuda.is_available() else model)
    loss_func = make_loss()
    optimizer = make_optimizer(cfg, model)
    # LR decays by 10x at epochs 40 and 80.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[40, 80], gamma=0.1)
    if (cfg.train == 1):
        start_epoch = 0
        acc_best = 0.0
        do_train(cfg, model, train_loader, val_loader_c, optimizer, scheduler, loss_func, num_query_c, start_epoch, acc_best)
    else:
        # Evaluation path: restore the best checkpoint and report retrieval metrics.
        last_model_wts = torch.load(os.path.join('pre_feat', 'checkpoint_best_pre.pth'))
        model.load_state_dict(last_model_wts['state_dict'])
        (mAP, cmc1, cmc5, cmc10, cmc20) = inference_path(model, val_loader_c, num_query_c)
        start_time = datetime.datetime.now()
        start_time = ('%4d:%d:%d-%2d:%2d:%2d' % (start_time.year, start_time.month, start_time.day, start_time.hour, start_time.minute, start_time.second))
        print('{} - Final: cmc1: {:.1%} cmc5: {:.1%} cmc10: {:.1%} cmc20: {:.1%} mAP: {:.1%}\n'.format(start_time, cmc1, cmc5, cmc10, cmc20, mAP))
# NOTE(review): bare call — this looks like a decorator/registration line that
# lost its '@' target in this copy (presumably a fusion-registry decorator for
# the MFB class that follows); confirm against the original source.
_fusion('mfb')
class MFB(nn.Module):
    """Multimodal Factorized Bilinear pooling.

    Projects each modality to ``mm_dim * factor``, multiplies element-wise,
    then sum-pools groups of ``factor`` dimensions down to ``mm_dim`` before
    the output projection.  Optional power/L2 normalization in between.
    """

    def __init__(self, input_dims, output_dim, mm_dim=1200, factor=2, activ_input='relu', activ_output='relu', normalize=False, dropout_input=0.0, dropout_pre_norm=0.0, dropout_output=0.0):
        super().__init__()
        # Configuration.
        self.input_dims = input_dims
        self.mm_dim = mm_dim
        self.factor = factor
        self.output_dim = output_dim
        self.activ_input = activ_input
        self.activ_output = activ_output
        self.normalize = normalize
        self.dropout_input = dropout_input
        self.dropout_pre_norm = dropout_pre_norm
        self.dropout_output = dropout_output
        # Modules: one projection per modality, one output projection.
        self.linear0 = nn.Linear(input_dims[0], (mm_dim * factor))
        self.linear1 = nn.Linear(input_dims[1], (mm_dim * factor))
        self.linear_out = nn.Linear(mm_dim, output_dim)
        # Trainable parameter count, for logging/inspection.
        self.n_params = sum((p.numel() for p in self.parameters() if p.requires_grad))
        log_class_usage('Fusion', self.__class__)

    def forward(self, x):
        """Fuse the pair (x[0], x[1]) into a (batch, output_dim) tensor."""
        a = self.linear0(x[0])
        b = self.linear1(x[1])
        if self.activ_input:
            activ = getattr(F, self.activ_input)
            a = activ(a)
            b = activ(b)
        if self.dropout_input > 0:
            a = F.dropout(a, p=self.dropout_input, training=self.training)
            b = F.dropout(b, p=self.dropout_input, training=self.training)
        fused = a * b
        if self.dropout_pre_norm > 0:
            fused = F.dropout(fused, p=self.dropout_pre_norm, training=self.training)
        # Sum-pool every group of `factor` dims down to a single dim.
        fused = fused.view(fused.size(0), self.mm_dim, self.factor)
        fused = fused.sum(2)
        if self.normalize:
            # Signed square-root followed by L2 normalization.
            fused = torch.sqrt(F.relu(fused)) - torch.sqrt(F.relu(-fused))
            fused = F.normalize(fused, p=2)
        out = self.linear_out(fused)
        if self.activ_output:
            out = getattr(F, self.activ_output)(out)
        if self.dropout_output > 0:
            out = F.dropout(out, p=self.dropout_output, training=self.training)
        return out
class KnowledgeDistillationLoss(nn.Module):
    """Soft-target distillation loss between student logits and teacher logits.

    Cross-entropy-style term: mean over channels of (student log-softmax *
    teacher softmax), negated, with optional element mask and configurable
    reduction.
    """

    def __init__(self, reduction='mean', alpha=1.0):
        super().__init__()
        # 'mean' | 'sum' | anything else -> per-element loss tensor.
        self.reduction = reduction
        # Scaling applied to the teacher logits before the softmax.
        self.alpha = alpha

    def forward(self, inputs, targets, gt, mask=None):
        # Compare only the channels the teacher provides (the student may
        # have extra, newer channels).  `gt` is accepted for interface
        # compatibility but unused here.
        narrowed = inputs.narrow(1, 0, targets.shape[1])
        log_probs = torch.log_softmax(narrowed, dim=1)
        soft_targets = torch.softmax(targets * self.alpha, dim=1)
        per_elem = (log_probs * soft_targets).mean(dim=1)
        if mask is not None:
            per_elem = per_elem * mask.float()
        if self.reduction == 'mean':
            return -torch.mean(per_elem)
        if self.reduction == 'sum':
            return -torch.sum(per_elem)
        return -per_elem
class ClassifierStudentLoss(object):
    """Student loss: weighted mix of hard cross-entropy and a soft
    teacher-matching loss computed by a pluggable base loss."""

    def __init__(self, student_model, base_loss, alpha=0.9):
        self.student = student_model
        self.base_loss = base_loss
        # Weight on the hard-label term; (1 - alpha) goes to the soft term.
        self.alpha = alpha

    def __call__(self, inputs, targets, teacher_logits, temp=None):
        batch = targets.size(0)
        logits = self.student(inputs)
        # Hard term: standard CE on the first `batch` rows (the student may
        # emit logits for extra, augmented rows beyond the real batch).
        hard = F.cross_entropy(logits[:batch], targets)
        if temp is None:
            temp = torch.ones_like(logits)
        soft = self.base_loss(teacher_logits, logits, temp)
        total = self.alpha * hard + (1 - self.alpha) * soft
        return total, logits
def check_fit_args_fix(distfn, arg, rvs):
    """Check that distfn.fit honours fixed parameters (floc/fscale/f0/f1/f2).

    For each fixing keyword, asserts that the returned parameter vector has
    the full length (shape params + loc + scale) and that the fixed entry
    comes back exactly as requested.
    """
    with np.errstate(all='ignore'), suppress_warnings() as sup:
        # Silence known noisy warnings from specific distributions.
        sup.filter(category=DeprecationWarning, message='.*frechet_')
        sup.filter(category=RuntimeWarning, message='The shape parameter of the erlang')
        vals = distfn.fit(rvs, floc=0)
        vals2 = distfn.fit(rvs, fscale=1)
        # Returned tuple is (shape params..., loc, scale).
        npt.assert_((len(vals) == (2 + len(arg))))
        npt.assert_((vals[(- 2)] == 0))
        npt.assert_((vals2[(- 1)] == 1))
        npt.assert_((len(vals2) == (2 + len(arg))))
        if (len(arg) > 0):
            # Fix each shape parameter in turn, as many as the dist has.
            vals3 = distfn.fit(rvs, f0=arg[0])
            npt.assert_((len(vals3) == (2 + len(arg))))
            npt.assert_((vals3[0] == arg[0]))
            if (len(arg) > 1):
                vals4 = distfn.fit(rvs, f1=arg[1])
                npt.assert_((len(vals4) == (2 + len(arg))))
                npt.assert_((vals4[1] == arg[1]))
                if (len(arg) > 2):
                    vals5 = distfn.fit(rvs, f2=arg[2])
                    npt.assert_((len(vals5) == (2 + len(arg))))
                    npt.assert_((vals5[2] == arg[2]))
class Score(Operation):
    """Graph-of-thoughts operation that assigns a score to each predecessor
    thought, either via a user-supplied scoring function or by querying the
    language model with a scoring prompt."""
    operation_type: OperationType = OperationType.score

    def __init__(self, num_samples: int=1, combined_scoring: bool=False, scoring_function: Callable[([Union[(List[Dict], Dict)]], Union[(List[float], float)])]=None) -> None:
        """
        Args:
            num_samples: number of LM responses to request per scoring prompt.
            combined_scoring: score all predecessor states with one call/prompt
                instead of one per thought.
            scoring_function: optional callable that scores state(s) directly,
                bypassing the LM.
        """
        super().__init__()
        self.num_samples: int = num_samples
        self.combined_scoring: bool = combined_scoring
        # Scored copies of the predecessor thoughts, filled by _execute.
        self.thoughts: List[Thought] = []
        self.scoring_function: Callable[([Union[(List[Dict], Dict)]], Union[(List[float], float)])] = scoring_function

    def get_thoughts(self) -> List[Thought]:
        """Return the scored thoughts produced by this operation."""
        return self.thoughts

    def _execute(self, lm: AbstractLanguageModel, prompter: Prompter, parser: Parser, **kwargs) -> None:
        """Score each predecessor thought and store scored copies in self.thoughts."""
        previous_thoughts: List[Thought] = self.get_previous_thoughts()
        assert (len(self.predecessors) > 0), 'Score operation needs at least one predecessor'
        if self.combined_scoring:
            # One scoring call over all predecessor states at once.
            previous_thoughts_states = [thought.state for thought in previous_thoughts]
            if (self.scoring_function is not None):
                self.logger.debug('Using scoring function %s to score states', self.scoring_function)
                scores = self.scoring_function(previous_thoughts_states)
            else:
                prompt = prompter.score_prompt(previous_thoughts_states)
                self.logger.debug('Prompt for LM: %s', prompt)
                responses = lm.get_response_texts(lm.query(prompt, num_responses=self.num_samples))
                self.logger.debug('Responses from LM: %s', responses)
                scores = parser.parse_score_answer(previous_thoughts_states, responses)
            for (thought, score) in zip(previous_thoughts, scores):
                new_thought = Thought.from_thought(thought)
                new_thought.score = score
                self.thoughts.append(new_thought)
        else:
            # Score each thought independently.
            for thought in previous_thoughts:
                new_thought = Thought.from_thought(thought)
                if (self.scoring_function is not None):
                    self.logger.debug('Using scoring function %s to score state', self.scoring_function)
                    score = self.scoring_function(thought.state)
                else:
                    prompt = prompter.score_prompt([thought.state])
                    self.logger.debug('Prompt for LM: %s', prompt)
                    responses = lm.get_response_texts(lm.query(prompt, num_responses=self.num_samples))
                    self.logger.debug('Responses from LM: %s', responses)
                    score = parser.parse_score_answer([thought.state], responses)[0]
                new_thought.score = score
                self.thoughts.append(new_thought)
        self.logger.info('Score operation %d scored %d thoughts', self.id, len(self.thoughts))
def AssionGroupU(n=None, names='u'):
    """Construct a cubic braid group of Assion type U.

    Args:
        n: number of strands (None lets CubicBraidGroup choose its default).
        names: generator name prefix.
    """
    return CubicBraidGroup(n=n, names=names, cbg_type=CubicBraidGroup.type.AssionU)
class BatchPolopt(RLAlgorithm):
    """Base class for batch policy-optimization RL algorithms.

    Drives the sample -> process -> optimize -> snapshot loop; subclasses
    implement init_opt / optimize_policy / get_itr_snapshot.
    """

    def __init__(self, env, policy, baseline, scope=None, n_itr=500, start_itr=0, batch_size=5000, max_path_length=500, discount=0.99, gae_lambda=1, plot=False, pause_for_plot=False, center_adv=True, positive_adv=False, store_paths=False, whole_paths=True, fixed_horizon=False, sampler_cls=None, sampler_args=None, force_batch_sampler=True, clip_reward=False, checkpoint_interval=5, **kwargs):
        """Store configuration, build the sampler, and initialize the optimizer.

        Notable options: gae_lambda (advantage estimation), clip_reward
        (clip rewards to [-1, 1] in process_paths), store_paths (keep raw
        paths in snapshots), force_batch_sampler (prefer BatchSampler even
        for vectorized policies).
        """
        self.env = env
        self.policy = policy
        self.baseline = baseline
        self.scope = scope
        self.n_itr = n_itr
        self.start_itr = start_itr
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.discount = discount
        self.gae_lambda = gae_lambda
        self.plot = plot
        self.pause_for_plot = pause_for_plot
        self.center_adv = center_adv
        self.positive_adv = positive_adv
        self.store_paths = store_paths
        self.whole_paths = whole_paths
        self.fixed_horizon = fixed_horizon
        self.clip_reward = clip_reward
        self.checkpoint_interval = checkpoint_interval
        if (sampler_cls is None):
            # Vectorized sampling only when the policy supports it and the
            # caller has not forced the plain batch sampler.
            if (self.policy.vectorized and (not force_batch_sampler)):
                sampler_cls = VectorizedSampler
            else:
                sampler_cls = BatchSampler
        if (sampler_args is None):
            sampler_args = dict()
        self.sampler = sampler_cls(self, **sampler_args)
        self.init_opt()

    def start_worker(self):
        """Start the sampler's worker processes/threads."""
        self.sampler.start_worker()

    def shutdown_worker(self):
        """Shut down the sampler's workers."""
        self.sampler.shutdown_worker()

    def obtain_samples(self, itr):
        """Collect a batch of rollout paths for iteration *itr*."""
        return self.sampler.obtain_samples(itr)

    def process_samples(self, itr, paths):
        """Turn raw paths into training arrays (advantages, baselines, ...)."""
        return self.sampler.process_samples(itr, paths)

    def train(self, sess=None):
        """Main training loop: sample, process, optimize, snapshot, log.

        Creates (and later closes) a TF session only when none is supplied.
        """
        created_session = (True if (sess is None) else False)
        if (sess is None):
            sess = tf.Session()
            sess.__enter__()
        global_step = tf.train.get_or_create_global_step()
        global_step_inc = global_step.assign_add(1)
        sess.run(tf.global_variables_initializer())
        self.start_worker()
        start_time = time.time()
        total_timesteps = 0
        for itr in range(self.start_itr, self.n_itr):
            itr_start_time = time.time()
            with logger.prefix(('itr #%d | ' % itr)):
                logger.log('Obtaining samples...')
                with _MeasureTime('ObtainSamplesTime'):
                    paths = self.obtain_samples(itr)
                logger.log('Processing samples...')
                # Reward clipping / raw-reward bookkeeping happens before the
                # sampler's own processing.
                with _MeasureTime('ProcessPathsTime'):
                    self.process_paths(paths)
                with _MeasureTime('ProcessSamplesTime'):
                    samples_data = self.process_samples(itr, paths)
                timesteps = len(samples_data['observations'])
                total_timesteps += timesteps
                logger.log('Logging diagnostics...')
                self.log_diagnostics(paths)
                logger.log('Optimizing policy...')
                with _MeasureTime('OptimizePolicyTime'):
                    self.optimize_policy(itr, samples_data)
                logger.log('Saving snapshot...')
                params = self.get_itr_snapshot(itr, samples_data)
                if self.store_paths:
                    params['paths'] = samples_data['paths']
                logger.save_itr_params(itr, params)
                logger.log('Saved')
                logger.record_tabular('Time', (time.time() - start_time))
                logger.record_tabular('ItrTime', (time.time() - itr_start_time))
                logger.record_tabular('Timesteps', timesteps)
                logger.record_tabular('TotalTimesteps', total_timesteps)
                logger.dump_tabular(with_prefix=False)
                if self.plot:
                    # Render one evaluation rollout for visual inspection.
                    rollout(self.env, self.policy, animated=True, max_path_length=self.max_path_length)
                    if self.pause_for_plot:
                        input('Plotting evaluation run: Press Enter to continue...')
            sess.run(global_step_inc)
        self.shutdown_worker()
        if created_session:
            sess.close()

    def log_diagnostics(self, paths):
        """Let env, policy and baseline log their own per-iteration diagnostics."""
        self.env.log_diagnostics(paths)
        self.policy.log_diagnostics(paths)
        self.baseline.log_diagnostics(paths)

    def init_opt(self):
        """Build the optimization graph.  Subclass responsibility."""
        raise NotImplementedError

    def get_itr_snapshot(self, itr, samples_data):
        """Return the dict of objects to snapshot this iteration.  Subclass responsibility."""
        raise NotImplementedError

    def optimize_policy(self, itr, samples_data):
        """Perform one policy-optimization step.  Subclass responsibility."""
        raise NotImplementedError

    def process_paths(self, paths):
        """Keep a copy of raw rewards and optionally clip rewards to [-1, 1]."""
        for path in paths:
            path['raw_rewards'] = np.copy(path['rewards'])
            if self.clip_reward:
                path['rewards'] = np.clip(path['raw_rewards'], (- 1), 1)
def is_video_file(filename):
    """Return True if *filename* ends with a known video extension (case-insensitive)."""
    lowered = filename.lower()
    for ext in VID_EXTENSIONS:
        if lowered.endswith(ext):
            return True
    return False
class BasicBlock(nn.Module):
    """Standard ResNet basic block: two 3x3 convs with an identity (or 1x1
    projection) shortcut."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        out_planes = self.expansion * planes
        if stride == 1 and in_planes == out_planes:
            # Shapes match: plain identity shortcut.
            self.shortcut = nn.Sequential()
        else:
            # Spatial or channel mismatch: project with a strided 1x1 conv.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x, fake_relu=False):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + self.shortcut(x)
        if fake_relu:
            # Variant with a modified-gradient ReLU on the final activation.
            return FakeReLU.apply(out)
        return F.relu(out)
def collate_fn_squeeze_pcd_batch_grasp(batch: List) -> Dict:
    """Collate point-cloud grasp samples into a single flattened batch.

    Tensor fields are stacked along a new batch dim; 'pos' is then flattened
    to (batch * n_points, channels) and an 'offset' tensor of cumulative
    per-sample point counts is added.
    """
    collated = {key: [sample[key] for sample in batch] for key in batch[0]}
    for key, values in collated.items():
        if torch.is_tensor(values[0]):
            collated[key] = torch.stack(values)
    # Cumulative point counts, e.g. [n0, n0+n1, ...].
    offsets = []
    total = 0
    for points in collated['pos']:
        total += points.shape[0]
        offsets.append(total)
    collated['offset'] = torch.IntTensor(offsets)
    collated['pos'] = rearrange(collated['pos'], 'b n c -> (b n) c')
    return collated
def make_main_state(sdfg):
    """Build the main FPGA SpMV state: reader kernels stream the CSR row
    pointers, column indices, values and x entries into a compute kernel,
    whose results stream into a writer that fills b.

    Fix: the row_to_compute memlet previously referenced the *output* stream
    node (row_to_compute_out) instead of the input node read here
    (row_to_compute_in), unlike every other read path in this state.
    """
    state = sdfg.add_state('spmv')
    # --- row-pointer reader: fans A_row out to the other kernels ---
    a_row = state.add_array('A_row_device', ((rows + 1),), itype, transient=True, storage=StorageType.FPGA_Global)
    row_to_val_out = state.add_stream('row_to_val', itype, transient=True, storage=StorageType.FPGA_Local)
    row_to_col_out = state.add_stream('row_to_col', itype, transient=True, storage=StorageType.FPGA_Local)
    row_to_x_out = state.add_stream('row_to_x', itype, transient=True, storage=StorageType.FPGA_Local)
    row_to_compute_out = state.add_stream('row_to_compute', itype, transient=True, storage=StorageType.FPGA_Local)
    read_row_sdfg = make_read_row()
    read_row_tasklet = state.add_nested_sdfg(read_row_sdfg, sdfg, {'A_row_mem'}, {'to_val_pipe', 'to_col_pipe', 'to_x_pipe', 'to_compute_pipe'})
    state.add_memlet_path(a_row, read_row_tasklet, memlet=dace.memlet.Memlet.simple(a_row, '0:rows+1'), dst_conn='A_row_mem')
    state.add_memlet_path(read_row_tasklet, row_to_val_out, memlet=dace.memlet.Memlet.simple(row_to_val_out, '0', num_accesses=(- 1)), src_conn='to_val_pipe')
    state.add_memlet_path(read_row_tasklet, row_to_col_out, memlet=dace.memlet.Memlet.simple(row_to_col_out, '0', num_accesses=(- 1)), src_conn='to_col_pipe')
    state.add_memlet_path(read_row_tasklet, row_to_x_out, memlet=dace.memlet.Memlet.simple(row_to_x_out, '0', num_accesses=(- 1)), src_conn='to_x_pipe')
    state.add_memlet_path(read_row_tasklet, row_to_compute_out, memlet=dace.memlet.Memlet.simple(row_to_compute_out, '0', num_accesses=(- 1)), src_conn='to_compute_pipe')
    # --- column-index reader ---
    a_col = state.add_array('A_col_device', (nnz,), itype, transient=True, storage=StorageType.FPGA_Global)
    row_to_col_in = state.add_stream('row_to_col', itype, transient=True, storage=StorageType.FPGA_Local)
    col_to_x_out = state.add_stream('col_to_x', itype, transient=True, storage=StorageType.FPGA_Local)
    read_col_sdfg = make_read_col()
    read_col_tasklet = state.add_nested_sdfg(read_col_sdfg, sdfg, {'A_col_mem', 'row_pipe'}, {'col_pipe'})
    state.add_memlet_path(a_col, read_col_tasklet, memlet=dace.memlet.Memlet.simple(a_col, '0:nnz'), dst_conn='A_col_mem')
    state.add_memlet_path(row_to_col_in, read_col_tasklet, memlet=dace.memlet.Memlet.simple(row_to_col_in, '0', num_accesses=(- 1)), dst_conn='row_pipe')
    state.add_memlet_path(read_col_tasklet, col_to_x_out, memlet=dace.memlet.Memlet.simple(col_to_x_out, '0', num_accesses=(- 1)), src_conn='col_pipe')
    # --- value reader ---
    a_val = state.add_array('A_val_device', (nnz,), dtype, transient=True, storage=StorageType.FPGA_Global)
    row_to_val_in = state.add_stream('row_to_val', itype, transient=True, storage=StorageType.FPGA_Local)
    val_to_compute_out = state.add_stream('val_to_compute', dtype, transient=True, storage=StorageType.FPGA_Local)
    read_val_sdfg = make_read_val()
    read_val_tasklet = state.add_nested_sdfg(read_val_sdfg, sdfg, {'A_val_mem', 'row_pipe'}, {'compute_pipe'})
    state.add_memlet_path(a_val, read_val_tasklet, dst_conn='A_val_mem', memlet=dace.memlet.Memlet.simple(a_val, '0:nnz'))
    state.add_memlet_path(row_to_val_in, read_val_tasklet, dst_conn='row_pipe', memlet=dace.memlet.Memlet.simple(row_to_val_in, '0', num_accesses=(- 1)))
    state.add_memlet_path(read_val_tasklet, val_to_compute_out, src_conn='compute_pipe', memlet=dace.memlet.Memlet.simple(val_to_compute_out, '0', num_accesses=(- 1)))
    # --- x gather (indexed by the column stream) ---
    x = state.add_array('x_device', (cols,), dtype, transient=True, storage=StorageType.FPGA_Global)
    row_to_x_in = state.add_stream('row_to_x', itype, transient=True, storage=StorageType.FPGA_Local)
    col_to_x_in = state.add_stream('col_to_x', itype, transient=True, storage=StorageType.FPGA_Local)
    x_to_compute_out = state.add_stream('x_to_compute', dtype, transient=True, storage=StorageType.FPGA_Local)
    read_x_sdfg = make_read_x()
    read_x_tasklet = state.add_nested_sdfg(read_x_sdfg, sdfg, {'x_mem', 'col_pipe', 'row_pipe'}, {'compute_pipe'})
    state.add_memlet_path(x, read_x_tasklet, dst_conn='x_mem', memlet=dace.memlet.Memlet.simple(x, '0:cols'))
    state.add_memlet_path(col_to_x_in, read_x_tasklet, dst_conn='col_pipe', memlet=dace.memlet.Memlet.simple(col_to_x_in, '0', num_accesses=(- 1)))
    state.add_memlet_path(row_to_x_in, read_x_tasklet, dst_conn='row_pipe', memlet=dace.memlet.Memlet.simple(row_to_x_in, '0', num_accesses=(- 1)))
    state.add_memlet_path(read_x_tasklet, x_to_compute_out, src_conn='compute_pipe', memlet=dace.memlet.Memlet.simple(x_to_compute_out, '0', num_accesses=(- 1)))
    # --- compute kernel ---
    row_to_compute_in = state.add_stream('row_to_compute', itype, transient=True, storage=StorageType.FPGA_Local)
    val_to_compute_in = state.add_stream('val_to_compute', dtype, transient=True, storage=StorageType.FPGA_Local)
    x_to_compute_in = state.add_stream('x_to_compute', dtype, transient=True, storage=StorageType.FPGA_Local)
    result_to_write_out = state.add_stream('result_to_write', dtype, transient=True, storage=StorageType.FPGA_Local)
    compute_sdfg = make_compute_sdfg()
    compute_tasklet = state.add_nested_sdfg(compute_sdfg, sdfg, {'row_pipe', 'a_pipe', 'x_pipe'}, {'b_pipe'})
    state.add_memlet_path(row_to_compute_in, compute_tasklet, dst_conn='row_pipe', memlet=dace.memlet.Memlet.simple(row_to_compute_in, '0', num_accesses=(- 1)))
    state.add_memlet_path(val_to_compute_in, compute_tasklet, dst_conn='a_pipe', memlet=dace.memlet.Memlet.simple(val_to_compute_in, '0', num_accesses=(- 1)))
    state.add_memlet_path(x_to_compute_in, compute_tasklet, dst_conn='x_pipe', memlet=dace.memlet.Memlet.simple(x_to_compute_in, '0', num_accesses=(- 1)))
    state.add_memlet_path(compute_tasklet, result_to_write_out, src_conn='b_pipe', memlet=dace.memlet.Memlet.simple(result_to_write_out, '0', num_accesses=(- 1)))
    # --- result writer ---
    result_to_write_in = state.add_stream('result_to_write', dtype, transient=True, storage=StorageType.FPGA_Local)
    b = state.add_array('b_device', (rows,), dtype, transient=True, storage=StorageType.FPGA_Global)
    write_sdfg = make_write_sdfg()
    write_tasklet = state.add_nested_sdfg(write_sdfg, sdfg, {'b_pipe'}, {'b_mem'})
    state.add_memlet_path(result_to_write_in, write_tasklet, dst_conn='b_pipe', memlet=dace.memlet.Memlet.simple(result_to_write_in, '0', num_accesses=(- 1)))
    state.add_memlet_path(write_tasklet, b, src_conn='b_mem', memlet=dace.memlet.Memlet.simple(b, '0:rows'))
    return state
def generate_matchpy_matcher(pattern_list):
    """Build a matchpy ManyToOneMatcher from a list of pattern expressions."""
    matcher = matchpy.ManyToOneMatcher()
    for expression in pattern_list:
        matcher.add(matchpy.Pattern(expression))
    return matcher
# NOTE(review): this decorator lost its '@app' prefix in this copy — the line
# read "('/predict', methods=['POST'])", which is a syntax error.  Restored
# assuming a module-level Flask app named `app`; confirm against the original.
@app.route('/predict', methods=['POST'])
def predict():
    """Flask endpoint: parse a comma-separated board from the POST form and
    return the chosen action index as plain text."""
    # NOTE(review): np.fromstring with sep is deprecated in modern NumPy;
    # left unchanged here to avoid altering parsing behavior.
    board = np.fromstring(request.form['board'], sep=',').reshape(g.getBoardSize())
    use_alpha_zero = True
    if use_alpha_zero:
        # temp=0 -> greedy (argmax) action from the MCTS policy.
        action = np.argmax(mcts.getActionProb(board, temp=0))
    else:
        action = GreedyRandomPlayer(g).play(board)
    resp = Response(str(action))
    # Allow cross-origin requests from the browser UI.
    resp.headers['Access-Control-Allow-Origin'] = '*'
    return resp
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.