code stringlengths 281 23.7M |
|---|
class VerticalTabBar(QtWidgets.QTabBar):
    """Tab bar that lays its tabs out vertically while keeping the tab
    text horizontal, by rotating the painter for each label."""

    def tabSizeHint(self, index: int) -> QtCore.QSize:
        # Swap width/height so the tab occupies a vertical footprint.
        hint = super().tabSizeHint(index)
        return hint.transposed()

    def paintEvent(self, event: QtGui.QPaintEvent) -> None:
        painter = QtWidgets.QStylePainter(self)
        option = QtWidgets.QStyleOptionTab()
        for index in range(self.count()):
            self.initStyleOption(option, index)
            # Draw the tab background/shape in its native vertical rect.
            painter.drawControl(QtWidgets.QStyle.CE_TabBarTabShape, option)
            painter.save()
            # Give the label a transposed rect centred on the tab...
            size = option.rect.size()
            size.transpose()
            label_rect = QtCore.QRect(QtCore.QPoint(), size)
            label_rect.moveCenter(option.rect.center())
            option.rect = label_rect
            # ...then rotate the painter 90 degrees about the tab centre so
            # the label text reads horizontally inside the vertical tab.
            center = QtCore.QPoint(self.tabRect(index).center())
            painter.translate(center)
            painter.rotate(90)
            painter.translate(-center)
            painter.drawControl(QtWidgets.QStyle.CE_TabBarTabLabel, option)
            painter.restore()
class VideoRecord(object):
    """Holds the per-frame feature paths and labels for a single video.

    Parameters
    ----------
    video : str
        Video identifier; a sub-directory of ``feature_dir`` containing
        ``*.npy`` per-frame feature files named ``00001.npy``, ``00002.npy``...
    feature_dir : str
        Root directory of the extracted per-frame features.
    annot_dir : str
        Root of the annotations; files are looked up under
        ``<annot_dir>/train/<label_name>/<video>.txt`` (one float per line).
    label_name : str
        Label name, or several names joined with ``_`` for multi-label data.
    test_mode : bool
        When True no annotation file is required; dummy labels of -100 are
        produced for every frame.
    """

    def __init__(self, video, feature_dir, annot_dir, label_name, test_mode=False):
        self.video = video
        self.feature_dir = feature_dir
        self.annot_dir = annot_dir
        self.label_name = label_name
        self.test_mode = test_mode
        self.path_label = self.get_path_label()

    def get_path_label(self):
        """Return ``[frame_paths, labels]`` where ``labels`` has shape
        (num_frames, num_label_tracks).

        Raises
        ------
        ValueError
            If the video has no frames, or (in training mode) an annotation
            file is missing.
        """
        frames = glob.glob(os.path.join(self.feature_dir, self.video, '*.npy'))
        frames = sorted(frames, key=lambda x: os.path.basename(x).split('.')[0])
        if len(frames) == 0:
            raise ValueError('number of frames of video {} should not be zero.'.format(self.video))
        # A name such as "arousal_valence" denotes several label tracks.
        if '_' in self.label_name:
            self.label_name = self.label_name.split('_')
        else:
            self.label_name = [self.label_name]
        annot_file = [os.path.join(self.annot_dir, 'train', ln, self.video + '.txt')
                      for ln in self.label_name]
        if (not self.test_mode) and any(not os.path.exists(file) for file in annot_file):
            raise ValueError('Annotation file not found: the training mode should always has annotation file!')
        if self.test_mode:
            # Dummy labels (-100 for each label track) for every frame.
            return [frames, np.array([[-100] * len(self.label_name)] * len(frames))]
        total_labels = []
        for file in annot_file:
            corr_frames, labels = [], []
            # was: f = open(file); ...; f.close() -- the handle leaked when
            # float() raised; a context manager closes it on any exit path.
            with open(file, 'r') as f:
                for i, x in enumerate(f):
                    label = float(x)
                    # Annotation line i corresponds to frame i+1 (1-based names).
                    corr_frame = os.path.join(self.feature_dir, self.video,
                                              '{0:05d}.npy'.format(i + 1))
                    # Skip annotation lines whose frame feature is missing.
                    if os.path.exists(corr_frame):
                        corr_frames.append(corr_frame)
                        labels.append(label)
            total_labels.append(labels)
            assert len(corr_frames) == len(labels)
        total_labels = np.asarray(total_labels)
        # (num_label_tracks, num_frames) -> (num_frames, num_label_tracks)
        total_labels = total_labels.transpose(1, 0)
        return [corr_frames, total_labels]

    def __str__(self):
        # NOTE(review): ``utterance_dict`` is never assigned by this class --
        # this method looks copied from a sibling class and will raise
        # AttributeError if called. TODO confirm the intended attribute.
        string = ''
        for key, record in self.utterance_dict.items():
            string += str(record) + '\n'
        return string
def minimize(X, f, args, maxnumlinesearch=None, maxnumfuneval=None, red=1.0, verbose=False):
    """Minimize a differentiable multivariate function by conjugate gradients.

    Port of Carl E. Rasmussen's ``minimize.m``: Polack-Ribiere conjugate
    gradients with line searches using quadratic/cubic extrapolation and
    interpolation, and the Wolfe-Powell stopping criteria.

    Parameters
    ----------
    X : ndarray
        Starting point.
    f : callable
        ``f(X, *args)`` returning ``(value, gradient)``.
    args : tuple
        Extra positional arguments forwarded to ``f``.
    maxnumlinesearch, maxnumfuneval : int, optional
        Budget for the run; give exactly one of the two.
    red : float
        Scale for the initial step-length guess.
    verbose : bool
        If True, print progress after every accepted step.

    Returns
    -------
    (X, fX, i)
        Final point, list of function values per accepted step, and the
        number of linesearches / function evaluations used.

    Raises
    ------
    ValueError
        If neither or both of the budget arguments are given.
    """
    INT = 0.1    # don't reevaluate within 0.1 of the limit of current bracket
    EXT = 3.0    # extrapolate maximum 3 times the current step-size
    MAX = 20     # max 20 function evaluations per line search
    RATIO = 10   # maximum allowed slope ratio
    SIG = 0.1
    RHO = SIG / 2   # SIG and RHO control the Wolfe-Powell conditions
    SMALL = 10.0 ** -16   # guards against division by zero
    if maxnumlinesearch is None:
        if maxnumfuneval is None:
            # was: raise 'Specify ...' -- string exceptions are a TypeError
            # in Python 3; raise a proper exception instead.
            raise ValueError('Specify maxnumlinesearch or maxnumfuneval')
        S = 'Function evaluation'
        # was: length = maxnumfuneval -- the counting convention below
        # (`i += (length < 0)` per evaluation) requires a NEGATIVE length
        # when budgeting function evaluations.
        length = -maxnumfuneval
    elif maxnumfuneval is not None:
        raise ValueError('Specify either maxnumlinesearch or maxnumfuneval (not both)')
    else:
        S = 'Linesearch'
        length = maxnumlinesearch
    i = 0            # run-length counter (linesearches or function evals)
    ls_failed = 0    # no previous line search has failed
    f0, df0 = f(X, *args)
    fX = [f0]
    i = i + (length < 0)      # count the initial evaluation if budgeting evals
    s = -df0                  # initial search direction: steepest descent
    d0 = -dot(s, s)           # ...and its slope
    x3 = red / (1.0 - d0)     # initial step-length guess
    while i < abs(length):
        i = i + (length > 0)  # count this iteration if budgeting linesearches
        # Keep a copy of the current best values in case we must back out.
        X0 = X
        F0 = f0
        dF0 = df0
        if length > 0:
            M = MAX
        else:
            M = min(MAX, -length - i)
        while True:   # extrapolation phase: keep extrapolating while needed
            x2 = 0
            f2 = f0
            d2 = d0
            f3 = f0
            df3 = df0
            success = 0
            while (not success) and (M > 0):
                try:
                    M = M - 1
                    i = i + (length < 0)
                    f3, df3 = f(X + x3 * s, *args)
                    if isnan(f3) or isinf(f3) or any(isnan(df3) + isinf(df3)):
                        print('error')
                        # NOTE(review): returns None here, unlike the normal
                        # (X, fX, i) return -- callers must handle it.
                        return
                    success = 1
                except Exception:
                    # Numerical failure inside f: bisect the step and retry.
                    x3 = (x2 + x3) / 2
            if f3 < F0:   # keep the best values seen so far
                X0 = X + x3 * s
                F0 = f3
                dF0 = df3
            d3 = dot(df3, s)   # new slope
            if (d3 > SIG * d0) or (f3 > f0 + x3 * RHO * d0) or (M == 0):
                break   # done extrapolating
            x1 = x2; f1 = f2; d1 = d2   # move point 2 to point 1
            x2 = x3; f2 = f3; d2 = d3   # move point 3 to point 2
            # Cubic extrapolation for a new trial step x3.
            A = 6 * (f1 - f2) + 3 * (d2 + d1) * (x2 - x1)
            B = 3 * (f2 - f1) - (2 * d1 + d2) * (x2 - x1)
            Z = B + sqrt(complex(B * B - A * d1 * (x2 - x1)))
            if Z != 0.0:
                x3 = x1 - d1 * (x2 - x1) ** 2 / Z   # numerical error possible
            else:
                x3 = inf
            if (not isreal(x3)) or isnan(x3) or isinf(x3) or (x3 < 0):
                x3 = x2 * EXT            # extrapolation off-limits: use maximum
            elif x3 > x2 * EXT:
                x3 = x2 * EXT            # cap at the extrapolation limit
            elif x3 < x2 + INT * (x2 - x1):
                x3 = x2 + INT * (x2 - x1)   # too close to previous point
            x3 = real(x3)
        # Interpolation phase: shrink the bracket until Wolfe-Powell holds.
        while ((abs(d3) > -SIG * d0) or (f3 > f0 + x3 * RHO * d0)) and (M > 0):
            if (d3 > 0) or (f3 > f0 + x3 * RHO * d0):   # choose subinterval
                x4 = x3; f4 = f3; d4 = d3   # move point 3 to point 4
            else:
                x2 = x3; f2 = f3; d2 = d3   # move point 3 to point 2
            # NOTE(review): as in the original MATLAB port, x4/f4/d4 can be
            # referenced before assignment on a pathological first iteration.
            if f4 > f0:
                # Quadratic interpolation.
                x3 = x2 - 0.5 * d2 * (x4 - x2) ** 2 / (f4 - f2 - d2 * (x4 - x2))
            else:
                # Cubic interpolation.
                A = 6 * (f2 - f4) / (x4 - x2) + 3 * (d4 + d2)
                B = 3 * (f4 - f2) - (2 * d2 + d4) * (x4 - x2)
                if A != 0:
                    x3 = x2 + (sqrt(B * B - A * d2 * (x4 - x2) ** 2) - B) / A
                else:
                    x3 = inf
            if isnan(x3) or isinf(x3):
                x3 = (x2 + x4) / 2   # bisect on numerical problems
            # Keep the new point inside the bracket.
            x3 = max(min(x3, x4 - INT * (x4 - x2)), x2 + INT * (x4 - x2))
            f3, df3 = f(X + x3 * s, *args)
            if f3 < F0:   # keep the best values seen so far
                X0 = X + x3 * s
                F0 = f3
                dF0 = df3
            M = M - 1
            i = i + (length < 0)
            d3 = dot(df3, s)
        if (abs(d3) < -SIG * d0) and (f3 < f0 + x3 * RHO * d0):   # success
            X = X + x3 * s
            f0 = f3
            fX.append(f0)
            if verbose:
                print('%s %6i; Value %4.6e\r' % (S, i, f0))
            # Polack-Ribiere conjugate-gradient direction.
            s = (dot(df3, df3) - dot(df0, df3)) / dot(df0, df0) * s - df3
            df0 = df3   # swap derivatives
            d3 = d0
            d0 = dot(df0, s)
            if d0 > 0:   # new slope must be negative; otherwise restart
                s = -df0
                d0 = -dot(s, s)
            x3 = x3 * min(RATIO, d3 / (d0 - SMALL))   # slope ratio, capped
            ls_failed = 0
        else:
            X = X0       # restore the best point so far
            f0 = F0
            df0 = dF0
            # Two consecutive failures, or out of budget: give up.
            if ls_failed or (i > abs(length)):
                break
            s = -df0     # try steepest descent
            d0 = -dot(s, s)
            x3 = 1 / (1 - d0)
            ls_failed = 1
    if verbose:
        print('\n')
    return (X, fX, i)
class EESP(nn.Module):
    """EESP unit: reduce (grouped 1x1) -> split -> transform (parallel
    dilated depth-wise convs with hierarchical fusion) -> merge (grouped
    1x1), with an optional identity shortcut."""

    def __init__(self, in_channels, out_channels, stride=1, k=4, r_lim=7, down_method='esp', norm_layer=nn.BatchNorm2d):
        super(EESP, self).__init__()
        self.stride = stride
        branch_ch = int(out_channels / k)
        remainder_ch = out_channels - (k - 1) * branch_ch
        assert down_method in ['avg', 'esp'], 'One of these is suppported (avg or esp)'
        assert branch_ch == remainder_ch, 'n(={}) and n1(={}) should be equal for Depth-wise Convolution '.format(branch_ch, remainder_ch)
        # Grouped point-wise projection shrinking channels to one branch.
        self.proj_1x1 = _ConvBNPReLU(in_channels, branch_ch, 1, stride=1, groups=k, norm_layer=norm_layer)
        # Effective receptive kernel size -> dilation of a 3x3 conv.
        map_receptive_ksize = {3: 1, 5: 2, 7: 3, 9: 4, 11: 5, 13: 6, 15: 7, 17: 8}
        self.k_sizes = []
        for branch in range(k):
            proposed = 3 + 2 * branch
            # Fall back to 3 when the receptive field would exceed r_lim.
            self.k_sizes.append(proposed if proposed <= r_lim else 3)
        self.k_sizes.sort()
        self.spp_dw = nn.ModuleList(
            nn.Conv2d(branch_ch, branch_ch, 3, stride, map_receptive_ksize[ks],
                      dilation=map_receptive_ksize[ks], groups=branch_ch, bias=False)
            for ks in self.k_sizes
        )
        self.conv_1x1_exp = _ConvBN(out_channels, out_channels, 1, 1, groups=k, norm_layer=norm_layer)
        self.br_after_cat = _BNPReLU(out_channels, norm_layer)
        self.module_act = nn.PReLU(out_channels)
        self.downAvg = down_method == 'avg'

    def forward(self, x):
        reduced = self.proj_1x1(x)
        branches = [self.spp_dw[0](reduced)]
        # Hierarchical feature fusion: each branch adds the previous output.
        for idx in range(1, len(self.spp_dw)):
            branches.append(self.spp_dw[idx](reduced) + branches[idx - 1])
        expanded = self.conv_1x1_exp(self.br_after_cat(torch.cat(branches, 1)))
        del branches
        # Strided avg-pool variant: no residual, return directly.
        if self.stride == 2 and self.downAvg:
            return expanded
        # Identity shortcut only when shapes match exactly.
        if expanded.size() == x.size():
            expanded = expanded + x
        return self.module_act(expanded)
# NOTE(review): the decorator '@' markers were lost here -- as bare calls the
# `_settings(...)` expressions and the patch-target string had no effect, and
# test_create would fail for lack of its injected mock. Restored as
# decorators; `_settings` is presumed to be an alias of Django's
# override_settings and `patch` to come from (unittest.)mock alongside the
# MagicMock already used below -- TODO confirm against the file's imports.
@_settings(GUEST_ENABLED=True, GUEST_LIST=['bruce_wayne'])
class TestDefaultGuest(EvenniaTest):
    """Tests for the DefaultGuest account typeclass."""
    ip = '212.216.134.22'

    @_settings(GUEST_ENABLED=False)
    def test_create_not_enabled(self):
        """No guest account is created while GUEST_ENABLED is off."""
        account, errors = DefaultGuest.authenticate(ip=self.ip)
        self.assertFalse(account, 'Guest account was created despite being disabled.')

    def test_authenticate(self):
        """One guest per GUEST_LIST entry: second login must fail."""
        account, errors = DefaultGuest.authenticate(ip=self.ip)
        self.assertTrue(account, 'Guest account should have been created.')
        account, errors = DefaultGuest.authenticate(ip=self.ip)
        self.assertFalse(account, 'Two guest accounts were created with a single entry on the guest list!')

    @patch('evennia.accounts.accounts.ChannelDB.objects.get_channel')
    def test_create(self, get_channel):
        """DefaultGuest.create succeeds with the channel lookup mocked out."""
        get_channel.connect = MagicMock(return_value=True)
        account, errors = DefaultGuest.create()
        self.assertTrue(account, 'Guest account should have been created.')
        self.assertFalse(errors)

    def test_at_post_login(self):
        """at_post_login works both with and without a session."""
        self.account.db._last_puppet = self.char1
        self.account.at_post_login(self.session)
        self.account.at_post_login()

    def test_at_server_shutdown(self):
        """Guest characters are deleted on server shutdown."""
        account, errors = DefaultGuest.create(ip=self.ip)
        self.char1.delete = MagicMock()
        account.db._playable_characters = [self.char1]
        account.at_server_shutdown()
        self.char1.delete.assert_called()

    def test_at_post_disconnect(self):
        """Guest characters are deleted when the guest disconnects."""
        account, errors = DefaultGuest.create(ip=self.ip)
        self.char1.delete = MagicMock()
        account.db._playable_characters = [self.char1]
        account.at_post_disconnect()
        self.char1.delete.assert_called()
class ResConvBlock(nn.Module):
    """Bottleneck residual block: conv1 (1x1 or kxk) -> activation ->
    conv2 (kxk, optionally re-parameterisable) -> residual add (only at
    stride 1) -> activation."""

    def __init__(self, in_c, out_c, btn_c, kernel_size, stride, act='silu', reparam=False, block_type='k1kx'):
        super(ResConvBlock, self).__init__()
        self.stride = stride
        # First conv: point-wise for 'k1kx', full-size kernel otherwise.
        first_ksize = 1 if block_type == 'k1kx' else kernel_size
        self.conv1 = ConvKXBN(in_c, btn_c, kernel_size=first_ksize, stride=1)
        # Second conv: plain conv-bn, or a re-parameterisable conv.
        if reparam:
            self.conv2 = RepConv(btn_c, out_c, kernel_size, stride, act='identity')
        else:
            self.conv2 = ConvKXBN(btn_c, out_c, kernel_size, stride)
        self.activation_function = get_activation(act)
        # Project the shortcut only when channels differ at full resolution.
        needs_projection = in_c != out_c and stride != 2
        self.residual_proj = ConvKXBN(in_c, out_c, kernel_size=1, stride=1) if needs_projection else None

    def forward(self, x):
        shortcut = x if self.residual_proj is None else self.residual_proj(x)
        out = self.activation_function(self.conv1(x))
        out = self.conv2(out)
        # Residual connection is skipped for strided (down-sampling) blocks.
        if self.stride != 2:
            out = out + shortcut
        return self.activation_function(out)
def convert_to_detectron2_names(layer_keys):
    """Translate classy/torchvision-style ResNet parameter names into
    detectron2's naming scheme (stem, res2-res5, shortcut, norm).

    Returns the converted list and asserts the mapping stays one-to-one.
    """
    # Plain substring substitutions, applied in this order.
    substrings = [
        ('_feature_blocks.conv1.', 'stem.conv1.'),
        ('_feature_blocks.bn1.', 'stem.conv1.norm.'),
        ('_feature_blocks.layer1.', 'res2.'),
        ('_feature_blocks.layer2.', 'res3.'),
        ('_feature_blocks.layer3.', 'res4.'),
        ('_feature_blocks.layer4.', 'res5.'),
        ('.downsample.0.', '.shortcut.'),
        ('.downsample.1.', '.shortcut.norm.'),
        ('.bn1.', '.conv1.bn.'),
        ('.bn2.', '.conv2.bn.'),
        ('.bn3.', '.conv3.bn.'),
    ]
    # Suffix rewrites: batch-norm parameter names become 'norm.*'.
    suffixes = [
        (r'bn\.bias$', 'norm.bias'),
        (r'bn\.weight$', 'norm.weight'),
        (r'bn\.running_mean$', 'norm.running_mean'),
        (r'bn\.running_var$', 'norm.running_var'),
    ]
    output_keys = []
    for key in layer_keys:
        for old, new in substrings:
            key = key.replace(old, new)
        for pattern, repl in suffixes:
            key = re.sub(pattern, repl, key)
        output_keys.append(key)
    assert len(output_keys) == len(set(output_keys)), 'Error in converting layer names'
    return output_keys
class MNIST_Net(nn.Module):
    """LeNet-style CNN for 28x28 single-channel images.

    Two conv/pool/ReLU stages produce a 16x4x4 feature map which a
    three-layer MLP maps to ``N`` class scores.
    """

    # Flattened size of the encoder output: 16 channels of 4x4 maps.
    _FLAT_FEATURES = 16 * 4 * 4

    def __init__(self, N=10):
        super(MNIST_Net, self).__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 6, 5),
            nn.MaxPool2d(2, 2),
            nn.ReLU(True),
            nn.Conv2d(6, 16, 5),
            nn.MaxPool2d(2, 2),
            nn.ReLU(True),
        )
        self.classifier = nn.Sequential(
            nn.Linear(self._FLAT_FEATURES, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, N),
        )

    def forward(self, x):
        features = self.encoder(x)
        flat = features.view(-1, self._FLAT_FEATURES)
        return self.classifier(flat)
# NOTE(review): the '@' of this decorator was lost in extraction -- as a bare
# call the registration had no effect. Restored; `_transform` is presumed to
# be (an alias of) the transform-registry decorator (classy_vision's
# register_transform) -- TODO confirm against the file's imports.
@_transform('imagenet_no_augment')
class ImagenetNoAugmentTransform(ClassyTransform):
    """Standard ImageNet evaluation transform: resize, centre-crop,
    tensorize, and normalize with the ImageNet mean/std."""

    def __init__(self, resize: int = ImagenetConstants.RESIZE,
                 crop_size: int = ImagenetConstants.CROP_SIZE,
                 mean: List[float] = ImagenetConstants.MEAN,
                 std: List[float] = ImagenetConstants.STD):
        self.transform = transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(crop_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std),
        ])

    def __call__(self, img):
        # Apply the composed pipeline to a PIL image.
        return self.transform(img)
def parse_args():
    """Build and parse this experiment's command-line options.

    Every option is optional with a default suitable for a quick local
    run; returns the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser()
    # (flag, default, type) -- all declared with nargs='?' as in the CLI spec.
    options = [
        ('--model', 'variationally_sparse_gp', str),
        ('--dataset', 'boston', str),
        ('--split', 0, int),
        ('--seed', 0, int),
        ('--iterations', 10, int),
        ('--num_initial_points', 3, int),
    ]
    for flag, default, value_type in options:
        parser.add_argument(flag, default=default, nargs='?', type=value_type)
    return parser.parse_args()
def voting_test(args):
    """Evaluate a pretrained PointConT classifier with repeated voting.

    Runs ``args.num_repeat`` passes over the test split; within a pass each
    batch is predicted ``args.num_vote`` times (optionally with RSMix
    augmentation) and the softmax outputs are averaged. Logs per-pass and
    best overall / class-balanced accuracy.

    Raises
    ------
    NotImplementedError
        For datasets other than ModelNet40 / ScanObjectNN.
    """
    logger = logging.getLogger(__name__)
    logger.info('Working path: %s' % str(os.getcwd()))
    logger.info('random seed is set to %s ...' % str(args.seed))
    logger.info('Load %s dataset ...' % args.dataset)
    DATA_PATH = hydra.utils.to_absolute_path(args.dataset_dir)
    if args.dataset == 'ModelNet40':
        test_loader = DataLoader(ModelNet40(DATA_PATH, partition='test', num_points=args.num_points),
                                 num_workers=8, batch_size=args.test_batch_size, shuffle=False, drop_last=False)
    elif args.dataset == 'ScanObjectNN':
        test_loader = DataLoader(ScanObjectNN(DATA_PATH, partition='test', num_points=args.num_points),
                                 num_workers=8, batch_size=args.test_batch_size, shuffle=False, drop_last=False)
    else:
        raise NotImplementedError
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    logger.info('Using GPUs : %s' % str(args.gpu))
    logger.info('Load %s model ...' % args.model_name)
    model = PointConT_cls(args).cuda()
    logger.info('Loading pretrained model...')
    checkpoint = torch.load('model.pth')
    model.load_state_dict(checkpoint['model_state_dict'])
    model = model.eval()
    best_acc = 0
    # was: best_acc_avg assigned only inside `if test_acc > best_acc` --
    # the final log lines raise NameError when no pass beats best_acc
    # (e.g. every pass scores 0.0). Initialise it alongside best_acc.
    best_acc_avg = 0
    for i in range(args.num_repeat):
        test_true = []
        test_pred = []
        for data, label in tqdm(test_loader):
            pred = 0
            for v in range(args.num_vote):
                new_data = data
                new_label = label
                r = np.random.rand(1)
                # Apply RSMix augmentation with probability args.rsmix_prob.
                if args.beta > 0 and r < args.rsmix_prob:
                    new_data = new_data.cpu().numpy()
                    new_data, lam, new_label, label_b = rsmix_provider.rsmix(
                        new_data, new_label, beta=args.beta, n_sample=args.rsmix_nsample)
                    new_data = torch.FloatTensor(new_data)
                new_data = new_data.cuda()
                with torch.no_grad():
                    # Accumulate softmax distributions across the votes.
                    pred += F.softmax(model(new_data), dim=1)
            pred /= args.num_vote
            label = label.cuda().squeeze()
            label = label.view(-1)
            pred_choice = pred.max(dim=1)[1]
            test_true.append(label.cpu().numpy())
            test_pred.append(pred_choice.detach().cpu().numpy())
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        test_acc_avg = metrics.balanced_accuracy_score(test_true, test_pred)
        if test_acc > best_acc:
            best_acc = test_acc
            best_acc_avg = test_acc_avg
        logger.info('Voting %d, test acc: %.6f, test avg acc: %.6f,' % (i + 1, test_acc * 100, test_acc_avg * 100))
        logger.info('best acc: %.6f, best avg acc: %.6f,' % (best_acc * 100, best_acc_avg * 100))
    logger.info('Final voting result test acc: %.6f, test avg acc: %.6f,' % (best_acc * 100, best_acc_avg * 100))
class F27_RepoData(F21_RepoData):
    """Fedora 27 kickstart ``repo`` command data: adds --metalink support."""
    removedKeywords = F21_RepoData.removedKeywords
    removedAttrs = F21_RepoData.removedAttrs

    def __init__(self, *args, **kwargs):
        F21_RepoData.__init__(self, *args, **kwargs)
        # Metalink URL for the repo; False when the option was not given.
        self.metalink = kwargs.get('metalink', False)

    def _getArgsAsStr(self):
        """Render this repo back into its kickstart argument string."""
        retval = F21_RepoData._getArgsAsStr(self)
        if not self.metalink:
            return retval
        return retval + (' --metalink=%s' % self.metalink)
class PBEnc(object):
    """Pseudo-Boolean constraint encoder wrapping pypblib: translates a
    weighted sum of literals compared against a bound into CNF.

    NOTE(review): the ``@classmethod`` decorators appear to have been lost in
    extraction -- every method takes ``cls`` and they call each other through
    ``cls`` -- restored below so ``PBEnc.leq(...)`` etc. work as class calls.
    """

    @classmethod
    def _update_vids(cls, cnf, inp, vpool):
        """Renumber the auxiliary variables of ``cnf`` through ``vpool``.

        ``inp`` holds the input literals whose variable ids must stay fixed;
        every other variable above the previous top receives a fresh id from
        the pool, skipping the pool's occupied id ranges.
        """
        # was: max((inp + [vpool.top])) -- `inp` may be a tuple (see _encode),
        # so coerce to list before concatenating.
        top, vmap = max(list(inp) + [vpool.top]), {}
        inp = set(abs(l) for l in inp)
        while top < cnf.nv:
            top += 1
            if top in inp:
                vmap[top] = top   # input variable: id is kept as-is
                continue
            vpool.top += 1
            # Jump over id ranges already occupied in the pool.
            while vpool._occupied and vpool.top >= vpool._occupied[0][0]:
                if vpool.top <= vpool._occupied[0][1] + 1:
                    vpool.top = vpool._occupied[0][1] + 1
                vpool._occupied.pop(0)
            vmap[top] = vpool.top
        # Rewrite every clause through the variable map, preserving polarity.
        for cl in cnf.clauses:
            cl[:] = map(lambda l: int(math.copysign(vmap[abs(l)], l)) if abs(l) in vmap else l, cl)
        cnf.nv = vpool.top

    @classmethod
    def _encode(cls, lits, weights=None, bound=1, top_id=None, vpool=None, encoding=EncType.best, comparator='<'):
        """Encode ``sum(w_i * lit_i) <comparator> bound`` into a CNF.

        ``lits`` is either a list of ints (with ``weights`` parallel or
        implicitly all 1) or a list of (literal, weight) pairs.
        """
        assert pblib_present, "Package 'pypblib' is unavailable. Check your installation."
        if encoding < 0 or encoding > 5:
            raise NoSuchEncodingError(encoding)
        assert lits, 'No literals are provided.'
        assert (not top_id) or (not vpool), 'Use either a top id or a pool of variables but not both.'
        if weights:
            assert len(lits) == len(weights), 'Same number of literals and weights is expected.'
            wlits = [pblib.WeightedLit(l, w) for l, w in zip(lits, weights)]
        elif all(map(lambda lw: type(lw) in (list, tuple) and len(lw) == 2, lits)):
            wlits = [pblib.WeightedLit(*wl) for wl in lits]
            # was: lits = zip(*lits)[0] -- zip objects are not subscriptable
            # in Python 3; materialise the first column explicitly.
            lits = list(zip(*lits))[0]
        elif all(map(lambda l: type(l) is int, lits)):
            wlits = [pblib.WeightedLit(l, 1) for l in lits]   # unit weights
        else:
            assert 0, 'Incorrect literals given.'
        if vpool:
            top_id = vpool.top
        if not top_id:
            top_id = max(map(lambda x: abs(x), lits))
        constr = pblib.PBConstraint(wlits, EncType._to_pbcmp[comparator], bound)
        varmgr = pblib.AuxVarManager(top_id + 1)
        config = pblib.PBConfig()
        config.set_PB_Encoder(EncType._to_pbenc[encoding])
        result = pblib.VectorClauseDatabase(config)
        pb2cnf = pblib.Pb2cnf(config)
        pb2cnf.encode(constr, result, varmgr)
        ret = CNF(from_clauses=result.get_clauses())
        ret.nv = max(ret.nv, top_id)
        if vpool:
            if vpool._occupied and vpool.top <= vpool._occupied[0][0] <= ret.nv:
                # was: cls._update_vids(ret, vpool, lits) -- the last two
                # arguments were swapped relative to the (cnf, inp, vpool)
                # signature, which would crash inside _update_vids.
                cls._update_vids(ret, lits, vpool)
            else:
                vpool.top = ret.nv - 1
                vpool._next()
        return ret

    @classmethod
    def leq(cls, lits, weights=None, bound=1, top_id=None, vpool=None, encoding=EncType.best):
        """Encode sum <= bound (strictness handled by the '<' comparator map)."""
        return cls._encode(lits, weights=weights, bound=bound, top_id=top_id, vpool=vpool, encoding=encoding, comparator='<')

    @classmethod
    def atmost(cls, lits, weights=None, bound=1, top_id=None, vpool=None, encoding=EncType.best):
        """Alias of :meth:`leq`."""
        return cls.leq(lits, weights=weights, bound=bound, top_id=top_id, vpool=vpool, encoding=encoding)

    @classmethod
    def geq(cls, lits, weights=None, bound=1, top_id=None, vpool=None, encoding=EncType.best):
        """Encode sum >= bound."""
        return cls._encode(lits, weights=weights, bound=bound, top_id=top_id, vpool=vpool, encoding=encoding, comparator='>')

    @classmethod
    def atleast(cls, lits, weights=None, bound=1, top_id=None, vpool=None, encoding=EncType.best):
        """Alias of :meth:`geq`."""
        return cls.geq(lits, weights=weights, bound=bound, top_id=top_id, vpool=vpool, encoding=encoding)

    @classmethod
    def equals(cls, lits, weights=None, bound=1, top_id=None, vpool=None, encoding=EncType.best):
        """Encode sum == bound."""
        return cls._encode(lits, weights=weights, bound=bound, top_id=top_id, vpool=vpool, encoding=encoding, comparator='=')
class InventoryTestCase(CommonAPIRequestTools, unittest.TestCase):
    """Request-parameter tests for the MWS Inventory API section."""
    api_class = mws.Inventory

    def test_list_inventory_supply(self):
        """ListInventorySupply builds date, response-group and SKU params."""
        start = datetime.datetime.utcnow()
        sku_list = ['thing1', 'thing2']
        params = self.api.list_inventory_supply(sku_list, start, response_group='Detailed')
        self.assert_common_params(params, action='ListInventorySupply')
        expected = {
            'QueryStartDateTime': clean_date(start),
            'ResponseGroup': 'Detailed',
            'SellerSkus.member.1': 'thing1',
            'SellerSkus.member.2': 'thing2',
        }
        for key, value in expected.items():
            self.assertEqual(params[key], value)

    def test_list_inventory_supply_by_next_token(self):
        """Passing next_token switches the action to ...ByNextToken."""
        token = 'token_foobar'
        params = self.api.list_inventory_supply(next_token=token)
        self.assert_common_params(params, action='ListInventorySupplyByNextToken')
        self.assertEqual(params['NextToken'], token)

    def test_list_inventory_supply_by_next_token_alias(self):
        """The dedicated *_by_next_token alias builds the same request."""
        token = 'token_foobar'
        params = self.api.list_inventory_supply_by_next_token(token)
        self.assert_common_params(params, action='ListInventorySupplyByNextToken')
        self.assertEqual(params['NextToken'], token)
def print_presence_view(chain_state: Any, translator: Optional[Translator] = None) -> None:
    """Print one line listing each known node address, coloured by its
    network reachability (green/red/white), optionally translated."""
    trans = (lambda s: s) if translator is None else translator.translate

    def network_state_to_color(network_state: NetworkState) -> Optional[str]:
        # States outside the map fall through to None (terminal default).
        color_map = {
            NetworkState.REACHABLE: 'green',
            NetworkState.UNREACHABLE: 'red',
            NetworkState.UNKNOWN: 'white',
        }
        return color_map.get(network_state)

    click.secho('Presence:', nl=False, fg='white')
    for address, state in chain_state.nodeaddresses_to_networkstates.items():
        click.secho(f' {trans(pex(address))}', fg=network_state_to_color(state), nl=False)
    click.echo('', nl=True)
class TestUnaryOperators(TestCase):
    def test_unary_operator(self):
        """A UnaryOperator adopts its child's domain and keeps the child."""
        sym = pybamm.Symbol('a', domain=['test'])
        op = pybamm.UnaryOperator('unary test', sym)
        self.assertEqual(op.children[0].name, sym.name)
        self.assertEqual(op.domain, sym.domain)
        # AbsoluteValue of a negated input parameter evaluates to |value|.
        param = pybamm.InputParameter('a')
        absval = pybamm.AbsoluteValue(-param)
        self.assertEqual(absval.evaluate(inputs={'a': 10}), 10)
def test_negation(self):
    """Negate: name '-', keeps the child; commutes with broadcasts."""
    a = pybamm.Symbol('a')
    nega = pybamm.Negate(a)
    self.assertEqual(nega.name, '-')
    self.assertEqual(nega.children[0].name, a.name)
    # Negating a concrete scalar evaluates numerically.
    self.assertEqual(pybamm.Negate(pybamm.Scalar(4)).evaluate(), -4)
    # Negation is pushed inside broadcasts of every nesting depth.
    cases_and_expected = [
        (pybamm.PrimaryBroadcast(a, 'test'),
         pybamm.PrimaryBroadcast(nega, 'test')),
        (pybamm.FullBroadcast(a, 'test', 'test2'),
         pybamm.FullBroadcast(nega, 'test', 'test2')),
        (pybamm.PrimaryBroadcast(pybamm.PrimaryBroadcast(a, 'test'), 'test2'),
         pybamm.PrimaryBroadcast(pybamm.PrimaryBroadcast(nega, 'test'), 'test2')),
    ]
    for broadcast, expected in cases_and_expected:
        self.assertEqual(-broadcast, expected)
    # Round-trip through the serialised JSON form.
    input_json = {
        'name': '-',
        'id': mock.ANY,
        'domains': {'primary': [], 'secondary': [], 'tertiary': [], 'quaternary': []},
        'children': [a],
    }
    self.assertEqual(pybamm.Negate._from_json(input_json), nega)
def test_absolute(self):
    """AbsoluteValue: name 'abs', keeps the child; commutes with broadcasts."""
    a = pybamm.Symbol('a')
    absa = pybamm.AbsoluteValue(a)
    self.assertEqual(absa.name, 'abs')
    self.assertEqual(absa.children[0].name, a.name)
    # |−4| evaluates to 4.
    self.assertEqual(pybamm.AbsoluteValue(pybamm.Scalar(-4)).evaluate(), 4)
    # abs() is pushed inside broadcasts of every nesting depth.
    cases_and_expected = [
        (pybamm.PrimaryBroadcast(a, 'test'),
         pybamm.PrimaryBroadcast(absa, 'test')),
        (pybamm.FullBroadcast(a, 'test', 'test2'),
         pybamm.FullBroadcast(absa, 'test', 'test2')),
        (pybamm.PrimaryBroadcast(pybamm.PrimaryBroadcast(a, 'test'), 'test2'),
         pybamm.PrimaryBroadcast(pybamm.PrimaryBroadcast(absa, 'test'), 'test2')),
    ]
    for broadcast, expected in cases_and_expected:
        self.assertEqual(abs(broadcast), expected)
    # Round-trip through the serialised JSON form.
    input_json = {
        'name': 'abs',
        'id': mock.ANY,
        'domains': {'primary': [], 'secondary': [], 'tertiary': [], 'quaternary': []},
        'children': [a],
    }
    self.assertEqual(pybamm.AbsoluteValue._from_json(input_json), absa)
def test_smooth_absolute_value(self):
    """smooth_absolute_value(y, k) approximates |y| with a tanh-like form."""
    state = pybamm.StateVector(slice(0, 1))
    expr = pybamm.smooth_absolute_value(state, 10)
    # Close to |1| and |-1|, exactly 0 at the origin.
    self.assertAlmostEqual(expr.evaluate(y=np.array([1]))[0, 0], 1)
    self.assertEqual(expr.evaluate(y=np.array([0])), 0)
    self.assertAlmostEqual(expr.evaluate(y=np.array([-1]))[0, 0], 1)
    self.assertEqual(
        str(expr),
        'y[0:1] * (exp(10.0 * y[0:1]) - exp(-10.0 * y[0:1])) / (exp(10.0 * y[0:1]) + exp(-10.0 * y[0:1]))',
    )
def test_sign(self):
    """sign() of scalars, sparse matrices, broadcasts and concatenations."""
    self.assertEqual(pybamm.sign(pybamm.Scalar(-4)).evaluate(), -1)
    # Element-wise sign of a sparse diagonal matrix.
    mat = pybamm.Matrix(diags(np.linspace(-1, 1, 5)))
    signm = pybamm.sign(mat)
    np.testing.assert_array_equal(np.diag(signm.evaluate().toarray()), [-1, -1, 0, 1, 1])
    # sign() maps through broadcasts...
    broad = pybamm.PrimaryBroadcast(-4, 'test domain')
    self.assertEqual(pybamm.sign(broad), pybamm.PrimaryBroadcast(-1, 'test domain'))
    # ...and through concatenations of broadcasts.
    conc = pybamm.Concatenation(broad, pybamm.PrimaryBroadcast(2, 'another domain'))
    self.assertEqual(
        pybamm.sign(conc),
        pybamm.Concatenation(
            pybamm.PrimaryBroadcast(-1, 'test domain'),
            pybamm.PrimaryBroadcast(1, 'another domain'),
        ),
    )
    # Sign has no JSON deserialisation.
    with self.assertRaises(NotImplementedError):
        pybamm.Sign._from_json({})
def test_floor(self):
    """Floor: name, child, numeric evaluation, JSON round-trip."""
    a = pybamm.Symbol('a')
    floora = pybamm.Floor(a)
    self.assertEqual(floora.name, 'floor')
    self.assertEqual(floora.children[0].name, a.name)
    self.assertEqual(pybamm.Floor(pybamm.Scalar(3.5)).evaluate(), 3)
    # Floor rounds towards negative infinity.
    self.assertEqual(pybamm.Floor(pybamm.Scalar(-3.2)).evaluate(), -4)
    input_json = {
        'name': 'floor',
        'id': mock.ANY,
        'domains': {'primary': [], 'secondary': [], 'tertiary': [], 'quaternary': []},
        'children': [a],
    }
    self.assertEqual(pybamm.Floor._from_json(input_json), floora)
def test_ceiling(self):
    """Ceiling: name, child, numeric evaluation, JSON round-trip."""
    a = pybamm.Symbol('a')
    ceila = pybamm.Ceiling(a)
    self.assertEqual(ceila.name, 'ceil')
    self.assertEqual(ceila.children[0].name, a.name)
    self.assertEqual(pybamm.Ceiling(pybamm.Scalar(3.5)).evaluate(), 4)
    # Ceiling rounds towards positive infinity.
    self.assertEqual(pybamm.Ceiling(pybamm.Scalar(-3.2)).evaluate(), -3)
    input_json = {
        'name': 'ceil',
        'id': mock.ANY,
        'domains': {'primary': [], 'secondary': [], 'tertiary': [], 'quaternary': []},
        'children': [a],
    }
    self.assertEqual(pybamm.Ceiling._from_json(input_json), ceila)
def test_gradient(self):
    """Gradient requires a domain and node values; grad of a broadcast
    variable simplifies to a zero edge-broadcast."""
    no_domain = pybamm.Symbol('a')
    with self.assertRaisesRegex(pybamm.DomainError, "Cannot take gradient of 'a' since its domain is empty"):
        pybamm.Gradient(no_domain)
    on_edges = pybamm.PrimaryBroadcastToEdges(pybamm.Scalar(1), 'test')
    with self.assertRaisesRegex(TypeError, 'evaluates on edges'):
        pybamm.Gradient(on_edges)
    # Gradient of a broadcast variable becomes a zero edge-broadcast.
    broadcast_var = pybamm.PrimaryBroadcast(pybamm.Variable('a'), 'test domain')
    self.assertEqual(pybamm.grad(broadcast_var), pybamm.PrimaryBroadcastToEdges(0, 'test domain'))
    # Generic case keeps the child's name and domain.
    sym = pybamm.Symbol('a', domain='test domain')
    grad = pybamm.Gradient(sym)
    self.assertEqual(grad.children[0].name, sym.name)
    self.assertEqual(grad.domain, sym.domain)
def test_div(self):
    """Divergence requires a domain and edge values; div of an edge
    broadcast simplifies to a zero node-broadcast."""
    no_domain = pybamm.Symbol('a')
    with self.assertRaisesRegex(pybamm.DomainError, "Cannot take divergence of 'a' since its domain is empty"):
        pybamm.Divergence(no_domain)
    on_nodes = pybamm.PrimaryBroadcast(pybamm.Scalar(1), 'test')
    with self.assertRaisesRegex(TypeError, 'evaluate on edges'):
        pybamm.Divergence(on_nodes)
    # div of an edge-broadcast variable becomes a zero node-broadcast.
    edge_var = pybamm.PrimaryBroadcastToEdges(pybamm.Variable('a'), 'test domain')
    self.assertEqual(pybamm.div(edge_var), pybamm.PrimaryBroadcast(0, 'test domain'))
    nested = pybamm.PrimaryBroadcastToEdges(pybamm.Variable('a', 'some domain'), 'test domain')
    self.assertEqual(
        pybamm.div(nested),
        pybamm.PrimaryBroadcast(pybamm.PrimaryBroadcast(0, 'some domain'), 'test domain'),
    )
    # div(grad(a)) keeps a's domain.
    sym = pybamm.Symbol('a', domain='test domain')
    self.assertEqual(pybamm.Divergence(pybamm.Gradient(sym)).domain, sym.domain)
    # Negation is pulled outside the divergence.
    sym = pybamm.Symbol('a', domain='test domain')
    self.assertEqual(pybamm.div(-pybamm.Gradient(sym)), -pybamm.Divergence(pybamm.Gradient(sym)))
    self.assertEqual(
        pybamm.div(-sym * pybamm.Gradient(sym)),
        -pybamm.Divergence(sym * pybamm.Gradient(sym)),
    )
def test_integral(self):
    """Integral / IndefiniteIntegral: names, integration variables and how
    auxiliary domains collapse when integrating over each level."""
    # Integrating over the primary domain drops it.
    a = pybamm.Symbol('a', domain=['negative electrode'])
    x = pybamm.SpatialVariable('x', ['negative electrode'])
    inta = pybamm.Integral(a, x)
    self.assertEqual(inta.name, "integral dx ['negative electrode']")
    self.assertEqual(inta.children[0].name, a.name)
    self.assertEqual(inta.integration_variable[0], x)
    self.assertDomainEqual(inta.domains, {})
    # Auxiliary domains shift up one level after integration.
    a_sec = pybamm.Symbol('a', domain=['negative electrode'],
                          auxiliary_domains={'secondary': 'current collector'})
    inta_sec = pybamm.Integral(a_sec, x)
    self.assertDomainEqual(inta_sec.domains, {'primary': ['current collector']})
    a_tert = pybamm.Symbol('a', domain=['negative electrode'],
                           auxiliary_domains={'secondary': 'current collector',
                                              'tertiary': 'some extra domain'})
    inta_tert = pybamm.Integral(a_tert, x)
    self.assertDomainEqual(inta_tert.domains,
                           {'primary': ['current collector'], 'secondary': ['some extra domain']})
    a_quat = pybamm.Symbol('a', domain=['negative electrode'],
                           auxiliary_domains={'secondary': 'current collector',
                                              'tertiary': 'some extra domain',
                                              'quaternary': 'another extra domain'})
    inta_quat = pybamm.Integral(a_quat, x)
    self.assertDomainEqual(inta_quat.domains,
                           {'primary': ['current collector'], 'secondary': ['some extra domain'],
                            'tertiary': ['another extra domain']})
    # Integrating over the secondary domain removes only that level.
    y = pybamm.SpatialVariable('y', ['current collector'])
    inta_sec_y = pybamm.Integral(a_sec, y)
    self.assertDomainEqual(inta_sec_y.domains, {'primary': ['negative electrode']})
    inta_tert_y = pybamm.Integral(a_tert, y)
    self.assertDomainEqual(inta_tert_y.domains,
                           {'primary': ['negative electrode'], 'secondary': ['some extra domain']})
    inta_quat_y = pybamm.Integral(a_quat, y)
    self.assertDomainEqual(inta_quat_y.domains,
                           {'primary': ['negative electrode'], 'secondary': ['some extra domain'],
                            'tertiary': ['another extra domain']})
    # ...over the tertiary domain...
    z = pybamm.SpatialVariable('z', ['some extra domain'])
    inta_tert_z = pybamm.Integral(a_tert, z)
    self.assertDomainEqual(inta_tert_z.domains,
                           {'primary': ['negative electrode'], 'secondary': ['current collector']})
    inta_quat_z = pybamm.Integral(a_quat, z)
    self.assertDomainEqual(inta_quat_z.domains,
                           {'primary': ['negative electrode'], 'secondary': ['current collector'],
                            'tertiary': ['another extra domain']})
    # ...and over the quaternary domain.
    Z = pybamm.SpatialVariable('Z', ['another extra domain'])
    inta_quat_Z = pybamm.Integral(a_quat, Z)
    self.assertDomainEqual(inta_quat_Z.domains,
                           {'primary': ['negative electrode'], 'secondary': ['current collector'],
                            'tertiary': ['some extra domain']})
    # Double integral over two variables on the same domain.
    b = pybamm.Symbol('b', domain=['current collector'])
    y = pybamm.SpatialVariable('y', ['current collector'])
    z = pybamm.SpatialVariable('z', ['current collector'])
    inta = pybamm.Integral(b, [y, z])
    self.assertEqual(inta.name, "integral dy dz ['current collector']")
    self.assertEqual(inta.children[0].name, b.name)
    self.assertEqual(inta.integration_variable[0], y)
    self.assertEqual(inta.integration_variable[1], z)
    self.assertEqual(inta.domain, [])
    # Indefinite integrals keep the integrated domain.
    inta = pybamm.IndefiniteIntegral(a, x)
    self.assertEqual(inta.name, "a integrated w.r.t x on ['negative electrode']")
    self.assertEqual(inta.children[0].name, a.name)
    self.assertEqual(inta.integration_variable[0], x)
    self.assertEqual(inta.domain, ['negative electrode'])
    inta_sec = pybamm.IndefiniteIntegral(a_sec, x)
    self.assertDomainEqual(inta_sec.domains,
                           {'primary': ['negative electrode'], 'secondary': ['current collector']})
    inta = pybamm.BackwardIndefiniteIntegral(a, x)
    self.assertEqual(inta.name, "a integrated backward w.r.t x on ['negative electrode']")
    # Error cases: mismatched domain, non-spatial variable, and a
    # multi-variable indefinite integral.
    a = pybamm.Symbol('a', domain=['negative electrode'])
    x = pybamm.SpatialVariable('x', ['separator'])
    y = pybamm.Variable('y')
    z = pybamm.SpatialVariable('z', ['negative electrode'])
    with self.assertRaises(pybamm.DomainError):
        pybamm.Integral(a, x)
    with self.assertRaisesRegex(TypeError, 'integration_variable must be'):
        pybamm.Integral(a, y)
    with self.assertRaisesRegex(NotImplementedError, 'Indefinite integral only implemented w.r.t. one variable'):
        pybamm.IndefiniteIntegral(a, [x, y])
def test_index(self):
    """Index: integer, negative and slice indexing into a StateVector."""
    vec = pybamm.StateVector(slice(0, 5))
    y_test = np.array([1, 2, 3, 4, 5])
    # Integer index -> single-element slice.
    ind = pybamm.Index(vec, 3)
    self.assertIsInstance(ind, pybamm.Index)
    self.assertEqual(ind.slice, slice(3, 4))
    self.assertEqual(ind.evaluate(y=y_test), 4)
    # Negative index -> open-ended slice from the end.
    ind = pybamm.Index(vec, -1)
    self.assertIsInstance(ind, pybamm.Index)
    self.assertEqual(ind.slice, slice(-1, None))
    self.assertEqual(ind.evaluate(y=y_test), 5)
    self.assertEqual(ind.name, 'Index[-1]')
    # Explicit slices are preserved as given.
    ind = pybamm.Index(vec, slice(1, 3))
    self.assertIsInstance(ind, pybamm.Index)
    self.assertEqual(ind.slice, slice(1, 3))
    np.testing.assert_array_equal(ind.evaluate(y=y_test), np.array([[2], [3]]))
    ind = pybamm.Index(vec, slice(3))
    self.assertIsInstance(ind, pybamm.Index)
    self.assertEqual(ind.slice, slice(3))
    np.testing.assert_array_equal(ind.evaluate(y=y_test), np.array([[1], [2], [3]]))
    # Only int or slice indices are allowed.
    with self.assertRaisesRegex(TypeError, 'index must be integer or slice'):
        pybamm.Index(vec, 0.0)
    # Out-of-range slices are caught in debug mode; restore the flag after.
    debug_mode = pybamm.settings.debug_mode
    pybamm.settings.debug_mode = True
    with self.assertRaisesRegex(ValueError, 'slice size exceeds child size'):
        pybamm.Index(vec, 5)
    pybamm.settings.debug_mode = debug_mode
def test_evaluate_at(self):
    """EvaluateAt stores the position it evaluates its child at."""
    sym = pybamm.Symbol('a', domain=['negative electrode'])
    evaluated = pybamm.EvaluateAt(sym, 1)
    self.assertEqual(evaluated.position, 1)
def test_upwind_downwind(self):
    """Upwind/Downwind require a domain and node values; both keep the
    child's name and domain."""
    no_domain = pybamm.Symbol('a')
    with self.assertRaisesRegex(pybamm.DomainError, "Cannot upwind 'a' since its domain is empty"):
        pybamm.Upwind(no_domain)
    on_edges = pybamm.PrimaryBroadcastToEdges(pybamm.Scalar(1), 'test')
    with self.assertRaisesRegex(TypeError, 'evaluate on nodes'):
        pybamm.Upwind(on_edges)
    sym = pybamm.Symbol('a', domain='test domain')
    upwind = pybamm.upwind(sym)
    self.assertIsInstance(upwind, pybamm.Upwind)
    self.assertEqual(upwind.children[0].name, sym.name)
    self.assertEqual(upwind.domain, sym.domain)
    sym = pybamm.Symbol('a', domain='test domain')
    downwind = pybamm.downwind(sym)
    self.assertIsInstance(downwind, pybamm.Downwind)
    self.assertEqual(downwind.children[0].name, sym.name)
    self.assertEqual(downwind.domain, sym.domain)
def test_diff(self):
    """Differentiation of unary operators w.r.t. a state vector entry."""
    a = pybamm.StateVector(slice(0, 1))
    y = np.array([5])
    # d(-a)/da = -1; differentiating -a w.r.t. -a gives 1.
    self.assertEqual((- a).diff(a).evaluate(y=y), (- 1))
    self.assertEqual((- a).diff((- a)).evaluate(), 1)
    # Power rule: d(a**3)/da = 3*a**2. For abs(a**3), the derivative
    # picks up the sign of the child (negative y flips it).
    self.assertEqual((a ** 3).diff(a).evaluate(y=y), (3 * (5 ** 2)))
    self.assertEqual(abs((a ** 3)).diff(a).evaluate(y=y), (3 * (5 ** 2)))
    self.assertEqual((a ** 3).diff(a).evaluate(y=(- y)), (3 * (5 ** 2)))
    self.assertEqual(abs((a ** 3)).diff(a).evaluate(y=(- y)), ((- 3) * (5 ** 2)))
    # Piecewise-constant operators have zero derivative almost everywhere.
    self.assertEqual(pybamm.sign(a).diff(a).evaluate(y=y), 0)
    self.assertEqual(pybamm.Floor(a).diff(a).evaluate(y=y), 0)
    self.assertEqual(pybamm.Ceiling(a).diff(a).evaluate(y=y), 0)
    # The generic SpatialOperator base does not implement diff.
    spatial_a = pybamm.SpatialOperator('name', a)
    with self.assertRaises(NotImplementedError):
        spatial_a.diff(a)
def test_printing(self):
    """str() renders unary operators around their child symbol."""
    symbol = pybamm.Symbol('a', domain='test')
    self.assertEqual(str((- symbol)), '-a')
    gradient = pybamm.Gradient(symbol)
    self.assertEqual(gradient.name, 'grad')
    self.assertEqual(str(gradient), 'grad(a)')
def test_eq(self):
    """Equality of UnaryOperator depends on its name and child value."""
    child = pybamm.Scalar(4)
    base = pybamm.UnaryOperator('test', child)
    # Same name and same child object: equal.
    same = pybamm.UnaryOperator('test', child)
    self.assertEqual(base, same)
    # Different operator name: not equal.
    renamed = pybamm.UnaryOperator('new test', child)
    self.assertNotEqual(base, renamed)
    # Equal-valued but distinct child object: still equal.
    equal_child = pybamm.Scalar(4)
    same_by_value = pybamm.UnaryOperator('test', equal_child)
    self.assertEqual(base, same_by_value)
    # Different child value: not equal.
    other_child = pybamm.Scalar(42)
    different = pybamm.UnaryOperator('test', other_child)
    self.assertNotEqual(base, different)
def test_delta_function(self):
    """DeltaFunction stores side/child and combines domains correctly."""
    a = pybamm.Symbol('a')
    delta_a = pybamm.DeltaFunction(a, 'right', 'some domain')
    self.assertEqual(delta_a.side, 'right')
    self.assertEqual(delta_a.child, a)
    self.assertEqual(delta_a.domain, ['some domain'])
    # A delta function lives on the nodes, not the edges.
    self.assertFalse(delta_a.evaluates_on_edges('primary'))
    # When the child has a domain of its own, it becomes the secondary domain.
    a = pybamm.Symbol('a', domain='some domain')
    delta_a = pybamm.DeltaFunction(a, 'left', 'another domain')
    self.assertEqual(delta_a.side, 'left')
    self.assertDomainEqual(delta_a.domains, {'primary': ['another domain'], 'secondary': ['some domain']})
    # A target domain is mandatory.
    with self.assertRaisesRegex(pybamm.DomainError, 'Delta function domain cannot be None'):
        delta_a = pybamm.DeltaFunction(a, 'right', None)
def test_boundary_operators(self):
    """BoundaryOperator keeps the requested side and its child."""
    child = pybamm.Symbol('a', domain='some domain')
    operator = pybamm.BoundaryOperator('boundary', child, 'right')
    self.assertEqual(operator.side, 'right')
    self.assertEqual(operator.child, child)
def test_evaluates_on_edges(self):
    """Only Upwind/Downwind produce edge-valued results."""
    a = pybamm.StateVector(slice(0, 10), domain='test')
    # Indexing keeps values on the nodes.
    self.assertFalse(pybamm.Index(a, slice(1)).evaluates_on_edges('primary'))
    # These spatial operators also stay node-valued.
    for node_operator in (pybamm.Laplacian, pybamm.GradientSquared, pybamm.BoundaryIntegral):
        self.assertFalse(node_operator(a).evaluates_on_edges('primary'))
    # Up/downwinding moves the quantity onto the edges.
    for edge_operator in (pybamm.Upwind, pybamm.Downwind):
        self.assertTrue(edge_operator(a).evaluates_on_edges('primary'))
def test_boundary_value(self):
    """boundary_value simplifications and domain-shifting behaviour."""
    # Boundary value of a scalar is the scalar itself.
    a = pybamm.Scalar(1)
    boundary_a = pybamm.boundary_value(a, 'right')
    self.assertEqual(boundary_a, a)
    # Boundary value of a broadcast collapses back to the broadcast value.
    boundary_broad_a = pybamm.boundary_value(pybamm.PrimaryBroadcast(a, ['negative electrode']), 'left')
    self.assertEqual(boundary_broad_a.evaluate(), np.array([1]))
    # A generic symbol is wrapped in BoundaryValue and its domain is consumed.
    a = pybamm.Symbol('a', domain=['separator'])
    boundary_a = pybamm.boundary_value(a, 'right')
    self.assertIsInstance(boundary_a, pybamm.BoundaryValue)
    self.assertEqual(boundary_a.side, 'right')
    self.assertDomainEqual(boundary_a.domains, {})
    # Auxiliary domains each shift up one level (secondary -> primary, ...).
    a_sec = pybamm.Symbol('a', domain=['separator'], auxiliary_domains={'secondary': 'current collector'})
    boundary_a_sec = pybamm.boundary_value(a_sec, 'right')
    self.assertDomainEqual(boundary_a_sec.domains, {'primary': ['current collector']})
    a_tert = pybamm.Symbol('a', domain=['separator'], auxiliary_domains={'secondary': 'current collector', 'tertiary': 'bla'})
    boundary_a_tert = pybamm.boundary_value(a_tert, 'right')
    self.assertDomainEqual(boundary_a_tert.domains, {'primary': ['current collector'], 'secondary': ['bla']})
    a_quat = pybamm.Symbol('a', domain=['separator'], auxiliary_domains={'secondary': 'current collector', 'tertiary': 'bla', 'quaternary': 'another domain'})
    boundary_a_quat = pybamm.boundary_value(a_quat, 'right')
    self.assertEqual(boundary_a_quat.domain, ['current collector'])
    self.assertDomainEqual(boundary_a_quat.domains, {'primary': ['current collector'], 'secondary': ['bla'], 'tertiary': ['another domain']})
    # Tab boundary values are only valid for current-collector variables.
    # NOTE(review): both tab calls sit inside one assertRaises context, so
    # only the first ever executes - TODO confirm against upstream intent.
    var = pybamm.Variable('var', domain=['negative electrode'])
    with self.assertRaisesRegex(pybamm.ModelError, 'Can only take boundary'):
        pybamm.boundary_value(var, 'negative tab')
        pybamm.boundary_value(var, 'positive tab')
    # Edge-valued symbols cannot have a boundary value taken.
    symbol_on_edges = pybamm.PrimaryBroadcastToEdges(1, 'domain')
    with self.assertRaisesRegex(ValueError, "Can't take the boundary value of a symbol that evaluates on edges"):
        pybamm.boundary_value(symbol_on_edges, 'right')
def test_boundary_gradient(self):
    """boundary_gradient wraps variables and short-circuits on zero."""
    var = pybamm.Variable('var', domain=['negative electrode'])
    result = pybamm.boundary_gradient(var, 'right')
    self.assertIsInstance(result, pybamm.BoundaryGradient)
    # The gradient of a broadcast zero simplifies straight to 0.
    zero = pybamm.PrimaryBroadcast(0, ['negative electrode'])
    result = pybamm.boundary_gradient(zero, 'right')
    self.assertEqual(result, 0)
def test_unary_simplifications(self):
    """Negation and abs of scalars simplify directly to scalars."""
    zero = pybamm.Scalar(0)
    one = pybamm.Scalar(1)
    minus_one = pybamm.Scalar((- 1))
    # (expression, expected simplified value)
    cases = (((- zero), 0), ((- one), (- 1)), (abs(zero), 0), (abs(minus_one), 1))
    for expression, expected in cases:
        self.assertIsInstance(expression, pybamm.Scalar)
        self.assertEqual(expression.evaluate(), expected)
def test_not_constant(self):
    """NotConstant wraps a constant but reports itself as non-constant."""
    wrapped = pybamm.NotConstant(pybamm.Scalar(1))
    self.assertEqual(wrapped.name, 'not_constant')
    self.assertEqual(wrapped.domain, [])
    self.assertEqual(wrapped.evaluate(), 1)
    # The Jacobian w.r.t. the state is still zero...
    self.assertEqual(wrapped.jac(pybamm.StateVector(slice(0, 1))).evaluate(), 0)
    # ...but neither the wrapper nor expressions containing it are constant.
    self.assertFalse(wrapped.is_constant())
    self.assertFalse((2 * wrapped).is_constant())
def test_to_equation(self):
    """Conversion of unary operators to sympy equations."""
    # sympy is an optional dependency; fetch it (and the vector operators)
    # lazily so the suite still imports without it.
    sympy = have_optional_dependency('sympy')
    sympy_Divergence = have_optional_dependency('sympy.vector.operators', 'Divergence')
    sympy_Gradient = have_optional_dependency('sympy.vector.operators', 'Gradient')
    a = pybamm.Symbol('a', domain='negative particle')
    b = pybamm.Symbol('b', domain='current collector')
    c = pybamm.Symbol('c', domain='test')
    d = pybamm.Symbol('d', domain=['negative electrode'])
    one = pybamm.Symbol('1', domain='negative particle')
    # An explicit print_name overrides the default rendering.
    pybamm.Floor.print_name = 'test'
    self.assertEqual(pybamm.Floor((- 2.5)).to_equation(), sympy.Symbol('test'))
    # Numeric children evaluate to plain numbers.
    self.assertEqual(pybamm.Negate(4).to_equation(), (- 4.0))
    self.assertEqual(pybamm.AbsoluteValue((- 4)).to_equation(), 4.0)
    # Spatial operators map onto sympy's vector operators.
    self.assertEqual(pybamm.Gradient(a).to_equation(), sympy_Gradient('a'))
    self.assertEqual(pybamm.Divergence(pybamm.Gradient(a)).to_equation(), sympy_Divergence(sympy_Gradient(a)))
    # Boundary values render with side-specific decorations.
    self.assertEqual(pybamm.BoundaryValue(one, 'right').to_equation(), sympy.Symbol('1'))
    self.assertEqual(pybamm.BoundaryValue(a, 'right').to_equation(), sympy.Symbol('a^{surf}'))
    self.assertEqual(pybamm.BoundaryValue(b, 'positive tab').to_equation(), sympy.Symbol(str(b)))
    self.assertEqual(pybamm.BoundaryValue(c, 'left').to_equation(), sympy.Symbol('c^{\\mathtt{\\text{left}}}'))
    # Integrals become sympy Integrals over the spatial variable.
    xn = pybamm.SpatialVariable('xn', ['negative electrode'])
    self.assertEqual(pybamm.Integral(d, xn).to_equation(), sympy.Integral('d', sympy.Symbol('xn')))
def test_explicit_time_integral(self):
    """ExplicitTimeIntegral exposes its child, initial condition and name."""
    integral = pybamm.ExplicitTimeIntegral(pybamm.Parameter('param'), pybamm.Scalar(1))
    self.assertEqual(integral.child, pybamm.Parameter('param'))
    self.assertEqual(integral.initial_condition, pybamm.Scalar(1))
    self.assertEqual(integral.name, 'explicit time integral')
    # Copies compare equal, and the node is never treated as constant.
    self.assertEqual(integral.new_copy(), integral)
    self.assertFalse(integral.is_constant())
def test_to_from_json(self):
    """JSON round-tripping of unary operators."""
    # A plain UnaryOperator serialises its name, id and domains; children
    # are re-attached separately before deserialising.
    a = pybamm.Symbol('a', domain=['test'])
    un = pybamm.UnaryOperator('unary test', a)
    un_json = {'name': 'unary test', 'id': mock.ANY, 'domains': {'primary': ['test'], 'secondary': [], 'tertiary': [], 'quaternary': []}}
    self.assertEqual(un.to_json(), un_json)
    un_json['children'] = [a]
    self.assertEqual(pybamm.UnaryOperator._from_json(un_json), un)
    # Index additionally stores its slice bounds and the size-check flag.
    vec = pybamm.StateVector(slice(0, 5))
    ind = pybamm.Index(vec, 3)
    ind_json = {'name': 'Index[3]', 'id': mock.ANY, 'index': {'start': 3, 'stop': 4, 'step': None}, 'check_size': False}
    self.assertEqual(ind.to_json(), ind_json)
    ind_json['children'] = [vec]
    self.assertEqual(pybamm.Index._from_json(ind_json), ind)
    # Generic spatial operators do not support JSON serialisation.
    spatial_vec = pybamm.SpatialOperator('name', vec)
    with self.assertRaises(NotImplementedError):
        spatial_vec.to_json()
    with self.assertRaises(NotImplementedError):
        pybamm.SpatialOperator._from_json({})
    # ExplicitTimeIntegral also round-trips, carrying its initial condition.
    expr = pybamm.ExplicitTimeIntegral(pybamm.Parameter('param'), pybamm.Scalar(1))
    expr_json = {'name': 'explicit time integral', 'id': mock.ANY}
    self.assertEqual(expr.to_json(), expr_json)
    expr_json['children'] = [pybamm.Parameter('param')]
    expr_json['initial_condition'] = [pybamm.Scalar(1)]
    self.assertEqual(pybamm.ExplicitTimeIntegral._from_json(expr_json), expr)
class HgWorkdir(Workdir):
    """Workdir backed by a Mercurial repository.

    Extracts version metadata (latest tag, distance, node, dirty state)
    from ``hg`` command output.
    """

    # NOTE(review): this reads like an alternate constructor and presumably
    # carried a @classmethod decorator upstream - confirm before use.
    def from_potential_worktree(cls, wd: _t.PathT) -> (HgWorkdir | None):
        """Return a workdir rooted at `wd`'s hg root, or None if not a repo."""
        res = _run(['hg', 'root'], wd)
        if res.returncode:
            # `hg root` failed: not inside a Mercurial working tree.
            return None
        return cls(Path(res.stdout))

    def get_meta(self, config: Configuration) -> (ScmVersion | None):
        """Compute the ScmVersion for the current revision, or None on error."""
        node: str
        tags_str: str
        bookmark: str
        node_date_str: str
        # One hg call yields node, tags, bookmark and commit date for '.'.
        (node, tags_str, bookmark, node_date_str) = self.hg_log('.', '{node}\n{tag}\n{bookmark}\n{date|shortdate}').split('\n')
        # `hg id` gives the branch, a dirty flag, and today's date when dirty.
        (branch, dirty_str, dirty_date) = _run(['hg', 'id', '-T', '{branch}\n{if(dirty, 1, 0)}\n{date|shortdate}'], cwd=self.path, check=True).stdout.split('\n')
        dirty = bool(int(dirty_str))
        # Use the wall-clock date for dirty trees, the commit date otherwise.
        node_date = datetime.date.fromisoformat((dirty_date if dirty else node_date_str))
        if (node == ('0' * len(node))):
            # All-zero node: fresh repository with no commits yet.
            log.debug('initial node %s', self.path)
            return meta(Version('0.0'), config=config, dirty=dirty, branch=branch, node_date=node_date)
        # Prefix with 'h' to distinguish hg short hashes from git ones.
        node = ('h' + node[:7])
        tags = tags_str.split()
        if ('tip' in tags):
            # 'tip' is a moving label, never a release tag.
            tags.remove('tip')
        if tags:
            # The current revision itself is tagged: exact release.
            tag = tag_to_version(tags[0], config)
            if tag:
                return meta(tag, dirty=dirty, branch=branch, config=config)
        try:
            # Otherwise walk back to the latest normalizable ancestor tag and
            # count the revisions since it.
            tag_str = self.get_latest_normalizable_tag()
            if (tag_str is None):
                dist = self.get_distance_revs('')
            else:
                dist = self.get_distance_revs(tag_str)
            if ((tag_str == 'null') or (tag_str is None)):
                tag = Version('0.0')
                # Count the null revision itself.
                dist += 1
            else:
                tag = tag_to_version(tag_str, config=config)
                assert (tag is not None)
            if (self.check_changes_since_tag(tag_str) or dirty):
                return meta(tag, distance=dist, node=node, dirty=dirty, branch=branch, config=config, node_date=node_date)
            else:
                return meta(tag, config=config, node_date=node_date)
        except ValueError as e:
            # Fall through to None on malformed hg output / versions.
            log.exception('error %s', e)
            pass
        return None

    def hg_log(self, revset: str, template: str) -> str:
        """Run `hg log` for `revset` with `template` and return its stdout."""
        cmd = ['hg', 'log', '-r', revset, '-T', template]
        return _run(cmd, cwd=self.path, check=True).stdout

    def get_latest_normalizable_tag(self) -> (str | None):
        """Return the most recent ancestor tag containing a dot, or None."""
        # Emit one line of tags per tagged ancestor revision.
        outlines = self.hg_log(revset="ancestors(.) and tag('re:\\.')", template="{tags}{if(tags, '\n', '')}").split()
        if (not outlines):
            return None
        # Last line is the most recent revision; last tag on that line wins.
        tag = outlines[(- 1)].split()[(- 1)]
        return tag

    def get_distance_revs(self, rev1: str, rev2: str='.') -> int:
        """Number of revisions between rev1 and rev2 (exclusive of rev1)."""
        revset = f'({rev1}::{rev2})'
        # Template '.' prints one character per revision in the set.
        out = self.hg_log(revset, '.')
        return (len(out) - 1)

    def check_changes_since_tag(self, tag: (str | None)) -> bool:
        """True if there are content changes on this branch since `tag`."""
        if ((tag == '0.0') or (tag is None)):
            return True
        # Merges count; pure .hgtags edits (tagging commits) do not.
        revset = f"(branch(.) and tag({tag!r})::. and (merge() or file('re:^(?!\.hgtags).*$')) and not tag({tag!r}))"
        return bool(self.hg_log(revset, '.'))
def test_issue940_metaclass_values_funcdef() -> None:
    """A metaclass-defined __members__ method is inferable through the class."""
    node = builder.extract_node("\n class BaseMeta(type):\n def __members__(cls):\n return ['a', 'func']\n class Parent(metaclass=BaseMeta):\n pass\n Parent.__members__()\n ")
    result = next(node.infer())
    assert isinstance(result, nodes.List)
    values = [const.value for const in result.elts]
    assert (values == ['a', 'func'])
class TokenizerHubInterface(object):
    """Hub wrapper that builds a fairseq tokenizer and delegates to it."""

    def __init__(self, tokenizer, **kwargs):
        super().__init__()
        # build_tokenizer expects the tokenizer name plus any extra options
        # packed into an argparse-style namespace.
        namespace = argparse.Namespace(tokenizer=tokenizer, **kwargs)
        self.tokenizer = encoders.build_tokenizer(namespace)
        assert (self.tokenizer is not None)

    def encode(self, sentence: str) -> str:
        """Tokenize a raw sentence string."""
        return self.tokenizer.encode(sentence)

    def decode(self, sentence: str) -> str:
        """Detokenize a previously encoded sentence string."""
        return self.tokenizer.decode(sentence)
# NOTE(review): this bare expression looks like a stripped hypothesis
# @given decorator - confirm against the original source.
(strategies.lists(min_size=0, max_size=3, elements=strategies.integers(min_value=0, max_value=(2 ** 31))))
def test_first_last_item(counts):
    """first_item/last_item locate the first/last non-empty category."""
    model = completionmodel.CompletionModel()
    for row_count in counts:
        category = mock.Mock(spec=['layoutChanged', 'layoutAboutToBeChanged'])
        category.rowCount = mock.Mock(return_value=row_count, spec=[])
        model.add_category(category)
    # Indices of categories that actually have rows.
    nonempty = [idx for (idx, rows) in enumerate(counts) if (rows > 0)]
    if nonempty:
        # First item is row 0 of the first non-empty category; last item is
        # the final row of the last non-empty category.
        assert (model.first_item().row() == 0)
        assert (model.first_item().parent().row() == nonempty[0])
        assert (model.last_item().row() == (counts[nonempty[(- 1)]] - 1))
        assert (model.last_item().parent().row() == nonempty[(- 1)])
    else:
        # No rows anywhere: both lookups yield invalid indexes.
        assert (not model.first_item().isValid())
        assert (not model.last_item().isValid())
def test_standard(hatch, config_file, helpers):
    """`hatch config set` persists the value and echoes the new setting."""
    result = hatch('config', 'set', 'project', 'foo')
    assert (result.exit_code == 0), result.output
    expected_output = helpers.dedent('\n New setting:\n project = "foo"\n ')
    assert (result.output == expected_output)
    # The written config file must reflect the change on reload.
    config_file.load()
    assert (config_file.model.project == 'foo')
def read_from_memory(addr, size):
    """Read `size` bytes of inferior memory at `addr`.

    Returns the gdb memory buffer, or None when no inferior is available,
    the address is null, or the memory cannot be accessed.
    """
    inferior = get_inferior()
    # Guard clause: no usable inferior, or a null pointer.
    if ((addr == 0) or (inferior == (- 1))):
        print('Error happens in read_from_memory: addr = {0:x}'.format(int(addr)))
        return None
    try:
        return inferior.read_memory(addr, size)
    except gdb.MemoryError:
        print("Can't access memory at {0:x}.".format(int(addr)))
        return None
class test_io(unittest.TestCase):
    """Tests for postgres wire-format pack/unpack helpers."""

    def test_process_tuple(self):
        """process_tuple converts column strings and reports failures."""
        # A no-op failure handler: conversion errors fall through to the
        # default RuntimeError.
        def funpass(cause, procs, tup, col):
            pass
        self.assertEqual(tuple(process_tuple((), (), funpass)), ())
        self.assertEqual(tuple(process_tuple((int,), ('100',), funpass)), (100,))
        self.assertEqual(tuple(process_tuple((int, int), ('100', '200'), funpass)), (100, 200))
        # None (SQL NULL) columns pass through unconverted.
        self.assertEqual(tuple(process_tuple((int, int), (None, '200'), funpass)), (None, 200))
        self.assertEqual(tuple(process_tuple((int, int, int), (None, None, '200'), funpass)), (None, None, 200))
        self.assertRaises(RuntimeError, process_tuple, (int,), ('foo',), funpass)
        # A raising handler: the handler sees (procs, tuple, failing column)
        # and its exception propagates, chained from the original cause.
        class ThisError(Exception):
            pass
        data = []
        def funraise(cause, procs, tup, col):
            data.append((procs, tup, col))
            raise ThisError from cause
        self.assertRaises(ThisError, process_tuple, (int,), ('foo',), funraise)
        self.assertEqual(data[0], ((int,), ('foo',), 0))
        del data[0]
        self.assertRaises(ThisError, process_tuple, (int, int), ('100', 'bar'), funraise)
        # Column index 1 is the one that failed.
        self.assertEqual(data[0], ((int, int), ('100', 'bar'), 1))

    def testExpectations(self):
        """Run the shared expectation samples through the I/O checker."""
        testExpectIO(self, expectation_samples)

    def testConsistency(self):
        """pack followed by unpack must round-trip every sample value."""
        for (id, sample) in consistency_samples.items():
            (name, pack, unpack) = id
            if (pack is not None):
                for x in sample:
                    packed = pack(x)
                    unpacked = resolve(unpack(packed))
                    x = resolve(x)
                    self.assertTrue((x == unpacked), ('inconsistency with %s, %r -> %r -> %r' % (name, x, packed, unpacked)))

    def test_hstore(self):
        """hstore binary format: malformed input rejection and round-trips."""
        # Truncated / inconsistent payloads must raise.
        self.assertRaises((ValueError, struct.error), hstore_unpack, b'\x00\x00\x00\x00foo')
        self.assertRaises(ValueError, hstore_unpack, b'\x00\x00\x00\x01')
        self.assertRaises(ValueError, hstore_unpack, b'\x00\x00\x00\x02\x00\x00\x00\x01G\x00\x00\x00\x01G')
        # (pairs, expected wire bytes); \xff\xff\xff\xff encodes NULL values.
        sample = [([('foo', 'bar'), ('k', None), ('zero', 'heroes')], ((b'\x00\x00\x00\x03\x00\x00\x00\x03foo' + b'\x00\x00\x00\x03bar\x00\x00\x00\x01k\xff\xff\xff\xff') + b'\x00\x00\x00\x04zero\x00\x00\x00\x06heroes')), ([('foo', None), ('k', None), ('zero', None)], ((b'\x00\x00\x00\x03\x00\x00\x00\x03foo' + b'\xff\xff\xff\xff\x00\x00\x00\x01k\xff\xff\xff\xff') + b'\x00\x00\x00\x04zero\xff\xff\xff\xff')), ([], b'\x00\x00\x00\x00')]
        for x in sample:
            (src, serialized) = x
            self.assertEqual(hstore_pack(src), serialized)
            self.assertEqual(hstore_unpack(serialized), dict(src))
def get_bu(model, X_test, X_test_noisy, X_test_adv):
    """Build Bayesian-uncertainty features from MC dropout predictions.

    Adversarial samples are the positive class; clean and noisy samples are
    the negatives. Returns (artifacts, labels) for detector training.
    """
    print('Getting Monte Carlo dropout variance predictions...')

    def mc_dropout_uncertainty(X):
        # Variance across MC dropout samples, averaged over output classes.
        return get_mc_predictions(model, X, batch_size=args.batch_size).var(axis=0).mean(axis=1)

    uncerts_normal = mc_dropout_uncertainty(X_test)
    uncerts_noisy = mc_dropout_uncertainty(X_test_noisy)
    uncerts_adv = mc_dropout_uncertainty(X_test_adv)
    print('uncerts_normal:', uncerts_normal.shape)
    print('uncerts_noisy:', uncerts_noisy.shape)
    print('uncerts_adv:', uncerts_adv.shape)
    negatives = np.concatenate((uncerts_normal, uncerts_noisy))
    (artifacts, labels) = merge_and_generate_labels(uncerts_adv, negatives)
    return (artifacts, labels)
def get_version(config: NsJailConfig) -> int:
    """Determine the cgroup version (1 or 2) NsJail should use.

    Probes the configured v1 controller mounts and the v2 unified mount on
    the host, cross-checking them against the `use_cgroupv2` setting.
    """
    cgroup_mounts = (config.cgroup_mem_mount, config.cgroup_pids_mount, config.cgroup_net_cls_mount, config.cgroup_cpu_mount)
    v1_exists = any((Path(mount).exists() for mount in cgroup_mounts))
    # The unified hierarchy always exposes a cgroup.controllers file.
    controllers_path = Path(config.cgroupv2_mount, 'cgroup.controllers')
    v2_exists = controllers_path.exists()
    config_version = (2 if config.use_cgroupv2 else 1)
    if (v1_exists and v2_exists):
        # Hybrid system: both hierarchies present, honour the config.
        return config_version
    if v1_exists:
        # Only v1 present; warn if the config disagrees, then force v1.
        if (config_version == 2):
            log.warning('NsJail is configured to use cgroupv2, but only cgroupv1 was detected on the system. Either use_cgroupv2 or cgroupv2_mount is incorrect. Snekbox is unable to override use_cgroupv2. If NsJail has been configured to use cgroups, then it will fail. In such case, please correct the config manually.')
        return 1
    if v2_exists:
        return 2
    # Neither hierarchy detected: fall back to whatever the config says.
    log.warning(f'Neither the cgroupv1 controller mounts, nor {str(controllers_path)!r} exists. Either cgroup_xxx_mount and cgroupv2_mount are misconfigured, or all corresponding v1 controllers are disabled on the system. Falling back to the use_cgroupv2 NsJail setting.')
    return config_version
class InputReaderBuilderTest(tf.test.TestCase):
    """Tests for building a TFRecord-backed input reader."""

    def create_tf_record(self):
        """Write a single-example TFRecord file and return its path."""
        path = os.path.join(self.get_temp_dir(), 'tfrecord')
        writer = tf.python_io.TFRecordWriter(path)
        # Random 4x5 RGB image, JPEG-encoded inside a TF session.
        image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
        with self.test_session():
            encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
        # One object covering the full image, class label 2.
        example = example_pb2.Example(features=feature_pb2.Features(feature={'image/encoded': feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=[encoded_jpeg])), 'image/format': feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=['jpeg'.encode('utf-8')])), 'image/object/bbox/xmin': feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[0.0])), 'image/object/bbox/xmax': feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[1.0])), 'image/object/bbox/ymin': feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[0.0])), 'image/object/bbox/ymax': feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[1.0])), 'image/object/class/label': feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=[2]))}))
        writer.write(example.SerializeToString())
        writer.close()
        return path

    def test_build_tf_record_input_reader(self):
        """The built reader yields the image and groundtruth we wrote."""
        tf_record_path = self.create_tf_record()
        input_reader_text_proto = "\n shuffle: false\n num_readers: 1\n tf_record_input_reader {{\n input_path: '{0}'\n }}\n ".format(tf_record_path)
        input_reader_proto = input_reader_pb2.InputReader()
        text_format.Merge(input_reader_text_proto, input_reader_proto)
        tensor_dict = input_reader_builder.build(input_reader_proto)
        sv = tf.train.Supervisor(logdir=self.get_temp_dir())
        with sv.prepare_or_wait_for_session() as sess:
            sv.start_queue_runners(sess)
            output_dict = sess.run(tensor_dict)
        # assertEqual (not the deprecated assertEquals alias, removed in
        # Python 3.12) keeps this runnable on modern interpreters.
        self.assertEqual((4, 5, 3), output_dict[fields.InputDataFields.image].shape)
        self.assertEqual([2], output_dict[fields.InputDataFields.groundtruth_classes])
        self.assertEqual((1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape)
        self.assertAllEqual([0.0, 0.0, 1.0, 1.0], output_dict[fields.InputDataFields.groundtruth_boxes][0])
def __encoding_name(codepage: int) -> str:
    """Map a Windows codepage number to a Python codec name.

    Well-known codepages get explicit codec names; anything else becomes
    'cp<N>'. Codecs Python cannot look up fall back to 'cp1252'.
    """
    known = {CP_ACP: 'mbcs', CP_OEMCP: 'oem', CP_THREAD_ACP: 'mbcs', CP_UTF16: 'utf-16', CP_UTF16BE: 'utf-16be', CP_ASCII: 'ascii', CP_UTF7: 'utf-7', CP_UTF8: 'utf-8'}
    encname = known.get(codepage, f'cp{codepage}')
    try:
        # Probe the codec by encoding a NUL byte.
        _ = '\x00'.encode(encname)
    except LookupError:
        encname = 'cp1252'
    return encname
def create_repository(namespace, name, creating_user, visibility='private', repo_kind='image', description=None):
    """Create a repository under `namespace`, returning it or None.

    Races with concurrent creation are resolved by catching the integrity
    error and returning the already-existing row. Returns None when the
    repository already existed before we started, or when the race loser
    cannot re-fetch the winner's row.
    """
    namespace_user = User.get(username=namespace)
    # Seed the action-count table with a zero entry dated yesterday.
    yesterday = (datetime.now() - timedelta(days=1))
    try:
        with db_transaction():
            existing = get_repository(namespace, name)
            if (existing is not None):
                return None
            try:
                repo = Repository.create(name=name, visibility=Repository.visibility.get_id(visibility), namespace_user=namespace_user, kind=Repository.kind.get_id(repo_kind), description=description)
            except IntegrityError as ie:
                # Unique-constraint hit: someone created it concurrently.
                raise _RepositoryExistsException(ie)
            RepositoryActionCount.create(repository=repo, count=0, date=yesterday)
            RepositorySearchScore.create(repository=repo, score=0)
            # Individual users (not orgs) get admin on their own repos.
            if (creating_user and (not creating_user.organization)):
                admin = Role.get(name='admin')
                RepositoryPermission.create(user=creating_user, repository=repo, role=admin)
    except _RepositoryExistsException as ree:
        # Lost the race: hand back the row the winner created.
        try:
            return Repository.get(namespace_user=namespace_user, name=name)
        except Repository.DoesNotExist:
            logger.error('Got integrity error when trying to create repository %s/%s: %s', namespace, name, ree.internal_exception)
            return None
    # Apply org/team default permissions when creating in another namespace.
    if (creating_user and (not creating_user.organization) and (creating_user.username != namespace)):
        permission.apply_default_permissions(repo, creating_user)
    return repo
class DataPortalTestBase(WithDataPortal, WithTradingSessions):
ASSET_FINDER_EQUITY_SIDS = (1, 2, 3)
DIVIDEND_ASSET_SID = 3
START_DATE = pd.Timestamp('2016-08-01')
END_DATE = pd.Timestamp('2016-08-08')
TRADING_CALENDAR_STRS = ('NYSE', 'us_futures')
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = True
OHLC_RATIOS_PER_SID = {10001: 100000}
def make_root_symbols_info(self):
return pd.DataFrame({'root_symbol': ['BAR', 'BUZ'], 'root_symbol_id': [1, 2], 'exchange': ['CMES', 'CMES']})
def make_futures_info(cls):
trading_sessions = cls.trading_sessions['us_futures']
return pd.DataFrame({'sid': [10000, 10001], 'root_symbol': ['BAR', 'BUZ'], 'symbol': ['BARA', 'BUZZ'], 'start_date': [trading_sessions[1], trading_sessions[0]], 'end_date': [cls.END_DATE, cls.END_DATE], 'notice_date': [cls.END_DATE, cls.END_DATE], 'expiration_date': [cls.END_DATE, cls.END_DATE], 'tick_size': [0.01, 0.0001], 'multiplier': [500, 50000], 'exchange': ['CMES', 'CMES']})
def make_equity_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Equity]
dts = trading_calendar.minutes_for_session(cls.trading_days[0])
dfs = []
dfs.append(pd.DataFrame({'open': full(len(dts), nan), 'high': full(len(dts), nan), 'low': full(len(dts), nan), 'close': full(len(dts), nan), 'volume': full(len(dts), 0)}, index=dts))
dts = trading_calendar.minutes_for_session(cls.trading_days[1])
dfs.append(pd.DataFrame({'open': append(100.5, full((len(dts) - 1), nan)), 'high': append(100.9, full((len(dts) - 1), nan)), 'low': append(100.1, full((len(dts) - 1), nan)), 'close': append(100.3, full((len(dts) - 1), nan)), 'volume': append(1000, full((len(dts) - 1), nan))}, index=dts))
dts = trading_calendar.minutes_for_session(cls.trading_days[2])
dfs.append(pd.DataFrame({'open': [nan, 103.5, 102.5, 104.5, 101.5, nan], 'high': [nan, 103.9, 102.9, 104.9, 101.9, nan], 'low': [nan, 103.1, 102.1, 104.1, 101.1, nan], 'close': [nan, 103.3, 102.3, 104.3, 101.3, nan], 'volume': [0, 1003, 1002, 1004, 1001, 0]}, index=dts[:6]))
dts = trading_calendar.minutes_for_session(cls.trading_days[3])
dfs.append(pd.DataFrame({'open': full(len(dts), nan), 'high': full(len(dts), nan), 'low': full(len(dts), nan), 'close': full(len(dts), nan), 'volume': full(len(dts), 0)}, index=dts))
asset1_df = pd.concat(dfs)
(yield (1, asset1_df))
asset2_df = pd.DataFrame({'open': 1.0055, 'high': 1.0059, 'low': 1.0051, 'close': 1.0055, 'volume': 100}, index=asset1_df.index)
(yield (2, asset2_df))
(yield (cls.DIVIDEND_ASSET_SID, asset2_df.copy()))
def make_future_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Future]
trading_sessions = cls.trading_sessions['us_futures']
dts = trading_calendar.minutes_for_session(trading_sessions[1])
dfs = []
dfs.append(pd.DataFrame({'open': full(len(dts), nan), 'high': full(len(dts), nan), 'low': full(len(dts), nan), 'close': full(len(dts), nan), 'volume': full(len(dts), 0)}, index=dts))
dts = trading_calendar.minutes_for_session(trading_sessions[2])
dfs.append(pd.DataFrame({'open': append(200.5, full((len(dts) - 1), nan)), 'high': append(200.9, full((len(dts) - 1), nan)), 'low': append(200.1, full((len(dts) - 1), nan)), 'close': append(200.3, full((len(dts) - 1), nan)), 'volume': append(2000, full((len(dts) - 1), nan))}, index=dts))
dts = trading_calendar.minutes_for_session(trading_sessions[3])
dfs.append(pd.DataFrame({'open': [nan, 203.5, 202.5, 204.5, 201.5, nan], 'high': [nan, 203.9, 202.9, 204.9, 201.9, nan], 'low': [nan, 203.1, 202.1, 204.1, 201.1, nan], 'close': [nan, 203.3, 202.3, 204.3, 201.3, nan], 'volume': [0, 2003, 2002, 2004, 2001, 0]}, index=dts[:6]))
dts = trading_calendar.minutes_for_session(trading_sessions[4])
dfs.append(pd.DataFrame({'open': full(len(dts), nan), 'high': full(len(dts), nan), 'low': full(len(dts), nan), 'close': full(len(dts), nan), 'volume': full(len(dts), 0)}, index=dts))
asset10000_df = pd.concat(dfs)
(yield (10000, asset10000_df))
missing_dts = trading_calendar.minutes_for_session(trading_sessions[0])
asset10001_df = pd.DataFrame({'open': 1.00549, 'high': 1.00591, 'low': 1.00507, 'close': 1.0055, 'volume': 100}, index=missing_dts.append(asset10000_df.index))
(yield (10001, asset10001_df))
def make_dividends_data(cls):
return pd.DataFrame([{'ex_date': cls.trading_days[2].to_datetime64(), 'record_date': cls.trading_days[2].to_datetime64(), 'declared_date': cls.trading_days[2].to_datetime64(), 'pay_date': cls.trading_days[2].to_datetime64(), 'amount': 0.5, 'sid': cls.DIVIDEND_ASSET_SID}], columns=['ex_date', 'record_date', 'declared_date', 'pay_date', 'amount', 'sid'])
def test_get_last_traded_equity_minute(self):
trading_calendar = self.trading_calendars[Equity]
dts = trading_calendar.minutes_for_session(self.trading_days[0])
asset = self.asset_finder.retrieve_asset(1)
self.assertTrue(pd.isnull(self.data_portal.get_last_traded_dt(asset, dts[0], 'minute')))
dts = trading_calendar.minutes_for_session(self.trading_days[2])
self.assertEqual(dts[1], self.data_portal.get_last_traded_dt(asset, dts[1], 'minute'))
self.assertEqual(dts[4], self.data_portal.get_last_traded_dt(asset, dts[5], 'minute'))
def test_get_last_traded_future_minute(self):
asset = self.asset_finder.retrieve_asset(10000)
trading_calendar = self.trading_calendars[Future]
dts = trading_calendar.minutes_for_session(self.trading_days[0])
self.assertTrue(pd.isnull(self.data_portal.get_last_traded_dt(asset, dts[0], 'minute')))
dts = trading_calendar.minutes_for_session(self.trading_days[3])
self.assertEqual(dts[1], self.data_portal.get_last_traded_dt(asset, dts[1], 'minute'))
self.assertEqual(dts[4], self.data_portal.get_last_traded_dt(asset, dts[5], 'minute'))
def test_get_last_traded_dt_equity_daily(self):
asset = self.asset_finder.retrieve_asset(1)
self.assertTrue(pd.isnull(self.data_portal.get_last_traded_dt(asset, self.trading_days[0], 'daily')))
self.assertEqual(self.trading_days[1], self.data_portal.get_last_traded_dt(asset, self.trading_days[1], 'daily'))
self.assertEqual(self.trading_days[2], self.data_portal.get_last_traded_dt(asset, self.trading_days[3], 'daily'))
def test_get_spot_value_equity_minute(self):
trading_calendar = self.trading_calendars[Equity]
asset = self.asset_finder.retrieve_asset(1)
dts = trading_calendar.minutes_for_session(self.trading_days[2])
dt = dts[1]
expected = OrderedDict({'open': 103.5, 'high': 103.9, 'low': 103.1, 'close': 103.3, 'volume': 1003, 'price': 103.3})
result = [self.data_portal.get_spot_value(asset, field, dt, 'minute') for field in expected.keys()]
assert_almost_equal(array(list(expected.values())), result)
dt = dts[100]
expected = OrderedDict({'open': nan, 'high': nan, 'low': nan, 'close': nan, 'volume': 0, 'price': 101.3})
result = [self.data_portal.get_spot_value(asset, field, dt, 'minute') for field in expected.keys()]
assert_almost_equal(array(list(expected.values())), result)
def test_get_spot_value_future_minute(self):
trading_calendar = self.trading_calendars[Future]
asset = self.asset_finder.retrieve_asset(10000)
dts = trading_calendar.minutes_for_session(self.trading_days[3])
dt = dts[1]
expected = OrderedDict({'open': 203.5, 'high': 203.9, 'low': 203.1, 'close': 203.3, 'volume': 2003, 'price': 203.3})
result = [self.data_portal.get_spot_value(asset, field, dt, 'minute') for field in expected.keys()]
assert_almost_equal(array(list(expected.values())), result)
dt = dts[100]
expected = OrderedDict({'open': nan, 'high': nan, 'low': nan, 'close': nan, 'volume': 0, 'price': 201.3})
result = [self.data_portal.get_spot_value(asset, field, dt, 'minute') for field in expected.keys()]
assert_almost_equal(array(list(expected.values())), result)
def test_get_spot_value_multiple_assets(self):
equity = self.asset_finder.retrieve_asset(1)
future = self.asset_finder.retrieve_asset(10000)
trading_calendar = self.trading_calendars[Future]
dts = trading_calendar.minutes_for_session(self.trading_days[3])
expected = pd.DataFrame({equity: [nan, nan, nan, nan, 0, 101.3], future: [203.5, 203.9, 203.1, 203.3, 2003, 203.3]}, index=['open', 'high', 'low', 'close', 'volume', 'price'])
result = [self.data_portal.get_spot_value(assets=[equity, future], field=field, dt=dts[1], data_frequency='minute') for field in expected.index]
assert_almost_equal(expected.values.tolist(), result)
_space(data_frequency=['daily', 'minute'], field=['close', 'price'])
def test_get_adjustments(self, data_frequency, field):
asset = self.asset_finder.retrieve_asset(self.DIVIDEND_ASSET_SID)
calendar = self.trading_calendars[Equity]
day = calendar.day
dividend_date = self.trading_days[2]
prev_day_price = 1.006
dividend_amount = 0.5
ratio = (1.0 - (dividend_amount / prev_day_price))
cases = OrderedDict([(((dividend_date - day), (dividend_date - day)), 1.0), (((dividend_date - day), dividend_date), ratio), (((dividend_date - day), (dividend_date + day)), ratio), ((dividend_date, dividend_date), 1.0), ((dividend_date, (dividend_date + day)), 1.0), (((dividend_date + day), (dividend_date + day)), 1.0)])
for ((dt, perspective_dt), expected) in iteritems(cases):
if (data_frequency == 'minute'):
dt = calendar.session_open(dt)
perspective_dt = calendar.session_open(perspective_dt)
val = self.data_portal.get_adjustments(asset, field, dt, perspective_dt)[0]
assert_almost_equal(val, expected, err_msg='at dt={} perspective={}'.format(dt, perspective_dt))
def test_bar_count_for_simple_transforms(self):
july_9_dt = (self.trading_calendar.open_and_close_for_session(pd.Timestamp('2015-07-09', tz='UTC'))[0] + Timedelta('30 minutes'))
self.assertEqual(((3 * 390) + 31), self.data_portal._get_minute_count_for_transform(july_9_dt, 4))
nov_30_dt = (self.trading_calendar.open_and_close_for_session(pd.Timestamp('2015-11-30', tz='UTC'))[0] + Timedelta('30 minutes'))
self.assertEqual((((390 + 390) + 210) + 31), self.data_portal._get_minute_count_for_transform(nov_30_dt, 4))
def test_get_last_traded_dt_minute(self):
minutes = self.nyse_calendar.minutes_for_session(self.trading_days[2])
equity = self.asset_finder.retrieve_asset(1)
result = self.data_portal.get_last_traded_dt(equity, minutes[3], 'minute')
self.assertEqual(minutes[3], result, 'Asset 1 had a trade on third minute, so should return that as the last trade on that dt.')
result = self.data_portal.get_last_traded_dt(equity, minutes[5], 'minute')
self.assertEqual(minutes[4], result, 'Asset 1 had a trade on fourth minute, so should return that as the last trade on the fifth.')
future = self.asset_finder.retrieve_asset(10000)
calendar = self.trading_calendars[Future]
minutes = calendar.minutes_for_session(self.trading_days[3])
result = self.data_portal.get_last_traded_dt(future, minutes[3], 'minute')
self.assertEqual(minutes[3], result, 'Asset 10000 had a trade on the third minute, so return that as the last trade on that dt.')
result = self.data_portal.get_last_traded_dt(future, minutes[5], 'minute')
self.assertEqual(minutes[4], result, 'Asset 10000 had a trade on fourth minute, so should return that as the last trade on the fifth.')
def test_get_empty_splits(self):
    """Requesting splits for no assets yields an empty list."""
    no_asset_splits = self.data_portal.get_splits([], self.trading_days[2])
    self.assertEqual([], no_asset_splits)
_space(frequency=HISTORY_FREQUENCIES, field=OHLCV_FIELDS)
def test_price_rounding(self, frequency, field):
    """History windows report prices rounded per asset class for every OHLCV field."""
    equity = self.asset_finder.retrieve_asset(2)
    future = self.asset_finder.retrieve_asset(10001)
    cf = self.data_portal.asset_finder.create_continuous_future('BUZ', 0, 'calendar', None)
    session_minutes = self.nyse_calendar.minutes_for_session(self.trading_days[0])
    if (frequency == '1m'):
        end_dt = session_minutes[0]
        data_frequency = 'minute'
        equity_volume = 100
        future_volume = 100
    else:
        # Daily bars are keyed by the normalized session label and aggregate a full day's volume.
        end_dt = session_minutes[0].normalize()
        data_frequency = 'daily'
        equity_volume = (100 * US_EQUITIES_MINUTES_PER_DAY)
        future_volume = (100 * FUTURES_MINUTES_PER_DAY)
    equity_ohlcv = {'open': 1.006, 'high': 1.006, 'low': 1.005, 'close': 1.006, 'volume': equity_volume}
    future_ohlcv = {'open': 1.0055, 'high': 1.0059, 'low': 1.0051, 'close': 1.0055, 'volume': future_volume}
    window = self.data_portal.get_history_window(assets=[equity, future, cf], end_dt=end_dt, bar_count=1, frequency=frequency, field=field, data_frequency=data_frequency)
    # The continuous future mirrors its underlying contract's values.
    expected_window = pd.DataFrame({equity: equity_ohlcv[field], future: future_ohlcv[field], cf: future_ohlcv[field]}, index=[end_dt], dtype=float64_dtype)
    assert_equal(window, expected_window)
class TestTestFramework(test.SimpleTest):
    """Exercises the test harness itself through a create/change/destroy cycle.

    The yield-based blocks inside ``self.create(...)`` declare terraform
    variables/outputs that get written to the named ``*.tf.json`` file.
    """

    def test_create(self):
        # Start from a clean slate so earlier runs cannot leak config files.
        workflow.delete_files('*.json')
        with self.create('one.tf.json'):
            # Declare a variable with a default and expose it as an output.
            one = (yield variable.one(default=True))
            (yield output.one(value=one))
        self.tf.init()
        outputs = self.tf.apply()
        assert (outputs == {'one': True})

    def test_change(self):
        # Redefine the first file and add a second; apply should pick up both.
        with self.create('one.tf.json'):
            one = (yield variable.one(default=False))
            (yield output.one(value=one))
        with self.create('two.tf.json'):
            two = (yield variable.two(default={'x': [1, 2, 3], 'y': 4}))
            (yield output.two(value=two))
        outputs = self.tf.apply()
        assert (outputs == {'one': False, 'two': {'x': [1, 2, 3], 'y': 4}})

    def test_destroy(self):
        # Tear down everything created by the previous tests.
        self.tf.destroy()
# NOTE(review): the bare `.parametrize(...)` below looks like a stripped
# `@pytest.mark.parametrize` decorator -- confirm against the original file.
.parametrize('direction,mechanism,purview,probability', [(Direction.CAUSE, (0,), (1,), 0.), (Direction.CAUSE, (0,), (2,), 0.), (Direction.CAUSE, (0,), (1, 2), 0.3333333), (Direction.EFFECT, (1,), (0,), 1), (Direction.EFFECT, (2,), (0,), 1), (Direction.EFFECT, (1, 2), (0,), 1)])
def test_probability(direction, mechanism, purview, probability, transition):
    # Transition probabilities must match the parametrized expected values.
    assert np.isclose(transition.probability(direction, mechanism, purview), probability)
class SpatialGate(nn.Module):
    """Spatial attention gate: channel squeeze, dilated convs, single-channel map.

    The output is a 1-channel attention map expanded back over every input
    channel, so it has the same shape as the input tensor.
    """

    def __init__(self, gate_channel, reduction_ratio=16, dilation_conv_num=2, dilation_val=4):
        super(SpatialGate, self).__init__()
        reduced = gate_channel // reduction_ratio
        pipeline = nn.Sequential()
        # 1x1 conv squeezes channels down by the reduction ratio.
        pipeline.add_module('gate_s_conv_reduce0', nn.Conv2d(gate_channel, reduced, kernel_size=1))
        pipeline.add_module('gate_s_bn_reduce0', nn.BatchNorm2d(reduced))
        pipeline.add_module('gate_s_relu_reduce0', nn.ReLU())
        # Stack of dilated 3x3 convs; padding == dilation keeps spatial size.
        for idx in range(dilation_conv_num):
            pipeline.add_module('gate_s_conv_di_%d' % idx, nn.Conv2d(reduced, reduced, kernel_size=3, padding=dilation_val, dilation=dilation_val))
            pipeline.add_module('gate_s_bn_di_%d' % idx, nn.BatchNorm2d(reduced))
            pipeline.add_module('gate_s_relu_di_%d' % idx, nn.ReLU())
        # Collapse to a single attention channel.
        pipeline.add_module('gate_s_conv_final', nn.Conv2d(reduced, 1, kernel_size=1))
        self.gate_s = pipeline

    def forward(self, in_tensor):
        # Broadcast the 1-channel gate map across all input channels.
        gate_map = self.gate_s(in_tensor)
        return gate_map.expand_as(in_tensor)
def load_checkpoint(filepath: Path) -> Dict[(str, torch.Tensor)]:
    """Load a torch checkpoint onto the CPU and return its parameter dict.

    Checkpoints may wrap the weights under a 'network' or 'state_dict' key
    ('network' takes precedence); a bare state dict is returned unchanged.
    """
    checkpoint = torch.load(filepath, map_location='cpu')
    for wrapper_key in ('network', 'state_dict'):
        if wrapper_key in checkpoint:
            return checkpoint[wrapper_key]
    return checkpoint
# NOTE(review): `_start_docstrings`, `_inputs`, `_start_docstrings_to_model_forward`
# and `_return_docstrings` below look like stripped `@add_start_docstrings`-family
# decorators from transformers -- confirm against the original file.
_start_docstrings('The bare Cvt Model transformer outputting raw hidden-states without any specific head on top.', TFCVT_START_DOCSTRING)
class TFCvtModel(TFCvtPreTrainedModel):
    """Bare TF CvT transformer: wraps TFCvtMainLayer without any task head."""

    def __init__(self, config: CvtConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # The whole model is a single main layer; this class only adapts its I/O.
        self.cvt = TFCvtMainLayer(config, name='cvt')
    _inputs
    _start_docstrings_to_model_forward(TFCVT_INPUTS_DOCSTRING)
    _return_docstrings(output_type=TFBaseModelOutputWithCLSToken, config_class=_CONFIG_FOR_DOC)

    def call(self, pixel_values: Optional[tf.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[(TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor])]:
        """Run the CvT backbone; returns a tuple when return_dict is falsy."""
        if (pixel_values is None):
            raise ValueError('You have to specify pixel_values')
        outputs = self.cvt(pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        if (not return_dict):
            # Tuple output: (last_hidden_state, cls_token_value, [hidden_states]).
            return ((outputs[0],) + outputs[1:])
        return TFBaseModelOutputWithCLSToken(last_hidden_state=outputs.last_hidden_state, cls_token_value=outputs.cls_token_value, hidden_states=outputs.hidden_states)

    def serving_output(self, output: TFBaseModelOutputWithCLSToken) -> TFBaseModelOutputWithCLSToken:
        # Pass-through: nothing needs converting for serving signatures.
        return TFBaseModelOutputWithCLSToken(last_hidden_state=output.last_hidden_state, cls_token_value=output.cls_token_value, hidden_states=output.hidden_states)
def abandonedShoppingCarts(df, DYNAMIC_CAT_CODE, ORDER_CAT_CODE):
    """Aggregate sessions whose last order/cart event was a dynamic (cart) page.

    Parameters
    ----------
    df : pandas.DataFrame or cudf.DataFrame
        Clickstream with columns 'wp_type_codes', 'tstamp_inSec',
        'wcs_user_sk' and 'session_id'.
    DYNAMIC_CAT_CODE, ORDER_CAT_CODE
        Page-type codes identifying dynamic (cart) pages and order pages.

    Returns
    -------
    One-row DataFrame with the total 'pagecount' over abandoned sessions and
    the number ('count') of such sessions.
    """
    # Keep only order/dynamic events. .copy() makes the slice a real frame so
    # the column assignment below cannot silently write to a view of `df`
    # (fixes the chained-assignment / SettingWithCopy hazard).
    filtered_df = df[((df['wp_type_codes'] == ORDER_CAT_CODE) | (df['wp_type_codes'] == DYNAMIC_CAT_CODE))].copy()
    # Encode each event as "<timestamp>:<code>" so that a per-session string
    # max() selects the latest event's code.
    filtered_df['wp_type_codes'] = filtered_df['tstamp_inSec'].astype('string').str.cat(filtered_df['wp_type_codes'].astype('string'), sep=':')
    filtered_df = filtered_df.groupby(['wcs_user_sk', 'session_id'], as_index=False, sort=False).agg({'wp_type_codes': 'max'})
    # Sessions whose most recent order/dynamic event was a dynamic (cart) page.
    last_dynamic_df = filtered_df[filtered_df['wp_type_codes'].str.contains((':' + str(DYNAMIC_CAT_CODE)), regex=False)]
    del filtered_df
    # Total page views per session, joined onto the abandoned sessions only.
    grouped_count_df = df.groupby(['wcs_user_sk', 'session_id'], as_index=False, sort=False).agg({'tstamp_inSec': 'count'})
    result = last_dynamic_df.merge(grouped_count_df, on=['wcs_user_sk', 'session_id'], how='inner')
    del (last_dynamic_df, grouped_count_df)
    if isinstance(df, cudf.DataFrame):
        return cudf.DataFrame({'pagecount': result.tstamp_inSec.sum(), 'count': len(result)})
    else:
        return pd.DataFrame({'pagecount': result.tstamp_inSec.sum(), 'count': [len(result)]})
class TransformerClassifier(nn.Module):
    """Transformer encoder plus a classification generator head.

    Optionally supports MPC (masked prediction) training: during training a
    fraction of the source is masked and a linear layer reconstructs the
    original inputs from the encoder context.
    """

    def __init__(self, encoder, generator=None, mpc=False, **kwargs):
        super().__init__()
        self.encoder = encoder
        self.generator = generator
        # Number of classes, read off the first generator head's linear layer.
        self.num_classes = self.generator[0].linear.weight.size(0)
        self.mpc = mpc
        if mpc:
            # Reconstruction head maps encoder hidden size back to input size.
            input_size = self.encoder.opt.input_size
            model_size = self.encoder.opt.model_size
            self.mpc_linear = nn.Linear(model_size, input_size)
        if (self.encoder.input_type == 'text'):
            self.src_vocab_size = self.encoder.word_lut.weight.size(0)
        else:
            # Non-text encoders (e.g. speech features) have no vocabulary.
            self.src_vocab_size = 0

    def forward(self, batch, *args, **kwargs):
        """Encode a batch and produce classification logits (plus MPC outputs when enabled)."""
        if (self.mpc and self.training):
            # Mask 20% of source positions for masked-prediction training.
            batch.mask_mpc(p=0.2)
        src = batch.get('source')
        src_pos = batch.get('source_pos')
        src_lang = batch.get('source_lang')
        src_lengths = batch.src_lengths
        # Encoder expects batch-first input.
        src = src.transpose(0, 1)
        encoder_output = self.encoder(src, input_pos=src_pos, input_lang=src_lang, src_lengths=src_lengths)
        # defaultdict so missing keys read as None downstream.
        encoder_output = defaultdict((lambda : None), encoder_output)
        context = encoder_output['context']
        output_dict = defaultdict((lambda : None))
        output_dict['hidden'] = context
        output_dict['context'] = context
        output_dict['src_mask'] = encoder_output['src_mask']
        output_dict['src'] = src
        output_dict['target_mask'] = encoder_output['src_mask']
        logprobs = self.generator[0](output_dict)['logits']
        output_dict['logprobs'] = logprobs
        if self.mpc:
            # Reconstruct masked inputs and hand through what is needed for the MPC loss.
            mpc_rec = self.mpc_linear(context)
            output_dict['mpc'] = mpc_rec
            output_dict['masked_positions'] = batch.get('masked_positions')
            output_dict['original_source'] = batch.get('original_source')
        return output_dict

    def encode(self, batch):
        """Return the per-layer encoder states for a batch (no classification head)."""
        src = batch.get('source')
        src_pos = batch.get('source_pos')
        src_lang = batch.get('source_lang')
        src_lengths = batch.src_lengths
        src = src.transpose(0, 1)
        encoder_output = self.encoder(src, input_pos=src_pos, input_lang=src_lang, src_lengths=src_lengths, return_states=True)
        layer_states = encoder_output['layer_states']
        return layer_states
class StatusBarTestCases(unittest.TestCase):
    """GUI tests for the StatusBar wrapper, driven against ControlSpy's sample exe."""

    def setUp(self):
        Timings.fast()
        # Launch the sample application and locate the status bar control.
        app = Application()
        app.start(os.path.join(controlspy_folder, 'Status bar.exe'))
        # Expected part texts and part rectangles of the sample status bar.
        self.texts = ['Long text', '', 'Status Bar']
        self.part_rects = [RECT(0, 2, 65, 22), RECT(67, 2, 90, 22), RECT(92, 2, 261, 22)]
        self.app = app
        self.dlg = app.MicrosoftControlSpy
        self.ctrl = app.MicrosoftControlSpy.StatusBar.find()

    def tearDown(self):
        # Close the test application.
        self.dlg.send_message(win32defines.WM_CLOSE)

    def test_friendly_class_name(self):
        self.assertEqual(self.ctrl.friendly_class_name(), 'StatusBar')

    def test_texts(self):
        # texts()[0] is the window text; the parts start at index 1.
        self.assertEqual(self.ctrl.texts()[1:], self.texts)

    def testGetProperties(self):
        # Every property reported must round-trip through the matching method.
        props = self.ctrl.get_properties()
        self.assertEqual(self.ctrl.friendly_class_name(), props['friendly_class_name'])
        self.assertEqual(self.ctrl.texts(), props['texts'])
        for prop_name in props:
            self.assertEqual(getattr(self.ctrl, prop_name)(), props[prop_name])

    def testBorderWidths(self):
        self.assertEqual(self.ctrl.border_widths(), dict(Horizontal=0, Vertical=2, Inter=2))

    def testPartCount(self):
        self.assertEqual(self.ctrl.part_count(), 3)

    def testPartRightEdges(self):
        for i in range(0, (self.ctrl.part_count() - 1)):
            self.assertEqual(self.ctrl.part_right_edges()[i], self.part_rects[i].right)
        # The last part stretches to the end, reported as -1.
        self.assertEqual(self.ctrl.part_right_edges()[(i + 1)], (- 1))

    def testGetPartRect(self):
        for i in range(0, self.ctrl.part_count()):
            part_rect = self.ctrl.get_part_rect(i)
            self.assertEqual(part_rect.left, self.part_rects[i].left)
            if (i != (self.ctrl.part_count() - 1)):
                self.assertEqual(part_rect.right, self.part_rects[i].right)
            self.assertEqual(part_rect.top, self.part_rects[i].top)
            # Allow small bottom differences between OS themes.
            self.assertFalse((abs((part_rect.bottom - self.part_rects[i].bottom)) > 2))
        self.assertRaises(IndexError, self.ctrl.get_part_rect, 99)

    def testClientRects(self):
        self.assertEqual(self.ctrl.client_rect(), self.ctrl.client_rects()[0])
        client_rects = self.ctrl.client_rects()[1:]
        for (i, client_rect) in enumerate(client_rects):
            self.assertEqual(self.part_rects[i].left, client_rect.left)
            if (i != (len(client_rects) - 1)):
                self.assertEqual(self.part_rects[i].right, client_rect.right)
            self.assertEqual(self.part_rects[i].top, client_rect.top)
            self.assertFalse((abs((self.part_rects[i].bottom - client_rect.bottom)) > 2))

    def testGetPartText(self):
        self.assertRaises(IndexError, self.ctrl.get_part_text, 99)
        for (i, text) in enumerate(self.texts):
            self.assertEqual(text, self.ctrl.get_part_text(i))
def test_find_files_stop_at_root_hg(wd: WorkDir, monkeypatch: pytest.MonkeyPatch) -> None:
    """find_files must not report files from a subproject until hg tracks them."""
    wd.commit_testfile()
    # A nested project directory that is not yet added to the repository.
    project = (wd.cwd / 'project')
    project.mkdir()
    project.joinpath('setup.cfg').touch()
    # Untracked: nothing should be found for the subdirectory.
    assert (setuptools_scm._file_finders.find_files(str(project)) == [])
    wd.add_and_commit()
    monkeypatch.chdir(project)
    # Once committed, the finder reports the file relative to the new cwd.
    assert (setuptools_scm._file_finders.find_files() == ['setup.cfg'])
class Screen(metaclass=ImmutableStruct):
    """Immutable global screen parameters derived from config.size."""
    # Field names exposed through the ImmutableStruct metaclass.
    _names = ['width', 'height', 'size', 'aspect']
    width = config.size[0]
    height = config.size[1]
    size = Vector2(config.size)
    aspect = (config.size[0] / config.size[1])

    def _edit(cls, width, height):
        # Update all derived fields consistently when the screen is resized.
        # NOTE(review): takes `cls` -- presumably invoked by ImmutableStruct
        # with the class object; confirm the metaclass wiring.
        cls._set('width', width)
        cls._set('height', height)
        cls._set('size', Vector2(width, height))
        cls._set('aspect', (width / height))
class UtilTestCase(unittest.TestCase):
    """Tests for the util module: time conversion, escaping, downloads, locking."""

    # NOTE(review): setUpClass/tearDownClass take `cls` but carry no
    # @classmethod decorator here -- decorators appear stripped in this file;
    # confirm against the original source.
    def setUpClass(cls):
        # Shared scratch directory for file-producing tests.
        cls.tempdir = tempfile.mkdtemp()

    def tearDownClass(cls):
        shutil.rmtree(cls.tempdir)

    def fpath(self, fn):
        # Path of a scratch file inside the class temp directory.
        return os.path.join(self.tempdir, fn)

    def testTime(self):
        # Round-trip random times through each format; accuracy depends on
        # the fractional-seconds precision of the format.
        # NOTE(review): the accuracy list has one more entry than the format
        # list; zip() silently truncates -- confirm intended.
        for (fmt, accu) in zip(['%Y-%m-%d %H:%M:%S.3FRAC', '%Y-%m-%d %H:%M:%S.2FRAC', '%Y-%m-%d %H:%M:%S.1FRAC', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H.%M.%S.3FRAC'], [0.001, 0.01, 0.1, 1.0, 0.001, 0.001]):
            (ta, tb, _, _) = util.working_system_time_range()
            for i in range(10000):
                t1 = (ta + (random() * (tb - ta)))
                s = util.time_to_str(t1, format=fmt)
                t2 = util.str_to_time(s, format=fmt)
                assert (abs((t1 - t2)) < accu)
                # The OPTFRAC variant must parse the same string equally well.
                fmt_opt = (re.sub('\\.[0-9]FRAC$', '', fmt) + '.OPTFRAC')
                t3 = util.str_to_time(s, format=fmt_opt)
                assert (abs((t1 - t3)) < accu)

    def testTimeType(self):
        t = util.str_to_time('2020-01-01 10:10:10')
        util.check_time_class(t)
        util.check_time_class(0.0)

    def testTimeTypeError(self):
        # Passing the wrong float class for times must raise.
        with self.assertRaises(util.TimestampTypeError):
            if (util.get_time_float() is float):
                try:
                    v = util.hpfloat(1.0)
                except util.HPFloatUnavailable:
                    raise unittest.SkipTest('platform does not support hpfloat')
            else:
                v = 1.0
            util.check_time_class(v)

    def testTimeRange(self):
        # The supported time range must round-trip through string conversion.
        (tmin, tmax) = util.get_working_system_time_range()[:2]
        (stmin, stmax) = map(util.time_to_str, (tmin, tmax))
        (tmin2, tmax2) = map(util.str_to_time, (stmin, stmax))
        assert (tmin == tmin2)
        assert (tmax == tmax2)
        if (sys.maxsize > (2 ** 32)):
            # On 64-bit platforms, expect a usable range of at least ~200 years.
            assert ((tmax - tmin) > ((((200 * 365) * 24) * 60) * 60))

    def testBigTime(self):
        # Far-future dates either round-trip or raise, depending on platform range.
        (ymin, ymax) = util.get_working_system_time_range()[2:]
        s = '2500-01-01 00:00:00.000'
        if (ymin <= 2500 <= ymax):
            tx = util.str_to_time(s)
            assert (s == util.time_to_str(tx))
        else:
            with self.assertRaises(util.TimeStrError):
                util.str_to_time(s)

    def testIterTimes(self):
        # Iterating months over ~3 years yields 36 months; the loop variables
        # end on the last month boundary.
        tmin = util.str_to_time('1999-03-20 20:10:10')
        tmax = util.str_to_time('2001-05-20 10:00:05')
        ii = 0
        for (ymin, ymax) in util.iter_years(tmin, tmax):
            for (mmin, mmax) in util.iter_months(ymin, ymax):
                ii += 1
                s1 = util.time_to_str(mmin)
                s2 = util.time_to_str(mmax)
        assert (ii == (12 * 3))
        assert (s1 == '2001-12-01 00:00:00.000')
        assert (s2 == '2002-01-01 00:00:00.000')

    def testTimeError(self):
        # Unparseable strings must raise TimeStrError.
        ok = False
        try:
            util.str_to_time('abc')
        except util.TimeStrError:
            ok = True
        assert ok

    def benchmark_stt_tts(self):
        # Not a test: times str/time conversion with and without the C extension.
        for x in range(2):
            if (x == 1):
                util.util_ext = None
            t = util.str_to_time('1999-03-20 20:10:10')
            tt1 = time.time()
            for i in range(10000):
                s = util.tts(t)
                util.stt(s)
            tt2 = time.time()
            print((tt2 - tt1))

    def test_consistency_merge(self):
        # With error='ignore', the first row's values win.
        data = [('a', 1, 2, 3.0), ('a', 2, 2, 3.0), ('a', 1, 2, 3.0)]
        merged = util.consistency_merge(data, error='ignore')
        assert (merged == (1, 2, 3.0))

    def test_leap_seconds(self):
        # The bundled leap-second table must agree with the system's table
        # where both are available.
        from_sys = {}
        if (platform.system() not in ('Darwin', 'Windows')):
            for (t, n) in util.read_leap_seconds():
                from_sys[t] = n
            for (t, n) in util.read_leap_seconds2():
                if (t in from_sys):
                    assert (from_sys[t] == n)

    def test_gps_utc_offset(self):
        # GPS<->UTC conversion must round-trip, including across leap seconds.
        for t_utc_0 in [x[0] for x in util.read_leap_seconds2()]:
            t_utc_0 = util.to_time_float(t_utc_0)
            ts_utc = num.linspace((t_utc_0 - 2.0), (t_utc_0 + 2.0), 17)
            for t_utc in ts_utc:
                t_gps = (t_utc + util.gps_utc_offset(t_utc))
                t_utc2 = (t_gps + util.utc_gps_offset(t_gps))
                self.assertEqual(util.tts(t_utc), util.tts(t_utc2))
            # Crossing the leap second: UTC times wrap back by one second.
            ts_gps = num.linspace((ts_utc[0] + util.gps_utc_offset(ts_utc[0])), (ts_utc[(- 1)] + util.gps_utc_offset(ts_utc[(- 1)])), (17 + 4))
            t_utc_wrapped = []
            for t_gps in ts_gps:
                t_utc = (t_gps + util.utc_gps_offset(t_gps))
                t_utc_wrapped.append((t_utc - t_utc_0))
            num.testing.assert_almost_equal(t_utc_wrapped, num.concatenate((num.linspace((- 2.0), 0.75, 12), num.linspace(0.0, 2.0, 9))))

    def test_plf_integration(self):
        # Piecewise-linear integration over bin edges, with and without
        # breakpoints falling inside the bins.
        import numpy as num
        x = num.array([1.0, 1.0, 3.0, 3.0])
        y = num.array([0.0, 1.0, 1.0, 0.0])
        x_edges = num.array([0.5, 1.5, 2.5, 3.5])
        yy = util.plf_integrate_piecewise(x_edges, x, y)
        assert num.all((num.abs((yy - num.array([0.5, 1.0, 0.5]))) < 1e-06))
        x = num.array([0.0, 1.0, 2.0, 3.0])
        y = num.array([0.0, 1.0, 1.0, 0.0])
        x_edges = num.array([0.0, 1.0, 2.0, 3.0])
        yy = util.plf_integrate_piecewise(x_edges, x, y)
        assert num.all((num.abs((yy - num.array([0.5, 1.0, 0.5]))) < 1e-06))

    def test_arange2(self):
        # arange2 rejects non-commensurate ranges unless an error policy is given.
        num.testing.assert_almost_equal(util.arange2(0.0, 1.0, 0.1), num.linspace(0.0, 1.0, 11))
        with self.assertRaises(util.ArangeError):
            util.arange2(0.0, 1.05, 0.1)
        num.testing.assert_almost_equal(util.arange2(0.0, 1.04, 0.1, error='round'), num.linspace(0.0, 1.0, 11))
        num.testing.assert_almost_equal(util.arange2(0.0, 1.05, 0.1, error='floor'), num.linspace(0.0, 1.0, 11))
        num.testing.assert_almost_equal(util.arange2(0.0, 1.05, 0.1, error='ceil'), num.linspace(0.0, 1.1, 12))

    def test_gform(self):
        # Fixed-width general number formatting across 23 orders of magnitude.
        s = ''
        for i in range((- 11), 12):
            v = ((1 / 3.0) * (10 ** i))
            s += ('|%s|\n' % util.gform(v))
        self.assertEqual(s.strip(), '\n| 3.33E-12 |\n| 3.33E-11 |\n| 3.33E-10 |\n| 3.33E-09 |\n| 3.33E-08 |\n| 3.33E-07 |\n| 3.33E-06 |\n| 3.33E-05 |\n| 3.33E-04 |\n| 3.33E-03 |\n| 3.33E-02 |\n| 0.333 |\n| 3.33 |\n| 33.3 |\n| 333. |\n| 3.33E+03 |\n| 3.33E+04 |\n| 3.33E+05 |\n| 3.33E+06 |\n| 3.33E+07 |\n| 3.33E+08 |\n| 3.33E+09 |\n| 3.33E+10 |'.strip())

    def test_download(self):
        # Download a single file and a directory, checking the status callback.
        fn = self.fpath('responses.xml')
        # NOTE(review): the URL literal below is truncated/unterminated --
        # apparently stripped during sanitization; restore from the original file.
        url = '
        stat = []

        def status(d):
            stat.append(d)
        util.download_file(url, fn, status_callback=status)
        # NOTE(review): second URL literal also truncated -- see note above.
        url = '
        dn = self.fpath('my_test_dir')
        util.download_dir(url, dn, status_callback=status)
        d = stat[(- 1)]
        dwant = {'ntotal_files': 4, 'nread_files': 4, 'ntotal_bytes_all_files': 22, 'nread_bytes_all_files': 22, 'ntotal_bytes_current_file': 8, 'nread_bytes_current_file': 8}
        for k in dwant:
            assert (k in d)
            assert (d[k] == dwant[k])

    def test_escape(self):
        # escape/unescape must round-trip random words of quotes and backslashes.
        def random_word():
            return ''.join([choice('\\\'" ') for _ in range(randint(0, 8))])
        for i in range(100):
            s1 = random_word()
            se = util.escape_s(s1)
            s2 = util.unescape_s(se)
            assert (s1 == s2)
            se = util.escape_d(s1)
            s2 = util.unescape_d(se)
            assert (s1 == s2)

    def test_qsplit(self):
        # qjoin/qsplit must round-trip arbitrary words for every separator.
        def random_word():
            return ''.join([choice(' abc\\"\'\t\n,[].') for _ in range(randint(0, 10))])

        def random_line():
            return [random_word() for _ in range(randint(0, 10))]
        for sep in (None, ',', '.', '\n', '\t', '[', ']', ' '):
            for i in range(100):
                line_in = random_line()
                for qj in (util.qjoin_s, util.qjoin_d):
                    s = qj(line_in, sep)
                    line_out = util.qsplit(s, sep)
                    assert (line_in == line_out)

    def test_qsplit_empty(self):
        # Degenerate inputs: lists of empty / whitespace-only fields round-trip too.
        for sep in (',', '.', '\n', '\t', '[', ']', ' '):
            for n in (0, 1, 2, 3):
                for qj in (util.qjoin_s, util.qjoin_d):
                    s = qj(([''] * n), sep)
                    line = util.qsplit(s, sep)
                    assert (line == ([''] * n))
                    s = qj(([' '] * n), sep)
                    line = util.qsplit(s, sep)
                    assert (line == ([' '] * n))

    def test_lockfile(self):
        # Acquiring a held lock must time out; the lock is reusable afterwards.
        fn = self.fpath('my_lock')
        with util.Lockfile(fn):
            with self.assertRaises(util.Timeout):
                with util.Lockfile(fn, timeout=0.5, timewarn=0.1):
                    pass
        with util.Lockfile(fn):
            with self.assertRaises(util.Timeout):
                with util.Lockfile(fn, timeout=0.5, timewarn=0.1):
                    pass

    def test_threadpoolctl_or_dummy(self):
        # The returned limiter must be usable as a context manager either way.
        threadpool_limits = util.get_threadpool_limits()
        with threadpool_limits(limits=1, user_api='blas'):
            pass

    def test_short_to_list(self):
        # short_to_list materializes iterators up to the threshold, passes
        # longer ones through unchanged.
        for n in range(20):
            it = util.short_to_list(10, iter(range(n)))
            if (n > 10):
                assert (not isinstance(it, list))
            else:
                assert isinstance(it, list)
            assert (list(it) == list(range(n)))
def test_ashrae():
    """ashrae() IAM values for an ndarray input and a Series round-trip (NaN preserved)."""
    angles = np.array([-90.0, -67.5, -45.0, -22.5, 0.0, 22.5, 45.0, 67.5, 89.0, 90.0, np.nan])
    want = np.array([0, 0.9193437, 0.0, 0.0, 1.0, 0.0, 0.0, 0.9193437, 0, 0, np.nan])
    # ndarray in, ndarray out with explicit b parameter.
    assert_allclose(_iam.ashrae(angles, 0.05), want, equal_nan=True)
    # Series in, Series out with the default b parameter.
    assert_series_equal(_iam.ashrae(pd.Series(angles)), pd.Series(want))
def convert_weights(layer, weights):
    """Reorder Keras-style RNN weights into a per-gate interleaved layout.

    GRU weights are split into 3 gate blocks along the last axis, LSTM and
    ConvLSTM2D into 4 (with blocks 1 and 2 swapped); Conv2DTranspose kernels
    get their axes transposed to (2, 3, 0, 1). Any other layer's weights are
    returned untouched.
    """
    kind = layer.__class__.__name__
    if (kind == 'GRU'):
        per_array = [np.split(w, 3, axis=(- 1)) for w in weights]
        # Interleave: gate-major ordering across all weight arrays.
        return [block for gate in zip(*per_array) for block in gate]
    if (kind in ('LSTM', 'ConvLSTM2D')):
        per_array = [np.split(w, 4, axis=(- 1)) for w in weights]
        for blocks in per_array:
            # Swap the second and third gate blocks to match the target order.
            blocks[1], blocks[2] = blocks[2], blocks[1]
        return [block for gate in zip(*per_array) for block in gate]
    if (kind == 'Conv2DTranspose'):
        # Transpose kernel axes (2, 3, 0, 1); bias passes through unchanged.
        return [np.transpose(weights[0], (2, 3, 0, 1)), weights[1]]
    return weights
class SponsorshipAssetsAPIListTests(APITestCase):
    """Tests for the sponsor-placement assets list endpoint (token auth + permission)."""

    def setUp(self):
        # Authenticated user holding the placement-API permission.
        self.user = baker.make('users.User')
        token = Token.objects.get(user=self.user)
        self.permission = Permission.objects.get(name='Can access sponsor placement API')
        self.user.user_permissions.add(self.permission)
        self.authorization = f'Token {token.key}'
        self.internal_name = 'txt_assets'
        self.url = (reverse_lazy('assets_list') + f'?internal_name={self.internal_name}')
        # One text asset attached to a sponsorship, one image asset attached to a sponsor.
        self.sponsorship = baker.make(Sponsorship, sponsor__name='Sponsor 1')
        self.sponsor = baker.make(Sponsor, name='Sponsor 2')
        self.txt_asset = TextAsset.objects.create(internal_name=self.internal_name, uuid=uuid.uuid4(), content_object=self.sponsorship)
        self.img_asset = ImgAsset.objects.create(internal_name='img_assets', uuid=uuid.uuid4(), content_object=self.sponsor)

    def tearDown(self):
        # Drop any uploaded image file so test runs don't accumulate media.
        if self.img_asset.has_value:
            self.img_asset.value.delete()

    def test_invalid_token(self):
        # Deleting all tokens invalidates the Authorization header.
        Token.objects.all().delete()
        response = self.client.get(self.url, HTTP_AUTHORIZATION=self.authorization)
        self.assertEqual(401, response.status_code)

    def test_superuser_user_have_permission_by_default(self):
        self.user.user_permissions.remove(self.permission)
        self.user.is_superuser = True
        self.user.is_staff = True
        self.user.save()
        response = self.client.get(self.url, HTTP_AUTHORIZATION=self.authorization)
        self.assertEqual(200, response.status_code)

    def test_staff_have_permission_by_default(self):
        self.user.user_permissions.remove(self.permission)
        self.user.is_staff = True
        self.user.save()
        response = self.client.get(self.url, HTTP_AUTHORIZATION=self.authorization)
        self.assertEqual(200, response.status_code)

    def test_user_must_have_required_permission(self):
        # Without the explicit permission, a regular user is forbidden.
        self.user.user_permissions.remove(self.permission)
        response = self.client.get(self.url, HTTP_AUTHORIZATION=self.authorization)
        self.assertEqual(403, response.status_code)

    def test_bad_request_if_no_internal_name(self):
        # internal_name is a required querystring parameter.
        url = reverse_lazy('assets_list')
        response = self.client.get(url, HTTP_AUTHORIZATION=self.authorization)
        self.assertEqual(400, response.status_code)
        self.assertIn('internal_name', response.json())

    def test_list_assets_by_internal_name(self):
        # By default, assets without a value are omitted from the listing.
        response = self.client.get(self.url, HTTP_AUTHORIZATION=self.authorization)
        data = response.json()
        self.assertEqual(200, response.status_code)
        self.assertEqual(0, len(data))
        self.txt_asset.value = 'Text Content'
        self.txt_asset.save()
        response = self.client.get(self.url, HTTP_AUTHORIZATION=self.authorization)
        data = response.json()
        self.assertEqual(1, len(data))
        self.assertEqual(data[0]['internal_name'], self.internal_name)
        self.assertEqual(data[0]['uuid'], str(self.txt_asset.uuid))
        self.assertEqual(data[0]['value'], 'Text Content')
        self.assertEqual(data[0]['content_type'], 'Sponsorship')
        self.assertEqual(data[0]['sponsor'], 'Sponsor 1')
        self.assertEqual(data[0]['sponsor_slug'], 'sponsor-1')

    def test_enable_to_filter_by_assets_with_no_value_via_querystring(self):
        # list_empty=true includes assets that have no value yet.
        self.url += '&list_empty=true'
        response = self.client.get(self.url, HTTP_AUTHORIZATION=self.authorization)
        data = response.json()
        self.assertEqual(1, len(data))
        self.assertEqual(data[0]['uuid'], str(self.txt_asset.uuid))
        self.assertEqual(data[0]['value'], '')
        self.assertEqual(data[0]['sponsor'], 'Sponsor 1')
        self.assertEqual(data[0]['sponsor_slug'], 'sponsor-1')

    def test_serialize_img_value_as_url_to_image(self):
        # Image assets serialize their value as the uploaded file's URL.
        self.img_asset.value = SimpleUploadedFile(name='test_image.jpg', content=b'content', content_type='image/jpeg')
        self.img_asset.save()
        url = (reverse_lazy('assets_list') + f'?internal_name={self.img_asset.internal_name}')
        response = self.client.get(url, HTTP_AUTHORIZATION=self.authorization)
        data = response.json()
        self.assertEqual(1, len(data))
        self.assertEqual(data[0]['uuid'], str(self.img_asset.uuid))
        self.assertEqual(data[0]['value'], self.img_asset.value.url)
        self.assertEqual(data[0]['sponsor'], 'Sponsor 2')
        self.assertEqual(data[0]['sponsor_slug'], 'sponsor-2')
# NOTE(review): the bare `.slow` below looks like a stripped
# `@pytest.mark.slow` decorator -- confirm against the original file.
.slow
def test_api_with_venv(tmpfolder):
    """create_project with the Venv extension builds .venv and installs the requested package."""
    venv_path = (Path(tmpfolder) / 'proj/.venv')
    assert (not venv_path.exists())
    api.create_project(project_path='proj', extensions=[venv.Venv()], venv_install=['pytest>=6.0.0'])
    assert venv_path.is_dir()
    # Glob one level down so the check works for both bin/ and Scripts/ layouts.
    assert list(venv_path.glob('*/python*'))
    assert list(venv_path.glob('*/pip*'))
    assert list(venv_path.glob('*/pytest*'))
class Splat2DFunction(ag.Function):
    """Autograd wrapper around the CUDA splat_forward kernel (forward only).

    Splats `values` at floating-point `coordinates` onto `input` with a
    per-sample Gaussian width `sigma`. Backward is not implemented.
    """

    # NOTE(review): forward/backward take `ctx` first but carry no
    # @staticmethod decorator -- decorators appear stripped in this file;
    # confirm against the original source.
    def forward(ctx, input, coordinates, values, sigma, soft_normalize=False):
        _splat = _import_splat()
        # The extension kernel only handles float tensors.
        assert (('FloatTensor' in coordinates.type()) and ('FloatTensor' in values.type())), 'Splat2D only takes float coordinates and values, got {} and {} instead.'.format(coordinates.type(), values.type())
        assert ((coordinates.size(0) == values.size(0)) and (coordinates.size(1) == values.size(1))), 'coordinates should be size (N, num_points, 2) and values should be size (N, num_points, *), got {} and {} instead.'.format(coordinates.shape, values.shape)
        assert ((input.size(0) == coordinates.size(0)) and (input.dim() == 4)), 'input should be of size (N, *, H, W), got {} instead'.format(input.shape)
        assert (sigma.size(0) == input.size(0)), 'sigma should be a tensor of size (N,)'
        # Kernel requires contiguous memory for all operands.
        input = input.contiguous()
        coordinates = coordinates.contiguous()
        values = values.contiguous()
        sigma = sigma.contiguous()
        if coordinates.is_cuda:
            output = _splat.splat_forward_cuda(input, coordinates, values, sigma, soft_normalize)
            ctx.params = ()
        else:
            raise NotImplementedError('Splat2D currently only has support for GPU (cuda).')
        return output

    def backward(ctx, grad_output):
        # No gradient support; forward-only operator.
        raise NotImplementedError
# NOTE(review): the bare `.parametrize(...)` below looks like a stripped
# `@pytest.mark.parametrize` decorator -- confirm against the original file.
.parametrize('n, initial', [(3, (1, [0, 1])), (3, (1, [0, 1])), (5, (1, [0, 3, 4])), (6, (1, [0, 1, 2, 3])), (7, (1, [0, 1, 5, 6])), (9, (1, [2, 4, 6]))])
def test_ffft_multi_fermionic_mode_non_power_of_2(n, initial):
    """ffft on non-power-of-2 mode counts matches the reference Fourier transform."""
    initial_state = _multi_fermionic_mode_base_state(n, *initial)
    expected_state = _fourier_transform_multi_fermionic_mode(n, *initial)
    qubits = LineQubit.range(n)
    sim = cirq.Simulator(dtype=np.complex128)
    circuit = cirq.Circuit(ffft(qubits), strategy=cirq.InsertStrategy.EARLIEST)
    state = sim.simulate(circuit, initial_state=initial_state, qubit_order=qubits).final_state_vector
    # Global phase is irrelevant for state equality.
    cirq.testing.assert_allclose_up_to_global_phase(state, expected_state, atol=1e-08)
_attr(allow_interpreted_subclasses=True)
class TraverserVisitor(NodeVisitor[None]):
def __init__(self) -> None:
pass
def visit_mypy_file(self, o: MypyFile) -> None:
for d in o.defs:
d.accept(self)
def visit_block(self, block: Block) -> None:
for s in block.body:
s.accept(self)
def visit_func(self, o: FuncItem) -> None:
if (o.arguments is not None):
for arg in o.arguments:
init = arg.initializer
if (init is not None):
init.accept(self)
for arg in o.arguments:
self.visit_var(arg.variable)
o.body.accept(self)
def visit_func_def(self, o: FuncDef) -> None:
self.visit_func(o)
def visit_overloaded_func_def(self, o: OverloadedFuncDef) -> None:
for item in o.items:
item.accept(self)
if o.impl:
o.impl.accept(self)
def visit_class_def(self, o: ClassDef) -> None:
for d in o.decorators:
d.accept(self)
for base in o.base_type_exprs:
base.accept(self)
if o.metaclass:
o.metaclass.accept(self)
for v in o.keywords.values():
v.accept(self)
o.defs.accept(self)
if o.analyzed:
o.analyzed.accept(self)
def visit_decorator(self, o: Decorator) -> None:
o.func.accept(self)
o.var.accept(self)
for decorator in o.decorators:
decorator.accept(self)
def visit_expression_stmt(self, o: ExpressionStmt) -> None:
o.expr.accept(self)
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
o.rvalue.accept(self)
for l in o.lvalues:
l.accept(self)
def visit_operator_assignment_stmt(self, o: OperatorAssignmentStmt) -> None:
o.rvalue.accept(self)
o.lvalue.accept(self)
def visit_while_stmt(self, o: WhileStmt) -> None:
o.expr.accept(self)
o.body.accept(self)
if o.else_body:
o.else_body.accept(self)
def visit_for_stmt(self, o: ForStmt) -> None:
o.index.accept(self)
o.expr.accept(self)
o.body.accept(self)
if o.else_body:
o.else_body.accept(self)
def visit_return_stmt(self, o: ReturnStmt) -> None:
if (o.expr is not None):
o.expr.accept(self)
def visit_assert_stmt(self, o: AssertStmt) -> None:
if (o.expr is not None):
o.expr.accept(self)
if (o.msg is not None):
o.msg.accept(self)
def visit_del_stmt(self, o: DelStmt) -> None:
if (o.expr is not None):
o.expr.accept(self)
def visit_if_stmt(self, o: IfStmt) -> None:
for e in o.expr:
e.accept(self)
for b in o.body:
b.accept(self)
if o.else_body:
o.else_body.accept(self)
def visit_raise_stmt(self, o: RaiseStmt) -> None:
if (o.expr is not None):
o.expr.accept(self)
if (o.from_expr is not None):
o.from_expr.accept(self)
def visit_try_stmt(self, o: TryStmt) -> None:
o.body.accept(self)
for i in range(len(o.types)):
tp = o.types[i]
if (tp is not None):
tp.accept(self)
o.handlers[i].accept(self)
for v in o.vars:
if (v is not None):
v.accept(self)
if (o.else_body is not None):
o.else_body.accept(self)
if (o.finally_body is not None):
o.finally_body.accept(self)
def visit_with_stmt(self, o: WithStmt) -> None:
for i in range(len(o.expr)):
o.expr[i].accept(self)
targ = o.target[i]
if (targ is not None):
targ.accept(self)
o.body.accept(self)
def visit_match_stmt(self, o: MatchStmt) -> None:
o.subject.accept(self)
for i in range(len(o.patterns)):
o.patterns[i].accept(self)
guard = o.guards[i]
if (guard is not None):
guard.accept(self)
o.bodies[i].accept(self)
def visit_member_expr(self, o: MemberExpr) -> None:
o.expr.accept(self)
def visit_yield_from_expr(self, o: YieldFromExpr) -> None:
o.expr.accept(self)
def visit_yield_expr(self, o: YieldExpr) -> None:
if o.expr:
o.expr.accept(self)
def visit_call_expr(self, o: CallExpr) -> None:
o.callee.accept(self)
for a in o.args:
a.accept(self)
if o.analyzed:
o.analyzed.accept(self)
def visit_op_expr(self, o: OpExpr) -> None:
o.left.accept(self)
o.right.accept(self)
if (o.analyzed is not None):
o.analyzed.accept(self)
def visit_comparison_expr(self, o: ComparisonExpr) -> None:
for operand in o.operands:
operand.accept(self)
def visit_slice_expr(self, o: SliceExpr) -> None:
if (o.begin_index is not None):
o.begin_index.accept(self)
if (o.end_index is not None):
o.end_index.accept(self)
if (o.stride is not None):
o.stride.accept(self)
def visit_cast_expr(self, o: CastExpr) -> None:
o.expr.accept(self)
def visit_assert_type_expr(self, o: AssertTypeExpr) -> None:
o.expr.accept(self)
def visit_reveal_expr(self, o: RevealExpr) -> None:
if (o.kind == REVEAL_TYPE):
assert (o.expr is not None)
o.expr.accept(self)
else:
pass
def visit_assignment_expr(self, o: AssignmentExpr) -> None:
o.target.accept(self)
o.value.accept(self)
def visit_unary_expr(self, o: UnaryExpr) -> None:
o.expr.accept(self)
def visit_list_expr(self, o: ListExpr) -> None:
for item in o.items:
item.accept(self)
def visit_tuple_expr(self, o: TupleExpr) -> None:
for item in o.items:
item.accept(self)
def visit_dict_expr(self, o: DictExpr) -> None:
for (k, v) in o.items:
if (k is not None):
k.accept(self)
v.accept(self)
def visit_set_expr(self, o: SetExpr) -> None:
for item in o.items:
item.accept(self)
def visit_index_expr(self, o: IndexExpr) -> None:
o.base.accept(self)
o.index.accept(self)
if o.analyzed:
o.analyzed.accept(self)
def visit_generator_expr(self, o: GeneratorExpr) -> None:
for (index, sequence, conditions) in zip(o.indices, o.sequences, o.condlists):
sequence.accept(self)
index.accept(self)
for cond in conditions:
cond.accept(self)
o.left_expr.accept(self)
def visit_dictionary_comprehension(self, o: DictionaryComprehension) -> None:
for (index, sequence, conditions) in zip(o.indices, o.sequences, o.condlists):
sequence.accept(self)
index.accept(self)
for cond in conditions:
cond.accept(self)
o.key.accept(self)
o.value.accept(self)
def visit_list_comprehension(self, o: ListComprehension) -> None:
o.generator.accept(self)
def visit_set_comprehension(self, o: SetComprehension) -> None:
o.generator.accept(self)
def visit_conditional_expr(self, o: ConditionalExpr) -> None:
o.cond.accept(self)
o.if_expr.accept(self)
o.else_expr.accept(self)
def visit_type_application(self, o: TypeApplication) -> None:
o.expr.accept(self)
def visit_lambda_expr(self, o: LambdaExpr) -> None:
self.visit_func(o)
def visit_star_expr(self, o: StarExpr) -> None:
o.expr.accept(self)
def visit_await_expr(self, o: AwaitExpr) -> None:
o.expr.accept(self)
def visit_super_expr(self, o: SuperExpr) -> None:
o.call.accept(self)
def visit_as_pattern(self, o: AsPattern) -> None:
if (o.pattern is not None):
o.pattern.accept(self)
if (o.name is not None):
o.name.accept(self)
def visit_or_pattern(self, o: OrPattern) -> None:
for p in o.patterns:
p.accept(self)
    def visit_value_pattern(self, o: ValuePattern) -> None:
        """Traverse the expression a value pattern compares against."""
        o.expr.accept(self)
def visit_sequence_pattern(self, o: SequencePattern) -> None:
for p in o.patterns:
p.accept(self)
def visit_starred_pattern(self, o: StarredPattern) -> None:
if (o.capture is not None):
o.capture.accept(self)
def visit_mapping_pattern(self, o: MappingPattern) -> None:
for key in o.keys:
key.accept(self)
for value in o.values:
value.accept(self)
if (o.rest is not None):
o.rest.accept(self)
def visit_class_pattern(self, o: ClassPattern) -> None:
o.class_ref.accept(self)
for p in o.positionals:
p.accept(self)
for v in o.keyword_values:
v.accept(self)
def visit_import(self, o: Import) -> None:
for a in o.assignments:
a.accept(self)
def visit_import_from(self, o: ImportFrom) -> None:
for a in o.assignments:
a.accept(self) |
# NOTE(review): the decorator line was mangled to '.requires_internet' in this
# copy; reconstructed as the usual pytest marker — confirm against upstream.
@pytest.mark.requires_internet
def test_post_install_commands(hatch, helpers, temp_dir, config_file):
    """`hatch env create` must run configured post-install commands after the dev install."""
    config_file.model.template.plugins['default']['tests'] = False
    config_file.save()
    project_name = 'My.App'
    with temp_dir.as_cwd():
        result = hatch('new', project_name)
    assert (result.exit_code == 0), result.output
    project_path = (temp_dir / 'my-app')
    data_path = (temp_dir / 'data')
    data_path.mkdir()
    project = Project(project_path)
    # Post-install command writes a sentinel file we can assert on afterwards.
    helpers.update_project_environment(project, 'default', {'post-install-commands': ['python -c "with open(\'test.txt\', \'w\') as f: f.write(\'content\')"'], **project.config.envs['default']})
    helpers.update_project_environment(project, 'test', {})
    with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
        result = hatch('env', 'create', 'test')
    assert (result.exit_code == 0), result.output
    assert (result.output == helpers.dedent('\n        Creating environment: test\n        Installing project in development mode\n        Running post-installation commands\n        Checking dependencies\n        '))
    assert (project_path / 'test.txt').is_file()
def main(args):
    """Train a PASD ControlNet (plus selected UNet modules) with HF Accelerate.

    Loads the frozen Stable Diffusion components (VAE, text encoder, noise
    scheduler), builds or resumes a ControlNet, then runs the standard
    noise-prediction training loop with an extra L1 reconstruction term on the
    controlnet's intermediate output.
    """
    # Select the light or full PASD model implementations.
    if args.use_pasd_light:
        from models.pasd_light.unet_2d_condition import UNet2DConditionModel
        from models.pasd_light.controlnet import ControlNetModel
    else:
        from models.pasd.unet_2d_condition import UNet2DConditionModel
        from models.pasd.controlnet import ControlNetModel
    logging_dir = Path(args.output_dir, args.logging_dir)
    accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, logging_dir=logging_dir, project_config=accelerator_project_config)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    logger.info(accelerator.state, main_process_only=False)
    # One verbose process per node; everything else stays quiet.
    if accelerator.is_local_main_process:
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()
    if args.seed is not None:
        set_seed(args.seed)
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        if args.push_to_hub:
            repo_id = create_repo(repo_id=(args.hub_model_id or Path(args.output_dir).name), exist_ok=True, token=args.hub_token).repo_id
    # Tokenizer and frozen SD components.
    if args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
    elif args.pretrained_model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer', revision=args.revision, use_fast=False)
    text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
    noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder='scheduler')
    text_encoder = text_encoder_cls.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder', revision=args.revision)
    vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae', revision=args.revision)
    unet = UNet2DConditionModel.from_pretrained_orig(args.pretrained_model_name_or_path, subfolder='unet', revision=args.revision)
    if args.controlnet_model_name_or_path:
        logger.info('Loading existing controlnet weights')
        controlnet = ControlNetModel.from_pretrained(args.controlnet_model_name_or_path)
    else:
        logger.info('Initializing controlnet weights from unet')
        controlnet = ControlNetModel.from_unet(unet)
    # Custom (de)serialization hooks so accelerate checkpoints store the unet
    # and controlnet in separate subfolders instead of a single state blob.
    if version.parse(accelerate.__version__) >= version.parse('0.16.0'):

        def save_model_hook(models, weights, output_dir):
            # Exactly two models are registered: the unet and the controlnet.
            i = (len(weights) - 1)
            assert ((len(models) == 2) and (len(weights) == 2))
            for (i, model) in enumerate(models):
                sub_dir = ('unet' if isinstance(model, UNet2DConditionModel) else 'controlnet')
                model.save_pretrained(os.path.join(output_dir, sub_dir))
                # Pop weights so accelerate does not also save its default blob.
                weights.pop()

        def load_model_hook(models, input_dir):
            assert (len(models) == 2)
            for i in range(len(models)):
                model = models.pop()
                if not isinstance(model, UNet2DConditionModel):
                    load_model = ControlNetModel.from_pretrained(input_dir, subfolder='controlnet')
                else:
                    load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder='unet')
                model.register_to_config(**load_model.config)
                model.load_state_dict(load_model.state_dict())
                del load_model
        accelerator.register_save_state_pre_hook(save_model_hook)
        accelerator.register_load_state_pre_hook(load_model_hook)
    # Freeze everything except the controlnet and the selected unet modules.
    vae.requires_grad_(False)
    unet.requires_grad_(False)
    text_encoder.requires_grad_(False)
    controlnet.train()
    for (name, module) in unet.named_modules():
        if name.endswith(tuple(args.trainable_modules)):
            print(name)
            for params in module.parameters():
                params.requires_grad = True
    if args.enable_xformers_memory_efficient_attention:
        if is_xformers_available():
            import xformers
            xformers_version = version.parse(xformers.__version__)
            if xformers_version == version.parse('0.0.16'):
                # logger.warn is deprecated; use warning.
                logger.warning('xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See for more details.')
            unet.enable_xformers_memory_efficient_attention()
            controlnet.enable_xformers_memory_efficient_attention()
        else:
            raise ValueError('xformers is not available. Make sure it is installed correctly')
    if args.gradient_checkpointing:
        unet.enable_gradient_checkpointing()
        controlnet.enable_gradient_checkpointing()
    low_precision_error_string = ' Please make sure to always have all model weights in full float32 precision when starting training - even if doing mixed precision training, copy of the weights should still be float32.'
    if accelerator.unwrap_model(controlnet).dtype != torch.float32:
        raise ValueError(f'Controlnet loaded as datatype {accelerator.unwrap_model(controlnet).dtype}. {low_precision_error_string}')
    if accelerator.unwrap_model(unet).dtype != torch.float32:
        # BUG FIX: this message previously said 'Controlnet' for the unet check.
        raise ValueError(f'Unet loaded as datatype {accelerator.unwrap_model(unet).dtype}. {low_precision_error_string}')
    if args.allow_tf32:
        torch.backends.cuda.matmul.allow_tf32 = True
    if args.scale_lr:
        args.learning_rate = (((args.learning_rate * args.gradient_accumulation_steps) * args.train_batch_size) * accelerator.num_processes)
    if args.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
            raise ImportError('To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`.')
        optimizer_class = bnb.optim.AdamW8bit
    else:
        optimizer_class = torch.optim.AdamW
    # Both the controlnet and the (partially unfrozen) unet are optimized.
    params_to_optimize = (list(controlnet.parameters()) + list(unet.parameters()))
    optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon)
    train_dataset = WebImageDataset(image_size=args.resolution, tokenizer=tokenizer, accelerator=accelerator, control_type=args.control_type, null_text_ratio=0.5, resize_bak=True)
    train_dataloader = torch.utils.data.DataLoader(train_dataset, num_workers=args.dataloader_num_workers, batch_size=args.train_batch_size, shuffle=False)
    overrode_max_train_steps = False
    if not isinstance(train_dataset, WebImageDataset):
        num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
        if (args.max_train_steps is None):
            args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
            overrode_max_train_steps = True
    else:
        # WebImageDataset is a streaming dataset without a usable len(); train
        # for a fixed budget matching the 1000 epochs x 10000 steps assumed below.
        # NOTE(review): this assignment was truncated in the source; constant
        # reconstructed — confirm against the original training script.
        args.max_train_steps = 10000000
    lr_scheduler = get_scheduler(args.lr_scheduler, optimizer=optimizer, num_warmup_steps=(args.lr_warmup_steps * args.gradient_accumulation_steps), num_training_steps=(args.max_train_steps * args.gradient_accumulation_steps), num_cycles=args.lr_num_cycles, power=args.lr_power)
    (unet, controlnet, optimizer, lr_scheduler) = accelerator.prepare(unet, controlnet, optimizer, lr_scheduler)
    # Frozen components can run in the (possibly reduced) mixed-precision dtype.
    weight_dtype = torch.float32
    if accelerator.mixed_precision == 'fp16':
        weight_dtype = torch.float16
    elif accelerator.mixed_precision == 'bf16':
        weight_dtype = torch.bfloat16
    vae.to(accelerator.device, dtype=weight_dtype)
    text_encoder.to(accelerator.device, dtype=weight_dtype)
    # Recompute schedule sizes after accelerator.prepare may have resharded data.
    if not isinstance(train_dataset, WebImageDataset):
        num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
        if overrode_max_train_steps:
            args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
        args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
    else:
        num_update_steps_per_epoch = 10000
        args.num_train_epochs = 1000
    if accelerator.is_main_process:
        tracker_config = dict(vars(args))
        # Drop non-serializable / list-valued entries before tracker init.
        tracker_config.pop('validation_prompt')
        tracker_config.pop('validation_image')
        tracker_config.pop('trainable_modules')
        accelerator.init_trackers(args.tracker_project_name, config=tracker_config)
    total_batch_size = ((args.train_batch_size * accelerator.num_processes) * args.gradient_accumulation_steps)
    logger.info('***** Running training *****')
    if not isinstance(train_dataset, WebImageDataset):
        logger.info(f'  Num examples = {len(train_dataset)}')
        logger.info(f'  Num batches each epoch = {len(train_dataloader)}')
    logger.info(f'  Num Epochs = {args.num_train_epochs}')
    logger.info(f'  Instantaneous batch size per device = {args.train_batch_size}')
    logger.info(f'  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
    logger.info(f'  Gradient Accumulation steps = {args.gradient_accumulation_steps}')
    logger.info(f'  Total optimization steps = {args.max_train_steps}')
    global_step = 0
    first_epoch = 0
    # Optionally resume from an explicit or the most recent checkpoint dir.
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint != 'latest':
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith('checkpoint')]
            dirs = sorted(dirs, key=(lambda x: int(x.split('-')[1])))
            path = (dirs[-1] if (len(dirs) > 0) else None)
        if path is None:
            accelerator.print(f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run.")
            args.resume_from_checkpoint = None
            initial_global_step = 0
        else:
            accelerator.print(f'Resuming from checkpoint {path}')
            accelerator.load_state(os.path.join(args.output_dir, path))
            global_step = int(path.split('-')[1])
            initial_global_step = (global_step * args.gradient_accumulation_steps)
            first_epoch = (global_step // num_update_steps_per_epoch)
    else:
        initial_global_step = 0
    progress_bar = tqdm(range(0, args.max_train_steps), initial=initial_global_step, desc='Steps', disable=(not accelerator.is_local_main_process))
    for epoch in range(first_epoch, args.num_train_epochs):
        for (step, batch) in enumerate(train_dataloader):
            with accelerator.accumulate(controlnet), accelerator.accumulate(unet):
                # Encode images into latent space and add noise at a random timestep.
                pixel_values = batch['pixel_values'].to(accelerator.device, dtype=weight_dtype)
                latents = vae.encode(pixel_values).latent_dist.sample()
                latents = (latents * vae.config.scaling_factor)
                noise = torch.randn_like(latents)
                bsz = latents.shape[0]
                timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
                timesteps = timesteps.long()
                noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
                encoder_hidden_states = text_encoder(batch['input_ids'].to(accelerator.device))[0]
                controlnet_image = batch['conditioning_pixel_values'].to(accelerator.device, dtype=weight_dtype)
                (controlnet_cond_mid, down_block_res_samples, mid_block_res_sample) = controlnet(noisy_latents, timesteps, encoder_hidden_states=encoder_hidden_states, controlnet_cond=controlnet_image, return_dict=False)
                model_pred = unet(noisy_latents, timesteps, encoder_hidden_states=encoder_hidden_states, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample).sample
                if noise_scheduler.config.prediction_type == 'epsilon':
                    target = noise
                elif noise_scheduler.config.prediction_type == 'v_prediction':
                    target = noise_scheduler.get_velocity(latents, noise, timesteps)
                else:
                    raise ValueError(f'Unknown prediction type {noise_scheduler.config.prediction_type}')
                loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
                # Auxiliary L1 reconstruction loss on the controlnet's intermediate output(s).
                if controlnet_cond_mid is not None:
                    if isinstance(controlnet_cond_mid, list):
                        for values in controlnet_cond_mid:
                            loss += F.l1_loss(F.interpolate(pixel_values, size=values.shape[-2:], mode='bilinear').float(), values.float(), reduction='mean')
                    else:
                        loss += F.l1_loss(pixel_values.float(), controlnet_cond_mid.float(), reduction='mean')
                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    params_to_clip = (list(controlnet.parameters()) + list(unet.parameters()))
                    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad(set_to_none=args.set_grads_to_none)
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1
                if accelerator.is_main_process:
                    if (global_step % args.checkpointing_steps) == 0:
                        save_path = os.path.join(args.output_dir, f'checkpoint-{global_step}')
                        accelerator.save_state(save_path)
                        logger.info(f'Saved state to {save_path}')
                    # Validation is currently disabled; flip the condition to re-enable.
                    if False:
                        log_validation(vae, text_encoder, tokenizer, unet, controlnet, args, accelerator, weight_dtype, global_step)
            logs = {'loss': loss.detach().item(), 'lr': lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            accelerator.log(logs, step=global_step)
            # NOTE(review): this break only exits the inner (step) loop; the epoch
            # loop continues (matches the original behavior).
            if global_step >= args.max_train_steps:
                break
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        controlnet = accelerator.unwrap_model(controlnet)
        controlnet.save_pretrained(args.output_dir)
        unet = accelerator.unwrap_model(unet)
        unet.save_pretrained(args.output_dir)
        if args.push_to_hub:
            upload_folder(repo_id=repo_id, folder_path=args.output_dir, commit_message='End of training', ignore_patterns=['step_*', 'epoch_*'])
    accelerator.end_training()
def fmt_ac_ria(ria, extended_purview=None):
    """Format an actual-causation RIA as '<alpha> <mechanism> <arrow> <purview>'."""
    labels = ria.node_labels
    if extended_purview is None:
        purview_str = fmt_mechanism(ria.purview, labels)
    else:
        purview_str = fmt_extended_purview(ria.extended_purview, labels)
    mechanism_str = fmt_mechanism(ria.mechanism, labels)
    if ria.direction == Direction.CAUSE:
        parts = (purview_str, ARROW_LEFT, mechanism_str)
    elif ria.direction == Direction.EFFECT:
        parts = (mechanism_str, ARROW_RIGHT, purview_str)
    else:
        # Preserve the original dict-lookup failure mode for unknown directions.
        raise KeyError(ria.direction)
    return '{ALPHA} = {alpha} {causality}'.format(ALPHA=ALPHA, alpha=round(ria.alpha, 4), causality=' '.join(parts))
def state_bind_checkbox(owner, state, path, widget):
    """Two-way bind a checkbox widget to the boolean state value at *path*."""

    def update_state(widget, state):
        # Widget -> state: store the checked flag as a plain bool.
        state.set(path, bool(widget.isChecked()))

    def update_widget(state, widget):
        # State -> widget: update silently to avoid a signal feedback loop.
        widget.blockSignals(True)
        widget.setChecked(state.get(path))
        widget.blockSignals(False)

    state_bind(owner, state, [path], update_state, widget, [widget.toggled], update_widget)
def test_persist_history_permission_error(hist_file, mocker, capsys):
    """A PermissionError while writing history is reported on stderr, not raised."""
    app = cmd2.Cmd(persistent_history_file=hist_file)
    run_cmd(app, 'help')
    # Make every open() fail as if the history file were unwritable.
    mocked_open = mocker.patch('builtins.open')
    mocked_open.side_effect = PermissionError
    app._persist_history()
    out, err = capsys.readouterr()
    assert not out
    assert 'Cannot write' in err
class Command(BaseCommand):
    """Self-update command: check out the latest release tag, upgrade
    dependencies and run the standard Django deployment steps."""

    def handle(self, *args, **options):
        try:
            # Work from the repository root (one level above BASE_DIR).
            git_path = Path(settings.BASE_DIR).parent
            os.chdir(git_path)
            run('git checkout master -q && git pull -q ')
            # NOTE(review): the curl URL appears to have been lost from this copy
            # (should point at the GitHub releases API) — restore before use.
            version_cmd = 'curl -s | grep \'tag_name\' | cut -d : -f2,3 | tr -d \\" | tr -d ,'
            version = run(version_cmd, capture_output=True).stdout.decode().strip()
            checkout_cmd = f'git checkout {version} -q'
            run(checkout_cmd)
            run('pip3 install --upgrade -r requirements.txt')
            os.chdir(settings.BASE_DIR)
            run('python3 manage.py migrate')
            run('python3 manage.py collectstatic --no-input')
            run('python3 manage.py compilemessages')
        except subprocess.CalledProcessError as e:
            # Best-effort reporting: echo the failed command's captured streams.
            print(e)
            if e.stdout is not None:  # idiom fix: identity comparison with None
                print(f'stdout: {e.stdout}')
            if e.stderr is not None:
                print(f'stderr:{e.stderr}')
class TestDeprecation(object):
    """Deprecation-warning tests for pysat general utilities."""

    def setup_method(self):
        # Ensure DeprecationWarnings are always raised so they can be recorded.
        warnings.simplefilter('always', DeprecationWarning)

    def teardown_method(self):
        pass

    def test_convert_timestamp_to_datetime(self):
        """convert_timestamp_to_datetime must emit its deprecation warning."""
        warn_msgs = ['New kwargs added to `pysat.utils.io.load_netCDF4` '
                     'for generalized handling, deprecated '
                     'function will be removed in pysat 3.2.0+']
        test = pysat.Instrument('pysat', 'testing', use_header=True)
        test.load(2009, 1)
        with warnings.catch_warnings(record=True) as war:
            gen.convert_timestamp_to_datetime(test, epoch_name='uts')
        assert len(war) >= len(warn_msgs)
        pysat.utils.testing.eval_warnings(war, warn_msgs)
class MlpGeLUFunctionBLASLT(torch.autograd.Function):
    """Autograd wrapper around the fused cuBLASLt MLP+GeLU CUDA extension."""

    # NOTE(review): the decorators were mangled in this copy ('_fwd(...)'/'_bwd');
    # reconstructed as the torch AMP custom autograd decorators — confirm upstream.
    @torch.cuda.amp.custom_fwd(cast_inputs=torch.float16)
    def forward(ctx, p, *args):
        outputs = mlp_gelu_blaslt.forward(p, args)
        ctx.save_for_backward(*args)
        ctx.outputs = outputs
        # The extension returns the dropout mask as the last output.
        dropout_mask = outputs[-1]
        ctx.p = p
        return (outputs[0], dropout_mask)

    @torch.cuda.amp.custom_bwd
    def backward(ctx, *grad_o):
        p = ctx.p
        grads = mlp_gelu_blaslt.backward(p, grad_o[0], ctx.outputs, ctx.saved_tensors)
        # Free the cached forward outputs as soon as the backward pass is done.
        del ctx.outputs
        # No gradient for the dropout probability `p`.
        return (None, *grads)
def test_tan_hhduc(fints_client):
    """Two-step SEPA transfer requiring an HHD_UC (flicker) TAN challenge."""
    with fints_client:
        accounts = fints_client.get_sepa_accounts()
        # NOTE(review): the IBAN argument 'DE' looks truncated — verify fixture data.
        challenge = fints_client.simple_sepa_transfer(accounts[0], 'DE', 'GENODE23X42', 'Test Receiver', Decimal('5.23'), 'Test Sender', 'Test transfer hhduc 2step')
        from fints.hhd.flicker import parse
        assert challenge.challenge == 'Geben Sie den Startcode als TAN an'
        flicker = parse(challenge.challenge_hhduc)
        response = fints_client.send_tan(challenge, flicker.startcode.data)
    assert response.status == ResponseStatus.SUCCESS
def window_sorter(win):
    """Return the group name for a window whose title matches a known substring.

    Patterns are checked in order; returns None when nothing matches.
    """
    patterns = (('.', 'E-mail'), ('Gmail', 'E-mail'), ('SquirrelMail', 'E-mail'), ('zeromq', 'Docs'), ('PyYAML', 'Docs'), ('documentation', 'Docs'), ('-ietf-', 'Docs'), ('GNOME Live!', 'Docs'), ('Guide', 'Docs'))
    return next((group for needle, group in patterns if needle in win.name), None)
class StocktickerArgs(_QtileMigrator):
    """Migration: rename ``StockTicker``'s ``function`` kwarg to ``func``."""
    # Unique migration id used by the qtile migration machinery.
    ID = 'UpdateStocktickerArgs'
    # One-line summary shown in the migration listing.
    SUMMARY = 'Updates ``StockTicker`` argument signature.'
    HELP = '\n    The ``StockTicker`` widget had a keyword argument called ``function``. This needs to be\n    renamed to ``func`` to prevent clashes with the ``function()`` method of ``CommandObject``.\n\n    For example:\n\n    .. code:: python\n\n        widget.StockTicker(function="TIME_SERIES_INTRADAY")\n\n    should be changed to:\n\n    .. code::\n\n        widget.StockTicker(func="TIME_SERIES_INTRADAY")\n\n    '
    # First qtile version in which this migration applies.
    AFTER_VERSION = '0.22.1'
    # Change/NoChange/Check fixtures exercised by the migration test harness.
    TESTS = [Change('StockTicker(function="TIME_SERIES_INTRADAY")', 'StockTicker(func="TIME_SERIES_INTRADAY")'), Change('widget.StockTicker(function="TIME_SERIES_INTRADAY")', 'widget.StockTicker(func="TIME_SERIES_INTRADAY")'), Change('libqtile.widget.StockTicker(function="TIME_SERIES_INTRADAY")', 'libqtile.widget.StockTicker(func="TIME_SERIES_INTRADAY")'), NoChange('StockTicker(func="TIME_SERIES_INTRADAY")'), NoChange('widget.StockTicker(func="TIME_SERIES_INTRADAY")'), NoChange('libqtile.widget.StockTicker(func="TIME_SERIES_INTRADAY")'), Check('\n            import libqtile\n            from libqtile import bar, widget\n            from libqtile.widget import StockTicker\n\n            bar.Bar(\n                [\n                    StockTicker(function="TIME_SERIES_INTRADAY"),\n                    widget.StockTicker(function="TIME_SERIES_INTRADAY"),\n                    libqtile.widget.StockTicker(function="TIME_SERIES_INTRADAY")\n                ],\n                20\n            )\n            ', '\n            import libqtile\n            from libqtile import bar, widget\n            from libqtile.widget import StockTicker\n\n            bar.Bar(\n                [\n                    StockTicker(func="TIME_SERIES_INTRADAY"),\n                    widget.StockTicker(func="TIME_SERIES_INTRADAY"),\n                    libqtile.widget.StockTicker(func="TIME_SERIES_INTRADAY")\n                ],\n                20\n            )\n            ')]
    # The libcst transformer implementing the rename.
    visitor = StocktickerArgsTransformer
class SomeMinionCL(Component):
    """CL-level xcel minion: buffers one request and answers it via a register file."""

    def recv(s, msg):
        # Accept a request only when the single-entry buffer is free.
        assert (s.entry is None)
        s.entry = msg

    def recv_rdy(s):
        return (s.entry is None)

    def read(s, addr):
        addr = int(addr)
        return s.reg_file[addr]

    def write(s, addr, data):
        addr = int(addr)
        s.reg_file[addr] = data

    def construct(s, ReqType, RespType, nregs=16):
        s.xcel = XcelMinionIfcCL(ReqType, RespType, req=s.recv, req_rdy=s.recv_rdy)
        s.entry = None
        s.reg_file = [0 for _ in range(nregs)]

        # NOTE(review): decorator was mangled to '_once' in this copy;
        # reconstructed as pymtl3's @update_once — confirm against upstream.
        @update_once
        def up_process():
            if (s.entry is not None) and s.xcel.resp.rdy():
                req = s.entry
                s.entry = None
                if req.type_ == XcelMsgType.READ:
                    resp = RespType(req.type_, s.read(req.addr))
                elif req.type_ == XcelMsgType.WRITE:
                    s.write(req.addr, req.data)
                    resp = RespType(req.type_, 0)
                s.xcel.resp(resp)
        # Process the buffered entry before accepting the next request.
        s.add_constraints((U(up_process) < M(s.xcel.req)))

    def line_trace(s):
        return str(s.xcel)
# NOTE(review): the decorator line was mangled to '.parametrize(...)' in this
# copy; reconstructed as the standard pytest marker — confirm against upstream.
@pytest.mark.parametrize('x,y,expected', [(np.array([0.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 5.0]), np.array([2.0, 1.0, 0.0, 1.0, 2.0, 3.0, 2.0, 1.0, 2.0, 3.0]), (np.array([[0.0, (- 1.0), 2.0], [(- 0.5), (- 1.0), 1.0], [(- 0.75), (- 0.5), 3.0], [0.75, (- 1.5), 0.375], [0.125, (- 1.25), 2.5625], [1.5, 0.0, 0.0], [(- 0.5), (- 1.0), 2.0], [(- 0.25), 1.5, 0.375], [0.75, (- 1.5), 1.375], [0.5, 1.0, 1.0], [1.5, 0.0, 1.0], [0.0278, (- 0.3333), 2.1667], [(- 0.75), 1.5, 1.625], [(- 0.25), 1.5, 1.375], [0.1667, 0.0, 2.0], [0.0, 1.0, 2.0]]), np.array([0.0, 1.0, 1.0, 1.5, 1.5, 2.0, 2.0, 2.5, 2.5, 3.0, 3.0, 3.0, 3.5, 3.5, 4.0, 4.0, 5.0]), np.array([2.0, 1.0, 3.0, 0.375, 2.5625, 0.0, 2.0, 0.375, 1.375, 1.0, 1.0, 2.1667, 1.625, 1.375, 2.0, 2.0, 3.0]), np.array([0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]))), (np.array([1.0, 2.0, 3.0, 4.0, 5.0]), np.array([(- 2.0), (- 1.0), 0.0, 1.0, 2.0]), (np.array([[0.0, 1.0, (- 2.0)], [0.0, 1.0, (- 1.0)], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0]]), np.array([1.0, 2.0, 3.0, 4.0, 5.0]), np.array([(- 2.0), (- 1.0), 0.0, 1.0, 2.0]), np.array([0.0, 0.0, 0.0, 0.0, 0.0]))), (np.array([(- 0.5), (- 0.1), 0.0, 0.2, 0.3]), np.array([(- 5.0), (- 1.0), 0.2, 0.5, 2.0]), (np.array([[2.2727, 9.0909, (- 5.0)], [63.0303, 10.9091, (- 1.0)], [(- 72.7273), 17.2121, (- 0.297)], [(- 11.8182), 2.6667, 0.2], [6.0606, 0.303, 0.3485], [122.7273, 2.7273, 0.5]]), np.array([(- 0.5), (- 0.1), (- 0.05), 0.0, 0.1, 0.2, 0.3]), np.array([(- 5.0), (- 1.0), (- 0.297), 0.2, 0.3485, 0.5, 2.0]), np.array([0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0])))])
def test__schmumaker_qspline(x, y, expected):
    """Check coefficients, knots, fitted values and insertion flags of the Schumaker quadratic spline."""
    [t, c, yhat, kflag] = _schumaker_qspline(x, y)
    np.testing.assert_allclose(c, expected[0], atol=0.0001)
    np.testing.assert_allclose(t, expected[1], atol=0.0001)
    np.testing.assert_allclose(yhat, expected[2], atol=0.0001)
    np.testing.assert_allclose(kflag, expected[3], atol=0.0001)
def get_parser():
    """Build the argument parser for the Tdnn-to-Affine config converter."""
    description = "\n    Convert a config file with Tdnn components to their equivalent\n    Affine/Linear components.  Useful when we are using MACE (a deep learning\n    inference framework using Kaldi's trained models) that doesn't\n    support Tdnn components.\n    Usage:\n        convert_config_tdnn_to_affine.py exp/chain/tdnn_1a/configs/final.config > \\\n                exp/chain/tdnn_1a/configs/converted.config\n    "
    parser = argparse.ArgumentParser(description=description)
    # Positional argument: path to the input config file.
    parser.add_argument('input', type=str)
    return parser
def get_legends(img, colors, palette):
    """Lay out color-swatch legend entries for *img*.

    Returns a tuple ``(entries, height)`` where each entry holds the swatch
    rectangle, label position, fill color and label text, and *height* is the
    total pixel height the legend rows occupy.
    """
    rtn = []
    rtn_lines = 1
    draw = ImageDraw.Draw(img)
    # Pick a platform-appropriate font; fail loudly elsewhere.
    if platform.system() == 'Windows':
        font = ImageFont.truetype('arial.ttf', 15)
    elif platform.system() == 'Linux':
        font = ImageFont.truetype('DejaVuSans.ttf', 14)
    else:
        # BUG FIX: was `assert False, ...` which is stripped under `python -O`.
        raise RuntimeError('not supported platform')
    x_len = 15  # swatch width
    y_len = 15  # swatch height
    x_pos = 7
    y_pos = 7
    x_off = 0
    # First pass: column width = widest label (color 0 is background, skipped).
    # NOTE(review): ImageDraw.textsize was removed in Pillow 10; consider
    # draw.textlength/textbbox when upgrading Pillow.
    for color in colors:
        if color == 0:
            continue
        text_size = draw.textsize(palette[color]['label'], font)[0]
        if text_size > x_off:
            x_off = text_size
    x_off += (x_len + 20)
    y_off = (y_len + 7)
    # Second pass: place entries left-to-right, wrapping to a new row at the edge.
    for color in colors:
        if color == 0:
            continue
        if (x_pos + x_off) >= img.width:
            x_pos = 7
            y_pos += y_off
            rtn_lines += 1
        rtn.append({'rect_pos': [x_pos, y_pos, (x_pos + x_len), (y_pos + y_len)], 'label_pos': (((x_pos + x_len) + 5), y_pos), 'fill': palette[color]['rgb'], 'label': palette[color]['label']})
        x_pos += x_off
    return (rtn, (((7 + y_len) * rtn_lines) + 7))
class CheckParametersConvergence(Callback):
    """VI callback that stops inference once parameter updates become small.

    Every ``every`` iterations it compares the flattened parameter vector with
    the previous snapshot and raises ``StopIteration`` when the chosen norm of
    the difference drops below ``tolerance``.
    """

    def __init__(self, every=100, tolerance=0.001, diff='relative', ord=np.inf):
        # `diff` selects the absolute/relative difference function from the
        # module-level `_diff` registry.
        self._diff = _diff[diff]
        self.ord = ord
        self.every = every
        self.prev = None
        self.tolerance = tolerance

    def __call__(self, approx, _, i) -> None:
        if self.prev is None:
            # First invocation: just record a baseline snapshot.
            self.prev = self.flatten_shared(approx.params)
            return
        if (i % self.every) or (i < self.every):
            return
        current = self.flatten_shared(approx.params)
        prev = self.prev
        delta: np.ndarray = self._diff(current, prev)
        self.prev = current
        norm = np.linalg.norm(delta, self.ord)
        if norm < self.tolerance:
            # StopIteration is the conventional signal to end the fit loop.
            raise StopIteration(('Convergence achieved at %d' % i))

    # BUG FIX: restored @staticmethod — the method has no `self` parameter but
    # is invoked as `self.flatten_shared(...)` above, which would otherwise
    # raise a TypeError.
    @staticmethod
    def flatten_shared(shared_list):
        """Concatenate the values of a list of shared variables into one 1-D array."""
        return np.concatenate([sh.get_value().flatten() for sh in shared_list])
# NOTE(review): the decorator line was mangled to '(models...)' in this copy;
# reconstructed as the standard admin registration — confirm against upstream.
@admin.register(models.ProposalSectionReviewerVote)
class ProposalSectionReviewerVoteAdmin(TimeAuditAdmin):
    """Admin for reviewer votes; non-superusers only see proposals belonging to
    conferences they moderate."""
    list_filter = ['vote_value', 'proposal__proposal_type__name']
    list_display = (('proposal', 'voter', 'role', 'vote_value') + TimeAuditAdmin.list_display)

    def get_queryset(self, request):
        qs = super(ProposalSectionReviewerVoteAdmin, self).get_queryset(request)
        if request.user.is_superuser:
            return qs
        # Restrict the changelist to conferences the requesting user moderates.
        moderators = service.list_conference_moderator(user=request.user)
        return qs.filter(proposal__conference__in=[m.conference for m in moderators])
def _parse_env_kwarg(kwargs, keyword, env_name, env_type):
if (keyword not in kwargs):
env_value = os.environ.get(env_name, None)
if (env_value is not None):
if (env_type is bool):
kwargs[keyword] = ((env_value == '1') or (env_value.lower() == 'true'))
elif (env_type is str):
kwargs[keyword] = env_value
elif (env_type is float):
kwargs[keyword] = float(env_value)
elif (env_type is int):
kwargs[keyword] = int(env_value)
else:
raise NotImplementedError(f'Parsing environment variables of type {env_type} is not supported.')
return kwargs |
class Information(Cog):
    def __init__(self, bot: Bot):
        # Keep a reference to the bot for channel/cog lookups in the commands below.
        self.bot = bot
def get_channel_type_counts(guild: Guild) -> defaultdict[(str, int)]:
channel_counter = defaultdict(int)
for channel in guild.channels:
if is_staff_channel(channel):
channel_counter['staff'] += 1
else:
channel_counter[str(channel.type)] += 1
return channel_counter
def join_role_stats(role_ids: list[int], guild: Guild, name: (str | None)=None) -> dict[(str, int)]:
member_count = 0
for role_id in role_ids:
if ((role := guild.get_role(role_id)) is not None):
member_count += len(role.members)
else:
raise NonExistentRoleError(role_id)
return {(name or role.name.title()): member_count}
def get_member_counts(guild: Guild) -> dict[(str, int)]:
role_ids = [constants.Roles.helpers, constants.Roles.mod_team, constants.Roles.admins, constants.Roles.owners, constants.Roles.contributors]
role_stats = {}
for role_id in role_ids:
role_stats.update(Information.join_role_stats([role_id], guild))
role_stats.update(Information.join_role_stats([constants.Roles.project_leads, constants.Roles.domain_leads], guild, 'Leads'))
return role_stats
    async def get_extended_server_info(self, ctx: Context) -> str:
        """Build the mod-only extra server info (nominations, BB watches, defcon, slowmode)."""
        # Each section degrades gracefully to '' when its cog is not loaded.
        talentpool_info = ''
        talentpool_cog: (TalentPool | None) = self.bot.get_cog('Talentpool')
        if talentpool_cog:
            num_nominated = len((await talentpool_cog.api.get_nominations(active=True)))
            talentpool_info = f'''Nominated: {num_nominated}
'''
        bb_info = ''
        bb_cog: (BigBrother | None) = self.bot.get_cog('Big Brother')
        if bb_cog:
            bb_info = f'''BB-watched: {len(bb_cog.watched_users)}
'''
        defcon_info = ''
        defcon_cog: (Defcon | None) = self.bot.get_cog('Defcon')
        if defcon_cog:
            threshold = (time.humanize_delta(defcon_cog.threshold) if defcon_cog.threshold else '-')
            defcon_info = f'''Defcon threshold: {threshold}
'''
        verification = f'''Verification level: {ctx.guild.verification_level.name}
'''
        python_general = self.bot.get_channel(constants.Channels.python_general)
        return textwrap.dedent(f'''
            {talentpool_info} {bb_info} {defcon_info} {verification} {python_general.mention} cooldown: {python_general.slowmode_delay}s
        ''')
_any_role(*constants.STAFF_PARTNERS_COMMUNITY_ROLES)
(name='roles')
async def roles_info(self, ctx: Context) -> None:
roles = sorted(ctx.guild.roles[1:], key=(lambda role: role.name))
role_list = []
for role in roles:
role_list.append(f'`{role.id}` - {role.mention}')
embed = Embed(title=f"Role information (Total {len(roles)} role{('s' * (len(role_list) > 1))})", colour=Colour.og_blurple())
(await LinePaginator.paginate(role_list, ctx, embed, empty=False))
_any_role(*constants.STAFF_PARTNERS_COMMUNITY_ROLES)
(name='role')
async def role_info(self, ctx: Context, *roles: (Role | str)) -> None:
parsed_roles = set()
failed_roles = set()
all_roles = {role.id: role.name for role in ctx.guild.roles}
for role_name in roles:
if isinstance(role_name, Role):
parsed_roles.add(role_name)
continue
match = rapidfuzz.process.extractOne(role_name, all_roles, score_cutoff=80, scorer=rapidfuzz.fuzz.ratio)
if (not match):
failed_roles.add(role_name)
continue
role = ctx.guild.get_role(match[2])
parsed_roles.add(role)
if failed_roles:
(await ctx.send(f":x: Could not retrieve the following roles: {', '.join(failed_roles)}"))
for role in parsed_roles:
(h, s, v) = colorsys.rgb_to_hsv(*role.colour.to_rgb())
embed = Embed(title=f'{role.name} info', colour=role.colour)
embed.add_field(name='ID', value=role.id, inline=True)
embed.add_field(name='Colour (RGB)', value=f'#{role.colour.value:0>6x}', inline=True)
embed.add_field(name='Colour (HSV)', value=f'{h:.2f} {s:.2f} {v}', inline=True)
embed.add_field(name='Member count', value=len(role.members), inline=True)
embed.add_field(name='Position', value=role.position)
embed.add_field(name='Permission code', value=role.permissions.value, inline=True)
(await ctx.send(embed=embed))
(name='server', aliases=['server_info', 'guild', 'guild_info'])
async def server_info(self, ctx: Context) -> None:
embed = Embed(colour=Colour.og_blurple(), title='Server Information')
created = time.format_relative(ctx.guild.created_at)
num_roles = (len(ctx.guild.roles) - 1)
if (ctx.channel.id in (*constants.MODERATION_CHANNELS, constants.Channels.dev_core)):
features = f'''
Features: {', '.join(ctx.guild.features)}'''
else:
features = ''
py_invite = (await self.bot.fetch_invite(constants.Guild.invite))
online_presences = py_invite.approximate_presence_count
offline_presences = (py_invite.approximate_member_count - online_presences)
member_status = f'{constants.Emojis.status_online} {online_presences:,} {constants.Emojis.status_offline} {offline_presences:,}'
embed.description = f'''Created: {created}{features}
Roles: {num_roles}
Member status: {member_status}'''
embed.set_thumbnail(url=ctx.guild.icon.url)
total_members = f'{ctx.guild.member_count:,}'
member_counts = self.get_member_counts(ctx.guild)
member_info = '\n'.join((f'{role}: {count}' for (role, count) in member_counts.items()))
embed.add_field(name=f'Members: {total_members}', value=member_info)
total_channels = len(ctx.guild.channels)
channel_counts = self.get_channel_type_counts(ctx.guild)
channel_info = '\n'.join((f'{channel.title()}: {count}' for (channel, count) in sorted(channel_counts.items())))
embed.add_field(name=f'Channels: {total_channels}', value=channel_info)
if is_mod_channel(ctx.channel):
embed.add_field(name='Moderation:', value=(await self.get_extended_server_info(ctx)))
(await ctx.send(embed=embed))
# NOTE(review): bare tuple below looks like a stripped command decorator — confirm upstream.
(name='user', aliases=['user_info', 'member', 'member_info', 'u'])
async def user_info(self, ctx: Context, user_or_message: (MemberOrUser | Message)=None) -> None:
    """Send an embed of user information.

    Accepts either a user/member or a message (in which case its author is used);
    defaults to the invoker. Non-moderators may only inspect themselves.
    """
    if (passed_as_message := isinstance(user_or_message, Message)):
        user = user_or_message.author
    else:
        user = user_or_message
    if (user is None):
        # Default to the command invoker.
        user = ctx.author
    elif ((user != ctx.author) and (await has_no_roles_check(ctx, *constants.MODERATION_ROLES))):
        (await ctx.send('You may not use this command on users other than yourself.'))
        return
    # Only respond in whitelisted contexts (staff/partners/community roles).
    if in_whitelist_check(ctx, roles=constants.STAFF_PARTNERS_COMMUNITY_ROLES):
        embed = (await self.create_user_embed(ctx, user, passed_as_message))
        (await ctx.send(embed=embed))
async def create_user_embed(self, ctx: Context, user: MemberOrUser, passed_as_message: bool) -> Embed:
    """Build the user-info embed: name/badges, creation date, membership, activity and infractions.

    Mod channels get expanded infraction and nomination details; other channels
    get only basic infraction counts.
    """
    on_server = bool((await get_or_fetch_member(ctx.guild, user.id)))
    created = time.format_relative(user.created_at)
    name = str(user)
    if (on_server and user.nick):
        name = f'{user.nick} ({name})'
    name = escape_markdown(name)
    if passed_as_message:
        # Mark that the target was resolved from a message rather than given directly.
        name += ' - From Message'
    if user.public_flags.verified_bot:
        name += f' {constants.Emojis.verified_bot}'
    elif user.bot:
        name += f' {constants.Emojis.bot}'
    # Collect emoji for every public badge flag that has a matching constant.
    badges = []
    for (badge, is_set) in user.public_flags:
        if (is_set and (emoji := getattr(constants.Emojis, f'badge_{badge}', None))):
            badges.append(emoji)
    if on_server:
        if user.joined_at:
            joined = time.format_relative(user.joined_at)
        else:
            joined = 'Unable to get join date'
        # Roles in reverse order, excluding @everyone (index 0).
        roles = ', '.join((role.mention for role in user.roles[:0:(- 1)]))
        membership = {'Joined': joined, 'Verified': (not user.pending), 'Roles': (roles or None)}
        if (not is_mod_channel(ctx.channel)):
            # Verification state is mod-only information.
            membership.pop('Verified')
        membership = textwrap.dedent('\n'.join([f'{key}: {value}' for (key, value) in membership.items()]))
    else:
        membership = 'The user is not a member of the server'
    fields = [('User information', textwrap.dedent(f'''
Created: {created}
Profile: {user.mention}
ID: {user.id}
''').strip()), ('Member information', membership), (await self.user_messages(user))]
    if is_mod_channel(ctx.channel):
        fields.append((await self.expanded_user_infraction_counts(user)))
        fields.append((await self.user_nomination_counts(user)))
    else:
        fields.append((await self.basic_user_infraction_counts(user)))
    embed = Embed(title=name, description=' '.join(badges))
    for (field_name, field_content) in fields:
        embed.add_field(name=field_name, value=field_content, inline=False)
    embed.set_thumbnail(url=user.display_avatar.url)
    # Use the user's role colour unless it is the default, then fall back to blurple.
    embed.colour = (user.colour if (user.colour != Colour.default()) else Colour.og_blurple())
    return embed
async def basic_user_infraction_counts(self, user: MemberOrUser) -> tuple[(str, str)]:
    """Return an ('Infractions', summary) field with total and active non-hidden infraction counts."""
    params = {'hidden': 'False', 'user__id': str(user.id)}
    infractions = await self.bot.api_client.get('bot/infractions', params=params)
    active = sum(1 for infraction in infractions if infraction['active'])
    summary = f'Total: {len(infractions)}\nActive: {active}'
    return ('Infractions', summary)
async def expanded_user_infraction_counts(self, user: MemberOrUser) -> tuple[(str, str)]:
    """Return an ('Infractions', details) field with per-type totals and active counts."""
    infractions = await self.bot.api_client.get('bot/infractions', params={'user__id': str(user.id)})
    if not infractions:
        return ('Infractions', 'No infractions')
    # Tally "<state> <type>" pairs, remembering every distinct type seen.
    counter = defaultdict(int)
    types_seen = set()
    for infraction in infractions:
        kind = infraction['type']
        types_seen.add(kind)
        state = 'active' if infraction['active'] else 'inactive'
        counter[f'{state} {kind}'] += 1
    # One line per type, annotated with the active count when non-zero.
    lines = []
    for kind in sorted(types_seen):
        active = counter[f'active {kind}']
        total = active + counter[f'inactive {kind}']
        line = f'{kind.capitalize()}s: {total}'
        if active:
            line += f' ({active} active)'
        lines.append(line)
    return ('Infractions', '\n'.join(lines))
async def user_nomination_counts(self, user: MemberOrUser) -> tuple[(str, str)]:
    """Return a ('Nominations', summary) field describing the user's helper nominations."""
    nominations = await self.bot.api_client.get('bot/nominations', params={'user__id': str(user.id)})
    if not nominations:
        return ('Nominations', 'No nominations')
    count = len(nominations)
    noun = 'nomination' if count == 1 else 'nominations'
    if any(nomination['active'] for nomination in nominations):
        summary = f'This user is **currently** nominated\n({count} {noun} in total)'
    else:
        summary = f'This user has {count} historical {noun}, but is currently not nominated.'
    return ('Nominations', summary)
async def user_messages(self, user: MemberOrUser) -> tuple[(str, str)]:
    """Return an ('Activity', summary) field from the user's metricity data.

    Fixes over the original:
    - A 404 from the API now returns ('Activity', 'No activity') directly; previously
      the string was fed into a strict zip against two field names, raising ValueError.
    - Any other ResponseCodeError is re-raised instead of being silently swallowed
      (which previously left the output list empty and also broke the strict zip).
    """
    try:
        user_activity = await self.bot.api_client.get(f'bot/users/{user.id}/metricity_data')
    except ResponseCodeError as e:
        if e.status == 404:
            # User has no recorded activity.
            return ('Activity', 'No activity')
        raise
    total_message_text = (f"{user_activity['total_messages']:,}" if user_activity['total_messages'] else 'No messages')
    activity_blocks_text = (f"{user_activity['activity_blocks']:,}" if user_activity['activity_blocks'] else 'No activity')
    metrics = [total_message_text, activity_blocks_text]
    activity_output = '\n'.join((f'{name}: {metric}' for (name, metric) in zip(['Messages', 'Activity blocks'], metrics, strict=True)))
    return ('Activity', activity_output)
def format_fields(self, mapping: Mapping[(str, Any)], field_width: (int | None)=None) -> str:
fields = sorted(mapping.items(), key=(lambda item: item[0]))
if (field_width is None):
field_width = len(max(mapping.keys(), key=len))
out = ''
for (key, val) in fields:
if isinstance(val, dict):
inner_width = int((field_width * 1.6))
val = ('\n' + self.format_fields(val, field_width=inner_width))
elif isinstance(val, str):
text = textwrap.fill(val, width=100, replace_whitespace=False)
val = textwrap.indent(text, (' ' * (field_width + len(': '))))
val = val.lstrip()
if (key == 'color'):
val = hex(val)
out += '{0:>{width}}: {1}\n'.format(key, val, width=field_width)
return out.rstrip()
async def send_raw_content(self, ctx: Context, message: Message, json: bool=False) -> None:
    """Paginate and send the raw content, embeds and attachments of *message*.

    With json=True the payload dicts are pretty-printed; otherwise they are
    rendered via self.format_fields. Requires the invoker to be able to read
    the message's channel.
    """
    if not message.channel.permissions_for(ctx.author).read_messages:
        await ctx.send(':x: You do not have permissions to see the channel this message is in.')
        return
    # NOTE(review): this statement was corrupted in the source; reconstructed as the
    # raw HTTP fetch of the message payload (discord.py HTTPClient.get_message) —
    # confirm against upstream.
    raw_data = await ctx.bot.http.get_message(message.channel.id, message.id)
    paginator = Paginator()

    def add_content(title: str, content: str) -> None:
        """Append a titled section; zero-width space stops backticks from closing the code block."""
        paginator.add_line(f'== {title} ==\n')
        paginator.add_line(content.replace('`', '`\u200b'))
        paginator.close_page()

    if message.content:
        add_content('Raw message', message.content)
    transformer = pprint.pformat if json else self.format_fields
    for field_name in ('embeds', 'attachments'):
        data = raw_data[field_name]
        if not data:
            continue
        total = len(data)
        for current, item in enumerate(data, start=1):
            add_content(f'Raw {field_name} ({current}/{total})', transformer(item))
    for page in paginator.pages:
        await ctx.send(page, allowed_mentions=AllowedMentions.none())
# NOTE(review): the next three bare expressions look like stripped decorators
# (cooldown-with-role-bypass, @commands.group(invoke_without_command=True),
# channel/role whitelist) — confirm against upstream.
_with_role_bypass(2, (60 * 3), BucketType.member, bypass_roles=constants.STAFF_PARTNERS_COMMUNITY_ROLES)
(invoke_without_command=True)
_whitelist(channels=(constants.Channels.bot_commands,), roles=constants.STAFF_PARTNERS_COMMUNITY_ROLES)
async def raw(self, ctx: Context, message: Message) -> None:
    """Send the raw content of *message* to the invoking channel."""
    (await self.send_raw_content(ctx, message))
# NOTE(review): the bare `()` below looks like a stripped subcommand decorator
# (e.g. @raw.command()) — confirm against upstream.
()
async def json(self, ctx: Context, message: Message) -> None:
    """Send the raw, JSON-pretty-printed payload of *message*."""
    (await self.send_raw_content(ctx, message, json=True))
async def _set_rules_command_help(self) -> None:
    """Extend the `rules` command help text with the keywords accepted for each rule."""
    help_string = f'''{self.rules.help}
'''
    help_string += '__Available keywords per rule__:\n\n'
    # Rules come back as (text, keywords) pairs; enumerate for 1-based rule numbers.
    full_rules = (await self.bot.api_client.get('rules', params={'link_format': 'md'}))
    for (index, (_, keywords)) in enumerate(full_rules, start=1):
        help_string += f'''**Rule {index}**: {', '.join(keywords)}
'''
    self.rules.help = help_string
# NOTE(review): bare tuple below looks like a stripped command decorator — confirm upstream.
(aliases=('rule',))
async def rules(self, ctx: Context, *, args: (str | None)) -> (set[int] | None):
    """Look up server rules by number and/or keyword and paginate them.

    Returns the set of rule numbers shown, or None when no valid rule was requested.
    """
    # NOTE(review): the url argument below was truncated in the source (unterminated
    # string literal) — restore the rules page URL from upstream.
    rules_embed = Embed(title='Rules', color=Colour.og_blurple(), url='
    (keywords, rule_numbers) = ([], [])
    full_rules = (await self.bot.api_client.get('rules', params={'link_format': 'md'}))
    # Map every keyword to the 1-based number of the rule it belongs to.
    keyword_to_rule_number = dict()
    for (rule_number, (_, rule_keywords)) in enumerate(full_rules, start=1):
        for rule_keyword in rule_keywords:
            keyword_to_rule_number[rule_keyword] = rule_number
    if args:
        for word in args.split(maxsplit=100):
            try:
                rule_numbers.append(int(word))
            except ValueError:
                # Stop at the first token that is neither a number nor a known keyword.
                if ((kw := word.lower()) not in keyword_to_rule_number):
                    break
                keywords.append(kw)
    if ((not rule_numbers) and (not keywords)):
        # Nothing specific requested: show the default description.
        rules_embed.description = DEFAULT_RULES_DESCRIPTION
        (await ctx.send(embed=rules_embed))
        return None
    rule_numbers = sorted(set(rule_numbers))
    invalid = ', '.join((str(rule_number) for rule_number in rule_numbers if ((rule_number < 1) or (rule_number > len(full_rules)))))
    if invalid:
        (await ctx.send(shorten((':x: Invalid rule indices: ' + invalid), 75, placeholder=' ...')))
        return None
    final_rules = []
    # Union of keyword-resolved and explicitly numbered rules.
    final_rule_numbers = {keyword_to_rule_number[keyword] for keyword in keywords}
    final_rule_numbers.update(rule_numbers)
    for rule_number in sorted(final_rule_numbers):
        # Track per-rule usage for statistics.
        self.bot.stats.incr(f'rule_uses.{rule_number}')
        final_rules.append(f'**{rule_number}.** {full_rules[(rule_number - 1)][0]}')
    (await LinePaginator.paginate(final_rules, ctx, rules_embed, max_lines=3))
    return final_rule_numbers
async def cog_load(self) -> None:
    """On cog load, populate the `rules` command help with per-rule keywords."""
    (await self._set_rules_command_help())
class Encoder(nn.Module):
    """Three-level convolutional encoder with optional cross-stage feature fusion (CSFF).

    Each level is two channel-attention blocks (CAB); the channel width grows by
    `scale_unetfeats` per level. When `csff` is set, 1x1 convs project encoder and
    decoder features from a previous stage into each level.
    """

    def __init__(self, n_feat, kernel_size, reduction, act, bias, scale_unetfeats, csff):
        super(Encoder, self).__init__()
        # Channel widths at levels 1..3.
        widths = (n_feat, n_feat + scale_unetfeats, n_feat + (scale_unetfeats * 2))
        self.encoder_level1 = nn.Sequential(*(CAB(widths[0], kernel_size, reduction, bias=bias, act=act) for _ in range(2)))
        self.encoder_level2 = nn.Sequential(*(CAB(widths[1], kernel_size, reduction, bias=bias, act=act) for _ in range(2)))
        self.encoder_level3 = nn.Sequential(*(CAB(widths[2], kernel_size, reduction, bias=bias, act=act) for _ in range(2)))
        self.down12 = DownSample(widths[0], scale_unetfeats)
        self.down23 = DownSample(widths[1], scale_unetfeats)
        if csff:
            # 1x1 projections for fusing previous-stage encoder/decoder features.
            self.csff_enc1 = nn.Conv2d(widths[0], widths[0], kernel_size=1, bias=bias)
            self.csff_enc2 = nn.Conv2d(widths[1], widths[1], kernel_size=1, bias=bias)
            self.csff_enc3 = nn.Conv2d(widths[2], widths[2], kernel_size=1, bias=bias)
            self.csff_dec1 = nn.Conv2d(widths[0], widths[0], kernel_size=1, bias=bias)
            self.csff_dec2 = nn.Conv2d(widths[1], widths[1], kernel_size=1, bias=bias)
            self.csff_dec3 = nn.Conv2d(widths[2], widths[2], kernel_size=1, bias=bias)

    def forward(self, x, encoder_outs=None, decoder_outs=None):
        """Return [enc1, enc2, enc3], fusing previous-stage features when both are given."""
        fuse = (encoder_outs is not None) and (decoder_outs is not None)
        enc1 = self.encoder_level1(x)
        if fuse:
            enc1 = enc1 + self.csff_enc1(encoder_outs[0]) + self.csff_dec1(decoder_outs[0])
        enc2 = self.encoder_level2(self.down12(enc1))
        if fuse:
            enc2 = enc2 + self.csff_enc2(encoder_outs[1]) + self.csff_dec2(decoder_outs[1])
        enc3 = self.encoder_level3(self.down23(enc2))
        if fuse:
            enc3 = enc3 + self.csff_enc3(encoder_outs[2]) + self.csff_dec3(decoder_outs[2])
        return [enc1, enc2, enc3]
def evaluate(config, workdir, eval_folder='eval'):
    """Evaluate trained score-model checkpoints: loss, bits/dim and sampling metrics.

    Results (per-checkpoint losses, bpd values, samples, inception statistics and
    FID/KID reports) are written under workdir/eval_folder. Progress is tracked in
    per-host EvalMeta checkpoints so evaluation can resume after pre-emption.
    """
    eval_dir = os.path.join(workdir, eval_folder)
    tf.io.gfile.makedirs(eval_dir)
    # Use a different seed than training.
    rng = jax.random.PRNGKey((config.seed + 1))
    (train_ds, eval_ds, _) = datasets.get_dataset(config, additional_dim=1, uniform_dequantization=config.data.uniform_dequantization, evaluation=True)
    scaler = datasets.get_data_scaler(config)
    inverse_scaler = datasets.get_data_inverse_scaler(config)
    # Initialize the model/optimizer so checkpoints can be restored into `state`.
    (rng, model_rng) = jax.random.split(rng)
    (score_model, init_model_state, initial_params) = mutils.init_model(model_rng, config)
    optimizer = losses.get_optimizer(config).create(initial_params)
    state = mutils.State(step=0, optimizer=optimizer, lr=config.optim.lr, model_state=init_model_state, ema_rate=config.model.ema_rate, params_ema=initial_params, rng=rng)
    checkpoint_dir = os.path.join(workdir, 'checkpoints')
    # Select the SDE and the smallest sampling time.
    if (config.training.sde.lower() == 'vpsde'):
        sde = sde_lib.VPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=config.model.num_scales)
        sampling_eps = 0.001
    elif (config.training.sde.lower() == 'subvpsde'):
        sde = sde_lib.subVPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=config.model.num_scales)
        sampling_eps = 0.001
    elif (config.training.sde.lower() == 'vesde'):
        sde = sde_lib.VESDE(sigma_min=config.model.sigma_min, sigma_max=config.model.sigma_max, N=config.model.num_scales)
        sampling_eps = 1e-05
    else:
        raise NotImplementedError(f'SDE {config.training.sde} unknown.')
    # Build the pmapped evaluation step when loss computation is enabled.
    if config.eval.enable_loss:
        optimize_fn = losses.optimization_manager(config)
        continuous = config.training.continuous
        likelihood_weighting = config.training.likelihood_weighting
        reduce_mean = config.training.reduce_mean
        eval_step = losses.get_step_fn(sde, score_model, train=False, optimize_fn=optimize_fn, reduce_mean=reduce_mean, continuous=continuous, likelihood_weighting=likelihood_weighting)
        p_eval_step = jax.pmap(functools.partial(jax.lax.scan, eval_step), axis_name='batch', donate_argnums=1)
    # Likelihood evaluation uses uniformly dequantized data.
    (train_ds_bpd, eval_ds_bpd, _) = datasets.get_dataset(config, additional_dim=None, uniform_dequantization=True, evaluation=True)
    if (config.eval.bpd_dataset.lower() == 'train'):
        ds_bpd = train_ds_bpd
        bpd_num_repeats = 1
    elif (config.eval.bpd_dataset.lower() == 'test'):
        # Test set: average over 5 dequantization repeats.
        ds_bpd = eval_ds_bpd
        bpd_num_repeats = 5
    else:
        raise ValueError(f'No bpd dataset {config.eval.bpd_dataset} recognized.')
    if config.eval.enable_bpd:
        likelihood_fn = likelihood.get_likelihood_fn(sde, score_model, inverse_scaler)
    if config.eval.enable_sampling:
        sampling_shape = ((config.eval.batch_size // jax.local_device_count()), config.data.image_size, config.data.image_size, config.data.num_channels)
        sampling_fn = sampling.get_sampling_fn(config, sde, score_model, sampling_shape, inverse_scaler, sampling_eps)
    # Different random states per host (e.g. TPU pods).
    rng = jax.random.fold_in(rng, jax.host_id())
    # NOTE(review): decorator garbled in the source — upstream uses
    # @flax.struct.dataclass here; confirm.
    .dataclass
    class EvalMeta():
        # Resume bookkeeping: which checkpoint/round was last completed.
        ckpt_id: int
        sampling_round_id: int
        bpd_round_id: int
        rng: Any
    num_sampling_rounds = ((config.eval.num_samples // config.eval.batch_size) + 1)
    num_bpd_rounds = (len(ds_bpd) * bpd_num_repeats)
    eval_meta = EvalMeta(ckpt_id=config.eval.begin_ckpt, sampling_round_id=(- 1), bpd_round_id=(- 1), rng=rng)
    eval_meta = checkpoints.restore_checkpoint(eval_dir, eval_meta, step=None, prefix=f'meta_{jax.host_id()}_')
    # Resume position: bpd rounds come before sampling rounds within a checkpoint.
    if (eval_meta.bpd_round_id < (num_bpd_rounds - 1)):
        begin_ckpt = eval_meta.ckpt_id
        begin_bpd_round = (eval_meta.bpd_round_id + 1)
        begin_sampling_round = 0
    elif (eval_meta.sampling_round_id < (num_sampling_rounds - 1)):
        begin_ckpt = eval_meta.ckpt_id
        begin_bpd_round = num_bpd_rounds
        begin_sampling_round = (eval_meta.sampling_round_id + 1)
    else:
        begin_ckpt = (eval_meta.ckpt_id + 1)
        begin_bpd_round = 0
        begin_sampling_round = 0
    rng = eval_meta.rng
    # Use InceptionV3 for images >= 256px, otherwise the default inception net.
    inceptionv3 = (config.data.image_size >= 256)
    inception_model = evaluation.get_inception_model(inceptionv3=inceptionv3)
    logging.info(('begin checkpoint: %d' % (begin_ckpt,)))
    for ckpt in range(begin_ckpt, (config.eval.end_ckpt + 1)):
        waiting_message_printed = False
        ckpt_filename = os.path.join(checkpoint_dir, 'checkpoint_{}'.format(ckpt))
        # Wait for training to produce the checkpoint (evaluation may run concurrently).
        while (not tf.io.gfile.exists(ckpt_filename)):
            if ((not waiting_message_printed) and (jax.host_id() == 0)):
                logging.warning(('Waiting for the arrival of checkpoint_%d' % (ckpt,)))
                waiting_message_printed = True
            time.sleep(60)
        # Retry restoration: the file may still be mid-write by the trainer.
        try:
            state = checkpoints.restore_checkpoint(checkpoint_dir, state, step=ckpt)
        except:
            time.sleep(60)
            try:
                state = checkpoints.restore_checkpoint(checkpoint_dir, state, step=ckpt)
            except:
                time.sleep(120)
                state = checkpoints.restore_checkpoint(checkpoint_dir, state, step=ckpt)
        pstate = flax.jax_utils.replicate(state)
        # --- Loss evaluation over the full eval set ---
        if config.eval.enable_loss:
            all_losses = []
            eval_iter = iter(eval_ds)
            for (i, batch) in enumerate(eval_iter):
                eval_batch = jax.tree_map((lambda x: scaler(x._numpy())), batch)
                (rng, *next_rng) = jax.random.split(rng, num=(jax.local_device_count() + 1))
                next_rng = jnp.asarray(next_rng)
                ((_, _), p_eval_loss) = p_eval_step((next_rng, pstate), eval_batch)
                eval_loss = flax.jax_utils.unreplicate(p_eval_loss)
                all_losses.extend(eval_loss)
                if ((((i + 1) % 1000) == 0) and (jax.host_id() == 0)):
                    logging.info(('Finished %dth step loss evaluation' % (i + 1)))
            all_losses = jnp.asarray(all_losses)
            with tf.io.gfile.GFile(os.path.join(eval_dir, f'ckpt_{ckpt}_loss.npz'), 'wb') as fout:
                # Buffer in memory first so GFile receives one atomic write.
                io_buffer = io.BytesIO()
                np.savez_compressed(io_buffer, all_losses=all_losses, mean_loss=all_losses.mean())
                fout.write(io_buffer.getvalue())
        # --- Bits/dim (likelihood) evaluation, resumable per batch ---
        if config.eval.enable_bpd:
            bpds = []
            begin_repeat_id = (begin_bpd_round // len(ds_bpd))
            begin_batch_id = (begin_bpd_round % len(ds_bpd))
            for repeat in range(begin_repeat_id, bpd_num_repeats):
                bpd_iter = iter(ds_bpd)
                # Skip batches already evaluated before resumption.
                for _ in range(begin_batch_id):
                    next(bpd_iter)
                for batch_id in range(begin_batch_id, len(ds_bpd)):
                    batch = next(bpd_iter)
                    eval_batch = jax.tree_map((lambda x: scaler(x._numpy())), batch)
                    (rng, *step_rng) = jax.random.split(rng, (jax.local_device_count() + 1))
                    step_rng = jnp.asarray(step_rng)
                    bpd = likelihood_fn(step_rng, pstate, eval_batch['image'])[0]
                    bpd = bpd.reshape((- 1))
                    bpds.extend(bpd)
                    logging.info(('ckpt: %d, repeat: %d, batch: %d, mean bpd: %6f' % (ckpt, repeat, batch_id, jnp.mean(jnp.asarray(bpds)))))
                    bpd_round_id = (batch_id + (len(ds_bpd) * repeat))
                    with tf.io.gfile.GFile(os.path.join(eval_dir, f'{config.eval.bpd_dataset}_ckpt_{ckpt}_bpd_{bpd_round_id}.npz'), 'wb') as fout:
                        io_buffer = io.BytesIO()
                        np.savez_compressed(io_buffer, bpd)
                        fout.write(io_buffer.getvalue())
                    # Persist progress after every batch so the job can resume here.
                    eval_meta = eval_meta.replace(ckpt_id=ckpt, bpd_round_id=bpd_round_id, rng=rng)
                    checkpoints.save_checkpoint(eval_dir, eval_meta, step=((ckpt * (num_sampling_rounds + num_bpd_rounds)) + bpd_round_id), keep=1, prefix=f'meta_{jax.host_id()}_')
        else:
            # bpd disabled: mark all bpd rounds for this checkpoint as done.
            eval_meta = eval_meta.replace(ckpt_id=ckpt, bpd_round_id=(num_bpd_rounds - 1))
            checkpoints.save_checkpoint(eval_dir, eval_meta, step=(((ckpt * (num_sampling_rounds + num_bpd_rounds)) + num_bpd_rounds) - 1), keep=1, prefix=f'meta_{jax.host_id()}_')
        # --- Sampling and inception-statistics rounds ---
        if config.eval.enable_sampling:
            state = jax.device_put(state)
            for r in range(begin_sampling_round, num_sampling_rounds):
                if (jax.host_id() == 0):
                    logging.info(('sampling -- ckpt: %d, round: %d' % (ckpt, r)))
                this_sample_dir = os.path.join(eval_dir, f'ckpt_{ckpt}_host_{jax.host_id()}')
                tf.io.gfile.makedirs(this_sample_dir)
                (rng, *sample_rng) = jax.random.split(rng, (jax.local_device_count() + 1))
                sample_rng = jnp.asarray(sample_rng)
                (samples, n) = sampling_fn(sample_rng, pstate)
                samples = np.clip((samples * 255.0), 0, 255).astype(np.uint8)
                samples = samples.reshape(((- 1), config.data.image_size, config.data.image_size, config.data.num_channels))
                with tf.io.gfile.GFile(os.path.join(this_sample_dir, f'samples_{r}.npz'), 'wb') as fout:
                    io_buffer = io.BytesIO()
                    np.savez_compressed(io_buffer, samples=samples)
                    fout.write(io_buffer.getvalue())
                gc.collect()
                latents = evaluation.run_inception_distributed(samples, inception_model, inceptionv3=inceptionv3)
                gc.collect()
                with tf.io.gfile.GFile(os.path.join(this_sample_dir, f'statistics_{r}.npz'), 'wb') as fout:
                    io_buffer = io.BytesIO()
                    np.savez_compressed(io_buffer, pool_3=latents['pool_3'], logits=latents['logits'])
                    fout.write(io_buffer.getvalue())
                eval_meta = eval_meta.replace(ckpt_id=ckpt, sampling_round_id=r, rng=rng)
                # Intermediate rounds checkpoint here; the final round checkpoints
                # only after the metrics below are computed.
                if (r < (num_sampling_rounds - 1)):
                    checkpoints.save_checkpoint(eval_dir, eval_meta, step=(((ckpt * (num_sampling_rounds + num_bpd_rounds)) + r) + num_bpd_rounds), keep=1, prefix=f'meta_{jax.host_id()}_')
            # Host 0 aggregates per-host statistics into IS/FID/KID; others wait.
            if (jax.host_id() == 0):
                all_logits = []
                all_pools = []
                for host in range(jax.host_count()):
                    this_sample_dir = os.path.join(eval_dir, f'ckpt_{ckpt}_host_{host}')
                    stats = tf.io.gfile.glob(os.path.join(this_sample_dir, 'statistics_*.npz'))
                    wait_message = False
                    while (len(stats) < num_sampling_rounds):
                        if (not wait_message):
                            logging.warning(('Waiting for statistics on host %d' % (host,)))
                            wait_message = True
                        stats = tf.io.gfile.glob(os.path.join(this_sample_dir, 'statistics_*.npz'))
                        time.sleep(30)
                    for stat_file in stats:
                        with tf.io.gfile.GFile(stat_file, 'rb') as fin:
                            stat = np.load(fin)
                            # InceptionV3 produces no logits; only pools are collected.
                            if (not inceptionv3):
                                all_logits.append(stat['logits'])
                            all_pools.append(stat['pool_3'])
                if (not inceptionv3):
                    all_logits = np.concatenate(all_logits, axis=0)[:config.eval.num_samples]
                all_pools = np.concatenate(all_pools, axis=0)[:config.eval.num_samples]
                data_stats = evaluation.load_dataset_stats(config)
                data_pools = data_stats['pool_3']
                if (not inceptionv3):
                    inception_score = tfgan.eval.classifier_score_from_logits(all_logits)
                else:
                    # IS is undefined without logits.
                    inception_score = (- 1)
                fid = tfgan.eval.frechet_classifier_distance_from_activations(data_pools, all_pools)
                tf_data_pools = tf.convert_to_tensor(data_pools)
                tf_all_pools = tf.convert_to_tensor(all_pools)
                kid = tfgan.eval.kernel_classifier_distance_from_activations(tf_data_pools, tf_all_pools).numpy()
                del tf_data_pools, tf_all_pools
                logging.info(('ckpt-%d --- inception_score: %.6e, FID: %.6e, KID: %.6e' % (ckpt, inception_score, fid, kid)))
                with tf.io.gfile.GFile(os.path.join(eval_dir, f'report_{ckpt}.npz'), 'wb') as f:
                    io_buffer = io.BytesIO()
                    np.savez_compressed(io_buffer, IS=inception_score, fid=fid, kid=kid)
                    f.write(io_buffer.getvalue())
            else:
                # Non-zero hosts block until host 0 has written the report.
                while (not tf.io.gfile.exists(os.path.join(eval_dir, f'report_{ckpt}.npz'))):
                    time.sleep(1.0)
            checkpoints.save_checkpoint(eval_dir, eval_meta, step=(((ckpt * (num_sampling_rounds + num_bpd_rounds)) + r) + num_bpd_rounds), keep=1, prefix=f'meta_{jax.host_id()}_')
        else:
            # Sampling disabled: mark all sampling rounds for this checkpoint as done.
            eval_meta = eval_meta.replace(ckpt_id=ckpt, sampling_round_id=(num_sampling_rounds - 1), rng=rng)
            checkpoints.save_checkpoint(eval_dir, eval_meta, step=((((ckpt * (num_sampling_rounds + num_bpd_rounds)) + num_sampling_rounds) - 1) + num_bpd_rounds), keep=1, prefix=f'meta_{jax.host_id()}_')
        # Resume offsets apply only to the first checkpoint processed.
        begin_bpd_round = 0
        begin_sampling_round = 0
    # Evaluation finished: remove this host's meta checkpoints.
    meta_files = tf.io.gfile.glob(os.path.join(eval_dir, f'meta_{jax.host_id()}_*'))
    for file in meta_files:
        tf.io.gfile.remove(file)
class TestSplitWindowPriceLST(unittest.TestCase):
    """Tests for the Split-Window Price land-surface-temperature algorithm."""

    # Shared 5x5 zero band reused for every input channel.
    sample_band_10 = np.zeros((5, 5))
    # NOTE(review): randint's `high` is exclusive, so this always draws zeros and the
    # mask is all-False — possibly `high=2` was intended; confirm.
    mask = np.random.randint(0, high=1, size=(5, 5), dtype=int) == 1

    def test_that_output_and_input_size_equal(self):
        band = self.sample_band_10
        lst = SplitWindowPriceLST()(
            emissivity_10=band,
            emissivity_11=band,
            brightness_temperature_10=band,
            brightness_temperature_11=band,
            mask=self.mask,
        )
        self.assertEqual(band.shape, lst.shape)

    def test_that_max_temp_less_than_or_equal_max_earth_temp(self):
        algorithm = SplitWindowPriceLST()
        # 56.7 °C is the highest recorded surface air temperature, in kelvin.
        self.assertEqual(algorithm.max_earth_temp, 273.15 + 56.7)
class Library():
    """Registry of optical materials and lenses backed by a SQLAlchemy catalog database.

    Intended to be used as a process-wide singleton via ``Library.one()``.
    """
    _one = None

    # NOTE(review): the decorator was missing in the source even though the first
    # parameter is named ``cls`` and the method manages class-level state —
    # restored @classmethod.
    @classmethod
    def one(cls, *args, **kwargs):
        """Return the shared Library instance, creating it on first use."""
        if cls._one is None:
            cls._one = cls(*args, **kwargs)
        return cls._one

    def __init__(self, db=None):
        """Open the catalog database, defaulting to the per-user sqlite file."""
        if self._one:
            warnings.warn('to guarantee consistency, Library should be used as a singleton throughout: use `Library.one()`', stacklevel=3)
        if db is None:
            db = 'sqlite:///%s' % self.find_db()
        self.db_get(db)

    def find_db(self):
        """Locate the per-user catalog database, seeding it from the package if absent."""
        name = 'library.sqlite'
        user_dir = os.path.join(site.getuserbase(), 'rayopt')
        main = os.path.join(user_dir, name)
        if not os.path.exists(main):
            # Fall back to the installed package copy, then to the source tree.
            base = resource_filename(Requirement.parse('rayopt'), name)
            if not os.path.exists(base):
                base = os.path.join(os.path.split(__file__)[0], name)
            if not os.path.exists(user_dir):
                os.makedirs(user_dir)
            shutil.copy(base, main)
        return main

    def db_get(self, db):
        """Create engine and session for *db* and ensure the schema exists."""
        self.engine = create_engine(db)
        Base.metadata.create_all(self.engine)
        Session = orm.sessionmaker(bind=self.engine)
        self.session = Session()

    def load_all(self, paths, **kwargs):
        """Load every catalog file under each directory in *paths*, skipping failures."""
        for path in paths:
            for entry in os.listdir(path):
                file_path = os.path.join(path, entry)
                try:
                    self.load(file_path, **kwargs)
                except KeyError:
                    # Unrecognized catalog format: skip silently.
                    pass
                except Exception:
                    logger.exception('Could not load %s.', file_path)

    def load(self, fil, mode='refresh'):
        """Parse catalog file *fil* into the database.

        mode='refresh' skips files that appear unchanged; 'reload' always re-imports.
        """
        if mode in ('refresh', 'reload'):
            res = self.session.query(Catalog).filter(Catalog.file == fil).first()
            if not res:
                pass
            elif mode == 'refresh':
                stat = os.stat(fil)
                # NOTE(review): with `or`, a file is skipped if either mtime or size
                # matches; `and` would be the stricter "unchanged" test — confirm intent.
                if (stat.st_mtime <= res.date) or (stat.st_size == res.size):
                    return
                self.session.delete(res)
            elif mode == 'reload':
                self.session.delete(res)
        try:
            if Catalog.parse(fil, self.session):
                self.session.commit()
                print('added %s' % fil)
        except BaseException:
            # Keep the session usable after a failed import, then propagate.
            self.session.rollback()
            raise

    def get(self, *args, **kwargs):
        """Return the first record matching get_all(*args, **kwargs).

        Raises KeyError (from get_all) when nothing matches.
        """
        for item in self.get_all(*args, **kwargs):
            return item

    def get_all(self, typ, name=None, catalog=None, source=None, **kwargs):
        """Yield parsed records of *typ* ('material' or 'lens'), optionally filtered.

        Raises KeyError when no record matches the filters.
        """
        Typ = {'material': Material, 'lens': Lens}[typ]
        res = self.session.query(Typ).join(Catalog)
        if catalog is not None:
            res = res.filter(Catalog.name == catalog)
        if source is not None:
            res = res.filter(Catalog.source == source)
        if name is not None:
            res = res.filter(Typ.name == name)
        res = res.order_by(Typ.name)
        if not res.count():
            raise KeyError('{} {}/{}/{} not found'.format(typ, source, catalog, name))
        for item in res:
            yield item.parse()
def find_caller():
    """Return [basename, lineno, funcname] of the first frame outside this module.

    Frames whose normalized filename equals the module-level `_srcfile` are
    skipped; if no frame qualifies, placeholder values are returned.
    """

    def current_frame():
        # Raise-and-catch yields the current frame without sys._getframe.
        try:
            raise Exception
        except Exception:
            return sys.exc_info()[2].tb_frame.f_back

    frame = current_frame()
    if frame is not None:
        frame = frame.f_back
    rv = ('(unknown file)', 0, '(unknown function)')
    while hasattr(frame, 'f_code'):
        code = frame.f_code
        rv = (code.co_filename, frame.f_lineno, code.co_name)
        if os.path.normcase(code.co_filename) == _srcfile:
            frame = frame.f_back
            continue
        break
    return [os.path.basename(rv[0]), rv[1], rv[2]]
def ql_syscall_recvmsg(ql: Qiling, sockfd: int, msg_addr: int, flags: int):
    """Emulate the recvmsg(2) syscall for the emulated process.

    Reads the guest msghdr at msg_addr, receives payload and ancillary data from
    the host socket, scatters the payload into the guest iovec buffers, writes
    control messages back, and returns the number of bytes received (or -1).
    """
    # Reject descriptors outside the emulated fd table.
    if (sockfd not in range(NR_OPEN)):
        return (- 1)
    sock: Optional[ql_socket] = ql.os.fd[sockfd]
    if (sock is None):
        return (- 1)
    abits = ql.arch.bits
    endian = ql.arch.endian
    # Structure layouts depend on guest word size and endianness.
    msghdr = make_msghdr(abits, endian)
    msg = msghdr.load_from(ql.mem, msg_addr)
    try:
        (data, ancdata, mflags, addr) = sock.recvmsg(msg.msg_namelen, msg.msg_controllen, flags)
    except ConnectionError:
        return (- 1)
    iovec = make_iovec(abits, endian)
    iovec_addr = msg.msg_iov
    # Scatter the received bytes across the guest iovec entries in order.
    written = 0
    for _ in range(msg.msg_iovlen):
        with iovec.ref(ql.mem, iovec_addr) as obj:
            size = min(obj.iov_len, (len(data) - written))
            ql.mem.write(obj.iov_base, data[written:(written + size)])
            written += size
        iovec_addr += iovec.sizeof()
    # Write ancillary (control) messages back-to-back into msg_control.
    cmsghdr = make_cmsghdr(abits, endian)
    cmsg_addr = msg.msg_control
    for (cmsg_level, cmsg_type, cmsg_data) in ancdata:
        with cmsghdr.ref(ql.mem, cmsg_addr) as obj:
            # NOTE(review): cmsg_len is set to the data length only; CMSG_LEN
            # normally includes the header size — confirm against Qiling usage.
            obj.cmsg_len = len(cmsg_data)
            obj.cmsg_level = cmsg_level
            obj.cmsg_type = cmsg_type
        cmsg_addr += cmsghdr.sizeof()
        ql.mem.write(cmsg_addr, cmsg_data)
        cmsg_addr += len(cmsg_data)
    # Reflect the receive flags back to the guest msghdr.
    msg.msg_flags = mflags
    msg.save_to(ql.mem, msg_addr)
    return len(data)
def get_outputs_after_fold(model, test_data):
    """Run *test_data* through the ONNX model before and after batch-norm folding.

    Returns (baseline_output, folded_output, folded_pairs) where folded_pairs
    lists the (conv, bn) pairs that were folded.
    """
    onnx.checker.check_model(model.model)
    filename = './onnx_test_model.onnx'
    onnx.save(model.model, filename)
    # Folding mutates model.model in place.
    conv_bn, bn_conv = fold_all_batch_norms_to_weight(model.model)
    pairs = conv_bn + bn_conv
    onnx.checker.check_model(model.model)
    folded_filename = './onnx_test_model_folded.onnx'
    onnx.save(model.model, folded_filename)

    def run(path):
        # Fresh inference session per graph; feed the single graph input.
        session = rt.InferenceSession(path, providers=providers)
        input_name = session.get_inputs()[0].name
        return session.run(None, {input_name: test_data})

    return (run(filename), run(folded_filename), pairs)
class PointNetfeat(nn.Module):
    """PointNet feature extractor: per-point 1D convolutions plus a global max pool.

    Input: (batch, 3, n_points) point clouds; output: (batch, 256) global features.
    """

    def __init__(self, global_feat=True, feature_transform=False):
        super(PointNetfeat, self).__init__()
        # 1x1 Conv1d acts as a shared per-point MLP: 3 -> 64 -> 128 -> 256.
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 256, 1)
        # InstanceNorm instead of BatchNorm keeps behaviour batch-size independent.
        self.bn1 = nn.InstanceNorm1d(64)
        self.bn2 = nn.InstanceNorm1d(128)
        self.bn3 = nn.InstanceNorm1d(256)
        # NOTE(review): both flags are stored but unused by forward(); kept for
        # interface compatibility with the reference PointNet implementation.
        self.global_feat = global_feat
        self.feature_transform = feature_transform

    def forward(self, x):
        """Return (batch, 256) global features for point clouds *x* of shape (batch, 3, n)."""
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = self.bn3(self.conv3(x))
        # Global max pool over the point dimension.
        # (Removed dead locals `n_pts` and `pointfeat` from the original.)
        x = torch.max(x, 2, keepdim=True)[0]
        return x.view(-1, 256)
# NOTE(review): the bare expressions below and inside the class look like stripped
# decorators (route registration, query-param parsing, scope requirement and
# endpoint naming) — confirm against upstream.
('/v1/find/all')
class ConductSearch(ApiResource):
    """API endpoint performing a combined search across teams, robots, repos and namespaces."""
    _args()
    _param('query', 'The search query.', type=str, default='')

    _scope(scopes.READ_REPO)
    ('conductSearch')
    def get(self, parsed_args):
        """Run the search query and return results sorted by combined relevance score."""
        query = parsed_args['query']
        if (not query):
            return {'results': []}
        username = None
        results = []
        # Team/robot searches are only meaningful for authenticated users.
        if get_authenticated_user():
            username = get_authenticated_user().username
            # Avoid listing the same team twice across the two team searches.
            encountered_teams = set()
            conduct_team_search(username, query, encountered_teams, results)
            conduct_admined_team_search(username, query, encountered_teams, results)
            conduct_robot_search(username, query, results)
        conduct_repo_search(username, query, results)
        conduct_namespace_search(username, query, results)
        # Re-weight each result by its fuzzy-match (LiquidMetal) score against the query.
        for result in results:
            name = result.get('short_name', result['name'])
            lm_score = (liquidmetal.score(name, query) or 0.5)
            result['score'] = (result['score'] * lm_score)
        return {'results': sorted(results, key=itemgetter('score'), reverse=True)}
def qtwe_version_patcher(monkeypatch):
    """Fixture factory: patch qutebrowser's reported QtWebEngine/Chromium versions.

    Skips the test when QtWebEngine is unavailable. The returned callable takes a
    webengine version string and an optional chromium version.
    """
    try:
        from qutebrowser.qt import webenginecore  # noqa: F401
    except ImportError:
        pytest.skip('QtWebEngine not available')

    def patch(ver, chromium_version=None):
        def fake_versions(avoid_init=False):
            return version.WebEngineVersions(
                webengine=utils.VersionNumber.parse(ver),
                chromium=chromium_version,
                source='test',
            )

        monkeypatch.setattr(configfiles.version, 'qtwebengine_versions', fake_versions)

    return patch
class ModuleMock(nn.Module):
    """nn.Module test double that records forward() calls like unittest.mock.

    Extra method names passed to the constructor are attached as MagicMocks.
    """

    def __init__(self, *methods):
        super().__init__()
        self._call_args_list = []
        for method in methods:
            setattr(self, method, unittest.mock.MagicMock(name=f'{type(self).__name__}.{method}'))

    def forward(self, *args, **kwargs):
        # Most recent call first, so call_args (index 0) is the latest call.
        self._call_args_list.insert(0, (args, kwargs))
        return NoOpFunction.apply(*args, **kwargs)

    # NOTE(review): these four accessors are used as attributes elsewhere in the
    # class (`self.called`, `self.call_count == count`); the @property decorators
    # were missing, which made those checks compare bound methods (always truthy /
    # never equal to an int) — restored.
    @property
    def call_args_list(self):
        return self._call_args_list

    @property
    def called(self):
        return bool(self.call_args_list)

    @property
    def call_args(self):
        return self.call_args_list[0]

    @property
    def call_count(self):
        return len(self.call_args_list)

    def assert_called(self, count=None):
        assert self.called
        if count is not None:
            assert self.call_count == count

    def assert_called_with(self, input, *args, **kwargs):
        self.assert_called()
        (input_, *args_), kwargs_ = self.call_args
        ptu.assert_allclose(input_, input)
        assert tuple(args_) == args
        assert kwargs_ == kwargs

    def assert_called_once(self):
        self.assert_called(count=1)

    def assert_called_once_with(self, input, *args, **kwargs):
        self.assert_called_once()
        self.assert_called_with(input, *args, **kwargs)
def test_get_eval_class():
    """Evaluating expressions on a registered class resolves attributes, methods and context vars."""
    context = Context({'c': 789})
    context.pystring_globals_update({'A': ArbClassForEvalTest})
    cases = [
        ('A.a', 123),
        ('A().b', 456),
        ('A().dothing(1)', 124),
        ('A.dothing_class_method(1)', 122),
        ('A.dothing_static_method(1) + c', 792),
    ]
    for expression, expected in cases:
        assert context.get_eval_string(expression) == expected
    # Evaluation must not mutate the context itself.
    assert context == {'c': 789}
def create_data(tracker, iterations=20, obj_per_iteration=100):
    """Create Alpha/Beta/Gamma instances per iteration, snapshotting *tracker* each round.

    Returns the list of all created objects (kept alive so snapshots can see them).
    """
    objects = []
    for _ in range(iterations):
        for _ in range(obj_per_iteration):
            # Same creation order as before: Alpha, then Beta, then Gamma.
            objects.extend((Alpha(), Beta(), Gamma()))
        tracker.create_snapshot()
    return objects
def test_everything_annotated() -> None:
    """Every expression node in pyanalyze's own sources should receive an inferred_value."""
    pyanalyze_dir = Path(__file__).parent
    failures = []
    for filename in sorted(files_with_extension_from_directory('py', pyanalyze_dir)):
        tree = annotate_file(filename, show_errors=True)
        # keyword/arg nodes never carry inferred values, so they are exempt.
        missing = [
            node
            for node in ast.walk(tree)
            if hasattr(node, 'lineno')
            and not hasattr(node, 'inferred_value')
            and not isinstance(node, (ast.keyword, ast.arg))
        ]
        failures.extend((filename, node) for node in missing)
    if failures:
        for filename, node in failures:
            print(f'(unknown):{node.lineno}:{node.col_offset}: {ast.dump(node)}')
        assert False, f'found no annotations on {len(failures)} expressions'
def test_list_value():
    # Assigned values are coerced to the matching choice (including
    # numeric strings); a value outside the choices raises ValueError.
    p = ListParameter('Test', choices=[1, 2.2, 'three', 'and four'])
    cases = [
        (1, 1),
        (2.2, 2.2),
        ('1', 1),
        ('2.2', 2.2),
        ('three', 'three'),
        ('and four', 'and four'),
    ]
    for raw, expected in cases:
        p.value = raw
        assert p.value == expected
    with pytest.raises(ValueError):
        p.value = 5
    assert p.cli_args[0] is None
    assert p.cli_args[1] == [('units are', 'units'), 'default', ('choices are', 'choices')]
class DiscreteAgent(Agent):
    """A grid-world agent with five discrete actions (four moves + stay)
    on an integer map; cells equal to -1 are buildings and impassable."""

    def __init__(self, xs, ys, map_matrix, obs_range=3, n_channels=3, seed=1, flatten=False):
        self.random_state = np.random.RandomState(seed)
        self.xs = xs
        self.ys = ys
        self.eactions = [0, 1, 2, 3, 4]
        # Row/column deltas indexed by the action ids above.
        self.motion_range = [[-1, 0], [1, 0], [0, 1], [0, -1], [0, 0]]
        self.current_pos = np.zeros(2, dtype=np.int32)
        self.last_pos = np.zeros(2, dtype=np.int32)
        self.temp_pos = np.zeros(2, dtype=np.int32)
        self.map_matrix = map_matrix
        self.terminal = False
        self._obs_range = obs_range
        if flatten:
            self._obs_shape = (n_channels * obs_range ** 2 + 1,)
        else:
            # NOTE(review): the channel count is hard-coded to 4 here while
            # the flattened branch uses n_channels -- confirm the asymmetry.
            self._obs_shape = (obs_range, obs_range, 4)

    def observation_space(self):
        return spaces.Box(low=-np.inf, high=np.inf, shape=self._obs_shape)

    def action_space(self):
        return spaces.Discrete(5)

    def step(self, a):
        pos = self.current_pos
        # A terminal agent never moves again.
        if self.terminal:
            return pos
        # Standing inside a building ends the episode for this agent.
        if self.inbuilding(pos[0], pos[1]):
            self.terminal = True
            return pos
        scratch = self.temp_pos
        scratch[0] = pos[0]
        scratch[1] = pos[1]
        scratch += self.motion_range[a]
        nx, ny = scratch[0], scratch[1]
        # Reject moves that leave the map or enter a building.
        if (not self.inbounds(nx, ny)) or self.inbuilding(nx, ny):
            return pos
        self.last_pos[0] = pos[0]
        self.last_pos[1] = pos[1]
        pos[0] = nx
        pos[1] = ny
        return pos

    def get_state(self):
        return self.current_pos

    def inbounds(self, x, y):
        return bool(0 <= x < self.xs and 0 <= y < self.ys)

    def inbuilding(self, x, y):
        return bool(self.map_matrix[(x, y)] == -1)

    def nactions(self):
        return len(self.eactions)

    def set_position(self, xs, ys):
        self.current_pos[0] = xs
        self.current_pos[1] = ys

    def current_position(self):
        return self.current_pos

    def last_position(self):
        return self.last_pos
class CallableArgument(ProperType):
    """A synthetic type node holding an argument's type plus its optional
    name and constructor.  It only supports traversal by a
    SyntheticTypeVisitor and deliberately refuses to serialize."""

    __slots__ = ('typ', 'name', 'constructor')
    typ: Type
    name: (str | None)
    constructor: (str | None)

    def __init__(self, typ: Type, name: (str | None), constructor: (str | None), line: int=(- 1), column: int=(- 1)) -> None:
        super().__init__(line, column)
        self.typ = typ
        self.name = name
        self.constructor = constructor

    def accept(self, visitor: TypeVisitor[T]) -> T:
        # Only synthetic-type visitors know how to handle this node.
        assert isinstance(visitor, SyntheticTypeVisitor)
        result: T = visitor.visit_callable_argument(self)
        return result

    def serialize(self) -> JsonDict:
        assert False, "Synthetic types don't serialize"
class OnlineStats(object):
    """Incrementally accumulate a statistic over a stream of observations.

    Behaviour is fully defined by three callables: `init_func` produces
    the starting accumulator, `update_func` folds one observation in, and
    `readout_func` maps (accumulator, step count) to the reported value.
    The defaults implement a running mean.
    """

    def __init__(self, init_func=(lambda: 0), update_func=(lambda x, y: x + y), readout_func=(lambda x, y: x / y)):
        super(OnlineStats, self).__init__()
        self.init_func = init_func
        self.update_func = update_func
        self.readout_func = readout_func
        self.num_steps = 0
        self.init()

    def init(self):
        # Reset both the accumulator and the observation counter.
        self.stats = self.init_func()
        self.num_steps = 0

    def step(self, new_stat, n_step=1):
        # Fold one observation in; `n_step` only weights the counter.
        self.num_steps += n_step
        self.stats = self.update_func(self.stats, new_stat)

    def summary(self):
        # Current readout, e.g. the mean for the default callables.
        return self.readout_func(self.stats, self.num_steps)
def dump_data(features, labels, user_negative, num_neg, is_training):
    """Pair features with sampled negatives and persist them as a .npy file.

    The resulting {user, item, label} dict is saved under DATA_PATH as
    train_data.npy or test_data.npy depending on `is_training`.
    """
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(DATA_PATH, exist_ok=True)
    (features, labels) = add_negative(features, user_negative, labels, num_neg, is_training)
    data_dict = {'user': features['user'], 'item': features['item'], 'label': labels}
    print(data_dict)
    # Single save path instead of two duplicated branches.
    filename = 'train_data.npy' if is_training else 'test_data.npy'
    np.save(os.path.join(DATA_PATH, filename), data_dict)
class ScalarMeanTracker(object):
    """Accumulate named scalars and report their running means."""

    def __init__(self) -> None:
        self._sums = {}
        self._counts = {}

    def add_scalars(self, scalars):
        # Fold each scalar into its running sum; the 'tools' key is
        # deliberately ignored.
        for key in scalars:
            if key == 'tools':
                continue
            if key in self._sums:
                self._sums[key] += scalars[key]
                self._counts[key] += 1
            else:
                self._sums[key] = scalars[key]
                self._counts[key] = 1

    def pop_and_reset(self):
        # Report the mean of every tracked scalar, then start over.
        means = {key: total / self._counts[key] for key, total in self._sums.items()}
        self._sums = {}
        self._counts = {}
        return means
def lang(category: str, key: str, replacements: typing.Optional[dict]=None, default=None, user: typing.Optional[tweepy.models.User]=None):
    # Look up a language string and substitute any {placeholder} tokens;
    # a missing/empty string is logged and replaced with a stub marker.
    string = _language_config.get(category, key, fallback=default)
    if not string:
        logging.getLogger(__name__).warning(f'Missing {_language} language string: {key} ({category})')
        return '<Missing language string>'
    for (rkey, rvalue) in (replacements or {}).items():
        string = string.replace(f'{{{rkey}}}', str(rvalue))
    if user:
        # Member-specific placeholders are filled from the user object.
        string = _member_replacements(string, user)
    return string
def test_py_string_with_imports():
    # Names registered in the pystring globals are visible to evaluated
    # expressions without leaking into the context mapping itself.
    context = Context({'a': (- 3), 'b': 4})
    from math import sqrt
    context.pystring_globals_update({'squareroot': sqrt})
    result = PyString('abs(a) + squareroot(b)').get_value(context)
    assert result == 5
    assert context == {'a': (- 3), 'b': 4}
    assert context._pystring_globals == {'squareroot': sqrt}
class SmoothedValue():
    """Track a series of values and expose smoothed statistics over a
    sliding window plus the global average of the whole series."""

    def __init__(self, window_size=20, fmt=None):
        if (fmt is None):
            fmt = '{median:.4f} ({global_avg:.4f})'
        self.deque = deque(maxlen=window_size)  # sliding window of recent values
        self.total = 0.0  # running sum over the entire series
        self.count = 0  # number of (weighted) observations over the entire series
        self.fmt = fmt

    def update(self, value, n=1):
        # The window stores the value once; global stats weight it by n.
        self.deque.append(value)
        self.count += n
        self.total += (value * n)

    def synchronize_between_processes(self):
        """Sum count/total across distributed processes (window stays local)."""
        t = reduce_across_processes([self.count, self.total])
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    def median(self):
        if (not self.deque):
            return 0
        d = torch.tensor(list(self.deque))
        return d.median().item()

    def avg(self):
        if (not self.deque):
            return 0
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    def global_avg(self):
        try:
            return (self.total / self.count)
        except ZeroDivisionError:
            return 0

    def max(self):
        if (not self.deque):
            return 0
        return max(self.deque)

    def value(self):
        if (not self.deque):
            return 0
        return self.deque[(- 1)]

    def __str__(self):
        # Bug fix: the statistics are plain methods here, so they must be
        # called -- formatting a bound method with ':.4f' raises TypeError.
        return self.fmt.format(median=self.median(), avg=self.avg(), global_avg=self.global_avg(), max=self.max(), value=self.value())
def safe_inspect_signature(runtime: Any) -> (inspect.Signature | None):
try:
try:
return inspect.signature(runtime)
except ValueError:
if (hasattr(runtime, '__text_signature__') and ('<unrepresentable>' in runtime.__text_signature__)):
sig = runtime.__text_signature__.replace('<unrepresentable>', '...')
sig = inspect._signature_fromstr(inspect.Signature, runtime, sig)
assert isinstance(sig, inspect.Signature)
new_params = [(parameter.replace(default=UNREPRESENTABLE) if (parameter.default is ...) else parameter) for parameter in sig.parameters.values()]
return sig.replace(parameters=new_params)
else:
raise
except Exception:
return None |
def get_mean_width(X):
    """Bandwidth heuristic: sqrt(0.5 * mean of the positive upper-triangle
    pairwise squared distances between the rows of X)."""
    n = X.shape[0]
    sq_norms = np.sum(X * X, 1).reshape(n, 1)
    # Pairwise squared distances via ||a||^2 + ||b||^2 - 2 a.b,
    # using broadcasting instead of np.tile.
    pairwise = (sq_norms + sq_norms.T) - (2 * np.dot(X, X.T))
    # Zero out the lower triangle (and diagonal) so each pair counts once.
    pairwise = pairwise - np.tril(pairwise)
    flat = pairwise.reshape((n ** 2), 1)
    return np.sqrt(0.5 * np.mean(flat[(flat > 0)]))
class Editable(BaseEditable):
    """Wraps a module so it can be "edited": fine-tuned in-graph on one
    (inputs, targets) batch until `is_edit_finished` is satisfied, producing
    a new editable rather than mutating the wrapped module in place."""
    def __init__(self, module: nn.Module, loss_function, optimizer=IngraphGradientDescent(0.01), max_steps=float('inf'), get_editable_parameters=(lambda module: module.parameters()), is_edit_finished=(lambda loss, **kwargs: (loss.item() <= 0))):
        # NOTE(review): the default optimizer instance is created once at
        # class-definition time and shared by every Editable that relies on
        # the default -- confirm IngraphGradientDescent carries no state.
        super().__init__()
        (self.module, self.loss_function, self.optimizer) = (module, loss_function, optimizer)
        self.get_editable_parameters = get_editable_parameters
        self.is_edit_finished = is_edit_finished
        self.max_steps = max_steps
    def forward(self, *args, **kwargs):
        # Transparent pass-through to the wrapped module.
        return self.module(*args, **kwargs)
    def edit(self, inputs, targets, max_steps=None, model_kwargs=None, loss_kwargs=None, opt_kwargs=None, **kwargs):
        """Iteratively update the editable parameters until the stop
        predicate fires (success) or the step budget is exhausted (failure);
        returns an EditResult either way."""
        (model_kwargs, loss_kwargs, opt_kwargs) = ((model_kwargs or {}), (loss_kwargs or {}), (opt_kwargs or {}))
        optimizer_state = self.optimizer.get_initial_state(self, **opt_kwargs)
        editable = self
        for step in count():
            prediction = editable(inputs, **model_kwargs)
            loss = self.loss_function(prediction, targets, **loss_kwargs)
            # is_edit_finished receives EVERY local (step, loss, prediction,
            # editable, ...) as keyword arguments via locals() -- renaming
            # any local here would silently change that callback's interface.
            if self.is_edit_finished(**locals()):
                return self.EditResult(editable, success=True, loss=loss, complexity=step)
            elif (step >= (max_steps or self.max_steps)):
                return self.EditResult(editable, success=False, loss=loss, complexity=step)
            (optimizer_state, editable) = self.optimizer.step(optimizer_state, editable, loss, parameters=editable.get_editable_parameters(editable.module), **kwargs)
    def extra_repr(self):
        return 'max_steps={}, loss_function={}'.format(self.max_steps, repr(self.loss_function))
def import_view(element, save=False, user=None):
    """Create or update a View instance from an imported element dict.

    Fields are copied onto the (new or existing) instance, the element is
    validated and permission-checked, and the instance is saved only when
    `save` is requested and no errors were recorded on the element.  The
    (possibly unsaved) instance is returned either way.
    """
    try:
        view = View.objects.get(uri=element.get('uri'))
    except View.DoesNotExist:
        view = View()
    set_common_fields(view, element)
    view.order = (element.get('order') or 0)
    view.template = element.get('template')
    set_lang_field(view, 'title', element)
    set_lang_field(view, 'help', element)
    view.available = element.get('available', True)
    # Validation/permission problems are presumably recorded on
    # element['errors'] rather than raised, so the save can be skipped
    # cleanly below -- confirm against validate_instance/check_permissions.
    validate_instance(view, element, ViewLockedValidator, ViewUniqueURIValidator)
    check_permissions(view, element, user)
    if (save and (not element.get('errors'))):
        # An existing primary key means this was an update, not a create.
        if view.id:
            element['updated'] = True
            logger.info('View %s updated.', element.get('uri'))
        else:
            element['created'] = True
            logger.info('View created with uri %s.', element.get('uri'))
        # save() must precede the m2m/sites/editors assignments, which
        # require a primary key.
        view.save()
        set_m2m_instances(view, 'catalogs', element)
        view.sites.add(Site.objects.get_current())
        view.editors.add(Site.objects.get_current())
    return view
def train(args, model, train_features, benchmarks):
    """Fine-tune `model` with mixed precision (GradScaler), gradient
    accumulation and a linear warmup/decay schedule, logging the loss and
    periodic benchmark evaluations to wandb; a final evaluation over all
    `benchmarks` runs after the last epoch."""
    train_dataloader = DataLoader(train_features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True)
    # One optimizer step is taken per `gradient_accumulation_steps` batches.
    total_steps = int(((len(train_dataloader) * args.num_train_epochs) // args.gradient_accumulation_steps))
    warmup_steps = int((total_steps * args.warmup_ratio))
    scaler = GradScaler()
    optimizer = AdamW(model.parameters(), lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps)
    print('Total steps: {}'.format(total_steps))
    print('Warmup steps: {}'.format(warmup_steps))
    num_steps = 0
    for epoch in range(int(args.num_train_epochs)):
        model.zero_grad()
        for (step, batch) in enumerate(tqdm(train_dataloader)):
            model.train()
            inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'labels': batch[2].to(args.device), 'ss': batch[3].to(args.device), 'os': batch[4].to(args.device)}
            outputs = model(**inputs)
            # Scale the loss down so accumulated gradients average out.
            loss = (outputs[0] / args.gradient_accumulation_steps)
            scaler.scale(loss).backward()
            # NOTE(review): step 0 satisfies this condition, so the very
            # first batch already triggers an optimizer step -- confirm.
            if ((step % args.gradient_accumulation_steps) == 0):
                num_steps += 1
                if (args.max_grad_norm > 0):
                    # Unscale before clipping so the norm is measured on the
                    # true (unscaled) gradients.
                    scaler.unscale_(optimizer)
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                scaler.step(optimizer)
                scaler.update()
                scheduler.step()
                model.zero_grad()
                wandb.log({'loss': loss.item()}, step=num_steps)
            if (((num_steps % args.evaluation_steps) == 0) and ((step % args.gradient_accumulation_steps) == 0)):
                for (tag, features) in benchmarks:
                    (f1, output) = evaluate(args, model, features, tag=tag)
                    wandb.log(output, step=num_steps)
    # Final evaluation after all epochs complete.
    for (tag, features) in benchmarks:
        (f1, output) = evaluate(args, model, features, tag=tag)
        wandb.log(output, step=num_steps)
def test_option_subscribe():
    # A subscriber receives the current value immediately, then one
    # notification per actual change; re-assigning the same value is a
    # no-op, and unset() notifies with the default again.
    opt = Option('A_FAKE_OPTION', 'default')
    seen = []
    opt.subscribe(seen.append)
    assert seen == ['default']
    opt.current = 'default'
    assert seen == ['default']
    opt.current = 'new-1'
    opt.current = 'new-2'
    assert seen == ['default', 'new-1', 'new-2']
    opt.unset()
    assert seen == ['default', 'new-1', 'new-2', 'default']
# NOTE(review): the bare tuple below is a no-op expression; it looks like
# the argument list of a stripped decorator such as
# @unittest.skipUnless(dbus, 'dbus missing') -- confirm against the
# original file.
(dbus, 'dbus missing')
class TDbusUtils(TestCase):
    """Tests for dbus helpers: apply_signature coercion, property-spec
    listing/filtering, unicode validation and the DBusProperty mixin."""
    def test_prop_sig(self):
        # Scalars, dicts, structs and arrays are coerced to the dbus
        # container type matching the signature string.
        value = apply_signature(2, 'u')
        self.assertTrue(isinstance(value, dbus.UInt32))
        value = apply_signature({'a': 'b'}, 'a{ss}')
        self.assertEqual(value.signature, 'ss')
        self.assertTrue(isinstance(value, dbus.Dictionary))
        value = apply_signature(('a',), 'a(s)')
        self.assertEqual(value.signature, 's')
        self.assertTrue(isinstance(value, dbus.Struct))
        value = apply_signature(('a', 'b'), 'as')
        self.assertEqual(value.signature, 's')
        self.assertTrue(isinstance(value, dbus.Array))
        # A plain int cannot satisfy a struct-array signature.
        self.assertRaises(TypeError, apply_signature, 2, 'a(s)')
        # Both bytes and str inputs come back as str for the 's' signature,
        # with or without utf8_strings.
        text = b'\xc3\xb6\xc3\xa4\xc3\xbc'
        value = apply_signature(text, 's', utf8_strings=True)
        self.assertTrue(isinstance(value, str))
        value = apply_signature(text, 's')
        self.assertTrue(isinstance(value, str))
        text = 'oau'
        value = apply_signature(text, 's', utf8_strings=True)
        self.assertTrue(isinstance(value, str))
        value = apply_signature(text, 's')
        self.assertTrue(isinstance(value, str))
    def test_list_props(self):
        # list_spec_properties exposes access/emit/type per property.
        props = list_spec_properties(ANN1)
        self.assertEqual(props['Position']['access'], 'read')
        self.assertEqual(props['Position']['emit'], 'false')
        self.assertEqual(props['Position']['type'], 's')
        self.assertEqual(props['MinimumRate']['emit'], 'true')
        props = list_spec_properties(ANN2)
        self.assertEqual(props['Foobar']['emit'], 'invalidates')
        self.assertEqual(props['XXX']['emit'], 'false')
    def test_filter_props(self):
        # Whitelist keeps only the named properties (attributes intact) ...
        spec = filter_property_spec(ANN1, wl=['Position'])
        self.assertEqual(list(list_spec_properties(spec).keys()), ['Position'])
        props = list_spec_properties(spec)
        self.assertEqual(props['Position']['emit'], 'false')
        # ... blacklist removes them; no filter keeps everything.
        spec = filter_property_spec(ANN1, bl=['Position'])
        self.assertEqual(list(list_spec_properties(spec).keys()), ['MinimumRate'])
        spec = filter_property_spec(ANN1)
        self.assertEqual(len(list_spec_properties(spec).keys()), 2)
    def test_validate_utf8(self):
        # Non-character code points (U+FFFE) are stripped from str and bytes.
        self.assertEqual(dbus_unicode_validate('X\ufffeX'), 'XX')
        self.assertEqual(dbus_unicode_validate(b'X\xef\xbf\xbeX'), 'XX')
    def test_property_mixin(self):
        # Minimal DBusProperty subclass whose get_property returns the
        # interface name, making the lookup path observable.
        class X(DBusProperty):
            SUPPORTS_MULTIPLE_OBJECT_PATHS = False
            def set_introspection(self, *args):
                pass
            def get_property(self, interface, name):
                return interface
            def set_property(self, interface, name, value):
                pass
        x = X()
        x.set_properties('a1', ANN1)
        x.set_properties('a2', ANN2)
        # a1 implements a2, so a1 lookups fall through to a2's properties.
        x.implement_interface('a1', 'a2')
        props = x.get_properties('a1')
        self.assertTrue((('a1', 'Position') in props))
        self.assertTrue((('a2', 'XXX') in props))
        props = x.get_properties('a2')
        self.assertFalse((('a1', 'Position') in props))
        self.assertEqual(x.get_interface('a2', 'XXX'), 'a2')
        self.assertEqual(x.get_interface('a1', 'XXX'), 'a2')
        self.assertEqual(x.get_value('a2', 'XXX'), 'a2')
        self.assertEqual(x.get_value('a1', 'XXX'), 'a2')
        self.assertEqual(x.get_value('a1', 'Position'), 'a1')
def test_simple_1d_dataset_cutting_plane():
    """NSlackSSVM should perfectly separate a thresholded 1-d dataset."""
    X = np.random.uniform(size=(30, 1))
    # Bug fix: the deprecated alias np.int was removed in NumPy 1.24;
    # the builtin int is the documented replacement.
    Y = (X.ravel() > 0.5).astype(int)
    # Append a constant column so the linear model can learn a bias term.
    X = np.hstack([X, np.ones((X.shape[0], 1))])
    pbl = MultiClassClf(n_features=2)
    svm = NSlackSSVM(pbl, check_constraints=True, C=10000)
    svm.fit(X, Y)
    assert_array_equal(Y, np.hstack(svm.predict(X)))
def build_pom_and_export_to_maven(**kwargs):
    """Generate a Maven pom.xml describing a build target.

    Emits coordinates, properties, modules, source/resource directories,
    compiler / build-helper / exec / surefire plugin configuration and the
    dependency list, then writes the pretty-printed XML to `pom_path`.
    """
    target_path = kwargs.get('target_path')
    target = kwargs.get('target')
    pom_path = kwargs.get('pom_path')
    source_dirs = kwargs.get('source_dirs')
    output_dir = kwargs.get('output_dir')
    final_name = kwargs.get('final_name')
    packaging = kwargs.get('packaging')
    target_dependencies = kwargs.get('target_dependencies')
    test_target_dependencies = kwargs.get('test_target_dependencies')
    test_target_dependencies_exclude = kwargs.get('test_target_dependencies_exclude')
    modules_path = kwargs.get('modules_path')
    prop_vars = kwargs.get('properties')
    external_jars = kwargs.get('external_jars')
    resources = kwargs.get('resources')
    # Each entry arrives base64-encoded JSON: {'cmd': [...], 'deps': [...]}.
    run_java_programs = [json.loads(base64.b64decode(i)) for i in kwargs.get('run_java_programs')]
    test_source_dirs = kwargs.get('test_source_dirs')
    test_resource_dirs = kwargs.get('test_resource_dirs')
    modules = []

    def _indent(elem, level=0):
        # Recursively set text/tail whitespace so the XML pretty-prints.
        ind = ('\n' + (level * ' '))
        if len(elem):
            if ((not elem.text) or (not elem.text.strip())):
                elem.text = (ind + ' ')
            if ((not elem.tail) or (not elem.tail.strip())):
                elem.tail = ind
            # Fixed: the original reused `elem` as the loop variable,
            # shadowing the parent element.
            for child in elem:
                _indent(child, (level + 1))
                if ((not child.tail) or (not child.tail.strip())):
                    child.tail = ind
        elif (level and ((not elem.tail) or (not elem.tail.strip()))):
            elem.tail = ind

    project = et.Element('{}{}{}project'.format('{', DEFAULT_NAMESPACE, '}'), attrib={'{}{}{}schemaLocation'.format('{', XSI_NAMESPACE, '}'): SCHEMA_LOCATION})
    # `target` carries Maven coordinates as group:artifact:version.
    (group_id, artifact_id, version) = target.split(':')
    et.SubElement(project, 'modelVersion').text = MODEL_VERSION
    et.SubElement(project, 'groupId').text = group_id
    et.SubElement(project, 'artifactId').text = artifact_id
    et.SubElement(project, 'version').text = version
    et.SubElement(project, 'packaging').text = packaging
    properties = et.SubElement(project, 'properties')
    et.SubElement(properties, 'project.build.sourceEncoding').text = 'UTF-8'
    if prop_vars:
        for (property, value) in json.loads(base64.b64decode(prop_vars)).items():
            et.SubElement(properties, property).text = value
    if modules_path:
        with open(modules_path) as f:
            modules = [i.strip() for i in f if i.strip()]
    if modules:
        modules_el = et.SubElement(project, 'modules')
        for module in modules:
            et.SubElement(modules_el, 'module').text = module
    build = et.SubElement(project, 'build')
    # The first (test) source dir becomes Maven's canonical directory; the
    # rest are added later via the build-helper plugin.
    if source_dirs:
        et.SubElement(build, 'sourceDirectory').text = source_dirs[0]
        source_dirs = source_dirs[1:]
    if test_source_dirs:
        et.SubElement(build, 'testSourceDirectory').text = test_source_dirs[0]
        test_source_dirs = test_source_dirs[1:]
    if output_dir:
        et.SubElement(build, 'outputDirectory').text = output_dir
    if final_name:
        et.SubElement(build, 'finalName').text = final_name
    if resources:
        resource_element = et.SubElement(et.SubElement(build, 'resources'), 'resource')
        et.SubElement(resource_element, 'directory').text = '${basedir}'
        includes = et.SubElement(resource_element, 'includes')
        for resource in resources:
            et.SubElement(includes, 'include').text = resource
    if test_resource_dirs:
        test_resource_element = et.SubElement(build, 'testResources')
        for test_resource_dir in test_resource_dirs:
            et.SubElement(et.SubElement(test_resource_element, 'testResource'), 'directory').text = ('${basedir}' + (('/' + test_resource_dir) if (test_resource_dir != '.') else ''))
    plugins = et.SubElement(build, 'plugins')
    if (packaging != 'pom'):
        # Compiler plugin pinning the Java language level.
        maven_plugin = et.SubElement(plugins, 'plugin')
        et.SubElement(maven_plugin, 'groupId').text = MAVEN_PLUGIN_GROUP_ID
        et.SubElement(maven_plugin, 'artifactId').text = MAVEN_PLUGIN_ARTIFACT_ID
        et.SubElement(maven_plugin, 'version').text = MAVEN_PLUGIN_VERSION
        configuration = et.SubElement(maven_plugin, 'configuration')
        et.SubElement(configuration, 'source').text = JAVA_LANGUAGE_LEVEL
        et.SubElement(configuration, 'target').text = JAVA_LANGUAGE_LEVEL
    if (source_dirs or external_jars or test_source_dirs):
        # build-helper plugin registers the extra source dirs and jars.
        build_helper_plugin = et.SubElement(plugins, 'plugin')
        et.SubElement(build_helper_plugin, 'groupId').text = MAVEN_BUILD_HELPER_GROUP_ID
        et.SubElement(build_helper_plugin, 'artifactId').text = MAVEN_BUILD_HELPER_ARTIFACT_ID
        et.SubElement(build_helper_plugin, 'version').text = MAVEN_BUILD_HELPER_VERSION
        executions = et.SubElement(build_helper_plugin, 'executions')
        if source_dirs:
            execution = et.SubElement(executions, 'execution')
            et.SubElement(execution, 'id').text = 'add-source'
            et.SubElement(execution, 'phase').text = 'generate-sources'
            et.SubElement(et.SubElement(execution, 'goals'), 'goal').text = 'add-source'
            sources = et.SubElement(et.SubElement(execution, 'configuration'), 'sources')
            for source_dir in source_dirs:
                et.SubElement(sources, 'source').text = source_dir
        if external_jars:
            execution = et.SubElement(executions, 'execution')
            et.SubElement(execution, 'id').text = 'attach-artifacts'
            et.SubElement(execution, 'phase').text = 'generate-sources'
            et.SubElement(et.SubElement(execution, 'goals'), 'goal').text = 'attach-artifact'
            artifacts = et.SubElement(et.SubElement(execution, 'configuration'), 'artifacts')
            for external_jar in external_jars:
                external_artifact = et.SubElement(artifacts, 'artifact')
                et.SubElement(external_artifact, 'file').text = ('${basedir}/' + external_jar)
                et.SubElement(external_artifact, 'type').text = 'jar'
        if test_source_dirs:
            execution = et.SubElement(executions, 'execution')
            et.SubElement(execution, 'id').text = 'add-test-source'
            et.SubElement(execution, 'phase').text = 'generate-test-sources'
            et.SubElement(et.SubElement(execution, 'goals'), 'goal').text = 'add-test-source'
            sources = et.SubElement(et.SubElement(execution, 'configuration'), 'sources')
            # Bug fix: this execution must register the remaining TEST
            # source dirs; the original iterated `source_dirs` (copy-paste
            # from the add-source block above).
            for test_source_dir in test_source_dirs:
                et.SubElement(sources, 'source').text = test_source_dir
    if run_java_programs:
        # exec-maven-plugin runs code-generation programs at build time.
        exec_plugin = et.SubElement(plugins, 'plugin')
        et.SubElement(exec_plugin, 'groupId').text = MAVEN_EXEC_GROUP_ID
        et.SubElement(exec_plugin, 'artifactId').text = MAVEN_EXEC_ARTIFACT_ID
        et.SubElement(exec_plugin, 'version').text = MAVEN_EXEC_VERSION
        jp_dependencies = et.SubElement(exec_plugin, 'dependencies')
        executions = et.SubElement(exec_plugin, 'executions')
        for java_program in run_java_programs:
            execution = et.SubElement(executions, 'execution')
            et.SubElement(execution, 'phase').text = 'generate-sources'
            et.SubElement(et.SubElement(execution, 'goals'), 'goal').text = 'java'
            jp_configuration = et.SubElement(execution, 'configuration')
            # First non-flag word of cmd is the main class; the rest are args.
            (main_cls, args) = (None, [])
            for word in java_program['cmd']:
                if ((not main_cls) and (not word.startswith('-'))):
                    main_cls = word
                else:
                    args.append(word)
            et.SubElement(jp_configuration, 'mainClass').text = main_cls
            et.SubElement(jp_configuration, 'includePluginDependencies').text = 'true'
            et.SubElement(jp_configuration, 'includeProjectDependencies').text = 'false'
            if args:
                jp_arguments = et.SubElement(jp_configuration, 'arguments')
                for arg in args:
                    et.SubElement(jp_arguments, 'argument').text = arg
            if java_program['deps']:
                for jp_dep in java_program['deps']:
                    jp_dependency = et.SubElement(jp_dependencies, 'dependency')
                    (jp_g, jp_a, jp_v) = jp_dep.split(':')
                    et.SubElement(jp_dependency, 'groupId').text = jp_g
                    et.SubElement(jp_dependency, 'artifactId').text = jp_a
                    et.SubElement(jp_dependency, 'version').text = jp_v
                    et.SubElement(jp_dependency, 'type').text = 'jar'
    if (target_dependencies + test_target_dependencies):
        dependencies = et.SubElement(project, 'dependencies')
        for target_dependency in (target_dependencies + test_target_dependencies):
            dependency = et.SubElement(dependencies, 'dependency')
            # Format: group:artifact:version:classifier[::excl[::excl]].
            dependency_info = target_dependency.rsplit('::', 2)
            (group_id, artifact_id, version, classifier) = dependency_info[0].split(':')
            et.SubElement(dependency, 'groupId').text = group_id
            et.SubElement(dependency, 'artifactId').text = artifact_id
            et.SubElement(dependency, 'version').text = version
            if classifier:
                et.SubElement(dependency, 'classifier').text = classifier
            if (target_dependency in test_target_dependencies):
                et.SubElement(dependency, 'scope').text = 'test'
            if (len(dependency_info) > 1):
                exclusions = et.SubElement(dependency, 'exclusions')
                for exclude in dependency_info[1:]:
                    (group_id, artifact_id) = exclude.split(':')
                    exclusion_el = et.SubElement(exclusions, 'exclusion')
                    et.SubElement(exclusion_el, 'groupId').text = group_id
                    et.SubElement(exclusion_el, 'artifactId').text = artifact_id
    if test_target_dependencies_exclude:
        # Surefire plugin excludes selected dependencies from the test
        # classpath.
        surefire_plugin = et.SubElement(plugins, 'plugin')
        et.SubElement(surefire_plugin, 'groupId').text = MAVEN_SUREFIRE_GROUP_ID
        et.SubElement(surefire_plugin, 'artifactId').text = MAVEN_SUREFIRE_ARTIFACT_ID
        et.SubElement(surefire_plugin, 'version').text = MAVEN_SUREFIRE_VERSION
        classpath_excludes = et.SubElement(et.SubElement(surefire_plugin, 'configuration'), 'classpathDependencyExcludes')
        for classpath_exclude in test_target_dependencies_exclude:
            et.SubElement(classpath_excludes, 'classpathDependencyExclude').text = classpath_exclude
    et.register_namespace('', DEFAULT_NAMESPACE)
    et.register_namespace('xsi', XSI_NAMESPACE)
    _indent(project)
    et.ElementTree(project).write(pom_path)
    sys.stderr.write('[MAVEN EXPORT] Generated {} file for target {}\n'.format(os.path.basename(pom_path), target_path))
class UpDownCore(nn.Module):
    """Two-layer Up-Down captioning core: an attention LSTM feeding an
    attention module, whose output drives a language LSTM."""

    def __init__(self, opt, use_maxout=False):
        super(UpDownCore, self).__init__()
        self.drop_prob_lm = opt.drop_prob_lm
        # Attention LSTM input: [prev language h, fc feature, word embedding].
        self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size)
        # Language LSTM input: [attended feature, attention h].
        self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size)
        self.attention = Attention(opt)

    def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
        prev_h_lang = state[0][-1]
        att_lstm_in = torch.cat([prev_h_lang, fc_feats, xt], 1)
        h_att, c_att = self.att_lstm(att_lstm_in, (state[0][0], state[1][0]))
        attended = self.attention(h_att, att_feats, p_att_feats, att_masks)
        lang_lstm_in = torch.cat([attended, h_att], 1)
        h_lang, c_lang = self.lang_lstm(lang_lstm_in, (state[0][1], state[1][1]))
        output = F.dropout(h_lang, self.drop_prob_lm, self.training)
        new_state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
        return (output, new_state)
class TestDocumentDataSuppressionMethods(unittest.TestCase):
    """Section-removal behaviour: remove() drops one matching section,
    remove_all() drops every one with a given name."""

    def test_remove_section_with_default_args(self):
        doc = parse(USER_ONLY)
        self.assertEqual(2, doc.count('user'))
        doc.remove('user')
        remaining = doc.get('user')
        self.assertEqual(['id = 1', "name = 'alex'"], remaining.body)
        self.assertEqual(1, doc.count('user'))
        self.assertEqual(1, count_sections(doc, 'user'))

    def test_remove_section_at_relative_index(self):
        # Removing index 0 leaves the second 'user' section ...
        doc = parse(USER_ONLY)
        self.assertEqual(2, doc.count('user'))
        doc.remove('user', 0)
        self.assertEqual(['id = 2', "name = 'rustic'"], doc.get('user').body)
        self.assertEqual(1, doc.count('user'))
        self.assertEqual(1, count_sections(doc, 'user'))
        # ... while removing index 1 leaves the first.
        doc = parse(USER_ONLY)
        doc.remove('user', 1)
        self.assertEqual(['id = 1', "name = 'alex'"], doc.get('user').body)
        self.assertEqual(1, doc.count('user'))
        self.assertEqual(1, count_sections(doc, 'user'))

    def test_remove_all_method(self):
        doc = parse(USER_AND_INFO)
        for name, expected in (('user', 2), ('info', 1)):
            self.assertEqual(expected, doc.count(name))
            self.assertEqual(expected, count_sections(doc, name))
        doc.remove_all('user')
        self.assertEqual(0, doc.count('user'))
        self.assertEqual(0, count_sections(doc, 'user'))
        # Unrelated sections are untouched.
        self.assertEqual(1, doc.count('info'))
        self.assertEqual(1, count_sections(doc, 'info'))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.