code stringlengths 281 23.7M |
|---|
def retrieve_artifact(name: str) -> dict:
    """Read every file in directory *name* and return {stem: contents}.

    Keys are the file names truncated at the first dot ("a.txt" -> "a").
    Returns an empty dict when *name* is not an existing directory (the
    original used os.path.exists, which crashed with NotADirectoryError
    when *name* was a plain file).

    Raises:
        ValueError: if a file's contents cannot be decoded as text.
    """
    artifact = {}
    if os.path.isdir(name):
        for file in os.listdir(name):
            path = os.path.join(name, file)
            try:
                with open(path) as f:
                    artifact[file.split('.')[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f'Could not open {path}.') from e
    return artifact
class Video_TANetDataSet(data.Dataset):
    """TANet video dataset that decodes frames on the fly with decord.

    Each row of ``list_file`` describes one video (path, frame count,
    label).  ``__getitem__`` chooses 1-based frame indices according to
    the configured sampling strategy and ``get`` decodes exactly those
    frames from ``<video_data_dir>/<path><vid_format>``.
    """

    def __init__(self, list_file, num_segments=3, new_length=1, modality='RGB', vid_format='.mp4', transform=None, random_shift=True, test_mode=False, video_data_dir=None, remove_missing=False, dense_sample=False, test_sample='dense-10', if_sample_tta_aug_views=None, tta_view_sample_style_list=None, n_tta_aug_views=None, debug=False, debug_vid=50):
        self.list_file = list_file                # annotation file: one "<path> <num_frames> <label>" row per video
        self.num_segments = num_segments          # number of frame indices sampled per clip
        self.new_length = new_length              # consecutive frames per segment
        self.modality = modality                  # 'RGB', 'Flow' or 'RGBDiff'
        self.vid_format = vid_format              # extension appended to record paths in get()
        self.transform = transform                # callable applied to (images, label) in get()
        self.random_shift = random_shift          # True: random (train) offsets; False: centered (val) offsets
        self.test_mode = test_mode
        self.video_data_dir = video_data_dir      # root directory containing the video files
        self.remove_missing = remove_missing      # drop records with fewer than 3 frames
        self.dense_sample = dense_sample          # fixed-stride sampling instead of uniform segments
        self.test_sample = test_sample            # test strategy, e.g. 'dense-10' or 'uniform-2'
        self.debug = debug
        self.debug_vid = debug_vid                # number of videos kept when debug=True
        # Test-time augmentation: when enabled, __getitem__ concatenates one
        # index set per style listed in tta_view_sample_style_list.
        self.if_sample_tta_aug_views = if_sample_tta_aug_views
        self.tta_view_sample_style_list = tta_view_sample_style_list
        self.n_tta_aug_views = n_tta_aug_views
        if self.dense_sample:
            print('=> Using dense sample for the dataset...')
        if (self.modality == 'RGBDiff'):
            # RGBDiff consumes one extra frame per segment to form differences.
            self.new_length += 1
        self._parse_list()

    def _load_image_deprecated(self, directory, idx):
        # Legacy frame-folder loader.  NOTE(review): it reads self.root_path
        # and self.image_tmpl, which are never set in this class's __init__ —
        # presumably a leftover from the original frame-based dataset.
        if ((self.modality == 'RGB') or (self.modality == 'RGBDiff')):
            try:
                return [Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(idx))).convert('RGB')]
            except Exception:
                # Fall back to frame 1 when the requested frame is unreadable.
                print('error loading image:', os.path.join(self.root_path, directory, self.image_tmpl.format(idx)))
                return [Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(1))).convert('RGB')]
        elif (self.modality == 'Flow'):
            if (self.image_tmpl == 'flow_{}_{:05d}.jpg'):
                # Separate x/y flow images per frame.
                x_img = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format('x', idx))).convert('L')
                y_img = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format('y', idx))).convert('L')
            elif (self.image_tmpl == '{:06d}-{}_{:05d}.jpg'):
                # Directory name is a zero-padded integer id.
                x_img = Image.open(os.path.join(self.root_path, '{:06d}'.format(int(directory)), self.image_tmpl.format(int(directory), 'x', idx))).convert('L')
                y_img = Image.open(os.path.join(self.root_path, '{:06d}'.format(int(directory)), self.image_tmpl.format(int(directory), 'y', idx))).convert('L')
            else:
                try:
                    # Flow packed into one RGB image: x/y taken from the
                    # first two channels below.
                    flow = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(idx))).convert('RGB')
                except Exception:
                    print('error loading flow file:', os.path.join(self.root_path, directory, self.image_tmpl.format(idx)))
                    flow = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(1))).convert('RGB')
                (flow_x, flow_y, _) = flow.split()
                x_img = flow_x.convert('L')
                y_img = flow_y.convert('L')
            return [x_img, y_img]

    def _parse_list(self):
        # Each row: [path, num_frames, label].
        tmp = [x.strip().split(' ') for x in open(self.list_file)]
        if ((not self.test_mode) or self.remove_missing):
            # Skip videos that are too short / missing (fewer than 3 frames).
            tmp = [item for item in tmp if (int(item[1]) >= 3)]
        self.video_list = [VideoRecord(item) for item in tmp]
        if self.debug:
            # Debug mode: keep only the first debug_vid videos.
            self.video_list = self.video_list[:self.debug_vid]

    def _sample_tta_augmented_views(self, record, tta_view_sample_style):
        """Return 1-based frame indices for one TTA view of ``record``.

        Styles mirror the train/val/test samplers: 'uniform'/'dense' are
        deterministic single clips, '*_equidist' produce n_tta_aug_views
        clips with equidistant start points, '*_rand' randomize the start,
        and 'random' draws sorted random frames.
        """
        if (tta_view_sample_style == 'uniform'):
            num_clips = 1  # single clip (kept for symmetry with multi-clip styles; unused)
            # Segment length in frames; take each segment's center.
            tick = (((record.num_frames - self.new_length) + 1) / float(self.num_segments))
            offsets = [int(((tick / 2.0) + (tick * x))) for x in range(self.num_segments)]
            return (np.array(offsets) + 1)
        elif (tta_view_sample_style == 'dense'):
            num_clips = 1
            # Fixed-stride 64-frame window, centered in the video.
            t_stride = (64 // self.num_segments)
            sample_pos = max(1, ((1 + record.num_frames) - (t_stride * self.num_segments)))
            start_idx = (sample_pos // 2)
            offsets = [(((idx * t_stride) + start_idx) % record.num_frames) for idx in range(self.num_segments)]
            return (np.array(offsets) + 1)
        elif (tta_view_sample_style == 'uniform_equidist'):
            # n_tta_aug_views uniform clips, starts spread over one segment.
            num_clips = self.n_tta_aug_views
            tick = (((record.num_frames - self.new_length) + 1) / float(self.num_segments))
            start_list = np.linspace(0, (tick - 1), num=num_clips, dtype=int)
            offsets = []
            for start_idx in start_list.tolist():
                offsets += [(int((start_idx + (tick * x))) % record.num_frames) for x in range(self.num_segments)]
            return (np.array(offsets) + 1)
        elif (tta_view_sample_style == 'dense_equidist'):
            # n_tta_aug_views dense clips with equidistant window starts.
            num_clips = self.n_tta_aug_views
            t_stride = (64 // self.num_segments)
            sample_pos = max(1, ((1 + record.num_frames) - (t_stride * self.num_segments)))
            start_list = np.linspace(0, (sample_pos - 1), num=num_clips, dtype=int)
            offsets = []
            for start_idx in start_list.tolist():
                offsets += [(((idx * t_stride) + start_idx) % record.num_frames) for idx in range(self.num_segments)]
            return (np.array(offsets) + 1)
        elif (tta_view_sample_style == 'uniform_rand'):
            # Uniform segments with a random jitter inside each segment
            # (same scheme as _sample_indices without dense_sample).
            num_clips = 1
            average_duration = (((record.num_frames - self.new_length) + 1) // self.num_segments)
            if (average_duration > 0):
                offsets = (np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments))
            elif (record.num_frames > self.num_segments):
                offsets = np.sort(randint(((record.num_frames - self.new_length) + 1), size=self.num_segments))
            else:
                # Video shorter than num_segments: repeat frame 1.
                offsets = np.zeros((self.num_segments,))
            return (offsets + 1)
        elif (tta_view_sample_style == 'dense_rand'):
            # Dense window with a random start position.
            num_clips = 1
            t_stride = (64 // self.num_segments)
            sample_pos = max(1, ((1 + record.num_frames) - (t_stride * self.num_segments)))
            start_idx = (0 if (sample_pos == 1) else randint(0, (sample_pos - 1)))
            offsets = [(((idx * t_stride) + start_idx) % record.num_frames) for idx in range(self.num_segments)]
            return (np.array(offsets) + 1)
        elif (tta_view_sample_style == 'random'):
            # Sorted random distinct frames.  NOTE(review): unlike the other
            # styles this returns the offsets without the +1 shift — confirm
            # downstream expects 0-based indices here.
            num_clips = 1
            if (record.num_frames >= self.num_segments):
                offsets = np.sort(np.random.choice(record.num_frames, size=self.num_segments, replace=False))
            else:
                # Pad short videos by repeating the last frame index.
                offsets = np.array((list(range(record.num_frames)) + ([(record.num_frames - 1)] * (self.num_segments - record.num_frames))))
            return np.array(offsets)

    def _sample_indices(self, record):
        """Training-time sampling: random jitter (or random dense window)."""
        if self.dense_sample:
            # 64-frame window at a random start, fixed stride.
            t_stride = (64 // self.num_segments)
            sample_pos = max(1, ((1 + record.num_frames) - (t_stride * self.num_segments)))
            start_idx = (0 if (sample_pos == 1) else np.random.randint(0, (sample_pos - 1)))
            offsets = [(((idx * t_stride) + start_idx) % record.num_frames) for idx in range(self.num_segments)]
            return (np.array(offsets) + 1)
        else:
            # Uniform segments with random jitter inside each segment.
            average_duration = (((record.num_frames - self.new_length) + 1) // self.num_segments)
            if (average_duration > 0):
                offsets = (np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments))
            elif (record.num_frames > self.num_segments):
                offsets = np.sort(randint(((record.num_frames - self.new_length) + 1), size=self.num_segments))
            else:
                # Video shorter than num_segments: repeat frame 1.
                offsets = np.zeros((self.num_segments,))
            return (offsets + 1)

    def _get_val_indices(self, record):
        """Validation sampling: deterministic, centered offsets."""
        if self.dense_sample:
            # Centered 64-frame window, fixed stride.
            t_stride = (64 // self.num_segments)
            sample_pos = max(1, ((1 + record.num_frames) - (t_stride * self.num_segments)))
            start_idx = (sample_pos // 2)
            offsets = [(((idx * t_stride) + start_idx) % record.num_frames) for idx in range(self.num_segments)]
            return (np.array(offsets) + 1)
        else:
            if (record.num_frames > ((self.num_segments + self.new_length) - 1)):
                # Center of each uniform segment.
                tick = (((record.num_frames - self.new_length) + 1) / float(self.num_segments))
                offsets = np.array([int(((tick / 2.0) + (tick * x))) for x in range(self.num_segments)])
            else:
                offsets = np.zeros((self.num_segments,))
            return (offsets + 1)

    def _get_test_indices(self, record):
        """Test sampling: 'dense-K' or 'uniform-K' — K clips concatenated."""
        if ('dense' in self.test_sample):
            # K dense clips; single clip is centered, multiple clips use
            # equidistant window starts.
            num_clips = int(self.test_sample.split('-')[(- 1)])
            t_stride = (64 // self.num_segments)
            sample_pos = max(1, ((1 + record.num_frames) - (t_stride * self.num_segments)))
            if (num_clips == 1):
                start_idx = (sample_pos // 2)
                offsets = [(((idx * t_stride) + start_idx) % record.num_frames) for idx in range(self.num_segments)]
            else:
                start_list = np.linspace(0, (sample_pos - 1), num=num_clips, dtype=int)
                offsets = []
                for start_idx in start_list.tolist():
                    offsets += [(((idx * t_stride) + start_idx) % record.num_frames) for idx in range(self.num_segments)]
            return (np.array(offsets) + 1)
        elif ('uniform' in self.test_sample):
            # K uniform clips; single clip uses segment centers, multiple
            # clips spread starts over one segment length.
            num_clips = int(self.test_sample.split('-')[(- 1)])
            if (num_clips == 1):
                tick = (((record.num_frames - self.new_length) + 1) / float(self.num_segments))
                offsets = [int(((tick / 2.0) + (tick * x))) for x in range(self.num_segments)]
            else:
                tick = (((record.num_frames - self.new_length) + 1) / float(self.num_segments))
                start_list = np.linspace(0, (tick - 1), num=num_clips, dtype=int)
                offsets = []
                for start_idx in start_list.tolist():
                    offsets += [(int((start_idx + (tick * x))) % record.num_frames) for x in range(self.num_segments)]
            return (np.array(offsets) + 1)
        else:
            raise NotImplementedError('{} not exist'.format(self.test_sample))

    def __getitem__(self, index):
        """Sample frame indices per the active mode, then decode and transform."""
        record = self.video_list[index]
        if self.if_sample_tta_aug_views:
            # One concatenated index set per configured TTA style.
            segment_indices = []
            for tta_view_sample_style in self.tta_view_sample_style_list:
                segment_indices += list(self._sample_tta_augmented_views(record, tta_view_sample_style))
        elif (not self.test_mode):
            segment_indices = (self._sample_indices(record) if self.random_shift else self._get_val_indices(record))
        else:
            segment_indices = self._get_test_indices(record)
        return self.get(record, segment_indices)

    def get(self, record, indices):
        """Decode the frames at ``indices`` and apply the transform.

        Returns the (process_data, label) pair produced by self.transform.
        """
        vid_path = osp.join(self.video_data_dir, f'{record.path}{self.vid_format}')
        container = decord.VideoReader(vid_path)
        frame_indices = indices
        # Clip to the decoder's actual frame count, which can be smaller than
        # the frame count recorded in the annotation list.
        frame_indices = np.minimum(frame_indices, (container._num_frame - 1))
        images = container.get_batch(frame_indices).asnumpy()
        images = list(images)
        images = [Image.fromarray(image).convert('RGB') for image in images]
        (process_data, label) = self.transform((images, record.label))
        return (process_data, label)

    def get_img_file_deprecated(self, record, indices):
        # Legacy frame-folder variant of get().  NOTE(review): calls
        # self._load_image, which is not defined here (only
        # _load_image_deprecated exists) — would raise AttributeError.
        images = list()
        for seg_ind in indices:
            p = int(seg_ind)
            for i in range(self.new_length):
                seg_imgs = self._load_image(record.path, p)
                images.extend(seg_imgs)
                if (p < record.num_frames):
                    p += 1
        (process_data, label) = self.transform((images, record.label))
        return (process_data, label)

    def __len__(self):
        # Number of videos (after optional debug truncation).
        return len(self.video_list)
class TemporaryFileItem():
    """Transient entry that runs one search task and, when it finishes,
    promotes any results into a permanent FileItem on the tree."""
    __slots__ = ['_tree', '_proxy', '__weakref__']

    def __init__(self, tree, pathProxy):
        self._tree = tree
        self._proxy = pathProxy
        # Be notified when the proxy finishes a task; register ourselves so
        # the tree keeps a strong reference while the search is pending.
        self._proxy.taskFinished.connect(self.onSearchResult)
        tree._temporaryItems.add(self)

    def search(self, searchFilter):
        # Queue the search described by searchFilter on the proxy.
        self._proxy.pushTask(tasks.SearchTask(**searchFilter))

    def onSearchResult(self, task):
        # Search finished: we are no longer a pending temporary item.
        self._tree._temporaryItems.discard(self)
        results = task.result()
        if results:
            # Promote the hits into a permanent 'search' file item.
            parent_item = FileItem(self._tree, self._proxy, 'search')
            for entry in results:
                SubFileItem(parent_item, *entry, showlinenr=True)
        info = self._tree.topLevelItem(0)
        if isinstance(info, SearchInfoItem):
            # Report whether this file produced any hits.
            info.addFile(bool(results))
class UdemyChapters(object):
    """One chapter of a Udemy course: id/index/title metadata plus lectures.

    The private attributes are populated by the course extractor; the
    public accessors are read-only properties.
    """

    def __init__(self):
        self._chapter_id = None
        self._chapter_index = None
        self._chapter_title = None
        self._lectures_count = None
        self._lectures = []

    def __repr__(self):
        chapter = '{title}'.format(title=self.title)
        return chapter

    # Fix: these accessors were missing their @property decorators.  The
    # class itself uses them without calling — self.title in __repr__ and
    # `0 < n <= self.lectures` in get_lectures() — which previously compared
    # an int against a bound method and raised TypeError.
    @property
    def id(self):
        return self._chapter_id

    @property
    def index(self):
        return self._chapter_index

    @property
    def title(self):
        return self._chapter_title

    @property
    def lectures(self):
        # Number of lectures in the chapter (not the list itself).
        return self._lectures_count

    def get_lectures(self, lecture_number=None, lecture_start=None, lecture_end=None):
        """Filter self._lectures in place and return the remaining lectures.

        ``lecture_number`` selects a single 1-based lecture and takes
        precedence over the range arguments; ``lecture_start`` /
        ``lecture_end`` trim the list.  Out-of-range values are ignored.
        """
        if (lecture_number and (not lecture_start) and (not lecture_end) and isinstance(lecture_number, int)):
            is_okay = bool((0 < lecture_number <= self.lectures))
            if is_okay:
                self._lectures = [self._lectures[(lecture_number - 1)]]
        if (lecture_start and (not lecture_number) and isinstance(lecture_start, int)):
            is_okay = bool((0 < lecture_start <= self.lectures))
            if is_okay:
                self._lectures = self._lectures[(lecture_start - 1):]
        if (lecture_end and (not lecture_number) and isinstance(lecture_end, int)):
            is_okay = bool((0 < lecture_end <= self.lectures))
            if is_okay:
                # NOTE(review): slice end excludes lecture_end itself; kept
                # as-is since callers may rely on it, but verify intent.
                self._lectures = self._lectures[:(lecture_end - 1)]
        return self._lectures
class TestReference():
    """Reference tests for ab13md (structured singular value mu upper bound)."""

    def test_complex_scalar(self):
        # A single complex uncertainty block: mu equals |z|.
        nblock = np.array([1])
        itype = np.array([2])
        z = np.array([[(1 + 2j)]])
        mu = ab13md(z, nblock, itype)[0]
        assert_allclose(mu, abs(z))

    def test_real_scalar_real_uncertainty(self):
        # A single real uncertainty block on a real scalar: mu equals |z|.
        nblock = np.array([1])
        itype = np.array([1])
        z = np.array([[5.34]])
        mu = ab13md(z, nblock, itype)[0]
        assert_allclose(mu, abs(z))

    def test_complex_scalar_real_uncertainty(self):
        # Purely imaginary scalar with a real uncertainty block: mu is 0.
        nblock = np.array([1])
        itype = np.array([1])
        z = np.array([[6.78j]])
        mu = ab13md(z, nblock, itype)[0]
        assert_allclose(mu, 0)

    def test_sp85_part1(self):
        # One 2x2 complex block.
        M = np.array([[2, 2], [(- 1), (- 1)]])
        muref = 3.162
        nblock = np.array([2])
        itype = np.array([2])
        mu = ab13md(M, nblock, itype)[0]
        assert_allclose(mu, muref, rtol=0.0005)

    def test_sp85_part2(self):
        # Two 1x1 complex blocks (diagonal structure).
        M = np.array([[2, 2], [(- 1), (- 1)]])
        muref = 3.0
        nblock = np.array([1, 1])
        itype = np.array([2, 2])
        mu = ab13md(M, nblock, itype)[0]
        assert_allclose(mu, muref, rtol=0.0005)

    def test_slicot(self):
        # The SLICOT documentation example; also verifies the returned
        # scalings certify the upper bound.
        muref = 41.
        (Z, nblock, itype) = slicot_example()
        (mu, d, g, x) = ab13md(Z, nblock, itype)
        assert_allclose(mu, muref)
        ZH = Z.T.conj()
        D = np.diag(d)
        G = np.diag(g)
        # Certificate: Z^H D^2 Z + j(G Z - Z^H G) - mu^2 D^2 must be
        # negative semidefinite.  (Fixed: the '@' matrix-multiplication
        # operators had been lost, leaving a syntax error.)
        negsemidef = ((ZH @ (D ** 2) @ Z) + (1j * ((G @ Z) - (ZH @ G)))) - ((mu ** 2) * (D ** 2))
        # The certificate matrix must be Hermitian ...
        assert_allclose(negsemidef, negsemidef.T.conj())
        evals = np.linalg.eigvalsh(negsemidef)
        # ... with no eigenvalue significantly above zero.
        assert (max(evals) < (np.finfo(float).eps ** 0.5))
def get_jalali_date_from_julian_day(julian_day):
    """Convert a Julian Day number to a Jalali (Persian) calendar date.

    Uses the arithmetic 2820-year grand cycle with epoch JD 2121445.5.
    Returns a (year, month, day) tuple.
    """
    # Normalize to the astronomical noon-based day boundary.
    jd = floor(julian_day) + 0.5
    depoch = jd - 2121445.5            # days elapsed since the Jalali epoch
    cycle = floor(depoch / 1029983)    # completed 2820-year grand cycles
    cyear = depoch % 1029983           # day offset inside the current cycle
    if cyear == 1029982:
        # Very last day of the grand cycle belongs to year 2820.
        ycycle = 2820
    else:
        aux1 = floor(cyear / 366)
        aux2 = cyear % 366
        ycycle = floor((2134 * aux1 + 2816 * aux2 + 2815) / 1028522) + aux1 + 1
    y = ycycle + 2820 * cycle + 474
    if y <= 0:
        y -= 1                         # there is no year zero
    # Day-of-year, then derive the month: the first six months have 31
    # days (186 = 6 * 31), the remaining months 30.
    days_in_years = jd - get_julian_day_from_jalali_date(y, 1, 1) + 1
    if days_in_years <= 186:
        m = ceil(days_in_years / 31)
    else:
        m = ceil((days_in_years - 6) / 30)
    day = jd - get_julian_day_from_jalali_date(y, m, 1) + 1
    return (y, m, day)
def MGGAN_main(opt):
    """Train MGGAN (a DCGAN variant that pairs each generated sample with
    the closest real image in feature space) and write samples and
    checkpoints to opt.outf.

    NOTE(review): the original indentation was lost; the loop nesting below
    was reconstructed from data flow — verify against the source repository.
    """
    g = Globals()
    # Fixed hyper-parameters (these overwrite whatever the caller passed).
    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    nc = (1 if opt.data.startswith('mnist') else 3)  # channels: mnist is grayscale
    opt.nz = 100      # latent vector size
    opt.ngf = 64      # generator feature width
    opt.ndf = 64      # discriminator feature width
    opt.niter = 30    # number of epochs
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''     # optional checkpoint paths to resume from
    opt.netD = ''
    opt.outf = (g.default_model_dir + 'MGGAN/')
    opt.manualSeed = None
    opt = addDataInfo(opt)
    opt.outf = ((opt.outf + opt.data) + '/')
    print_prop(opt)
    try:
        os.makedirs(opt.outf)
    except OSError:
        pass  # output directory already exists
    if os.path.exists((opt.outf + '/mark')):
        # 'mark' is written on successful completion; skip re-training.
        print('Already generated before. Now exit.')
        return
    if (opt.manualSeed is None):
        opt.manualSeed = random.randint(1, 10000)
    print('Random Seed: ', opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)
        cudnn.benchmark = True
    (dataset, dataloader) = getDataSet(opt)
    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)

    def weights_init(m):
        # Standard DCGAN init: N(0, 0.02) conv weights, N(1, 0.02) BN scale.
        classname = m.__class__.__name__
        if (classname.find('Conv') != (- 1)):
            m.weight.data.normal_(0.0, 0.02)
        elif (classname.find('BatchNorm') != (- 1)):
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)
    netG = DCGAN_G(nz, nc, ngf)
    netG.apply(weights_init)
    if (opt.netG != ''):
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)
    nloss = 200  # discriminator emits nloss sigmoid outputs per image

    class _netD(nn.Module):
        # DCGAN discriminator split into two stages so the penultimate
        # feature map can be read back via self.feature after forward().
        def __init__(self, ngpu):
            super(_netD, self).__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(nn.Conv2d(nc, ndf, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(ndf, (ndf * 2), 4, 2, 1, bias=False), nn.BatchNorm2d((ndf * 2)), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d((ndf * 2), (ndf * 4), 4, 2, 1, bias=False), nn.BatchNorm2d((ndf * 4)), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d((ndf * 4), (ndf * 8), 4, 2, 1, bias=False))
            self.main2 = nn.Sequential(nn.BatchNorm2d((ndf * 8)), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d((ndf * 8), nloss, 4, 1, 0, bias=False), nn.Sigmoid())

        def forward(self, input):
            # Cache features for the feature-matching step below.
            self.feature = self.main.forward(input)
            output = self.main2.forward(self.feature)
            return output.view((- 1), 1)
    netD = _netD(ngpu)
    netD.apply(weights_init)
    if (opt.netD != ''):
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)
    criterion = nn.BCELoss()
    # Pre-allocated tensors (legacy pre-0.4 PyTorch Variable style).
    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize, nloss)
    real_label = 1
    fake_label = 0
    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        (input, label) = (input.cuda(), label.cuda())
        (noise, fixed_noise) = (noise.cuda(), fixed_noise.cuda())
    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
    real_batch = 11   # real-image pool size in batches; shrinks over time
    grow_speed = 5    # shrink the pool by one batch every grow_speed epochs
    for epoch in range(opt.niter):
        if ((epoch % grow_speed) == 0):
            if (real_batch > 1):
                real_batch -= 1
            # Re-allocate the pool for the (possibly smaller) size and
            # start refilling it.
            real_inputs = torch.FloatTensor((real_batch * opt.batchSize), nc, opt.imageSize, opt.imageSize)
            pointer = 0
        for (i, data) in enumerate(dataloader, 0):
            (real_cpu, _) = data
            batch_size = real_cpu.size(0)
            if (batch_size < opt.batchSize):
                continue  # skip ragged last batch
            # Fill one pool slot per iteration; only train once full.
            pointer = ((pointer % real_batch) + 1)
            if (pointer < real_batch):
                real_inputs[(pointer * batch_size):((pointer + 1) * batch_size)].copy_(real_cpu)
                continue
            # Pool full: compute features of all pooled real images ...
            input.data.resize_(real_inputs.size()).copy_(real_inputs)
            netD(input)
            true_features = netD.feature.view(real_inputs.size(0), (- 1))
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            fake_features = netD.feature.view(batch_size, (- 1))
            # ... then match each fake to a pooled real image by feature
            # distance; the matched reals become D's "real" batch.
            map = solve(fake_features.data, true_features.data)
            input.data.resize_(real_cpu.size())
            for j in range(0, batch_size):
                input.data[j].copy_(real_inputs[map[j]])
            tot_mini_batch = 10
            for mini_batch in range(0, tot_mini_batch):
                # --- update D on matched reals and current fakes ---
                label.data.fill_(real_label)
                netD.zero_grad()
                output = netD(input)
                errD_real = criterion(output, label)
                errD_real.backward()
                D_x = output.data.mean()
                fake = netG(noise)
                label.data.fill_(fake_label)
                output = netD(fake.detach())
                errD_fake = criterion(output, label)
                errD_fake.backward()
                D_G_z1 = output.data.mean()
                errD = (errD_real + errD_fake)
                optimizerD.step()
                # --- update G to fool D on the same noise ---
                netG.zero_grad()
                label.data.fill_(real_label)
                output = netD(fake)
                errG = criterion(output, label)
                errG.backward()
                D_G_z2 = output.data.mean()
                optimizerG.step()
                print(('[%d/%d][%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f' % (epoch, opt.niter, i, len(dataloader), mini_batch, tot_mini_batch, errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2)))
        # End-of-epoch snapshots (placement reconstructed; see docstring).
        saveImage(real_cpu, ('%s/real_samples.png' % opt.outf))
        fake = netG(fixed_noise)
        saveImage(fake.data, ('%s/fake_samples_epoch_%03d.png' % (opt.outf, epoch)))
        torch.save(netG.state_dict(), ('%s/netG_epoch_%d.pth' % (opt.outf, epoch)))
        torch.save(netD.state_dict(), ('%s/netD_epoch_%d.pth' % (opt.outf, epoch)))
    # Mark successful completion so re-runs exit early.
    with open((opt.outf + '/mark'), 'w') as f:
        f.write('')
def get_fock(h1e, s1e, vhf, dm, cycle=(- 1), diis=None, diis_start_cycle=None, level_shift_factor=None, damp_factor=None):
    """Wrapper around the original get_fock that decays the level shift.

    During the first SCF cycles (cycle < 5) the shift is
    level_shift0 * 0.5**cycle, halving each cycle; afterwards it is
    switched off.  Note the default cycle=-1 yields 2 * level_shift0.
    All arguments are forwarded unchanged to the wrapped get_fock.
    """
    if cycle >= 5:
        level_shift_factor = 0
    else:
        level_shift_factor = level_shift0 * (0.5 ** cycle)
        print('Set level shift to %g' % level_shift_factor)
    return old_get_fock(h1e, s1e, vhf, dm, cycle, diis, diis_start_cycle, level_shift_factor, damp_factor)
class TestMPMWithMechanics(TestCase):
    """The MPM model does not implement any particle-mechanics submodel;
    every mechanics option must raise NotImplementedError."""

    def _assert_not_implemented(self, options):
        # Shared check: building an MPM with these options must fail.
        with self.assertRaises(NotImplementedError):
            pybamm.lithium_ion.MPM(options)

    def test_well_posed_negative_cracking_not_implemented(self):
        self._assert_not_implemented({'particle mechanics': ('swelling and cracking', 'none')})

    def test_well_posed_positive_cracking_not_implemented(self):
        self._assert_not_implemented({'particle mechanics': ('none', 'swelling and cracking')})

    def test_well_posed_both_cracking_not_implemented(self):
        self._assert_not_implemented({'particle mechanics': 'swelling and cracking'})

    def test_well_posed_both_swelling_only_not_implemented(self):
        self._assert_not_implemented({'particle mechanics': 'swelling only'})
def gen_br2_template(num_nops_src0, num_nops_src1, reg_src0, reg_src1, inst, src0, src1, taken):
    """Generate an assembly test template for a two-source branch instruction.

    The template executes the branch at three sites arranged so that the
    value accumulated in x3 encodes exactly which paths were followed:
    0b101010 (42) when the branch is always taken, 0b111111 (63) when it
    always falls through.

    Args:
        num_nops_src0, num_nops_src1: nops inserted after loading each source.
        reg_src0, reg_src1: register names holding the two source operands.
        inst: branch mnemonic, e.g. 'beq'.
        src0, src1: operand values moved in via mngr2proc.
        taken: whether the branch is expected to be taken.
    """
    if taken:
        control_flow_pattern = 42
    else:
        control_flow_pattern = 63
    # Allocate three globally-unique label names so multiple generated
    # templates can coexist in one assembled test.
    global gen_br2_template_id
    id_a = 'label_{}'.format((gen_br2_template_id + 1))
    id_b = 'label_{}'.format((gen_br2_template_id + 2))
    id_c = 'label_{}'.format((gen_br2_template_id + 3))
    gen_br2_template_id += 3
    # **locals() feeds reg_src0/reg_src1/inst/src0/src1/id_a/id_b/id_c and
    # control_flow_pattern into the template placeholders below.
    return "\n\n # x3 will track the control flow pattern\n addi x3, x0, 0\n\n # Move src0 value into register\n csrr {reg_src0}, mngr2proc < {src0}\n {nops_src0}\n\n # Move src1 value into register\n csrr {reg_src1}, mngr2proc < {src1}\n {nops_src1}\n\n {inst} {reg_src0}, {reg_src1}, {id_a} # br -.\n addi x3, x3, 0b000001 # |\n # |\n{id_b}: # <---+-.\n addi x3, x3, 0b000010 # | |\n # | |\n {inst} {reg_src0}, {reg_src1}, {id_c} # br -+-+-.\n addi x3, x3, 0b000100 # | | |\n # | | |\n{id_a}: # <---' | |\n addi x3, x3, 0b001000 # | |\n # | |\n {inst} {reg_src0}, {reg_src1}, {id_b} # br ---' |\n addi x3, x3, 0b010000 # |\n # |\n{id_c}: # <-------'\n addi x3, x3, 0b100000 #\n\n # Check the control flow pattern\n csrw proc2mngr, x3 > {control_flow_pattern}\n\n ".format(nops_src0=gen_nops(num_nops_src0), nops_src1=gen_nops(num_nops_src1), **locals())
class TestPywrRandomGenerator():
    """Tests for PywrRandomGenerator population seeding."""

    def test_current_model(self, two_reservoir_problem):
        # With no extra solutions the first population member is the model's
        # current configuration (all-zero decision variables).
        generator = PywrRandomGenerator(wrapper=two_reservoir_problem)
        algorithm = NSGAII(two_reservoir_problem.problem, population_size=10, generator=generator)
        algorithm.initialize()
        solution = algorithm.population[0]
        np.testing.assert_allclose(solution.variables, np.zeros(12))

    # Fixed: the decorator had lost its '@pytest.mark.' prefix (the bare
    # '.parametrize(...)' line was a syntax error).  Requires `import pytest`
    # at module level.
    @pytest.mark.parametrize('use_current', [True, False])
    def test_other_solutions(self, two_reservoir_problem, use_current):
        # Explicit seed solutions are injected after the (optional) current
        # model configuration.
        solutions = [{'control_curve': {'doubles': ([1] * 12)}}, {'control_curve': {'doubles': ([2] * 12)}}]
        generator = PywrRandomGenerator(wrapper=two_reservoir_problem, solutions=solutions, use_current=use_current)
        algorithm = NSGAII(two_reservoir_problem.problem, population_size=10, generator=generator)
        algorithm.initialize()
        if use_current:
            # Current config first, then the two supplied solutions.
            np.testing.assert_allclose(algorithm.population[0].variables, np.zeros(12))
            np.testing.assert_allclose(algorithm.population[1].variables, np.ones(12))
            np.testing.assert_allclose(algorithm.population[2].variables, (np.ones(12) * 2))
        else:
            # Supplied solutions only.
            np.testing.assert_allclose(algorithm.population[0].variables, np.ones(12))
            np.testing.assert_allclose(algorithm.population[1].variables, (np.ones(12) * 2))
class Tget_gtk_bookmarks(TestCase):
    """Tests for GTK bookmark discovery and parsing."""

    def test_main(self):
        # Every discovered bookmark path must be an fsnative string.
        for bookmark_path in get_gtk_bookmarks():
            assert isinstance(bookmark_path, fsnative)

    def test_parse(self):
        if is_windows():
            # GTK bookmarks are a freedesktop concept; nothing to parse here.
            return
        data = b'file:///foo/bar\nfile:///home/user\nfile:///home/user/Downloads Downloads\n'
        # Parsed entries (with or without a display label) must all be fsnative.
        for bookmark_path in parse_gtk_bookmarks(data):
            assert isinstance(bookmark_path, fsnative)
def export(preprocessor: Union[('PreTrainedTokenizer', 'FeatureExtractionMixin', 'ProcessorMixin')], model: Union[('PreTrainedModel', 'TFPreTrainedModel')], config: OnnxConfig, opset: int, output: Path, tokenizer: 'PreTrainedTokenizer'=None, device: str='cpu') -> Tuple[(List[str], List[str])]:
    """Export a Transformers model to ONNX, dispatching on the framework.

    The ``tokenizer`` argument is deprecated in favour of ``preprocessor``.

    Raises:
        ImportError: if neither PyTorch nor TensorFlow is installed.
        RuntimeError: for TensorFlow models when device is 'cuda'.
        ValueError: if both a non-tokenizer preprocessor and a tokenizer
            are supplied.
    """
    torch_ok = is_torch_available()
    tf_ok = is_tf_available()
    if not (torch_ok or tf_ok):
        raise ImportError('Cannot convert because neither PyTorch nor TensorFlow are not installed. Please install torch or tensorflow first.')
    if tf_ok and isinstance(model, TFPreTrainedModel) and (device == 'cuda'):
        raise RuntimeError('`tf2onnx` does not support export on CUDA device.')
    if (tokenizer is not None) and isinstance(preprocessor, PreTrainedTokenizerBase):
        raise ValueError('You cannot provide both a tokenizer and a preprocessor to export the model.')
    if tokenizer is not None:
        # Deprecated path: fall back to the tokenizer as preprocessor.
        warnings.warn('The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use `preprocessor` instead.', FutureWarning)
        logger.info('Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.')
        preprocessor = tokenizer
    if torch_ok:
        from ..utils import torch_version
        if not config.is_torch_support_available:
            logger.warning(f'Unsupported PyTorch version for this model. Minimum required is {config.torch_onnx_minimum_version}, got: {torch_version}')
    # Dispatch to the framework-specific exporter.
    if torch_ok and issubclass(type(model), PreTrainedModel):
        return export_pytorch(preprocessor, model, config, opset, output, tokenizer=tokenizer, device=device)
    elif tf_ok and issubclass(type(model), TFPreTrainedModel):
        return export_tensorflow(preprocessor, model, config, opset, output, tokenizer=tokenizer)
class MaskFormerSwinModelTester():
    """Builds MaskFormerSwin configs/inputs and checks output shapes.

    Assertions are delegated to ``parent`` (the owning TestCase).  The
    hard-coded expected shapes in the check methods assume the default
    constructor arguments below.
    """

    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-05, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=['stage1', 'stage2', 'stage3']):
        # NOTE(review): depths/num_heads/out_features are mutable (list)
        # defaults shared across instances; safe only while nothing mutates
        # them.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels only if use_labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return (config, pixel_values, labels)

    def get_config(self):
        # NOTE(review): the kwarg is 'path_norm' while the attribute is
        # 'patch_norm' — confirm the spelling against MaskFormerSwinConfig.
        return MaskFormerSwinConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features)

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last_hidden_state shape.

        Sequence length shrinks by 4x per stage after the first, hidden
        width doubles per stage — hence the exponents on len(depths) - 1.
        """
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = (((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)))
        expected_dim = int((config.embed_dim * (2 ** (len(config.depths) - 1))))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Check backbone feature maps/channels and invalid out_features."""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # One feature map per requested stage; shapes/channels below assume
        # the default tester arguments (batch 13, embed_dim 16, 3 stages).
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # 'stem' is not a valid backbone output stage.
        with self.parent.assertRaises(ValueError):
            config.out_features = ['stem']
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        # Shape the prepared inputs as (config, inputs_dict) for the shared
        # model-tester mixins.
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return (config, inputs_dict)
class MalGAN():
    """GAN that perturbs binary malware API-feature vectors to evade a black-box detector.

    A generator adds features to malware samples (it can only set bits, never
    clear them, thanks to the final Maximum layer); a substitute detector is
    trained to mimic the black-box classifier so gradients can flow to the
    generator.
    """

    def __init__(self):
        self.apifeature_dims = 74   # number of binary API features per sample
        self.z_dims = 10            # size of the generator's noise input
        self.hide_layers = 256
        self.generator_layers = [self.apifeature_dims + self.z_dims, self.hide_layers, self.apifeature_dims]
        self.substitute_detector_layers = [self.apifeature_dims, self.hide_layers, 1]
        self.blackbox = 'RF'
        optimizer = Adam(lr=0.001)
        self.blackbox_detector = self.build_blackbox_detector()
        self.substitute_detector = self.build_substitute_detector()
        self.substitute_detector.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
        self.generator = self.build_generator()
        example = Input(shape=(self.apifeature_dims,))
        noise = Input(shape=(self.z_dims,))
        gan_inputs = [example, noise]  # renamed from `input` to avoid shadowing the builtin
        malware_examples = self.generator(gan_inputs)
        # Freeze the substitute detector while training the generator through it.
        self.substitute_detector.trainable = False
        validity = self.substitute_detector(malware_examples)
        self.combined = Model(gan_inputs, validity)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)

    def build_blackbox_detector(self):
        """Return the black-box detector (currently only a random forest)."""
        # BUG FIX: was `self.blackbox is 'RF'` — identity comparison with a
        # string literal, which only works by CPython interning accident.
        if self.blackbox == 'RF':
            return RandomForestClassifier(n_estimators=50, max_depth=5, random_state=1)
        # Previously fell through and (implicitly) returned None.
        raise ValueError('Unsupported blackbox detector: %r' % (self.blackbox,))

    def build_generator(self):
        """Generator: (example, noise) -> perturbed example.

        The final Maximum with the input guarantees features can only be
        added to the original sample, never removed.
        """
        example = Input(shape=(self.apifeature_dims,))
        noise = Input(shape=(self.z_dims,))
        x = Concatenate(axis=1)([example, noise])
        for dim in self.generator_layers[1:]:
            x = Dense(dim)(x)
        x = Activation(activation='sigmoid')(x)
        x = Maximum()([example, x])
        generator = Model([example, noise], x, name='generator')
        generator.summary()
        return generator

    def build_substitute_detector(self):
        """Substitute detector: a small dense net that mimics the black box."""
        detector_input = Input(shape=(self.substitute_detector_layers[0],))
        x = detector_input
        for dim in self.substitute_detector_layers[1:]:
            x = Dense(dim)(x)
        x = Activation(activation='sigmoid')(x)
        substitute_detector = Model(detector_input, x, name='substitute_detector')
        substitute_detector.summary()
        return substitute_detector

    def load_data(self, filename):
        """Load ((xmal, ymal), (xben, yben)) arrays from an .npz archive."""
        data = load(filename)
        return (data['xmal'], data['ymal']), (data['xben'], data['yben'])

    def train(self, epochs, batch_size=32):
        """Adversarially train for `epochs` epochs.

        Per step: (1) update the substitute detector on black-box-labelled
        generated malware plus benign samples; (2) update the generator
        (through the frozen substitute) toward the "benign" label 0. After
        each epoch the black box's true-positive rate (TPR) on adversarial
        train/test malware is recorded; lower TPR means better evasion, and
        the best weights are checkpointed.
        """
        (xmal, ymal), (xben, yben) = self.load_data('mydata.npz')
        xtrain_mal, xtest_mal, ytrain_mal, ytest_mal = train_test_split(xmal, ymal, test_size=0.2)
        xtrain_ben, xtest_ben, ytrain_ben, ytest_ben = train_test_split(xben, yben, test_size=0.2)
        # Fit the black box once on all data.
        self.blackbox_detector.fit(np.concatenate([xmal, xben]), np.concatenate([ymal, yben]))
        # Benign training labels come from the black box, not ground truth.
        ytrain_ben_blackbox = self.blackbox_detector.predict(xtrain_ben)
        Original_Train_TPR = self.blackbox_detector.score(xtrain_mal, ytrain_mal)
        Original_Test_TPR = self.blackbox_detector.score(xtest_mal, ytest_mal)
        Train_TPR, Test_TPR = [Original_Train_TPR], [Original_Test_TPR]
        best_TPR = 1.0
        for epoch in range(epochs):
            for step in range(xtrain_mal.shape[0] // batch_size):
                # --- substitute detector update ---
                idx = np.random.randint(0, xtrain_mal.shape[0], batch_size)
                xmal_batch = xtrain_mal[idx]
                noise = np.random.uniform(0, 1, (batch_size, self.z_dims))
                # BUG FIX: benign indices were drawn from range(xmal_batch.shape[0])
                # == batch_size, so only the first `batch_size` benign rows were
                # ever sampled; draw from the whole benign training set instead.
                idx = np.random.randint(0, xtrain_ben.shape[0], batch_size)
                xben_batch = xtrain_ben[idx]
                yben_batch = ytrain_ben_blackbox[idx]
                gen_examples = self.generator.predict([xmal_batch, noise])
                # Binarize generated features before querying the black box.
                ymal_batch = self.blackbox_detector.predict(np.ones(gen_examples.shape) * (gen_examples > 0.5))
                d_loss_real = self.substitute_detector.train_on_batch(gen_examples, ymal_batch)
                d_loss_fake = self.substitute_detector.train_on_batch(xben_batch, yben_batch)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
                # --- generator update (target label 0 = "benign") ---
                idx = np.random.randint(0, xtrain_mal.shape[0], batch_size)
                xmal_batch = xtrain_mal[idx]
                noise = np.random.uniform(0, 1, (batch_size, self.z_dims))
                g_loss = self.combined.train_on_batch([xmal_batch, noise], np.zeros((batch_size, 1)))
            # Epoch-end evaluation of the black box on adversarial examples.
            noise = np.random.uniform(0, 1, (xtrain_mal.shape[0], self.z_dims))
            gen_examples = self.generator.predict([xtrain_mal, noise])
            TPR = self.blackbox_detector.score(np.ones(gen_examples.shape) * (gen_examples > 0.5), ytrain_mal)
            Train_TPR.append(TPR)
            noise = np.random.uniform(0, 1, (xtest_mal.shape[0], self.z_dims))
            gen_examples = self.generator.predict([xtest_mal, noise])
            TPR = self.blackbox_detector.score(np.ones(gen_examples.shape) * (gen_examples > 0.5), ytest_mal)
            Test_TPR.append(TPR)
            # Checkpoint whenever evasion improves (test TPR drops).
            if TPR < best_TPR:
                self.combined.save_weights('saves/malgan.h5')
                best_TPR = TPR
            print('%d [D loss: %f, acc.: %.2f%%] [G loss: %f]' % (epoch, d_loss[0], 100 * d_loss[1], g_loss))
        print('Original_Train_TPR: {0}, Adver_Train_TPR: {1}'.format(Original_Train_TPR, Train_TPR[-1]))
        print('Original_Test_TPR: {0}, Adver_Test_TPR: {1}'.format(Original_Test_TPR, Test_TPR[-1]))
        plt.figure()
        plt.plot(range(len(Train_TPR)), Train_TPR, c='r', label='Training Set', linewidth=2)
        plt.plot(range(len(Test_TPR)), Test_TPR, c='g', linestyle='--', label='Validation Set', linewidth=2)
        plt.xlabel('Epoch')
        plt.ylabel('TPR')
        plt.legend()
        plt.savefig('saves/Epoch_TPR.png')
        plt.show()
class BuildSourceSet():
    """Fast membership checks against the set of explicitly-given build sources."""

    def __init__(self, sources: list[BuildSource]) -> None:
        self.source_text_present = False          # any source supplied as inline text?
        self.source_modules: dict[(str, str)] = {}  # module name -> path ('' if unknown)
        self.source_paths: set[str] = set()
        for src in sources:
            if src.text is not None:
                self.source_text_present = True
            if src.path:
                self.source_paths.add(src.path)
            if src.module:
                self.source_modules[src.module] = src.path or ''

    def is_source(self, file: MypyFile) -> bool:
        """Return True if *file* was named among the build sources (by path or module)."""
        if file.path and file.path in self.source_paths:
            return True
        if file._fullname in self.source_modules:
            return True
        return self.source_text_present
class Message(Message):
    """A server/client message with a code, severity details and origin info."""

    # Defaults overridable per-instance in __init__.
    source = 'SERVER'
    code = '00000'
    message = None
    details = None
    severities = ('DEBUG', 'INFO', 'NOTICE', 'WARNING', 'ERROR', 'FATAL', 'PANIC')
    sources = ('SERVER', 'CLIENT')
    # Detail keys already rendered elsewhere; excluded from _e_metas output.
    standard_detail_coverage = frozenset(['message', 'severity', 'file', 'function', 'line'])

    _e_label = property(lambda x: getattr(x, 'details').get('severity', 'MESSAGE'))
    _e_factors = ('creator',)

    def __init__(self, message, code=None, details=None, source=None, creator=None):
        self.message = message
        # BUG FIX: `details` previously defaulted to a shared mutable `{}`;
        # every default-constructed Message aliased the same dict.
        self.details = details if details is not None else {}
        self.creator = creator
        if code is not None and self.code != code:
            self.code = code
        if source is not None and self.source != source:
            self.source = source

    def isconsistent(self, other):
        """Return True if *other* is the same kind of message with equal fields."""
        if not isinstance(other, self.__class__):
            return False
        return (
            self.code == other.code
            and self.message == other.message
            and self.details == other.details
            and self.source == other.source
        )

    def _e_metas(self, get0=itemgetter(0)):
        """Yield (label, value) pairs describing this message for element display."""
        yield (None, self.message)
        if self.code and self.code != '00000':
            yield ('CODE', self.code)
        locstr = self.location_string
        if locstr:
            yield ('LOCATION', locstr + ' from ' + self.source)
        else:
            yield ('LOCATION', self.source)
        for k, v in sorted(self.details.items(), key=get0):
            if k not in self.standard_detail_coverage:
                yield (k.upper(), str(v))

    def __repr__(self):
        return '{mod}.{typname}({message!r}{code}{details}{source}{creator})'.format(
            mod=self.__module__,
            typname=self.__class__.__name__,
            message=self.message,
            code='' if self.code == type(self).code else ', code = ' + repr(self.code),
            details='' if not self.details else ', details = ' + repr(self.details),
            source='' if self.source is None else ', source = ' + repr(self.source),
            creator='' if self.creator is None else ', creator = ' + repr(self.creator),
        )

    # NOTE(review): restored as a property — _e_metas reads
    # `self.location_string` as a plain attribute, so a bare method here
    # would have produced "bound method + str" TypeErrors.
    @property
    def location_string(self):
        """Return "File ..., line ..., in ..." built from details, or '' if absent."""
        details = self.details
        loc = [details.get(k, '?') for k in ('file', 'line', 'function')]
        return '' if loc == ['?', '?', '?'] else 'File {0!r}, line {1!s}, in {2!s}'.format(*loc)

    def emit(self, starting_point=None):
        """Walk the creator chain offering this message to each msghook.

        Returns the element whose hook swallowed the message, else falls back
        to the process-global pg_sys.msghook.
        """
        f = starting_point if starting_point is not None else self.creator
        while f is not None:
            hook = getattr(f, 'msghook', None)
            if hook is not None:
                if hook(self):
                    return f
            f = prime_factor(f)
            if f:
                f = f[1]
        pg_sys.msghook(self)
class QuestionViewSet(ModelViewSet):
    """CRUD, index and XML/format export endpoints for Question elements."""

    permission_classes = ((HasModelPermission | HasObjectPermission),)
    serializer_class = QuestionSerializer
    filter_backends = (SearchFilter, DjangoFilterBackend)
    search_fields = ('uri', 'text')
    filterset_fields = ('attribute', 'uri', 'uri_prefix', 'uri_path', 'is_collection', 'value_type', 'widget_type', 'unit', 'comment')

    def get_queryset(self):
        """Return the queryset with prefetches tuned to the current action."""
        queryset = Question.objects.all()
        if self.action in ['index']:
            return queryset
        elif self.action in ('nested', 'export', 'detail_export'):
            return queryset.prefetch_elements().select_related('attribute')
        else:
            return queryset.prefetch_related('conditions', 'optionsets', 'pages', 'questionsets', 'editors').select_related('attribute')

    # NOTE(review): the bare "(detail=...)" lines were invalid syntax —
    # mangled @action decorators; restored. Requires
    # `from rest_framework.decorators import action` at the top of the file.
    @action(detail=False)
    def index(self, request):
        """Lightweight list serialization of the (filtered) questions."""
        queryset = self.filter_queryset(self.get_queryset())
        serializer = QuestionIndexSerializer(queryset, many=True)
        return Response(serializer.data)

    @action(detail=False, url_path='export(/(?P<export_format>[a-z]+))?')
    def export(self, request, export_format='xml'):
        """Export all (filtered) questions as XML or a rendered format."""
        queryset = self.filter_queryset(self.get_queryset())
        if export_format == 'xml':
            serializer = QuestionExportSerializer(queryset, many=True)
            xml = QuestionRenderer().render(serializer.data, context=self.get_export_renderer_context(request))
            return XMLResponse(xml, name='questions')
        else:
            return render_to_format(self.request, export_format, 'questions', 'questions/export/questions.html', {'questions': queryset})

    @action(detail=True, url_path='export(/(?P<export_format>[a-z]+))?')
    def detail_export(self, request, pk=None, export_format='xml'):
        """Export a single question as XML or a rendered format."""
        if export_format == 'xml':
            serializer = QuestionExportSerializer(self.get_object())
            xml = QuestionRenderer().render([serializer.data], context=self.get_export_renderer_context(request))
            return XMLResponse(xml, name=self.get_object().uri_path)
        else:
            return render_to_format(self.request, export_format, self.get_object().uri_path, 'questions/export/questions.html', {'questions': [self.get_object()]})

    def get_export_renderer_context(self, request):
        """Map ?full / ?attributes / ?optionsets / ... query flags to renderer options."""
        full = is_truthy(request.GET.get('full'))
        return {
            'attributes': full or is_truthy(request.GET.get('attributes')),
            'optionsets': full or is_truthy(request.GET.get('optionsets')),
            'options': full or is_truthy(request.GET.get('options')),
            'conditions': full or is_truthy(request.GET.get('conditions')),
        }
def parse_basic_str_escape(src: str, pos: Pos, *, multiline: bool=False) -> Tuple[(Pos, str)]:
    """Parse one backslash escape; *pos* points at the backslash.

    Returns (position after the escape, replacement text). In multiline
    strings a "line-ending backslash" swallows whitespace through the newline
    and produces the empty string.
    """
    escape_id = src[pos:pos + 2]
    pos += 2

    if multiline and escape_id in {'\\ ', '\\\t', '\\\n'}:
        if escape_id != '\\\n':
            # Backslash followed by whitespace: only valid when the rest of
            # the line (up to the newline) is whitespace too.
            pos = skip_chars(src, pos, TOML_WS)
            try:
                char = src[pos]
            except IndexError:
                return (pos, '')
            if char != '\n':
                raise suffixed_err(src, pos, 'Unescaped "\\" in a string')
            pos += 1
        # Consume the newline plus any following whitespace/newlines.
        return (skip_chars(src, pos, TOML_WS_AND_NEWLINE), '')

    if escape_id == '\\u':
        return parse_hex_char(src, pos, 4)
    if escape_id == '\\U':
        return parse_hex_char(src, pos, 8)

    try:
        return (pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id])
    except KeyError:
        # A short escape_id means the string ended right after the backslash.
        if len(escape_id) != 2:
            raise suffixed_err(src, pos, 'Unterminated string') from None
        raise suffixed_err(src, pos, 'Unescaped "\\" in a string') from None
class NodeNG():
    """Base class for all astroid AST nodes.

    Provides source-position bookkeeping, parent/child navigation, and the
    generic inference entry point (:meth:`infer`) shared by every node type.
    """

    # Behavioural flags overridden by concrete node classes.
    is_statement: ClassVar[bool] = False
    optional_assign: ClassVar[bool] = False
    is_function: ClassVar[bool] = False
    is_lambda: ClassVar[bool] = False

    # Attribute names holding child nodes; drives get_children()/last_child().
    _astroid_fields: ClassVar[tuple[(str, ...)]] = ()
    # Non-child attributes included in __str__/repr_tree output.
    _other_fields: ClassVar[tuple[(str, ...)]] = ()
    # Extra attributes rendered only with repr_tree(ast_state=True).
    _other_other_fields: ClassVar[tuple[(str, ...)]] = ()
    # Optional plugin-supplied inference override (takes precedence in infer()).
    _explicit_inference: (InferFn[Self] | None) = None

    def __init__(
        self,
        lineno: (int | None),
        col_offset: (int | None),
        parent: (NodeNG | None),
        *,
        end_lineno: (int | None),
        end_col_offset: (int | None),
    ) -> None:
        self.lineno = lineno
        self.col_offset = col_offset
        self.parent = parent
        self.end_lineno = end_lineno
        self.end_col_offset = end_col_offset
        self.position: (Position | None) = None

    def infer(self, context: (InferenceContext | None) = None, **kwargs: Any) -> Generator[(InferenceResult, None, None)]:
        """Yield every value this node may evaluate to.

        Results are cached on the inference context, and the number of yielded
        values is bounded by the manager's ``max_inferable_values`` limit.
        """
        if context is None:
            context = InferenceContext()
        else:
            context = context.extra_context.get(self, context)
        if self._explicit_inference is not None:
            # Explicit (plugin-registered) inference wins; fall back to the
            # default machinery only when it raises UseInferenceDefault.
            try:
                for result in self._explicit_inference(self, context, **kwargs):
                    context.nodes_inferred += 1
                    yield result
                return
            except UseInferenceDefault:
                pass
        key = (self, context.lookupname, context.callcontext, context.boundnode)
        if key in context.inferred:
            yield from context.inferred[key]
            return
        results = []
        # Cap the number of inferred values to keep inference bounded.
        limit = AstroidManager().max_inferable_values
        for i, result in enumerate(self._infer(context=context, **kwargs)):
            if i >= limit or context.nodes_inferred > context.max_inferred:
                results.append(util.Uninferable)
                yield util.Uninferable
                break
            results.append(result)
            yield result
            context.nodes_inferred += 1
        context.inferred[key] = tuple(results)
        return

    def repr_name(self) -> str:
        """Name of this node for repr purposes ('' when the name is a child node)."""
        if all(name not in self._astroid_fields for name in ('name', 'attrname')):
            return getattr(self, 'name', '') or getattr(self, 'attrname', '')
        return ''

    def __str__(self) -> str:
        rname = self.repr_name()
        cname = type(self).__name__
        if rname:
            string = '%(cname)s.%(rname)s(%(fields)s)'
            alignment = len(cname) + len(rname) + 2
        else:
            string = '%(cname)s(%(fields)s)'
            alignment = len(cname) + 1
        result = []
        for field in self._other_fields + self._astroid_fields:
            value = getattr(self, field, 'Unknown')
            width = 80 - len(field) - alignment
            lines = pprint.pformat(value, indent=2, width=width).splitlines(True)
            inner = [lines[0]]
            for line in lines[1:]:
                # Continuation lines are aligned under the field value.
                inner.append(' ' * alignment + line)
            result.append(f"{field}={''.join(inner)}")
        return string % {
            'cname': cname,
            'rname': rname,
            'fields': (',\n' + ' ' * alignment).join(result),
        }

    def __repr__(self) -> str:
        rname = self.repr_name()
        # The node may not be fully built yet, in which case lineno is absent.
        try:
            lineno = self.fromlineno
        except AttributeError:
            lineno = 0
        if rname:
            string = '<%(cname)s.%(rname)s l.%(lineno)s at 0x%(id)x>'
        else:
            string = '<%(cname)s l.%(lineno)s at 0x%(id)x>'
        return string % {
            'cname': type(self).__name__,
            'rname': rname,
            'lineno': lineno,
            'id': id(self),
        }

    def accept(self, visitor: AsStringVisitor) -> str:
        """Dispatch to the visitor method named after this node's class."""
        func = getattr(visitor, 'visit_' + self.__class__.__name__.lower())
        return func(self)

    def get_children(self) -> Iterator[NodeNG]:
        """Yield the direct child nodes, in ``_astroid_fields`` order."""
        for field in self._astroid_fields:
            attr = getattr(self, field)
            if attr is None:
                continue
            if isinstance(attr, (list, tuple)):
                yield from attr
            else:
                yield attr
        # Keep this a generator even when there are no fields.
        yield from ()

    def last_child(self) -> (NodeNG | None):
        """Return the rightmost child node, or None."""
        for field in self._astroid_fields[::-1]:
            attr = getattr(self, field)
            if not attr:
                continue
            if isinstance(attr, (list, tuple)):
                return attr[-1]
            return attr
        return None

    def node_ancestors(self) -> Iterator[NodeNG]:
        """Yield parents, grandparents, ... up to the module root."""
        parent = self.parent
        while parent is not None:
            yield parent
            parent = parent.parent

    def parent_of(self, node) -> bool:
        """Return True if this node is an ancestor of *node*."""
        return any(self is parent for parent in node.node_ancestors())

    def statement(self, *, future: Literal[(None, True)] = None) -> nodes.Statement:
        """Return the first enclosing statement node (possibly self)."""
        if future is not None:
            warnings.warn('The future arg will be removed in astroid 4.0.', DeprecationWarning, stacklevel=2)
        if self.is_statement:
            return cast('nodes.Statement', self)
        if not self.parent:
            raise StatementMissing(target=self)
        return self.parent.statement()

    def frame(self, *, future: Literal[(None, True)] = None) -> (nodes.FunctionDef | nodes.Module | nodes.ClassDef | nodes.Lambda):
        """Return the first enclosing frame (function, module, class or lambda)."""
        if future is not None:
            warnings.warn('The future arg will be removed in astroid 4.0.', DeprecationWarning, stacklevel=2)
        if self.parent is None:
            raise ParentMissingError(target=self)
        return self.parent.frame(future=future)

    def scope(self) -> nodes.LocalsDictNodeNG:
        """Return the first enclosing node that defines a new scope."""
        if not self.parent:
            raise ParentMissingError(target=self)
        return self.parent.scope()

    def root(self) -> nodes.Module:
        """Return the enclosing Module node (or self when parentless)."""
        if not (parent := self.parent):
            return self
        while parent.parent:
            parent = parent.parent
        return parent

    def child_sequence(self, child):
        """Return the field sequence containing *child* (a 1-list for scalar fields)."""
        for field in self._astroid_fields:
            node_or_sequence = getattr(self, field)
            if node_or_sequence is child:
                return [node_or_sequence]
            if isinstance(node_or_sequence, (tuple, list)) and child in node_or_sequence:
                return node_or_sequence
        msg = "Could not find %s in %s's children"
        raise AstroidError(msg % (repr(child), repr(self)))

    def locate_child(self, child):
        """Return ``(field_name, node_or_sequence)`` locating *child* in this node."""
        for field in self._astroid_fields:
            node_or_sequence = getattr(self, field)
            if child is node_or_sequence:
                return (field, child)
            if isinstance(node_or_sequence, (tuple, list)) and child in node_or_sequence:
                return (field, node_or_sequence)
        msg = "Could not find %s in %s's children"
        raise AstroidError(msg % (repr(child), repr(self)))

    def next_sibling(self):
        """Return the next sibling statement, if any."""
        return self.parent.next_sibling()

    def previous_sibling(self):
        """Return the previous sibling statement, if any."""
        return self.parent.previous_sibling()

    # NOTE(review): the bare `_property` residue lines below were mangled
    # decorators (a NameError at class creation as written); restored as
    # plain properties. Upstream may use a cached property — behaviorally
    # equivalent here, minus memoization.
    @property
    def fromlineno(self) -> int:
        """First source line of this node (computed from children when unset)."""
        if self.lineno is None:
            return self._fixed_source_line()
        return self.lineno

    @property
    def tolineno(self) -> int:
        """Last source line covered by this node."""
        if self.end_lineno is not None:
            return self.end_lineno
        if not self._astroid_fields:
            last_child = None
        else:
            last_child = self.last_child()
        if last_child is None:
            return self.fromlineno
        return last_child.tolineno

    def _fixed_source_line(self) -> int:
        """Best-effort line number: first descendant's, else nearest ancestor's."""
        line = self.lineno
        _node = self
        try:
            while line is None:
                _node = next(_node.get_children())
                line = _node.lineno
        except StopIteration:
            parent = self.parent
            while parent and line is None:
                line = parent.lineno
                parent = parent.parent
        return line or 0

    def block_range(self, lineno: int) -> tuple[(int, int)]:
        """Line range of this node's block, relative to *lineno*."""
        return (lineno, self.tolineno)

    def set_local(self, name: str, stmt: NodeNG) -> None:
        """Delegate recording of a local name to the enclosing scope node."""
        assert self.parent
        self.parent.set_local(name, stmt)

    # NOTE(review): the four stubs below look like typing @overload
    # declarations whose decorators were stripped. They are harmless at
    # runtime (each def is immediately overwritten by the next); restore
    # @overload if `typing.overload` is imported at the top of the file.
    def nodes_of_class(self, klass: type[_NodesT], skip_klass: SkipKlassT = ...) -> Iterator[_NodesT]:
        ...

    def nodes_of_class(self, klass: tuple[(type[_NodesT], type[_NodesT2])], skip_klass: SkipKlassT = ...) -> (Iterator[_NodesT] | Iterator[_NodesT2]):
        ...

    def nodes_of_class(self, klass: tuple[(type[_NodesT], type[_NodesT2], type[_NodesT3])], skip_klass: SkipKlassT = ...) -> (Iterator[_NodesT] | Iterator[_NodesT2] | Iterator[_NodesT3]):
        ...

    def nodes_of_class(self, klass: tuple[(type[_NodesT], ...)], skip_klass: SkipKlassT = ...) -> Iterator[_NodesT]:
        ...

    def nodes_of_class(self, klass: (type[_NodesT] | tuple[(type[_NodesT], type[_NodesT2])] | tuple[(type[_NodesT], type[_NodesT2], type[_NodesT3])] | tuple[(type[_NodesT], ...)]), skip_klass: SkipKlassT = None) -> (Iterator[_NodesT] | Iterator[_NodesT2] | Iterator[_NodesT3]):
        """Recursively yield descendants (and self) that are instances of *klass*,
        skipping subtrees rooted at instances of *skip_klass*."""
        if isinstance(self, klass):
            yield self
        if skip_klass is None:
            for child_node in self.get_children():
                yield from child_node.nodes_of_class(klass, skip_klass)
            return
        for child_node in self.get_children():
            if isinstance(child_node, skip_klass):
                continue
            yield from child_node.nodes_of_class(klass, skip_klass)

    @property
    def _assign_nodes_in_scope(self) -> list[nodes.Assign]:
        # Overridden by scope nodes that actually track assignments.
        return []

    def _get_name_nodes(self):
        for child_node in self.get_children():
            yield from child_node._get_name_nodes()

    def _get_return_nodes_skip_functions(self):
        yield from ()

    def _get_yield_nodes_skip_functions(self):
        yield from ()

    def _get_yield_nodes_skip_lambdas(self):
        yield from ()

    def _infer_name(self, frame, name):
        # No-op by default; overridden by nodes that bind names.
        pass

    def _infer(self, context: (InferenceContext | None) = None, **kwargs: Any) -> Generator[(InferenceResult, None, (InferenceErrorInfo | None))]:
        raise InferenceError('No inference function for {node!r}.', node=self, context=context)

    def inferred(self):
        """Return all inferred values as a list (see :meth:`infer`)."""
        return list(self.infer())

    def instantiate_class(self):
        # Only ClassDef instantiates; every other node is its own "instance".
        return self

    def has_base(self, node) -> bool:
        return False

    def callable(self) -> bool:
        return False

    def eq(self, value) -> bool:
        return False

    def as_string(self) -> str:
        """Render this subtree back to source code."""
        return AsStringVisitor()(self)

    def repr_tree(self, ids=False, include_linenos=False, ast_state=False, indent='   ', max_depth=0, max_width=80) -> str:
        """Return a pretty multi-line representation of the subtree rooted here."""
        # NOTE(review): the inner dispatch decorators were mangled in the
        # original source (bare `_singledispatch` / `_repr_tree.register(...)`
        # statements); restored with a local import.
        from functools import singledispatch

        @singledispatch
        def _repr_tree(node, result, done, cur_indent='', depth=1):
            # Fallback for anything that is neither a NodeNG nor a sequence.
            lines = pprint.pformat(node, width=max(max_width - len(cur_indent), 1)).splitlines(True)
            result.append(lines[0])
            result.extend([cur_indent + line for line in lines[1:]])
            return len(lines) != 1

        @_repr_tree.register(tuple)
        @_repr_tree.register(list)
        def _repr_seq(node, result, done, cur_indent='', depth=1):
            cur_indent += indent
            result.append('[')
            if not node:
                broken = False
            elif len(node) == 1:
                broken = _repr_tree(node[0], result, done, cur_indent, depth)
            elif len(node) == 2:
                broken = _repr_tree(node[0], result, done, cur_indent, depth)
                if not broken:
                    result.append(', ')
                else:
                    result.append(',\n')
                    result.append(cur_indent)
                broken = _repr_tree(node[1], result, done, cur_indent, depth) or broken
            else:
                result.append('\n')
                result.append(cur_indent)
                for child in node[:-1]:
                    _repr_tree(child, result, done, cur_indent, depth)
                    result.append(',\n')
                    result.append(cur_indent)
                _repr_tree(node[-1], result, done, cur_indent, depth)
                broken = True
            result.append(']')
            return broken

        @_repr_tree.register(NodeNG)
        def _repr_node(node, result, done, cur_indent='', depth=1):
            if node in done:
                result.append(indent + f'<Recursion on {type(node).__name__} with id={id(node)}')
                return False
            done.add(node)
            if max_depth and depth > max_depth:
                result.append('...')
                return False
            depth += 1
            cur_indent += indent
            if ids:
                result.append(f'{type(node).__name__}<0x{id(node):x}>(\n')
            else:
                result.append(f'{type(node).__name__}(')
            fields = []
            if include_linenos:
                fields.extend(('lineno', 'col_offset'))
            fields.extend(node._other_fields)
            fields.extend(node._astroid_fields)
            if ast_state:
                fields.extend(node._other_other_fields)
            if not fields:
                broken = False
            elif len(fields) == 1:
                result.append(f'{fields[0]}=')
                broken = _repr_tree(getattr(node, fields[0]), result, done, cur_indent, depth)
            else:
                result.append('\n')
                result.append(cur_indent)
                for field in fields[:-1]:
                    # 'doc' is deliberately excluded from the listing.
                    if field == 'doc':
                        continue
                    result.append(f'{field}=')
                    _repr_tree(getattr(node, field), result, done, cur_indent, depth)
                    result.append(',\n')
                    result.append(cur_indent)
                result.append(f'{fields[-1]}=')
                _repr_tree(getattr(node, fields[-1]), result, done, cur_indent, depth)
                broken = True
            result.append(')')
            return broken

        result: list[str] = []
        _repr_tree(self, result, set())
        return ''.join(result)

    def bool_value(self, context: (InferenceContext | None) = None):
        """Inferred boolean value of this node; Uninferable by default."""
        return util.Uninferable

    def op_precedence(self):
        return OP_PRECEDENCE.get(self.__class__.__name__, len(OP_PRECEDENCE))

    def op_left_associative(self) -> bool:
        return True
# NOTE(review): the bare ".parametrize(...)" line was a mangled decorator
# (invalid syntax); restored as pytest.mark.parametrize.
@pytest.mark.parametrize('env_name', ML1.ENV_NAMES)
def test_all_ml1(env_name):
    """ML1 smoke test: tasks are unique, and setting a task then stepping does
    not perturb the sampled object/goal positions; goal vectors are globally
    unique across the train and test splits."""

    def check_determinism(env_instances, tasks):
        # Setting a task and stepping must leave obj_init_pos/_target_pos fixed.
        for task in tasks:
            env = env_instances[task.env_name]
            env.set_task(task)
            env.reset()
            assert env.random_init == True
            old_obj_init = env.obj_init_pos
            old_target_pos = env._target_pos
            step_env(env, max_path_length=STEPS, render=False)
            assert np.all(np.allclose(old_obj_init, env.obj_init_pos))
            assert np.all(np.allclose(old_target_pos, env._target_pos))
        for env in env_instances.values():
            env.close()

    ml1 = ML1(env_name)
    # (inner comprehension variable renamed: it used to shadow the test's
    # `env_name` parameter)
    train_env_instances = {name: env_cls() for name, env_cls in ml1.train_classes.items()}
    train_env_rand_vecs = check_tasks_unique(ml1.train_tasks, ml1._train_classes.keys())
    check_determinism(train_env_instances, ml1.train_tasks)
    del train_env_instances

    test_env_instances = {name: env_cls() for name, env_cls in ml1.test_classes.items()}
    test_env_rand_vecs = check_tasks_unique(ml1.test_tasks, ml1._test_classes.keys())
    check_determinism(test_env_instances, ml1.test_tasks)

    # Goal vectors must be unique across train AND test combined.
    train_test_rand_vecs = set()
    for rand_vecs in (*train_env_rand_vecs.values(), *test_env_rand_vecs.values()):
        for rand_vec in rand_vecs:
            train_test_rand_vecs.add(tuple(rand_vec))
    assert len(train_test_rand_vecs) == (len(ml1.test_classes.keys()) + len(ml1.train_classes.keys())) * metaworld._N_GOALS
    del test_env_instances
class CTRLModelTester():
    """Builds tiny CTRL configs/inputs and runs shape/loss checks for the test suite."""

    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent  # the unittest.TestCase driving the assertions
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # CTRL uses the last vocab id as its padding token.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, head_mask, token_type_ids,
        mc_token_ids, sequence_labels, token_labels, choice_labels) with random
        contents; optional items are None when the matching use_* flag is off."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels)

    def get_config(self):
        """Build a small CTRLConfig matching the tester's dimensions."""
        return CTRLConfig(vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id)

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Check base-model output shape and past_key_values length."""
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Check LM-head loss is a scalar and logits have vocab-sized last dim."""
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        """Reduce the full input tuple to the common-tester inputs dict."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
        return (config, inputs_dict)

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """Check sequence-classification logits shape.

        BUG FIX: `input_mask` was missing from this signature; callers that
        positionally unpack prepare_config_and_inputs() would have bound
        head_mask to the attention mask (and shifted every later argument).
        Now consistent with the sibling create_and_check_* methods.
        """
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def _clean_header(header):
    """Normalize a header spec (string or dict) into a schema-style dict.

    Python types are mapped through PY_TYPES; a one-element list/tuple becomes
    an 'array' with typed items; objects exposing __schema__ are merged in.
    """
    if isinstance(header, str):
        header = {'description': header}
    typedef = header.get('type', 'string')

    is_scalar_pytype = isinstance(typedef, Hashable) and typedef in PY_TYPES
    is_single_item_seq = (
        isinstance(typedef, (list, tuple))
        and len(typedef) == 1
        and typedef[0] in PY_TYPES
    )

    if is_scalar_pytype:
        header['type'] = PY_TYPES[typedef]
    elif is_single_item_seq:
        header['type'] = 'array'
        header['items'] = {'type': PY_TYPES[typedef[0]]}
    elif hasattr(typedef, '__schema__'):
        header.update(typedef.__schema__)
    else:
        header['type'] = typedef
    return not_none(header)
# NOTE(review): the @dataclass decorator was missing — without it the
# `field(...)` below is a bare Field object, not a default; restored.
# Requires `from dataclasses import dataclass, field` at the top of the file.
@dataclass
class CompositeOptimizerConfig(FairseqDataclass):
    """Config mapping each parameter-group name to its own optimizer/scheduler."""

    groups: Dict[(str, OptimizerAndSchedulerConfig)] = field(default_factory=(lambda : {}), metadata={'help': 'optimizer name -> optimizer OptimizerAndSchedulerConfig. Configures a different optimizer and (optionally) lr scheduler for each parameter group'})
def test_no_missing_resource_types():
    """Every Qt ResourceType enum member must have an entry in the interceptor map."""
    intercepted = set(interceptor.RequestInterceptor()._resource_types.keys())
    enum_members = testutils.enum_members(QWebEngineUrlRequestInfo, QWebEngineUrlRequestInfo.ResourceType)
    assert set(enum_members.values()) == intercepted
def get_ytplayer_js(html: str) -> Any:
    """Extract the base.js player URL from a watch-page HTML document.

    Raises RegexMatchError when no known URL pattern matches.
    """
    js_url_patterns = ['(/s/player/[\\w\\d]+/[\\w\\d_/.]+/base\\.js)']
    for pattern in js_url_patterns:
        match = re.search(pattern, html)
        if match is None:
            continue
        logger.debug('finished regex search, matched: %s', pattern)
        return match.group(1)
    raise RegexMatchError(caller='get_ytplayer_js', pattern='js_url_patterns')
class FullyConnectedContextMerge(SequenceMapperWithContext):
    """Merge a per-timestep sequence with a fixed context vector via dense projections.

    The context projection is broadcast over time; optionally an elementwise
    sequence*context product is projected and added as well.
    """

    def __init__(self, output_size, init='glorot_uniform', activation='tanh', use_dots=False, keep_probs=1, context_keep_probs=1):
        self.output_size = output_size
        self.activation = activation
        self.init = init
        self.context_keep_probs = context_keep_probs
        self.keep_probs = keep_probs
        self.use_dots = use_dots

    def apply(self, is_train, x, c, mask=None, context_mask=None):
        x = dropout(x, self.keep_probs, is_train)
        c = dropout(c, self.context_keep_probs, is_train)
        initializer = get_keras_initialization(self.init)
        seq_weights = tf.get_variable('merge_x_weights', (x.shape.as_list()[-1], self.output_size), initializer=initializer)
        ctx_weights = tf.get_variable('merge_context_weights', (c.shape.as_list()[-1], self.output_size), initializer=initializer)
        # Project the (batch, time, d) sequence and broadcast the projected
        # context across the time axis.
        merged = tf.tensordot(x, seq_weights, axes=[[2], [0]]) + tf.expand_dims(tf.matmul(c, ctx_weights), 1)
        if self.use_dots:
            elementwise = tf.einsum('aij,aj->aij', x, c)
            dot_weights = tf.get_variable('dot_weights', (c.shape.as_list()[-1], self.output_size), initializer=initializer)
            merged += tf.tensordot(elementwise, dot_weights, axes=[[2], [0]])
        bias = tf.get_variable('merge_bias', (1, 1, self.output_size))
        merged += bias
        return get_keras_activation(self.activation)(merged)
class TransformerPredictor(nn.Module):
    """Decode a rally with a transformer and emit per-step area and shot-type logits.

    The target player's embedding is added to the decoder output before the
    two linear (bias-free) decoding heads.
    """

    def __init__(self, config):
        super(TransformerPredictor, self).__init__()
        self.transformer_decoder = TransformerDecoder(config)
        self.area_decoder = nn.Sequential(nn.Linear(config['encode_dim'], config['area_num'], bias=False))
        self.shot_decoder = nn.Sequential(nn.Linear(config['encode_dim'], config['shot_num'], bias=False))
        self.player_embedding = PlayerEmbedding(config['player_num'], config['player_dim'])

    def forward(self, input_shot, input_x, input_y, input_player, encoder_output, target_player, return_attns=False):
        decoded = self.transformer_decoder(input_shot, input_x, input_y, input_player, encoder_output, return_attns=return_attns)
        attention_lists = []
        if return_attns:
            # Decoder returns (output, self-attn list, cross-attn list).
            decoded, *attention_lists = decoded
        decoded = decoded + self.player_embedding(target_player)
        area_logits = self.area_decoder(decoded)
        shot_logits = self.shot_decoder(decoded)
        if return_attns:
            return (area_logits, shot_logits, *attention_lists)
        return (area_logits, shot_logits)
# NOTE(review): the bare residue "_node(_all_prototype_parents,
# _prototype_load_select)" above this def was a stripped decorator; restored
# as Evennia's EvMenu list-node decorator — confirm the decorator name
# against this file's imports.
@list_node(_all_prototype_parents, _prototype_load_select)
def node_prototype_load(caller, **kwargs):
    """OLC menu node: pick a stored prototype to load into the editor."""
    text = '\n Select a prototype to load. This will replace any prototype currently being edited!\n '
    _set_actioninfo(caller, _format_list_actions('examine', 'delete'))
    helptext = '\n Loading a prototype will load it and return you to the main index. It can be a good idea\n to examine the prototype before loading it.\n '
    text = (text, helptext)
    options = _wizard_options('prototype_load', 'index', None)
    options.append({'key': '_default', 'goto': _prototype_load_actions})
    return (text, options)
def average_models(model_files, fp32=False):
    """Average the parameters of several checkpoints.

    Args:
        model_files: paths to torch-saved checkpoints, each a dict with
            'model', 'generator', 'vocab' and 'opt' entries.
        fp32: if True, promote weights to float32 before averaging
            (for half-precision checkpoints).

    Returns:
        A checkpoint-shaped dict with averaged 'model'/'generator' weights,
        the first file's 'vocab'/'opt', and 'optim' set to None.
    """
    vocab = None
    opt = None
    avg_model = None
    avg_generator = None
    for i, model_file in enumerate(model_files):
        checkpoint = torch.load(model_file, map_location='cpu')
        model_weights = checkpoint['model']
        generator_weights = checkpoint['generator']
        if fp32:
            model_weights = {k: v.float() for k, v in model_weights.items()}
            generator_weights = {k: v.float() for k, v in generator_weights.items()}
        if i == 0:
            # Keep metadata from the first checkpoint only.
            vocab, opt = checkpoint['vocab'], checkpoint['opt']
            avg_model = model_weights
            avg_generator = generator_weights
        else:
            # In-place running mean: new_avg = (avg * i + w) / (i + 1).
            for k in avg_model:
                avg_model[k].mul_(i).add_(model_weights[k]).div_(i + 1)
            for k in avg_generator:
                avg_generator[k].mul_(i).add_(generator_weights[k]).div_(i + 1)
    return {'vocab': vocab, 'opt': opt, 'optim': None, 'generator': avg_generator, 'model': avg_model}
def test_povm():
    """Unambiguous discrimination of |0> and |+> via a 3-outcome POVM.

    E_1 detects "not |0>", E_2 detects "not |+>", E_3 is the inconclusive
    remainder; probabilities are checked for kets and density matrices.
    """
    coeff = sqrt(2) / (1 + sqrt(2))
    E_1 = coeff * ket2dm(basis(2, 1))
    E_2 = coeff * ket2dm((basis(2, 0) - basis(2, 1)) / sqrt(2))
    E_3 = identity(2) - E_1 - E_2
    # Measurement operators are the square roots of the POVM effects.
    M = [E.sqrtm() for E in (E_1, E_2, E_3)]

    ket1 = basis(2, 0)
    ket2 = (basis(2, 0) + basis(2, 1)) / sqrt(2)
    cases = [
        (ket1, [0, 0.293, 0.707]),
        (ket2, [0.293, 0, 0.707]),
        (ket2dm(ket1), [0, 0.293, 0.707]),
        (ket2dm(ket2), [0.293, 0, 0.707]),
    ]
    for state, expected in cases:
        _, probabilities = measurement_statistics_povm(state, M)
        np.testing.assert_allclose(probabilities, expected, atol=0.001)
class InsetMaps(Maps):
    """A Maps subclass drawn as a small inset axes on top of a parent map.

    The inset shows a region of the parent map (an ellipse, rectangle or
    geodesic circle around ``xy``) and can draw an extent indicator on the
    parent map plus connector lines between the two.
    """

    def __init__(self, parent, crs=4326, layer=None, xy=(45, 45), xy_crs=4326, radius=5, radius_crs=None, plot_position=(0.5, 0.5), plot_size=0.5, shape='ellipses', indicate_extent=True, indicator_line=False, boundary=True, background_color='w', **kwargs):
        """Create an inset map attached to ``parent``.

        Parameters
        ----------
        parent : Maps
            The map this inset belongs to (held via a proxy reference).
        crs : any
            Projection of the inset map itself.
        layer : str or None
            Layer name; defaults to the parent's layer.
        xy, xy_crs : tuple, any
            Center of the shown extent and the crs those coordinates are in.
        radius, radius_crs :
            Size of the shown extent; for 'geod_circles' the radius is in
            meters and radius_crs must stay None.
        plot_position, plot_size :
            Center and size of the inset axes in figure coordinates.
        shape : str
            One of 'ellipses', 'rectangles', 'geod_circles'.
        indicate_extent, indicator_line, boundary, background_color :
            Styling switches; dicts update the default style kwargs.
        """
        self._parent_m = self._proxy(parent)
        self._indicators = []
        if (layer is None):
            # Inherit the parent's layer unless one is given explicitly.
            layer = self._parent_m.layer
        possible_shapes = ['ellipses', 'rectangles', 'geod_circles']
        assert (shape in possible_shapes), f'EOmaps: the inset shape can only be one of {possible_shapes}'
        if (shape == 'geod_circles'):
            assert (radius_crs is None), ("EOmaps: Using 'radius_crs' is not possible if 'geod_circles' is " + 'used as shape! (the radius for `geod_circles` is always in meters!)')
        if (radius_crs is None):
            radius_crs = xy_crs
        # Default styling for the extent indicator and the connector lines.
        self._extent_kwargs = dict(ec='r', lw=1, fc='none')
        self._line_kwargs = dict(c='r', lw=2)
        boundary_kwargs = dict(ec='r', lw=2)
        if isinstance(boundary, dict):
            assert (len(set(boundary.keys()).difference({'ec', 'lw'})) == 0), "EOmaps: only 'ec' and 'lw' keys are allowed for the 'boundary' dict!"
            boundary_kwargs.update(boundary)
            # Keep indicator / line colors in sync with the boundary color.
            self._extent_kwargs['ec'] = boundary['ec']
            self._line_kwargs['c'] = boundary['ec']
        elif isinstance(boundary, (str, tuple)):
            # A bare color spec is treated as the boundary edge color.
            boundary_kwargs.update({'ec': boundary})
            self._extent_kwargs['ec'] = boundary
            self._line_kwargs['c'] = boundary
        if isinstance(indicate_extent, dict):
            self._extent_kwargs.update(indicate_extent)
        if isinstance(indicator_line, dict):
            self._line_kwargs.update(indicator_line)
        (x, y) = xy
        (plot_x, plot_y) = plot_position
        # plot_position is the CENTER; convert to the lower-left corner used
        # by the (left, bottom, width, height) axes rect.
        left = (plot_x - (plot_size / 2))
        bottom = (plot_y - (plot_size / 2))
        super().__init__(crs=crs, f=self._parent_m.f, ax=(left, bottom, plot_size, plot_size), layer=layer, **kwargs)
        # Draw the inset on top of everything else.
        self.ax.set_zorder(99999)
        (bnd, bnd_verts) = self._get_inset_boundary(x, y, xy_crs, radius, radius_crs, shape)
        self.ax.set_boundary(bnd)
        ((x0, y0), (x1, y1)) = (bnd_verts.min(axis=0), bnd_verts.max(axis=0))
        self.ax.set_extent((x0, x1, y0, y1), crs=self.ax.projection)
        if (boundary is not False):
            spine = self.ax.spines['geo']
            spine.set_edgecolor(boundary_kwargs['ec'])
            spine.set_lw(boundary_kwargs['lw'])
        # Remember the construction parameters (used by indicators).
        self._inset_props = dict(xy=xy, xy_crs=xy_crs, radius=radius, radius_crs=radius_crs, shape=shape)
        if (indicate_extent is not False):
            self.add_extent_indicator(self._parent_m, **self._extent_kwargs)
        self._indicator_lines = []
        if (indicator_line is not False):
            self.add_indicator_line(**self._line_kwargs)
        if (background_color is not None):
            self._bg_patch = self._add_background_patch(color=background_color, layer=self.layer)
        else:
            self._bg_patch = None
        # Refresh indicators whenever the background is re-fetched (blitting).
        self.BM._before_fetch_bg_actions.append(self._update_indicator)

    def _get_spine_verts(self):
        """Return the inset's spine vertices as lon/lat coordinates."""
        s = self.ax.spines['geo']
        s._adjust_location()
        verts = s.get_verts()
        # Spine verts are in display coords; go display -> data -> lon/lat.
        verts = self.ax.transData.inverted().transform(s.get_verts())
        verts = np.column_stack(self._transf_plot_to_lonlat.transform(*verts.T))
        return verts

    def _update_indicator(self, *args, **kwargs):
        """Redraw all extent-indicator polygons on their target maps."""
        from matplotlib.patches import Polygon
        if (not hasattr(self, '_patches')):
            self._patches = set()
        # Remove previously drawn indicator patches before re-adding them.
        while (len(self._patches) > 0):
            patch = self._patches.pop()
            self.BM.remove_bg_artist(patch, draw=False)
            try:
                patch.remove()
            except ValueError:
                # Patch was already detached from its axes.
                pass
        verts = self._get_spine_verts()
        for (m, kwargs) in self._indicators:
            # Project the inset outline into the target map's plot crs.
            verts_t = np.column_stack(m._transf_lonlat_to_plot.transform(*verts.T))
            p = Polygon(verts_t, **kwargs)
            p.set_label('__EOmaps_deactivated InsetMap indicator')
            art = m.ax.add_patch(p)
            self.BM.add_bg_artist(art, layer=m.layer, draw=False)
            self._patches.add(art)

    def _add_background_patch(self, color, layer='all'):
        """Fill the whole inset axes with ``color`` behind all other artists."""
        (art,) = self.ax.fill([0, 0, 1, 1], [0, 1, 1, 0], fc=color, ec='none', zorder=(- 9999), transform=self.ax.transAxes)
        art.set_label('Inset map background patch')
        self.BM.add_bg_artist(art, layer=layer)
        return art

    def _handle_spines(self):
        """Register the inset spine on the dedicated inset-spines layer once."""
        spine = self.ax.spines['geo']
        if (spine not in self.BM._bg_artists.get('__inset___SPINES__', [])):
            self.BM.add_bg_artist(spine, layer='__inset___SPINES__')

    def _get_ax_label(self):
        # Identifies this axes type within the Maps machinery.
        return 'inset_map'

    def plot_map(self, *args, **kwargs):
        """Plot data on the inset; ``set_extent`` defaults to False so the
        inset keeps the extent chosen at construction time."""
        set_extent = kwargs.pop('set_extent', False)
        super().plot_map(*args, **kwargs, set_extent=set_extent)
    # NOTE(review): bare call below executes at class-definition time; it
    # looks like a decorator that lost its '@' during extraction — confirm
    # against the original source before changing.
    _deprecated('Use `add_extent_indicator` instead!')

    def indicate_inset_extent(self, *args, **kwargs):
        """Deprecated alias for :meth:`add_extent_indicator`."""
        return self.add_extent_indicator(*args, **kwargs)

    def add_extent_indicator(self, m=None, n=100, **kwargs):
        """Draw a polygon on map ``m`` (default: the parent) showing the
        area covered by this inset."""
        if (m is None):
            m = self._parent_m
        defaultargs = {**self._extent_kwargs}
        defaultargs.setdefault('zorder', 9999)
        defaultargs.update(kwargs)
        # Ensure sensible face/edge/linewidth defaults without overriding
        # explicitly provided aliases (fc/facecolor etc.).
        if (not any(((i in defaultargs) for i in ['fc', 'facecolor']))):
            defaultargs['fc'] = 'none'
        if (not any(((i in defaultargs) for i in ['ec', 'edgecolor']))):
            defaultargs['ec'] = 'r'
        if (not any(((i in defaultargs) for i in ['lw', 'linewidth']))):
            defaultargs['lw'] = 1
        self._indicators.append((m, defaultargs))
        self._update_indicator()

    def add_indicator_line(self, m=None, **kwargs):
        """Add a line connecting the inset boundary with the indicated
        extent on map ``m`` (default: the parent)."""
        if (m is None):
            m = self._parent_m
        defaultargs = {**self._line_kwargs}
        defaultargs.setdefault('c', 'r')
        defaultargs.setdefault('lw', 2)
        defaultargs.setdefault('zorder', 99999)
        defaultargs.update(kwargs)
        # Endpoints are updated later by _update_indicator_lines.
        l = plt.Line2D([0, 0], [1, 1], transform=self.f.transFigure, **defaultargs)
        # NOTE(review): uses self._parent (presumably provided by the Maps
        # base class) rather than self._parent_m — confirm intended.
        l = self._parent.ax.add_artist(l)
        l.set_clip_on(False)
        self.BM.add_bg_artist(l, self.layer, draw=False)
        self._indicator_lines.append((l, m))
        if isinstance(m, InsetMaps):
            # When the target is itself an inset, clip the second line to
            # that inset's (non-rectangular) boundary patch.
            from matplotlib.transforms import TransformedPath
            clip_path = TransformedPath(m.ax.patch.get_path(), m.ax.projection._as_mpl_transform(m.ax))
            defaultargs['zorder'] = 99999
            l2 = plt.Line2D([0, 0], [1, 1], **defaultargs, transform=m.f.transFigure)
            l2.set_clip_path(clip_path)
            l2.set_clip_on(True)
            l2 = m.ax.add_artist(l2)
            self.BM.add_bg_artist(l2, self.layer)
            self._indicator_lines.append((l2, m))
        self._update_indicator_lines()
        self.BM._before_fetch_bg_actions.append(self._update_indicator_lines)

    def _update_indicator_lines(self, *args, **kwargs):
        """Recompute the endpoints of every indicator line in figure coords."""
        spine_verts = self._get_spine_verts()
        # Inset outline in figure coordinates.
        verts = np.column_stack(self._transf_lonlat_to_plot.transform(*spine_verts.T))
        verts = (self.ax.transData + self.f.transFigure.inverted()).transform(verts)
        for (l, m) in self._indicator_lines:
            # Same outline projected onto the target map, in figure coords.
            verts_t = np.column_stack(m._transf_lonlat_to_plot.transform(*spine_verts.T))
            verts_t = (m.ax.transData + m.f.transFigure.inverted()).transform(verts_t)
            p_map = verts_t.mean(axis=0)
            p_inset = verts.mean(axis=0)
            # Start the line where the center-to-center segment crosses the
            # inset outline (fall back to the inset center).
            q = _intersect(p_map, p_inset, verts[:(- 1)], verts[1:])
            if q.any():
                (x0, y0) = _get_intersect(p_map, p_inset, verts[:(- 1)][q], verts[1:][q])
            else:
                (x0, y0) = p_inset
            # End the line where the segment crosses the indicated extent on
            # the target map; if it never does, leave this line untouched.
            q = _intersect(p_map, p_inset, verts_t[:(- 1)], verts_t[1:])
            if q.any():
                (x1, y1) = _get_intersect(p_map, p_inset, verts_t[:(- 1)][q], verts_t[1:][q])
                l.set_xdata([x0, x1])
                l.set_ydata([y0, y1])
            continue

    def set_inset_position(self, x=None, y=None, size=None):
        """Move/resize the inset; omitted values keep the current center/size."""
        # NOTE(review): Bbox.bounds is (x0, y0, width, height); the names
        # below treat it as corner coordinates — confirm before relying on
        # the y/center math here.
        (x0, y1, x1, y0) = self.ax.get_position().bounds
        if (size is None):
            size = abs((x1 - x0))
        if (x is None):
            x = ((x0 + x1) / 2)
        if (y is None):
            y = ((y0 + y1) / 2)
        self.ax.set_position(((x - (size / 2)), (y - (size / 2)), size, size))
        self.redraw(('__inset_' + self.layer), '__inset___SPINES__')

    def get_inset_position(self, precision=3):
        """Return (center_x, center_y, size) of the inset in figure coords."""
        bbox = self.ax.get_position()
        size = round(max(bbox.width, bbox.height), precision)
        x = round((bbox.x0 + (bbox.width / 2)), precision)
        y = round((bbox.y0 + (bbox.height / 2)), precision)
        return (x, y, size)

    def _get_inset_boundary(self, x, y, xy_crs, radius, radius_crs, shape, n=100):
        """Build the clip Path and its vertex array for the chosen shape."""
        shp = self.set_shape._get(shape)
        if (shape == 'ellipses'):
            shp_pts = shp._get_ellipse_points(x=np.atleast_1d(x), y=np.atleast_1d(y), crs=xy_crs, radius=radius, radius_crs=radius_crs, n=n)
            bnd_verts = np.stack(shp_pts[:2], axis=2)[0]
            # Reverse so the path winding matches what set_boundary expects.
            bnd_verts = bnd_verts[::(- 1)]
        elif (shape == 'rectangles'):
            shp_pts = shp._get_rectangle_verts(x=np.atleast_1d(x), y=np.atleast_1d(y), crs=xy_crs, radius=radius, radius_crs=radius_crs, n=n)
            bnd_verts = shp_pts[0][0]
        elif (shape == 'geod_circles'):
            shp_pts = shp._get_geod_circle_points(x=np.atleast_1d(x), y=np.atleast_1d(y), crs=xy_crs, radius=radius, n=n)
            bnd_verts = np.stack(shp_pts[:2], axis=2).squeeze()
            bnd_verts = bnd_verts[::(- 1)]
        boundary = Path(bnd_verts)
        return (boundary, bnd_verts)
def weight_quantization(b, grids, power=True):
    """Build a weight-quantization function with a straight-through estimator.

    Args:
        b: bit width for uniform quantization (2**b - 1 levels on [0, 1]).
        grids: 1-D tensor of allowed values for (power-of-two style)
            grid quantization.
        power: when True snap weights onto ``grids``; otherwise quantize
            uniformly with ``b`` bits.

    Returns:
        The ``apply`` method of a custom ``torch.autograd.Function`` taking
        ``(input, alpha)`` — the weight tensor and the clipping scale.
        NOTE: ``input`` is divided by ``alpha`` IN PLACE (kept from the
        original implementation).
    """

    def uniform_quant(x, b):
        # Map x in [0, 1] onto (2**b - 1) evenly spaced levels.
        xdiv = x.mul((2 ** b) - 1)
        xhard = xdiv.round().div((2 ** b) - 1)
        return xhard

    def power_quant(x, value_s):
        # Snap every element of x onto the nearest entry of value_s.
        shape = x.shape
        xhard = x.view(-1)
        value_s = value_s.type_as(x)
        idxs = (xhard.unsqueeze(0) - value_s.unsqueeze(1)).abs().min(dim=0)[1]
        xhard = value_s[idxs].view(shape)
        return xhard

    class _pq(torch.autograd.Function):

        # Bug fix: forward/backward must be @staticmethod and the function
        # must be used via the class-level `apply`; instantiating a Function
        # ("_pq().apply") is the removed legacy autograd API and raises a
        # RuntimeError on modern PyTorch.
        @staticmethod
        def forward(ctx, input, alpha):
            input.div_(alpha)                       # in-place scale (mutates caller's tensor)
            input_c = input.clamp(min=-1, max=1)    # clip to the quantization range
            sign = input_c.sign()
            input_abs = input_c.abs()
            if power:
                input_q = power_quant(input_abs, grids).mul(sign)
            else:
                input_q = uniform_quant(input_abs, b).mul(sign)
            ctx.save_for_backward(input, input_q)
            input_q = input_q.mul(alpha)            # rescale back
            return input_q

        @staticmethod
        def backward(ctx, grad_output):
            # Straight-through estimator: gradients pass through unchanged.
            grad_input = grad_output.clone()
            input, input_q = ctx.saved_tensors
            # i marks elements that were clipped by the [-1, 1] clamp;
            # clipped elements contribute sign(x), the rest the residual.
            i = (input.abs() > 1.0).float()
            sign = input.sign()
            grad_alpha = (grad_output * ((sign * i) + ((input_q - input) * (1 - i)))).sum()
            return grad_input, grad_alpha

    return _pq.apply
class MetricList(EvalMetric):
    """Composite metric that fans every call out to a list of child metrics."""

    def __init__(self, *args, name='metric_list'):
        assert all([issubclass(type(x), EvalMetric) for x in args]), 'MetricList input is illegal: {}'.format(args)
        self.metrics = list(args)
        super(MetricList, self).__init__(name=name)

    def update(self, preds, labels, losses=None):
        """Wrap non-list arguments in lists, then update every child metric."""
        if type(preds) is not list:
            preds = [preds]
        if type(labels) is not list:
            labels = [labels]
        if type(losses) is not list:
            losses = [losses]
        for child in self.metrics:
            child.update(preds, labels, losses)

    def reset(self):
        """Reset every child metric; warn when none are configured."""
        if not hasattr(self, 'metrics'):
            logging.warning('No metric defined.')
            return
        for child in self.metrics:
            child.reset()

    def get(self):
        """Collect the current value of every child metric."""
        return [child.get() for child in self.metrics]

    def get_name_value(self):
        """Collect (name, value) pairs from every child metric."""
        return [child.get_name_value() for child in self.metrics]
def run_experiment(dataset: Dataset, method: PromptMethod, evaluator: Evaluator) -> Dict:
    """Run a prompting experiment over grouped dataset items and evaluate.

    Items are processed group-by-group (groups read from a JSON file); for
    each group either the cached per-item results are reused or the method is
    run on the whole batch, with results cached per item for resumability.

    NOTE(review): relies on several names that must be defined at module
    level elsewhere in this file: ``group_file``, ``tmp_save_dir``,
    ``use_cache``, ``openai_key_pool``, ``verbose``.

    Returns:
        The evaluator's metrics dict over all predictions vs. gold answers.
    """
    (predictions, gold_answers) = ([], [])
    # groups is a list of index lists partitioning the dataset.
    with open(group_file, 'r') as f:
        groups = json.load(f)
    grouped_dataset = [[] for _ in range(len(groups))]
    for (group_idx, group) in enumerate(groups):
        grouped_dataset[group_idx].extend([(idx, dataset[idx]) for idx in group])
    for grouped_data_items in grouped_dataset:
        # A group is "cached" only when EVERY item has a result file on disk.
        is_group_cached = True
        for (idx, data_item) in grouped_data_items:
            data_item['idx'] = idx
            if (data_item.get('id', None) is None):
                # Fall back to the dataset index when the item carries no id.
                data_item['id'] = idx
            if (not os.path.exists(os.path.join(tmp_save_dir, f"{idx}_{data_item['id']}.json"))):
                is_group_cached = False
        if (use_cache and is_group_cached):
            # Fast path: reload the cached per-item predictions.
            (batch_prediction, batch_gold_answer) = ([], [])
            for (idx, data_item) in grouped_data_items:
                with open(os.path.join(tmp_save_dir, f"{data_item['idx']}_{data_item['id']}.json"), 'r') as f:
                    result_item = json.load(f)
                batch_prediction.append(result_item['prediction'])
                batch_gold_answer.append(result_item['gold_answer'])
                print(f"idx: {data_item['idx']}")
                print(f"id: {data_item['id']}")
                print(f"pred answer: {result_item['prediction']}")
                print(f"gold answer: {result_item['gold_answer']}")
        else:
            batch_data_items = [g[1] for g in grouped_data_items]
            # Retry forever on OpenAI API errors, rotating API keys each try.
            while True:
                try:
                    current_key = openai_key_pool.get_key()
                    os.environ['OPENAI_API_KEY'] = current_key
                    print('Using OpenAI key: ', current_key)
                    batch_prediction = method.run(x=batch_data_items, verbose=verbose)
                    break
                except openai.error.OpenAIError as e:
                    print(f'Error when getting response: {e}')
                    continue
            # Pad / truncate so predictions align 1:1 with the batch items.
            target_num_in_batch = len(batch_data_items)
            if (batch_prediction is None):
                batch_prediction = (['<empty>'] * target_num_in_batch)
            if (len(batch_prediction) < target_num_in_batch):
                batch_prediction = (batch_prediction + (['<empty>'] * (target_num_in_batch - len(batch_prediction))))
            elif (len(batch_prediction) > target_num_in_batch):
                batch_prediction = batch_prediction[:target_num_in_batch]
            batch_prediction = evaluator.normalize_answer(batch_prediction)
            batch_gold_answer = evaluator.normalize_answer([data_item['answer'] for data_item in batch_data_items])
            # Persist each item's result so a rerun can hit the cache path.
            for (data_item, prediction, gold_answer) in zip(batch_data_items, batch_prediction, batch_gold_answer):
                os.makedirs(tmp_save_dir, exist_ok=True)
                with open(os.path.join(tmp_save_dir, f"{data_item['idx']}_{data_item['id']}.json"), 'w') as f:
                    json.dump({'idx': data_item['idx'], 'id': data_item['id'], 'prediction': prediction, 'gold_answer': gold_answer}, f)
                print(f"idx: {data_item['idx']}")
                print(f"id: {data_item['id']}")
                print(f'pred answer: {prediction}')
                print(f'gold answer: {gold_answer}')
                print(('-' * 80))
        predictions.extend(batch_prediction)
        gold_answers.extend(batch_gold_answer)
        print(len(predictions))
    eval_dict = evaluator.evaluate(predictions, gold_answers)
    return eval_dict
class RandomVerticalFlip(object):
    """With probability 0.5, flip both images of a sample and its label mask
    top-to-bottom, keeping them spatially aligned."""

    def __call__(self, sample):
        first = sample['image'][0]
        second = sample['image'][1]
        label = sample['label']
        # A single coin toss governs all three so alignment is preserved.
        if random.random() < 0.5:
            first = first.transpose(Image.FLIP_TOP_BOTTOM)
            second = second.transpose(Image.FLIP_TOP_BOTTOM)
            label = label.transpose(Image.FLIP_TOP_BOTTOM)
        return {'image': (first, second), 'label': label}
class PriorProbability(keras.initializers.Initializer):
    """Keras initializer setting values so that a sigmoid applied to them
    outputs ``probability`` at the start of training (focal-loss prior)."""

    def __init__(self, probability=0.01):
        self.probability = probability

    def get_config(self):
        # Needed so Keras can (de)serialize the initializer.
        return {'probability': self.probability}

    def __call__(self, shape, dtype=None):
        # -log((1 - p) / p) is the logit of p: sigmoid(bias) == p initially.
        prior = -math.log((1 - self.probability) / self.probability)
        return np.ones(shape, dtype=dtype) * prior
class SubmitControl(ScalarControl):
    """Form control for INPUT TYPE=SUBMIT / BUTTON TYPE=SUBMIT elements.

    Submit controls are read-only and only contribute a form-data pair when
    they were the button actually clicked.
    """

    def __init__(self, type, name, attrs, index=None):
        ScalarControl.__init__(self, type, name, attrs, index)
        if (self.value is None):
            # Write through __dict__ to bypass the __setattr__ machinery
            # (the control is about to be marked read-only below).
            self.__dict__['_value'] = ''
        self.readonly = True

    def get_labels(self):
        """Return the control's labels; a submit button's value acts as one."""
        res = []
        if self.value:
            res.append(Label(self.value))
        res.extend(ScalarControl.get_labels(self))
        return res

    def is_of_kind(self, kind):
        # Submit buttons are the only "clickable" scalar controls here.
        return (kind == 'clickable')

    def _click(self, form, coord, return_type, request_class=_request.Request):
        """Simulate a click: mark this control as clicked only for the
        duration of building the request, then clear the flag."""
        self._clicked = coord
        r = form._switch_click(return_type, request_class)
        self._clicked = False
        return r

    def _totally_ordered_pairs(self):
        """Contribute form-data pairs only when this button was clicked."""
        if (not self._clicked):
            return []
        return ScalarControl._totally_ordered_pairs(self)
class CSVPrettyTable(PrettyTable):
    """PrettyTable subclass that renders the table as CSV text."""

    def get_string(self, **kwargs: (str | list[str])) -> str:
        """Return the table as CSV: every field double-quoted, embedded
        double quotes escaped by doubling (RFC 4180 style), rows joined
        with newlines."""

        def esc_quotes(val: (bytes | str)) -> str:
            # Bug fix: the old Python-2-era branches called str.replace on
            # bytes and bytes.replace with str arguments — both raise
            # TypeError on Python 3 (never the Unicode errors they caught).
            # Decode bytes first, then escape.
            if isinstance(val, bytes):
                val = val.decode('utf-8')
            return val.replace('"', '""')

        options = self._get_options(kwargs)
        rows = self._get_rows(options)
        formatted_rows = self._format_rows(rows)
        lines = []
        formatted_header = ','.join([('"%s"' % (esc_quotes(val),)) for val in self._field_names])
        lines.append(formatted_header)
        for row in formatted_rows:
            formatted_row = ','.join([('"%s"' % (esc_quotes(val),)) for val in row])
            lines.append(formatted_row)
        return '\n'.join(lines)
def test_uniquifier_error():
    """Uniquifier must raise RuntimeError when queried with sequences that
    are inconsistent with the one it was constructed from."""
    obj1, obj2, obj3, obj4 = [1], [2], [3], [4]
    objs = [obj1, obj2, obj1, obj1, obj2]
    objs2 = [obj1, obj2, obj1, obj1, obj2, obj2]
    uniq = Uniquifier(objs)
    # objs2 contains an extra duplicate -> get_unique_objs must refuse it.
    try:
        uniq.get_unique_objs(objs2)
        assert False, 'Expected a RuntimeError'
    except RuntimeError:
        pass
    # Mapping the original (non-unique) sequence must also be rejected.
    try:
        uniq.map_unique_objs(objs)
        assert False, 'Expected a RuntimeError'
    except RuntimeError:
        pass
def assert_database_is_reset(conn):
    """Generator fixture helper: mutate the schema, yield to the test, then
    assert the mutation is gone (i.e. the database was reset).

    Adds a throwaway column to the ``names`` table before yielding and
    verifies afterwards that the column no longer appears in the DDL.
    """
    conn.execute('ALTER TABLE names ADD COLUMN deprecated_column')

    def _names_ddl():
        # Exactly one CREATE TABLE statement for `names` must exist.
        (ddl,) = [d for d in conn.iterdump() if 'CREATE TABLE names' in d]
        return ddl

    assert 'deprecated_column' in _names_ddl()
    yield
    assert 'deprecated_column' not in _names_ddl(), 'Database did not get reset'
class garmintools_full():
    """pytrainer import plugin that retrieves activities from a Garmin GPS
    device using the garmintools command-line utilities and converts them
    to GPX files ready for import."""

    def __init__(self, parent=None, validate=False):
        self.parent = parent
        self.pytrainer_main = parent.pytrainer_main
        self.confdir = self.pytrainer_main.profile.confdir
        self.tmpdir = self.pytrainer_main.profile.tmpdir
        # garmin_save_runs reads this env var to pick its output directory.
        os.environ['GARMIN_SAVE_RUNS'] = self.tmpdir
        self.data_path = os.path.dirname(__file__)
        self.validate = validate
        self.sport = self.getConfValue('Force_sport_to')
        # Bug fix: configuration values come back as strings (or None);
        # coerce the numeric options once so later comparisons such as
        # `self.maxGap > 0` cannot raise TypeError (str vs int) on Python 3.
        deltaDays = self.getConfValue('Not_older_days')
        if deltaDays is None:
            logging.info('Delta days not set, retrieving complete history, defaulting to 0')
            self.deltaDays = 0
        else:
            self.deltaDays = int(deltaDays)
        maxGap = self.getConfValue('Max_gap_seconds')
        if maxGap is None:
            logging.info('No gap defined, strict comparison')
            self.maxGap = 0
        else:
            self.maxGap = int(maxGap)

    def getConfValue(self, confVar):
        """Read a plugin config value from the user's plugin directory;
        returns None when the user has no config file or the key is unset."""
        info = XMLParser(self.data_path + '/conf.xml')
        code = info.getValue('pytrainer-plugin', 'plugincode')
        plugindir = self.pytrainer_main.profile.plugindir
        conf_file = plugindir + '/' + code + '/conf.xml'
        if not os.path.isfile(conf_file):
            value = None
        else:
            info = XMLParser(conf_file)
            value = info.getValue('pytrainer-plugin', confVar)
        return value

    def error_dialog(self, msg):
        """Show an error popup via zenity."""
        subprocess.call(['zenity', '--error', ("--text='%s'" % msg)])

    def run(self):
        """Entry point: fetch runs from the device, dump and filter them,
        and return the list of (gpx_file, sport) tuples to import."""
        logging.debug('>>')
        importFiles = []
        if self.checkLoadedModule():
            numError = self.getDeviceInfo()
            if numError >= 0:
                outgps = subprocess.call("garmin_save_runs -v | zenity --progress --pulsate --text='Loading Data' --auto-close", shell=True)
                if outgps == 0:
                    foundFiles = self.searchFiles(self.tmpdir, 'gmn')
                    logging.info('Retrieved ' + str(len(foundFiles)) + ' entries from GPS device')
                    if self.deltaDays > 0:
                        selectedFiles = self.discardOld(foundFiles)
                    else:
                        logging.info('Retrieving complete history from GPS device')
                        selectedFiles = foundFiles
                    if len(selectedFiles) > 0:
                        logging.info('Dumping ' + str(len(selectedFiles)) + ' binary files found')
                        dumpFiles = self.dumpBinaries(selectedFiles)
                        # Activities already in the DB, used for dupe checks.
                        self.listStringDBUTC = self.pytrainer_main.ddbb.session.query(Activity)
                        if self.maxGap > 0:
                            logging.info('Starting import. Comparison will be made with ' + str(self.maxGap) + ' seconds interval')
                        else:
                            logging.info('Starting import. Comparison will be strict')
                        importFiles = self.importEntries(dumpFiles)
                    else:
                        logging.info('No new entries to add')
                else:
                    logging.error('Error when retrieving data from GPS device')
            elif numError == -1:
                self.error_dialog('No Garmin device found\nCheck your configuration')
            elif numError == -2:
                self.error_dialog('Can not find garmintools binaries\nCheck your configuration')
        else:
            self.error_dialog('Can not handle Garmin device (wrong module loaded)\nCheck your configuration')
        logging.info('Entries to import: ' + str(len(importFiles)))
        logging.debug('<<')
        return importFiles

    def discardOld(self, listEntries):
        """Keep only entries whose timestamp-named file is at most
        self.deltaDays days old."""
        logging.debug('>>')
        tempList = []
        logging.info('Discarding entries older than ' + str(self.deltaDays) + ' days')
        limit = datetime.now() - timedelta(days=int(self.deltaDays))
        for entry in listEntries:
            # Filenames look like '<%Y%m%dT%H%M%S>.gmn'.  Bug fix: use
            # splitext instead of rstrip('.gmn'), which strips *characters*
            # (any of '.', 'g', 'm', 'n'), not the suffix.
            filename = os.path.splitext(os.path.split(entry)[1])[0]
            filenameDateTime = datetime.strptime(filename, '%Y%m%dT%H%M%S')
            logging.debug('Entry time: ' + str(filenameDateTime) + ' | limit: ' + str(limit))
            if filenameDateTime < limit:
                logging.debug('Discarding old entry: ' + str(filenameDateTime))
            else:
                tempList.append(entry)
        logging.debug('<<')
        return tempList

    def importEntries(self, entries):
        """Parse dump files, skip duplicates already in the DB, convert new
        entries to GPX; returns a list of (gpx_path, sport) tuples."""
        logging.debug('>>')
        logging.debug('Selected files: ' + str(entries))
        importfiles = []
        for filename in entries:
            if not self.valid_input_file(filename):
                logging.error('File %s failed validation' % filename)
                continue
            # Bug fix: read as bytes so the concatenation with the
            # b'<root>'/b'</root>' wrapper is type-consistent (the old code
            # mixed a text-mode read with bytes literals -> TypeError).
            with open(filename, 'rb') as f:
                xmlString = f.read()
            fileString = BytesIO(b'<root>' + xmlString + b'</root>')
            try:
                tree = etree.parse(fileString)
                if not self.entryExists(tree, filename):
                    sport = self.getSport(tree)
                    gpxfile = '%s/garmintools-%d.gpx' % (self.tmpdir, len(importfiles))
                    self.createGPXfile(gpxfile, tree)
                    importfiles.append((gpxfile, sport))
                else:
                    logging.debug('%s already present. Skipping import.' % (filename,))
            except Exception:
                logging.error('Error parsing entry ' + str(filename))
                traceback.print_exc()
        logging.debug('<<')
        return importfiles

    def valid_input_file(self, filename):
        """Validate a dump file; currently a no-op that always accepts."""
        if not self.validate:
            logging.debug('Not validating %s' % filename)
        else:
            logging.debug('Cannot validate garmintools dump files yet')
        return True

    def entryExists(self, tree, filename):
        """Return True when the activity's UTC start time is already in the
        DB — or when no start time can be determined (err on the side of
        not importing)."""
        logging.debug('>>')
        stringStartDatetime = self.detailsFromFile(tree)
        if stringStartDatetime is None:
            logging.debug('Not able to find start time, please check ' + str(filename))
            exists = True
        else:
            startDatetime = dateutil.parser.parse(stringStartDatetime)
            stringStartUTC = startDatetime.astimezone(tzutc()).strftime('%Y-%m-%dT%H:%M:%SZ')
            if self.checkDupe(stringStartUTC, self.listStringDBUTC, self.maxGap):
                exists = True
            else:
                logging.info('Marking ' + str(filename) + ' | ' + str(stringStartUTC) + ' to import')
                exists = False
        logging.debug('<<')
        return exists

    def checkDupe(self, stringStartUTC, listStringStartUTC, gap):
        """True when an activity starting within `gap` seconds of
        stringStartUTC already exists among the DB activities."""
        logging.debug('>>')
        found = False
        if gap > 0:
            stringStartDate = stringStartUTC[0:10]
            for entry in listStringStartUTC:
                # Cheap date prefix filter before the full datetime compare.
                if entry.date_time_utc.startswith(stringStartDate):
                    deltaGap = timedelta(seconds=gap)
                    datetimeStartUTC = datetime.strptime(stringStartUTC, '%Y-%m-%dT%H:%M:%SZ')
                    datetimeStartUTCDB = datetime.strptime(entry.date_time_utc, '%Y-%m-%dT%H:%M:%SZ')
                    datetimePlusDelta = datetimeStartUTC + deltaGap
                    if datetimeStartUTC <= datetimeStartUTCDB <= datetimePlusDelta:
                        found = True
                        logging.debug('Found: ' + str(stringStartUTC) + ' <= ' + entry.date_time_utc + ' <= ' + str(datetimePlusDelta))
                        break
        # NOTE(review): with gap == 0 this compares a 1-tuple against
        # Activity rows — a leftover from the old raw-row DB API and likely
        # never matches; confirm against the ddbb layer before changing.
        elif (stringStartUTC,) in listStringStartUTC:
            found = True
        logging.debug('<<')
        return found

    def getSport(self, tree):
        """Sport for the activity: the forced value when configured, else the
        <run> element's sport attribute, else 'import'."""
        if self.sport:
            return self.sport
        root = tree.getroot()
        sportElement = root.find('.//run')
        try:
            sport = sportElement.get('sport')
            sport = sport.capitalize()
        except AttributeError:
            # No <run> element, or it carries no sport attribute.
            sport = 'import'
        return sport

    def detailsFromFile(self, tree):
        """Return the time attribute of the first <point>, or None."""
        root = tree.getroot()
        pointElement = root.find('.//point')
        if pointElement is not None:
            return pointElement.get('time')
        return None

    def createGPXfile(self, gpxfile, tree):
        """Transform a garmintools dump tree to GPX via the bundled XSLT."""
        xslt_doc = etree.parse(self.data_path + '/translate.xsl')
        transform = etree.XSLT(xslt_doc)
        result_tree = transform(tree)
        result_tree.write(gpxfile, xml_declaration=True, encoding='UTF-8')

    def dumpBinaries(self, listFiles):
        """Run garmin_dump on each .gmn file, writing <name>.dump next to it;
        returns the list of successfully created dump files."""
        logging.debug('>>')
        dumpFiles = []
        for filename in listFiles:
            outdump = filename.replace('.gmn', '.dump')
            logging.debug('outdump: ' + str(outdump))
            # Bug fix: the old code passed a single 'garmin_dump f > out'
            # string WITHOUT shell=True, which can never work; redirect the
            # child's stdout explicitly instead of going through a shell.
            with open(outdump, 'w') as out:
                result = subprocess.call(['garmin_dump', filename], stdout=out)
            if result == 0:
                dumpFiles.append(outdump)
            else:
                logging.error('Error when creating dump of %s: %d' % (str(filename), result))
        logging.debug('<<')
        return dumpFiles

    def searchFiles(self, rootPath, extension):
        """Return the paths (as str) of all '*.<extension>' files below
        rootPath, using find(1)."""
        logging.debug('>>')
        foundFiles = []
        logging.debug('rootPath: ' + str(rootPath))
        process = subprocess.Popen(['find', rootPath, '-name', '*.%s' % extension], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        if process.returncode == 0:
            # Bug fix: communicate() returns bytes; decode so callers get
            # str paths (the old code leaked bytes into later string ops).
            foundFiles = stdout.decode().splitlines()
            logging.info('Found files: ' + str(len(foundFiles)))
        else:
            logging.error('Not able to locate files from GPS: %s' % str(process.returncode))
        logging.debug('<<')
        return foundFiles

    def getDeviceInfo(self):
        """Probe the device with garmin_get_info.

        Returns 0 on success (even if the XML cannot be parsed), -1 when no
        device answers, -2 when the garmintools binaries are missing.
        """
        logging.debug('>>')
        numError = 0
        try:
            process = subprocess.Popen('garmin_get_info', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = process.communicate()
        except OSError:
            # A missing binary raises instead of returning an exit code.
            logging.error('Can not find garmintools binaries, please check your installation')
            logging.debug('<<')
            return -2
        # Bug fix: decode once; the old code compared bytes against str
        # (always unequal) and referenced an undefined name `result`.
        stdout = stdout.decode(errors='replace')
        logging.debug('Returns ' + str(stdout))
        if process.returncode == 0:
            if stdout != 'garmin unit could not be opened!':
                try:
                    xmlString = stdout.rstrip()
                    # Collapse all whitespace so etree accepts the output.
                    xmlString_2 = ' '.join(xmlString.split())
                    tree = etree.fromstring(xmlString_2)
                    description = self.getProductDesc(tree)
                    if description is not None:
                        logging.info('Found ' + str(description))
                    else:
                        raise Exception
                except Exception:
                    # Identification failure is not fatal for the import.
                    logging.error('Not able to identify GPS device. Continuing anyway...')
                    logging.debug('Traceback: %s' % traceback.format_exc())
            else:
                logging.error(stdout)
                numError = -1
        else:
            logging.error('Can not find garmintools binaries, please check your installation')
            numError = -2
        logging.debug('<<')
        return numError

    def getProductDesc(self, tree):
        """Extract the product description from the device-info XML."""
        return tree.findtext('.//product_description')

    def checkLoadedModule(self):
        """Return True when the garmin_gps kernel module shows up in lsmod."""
        try:
            outmod = subprocess.Popen('lsmod | grep garmin_gps', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            stdout, stderr = outmod.communicate()
            # Bug fix: communicate() yields bytes; the old comparison against
            # the str '' was always True, so the check never failed.
            return stdout != b''
        except Exception:
            return False

    def createUserdirBackup(self):
        """Tar the pytrainer configuration directory into $HOME as a backup."""
        logging.debug('>>')
        result = subprocess.call(['tar', '-cvzf', '%s/pytrainer_%s.tar.gz' % (os.environ['HOME'], time.strftime('%Y%m%d_%H%M')), self.confdir])
        if result != 0:
            raise Exception('Copying current user directory does not work, error #' + str(result))
        logging.info('User directory backup successfully created')
        logging.debug('<<')
def run_process(gpu, train_data, valid_data, dicts, opt, checkpoint, constants):
    """Per-GPU worker: pick the trainer matching the run options and train.

    CLIP-style learning uses a dedicated trainer class; otherwise the
    standard multiprocessing trainer is used.
    """
    from onmt.train_utils.mp_trainer import Trainer
    from onmt.train_utils.clip_trainer import ClipTrainer
    trainer_cls = ClipTrainer if opt.clip_learning else Trainer
    trainer = trainer_cls(gpu, dicts, opt, constants)
    trainer.run(checkpoint=checkpoint, train_data=train_data, valid_data=valid_data)
def gen_verify_hash(shared_key: bytes, public_key: bytes):
    """Derive a lowercase-hex verification value for a key exchange.

    SHA-1 is fed 20 ASCII spaces, then the shared secret, then the public
    key; the digest is interpreted as a SIGNED big-endian integer (so the
    result may carry a leading '-') and formatted as hex.
    """
    digest = hashlib.sha1()
    for chunk in ((' ' * 20).encode('utf-8'), shared_key, public_key):
        digest.update(chunk)
    signed_value = int.from_bytes(digest.digest(), byteorder='big', signed=True)
    return format(signed_value, 'x')
class NeuMF(torch.nn.Module):
    """Neural Matrix Factorization: a GMF branch (element-wise product of
    user/item embeddings) fused with an MLP branch over concatenated
    embeddings, followed by a single sigmoid rating head."""

    def __init__(self, config):
        super(NeuMF, self).__init__()
        self.config = config
        self.num_users = config['num_users']
        self.num_items = config['num_items']
        self.latent_dim_mf = config['latent_dim_mf']
        self.latent_dim_mlp = config['latent_dim_mlp']
        # Each branch gets its own user/item embedding tables.
        self.embedding_user_mlp = torch.nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.latent_dim_mlp)
        self.embedding_item_mlp = torch.nn.Embedding(num_embeddings=self.num_items, embedding_dim=self.latent_dim_mlp)
        self.embedding_user_mf = torch.nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.latent_dim_mf)
        self.embedding_item_mf = torch.nn.Embedding(num_embeddings=self.num_items, embedding_dim=self.latent_dim_mf)
        # config['layers'] lists the MLP widths, e.g. [in, h1, ..., out].
        self.fc_layers = torch.nn.ModuleList(
            torch.nn.Linear(in_size, out_size)
            for in_size, out_size in zip(config['layers'][:-1], config['layers'][1:])
        )
        self.affine_output = torch.nn.Linear(in_features=config['layers'][-1] + config['latent_dim_mf'], out_features=1)
        self.logistic = torch.nn.Sigmoid()

    def forward(self, user_indices, item_indices):
        """Score (user, item) index pairs; returns sigmoid ratings in (0, 1)."""
        mlp_user = self.embedding_user_mlp(user_indices)
        mlp_item = self.embedding_item_mlp(item_indices)
        mf_user = self.embedding_user_mf(user_indices)
        mf_item = self.embedding_item_mf(item_indices)
        mlp_vector = torch.cat([mlp_user, mlp_item], dim=-1)
        mf_vector = torch.mul(mf_user, mf_item)
        for layer in self.fc_layers:
            mlp_vector = torch.nn.ReLU()(layer(mlp_vector))
        fused = torch.cat([mlp_vector, mf_vector], dim=-1)
        logits = self.affine_output(fused)
        return self.logistic(logits)

    def init_weight(self):
        # Intentionally a no-op: weights come from the default init or from
        # load_pretrain_weights().
        pass

    def load_pretrain_weights(self):
        """Warm-start both branches from separately pretrained MLP and GMF
        checkpoints, blending the two output heads equally."""
        config = self.config
        config['latent_dim'] = config['latent_dim_mlp']
        mlp_model = MLP(config)
        if config['use_cuda'] is True:
            mlp_model.cuda()
        resume_checkpoint(mlp_model, model_dir=config['pretrain_mlp'], device_id=config['device_id'])
        self.embedding_user_mlp.weight.data = mlp_model.embedding_user.weight.data
        self.embedding_item_mlp.weight.data = mlp_model.embedding_item.weight.data
        for own_layer, pretrained_layer in zip(self.fc_layers, mlp_model.fc_layers):
            own_layer.weight.data = pretrained_layer.weight.data
        config['latent_dim'] = config['latent_dim_mf']
        gmf_model = GMF(config)
        if config['use_cuda'] is True:
            gmf_model.cuda()
        resume_checkpoint(gmf_model, model_dir=config['pretrain_mf'], device_id=config['device_id'])
        self.embedding_user_mf.weight.data = gmf_model.embedding_user.weight.data
        self.embedding_item_mf.weight.data = gmf_model.embedding_item.weight.data
        # Average the two heads: concatenated weights, summed biases, * 0.5.
        self.affine_output.weight.data = 0.5 * torch.cat([mlp_model.affine_output.weight.data, gmf_model.affine_output.weight.data], dim=-1)
        self.affine_output.bias.data = 0.5 * (mlp_model.affine_output.bias.data + gmf_model.affine_output.bias.data)
def show_proj_bbox_img(input, out_dir, show=False, is_nus_mono=False):
    """Project a sample's 3D ground-truth boxes onto its image and save/show.

    Dispatches on the concrete 3D-box type (depth / lidar / camera) to pick
    the projection matrix passed to ``show_multi_modality_result``; samples
    with no boxes or an unrecognized box type fall back to image-only output.

    Args:
        input: pipeline result dict with 'gt_bboxes_3d', 'img_metas' and
            'img' DataContainer entries.
        out_dir: directory the visualization is written to.
        show: when True also display the result interactively.
        is_nus_mono: accepted for API compatibility; not used in this body.
    """
    gt_bboxes = input['gt_bboxes_3d']._data
    img_metas = input['img_metas']._data
    img = input['img']._data.numpy()
    # CHW tensor layout -> HWC image layout for plotting.
    img = img.transpose(1, 2, 0)
    if (gt_bboxes.tensor.shape[0] == 0):
        # No boxes in this sample; draw the bare image below.
        gt_bboxes = None
    filename = Path(img_metas['filename']).name
    # Each box type requires a different projection matrix / box_mode.
    if isinstance(gt_bboxes, DepthInstance3DBoxes):
        show_multi_modality_result(img, gt_bboxes, None, None, out_dir, filename, box_mode='depth', img_metas=img_metas, show=show)
    elif isinstance(gt_bboxes, LiDARInstance3DBoxes):
        show_multi_modality_result(img, gt_bboxes, None, img_metas['lidar2img'], out_dir, filename, box_mode='lidar', img_metas=img_metas, show=show)
    elif isinstance(gt_bboxes, CameraInstance3DBoxes):
        show_multi_modality_result(img, gt_bboxes, None, img_metas['cam2img'], out_dir, filename, box_mode='camera', img_metas=img_metas, show=show)
    else:
        # Covers both the no-box case (gt_bboxes is None) and unknown types.
        warnings.warn(f'unrecognized gt box type {type(gt_bboxes)}, only show image')
        show_multi_modality_result(img, None, None, None, out_dir, filename, show=show)
class DateTimeProperty(JSONProperty):
    """Datetime stored inside a JSON column: serialized as a DATETIME_FORMAT
    string, cast back to a real DATETIME in SQL expressions."""

    def make_expression(self, base_exp):
        # Postgres JSON(B) expressions expose ``astext``; MySQL JSON values
        # must be json_unquote()d first (fsp=6 keeps microsecond precision).
        try:
            return base_exp.astext.cast(sa.DateTime)
        except AttributeError:
            return sa.func.json_unquote(base_exp).cast(mysql.DATETIME(fsp=6))

    def decode(self, val):
        """Parse the stored string into a datetime; falsy values pass through."""
        return datetime.strptime(val, DATETIME_FORMAT) if val else val

    def encode(self, val):
        """Serialize datetimes with DATETIME_FORMAT; other values pass through."""
        return val.strftime(DATETIME_FORMAT) if isinstance(val, datetime) else val
class SponsorshipBenefitModelTests(TestCase):
    """Model-level tests for SponsorshipBenefit querysets and properties."""

    def test_with_conflicts(self):
        """with_conflicts() returns only benefits that participate in a
        conflict relation (both sides), excluding unrelated benefits."""
        (benefit_1, benefit_2, benefit_3) = baker.make(SponsorshipBenefit, _quantity=3)
        benefit_1.conflicts.add(benefit_2)
        qs = SponsorshipBenefit.objects.with_conflicts()
        # benefit_3 has no conflicts and must not appear.
        self.assertEqual(2, qs.count())
        self.assertIn(benefit_1, qs)
        self.assertIn(benefit_2, qs)

    def test_has_capacity(self):
        """has_capacity follows capacity > 0 OR soft_capacity, and is always
        False when the benefit is flagged unavailable."""
        benefit = baker.prepare(SponsorshipBenefit, capacity=10, soft_capacity=False)
        self.assertTrue(benefit.has_capacity)
        benefit.capacity = 0
        self.assertFalse(benefit.has_capacity)
        # Soft capacity overrides a depleted hard capacity.
        benefit.soft_capacity = True
        self.assertTrue(benefit.has_capacity)
        benefit.capacity = 10
        benefit.soft_capacity = False
        # Unavailability wins even with remaining capacity.
        benefit.unavailable = True
        self.assertFalse(benefit.has_capacity)

    def test_list_related_sponsorships(self):
        """related_sponsorships yields only sponsorships holding this
        benefit, resolved in a single query."""
        benefit = baker.make(SponsorshipBenefit)
        sponsor_benefit = baker.make(SponsorBenefit, sponsorship_benefit=benefit)
        other_benefit = baker.make(SponsorshipBenefit)
        baker.make(SponsorBenefit, sponsorship_benefit=other_benefit)
        with self.assertNumQueries(1):
            sponsorships = list(benefit.related_sponsorships)
        # Two sponsorships exist in total, but only one relates to `benefit`.
        self.assertEqual(2, Sponsorship.objects.count())
        self.assertEqual(1, len(sponsorships))
        self.assertIn(sponsor_benefit.sponsorship, sponsorships)

    def test_name_for_display_without_specifying_package(self):
        """A tiered configuration appends its quantity to the display name."""
        benefit = baker.make(SponsorshipBenefit, name='Benefit')
        benefit_config = baker.make(TieredBenefitConfiguration, package__name='Package', benefit=benefit, quantity=10)
        expected_name = f'Benefit (10)'
        name = benefit.name_for_display(package=benefit_config.package)
        self.assertEqual(name, expected_name)
        self.assertTrue(benefit.has_tiers)
def XForwardedForMiddleware(get_response):
    """Middleware factory that rewrites REMOTE_ADDR from X-Forwarded-For.

    Intended to run behind a trusted reverse proxy: the proxy's address is
    preserved in HTTP_X_PROXY_REMOTE_ADDR and REMOTE_ADDR is replaced with
    the first (client) address in the header.

    NOTE: X-Forwarded-For is client-controlled — only deploy this behind a
    proxy that sets/sanitizes the header.
    """

    def middleware(request):
        if 'HTTP_X_FORWARDED_FOR' in request.META:  # idiom: no .keys() needed
            # Remember who actually connected to us (the proxy itself).
            request.META['HTTP_X_PROXY_REMOTE_ADDR'] = request.META['REMOTE_ADDR']
            # Header format is "client, proxy1, proxy2, ..."; the client is
            # first.  Bug fix: strip whitespace — proxies commonly join the
            # list with ", ", which previously leaked spaces into REMOTE_ADDR.
            client_ip = request.META['HTTP_X_FORWARDED_FOR'].split(',', 1)[0].strip()
            request.META['REMOTE_ADDR'] = client_ip
        return get_response(request)

    return middleware
def test_resolve_package_path(tmp_path: Path) -> None:
    """resolve_package_path should climb to the topmost package directory."""
    pkg = tmp_path / 'pkg1'
    pkg.mkdir()
    (pkg / '__init__.py').touch()
    sub = pkg / 'subdir'
    sub.mkdir()
    (sub / '__init__.py').touch()
    # Both the package dir itself and a nested module resolve to the root package.
    assert resolve_package_path(pkg) == pkg
    assert resolve_package_path(sub / '__init__.py') == pkg
class GridSearch():
    """Grid search over the fine-tuning learning rate and the approach's
    trade-off hyperparameter, as used in continual-learning experiments."""

    def __init__(self, appr_ft, seed, gs_config='gridsearch_config', acc_drop_thr=0.2, hparam_decay=0.5, max_num_searches=7):
        self.seed = seed
        # Resolve the configuration class by module name so configs are pluggable.
        GridSearchConfig = getattr(importlib.import_module(name=gs_config), 'GridSearchConfig')
        self.appr_ft = appr_ft
        self.gs_config = GridSearchConfig()
        self.acc_drop_thr = acc_drop_thr
        self.hparam_decay = hparam_decay
        self.max_num_searches = max_num_searches
        self.lr_first = 1.0

    @staticmethod
    def extra_parser(args):
        """Parse GridSearch-specific command-line arguments.

        Was missing @staticmethod: the signature takes no ``self``, so it could
        only be called through the class, never on an instance.
        """
        parser = ArgumentParser()
        parser.add_argument('--gridsearch-config', type=str, default='gridsearch_config', required=False, help='Configuration file for GridSearch options (default=%(default)s)')
        parser.add_argument('--gridsearch-acc-drop-thr', default=0.2, type=float, required=False, help='GridSearch accuracy drop threshold (default=%(default)f)')
        parser.add_argument('--gridsearch-hparam-decay', default=0.5, type=float, required=False, help='GridSearch hyperparameter decay (default=%(default)f)')
        # %(default)d: the default is an int; %f rendered it as "7.000000".
        parser.add_argument('--gridsearch-max-num-searches', default=7, type=int, required=False, help='GridSearch maximum number of hyperparameter search (default=%(default)d)')
        return parser.parse_known_args(args)

    def search_lr(self, model, t, trn_loader, val_loader):
        """Search the best fine-tuning LR for task ``t``; returns (best_acc, best_lr)."""
        best_ft_acc = 0.0
        best_ft_lr = 0.0
        # Apply all scalar "general" params to the fine-tuning approach.
        gen_params = self.gs_config.get_params('general')
        for k, v in gen_params.items():
            if not isinstance(v, list):
                setattr(self.appr_ft, k, v)
        if t > 0:
            # Later tasks only explore LRs below the one found for task 0.
            list_lr = [lr for lr in gen_params['lr'] if lr < self.lr_first][:gen_params['lr_searches'][0]]
        else:
            list_lr = gen_params['lr_first']
        for curr_lr in list_lr:
            # Fresh seed and model copy so runs are comparable.
            utils.seed_everything(seed=self.seed)
            self.appr_ft.model = deepcopy(model)
            self.appr_ft.lr = curr_lr
            self.appr_ft.train(t, trn_loader, val_loader)
            _, ft_acc_taw, _ = self.appr_ft.eval(t, val_loader)
            if ft_acc_taw > best_ft_acc:
                best_ft_acc = ft_acc_taw
                best_ft_lr = curr_lr
            print('Current best LR: ' + str(best_ft_lr))
            self.gs_config.current_lr = best_ft_lr
        print('Current best acc: {:5.1f}'.format(best_ft_acc * 100))
        if t == 0:
            # Remember the first-task LR as an upper bound for later tasks.
            self.lr_first = best_ft_lr
        return best_ft_acc, best_ft_lr

    def search_tradeoff(self, appr_name, appr, t, trn_loader, val_loader, best_ft_acc):
        """Decay the approach's trade-off hyperparameter until accuracy is
        within ``acc_drop_thr`` of the fine-tuning reference accuracy."""
        best_tradeoff = None
        tradeoff_name = None
        appr_params = self.gs_config.get_params(appr_name)
        # A list-valued param marks the trade-off to search; scalars are applied.
        for k, v in appr_params.items():
            if isinstance(v, list):
                tradeoff_name = k
            else:
                setattr(appr, k, v)
        if (tradeoff_name is not None) and (t > 0):
            best_tradeoff = appr_params[tradeoff_name][0]
            num_searches = 0
            while num_searches < self.max_num_searches:
                utils.seed_everything(seed=self.seed)
                # Clone the approach (sharing only the logger) for an isolated run.
                appr_gs = type(appr)(deepcopy(appr.model), appr.device, exemplars_dataset=appr.exemplars_dataset)
                for attr, value in vars(appr).items():
                    if attr == 'logger':
                        setattr(appr_gs, attr, value)
                    else:
                        setattr(appr_gs, attr, deepcopy(value))
                setattr(appr_gs, tradeoff_name, best_tradeoff)
                appr_gs.train(t, trn_loader, val_loader)
                _, curr_acc, _ = appr_gs.eval(t, val_loader)
                print('Current acc: ' + str(curr_acc) + ' for ' + tradeoff_name + '=' + str(best_tradeoff))
                if curr_acc < (1 - self.acc_drop_thr) * best_ft_acc:
                    # Too much accuracy lost: decay the trade-off and retry.
                    best_tradeoff = best_tradeoff * self.hparam_decay
                else:
                    break
                num_searches += 1
        else:
            print('There is no trade-off to gridsearch.')
        return best_tradeoff, tradeoff_name
class DeepFM(nn.Module):
    """Applies a dense module to the flattened concatenation of embeddings."""

    def __init__(self, dense_module: nn.Module) -> None:
        super().__init__()
        # The dense part of the DeepFM architecture, supplied by the caller.
        self.dense_module = dense_module

    def forward(self, embeddings: List[torch.Tensor]) -> torch.Tensor:
        """Flatten the per-feature embeddings and run them through the dense module."""
        flattened = _get_flatten_input(embeddings)
        return self.dense_module(flattened)
class _TestFileObj(object):
def __init__(self, fileobj, stop_after=(- 1), fail_after=(- 1)):
self._fileobj = fileobj
self._stop_after = stop_after
self._fail_after = fail_after
self.dataread = 0
self.operations = 0
fileobj.seek(0, 0)
def _check_fail(self):
self.operations += 1
if (self._fail_after != (- 1)):
if (self.operations > self._fail_after):
raise IOError('fail')
def tell(self):
self._check_fail()
return self._fileobj.tell()
def write(self, data):
try:
self._check_fail()
except IOError:
if len(data):
raise
self._fileobj.write(data)
def truncate(self, *args, **kwargs):
self._check_fail()
self._fileobj.truncate(*args, **kwargs)
def flush(self):
self._fileobj.flush()
def read(self, size=(- 1)):
try:
self._check_fail()
except IOError:
if (size != 0):
raise
data = self._fileobj.read(size)
self.dataread += len(data)
if ((self._stop_after != (- 1)) and (self.dataread > self._stop_after)):
data = data[:(self._stop_after - self.dataread)]
return data
def seek(self, offset, whence=0):
self._check_fail()
if (whence == 0):
final_position = offset
elif (whence == 1):
final_position = (self._fileobj.tell() + offset)
elif (whence == 2):
final_position = (get_size(self._fileobj) + offset)
assert (final_position >= 0), final_position
return self._fileobj.seek(offset, whence) |
class CaffeineBeverageWithHook(ABC):
    """Template method for preparing a caffeinated beverage.

    ``prepareRecipe`` fixes the algorithm's skeleton; subclasses override
    ``brew``/``addCondiments`` and may veto condiments through the
    ``customerWantsCondiments`` hook.
    """

    def prepareRecipe(self) -> None:
        """Run the fixed preparation sequence."""
        self.boilWater()
        self.brew()
        self.pourInCup()
        if not self.customerWantsCondiments():
            return
        self.addCondiments()

    def brew(self) -> None:
        # Overridden by concrete beverages.
        pass

    def addCondiments(self) -> None:
        # Overridden by concrete beverages.
        pass

    def boilWater(self) -> None:
        print('Boiling water')

    def pourInCup(self) -> None:
        print('Pouring into cup')

    def customerWantsCondiments(self) -> bool:
        """Hook: by default the customer wants condiments."""
        return True
class GC(Fontable):
    """X11 graphics context resource; methods are thin wrappers around the
    corresponding core protocol requests."""
    __gc__ = resource.Resource.__resource__

    def change(self, onerror=None, **keys):
        """Change GC attributes; attributes are passed as keywords (ChangeGC)."""
        request.ChangeGC(display=self.display, onerror=onerror, gc=self.id, attrs=keys)

    def copy(self, src_gc, mask, onerror=None):
        """Copy the attributes selected by ``mask`` from ``src_gc`` (CopyGC)."""
        request.CopyGC(display=self.display, onerror=onerror, src_gc=src_gc, dst_gc=self.id, mask=mask)

    def set_dashes(self, offset, dashes, onerror=None):
        """Set the dash pattern used for dashed lines (SetDashes)."""
        request.SetDashes(display=self.display, onerror=onerror, gc=self.id, dash_offset=offset, dashes=dashes)

    def set_clip_rectangles(self, x_origin, y_origin, rectangles, ordering, onerror=None):
        """Set the clip mask to a list of rectangles (SetClipRectangles)."""
        request.SetClipRectangles(display=self.display, onerror=onerror, ordering=ordering, gc=self.id, x_origin=x_origin, y_origin=y_origin, rectangles=rectangles)

    def free(self, onerror=None):
        """Free the server-side GC and release the local resource id (FreeGC)."""
        request.FreeGC(display=self.display, onerror=onerror, gc=self.id)
        self.display.free_resource_id(self.id)
class DAEModel(BaseModel):
    """Denoising autoencoder for collaborative filtering.

    Layer widths are ``[num_items] + [hidden]*2*num_hidden + [latent]``;
    consecutive pairs become encoder linear layers, and the decoder mirrors
    them from the end of the list.
    """

    def __init__(self, args):
        super().__init__(args)
        self.input_dropout = nn.Dropout(p=args.dae_dropout)
        # Hidden dims are duplicated so consecutive pairs feed nn.Linear(in, out).
        dims = ([args.dae_hidden_dim] * 2) * args.dae_num_hidden
        dims = [args.num_items] + dims + [args.dae_latent_dim]
        encoder_modules, decoder_modules = [], []
        for i in range(len(dims) // 2):
            encoder_modules.append(nn.Linear(dims[2 * i], dims[(2 * i) + 1]))
            # Decoder layer i mirrors encoder layer i, indexed from the end.
            decoder_modules.append(nn.Linear(dims[(-2 * i) - 1], dims[(-2 * i) - 2]))
        self.encoder = nn.ModuleList(encoder_modules)
        self.decoder = nn.ModuleList(decoder_modules)
        self.encoder.apply(self.weight_init)
        self.decoder.apply(self.weight_init)

    def weight_init(self, m):
        """Kaiming-normal weights and small-noise biases for linear layers."""
        if isinstance(m, nn.Linear):
            nn.init.kaiming_normal_(m.weight)
            m.bias.data.normal_(0.0, 0.001)

    @classmethod
    def code(cls):
        """Registry key for this model.

        Was missing @classmethod: ``DAEModel.code()`` raised TypeError because
        the bare function expected ``cls`` as a positional argument.
        """
        return 'dae'

    def forward(self, x):
        # L2-normalize the input interaction vector, then denoise via dropout.
        x = F.normalize(x)
        x = self.input_dropout(x)
        for i, layer in enumerate(self.encoder):
            x = layer(x)
            x = torch.tanh(x)
        for i, layer in enumerate(self.decoder):
            x = layer(x)
            # No nonlinearity on the final reconstruction layer.
            if i != len(self.decoder) - 1:
                x = torch.tanh(x)
        return x
class CopyLoader(Loadable, FileManagerAware):
    """Loadable that copies or moves the files in ``copy_buffer`` to a
    destination directory, yielding between blocks so the UI can update
    the progress bar (``self.percent``)."""
    progressbar_supported = True

    def __init__(self, copy_buffer, do_cut=False, overwrite=False, dest=None, make_safe_path=get_safe_path):
        self.copy_buffer = tuple(copy_buffer)
        self.do_cut = do_cut
        # Keep a reference to the live buffer so it can be cleared on cut.
        self.original_copy_buffer = copy_buffer
        self.original_path = (dest if (dest is not None) else self.fm.thistab.path)
        self.overwrite = overwrite
        self.make_safe_path = make_safe_path
        self.percent = 0
        if self.copy_buffer:
            self.one_file = self.copy_buffer[0]
        Loadable.__init__(self, self.generate(), 'Calculating size...')

    def _calculate_size(self, step):
        """Total size of the buffer in bytes, with each file rounded up to a
        multiple of ``step`` so progress ticks line up with copy blocks."""
        from os.path import join
        size = 0
        stack = [fobj.path for fobj in self.copy_buffer]
        while stack:
            fname = stack.pop()
            # Symlinks are not followed for size accounting.
            if os.path.islink(fname):
                continue
            if os.path.isdir(fname):
                stack.extend([join(fname, item) for item in os.listdir(fname)])
            else:
                try:
                    fstat = os.stat(fname)
                except OSError:
                    # File vanished or is unreadable; skip it.
                    continue
                size += max(step, (math.ceil((fstat.st_size / step)) * step))
        return size

    def generate(self):
        """Generator driving the copy/move; yields after each copied block."""
        if (not self.copy_buffer):
            return
        from ranger.ext import shutil_generatorized as shutil_g
        bytes_per_tick = shutil_g.BLOCK_SIZE
        size = max(1, self._calculate_size(bytes_per_tick))
        size_str = ((' (' + human_readable(self._calculate_size(1))) + ')')
        done = 0
        if self.do_cut:
            self.original_copy_buffer.clear()
            if (len(self.copy_buffer) == 1):
                self.description = (('moving: ' + self.one_file.path) + size_str)
            else:
                self.description = (('moving files from: ' + self.one_file.dirname) + size_str)
            for fobj in self.copy_buffer:
                # Move any tags along with the files they point to.
                for path in self.fm.tags.tags:
                    if ((path == fobj.path) or str(path).startswith(fobj.path)):
                        tag = self.fm.tags.tags[path]
                        self.fm.tags.remove(path)
                        new_path = path.replace(fobj.path, os.path.join(self.original_path, fobj.basename))
                        self.fm.tags.tags[new_path] = tag
                self.fm.tags.dump()
                n = 0
                for n in shutil_g.move(src=fobj.path, dst=self.original_path, overwrite=self.overwrite, make_safe_path=self.make_safe_path):
                    self.percent = (((done + n) / size) * 100.0)
                    (yield)
                done += n
        else:
            if (len(self.copy_buffer) == 1):
                self.description = (('copying: ' + self.one_file.path) + size_str)
            else:
                self.description = (('copying files from: ' + self.one_file.dirname) + size_str)
            for fobj in self.copy_buffer:
                # Directories are copied recursively; symlinks are copied as links.
                if (os.path.isdir(fobj.path) and (not os.path.islink(fobj.path))):
                    n = 0
                    for n in shutil_g.copytree(src=fobj.path, dst=os.path.join(self.original_path, fobj.basename), symlinks=True, overwrite=self.overwrite, make_safe_path=self.make_safe_path):
                        self.percent = (((done + n) / size) * 100.0)
                        (yield)
                    done += n
                else:
                    n = 0
                    for n in shutil_g.copy2(fobj.path, self.original_path, symlinks=True, overwrite=self.overwrite, make_safe_path=self.make_safe_path):
                        self.percent = (((done + n) / size) * 100.0)
                        (yield)
                    done += n
        # Refresh the destination directory listing when finished.
        cwd = self.fm.get_directory(self.original_path)
        cwd.load_content()
# NOTE(review): the line below looks like a truncated pytest-asyncio marker
# (presumably `@pytest.mark.asyncio(scope='class')`) — restore from the
# original source.
.asyncio(scope='class')
class TestInOneEventLoopPerClass():
    # Event loop captured by the first test and compared by the second;
    # verifies that all tests in the class share one event loop.
    loop: asyncio.AbstractEventLoop

    async def test_remember_loop(self):
        TestInOneEventLoopPerClass.loop = asyncio.get_running_loop()

    async def test_assert_same_loop(self):
        assert (asyncio.get_running_loop() is TestInOneEventLoopPerClass.loop)
# NOTE(review): the parenthesized line below looks like a truncated decorator
# (presumably `@strawberry.mutation(permission_classes=[IsAuthenticated])` or
# similar) — restore from the original source.
(permission_classes=[IsAuthenticated])
def subscribe_user_to_association(info: Info) -> SubscribeUserResult:
    """Ensure the current user has a membership and a Stripe customer, then
    return a Stripe Checkout session for the subscription.

    Returns AlreadySubscribed when the membership is already active.
    """
    user = info.context.request.user
    membership = Membership.objects.of_user(user).first()
    local_stripe_customer = None
    if (not membership):
        # First-time subscriber: create the membership and the Stripe
        # customer record up front.
        membership = Membership.objects.create(user=user)
        stripe_customer = stripe.Customer.create(email=user.email, metadata={'user_id': user.id})
        local_stripe_customer = StripeCustomer.objects.create(user_id=user.id, stripe_customer_id=stripe_customer.id)
    if membership.is_active:
        return AlreadySubscribed()
    if (not local_stripe_customer):
        # Existing membership: look up the previously created Stripe customer.
        local_stripe_customer = StripeCustomer.objects.of_user(user).first()
    checkout_session = stripe.checkout.Session.create(success_url=f'{settings.ASSOCIATION_FRONTEND_URL}?membership-status=success#membership', cancel_url=f'{settings.ASSOCIATION_FRONTEND_URL}#membership', payment_method_types=['card'], mode='subscription', customer=local_stripe_customer.stripe_customer_id, line_items=[{'price': settings.STRIPE_SUBSCRIPTION_PRICE_ID, 'quantity': 1}])
    return CheckoutSession(stripe_session_id=checkout_session.id)
# NOTE(review): the line below looks like a truncated
# `@pytest.mark.parametrize(...)` decorator — restore from the original source.
.parametrize('info1, info2, equal', [(keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier), keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier), True), (keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier), keyutils.KeyInfo(Qt.Key.Key_B, Qt.KeyboardModifier.NoModifier), False), (keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier), keyutils.KeyInfo(Qt.Key.Key_B, Qt.KeyboardModifier.ControlModifier), False)])
def test_hash(info1, info2, equal):
    """Equal KeyInfo values must hash equally; the unequal pairs here must not."""
    assert ((hash(info1) == hash(info2)) == equal)
class ProjectMergeRequestResourceStateEventManager(RetrieveMixin, RESTManager):
    """Manager for a merge request's resource state events (retrieve-only)."""

    _path = '/projects/{project_id}/merge_requests/{mr_iid}/resource_state_events'
    _obj_cls = ProjectMergeRequestResourceStateEvent
    _from_parent_attrs = {'project_id': 'project_id', 'mr_iid': 'iid'}

    def get(self, id: Union[str, int], lazy: bool = False, **kwargs: Any) -> ProjectMergeRequestResourceStateEvent:
        """Retrieve a single resource state event, narrowing the return type
        from the generic mixin to the concrete object class."""
        event = super().get(id=id, lazy=lazy, **kwargs)
        return cast(ProjectMergeRequestResourceStateEvent, event)
class Actor(nn.Module):
    """Deterministic policy network mapping states to bounded actions."""

    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        # Two 256-unit hidden layers followed by the action head.
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, action_dim)
        self.max_action = max_action

    def forward(self, state):
        """Return actions scaled to [-max_action, max_action]."""
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        # tanh squashes to (-1, 1); scale to the environment's action range.
        return self.max_action * torch.tanh(self.l3(hidden))
class Check():
    """Runs a Quickstrom check: launches the Specstrom interpreter as a
    subprocess and drives a WebDriver browser session according to the
    interpreter's message protocol, collecting plain results (optionally
    with screenshots)."""
    # NOTE(review): attribute annotations without defaults suggest instances
    # are constructed with all fields supplied (possibly a dataclass in the
    # original source) — confirm upstream.
    module: str
    origin: str
    browser: Browser
    browser_binary: Optional[str]
    include_paths: List[str]
    headless: bool
    capture_screenshots: bool
    cookies: List[Cookie]
    driver_log_file: Optional[str]
    extra_desired_capabilities: Optional[Dict[(str, Any)]]
    remote_webdriver_url: Optional[str]
    interpreter_log_file: IO
    log: logging.Logger = logging.getLogger('quickstrom.executor')

    def execute(self) -> List[result.PlainResult]:
        """Run all check sessions and return their results."""
        scripts = self.load_scripts()
        with self.launch_specstrom(self.interpreter_log_file) as p:
            assert (p.stdout is not None)
            assert (p.stdin is not None)
            input_messages = message_reader(p.stdout)
            output_messages = message_writer(p.stdin)
            # Screenshots keyed by state hash, attached to results at the end.
            screenshots: Dict[(str, result.Screenshot[bytes])] = {}

            def receive():
                # Read the next protocol message; None means clean interpreter exit.
                msg = input_messages.read()
                exit_code = p.poll()
                if ((msg is None) and (exit_code is not None)):
                    if (exit_code == 0):
                        return None
                    else:
                        raise SpecstromError('Specstrom invocation failed', exit_code, self.interpreter_log_file.name)
                else:
                    self.log.debug('Received %s', msg)
                    return msg

            def send(msg):
                # Only send while the interpreter process is still alive.
                exit_code = p.poll()
                if (exit_code is None):
                    self.log.debug('Sending %s', msg)
                    output_messages.write(msg)
                elif (exit_code == 0):
                    self.log.warning("Done, can't send.")
                else:
                    self.log.warning("Specstrom errored, can't send.")

            def perform_action(driver, action):
                """Dispatch a protocol action to the corresponding WebDriver call."""
                try:
                    if (action.id == 'noop'):
                        pass
                    elif (action.id == 'click'):
                        id = action.args[0]
                        element = WebElement(driver, id)
                        try:
                            element.click()
                        except Exception as e:
                            # Some elements reject native clicks (overlays etc.);
                            # retry via JavaScript.
                            self.log.warning('Basic click failed, falling back to JS click: %s', e)
                            driver.execute_script('arguments[0].click();', element)
                    elif (action.id == 'doubleClick'):
                        id = action.args[0]
                        element = WebElement(driver, id)
                        ActionChains(driver).move_to_element(element).double_click(element).perform()
                    elif (action.id == 'select'):
                        id = action.args[0]
                        value = action.args[1]
                        option = WebElement(driver, id)
                        # The target is an <option>; operate on its parent <select>.
                        select = Select(option.find_element(By.XPATH, './ancestor::select'))
                        select.select_by_value(value)
                    elif (action.id == 'focus'):
                        id = action.args[0]
                        element = WebElement(driver, id)
                        # Sending an empty keys sequence focuses the element.
                        element.send_keys('')
                    elif (action.id == 'keyPress'):
                        char = action.args[0]
                        element = driver.switch_to.active_element
                        element.send_keys(char)
                    elif (action.id == 'enterText'):
                        element = driver.switch_to.active_element
                        element.send_keys(action.args[0])
                    elif (action.id == 'enterTextInto'):
                        id = action.args[1]
                        element = WebElement(driver, id)
                        element.send_keys(action.args[0])
                    elif (action.id == 'clear'):
                        id = action.args[0]
                        element = WebElement(driver, id)
                        element.clear()
                    elif (action.id == 'scrollBy'):
                        driver.execute_script('window.scrollBy(arguments[0], arguments[1])', action.args[0], action.args[1])
                    else:
                        raise UnsupportedActionError(action)
                except Exception as e:
                    raise PerformActionError(action, e)

            def screenshot(driver: WebDriver, hash: str):
                # Capture and remember a PNG screenshot for the given state hash.
                if self.capture_screenshots:
                    bs: bytes = driver.get_screenshot_as_png()
                    (width, height, _, _) = png.Reader(io.BytesIO(bs)).read()
                    window_size = driver.get_window_size()
                    # Infer the device pixel ratio from PNG vs. window size.
                    scale = round((width / window_size['width']))
                    if (scale != round((height / window_size['height']))):
                        self.log.warn('Width and height scales do not match for screenshot')
                    screenshots[hash] = result.Screenshot(image=bs, width=width, height=height, scale=scale)

            def attach_screenshots(r: result.PlainResult) -> result.PlainResult:
                # Rewrite each state to carry its captured screenshot (if any).
                def on_state(state):
                    return result.State(screenshot=screenshots.get(state.hash, None), queries=state.queries, hash=state.hash)
                return result.map_states(r, on_state)

            def await_events(driver, deps, state_version, timeout: int):
                """Wait for client-side events; on timeout or staleness, report
                the current state via a Timeout message."""
                def on_no_events():
                    state = scripts.query_state(driver, deps)
                    screenshot(driver, dict_hash(state))
                    state_version.increment()
                    send(Timeout(state=state))
                try:
                    self.log.debug(f'Awaiting events with timeout {timeout}')
                    events = scripts.await_events(driver, deps, timeout)
                    self.log.debug(f'Change: {events}')
                    if (events is None):
                        self.log.info(f'Timed out!')
                        on_no_events()
                    else:
                        screenshot(driver, dict_hash(events.state))
                        state_version.increment()
                        send(Events(events.events, events.state))
                except StaleElementReferenceException as e:
                    self.log.error(f'Stale element reference: {e}')
                    on_no_events()

            def run_sessions() -> List[result.PlainResult]:
                """Main protocol loop: start sessions until Done or Aborted."""
                while True:
                    msg = receive()
                    assert (msg is not None)
                    if isinstance(msg, Start):
                        try:
                            self.log.info('Starting session')
                            driver = self.new_driver()
                            driver.set_script_timeout(10)
                            driver.set_window_size(1200, 1200)
                            if (len(self.cookies) > 0):
                                # Cookies can only be set after loading the origin.
                                driver.get(self.origin)
                                for cookie in self.cookies:
                                    self.log.debug(f'Setting {cookie}')
                                    driver.add_cookie(dataclasses.asdict(cookie))
                            driver.get(self.origin)
                            time.sleep(1)
                            state_version = Counter(initial_value=0)
                            scripts.install_event_listener(driver, msg.dependencies)
                            await_events(driver, msg.dependencies, state_version, 10000)
                            await_session_commands(driver, msg.dependencies, state_version)
                        except SpecstromAbortedError as e:
                            raise e
                        except Exception as e:
                            # Report the failure to the interpreter, then expect End.
                            send(Error(traceback.format_exc()))
                            msg = receive()
                            if (not isinstance(msg, End)):
                                raise Exception(f'Expected End after Error but got: {msg}')
                    elif isinstance(msg, Done):
                        return [attach_screenshots(result.from_protocol_result(r)) for r in msg.results]
                    elif isinstance(msg, Aborted):
                        raise SpecstromAbortedError(msg.error_message)
                    else:
                        raise Exception(f'Unexpected message in run_sessions: {msg}')

            def await_session_commands(driver: WebDriver, deps, state_version):
                """Handle per-session commands until End; always closes the driver."""
                try:
                    while True:
                        msg = receive()
                        if (not msg):
                            raise Exception('No more messages from Specstrom, expected RequestAction or End.')
                        elif isinstance(msg, RequestAction):
                            # Only act if the interpreter's state version matches ours.
                            if (msg.version == state_version.value):
                                self.log.info(f'Performing action in state {state_version.value}: {printer.pretty_print_action(msg.action)}')
                                perform_action(driver, msg.action)
                                if (msg.action.timeout is not None):
                                    self.log.debug('Installing change observer')
                                    scripts.install_event_listener(driver, deps)
                                state = scripts.query_state(driver, deps)
                                screenshot(driver, dict_hash(state))
                                state_version.increment()
                                send(Performed(state=state))
                                if (msg.action.timeout is not None):
                                    await_events(driver, deps, state_version, msg.action.timeout)
                            else:
                                self.log.warn(f'Got stale message ({msg}) in state {state_version.value}')
                                send(Stale())
                        elif isinstance(msg, AwaitEvents):
                            if (msg.version == state_version.value):
                                self.log.info(f'Awaiting events in state {state_version.value} with timeout {msg.await_timeout}')
                                scripts.install_event_listener(driver, deps)
                                await_events(driver, deps, state_version, msg.await_timeout)
                            else:
                                self.log.warn(f'Got stale message ({msg}) in state {state_version.value}')
                                send(Stale())
                        elif isinstance(msg, End):
                            self.log.info('Ending session')
                            return
                        elif isinstance(msg, Aborted):
                            raise SpecstromAbortedError(msg.error_message)
                        else:
                            raise Exception(f'Unexpected message: {msg}')
                finally:
                    driver.close()
            return run_sessions()

    def launch_specstrom(self, ilog):
        """Start the `specstrom check` subprocess with stdio piped for messaging."""
        includes = list(map((lambda i: ('-I' + i)), self.include_paths))
        cmd = (['specstrom', 'check', self.module] + includes)
        self.log.debug('Invoking Specstrom with: %s', ' '.join(cmd))
        # bufsize=0: unbuffered pipes so protocol messages flow immediately.
        return subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE, stderr=ilog, stdin=subprocess.PIPE, bufsize=0)

    def new_driver(self) -> WebDriver:
        """Create a WebDriver for the configured browser, or a remote session."""
        if (self.remote_webdriver_url is not None):
            return webdriver.Remote(command_executor=self.remote_webdriver_url, options=self.browser_shared_options())
        elif (self.browser == 'chrome'):
            chrome_opts = cast(ChromeOptions, self.browser_shared_options())
            chromedriver_path = which('chromedriver')
            if (not chromedriver_path):
                raise Exception('chromedriver not found in PATH')
            browser_path = (self.browser_binary or which('chromium') or which('google-chrome-stable') or which('google-chrome') or which('chrome'))
            chrome_opts.binary_location = browser_path
            chrome_opts.add_argument('--no-sandbox')
            chrome_opts.add_argument('--single-process')
            return webdriver.Chrome(options=chrome_opts, service=ChromeService(executable_path=chromedriver_path, log_path=self.driver_log_file))
        elif (self.browser == 'edge'):
            edge_opts = cast(EdgeOptions, self.browser_shared_options())
            edgedriver_path = which('msedgedriver')
            if (not edgedriver_path):
                raise Exception('msedgedriver not found in PATH')
            browser_path = (self.browser_binary or which('microsoft-edge-stable') or which('microsoft-edge'))
            edge_opts.binary_location = browser_path
            edge_opts.add_argument('--no-sandbox')
            edge_opts.add_argument('--single-process')
            return webdriver.Edge(options=edge_opts, service=EdgeService(executable_path=edgedriver_path, log_path=self.driver_log_file))
        elif (self.browser == 'firefox'):
            firefox_opts = cast(FirefoxOptions, self.browser_shared_options())
            binary = (self.browser_binary or which('firefox'))
            geckodriver_path = which('geckodriver')
            if (not geckodriver_path):
                raise Exception('geckodriver not found in PATH')
            return webdriver.Firefox(options=firefox_opts, service=FirefoxService(executable_path=geckodriver_path, log_path=(self.driver_log_file or 'geckodriver.log'), service_args=(['--binary', binary] if binary else [])))
        else:
            raise Exception(f'Unsupported browser: {self.browser}')

    def browser_shared_options(self) -> Union[(ChromeOptions, FirefoxOptions, EdgeOptions, SafariOptions)]:
        """Build per-browser options with shared capabilities applied."""
        def set_shared(options):
            for (key, value) in (self.extra_desired_capabilities or {}).items():
                options.set_capability(key, value)
            options.headless = self.headless
            return options
        if (self.browser == 'chrome'):
            return set_shared(ChromeOptions())
        elif (self.browser == 'firefox'):
            return set_shared(FirefoxOptions())
        elif (self.browser == 'edge'):
            return set_shared(EdgeOptions())
        elif (self.browser == 'safari'):
            return set_shared(SafariOptions())
        else:
            raise Exception(f'Unsupported browser: {self.browser}')

    def load_scripts(self) -> Scripts:
        """Load the client-side JavaScript helpers and wrap them as callables."""
        def map_query_state(r):
            if (r is None):
                raise Exception('WebDriver script invocation failed with unexpected None result. This might be caused by an unexpected page navigation in the browser. Consider adding a timeout to the corresponding action.')
            return elements_to_refs(r)

        def map_client_side_events(r):
            # Translate raw event dicts into protocol Action values.
            def map_event(e: dict):
                if (e['tag'] == 'loaded'):
                    return Action(id='loaded', args=[], isEvent=True, timeout=None)
                elif (e['tag'] == 'changed'):
                    return Action(id='changed', args=[elements_to_refs(e['element'])], isEvent=True, timeout=None)
                elif (e['tag'] == 'detached'):
                    return Action(id='detached', args=[e['markup']], isEvent=True, timeout=None)
                else:
                    raise Exception(f'Invalid event tag in: {e}')
            return (ClientSideEvents([map_event(e) for e in r['events']], elements_to_refs(r['state'])) if (r is not None) else None)

        # Per-script mappers from raw results to protocol-level values.
        result_mappers = {'queryState': map_query_state, 'installEventListener': (lambda r: r), 'awaitEvents': map_client_side_events}

        def load_script(name: str, is_async: bool=False) -> Any:
            key = 'QUICKSTROM_CLIENT_SIDE_DIRECTORY'
            client_side_dir = os.getenv(key)
            if (not client_side_dir):
                raise Exception(f'Environment variable {key} must be set')
            file = open(f'{client_side_dir}/{name}.js')
            script = file.read()

            def f(driver: WebDriver, *args: Any) -> JsonLike:
                try:
                    r = (driver.execute_async_script(script, *args) if is_async else driver.execute_script(script, *args))
                    return result_mappers[name](r)
                except StaleElementReferenceException as e:
                    # Propagate staleness so callers can recover/retry.
                    raise e
                except Exception as e:
                    raise ScriptError(name, list(args), e)
            return f

        return Scripts(query_state=load_script('queryState'), install_event_listener=load_script('installEventListener'), await_events=load_script('awaitEvents', is_async=True))
class MakeBBoxSquare(BBoxTransform):
    """Expand the shorter side of an [x, y, w, h] box so it becomes square,
    keeping the box center fixed."""

    def __call__(self, bbox: List[float], image_size: Tuple[(int, int)]) -> List[float]:
        x, y = bbox[0], bbox[1]
        w, h = bbox[2], bbox[3]
        side = max(w, h)
        # Shift the origin by half the added extent to keep the center fixed.
        pad_w = (side - w) / 2
        pad_h = (side - h) / 2
        return [x - pad_w, y - pad_h, side, side]

    def __repr__(self) -> str:
        return 'ms'
class AffineTransformed(TransformedDistribution):
    """Distribution of ``loc + scale * X`` for ``X ~ base_distribution``.

    Parameters
    ----------
    base_distribution : Distribution
        The distribution to transform.
    loc
        Additive shift (defaults to 0.0 when None).
    scale
        Multiplicative factor (defaults to 1.0 when None).
    event_dim : int
        Number of rightmost event dimensions of the affine transform.
    """

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the transformed distribution."""
        return (self.base_dist.mean * self.scale) + self.loc

    @property
    def variance(self):
        """Variance of the transformed distribution."""
        return self.base_dist.variance * (self.scale ** 2)

    @property
    def stddev(self):
        """Standard deviation.

        These three were plain methods; ``stddev`` accessed ``self.variance``
        as an attribute (yielding a bound method, so ``.sqrt()`` failed), and
        torch's Distribution API defines them as properties.
        """
        return self.variance.sqrt()
def test_update_legacy_questions(db, settings):
    """Importing the legacy questions XML over existing data must update
    (not create) all 147 elements, and the catalog tree must match."""
    xml_file = ((((Path(settings.BASE_DIR) / 'xml') / 'elements') / 'legacy') / 'questions.xml')
    root = read_xml_file(xml_file)
    version = root.attrib.get('version')
    # Convert the flat XML into ordered element dicts for the importer.
    elements = flat_xml_to_elements(root)
    elements = convert_elements(elements, version)
    elements = order_elements(elements)
    elements = elements.values()
    import_elements(elements)
    assert (len(root) == len(elements) == 147)
    # Every element pre-existed, so all are updates rather than creations.
    assert all(((element['created'] is False) for element in elements))
    assert all(((element['updated'] is True) for element in elements))
    # The imported URIs (minus the catalog itself) must equal the catalog's
    # descendant URIs.
    catalog = Catalog.objects.prefetch_elements().first()
    descendant_uris = {element.uri for element in catalog.descendants}
    element_uris = {element['uri'] for element in elements if (element['uri'] != catalog.uri)}
    assert (descendant_uris == element_uris)
class FrozenSet(node_classes.BaseContainer):
    """Astroid node representing a frozenset value."""

    def pytype(self) -> Literal['builtins.frozenset']:
        """Return the Python type name of this node."""
        return 'builtins.frozenset'

    def _infer(self, context: (InferenceContext | None)=None, **kwargs: Any):
        # A frozenset literal infers to itself.
        (yield self)

    # NOTE(review): the bare `_property` below looks like a mangled decorator
    # (probably `@cached_property` or similar in the original source) —
    # confirm upstream.
    _property
    def _proxied(self):
        """Proxy to the builtins ``frozenset`` class node."""
        ast_builtins = AstroidManager().builtins_module
        return ast_builtins.getattr('frozenset')[0]
def create_classifier_and_diffusion(image_size, classifier_use_fp16, classifier_width, classifier_depth, classifier_attention_resolutions, classifier_use_scale_shift_norm, classifier_resblock_updown, classifier_pool, learn_sigma, diffusion_steps, noise_schedule, timestep_respacing, use_kl, predict_xstart, rescale_timesteps, rescale_learned_sigmas):
    """Build the noisy-image classifier and its matching Gaussian diffusion
    process; pure forwarding to create_classifier / create_gaussian_diffusion.

    Returns a (classifier, diffusion) tuple.
    """
    classifier = create_classifier(image_size, classifier_use_fp16, classifier_width, classifier_depth, classifier_attention_resolutions, classifier_use_scale_shift_norm, classifier_resblock_updown, classifier_pool)
    diffusion = create_gaussian_diffusion(steps=diffusion_steps, learn_sigma=learn_sigma, noise_schedule=noise_schedule, use_kl=use_kl, predict_xstart=predict_xstart, rescale_timesteps=rescale_timesteps, rescale_learned_sigmas=rescale_learned_sigmas, timestep_respacing=timestep_respacing)
    return (classifier, diffusion)
class CNN_DUQ(Model):
    """CNN with a Deterministic Uncertainty Quantification (DUQ) head:
    predictions are RBF-kernel distances between per-class embeddings and
    per-class centroids tracked with exponential moving averages."""

    def __init__(self, num_classes, embedding_size, learnable_length_scale, length_scale, gamma):
        super().__init__()
        # EMA decay factor for centroid updates.
        self.gamma = gamma
        # Per-class weight tensors mapping 256-dim features to embeddings.
        self.W = nn.Parameter(torch.normal(torch.zeros(embedding_size, num_classes, 256), 0.05))
        # N: EMA of per-class example counts; m: EMA of per-class embedding sums.
        self.register_buffer('N', (torch.ones(num_classes) * 12))
        self.register_buffer('m', torch.normal(torch.zeros(embedding_size, num_classes), 1))
        self.m = (self.m * self.N.unsqueeze(0))
        # RBF length scale: learnable per class, or a fixed scalar.
        if learnable_length_scale:
            self.sigma = nn.Parameter((torch.zeros(num_classes) + length_scale))
        else:
            self.sigma = length_scale

    def update_embeddings(self, x, y):
        """EMA-update class counts and embedding sums from a labeled batch.

        Assumes ``y`` is one-hot encoded (batch, num_classes) — confirm
        against callers; ``compute_features`` is provided by a subclass.
        """
        z = self.last_layer(self.compute_features(x))
        self.N = ((self.gamma * self.N) + ((1 - self.gamma) * y.sum(0)))
        # Sum embeddings per class, weighted by the one-hot labels.
        features_sum = torch.einsum('ijk,ik->jk', z, y)
        self.m = ((self.gamma * self.m) + ((1 - self.gamma) * features_sum))

    def last_layer(self, z):
        # Project features into one embedding per class: (batch, emb, classes).
        z = torch.einsum('ij,mnj->imn', z, self.W)
        return z

    def output_layer(self, z):
        """RBF distances between embeddings and the class centroids m/N."""
        embeddings = (self.m / self.N.unsqueeze(0))
        diff = (z - embeddings.unsqueeze(0))
        distances = (- (diff ** 2)).mean(1).div((2 * (self.sigma ** 2))).exp()
        return distances

    def forward(self, x):
        z = self.last_layer(self.compute_features(x))
        y_pred = self.output_layer(z)
        return y_pred
def assert_kraus_equivalence(a, b, tol=tol):
    """Assert two Kraus operators are equal up to a global phase.

    Compares shape/dims/type metadata, then aligns ``b``'s global phase to
    ``a``'s using the first element of ``a`` that exceeds ``tol`` in
    magnitude before an elementwise comparison.
    """
    assert a.shape == b.shape
    assert a.dims == b.dims
    assert a.type == b.type
    a, b = a.full(), b.full()
    a_nz = np.nonzero(np.abs(a) > tol)
    if len(a_nz[0]) == 0:
        # `a` is numerically zero: `b` must be zero too, and there is no
        # reference element for phase alignment.  The original fell through
        # and crashed with an IndexError on the empty index below.
        np.testing.assert_allclose(b, 0, atol=tol)
        return
    a_el, b_el = a[a_nz[0][0], a_nz[1][0]], b[a_nz[0][0], a_nz[1][0]]
    assert b_el != 0
    # Rotate b by the phase difference so the comparison is phase-insensitive.
    b *= (a_el / np.abs(a_el)) / (b_el / np.abs(b_el))
    np.testing.assert_allclose(a, b)
def train(args, model, train_data_loader, dev_data_loader):
    """Train ``model``, periodically evaluating on the dev set.

    Uses BertAdam with decoupled weight-decay groups when args['type'] is
    'BERT', plain Adam otherwise.  Returns a deep copy of the state_dict
    that achieved the best dev accuracy.
    """
    loss_func = CrossEntropyLoss()
    if args['type'] == 'BERT':
        # BERT fine-tuning convention: no weight decay on biases / LayerNorm.
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for (n, p) in param_optimizer if not any((nd in n) for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for (n, p) in param_optimizer if any((nd in n) for nd in no_decay)], 'weight_decay': 0.0},
        ]
        optimizer = BertAdam(optimizer_grouped_parameters, lr=args['lr'], warmup=0.05, t_total=(len(train_data_loader) * args['num_epochs']), weight_decay=args['weight_decay'])
    else:
        optimizer = Adam(model.parameters(), args['lr'], weight_decay=args['weight_decay'])
    # One TensorBoard run directory per launch, timestamped.
    writer = SummaryWriter((args['output_path'] + '/') + time.strftime('%m-%d_%H.%M', time.localtime()))
    train_stat = Stat(training=True, writer=writer)
    dev_stat = Stat(training=False, writer=writer)
    best_acc, best_net = 0, None
    for epoch in range(args['num_epochs']):
        print(f'--- epoch: {epoch + 1} ---')
        # `step` renamed from `iter`, which shadowed the builtin.
        for step, batch in enumerate(train_data_loader):
            model.train()
            inputs, labels = batch[0], batch[1]
            optimizer.zero_grad()
            pred_outputs = model(inputs)
            loss = loss_func(pred_outputs, labels)
            loss.backward()
            optimizer.step()
            train_stat.add(pred_outputs, labels, loss.item())
            if (step + 1) % args['display_per_batch'] == 0:
                train_loss, train_acc = train_stat.log()
                # Evaluate on the dev set without tracking gradients.
                model.eval()
                with torch.no_grad():
                    for batch in dev_data_loader:
                        inputs, labels = batch[0], batch[1]
                        pred_outputs = model(inputs)
                        loss = loss_func(pred_outputs, labels)
                        dev_stat.add(pred_outputs, labels, loss.item())
                    dev_loss, dev_acc = dev_stat.log()
                print(f'step {step + 1:5}, training loss: {train_loss:.4f}, acc: {train_acc:.2%}, dev loss: {dev_loss:.4f}, acc: {dev_acc:.2%}.')
                if dev_acc > best_acc:
                    # Snapshot the best weights seen so far.
                    best_acc = dev_acc
                    best_net = deepcopy(model.state_dict())
    print(f'best dev acc: {best_acc:.4f}')
    return best_net
def build_network(config, channels, num_classes, anchors, num_layers):
    """Construct a YOLOv6-style (backbone, neck, head) triple from a config.

    The depth/width multiples rescale the per-stage repeat counts and the
    channel widths; CSP backbones additionally take a csp_e expansion ratio.
    """
    depth_mul = config.model.depth_multiple
    width_mul = config.model.width_multiple
    num_repeat_backbone = config.model.backbone.num_repeats
    channels_list_backbone = config.model.backbone.out_channels
    num_repeat_neck = config.model.neck.num_repeats
    channels_list_neck = config.model.neck.out_channels
    num_anchors = config.model.head.anchors
    use_dfl = config.model.head.use_dfl
    reg_max = config.model.head.reg_max
    # Scale repeats by the depth multiple (minimum 1); round channels to /8.
    num_repeat = [(max(round((i * depth_mul)), 1) if (i > 1) else i) for i in (num_repeat_backbone + num_repeat_neck)]
    channels_list = [make_divisible((i * width_mul), 8) for i in (channels_list_backbone + channels_list_neck)]
    block = get_block(config.training_mode)
    # NOTE(review): eval() resolves class names from config files — safe only
    # for trusted configs; never feed untrusted input through this path.
    BACKBONE = eval(config.model.backbone.type)
    NECK = eval(config.model.neck.type)
    if ('CSP' in config.model.backbone.type):
        backbone = BACKBONE(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat, block=block, csp_e=config.model.backbone.csp_e)
        neck = NECK(channels_list=channels_list, num_repeats=num_repeat, block=block, csp_e=config.model.neck.csp_e)
    else:
        backbone = BACKBONE(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat, block=block)
        neck = NECK(channels_list=channels_list, num_repeats=num_repeat, block=block)
    head_layers = build_effidehead_layer(channels_list, num_anchors, num_classes, reg_max)
    head = Detect(num_classes, anchors, num_layers, head_layers=head_layers, use_dfl=use_dfl)
    return (backbone, neck, head)
def _make_constraints(*rhos):
    """Build the cvxpy constraints making each (re, im) pair a density matrix:
    unit trace, Hermitian, and PSD (via the real 2x2 block embedding)."""
    constraints = [cvxpy.trace(rho.re) == 1 for rho in rhos]
    for rho in rhos:
        # Hermiticity: symmetric real part, antisymmetric imaginary part.
        constraints.append(rho.re == rho.re.T)
        constraints.append(rho.im == (-rho.im.T))
    for rho in rhos:
        # [[Re, -Im], [Im, Re]] >> 0 encodes complex positive semidefiniteness.
        embedding = cvxpy.bmat([[rho.re, (-rho.im)], [rho.im, rho.re]])
        constraints.append(embedding >> 0)
    return constraints
class AnswersExportMixin():
    """Builds flat question/answer rows for export views."""

    def get_data(self):
        """Collect one row per answered (question, set) combination."""
        self.project.catalog.prefetch_elements()
        wrapper = ProjectWrapper(self.project, self.snapshot)
        rows = []
        for question in wrapper.questions:
            attribute = question['attribute']
            for set_prefix in view_tags.get_set_prefixes({}, attribute, project=wrapper):
                for set_index in view_tags.get_set_indexes({}, attribute, set_prefix=set_prefix, project=wrapper):
                    values = view_tags.get_values({}, attribute, set_prefix=set_prefix, set_index=set_index, project=wrapper)
                    labels = view_tags.get_labels({}, question, set_prefix=set_prefix, set_index=set_index, project=wrapper)
                    if view_tags.check_element({}, question, set_prefix=set_prefix, set_index=set_index, project=wrapper):
                        rows.append({
                            'question': self.stringify(question['text']),
                            'set': ' '.join(labels),
                            'values': self.stringify_values(values),
                        })
        return rows

    def stringify_values(self, values):
        """Join all value/unit strings with '; '; empty string for None."""
        if values is None:
            return ''
        return '; '.join(self.stringify(value['value_and_unit']) for value in values)

    def stringify(self, el):
        """Collapse whitespace runs to single spaces; empty string for None."""
        return '' if el is None else re.sub('\\s+', ' ', str(el))
class BallQuery(Function):
    """Autograd wrapper for the CUDA ball-query kernel.

    For every query point in ``new_xyz``, gathers the indices of up to
    ``nsample`` points of ``xyz`` lying within ``radius``.
    """

    # NOTE(review): torch.autograd.Function requires static forward/backward;
    # the decorators appear to have been stripped from the original source.
    @staticmethod
    def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
        """Run the ball-query kernel.

        :param radius: search radius around each query point
        :param nsample: max number of neighbours per query point
        :param xyz: (B, N, 3) point coordinates (contiguous, CUDA)
        :param new_xyz: (B, npoint, 3) query coordinates (contiguous, CUDA)
        :return: (B, npoint, nsample) int tensor of neighbour indices
        """
        assert new_xyz.is_contiguous()
        assert xyz.is_contiguous()
        (B, N, _) = xyz.size()
        npoint = new_xyz.size(1)
        # The kernel fills `idx` in place; zero-init so unused slots are 0.
        idx = torch.cuda.IntTensor(B, npoint, nsample).zero_()
        pointnet2.ball_query_wrapper(B, N, npoint, radius, nsample, new_xyz, xyz, idx)
        return idx

    @staticmethod
    def backward(ctx, a=None):
        # Index selection is not differentiable: no gradient for any input.
        return (None, None, None, None)
class TorchXArgumentHelpFormatter(argparse.HelpFormatter):
    """Help formatter that appends '(required)' or '(default: ...)' to help."""

    def _get_help_string(self, action: argparse.Action) -> str:
        """Return the action's help text with a required/default suffix."""
        text = action.help if action.help else ''
        # Suppressed defaults get no annotation at all.
        if action.default is argparse.SUPPRESS:
            return text
        suffix = ' (required)' if action.required else f' (default: {action.default})'
        return text + suffix
class Crypt(CryptAbstract):
    """AWS KMS-backed crypt: ciphertext travels as base64-encoded text."""

    encryption_algorithm = 'SYMMETRIC_DEFAULT'
    key_id = ''

    def __init__(self, *args, **kwargs) -> None:
        self._init_boto()
        super().__init__(*args, **kwargs)

    def _init_boto(self) -> None:
        """Import boto3 on demand and create the KMS client."""
        check_package_exists('boto3')
        boto3 = import_package('boto3')
        boto3.set_stream_logger(name='botocore')
        self.client = boto3.client('kms')

    def encrypt(self, message: str) -> str:
        """KMS-encrypt `message`, returning the ciphertext blob as base64 text."""
        ciphertext = self.client.encrypt(KeyId=self.config.key_id, Plaintext=bytes(message, encoding='UTF-8'))
        blob = ciphertext['CiphertextBlob']
        return str(base64.b64encode(blob), encoding='UTF-8')

    def decrypt(self, encrypted: str) -> str:
        """Inverse of encrypt: base64-decode, then KMS-decrypt to text."""
        return self._aws_decrypt(self._parse_encrypted(encrypted))

    def _parse_encrypted(self, encrypted: str) -> bytes:
        """Decode the transport (base64) representation to raw bytes."""
        return base64.b64decode(encrypted)

    def _aws_decrypt(self, blob_text: bytes) -> str:
        """Decrypt a raw ciphertext blob via KMS and return the plaintext."""
        response = self.client.decrypt(CiphertextBlob=blob_text, KeyId=self.config.key_id, EncryptionAlgorithm=self.encryption_algorithm)
        return str(response['Plaintext'], encoding='UTF-8')
def relaxation_frequency_nitrogen(pressure, temperature, h, reference_pressure=REFERENCE_PRESSURE, reference_temperature=REFERENCE_TEMPERATURE):
    """Relaxation frequency of nitrogen in Hz.

    :param pressure: ambient pressure (same units as ``reference_pressure``)
    :param temperature: ambient temperature in K
    :param h: molar concentration of water vapour (fraction)

    NOTE(review): the formula matches the ISO 9613-1 style expression for the
    nitrogen relaxation frequency — confirm against the standard if exact
    compliance matters.
    """
    t_ratio = temperature / reference_temperature
    humidity_term = (280.0 * h) * np.exp((-4.17) * ((t_ratio ** ((-1.0) / 3.0)) - 1.0))
    return (pressure / reference_pressure) * (t_ratio ** (-0.5)) * (9.0 + humidity_term)
def test_failing_command(tmp_path):
    """A failing CIBW_BEFORE_BUILD command must surface as CalledProcessError."""
    project_dir = tmp_path / 'project'
    test_projects.new_c_project().generate(project_dir)
    # 'false' fails on POSIX shells; 'exit /b 1' is the Windows equivalent.
    failing_env = {'CIBW_BEFORE_BUILD': 'false', 'CIBW_BEFORE_BUILD_WINDOWS': 'exit /b 1'}
    with pytest.raises(subprocess.CalledProcessError):
        utils.cibuildwheel_run(project_dir, add_env=failing_env)
class Effect7080(BaseEffect):
    """Ship bonus: damage multiplier for modules requiring the Large Precursor
    Weapon skill, driven by the ship attribute ``shipBonusPBS2`` and scaled by
    the Precursor Battleship skill."""

    # Passive effect: always active while the ship is fitted.
    type = 'passive'

    # NOTE(review): handler takes no `self` — presumably invoked unbound by the
    # effect framework's convention; confirm before changing the signature.
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Large Precursor Weapon')), 'damageMultiplier', ship.getModifiedItemAttr('shipBonusPBS2'), skill='Precursor Battleship', **kwargs)
# The original began with a bare `(scope='function')` line — a syntax error
# left by a stripped decorator head; restored as a pytest fixture.
@pytest.fixture(scope='function')
def sapm_dc_snl_ac_system(sapm_module_params, cec_inverter_parameters, sapm_temperature_cs5p_220m):
    """PVSystem fixture: Canadian Solar CS5P 220M module with SAPM parameters
    and a CEC inverter, rebuilt for every test function."""
    module = 'Canadian_Solar_CS5P_220M___2009_'
    # Copy the shared fixture dicts so individual tests may mutate them safely.
    module_parameters = sapm_module_params.copy()
    temp_model_params = sapm_temperature_cs5p_220m.copy()
    system = PVSystem(surface_tilt=32.2, surface_azimuth=180, module=module, module_parameters=module_parameters, temperature_model_parameters=temp_model_params, inverter_parameters=cec_inverter_parameters)
    return system
def get_dependencies(argv: Optional[List[str]]=None) -> None:
    """Resolve and print the dependency list for the requested package manager.

    Looks up ``get_reqs_<pkg_mgr>*`` functions on this module (variants chosen
    by the --dev/--torch/--tensorflow flags), calls each with the version
    arguments, concatenates the resulting lists and prints one entry per line.
    """
    parser = get_parser()
    args = parser.parse_args(argv)
    pkg_mgr = args.pkg_mgr.lower()
    # When a flag is off, its f-string collapses to the plain
    # 'get_reqs_<pkg_mgr>' name, so the set dedupes the variants.
    # NOTE(review): set iteration order is arbitrary, so the printed dependency
    # order is nondeterministic across runs — confirm nothing relies on it.
    fn_names = {f"get_reqs_{pkg_mgr}{('_dev' if args.dev else '')}", f'get_reqs_{pkg_mgr}', f"get_reqs_{pkg_mgr}{('_torch' if args.torch else '')}", f"get_reqs_{pkg_mgr}{('_tf' if args.tensorflow else '')}"}
    # NOTE(review): 'tf_verson' looks like a typo of 'tf_version', but it must
    # match the keyword accepted by the get_reqs_* functions — verify their
    # signatures before renaming.
    fn_args = {'cuda_version': args.cuda, 'tf_verson': args.tensorflow, 'torch_version': args.torch, 'onnx_version': args.onnx}
    funcs = operator.attrgetter(*fn_names)(_CURRENT_MODULE)
    # attrgetter with a single name returns the attribute itself, not a tuple.
    if (not isinstance(funcs, Iterable)):
        funcs = [funcs]
    deps = functools.reduce((lambda acc, fn: (acc + fn(**fn_args))), funcs, [])
    print('\n'.join(deps))
def get_tl_line_values(line, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0):
    """Parse one annotation line into (points, confidence, transcription).

    Two layouts are supported:
      * LTRB=True : xmin,ymin,xmax,ymax        (axis-aligned box, 4 values)
      * LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4    (quadrilateral, 8 values)
    each optionally followed by a confidence in [0, 1] and/or a transcription.

    When both imWidth and imHeight are positive, points are also validated to
    lie inside the image.

    Raises Exception with a format hint when the line does not match.
    """
    confidence = 0.0
    transcription = ''
    points = []
    if LTRB:
        numPoints = 4
        if (withTranscription and withConfidence):
            m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-1].?[0-9]*)\\s*,(.*)$', line)
            # (the original re-ran the identical match here before raising;
            # that dead call has been removed)
            if m is None:
                raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription')
        elif withConfidence:
            m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-1].?[0-9]*)\\s*$', line)
            if m is None:
                raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence')
        elif withTranscription:
            m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,(.*)$', line)
            if m is None:
                raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription')
        else:
            m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,?\\s*$', line)
            if m is None:
                raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax')
        xmin = int(m.group(1))
        ymin = int(m.group(2))
        xmax = int(m.group(3))
        ymax = int(m.group(4))
        if xmax < xmin:
            raise Exception('Xmax value (%s) not valid (Xmax < Xmin).' % xmax)
        if ymax < ymin:
            raise Exception('Ymax value (%s) not valid (Ymax < Ymin).' % ymax)
        points = [float(m.group(i)) for i in range(1, numPoints + 1)]
        if (imWidth > 0) and (imHeight > 0):
            validate_point_inside_bounds(xmin, ymin, imWidth, imHeight)
            validate_point_inside_bounds(xmax, ymax, imWidth, imHeight)
    else:
        numPoints = 8
        if (withTranscription and withConfidence):
            m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-1].?[0-9]*)\\s*,(.*)$', line)
            if m is None:
                raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription')
        elif withConfidence:
            m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-1].?[0-9]*)\\s*$', line)
            if m is None:
                raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence')
        elif withTranscription:
            m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,(.*)$', line)
            if m is None:
                raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription')
        else:
            m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*$', line)
            if m is None:
                raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4')
        points = [float(m.group(i)) for i in range(1, numPoints + 1)]
        # Quadrilateral corners must be listed in clockwise order.
        validate_clockwise_points(points)
        if (imWidth > 0) and (imHeight > 0):
            for px, py in zip(points[0::2], points[1::2]):
                validate_point_inside_bounds(px, py, imWidth, imHeight)
    if withConfidence:
        try:
            confidence = float(m.group(numPoints + 1))
        except ValueError:
            raise Exception('Confidence value must be a float')
    if withTranscription:
        posTranscription = numPoints + (2 if withConfidence else 1)
        transcription = m.group(posTranscription)
        # Strip optional surrounding quotes and unescape \\ and \" inside them.
        m2 = re.match('^\\s*\\"(.*)\\"\\s*$', transcription)
        if m2 is not None:
            transcription = m2.group(1).replace('\\\\', '\\').replace('\\"', '"')
    return (points, confidence, transcription)
def make_zone_file(zone, filename, serial, header, record_list):
    """Write a DNS zone file: header (with serial substituted) plus records.

    Args:
        zone: zone name, stripped from each record's FQDN to get the label.
        filename: destination path of the zone file.
        serial: value substituted for the 'pre_serial' placeholder.
        header: iterable of header lines containing the placeholder.
        record_list: dicts with 'name', 'type', 'record' and optional 'ttl'.

    Raises:
        UpdaterErr: wrapping any failure, after logging it.
    """
    try:
        with open(filename, 'w') as f:
            f.writelines(header)
        # Substitute the serial placeholder in pure Python instead of shelling
        # out to `sed -i` (the old code interpolated filename/serial into a
        # shell string). replace(..., 1) mirrors sed's first-match-per-line.
        with open(filename) as f:
            lines = f.readlines()
        with open(filename, 'w') as f:
            f.writelines(l.replace('pre_serial', str(serial), 1) for l in lines)
        with open(filename, 'a') as f:
            for item in record_list:
                name = item['name'].replace('.' + zone, '')
                record = item['record']
                # CNAME targets must be absolute (trailing dot).
                if item['type'] == 'CNAME':
                    record = record + '.'
                ttl = int(item['ttl']) if 'ttl' in item else 0
                if ttl == 0:
                    # No explicit TTL: inherit the zone default.
                    f.write('%s \t IN \t %s \t %s\n' % (name, item['type'], record))
                else:
                    f.write('%s \t %d \t IN \t %s \t %s\n' % (name, ttl, item['type'], record))
    except Exception as e:
        log.error(e)
        raise UpdaterErr('Create zone file failed: %s' % e)
def get_all_tags(skopeo: SkopeoMirror, mirror: RepoMirrorConfig) -> list[str]:
    """List every tag of the mirror's external repository via skopeo.

    Raises:
        RepoMirrorSkopeoException: when the skopeo invocation fails.
    """
    # DEBUGLOG=true enables verbose skopeo output.
    verbose_logs = (os.getenv('DEBUGLOG', 'false').lower() == 'true')
    # Credentials are stored encrypted; decrypt only when configured.
    username = (mirror.external_registry_username.decrypt() if mirror.external_registry_username else None)
    password = (mirror.external_registry_password.decrypt() if mirror.external_registry_password else None)
    # Release the DB connection for the duration of the slow network call.
    with database.CloseForLongOperation(app.config):
        result = skopeo.tags(('docker://%s' % mirror.external_reference), username=username, password=password, verbose_logs=verbose_logs, verify_tls=mirror.external_registry_config.get('verify_tls', True), proxy=mirror.external_registry_config.get('proxy', {}))
    if (not result.success):
        raise RepoMirrorSkopeoException(('skopeo list-tags failed: %s' % _skopeo_inspect_failure(result)), result.stdout, result.stderr)
    return result.tags
def test_DecisionMatrixDominanceAccessor_bt():
    """bt() counts, per alternative pair, the criteria where the row
    alternative beats the column alternative (and it must match whether the
    accessor is built directly or reached via dm.dominance)."""
    dm = data.mkdm(
        matrix=[[10, 40], [20, 70]],
        objectives=[max, min],
        alternatives=['A0', 'A1'],
        criteria=['C0', 'C1'],
    )
    dom = dominance.DecisionMatrixDominanceAccessor(dm)
    expected = pd.DataFrame([[0, 1], [1, 0]], index=['A0', 'A1'], columns=['A0', 'A1'])
    expected.index.name = 'Better than'
    expected.columns.name = 'Worse than'
    for result in (dom.bt(), dm.dominance.bt()):
        pd.testing.assert_frame_equal(result, expected)
class TestRuntime(TestNameCheckVisitorBase):
    """Runtime checks for pyanalyze's deprecation handling.

    NOTE(review): the bare `_passes()` calls and the stray string expressions
    below ('int support is deprecated', 'no functioning capybaras', ...) look
    like remnants of stripped decorators (likely `@assert_passes()` and
    `@deprecated("...")` / `@overload` from pyanalyze.extensions). The code as
    written is syntactically valid but almost certainly not the original —
    confirm against the upstream test suite before relying on it.
    """

    _passes()
    def test_overload(self):
        from pyanalyze.extensions import deprecated, overload
        ('int support is deprecated')
        def deprecated_overload(x: int) -> int:
            ...
        def deprecated_overload(x: str) -> str:
            ...
        def deprecated_overload(x):
            return x
        def capybara():
            deprecated_overload(1)
            deprecated_overload('x')

    _passes()
    def test_function(self):
        from pyanalyze.extensions import deprecated
        ('no functioning capybaras')
        def deprecated_function(x: int) -> int:
            return x
        def capybara():
            print(deprecated_function)
            deprecated_function(1)

    _passes()
    def test_method(self):
        from pyanalyze.extensions import deprecated
        class Cls():
            ('no methodical capybaras')
            def deprecated_method(self, x: int) -> int:
                return x
        def capybara():
            Cls().deprecated_method(1)
            print(Cls.deprecated_method)

    _passes()
    def test_class(self):
        from pyanalyze.extensions import deprecated
        ('no classy capybaras')
        class DeprecatedClass():
            pass
        def capybara():
            print(DeprecatedClass)
            return DeprecatedClass()
# LPTOP_LEVEL_EXCEPTION_FILTER SetUnhandledExceptionFilter(
#   LPTOP_LEVEL_EXCEPTION_FILTER lpTopLevelExceptionFilter);
# The original started with a bare `(cc=STDCALL, params=...)` line — a syntax
# error from a stripped decorator head; restored per Qiling's hook convention.
@winsdkapi(cc=STDCALL, params={'lpTopLevelExceptionFilter': LPTOP_LEVEL_EXCEPTION_FILTER})
def hook_SetUnhandledExceptionFilter(ql: Qiling, address: int, params):
    """Emulate SetUnhandledExceptionFilter: record the new top-level handler
    address and return the previous one (0 if none was registered)."""
    addr = params['lpTopLevelExceptionFilter']
    handle = ql.os.handle_manager.search('TopLevelExceptionHandler')
    if handle is None:
        # First registration: create the bookkeeping handle.
        handle = Handle(name='TopLevelExceptionHandler', obj=addr)
        ql.os.handle_manager.append(handle)
        prev_filter = 0
    else:
        # Replace the existing filter and hand back the old one, as the real
        # API does.
        prev_filter = handle.obj
        handle.obj = addr
    return prev_filter
class BTOOLS_OT_materials_clear(bpy.types.Operator):
    """Operator that removes material groups with no faces assigned."""

    bl_idname = 'btools.materials_clear'
    bl_label = 'Clear Empty Material Groups'
    bl_options = {'REGISTER', 'UNDO'}

    # Blender invokes poll on the class to decide operator availability, so it
    # must be a classmethod; the decorator was missing (cls param confirms).
    @classmethod
    def poll(cls, context):
        obj = context.object
        return (obj and (obj.type == 'MESH'))

    def execute(self, context):
        clear_empty_matgroups(context)
        return {'FINISHED'}
# The original started with a bare `.parametrize(...)` line — a syntax error
# from a truncated `@pytest.mark` decorator head; restored here.
@pytest.mark.parametrize('tuf_prefix,server_hostname,namespace,repo,gun', [
    ('quay.dev', 'quay.io', 'ns', 'repo', 'quay.dev/ns/repo'),
    (None, 'quay.io', 'ns', 'repo', 'quay.io/ns/repo'),
    ('quay.dev/', 'quay.io', 'ns', 'repo', 'quay.dev/ns/repo'),
    (None, 'quay.io/', 'ns', 'repo', 'quay.io/ns/repo'),
    (None, 'localhost:5000/', 'ns', 'repo', 'localhost:5000/ns/repo'),
    (None, 'localhost:5000', 'ns', 'repo', 'localhost:5000/ns/repo'),
])
def test_gun(tuf_prefix, server_hostname, namespace, repo, gun):
    """The GUN is TUF_GUN_PREFIX (falling back to SERVER_HOSTNAME) joined with
    namespace/repo, with trailing slashes normalized away."""
    app = Flask(__name__)
    app.config.from_object(testconfig.TestConfig())
    app.config['TUF_GUN_PREFIX'] = tuf_prefix
    app.config['SERVER_HOSTNAME'] = server_hostname
    tuf_api = api.TUFMetadataAPI(app, app.config)
    assert (gun == tuf_api._gun(namespace, repo))
class uvm_nonblocking_put_port(uvm_port_base):
    """Non-blocking put port: forwards try_put/can_put to the connected export."""

    def try_put(self, data):
        """Attempt to deliver `data` without blocking; returns the export's status."""
        try:
            return self.export.try_put(data)
        except AttributeError:
            # No export (or wrong type) connected to this port.
            raise UVMTLMConnectionError(f'Missing or wrong export in {self.get_full_name()}. Did you connect it?')

    def can_put(self):
        """Return whether the connected export could accept a put right now."""
        try:
            return self.export.can_put()
        except AttributeError:
            raise UVMTLMConnectionError(f'Missing or wrong export in {self.get_full_name()}. Did you connect it?')
class QtileMigrate():
    """CLI command that applies libcst-based migrations to qtile config files.

    Supports listing migrations, showing per-migration help, lint-only runs,
    and (interactively or with --yes) applying changes with .bak backups and
    full revert on abort.
    """

    def __call__(self, args: argparse.Namespace) -> None:
        """Entry point: dispatch to list/info/run according to CLI flags."""
        # libcst is an optional dependency; bail out early if it isn't loaded.
        if ('libcst' not in sys.modules):
            print("libcst can't be found. Unable to migrate config file.")
            print('Please install it and try again.')
            sys.exit(1)
        self.args = args
        self.filter_migrations()
        if self.args.list_migrations:
            self.list_migrations()
            return
        elif self.args.info:
            self.show_migration_info()
            return
        else:
            self.run_migrations()

    def filter_migrations(self) -> None:
        """Select the subset of MIGRATIONS requested via CLI flags."""
        load_migrations()
        if self.args.run_migrations:
            # Explicit list of migration IDs.
            self.migrations = [m for m in MIGRATIONS if (m.ID in self.args.run_migrations)]
        elif self.args.after_version:
            # Only migrations introduced after the given qtile version.
            self.migrations = [m for m in MIGRATIONS if (m.get_version() > self.args.after_version)]
        else:
            self.migrations = MIGRATIONS
        if (not self.migrations):
            sys.exit('No migrations found.')

    def list_migrations(self) -> None:
        """Print an aligned table of migration ID, version and summary."""
        # Column width tracks the longest ID plus padding.
        width = (max((len(m.ID) for m in self.migrations)) + 4)
        ordered = sorted(self.migrations, key=(lambda m: (m.get_version(), m.ID)))
        print(f"ID{(' ' * (width - 2))}{'After Version':<15}Summary")
        for m in ordered:
            summary = m.show_summary().replace('``', "'")
            print(f'{m.ID:<{width}}{m.AFTER_VERSION:^15}{summary}')

    def show_migration_info(self) -> None:
        """Print the detailed help text of a single migration by ID."""
        migration_id = self.args.info
        migration = [m for m in MIGRATIONS if (m.ID == migration_id)]
        if (not migration):
            print(f'Unknown migration: {migration_id}')
            sys.exit(1)
        print(f'{migration_id}:')
        print(migration[0].show_help())

    def get_source(self, path: str) -> libcst.metadata.MetadataWrapper:
        """Parse a config file into a metadata-wrapped libcst module."""
        module = libcst.parse_module(Path(path).read_text())
        return libcst.metadata.MetadataWrapper(module)

    def lint(self, path: str) -> None:
        """Run all selected migrations in dry-run mode and print lint lines."""
        print(f'{path}:')
        source = self.get_source(path)
        lint_lines = []
        for m in self.migrations:
            migrator = m()
            migrator.migrate(source)
            lint_lines.extend(migrator.show_lint())
        lint_lines.sort()
        print('\n'.join(map(str, lint_lines)))

    def migrate(self, path: str) -> bool:
        """Apply the selected migrations to one file, optionally interactively.

        Returns True if the file was changed and saved, False otherwise.
        Raises SkipFile / AbortMigration when the user answers 's' / 'q'.
        """
        source: (libcst.metadata.MetadataWrapper | libcst.Module) = self.get_source(path)
        changed = False
        for m in self.migrations:
            migrator = m()
            migrator.migrate(source)
            diff = migrator.show_diff(self.args.no_colour)
            if diff:
                if (self.args.show_diff or (not self.args.yes)):
                    print(f'''{m.ID}: {m.show_summary()}
''')
                    print(f'''{diff}
''')
                if self.args.yes:
                    # Non-interactive mode: accept every diff.
                    assert (migrator.updated is not None)
                    source = libcst.metadata.MetadataWrapper(migrator.updated)
                    changed = True
                else:
                    while ((a := input('Apply changes? (y)es, (n)o, (s)kip file, (q)uit. ').lower()) not in ('y', 'n', 's', 'q')):
                        print('Unexpected response. Try again.')
                    if (a == 'y'):
                        assert (migrator.updated is not None)
                        source = libcst.metadata.MetadataWrapper(migrator.updated)
                        changed = True
                    elif (a == 'n'):
                        # Keep the pre-migration tree for the next migration.
                        assert (migrator.original is not None)
                        source = migrator.original
                    elif (a == 's'):
                        raise SkipFile
                    elif (a == 'q'):
                        raise AbortMigration
        if (not changed):
            return False
        if (not self.args.yes):
            while ((save := input(f'Save all changes to {path}? (y)es, (n)o. ').lower()) not in ('y', 'n')):
                print('Unexpected response. Try again.')
            do_save = (save == 'y')
        else:
            do_save = True
        if do_save:
            # `source` may be a wrapper (after an accepted diff) or a module.
            if isinstance(source, libcst.metadata.MetadataWrapper):
                source = source.module
            Path(f'{path}').write_text(source.code)
            print('Saved!')
            return True
        else:
            return False

    def run_migrations(self) -> None:
        """Back up each config file, migrate it, and revert everything on abort."""
        backups = []
        changed_files = []
        aborted = False
        for (py, backup) in file_and_backup(self.args.config):
            if self.args.lint:
                self.lint(py)
                continue
            else:
                try:
                    # Back up before touching the file so an abort can revert.
                    shutil.copyfile(py, backup)
                    backups.append(backup)
                    changed = self.migrate(py)
                    if changed:
                        changed_files.append(py)
                except SkipFile:
                    backups.remove(backup)
                    continue
                except AbortMigration:
                    aborted = True
                    break
        if aborted:
            print('Migration aborted. Reverting changes.')
            for f in changed_files:
                shutil.copyfile((f + BACKUP_SUFFIX), f)
        elif backups:
            print('Finished. Backup files have not been deleted.')
class TrainingSampler(Sampler):
    """Infinite index sampler for distributed training.

    Every rank draws every world_size-th element of one shared, seeded
    infinite index stream, so the ranks consume disjoint slices of the same
    (optionally shuffled) order.
    """

    def __init__(self, size: int, shuffle: bool=True, seed: Optional[int]=None):
        assert size > 0
        self._size = size
        self._shuffle = shuffle
        # All ranks must agree on the seed so their streams interleave cleanly.
        self._seed = int(seed if seed is not None else comm.shared_random_seed())
        self._rank = comm.get_rank()
        self._world_size = comm.get_world_size()

    def __iter__(self):
        yield from itertools.islice(self._infinite_indices(), self._rank, None, self._world_size)

    def _infinite_indices(self):
        # Seed the global numpy RNG so every rank produces the same sequence.
        np.random.seed(self._seed)
        while True:
            if self._shuffle:
                order = np.random.permutation(self._size)
            else:
                order = np.arange(self._size)
            yield from order
class GifReplyDataset(torch.utils.data.Dataset):
    """Tweet-text -> GIF-tag dataset tokenized with BERTweet.

    Serves the train or dev split depending on ``train``; in ``test`` mode
    __getitem__ returns only the token ids, otherwise (token_ids, y_true).
    """

    # Shared class-level tokenizer: loaded once at import time (downloads the
    # model files on first use).
    tokenizer = AutoTokenizer.from_pretrained('vinai/bertweet-base')

    def __init__(self, dataset_path, metadata_path, train=True, test=False, dev_size=0.05, multiclass=False, max_seq_length=128, random_state=42, reuse_data=None, **kwargs):
        """Load (or reuse) the dataset and split it into train/dev.

        reuse_data: another GifReplyDataset whose loaded splits and label maps
        are reused, so the expensive load/split runs only once per process.
        """
        self.train = train
        self.test = test
        self.multiclass = multiclass
        self.max_seq_length = max_seq_length
        if (not reuse_data):
            (self.data, dataset_info) = load_dataset(dataset_path, metadata_path)
            self.num_classes = dataset_info['n_labels']
            self.label_to_id = dataset_info['label_to_id']
            self.id_to_label = dataset_info['id_to_label']
            tags_to_vector_func = partial(tags_to_vector, label_to_id=self.label_to_id, n_labels=self.num_classes)
            # Drop rows without any tag: they carry no supervision signal.
            _before = len(self.data)
            self.data = self.data[self.data['tags'].apply((lambda x: (len(x) > 0)))]
            _after = len(self.data)
            print(f'Removed {(_after - _before)} entries with empty tags from dataset. Before {_before}, After {_after}')
            # NOTE(review): parallel_apply implies pandarallel — confirm that
            # pandarallel.initialize() is called somewhere before this runs.
            self.data = self.data.assign(y_true=self.data['tags'].parallel_apply(tags_to_vector_func))
            # NOTE(review): multilabel_class_stats is not defined in this class
            # — presumably provided elsewhere; verify.
            self.n_samples_per_class_overall = self.multilabel_class_stats(self.data)
            if ('set' in self.data.columns):
                # Pre-split dataset: honor the provided 'set' column.
                self._train_df = self.data[(self.data['set'] == 'train')]
                self._dev_df = self.data[(self.data['set'] == 'dev')]
                print(f'dataset is already splited, using the provided split: train {len(self._train_df)}, dev {len(self._dev_df)}')
            elif self.multiclass:
                # Single-label case: stratified split keeps class balance.
                (self._train_df, self._dev_df) = train_test_split(self.data, test_size=dev_size, random_state=random_state, stratify=self.data['y_true'].to_list())
            else:
                # Multi-label case: iterative stratification over label vectors.
                X = self.data.to_numpy()
                y = np.array(self.data['y_true'].to_list())
                (X_train, y_train, X_dev, y_dev) = iterative_train_test_split(X, y, test_size=dev_size)
                self._train_df = pd.DataFrame(data=X_train, columns=self.data.columns)
                self._dev_df = pd.DataFrame(data=X_dev, columns=self.data.columns)
        else:
            # Reuse already-loaded splits and label maps from a sibling dataset.
            self.num_classes = reuse_data.num_classes
            self.label_to_id = reuse_data.label_to_id
            self.id_to_label = reuse_data.id_to_label
            self._train_df = reuse_data._train_df
            self._dev_df = reuse_data._dev_df
            self.n_samples_per_class_overall = reuse_data.n_samples_per_class_overall
        if train:
            self.data = self._train_df
        else:
            self.data = self._dev_df
        self.data.reset_index(drop=True, inplace=True)

    def __getitem__(self, index):
        """Return tokenized parent text (and the label unless in test mode)."""
        row = self.data.loc[index]
        X = row['parent_text']
        X = self.tokenizer.encode(X, max_length=self.max_seq_length, truncation=True)
        X = torch.Tensor(X).long()
        y_true = row['y_true']
        # Multiclass targets are class indices (long); multilabel are float vectors.
        if self.multiclass:
            y_true = torch.Tensor(y_true).long()
        else:
            y_true = torch.Tensor(y_true).float()
        if self.test:
            return X
        else:
            return (X, y_true)

    def __len__(self):
        return len(self.data)
# NOTE(review): the original line here was a bare `_lr_scheduler('triangular')`
# call — almost certainly a truncated registration decorator; restored below.
# Confirm the decorator name against the project's scheduler registry.
@register_lr_scheduler('triangular')
class TriangularSchedule(LegacyFairseqLRScheduler):
    """Cyclical ("triangular") learning-rate schedule.

    The LR ramps linearly from min_lr to max_lr and back over
    ``2 * stepsize`` updates; with --lr-shrink the peak decays geometrically
    each cycle (and the floor too when --shrink-min is set).
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if (len(args.lr) > 1):
            raise ValueError('Cannot use a fixed learning rate schedule with triangular. Consider --lr-scheduler=fixed instead.')
        lr = args.lr[0]
        assert (args.max_lr > lr), 'max_lr must be more than lr'
        self.min_lr = lr
        self.max_lr = args.max_lr
        # Half the cycle length, in updates.
        self.stepsize = (args.lr_period_updates // 2)
        self.lr_shrink = args.lr_shrink
        self.shrink_min = args.shrink_min
        # Start at the bottom of the first cycle.
        self.lr = self.min_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add scheduler-specific arguments to the parser."""
        parser.add_argument('--max-lr', required=True, type=float, metavar='LR', help='max learning rate, must be more than args.lr')
        parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR', help='initial number of updates per period (cycle length)')
        parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS', help='shrink factor for annealing')
        parser.add_argument('--shrink-min', action='store_true', help='if set, also shrinks min lr')

    def step(self, epoch, val_loss=None):
        """Epoch-level hook; the real scheduling happens in step_update."""
        super().step(epoch, val_loss)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Recompute and apply the LR after every optimizer update."""
        cycle = math.floor((num_updates / (2 * self.stepsize)))
        # Shrink the LR envelope once per completed cycle.
        lr_shrink = (self.lr_shrink ** cycle)
        max_lr = (self.max_lr * lr_shrink)
        if self.shrink_min:
            min_lr = (self.min_lr * lr_shrink)
        else:
            min_lr = self.min_lr
        # x falls 1 -> 0 over the first half-cycle and rises back to 1 over
        # the second, so the LR peaks at mid-cycle.
        x = abs((((num_updates / self.stepsize) - (2 * (cycle + 1))) + 1))
        self.lr = (min_lr + ((max_lr - min_lr) * max(0, (1 - x))))
        self.optimizer.set_lr(self.lr)
        return self.lr
class TestUser1DSubMesh(TestCase):
    """Tests for pybamm.UserSupplied1DSubMesh creation and error handling."""

    def test_exceptions(self):
        edges = np.array([0, 0.3, 1])
        generator = pybamm.MeshGenerator(pybamm.UserSupplied1DSubMesh, {'edges': edges})
        # npts disagrees with len(edges) - 1
        with self.assertRaises(pybamm.GeometryError):
            generator({'x_n': {'min': 0, 'max': 1}}, {'x_n': 10})
        # lower limit does not match edges[0]
        with self.assertRaises(pybamm.GeometryError):
            generator({'x_n': {'min': 0.1, 'max': 1}}, {'x_n': len(edges) - 1})
        # upper limit does not match edges[-1]
        with self.assertRaises(pybamm.GeometryError):
            generator({'x_n': {'min': 0, 'max': 10}}, {'x_n': len(edges) - 1})
        # no edges supplied at all
        generator = pybamm.MeshGenerator(pybamm.UserSupplied1DSubMesh)
        with self.assertRaisesRegex(pybamm.GeometryError, 'User mesh requires'):
            generator(None, None)

    def test_mesh_creation_no_parameters(self):
        r = pybamm.SpatialVariable('r', domain=['negative particle'], coord_sys='spherical polar')
        geometry = {'negative particle': {r: {'min': pybamm.Scalar(0), 'max': pybamm.Scalar(1)}}}
        edges = np.array([0, 0.3, 1])
        submesh_types = {'negative particle': pybamm.MeshGenerator(pybamm.UserSupplied1DSubMesh, {'edges': edges})}
        var_pts = {r: len(edges) - 1}
        mesh = pybamm.Mesh(geometry, submesh_types, var_pts)
        particle = mesh['negative particle']
        # Edges span [0, 1] and there is one more edge than nodes.
        self.assertEqual(particle.edges[0], 0)
        self.assertEqual(particle.edges[-1], 1)
        self.assertEqual(len(particle.nodes), var_pts[r])
        self.assertEqual(len(particle.edges), len(particle.nodes) + 1)
def update_pkg_resources():
    """Re-vendor pkg_resources' dependencies and rewrite their imports onto
    the ``pkg_resources.extern`` namespace."""
    vendor = Path('pkg_resources/_vendor')
    install(vendor)
    extern = 'pkg_resources.extern'
    rewrite_packaging(vendor / 'packaging', extern)
    # jaraco.text must be rewritten before the jaraco package itself.
    rewrite_jaraco_text(vendor / 'jaraco/text', extern)
    rewrite_jaraco(vendor / 'jaraco', extern)
    rewrite_importlib_resources(vendor / 'importlib_resources', extern)
    # These two only need path fixes, not extern rewriting.
    rewrite_more_itertools(vendor / 'more_itertools')
    rewrite_platformdirs(vendor / 'platformdirs')
def extract_games(zip_filename, dst, force=False):
    """Extract .z8 games and .json metadata from an archive into split dirs.

    Files land in dst/{train,valid,test} according to their parent directory
    inside the archive. Existing files are kept unless ``force`` is True.

    Returns the list of output paths (existing or newly extracted).
    """
    subdirs = {split: pjoin(dst, split) for split in ('train', 'valid', 'test')}
    for d in subdirs.values():
        # Idempotent; replaces the original isdir-then-makedirs dance.
        os.makedirs(d, exist_ok=True)
    print('Extracting...')
    extracted_files = []
    # Context manager closes the archive handle (the original leaked it).
    with zipfile.ZipFile(zip_filename) as zipped_file:
        filenames_to_extract = [f for f in zipped_file.namelist() if f.endswith(('.z8', '.json'))]
        for filename in tqdm.tqdm(filenames_to_extract):
            subdir = subdirs[os.path.basename(os.path.dirname(filename))]
            out_file = pjoin(subdir, os.path.basename(filename))
            extracted_files.append(out_file)
            if os.path.isfile(out_file) and not force:
                continue
            with open(out_file, 'wb') as f:
                f.write(zipped_file.read(filename))
    return extracted_files
class SpectralVolume1DSubMesh(SubMesh1D):
    """1D submesh for the Spectral Volume method.

    The domain is divided into ``npts`` spectral volumes (SVs) given by
    ``edges``; each SV is subdivided into ``order + 1`` control volumes whose
    edges come from a sine-mapped (Chebyshev-type) point distribution.
    """

    def __init__(self, lims, npts, edges=None, order=2):
        (spatial_var, spatial_lims, tabs) = self.read_lims(lims)
        npts = npts[spatial_var.name]
        if (edges is None):
            # Default: uniformly spaced spectral-volume edges.
            edges = np.linspace(spatial_lims['min'], spatial_lims['max'], (npts + 1))
        elif ((npts + 1) != len(edges)):
            # (also fixes the 'User-suppled' typo in the original message)
            raise pybamm.GeometryError('User-supplied edges should have length (npts + 1) but has length {}. Number of points (npts) for domain {} is {}.'.format(len(edges), spatial_var.domain, npts))
        # User-supplied edges must span exactly the domain limits.
        if (edges[0] != spatial_lims['min']):
            raise pybamm.GeometryError('First entry of edges is {}, but should be equal to {}\n for domain {}.'.format(edges[0], spatial_lims['min'], spatial_var.domain))
        if (edges[(- 1)] != spatial_lims['max']):
            raise pybamm.GeometryError('Last entry of edges is {}, but should be equal to {}\n for domain {}.'.format(edges[(- 1)], spatial_lims['max'], spatial_var.domain))
        coord_sys = spatial_var.coord_sys
        # Evenly spaced points on [-1/2, 1/2], mapped through sin below to
        # cluster control-volume edges towards each SV boundary.
        array = np.array([((((order + 1) - 1) - (2 * i)) / ((2 * (order + 1)) - 2)) for i in range((order + 1))])
        # Control-volume edges: map the points into every SV interval (a, b)
        # and drop each SV's first point to avoid duplicating shared edges.
        cv_edges = np.array(([edges[0]] + [x for (a, b) in zip(edges[:(- 1)], edges[1:]) for x in np.flip((a + ((0.5 * (b - a)) * (1 + np.sin((np.pi * array))))))[1:]]))
        self.sv_edges = edges
        self.sv_nodes = ((edges[:(- 1)] + edges[1:]) / 2)
        self.d_sv_edges = np.diff(self.sv_edges)
        self.d_sv_nodes = np.diff(self.sv_nodes)
        # BUG FIX: was hard-coded `self.order = 2`, silently ignoring the
        # `order` argument whenever a different value was passed.
        self.order = order
        super().__init__(cv_edges, coord_sys=coord_sys, tabs=tabs)
class Topic(TimeStampedModel, models.Model):
    """Advertising topic: a named group of keywords, cached as a
    slug -> [keyword slugs] mapping in the local cache."""

    # Cache key and lifetime (30 minutes) for the keyword-topic mapping.
    CACHE_KEY = 'keyword-topic-mapping'
    CACHE_TIMEOUT = (60 * 30)

    name = models.CharField(max_length=255)
    slug = models.SlugField(_('Slug'), max_length=200, unique=True)
    selectable = models.BooleanField(default=False, help_text=_('Whether advertisers can select this region for new flights'))

    def __str__(self):
        return self.name

    # Both helpers take `cls` and are used without an instance: they were
    # clearly intended as classmethods — the decorators were missing.
    @classmethod
    def load_from_cache(cls):
        """Return the slug -> keyword-slugs mapping, rebuilding it from the DB
        on a cache miss."""
        topics = caches[settings.CACHE_LOCAL_ALIAS].get(cls.CACHE_KEY)
        if (not topics):
            topics = cls._load_db()
        return topics

    @classmethod
    def _load_db(cls):
        """Build the mapping from the database and prime the local cache."""
        topics = {}
        for topic in Topic.objects.all().prefetch_related():
            topics[topic.slug] = [kw.slug for kw in topic.keywords.all()]
        caches[settings.CACHE_LOCAL_ALIAS].set(cls.CACHE_KEY, value=topics, timeout=cls.CACHE_TIMEOUT)
        return topics
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.