code stringlengths 281 23.7M |
|---|
class MichaudTauFatigue(TauFatigue):
    """Michaud fatigue model applied to joint torques (tau).

    Wraps a pair of MichaudFatigue models, one for the negative (minus)
    and one for the positive (plus) torque direction.
    """

    @staticmethod
    def dynamics_suffix() -> str:
        # Suffix used to name this model's activation-dynamics states.
        return 'ma'

    @staticmethod
    def fatigue_suffix() -> str:
        # Suffix used to name this model's fatigue states.
        return 'mf'

    def __init__(self, minus: MichaudFatigue, plus: MichaudFatigue, state_only: bool=False, apply_to_joint_dynamics: bool=False, **kwargs):
        """
        Parameters
        ----------
        minus: MichaudFatigue
            Fatigue model for the negative torque direction.
        plus: MichaudFatigue
            Fatigue model for the positive torque direction.
        state_only: bool
            Forwarded unchanged to TauFatigue.
        apply_to_joint_dynamics: bool
            Forwarded unchanged to TauFatigue.
        """
        # BUG FIX: dynamics_suffix/fatigue_suffix take no `self` but lacked
        # @staticmethod, so calling them on an instance raised TypeError.
        super(MichaudTauFatigue, self).__init__(minus, plus, state_only=state_only, apply_to_joint_dynamics=apply_to_joint_dynamics, **kwargs)
class BbxBlur(object):
    """Blur everything outside a bounding box via down/up-sampling.

    The whole image is shrunk by sqrt(compress_ratio) per side and scaled
    back with nearest-neighbour interpolation, then the original (sharp)
    bounding-box region is pasted back on top.
    """

    def __init__(self, compress_ratio):
        # Area compression factor applied to the background region.
        self.compress_ratio = compress_ratio

    def __call__(self, img, bbx):
        """Return (blurred image, effective compression rate).

        `bbx` is indexed as (top, bottom, left, right) in pixels; the
        crop/paste box below reorders it to PIL's (left, top, right, bottom).
        """
        width, height = img.width, img.height
        side_scale = math.sqrt(self.compress_ratio)
        small_size = (int(width / side_scale), int(height / side_scale))
        # Down-sample then up-sample to produce a blocky "blurred" background.
        blurred = img.resize(small_size, Image.NEAREST).resize((width, height), Image.NEAREST)
        box = (bbx[2], bbx[0], bbx[3], bbx[1])
        blurred.paste(img.crop(box), box)
        # Fraction of pixels kept sharp, and the resulting effective rate.
        sharp_fraction = (bbx[1] - bbx[0]) * (bbx[3] - bbx[2]) / width / height
        effective_rate = sharp_fraction + (1.0 - sharp_fraction) / self.compress_ratio
        return (blurred, effective_rate)
def parsed_sql_has_superlative(sql_query_parsed_from_spider, schema):
    """Return True if the parsed Spider SQL uses a superlative construct.

    A superlative is either ORDER BY ... LIMIT 1, or an equality WHERE
    condition whose right-hand side is a subquery selecting min()/max()
    (checked recursively).
    """
    # ORDER BY combined with LIMIT 1 directly expresses "the most/least ...".
    if sql_query_parsed_from_spider.get('orderBy') and (sql_query_parsed_from_spider.get('limit') == 1):
        return True
    for condition in sql_query_parsed_from_spider['where']:
        is_eq_subquery = (
            isinstance(condition, tuple)
            and WHERE_OPS[condition[1]] == '='
            and isinstance(condition[3], dict)
        )
        if not is_eq_subquery:
            continue
        subquery = condition[3]
        if parsed_sql_has_superlative(subquery, schema):
            return True
        # Spider stores the select clause as (distinct_flag, [select items]).
        for select_item in subquery['select'][1]:
            if AGG_OPS[select_item[0]] in ('min', 'max'):
                return True
    return False
class Discrete(Space):
    """A discrete space over the integers {0, 1, ..., n-1}."""

    def __init__(self, n):
        # Number of distinct elements in the space.
        self._n = n

    @property
    def n(self):
        # BUG FIX: restored @property — the methods below (sample, contains,
        # __repr__, __hash__, ...) all read `self.n` as a value, which only
        # works when `n` is a property, not a bound method.
        return self._n

    def sample(self):
        """Draw a uniformly random element of the space."""
        return np.random.randint(self.n)

    def contains(self, x):
        """Return True iff x is a scalar integer in [0, n)."""
        x = np.asarray(x)
        return ((x.shape == ()) and (x.dtype.kind == 'i') and (x >= 0) and (x < self.n))

    def __repr__(self):
        return ('Discrete(%d)' % self.n)

    def flatten(self, x):
        """Encode x as a one-hot vector of length n."""
        return special.to_onehot(x, self.n)

    def unflatten(self, x):
        """Decode a one-hot vector back to an integer."""
        return special.from_onehot(x)

    def flatten_n(self, x):
        """Vectorized flatten over a batch of values."""
        return special.to_onehot_n(x, self.n)

    def unflatten_n(self, x):
        """Vectorized unflatten over a batch of one-hot rows."""
        return special.from_onehot_n(x)

    @property
    def flat_dim(self):
        # Dimension of the flattened (one-hot) representation.
        # NOTE(review): made a property to match `n`; confirm callers use
        # attribute access (rllab convention) rather than a call.
        return self.n

    def weighted_sample(self, weights):
        """Sample an element with the given per-element probabilities."""
        return special.weighted_sample(weights, range(self.n))

    @property
    def default_value(self):
        # Canonical default element of the space (rllab convention: property).
        return 0

    def new_tensor_variable(self, name, extra_dims):
        """Create a symbolic tensor for batched values of this space.

        Uses the smallest unsigned integer dtype able to represent n.
        """
        if (self.n <= (2 ** 8)):
            return ext.new_tensor(name=name, ndim=(extra_dims + 1), dtype='uint8')
        elif (self.n <= (2 ** 16)):
            return ext.new_tensor(name=name, ndim=(extra_dims + 1), dtype='uint16')
        else:
            return ext.new_tensor(name=name, ndim=(extra_dims + 1), dtype='uint32')

    def __eq__(self, other):
        # BUG FIX: __eq__ was defined twice; the earlier, isinstance-unsafe
        # definition was dead code and has been removed.
        if (not isinstance(other, Discrete)):
            return False
        return (self.n == other.n)

    def __hash__(self):
        return hash(self.n)
def execute_with_timeout(fn, args=None, kwargs=None, timeout=None, fail_if_no_timer=True, signal_type=_DEFAULT_SIGNAL_TYPE, timer_type=_DEFAULT_TIMER_TYPE, timeout_exception_cls=TimeoutError):
    """Run fn(*args, **kwargs), aborting with timeout_exception_cls after `timeout` seconds.

    Uses a POSIX interval timer plus a signal handler, so it only works from
    the main thread. If no timeout/signal/timer is configured, the function
    is simply called directly.

    Raises:
        timeout_exception_cls: if the call does not finish in time.
        NotSupportedError: if a timer cannot be installed and
            fail_if_no_timer is True.
    """
    if (args is None):
        args = empty_tuple
    if (kwargs is None):
        kwargs = empty_dict
    # No timeout requested, or no signal/timer mechanism given: plain call.
    if ((timeout is None) or (timeout == 0) or (signal_type is None) or (timer_type is None)):
        return fn(*args, **kwargs)

    def signal_handler(signum, frame):
        # Fired by the interval timer; message describes the timed-out call.
        raise timeout_exception_cls(inspection.get_function_call_str(fn, args, kwargs))

    # BUG FIX: the original used the undefined name `none` (lowercase) here
    # and in the restore check below, raising NameError on every timed call.
    old_signal_handler = None
    timer_is_set = False
    try:
        try:
            old_signal_handler = signal.signal(signal_type, signal_handler)
            signal.setitimer(timer_type, timeout)
            timer_is_set = True
        except ValueError:
            if fail_if_no_timer:
                raise NotSupportedError('Timer is not available; the code is probably invoked from outside the main thread.')
        return fn(*args, **kwargs)
    finally:
        # Always cancel the timer and restore the previous handler.
        if timer_is_set:
            signal.setitimer(timer_type, 0)
        if (old_signal_handler is not None):
            signal.signal(signal_type, old_signal_handler)
class ProjectIssueResourceWeightEventManager(RetrieveMixin, RESTManager):
    """Manager for the resource weight events of a project issue."""

    _path = '/projects/{project_id}/issues/{issue_iid}/resource_weight_events'
    _obj_cls = ProjectIssueResourceWeightEvent
    _from_parent_attrs = {'project_id': 'project_id', 'issue_iid': 'iid'}

    def get(self, id: Union[(str, int)], lazy: bool=False, **kwargs: Any) -> ProjectIssueResourceWeightEvent:
        """Retrieve a single resource weight event by its id."""
        event = super().get(id=id, lazy=lazy, **kwargs)
        # Narrow the generic RESTObject return type for callers.
        return cast(ProjectIssueResourceWeightEvent, event)
class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):
    """Dataset mapper that prepares panoptic-segmentation training samples.

    Extends the semantic mapper: in addition to the (optional) semantic
    ground truth, it reads the panoptic PNG, converts it to segment ids and
    builds per-segment Instances (classes + bit masks).
    """

    def __init__(self, is_train=True, *, augmentations, image_format, ignore_label, size_divisibility):
        """All arguments are forwarded unchanged to the semantic mapper."""
        super().__init__(is_train, augmentations=augmentations, image_format=image_format, ignore_label=ignore_label, size_divisibility=size_divisibility)

    def __call__(self, dataset_dict):
        """Map one dataset dict into the model's training input format."""
        assert self.is_train, 'MaskFormerPanopticDatasetMapper should only be used for training!'
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified below
        image = utils.read_image(dataset_dict['file_name'], format=self.img_format)
        utils.check_image_size(dataset_dict, image)
        # Semantic ground truth is optional for panoptic datasets.
        if ('sem_seg_file_name' in dataset_dict):
            sem_seg_gt = utils.read_image(dataset_dict.pop('sem_seg_file_name')).astype('double')
        else:
            sem_seg_gt = None
        # Panoptic ground truth (RGB-encoded segment ids) is required.
        if ('pan_seg_file_name' in dataset_dict):
            pan_seg_gt = utils.read_image(dataset_dict.pop('pan_seg_file_name'), 'RGB')
            segments_info = dataset_dict['segments_info']
        else:
            pan_seg_gt = None
            segments_info = None
        if (pan_seg_gt is None):
            raise ValueError("Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.".format(dataset_dict['file_name']))
        # Apply the augmentations to image/sem_seg, then reuse the same
        # transforms on the panoptic map so everything stays aligned.
        aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
        (aug_input, transforms) = T.apply_transform_gens(self.tfm_gens, aug_input)
        image = aug_input.image
        if (sem_seg_gt is not None):
            sem_seg_gt = aug_input.sem_seg
        pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)
        from panopticapi.utils import rgb2id
        pan_seg_gt = rgb2id(pan_seg_gt)
        # HWC uint8 image -> CHW tensor.
        image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        if (sem_seg_gt is not None):
            sem_seg_gt = torch.as_tensor(sem_seg_gt.astype('long'))
        pan_seg_gt = torch.as_tensor(pan_seg_gt.astype('long'))
        if (self.size_divisibility > 0):
            image_size = (image.shape[(- 2)], image.shape[(- 1)])
            # F.pad order is (left, right, top, bottom).
            padding_size = [0, (self.size_divisibility - image_size[1]), 0, (self.size_divisibility - image_size[0])]
            image = F.pad(image, padding_size, value=128).contiguous()
            if (sem_seg_gt is not None):
                sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()
            # 0 is the void segment id in the panoptic encoding.
            pan_seg_gt = F.pad(pan_seg_gt, padding_size, value=0).contiguous()
        image_shape = (image.shape[(- 2)], image.shape[(- 1)])
        dataset_dict['image'] = image
        if (sem_seg_gt is not None):
            dataset_dict['sem_seg'] = sem_seg_gt.long()
        if ('annotations' in dataset_dict):
            # BUG FIX: the original message said "Pemantic"; corrected typo.
            raise ValueError("Semantic segmentation dataset should not have 'annotations'.")
        # Build per-segment Instances from the (augmented, padded) panoptic map.
        pan_seg_gt = pan_seg_gt.numpy()
        instances = Instances(image_shape)
        classes = []
        masks = []
        for segment_info in segments_info:
            class_id = segment_info['category_id']
            if (not segment_info['iscrowd']):
                classes.append(class_id)
                masks.append((pan_seg_gt == segment_info['id']))
        classes = np.array(classes)
        instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
        if (len(masks) == 0):
            # No non-crowd segments: emit an empty mask tensor of the right shape.
            instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[(- 2)], pan_seg_gt.shape[(- 1)]))
        else:
            masks = BitMasks(torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks]))
            instances.gt_masks = masks.tensor
        dataset_dict['instances'] = instances
        return dataset_dict
class TestEntropy(unittest.TestCase):
    """Tests for the fractal-dimension / fluctuation metrics."""

    def test_petrosian_fd(self):
        pfd = petrosian_fd(RANDOM_TS)
        # Plain-list input must also be accepted.
        petrosian_fd(list(RANDOM_TS))
        self.assertEqual(np.round(pfd, 3), 1.03)
        # Axis handling matches np.apply_along_axis row- and column-wise.
        assert_equal(aal(petrosian_fd, axis=1, arr=data), petrosian_fd(data))
        assert_equal(aal(petrosian_fd, axis=0, arr=data), petrosian_fd(data, axis=0))

    def test_katz_fd(self):
        known_signal = [0.0, 0.0, 2.0, -2.0, 0.0, -1.0, -1.0, 0.0]
        self.assertEqual(np.round(katz_fd(known_signal), 3), 5.783)
        assert_equal(aal(katz_fd, axis=1, arr=data), katz_fd(data))
        assert_equal(aal(katz_fd, axis=0, arr=data), katz_fd(data, axis=0))

    def test_higuchi_fd(self):
        self.assertEqual(np.round(higuchi_fd(RANDOM_TS), 8), 1.9914198)
        # Plain-list input with an explicit kmax must also be accepted.
        higuchi_fd(list(RANDOM_TS), kmax=20)

    def test_detrended_fluctuation(self):
        self.assertEqual(np.round(detrended_fluctuation(RANDOM_TS), 4), 0.4976)
        self.assertEqual(np.round(detrended_fluctuation(PURE_SINE), 4), 1.5848)
def context_decorator(ctx, func):
    """Wrap `func` so each call runs inside a context manager produced by `ctx`.

    `ctx` is either a zero-argument context-manager factory (a callable) or a
    plain context manager that will be re-entered on every call. An object
    that is both callable and a context manager is rejected as ambiguous.
    Generator functions get special wrapping; classes cannot be decorated.
    """
    import functools  # local so the restored decorator below always resolves
    assert (not (callable(ctx) and hasattr(ctx, '__enter__'))), f'Passed in {ctx} is both callable and also a valid context manager (has __enter__), making it ambiguous which interface to use. If you intended to pass a context manager factory, rewrite your call as context_decorator(lambda: ctx()); if you intended to pass a context manager directly, rewrite your call as context_decorator(lambda: ctx)'
    if (not callable(ctx)):
        # A plain context manager: reuse the same instance on every call.
        def ctx_factory():
            return ctx
    else:
        ctx_factory = ctx
    if inspect.isclass(func):
        raise RuntimeError('Cannot decorate classes; it is ambiguous whether or not only the constructor or all methods should have the context manager applied; additionally, decorating a class at definition-site will prevent use of the identifier as a conventional type. To specify which methods to decorate, decorate each of them individually.')
    if inspect.isgeneratorfunction(func):
        return _wrap_generator(ctx_factory, func)

    # BUG FIX: the original had a bare `(func)` expression here — a stripped
    # `@functools.wraps(func)` decorator. Restored so the wrapper keeps
    # func's name/docstring/signature metadata.
    @functools.wraps(func)
    def decorate_context(*args, **kwargs):
        with ctx_factory():
            return func(*args, **kwargs)
    return decorate_context
class SymbolBase():
    """Base class for Pratt-parser symbols (tokens and operators).

    Subclasses override nud/led/evaluate; `first`/`second`/`third` are the
    operand slots filled in while parsing.
    """

    # Symbol name, e.g. '(name)' or an operator literal; set on subclasses.
    name = None

    def __init__(self):
        self.value = None   # literal/token value, if any
        self.first = None   # operand slots
        self.second = None
        self.third = None

    def nud(self, parser):
        """Null denotation: called when the symbol starts an expression."""
        raise SyntaxError('Syntax error (%r).' % self.name)

    def led(self, left, parser):
        """Left denotation: called when the symbol follows an expression."""
        raise SyntaxError('Unknown operator (%r).' % self.name)

    def evaluate(self, pyobject):
        """Evaluate this node against a Python object; subclasses implement."""
        raise NotImplementedError(self.name, self)

    def __repr__(self):
        if self.name == '(name)':
            # Strip the surrounding parentheses from the token-class name.
            return f'({self.name[1:(- 1)]} {self.value})'
        parts = [repr(self.name), self.first, self.second, self.third]
        rendered = [str(part) for part in parts if part]
        return '(' + ' '.join(rendered) + ')'
class XOSLExchangeCalendar(TradingCalendar):
    """Exchange calendar for the Oslo Stock Exchange (XOSL)."""

    name = 'XOSL'
    # Local timezone of the exchange.
    tz = timezone('Europe/Oslo')
    # (start_date, time) pairs; a None date means "from the beginning".
    open_times = ((None, time(9, 1)),)
    close_times = ((None, time(16, 20)),)
    # Close time used on designated half days.
    regular_early_close = time(13)

    # NOTE(review): in trading-calendars these two are normally decorated
    # with @property (or @lazyval); the decorators appear to have been lost
    # in extraction — confirm against the original source before relying on
    # attribute-style access.
    def regular_holidays(self):
        """Full-day closures observed by the exchange."""
        return HolidayCalendar([NewYearsDay, MaundyThursday, GoodFriday, EasterMonday, LabourDay, AscensionDay, ConstitutionDay, WhitMonday, ChristmasEve, Christmas, BoxingDay, NewYearsEve])

    def special_closes(self):
        """Early-close days: Holy Wednesday closes at regular_early_close."""
        return [(self.regular_early_close, HolidayCalendar([HolyWednesday]))]
def main():
    """Sample images from a trained diffusion model (optionally classifier-
    guided), gather results across distributed ranks, and save an .npz file
    containing the samples and their class labels."""
    args = create_argparser().parse_args()
    dist_util.setup_dist()
    logger.configure(dir=args.save_dir)
    logger.log('creating model and diffusion...')
    (model, diffusion) = create_model_and_diffusion(image_size=args.img_size, dataset=args.dataset, **args_to_dict(args, model_and_diffusion_defaults().keys()))
    model.load_state_dict(dist_util.load_state_dict(args.model_path, map_location='cpu'))
    model.to(dist_util.dev())
    if args.use_fp16:
        model.convert_to_fp16()
    model.eval()
    logger.log('loading classifier...')
    classifier = create_classifier(image_size=args.img_size, **args_to_dict(args, classifier_defaults().keys()))
    # The classifier checkpoint is expected at <save_dir>/model.pt.
    classifier.load_state_dict(dist_util.load_state_dict(os.path.join(args.save_dir, 'model.pt'), map_location='cpu'))
    classifier.to(dist_util.dev())
    if args.classifier_use_fp16:
        classifier.convert_to_fp16()
    classifier.eval()

    def cond_fn(x, t, y=None):
        # Classifier-guidance gradient: grad_x log p(y|x, t), scaled.
        assert (y is not None)
        with th.enable_grad():
            x_in = x.detach().requires_grad_(True)
            logits = classifier(x_in, t)
            log_probs = F.log_softmax(logits, dim=(- 1))
            selected = log_probs[(range(len(logits)), y.view((- 1)))]
            return (th.autograd.grad(selected.sum(), x_in)[0] * args.classifier_scale)

    def model_fn(x, t, y=None):
        # Forward labels to the model only when it is class-conditional.
        assert (y is not None)
        return model(x, t, (y if args.class_cond else None))

    logger.log('sampling...')
    all_images = []
    all_labels = []
    while ((len(all_images) * args.batch_size) < args.num_samples):
        model_kwargs = {}
        # Binary class labels (0/1) drawn uniformly for each batch element.
        classes = th.randint(low=0, high=2, size=(args.batch_size,), device=dist_util.dev())
        model_kwargs['y'] = classes
        sample_fn = (diffusion.p_sample_loop if (not args.use_ddim) else diffusion.ddim_sample_loop)
        sample = sample_fn(model_fn, (args.batch_size, 3, args.img_size, args.img_size), clip_denoised=args.clip_denoised, model_kwargs=model_kwargs, cond_fn=(cond_fn if args.guided else None), device=dist_util.dev())
        # Map from [-1, 1] to uint8 [0, 255] and NCHW -> NHWC.
        sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
        sample = sample.permute(0, 2, 3, 1)
        sample = sample.contiguous()
        # All-gather samples and labels from every rank.
        gathered_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
        dist.all_gather(gathered_samples, sample)
        all_images.extend([sample.cpu().numpy() for sample in gathered_samples])
        gathered_labels = [th.zeros_like(classes) for _ in range(dist.get_world_size())]
        dist.all_gather(gathered_labels, classes)
        all_labels.extend([labels.cpu().numpy() for labels in gathered_labels])
        logger.log(f'created {(len(all_images) * args.batch_size)} samples')
    arr = np.concatenate(all_images, axis=0)
    arr = arr[:args.num_samples]
    label_arr = np.concatenate(all_labels, axis=0)
    label_arr = label_arr[:args.num_samples]
    # Only rank 0 writes the output file.
    if (dist.get_rank() == 0):
        shape_str = 'x'.join([str(x) for x in arr.shape])
        out_path = os.path.join(logger.get_dir(), f'samples_{shape_str}.npz')
        logger.log(f'saving to {out_path}')
        np.savez(out_path, arr, label_arr)
    dist.barrier()
    logger.log('sampling complete')
class Vgg16Net(nn.Module):
def __init__(self, requires_grad=False, gpu_ids=[]):
super(Vgg16Net, self).__init__()
self.gpu_ids = gpu_ids
model = [Vgg16(requires_grad=requires_grad)]
self.model = nn.Sequential(*model)
def forward(self, input):
if (self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor)):
output = nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
output = self.model(input)
return output |
# BUG FIX: the registration line was a bare `_funcify.register(ExtractDiag)`
# expression (a stripped decorator), so the function was never registered
# with the dispatcher. Restored the `@`.
@_funcify.register(ExtractDiag)
def jax_funcify_ExtractDiag(op, **kwargs):
    """Build the JAX implementation for an ExtractDiag Op."""
    # Capture the Op's static configuration at build time.
    offset = op.offset
    axis1 = op.axis1
    axis2 = op.axis2

    def extract_diag(x, offset=offset, axis1=axis1, axis2=axis2):
        # Defaults bind the captured values so tracing sees constants.
        return jnp.diagonal(x, offset=offset, axis1=axis1, axis2=axis2)
    return extract_diag
def test_swap():
    """Decomposing a 5-qubit SwapTest yields pairwise SWAPs between registers."""
    xs = cirq.LineQubit.range(5)
    ys = cirq.LineQubit.range(100, 105)
    (swap_circuit, _) = SwapTest(n=5).as_composite_bloq().to_cirq_circuit(x=xs, y=ys, qubit_manager=cirq.ops.SimpleQubitManager())
    op = next(swap_circuit.all_operations())
    decomposed = cirq.Circuit(cirq.decompose_once(op))
    expected = cirq.Circuit([cirq.Moment(*(cirq.SWAP(cirq.LineQubit(k), cirq.LineQubit(100 + k)) for k in range(5)))])
    assert decomposed == expected
class DiagGaussian(nn.Module):
    """Head producing a diagonal Gaussian distribution over outputs.

    The mean comes from a linear layer (optionally squashed by
    max_mu * tanh); the std is either conditioned on the latent (clamped
    log-std head) or a state-independent learned parameter.
    """

    def __init__(self, latent_dim, output_dim, unbounded=False, conditioned_sigma=False, max_mu=1.0, sigma_min=(- 20), sigma_max=2):
        super().__init__()
        self.mu = nn.Linear(latent_dim, output_dim)
        self._c_sigma = conditioned_sigma
        if conditioned_sigma:
            # State-dependent log-std head.
            self.sigma = nn.Linear(latent_dim, output_dim)
        else:
            # One learned log-std per output dimension, shared across states.
            self.sigma_param = nn.Parameter(torch.zeros(output_dim, 1))
        self._unbounded = unbounded
        self._max = max_mu
        self._sigma_min = sigma_min
        self._sigma_max = sigma_max

    def forward(self, logits):
        mu = self.mu(logits)
        if not self._unbounded:
            mu = self._max * torch.tanh(mu)
        if self._c_sigma:
            log_sigma = torch.clamp(self.sigma(logits), min=self._sigma_min, max=self._sigma_max)
            sigma = log_sigma.exp()
        else:
            # Broadcast the per-dimension parameter to mu's shape.
            broadcast_shape = [1] * mu.dim()
            broadcast_shape[1] = -1
            sigma = (self.sigma_param.view(broadcast_shape) + torch.zeros_like(mu)).exp()
        return NormalWrapper(mu, sigma)
class WorkspaceMixin(abc.ABC, Generic[T]):
    """Mixin interface for schedulers that can build and push a workspace.

    T is the scheduler-specific handle describing the images to push.
    """

    def __init__(self, *args: object, **kwargs: object) -> None:
        # Cooperative multiple inheritance: forward everything along the MRO.
        super().__init__(*args, **kwargs)

    def workspace_opts(self) -> runopts:
        """Run options accepted by this workspace implementation (empty by default)."""
        return runopts()

    def build_workspace_and_update_role(self, role: Role, workspace: str, cfg: Mapping[(str, CfgVal)]) -> None:
        """Build the workspace and mutate `role` accordingly; no-op by default."""
        ...

    def dryrun_push_images(self, app: AppDef, cfg: Mapping[(str, CfgVal)]) -> T:
        """Return a description of the images push_images would push; optional."""
        raise NotImplementedError('dryrun_push is not implemented')

    def push_images(self, images_to_push: T) -> None:
        """Push the images described by a prior dryrun_push_images result; optional."""
        raise NotImplementedError('push is not implemented')
def test_asyncio_mark_respects_parametrized_loop_policies(pytester: pytest.Pytester):
    """Class-scoped asyncio tests must honor a parametrized event_loop_policy fixture.

    BUG FIX: the embedded test module had its decorators stripped
    (`@pytest.fixture(...)` and `@pytest.mark.asyncio(...)` were reduced to
    bare parenthesized expressions, leaving invalid test code); restored them.
    """
    pytester.makepyfile(
        dedent(
            '''\
            import asyncio

            import pytest

            @pytest.fixture(
                scope="class",
                params=[
                    asyncio.DefaultEventLoopPolicy(),
                    asyncio.DefaultEventLoopPolicy(),
                ]
            )
            def event_loop_policy(request):
                return request.param

            @pytest.mark.asyncio(scope="class")
            class TestWithDifferentLoopPolicies:
                async def test_parametrized_loop(self, request):
                    pass
            '''
        )
    )
    result = pytester.runpytest_subprocess('--asyncio-mode=strict')
    # One test per policy parameter.
    result.assert_outcomes(passed=2)
def make_segs(seqs, lens, labs, talabs, seg_len, seg_shift, rand_seg):
    """Cut each sequence into fixed-length segments.

    Args:
        seqs/lens/labs/talabs: parallel lists of sequences, their lengths,
            their labels, and their time-aligned label streams.
        seg_len: segment length in frames.
        seg_shift: hop between segment starts (when not random).
        rand_seg: if True, draw segment starts uniformly at random instead
            of using a regular grid.

    Returns:
        (segs, nsegs): the list of Segment objects and the per-sequence
        segment counts.
    """
    segs = []
    nsegs = []
    for (seq, l, lab, talab) in zip(seqs, lens, labs, talabs):
        # Number of full segments that fit at this hop size.
        nseg = (((l - seg_len) // seg_shift) + 1)
        nsegs.append(nseg)
        if rand_seg:
            # BUG FIX: `xrange` is Python 2 only; replaced with range.
            starts = np.random.choice(range(((l - seg_len) + 1)), nseg)
        else:
            starts = (np.arange(nseg) * seg_shift)
        for start in starts:
            end = (start + seg_len)
            # One time-aligned label per talab stream, centered on the segment.
            seg_talab = [s.center_lab(start, end) for s in talab]
            segs.append(Segment(seq, start, end, lab, seg_talab))
    return (segs, nsegs)
class SpatialSoftmax(torch.nn.Module):
    """Spatial softmax: convert feature maps to expected 2-D keypoints.

    For each channel a softmax over the H*W locations weights a fixed
    coordinate grid in [-1, 1]^2, producing the expected (x, y) position per
    channel; output shape is (N, channel * 2).
    """

    def __init__(self, height, width, channel, temperature=None, data_format='NCHW'):
        super(SpatialSoftmax, self).__init__()
        self.data_format = data_format
        self.height = height
        self.width = width
        self.channel = channel
        if temperature:
            # Learnable softmax temperature.
            self.temperature = Parameter(torch.ones(1) * temperature)
        else:
            self.temperature = 1.0
        # Coordinate grids flattened to length H*W, values in [-1, 1].
        (pos_x, pos_y) = np.meshgrid(np.linspace(-1.0, 1.0, self.height), np.linspace(-1.0, 1.0, self.width))
        self.pos_x = torch.from_numpy(pos_x.reshape(self.height * self.width)).float()
        self.pos_y = torch.from_numpy(pos_y.reshape(self.height * self.width)).float()
        self.register_buffer('_pos_x', self.pos_x)
        self.register_buffer('_pos_y', self.pos_y)

    def forward(self, feature):
        """Return (N, channel*2) expected keypoint coordinates."""
        if self.data_format == 'NHWC':
            # BUG FIX: the original called the misspelled `.tranpose(...)`,
            # which raised AttributeError on every NHWC input.
            feature = feature.transpose(1, 3).transpose(2, 3).view(-1, self.height * self.width)
        else:
            feature = feature.reshape(-1, self.height * self.width)
        softmax_attention = F.softmax(feature / self.temperature, dim=-1)
        # Expected coordinates under the attention distribution.
        expected_x = torch.sum(Variable(self._pos_x) * softmax_attention, dim=1, keepdim=True)
        expected_y = torch.sum(Variable(self._pos_y) * softmax_attention, dim=1, keepdim=True)
        expected_xy = torch.cat([expected_x, expected_y], 1)
        feature_keypoints = expected_xy.view(-1, self.channel * 2)
        return feature_keypoints
class IBKRBorrowFeesSlippageTestCase(unittest.TestCase):
    """Tests for the IBKRBorrowFees slippage class."""

    # Imported in class scope so the restored decorator below resolves even
    # though this snippet's module header is not visible; normally this
    # import lives at the top of the file.
    from unittest.mock import patch

    # BUG FIX: the decorator was reduced to a bare string expression
    # ('moonshot.slippage...'); restored the stripped @patch so the mock is
    # actually injected into the test.
    @patch('moonshot.slippage.borrowfee.get_ibkr_borrow_fees_reindexed_like')
    def test_borrow_fees_slippage(self, mock_get_ibkr_borrow_fees_reindexed_like):
        """Daily fees = annualized borrow-fee rate applied to short positions."""
        positions = pd.DataFrame({'FI12345': [0.1, 0, (- 0.2), (- 0.2), (- 0.1), 0.5, (- 0.25)], 'FI23456': [(- 0.17), 0.32, 0.23, 0, (- 0.4), (- 0.4), (- 0.4)]}, index=pd.DatetimeIndex(['2018-06-01', '2018-06-02', '2018-06-03', '2018-06-04', '2018-06-05', '2018-06-08', '2018-06-09']))
        borrow_fee_rates = pd.DataFrame({'FI12345': [1.75, 1.75, 1.75, 1.85, 1.85, 1.85, 1.2], 'FI23456': [8.0, 8.0, 8.23, 8.5, 0.25, 0.25, 0.25]}, index=pd.DatetimeIndex(['2018-06-01', '2018-06-02', '2018-06-03', '2018-06-04', '2018-06-05', '2018-06-08', '2018-06-09']))
        mock_get_ibkr_borrow_fees_reindexed_like.return_value = borrow_fee_rates
        # Turnover/prices are unused by this slippage model.
        turnover = prices = None
        fees = IBKRBorrowFees().get_slippage(turnover, positions, prices)
        mock_get_ibkr_borrow_fees_reindexed_like.assert_called_with(positions)
        fees.index.name = 'Date'
        fees.index = fees.index.strftime('%Y-%m-%d')
        fees = fees.to_dict(orient='dict')
        # Long or flat positions incur no borrow fees.
        self.assertAlmostEqual(fees['FI12345']['2018-06-01'], 0)
        self.assertAlmostEqual(fees['FI12345']['2018-06-02'], 0)
        self.assertAlmostEqual(fees['FI12345']['2018-06-03'], 9.917e-06, 9)
        self.assertAlmostEqual(fees['FI12345']['2018-06-04'], 1.0483e-05, 9)
        self.assertAlmostEqual(fees['FI12345']['2018-06-05'], 5.2417e-06, 9)
        self.assertAlmostEqual(fees['FI12345']['2018-06-08'], 0)
        self.assertAlmostEqual(fees['FI12345']['2018-06-09'], 8.5e-06, 9)
        self.assertAlmostEqual(fees['FI23456']['2018-06-01'], 3.853e-05, 8)
        self.assertAlmostEqual(fees['FI23456']['2018-06-02'], 0)
        self.assertAlmostEqual(fees['FI23456']['2018-06-03'], 0)
        self.assertAlmostEqual(fees['FI23456']['2018-06-04'], 0)
        self.assertAlmostEqual(fees['FI23456']['2018-06-05'], 2.833e-06, 9)
        self.assertAlmostEqual(fees['FI23456']['2018-06-08'], 8.5e-06)
        self.assertAlmostEqual(fees['FI23456']['2018-06-09'], 2.833e-06, 9)
class TestOpenFont(EndianTest):
    """Round-trip pack/unpack tests for the X11 OpenFont request."""

    def setUp(self):
        # BUG FIX: the fid value was missing ({'fid': ,} — a syntax error).
        # Reconstructed from req_bin_0: bytes 4-7 are 36 26 1b f5, which
        # (big-endian, matching the 0x0005 request length at bytes 2-3)
        # gives fid = 0x36261bf5. Confirm against the upstream test file.
        self.req_args_0 = {'fid': 0x36261bf5, 'name': 'foofont'}
        self.req_bin_0 = b'-\x00\x00\x056&\x1b\xf5\x00\x07\x00\x00foofont\x00'

    def testPackRequest0(self):
        """Packing the args must reproduce the reference wire bytes."""
        bin = request.OpenFont._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)

    def testUnpackRequest0(self):
        """Parsing the reference bytes must recover the args exactly."""
        (args, remain) = request.OpenFont._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
class PPMConcat(nn.ModuleList):
    """Pyramid pooling: adaptive average pools at several scales, flattened
    and concatenated along the spatial dimension."""

    def __init__(self, pool_scales=(1, 3, 6, 8)):
        pools = [nn.AdaptiveAvgPool2d(scale) for scale in pool_scales]
        super(PPMConcat, self).__init__(pools)

    def forward(self, feats):
        """Pool `feats` at every scale and concatenate the flattened results."""
        pooled = []
        for pool_layer in self:
            out = pool_layer(feats)
            # Keep (N, C), flatten the pooled spatial dims.
            pooled.append(out.view(*feats.shape[:2], -1))
        return torch.cat(pooled, dim=2)
def test_validate_well_structured_non_term_meas():
    """A measurement followed by more gates must be rejected as non-terminal."""
    (q0, q1) = cirq.LineQubit.range(2)
    moments = [
        cirq.Moment([cirq.PhasedXPowGate(phase_exponent=0).on(q0)]),
        cirq.Moment([cirq.PhasedXPowGate(phase_exponent=0.5).on(q0)]),
        cirq.measure(q0, q1, key='z'),
        cirq.Moment([cg.SYC(q0, q1)]),
    ]
    circuit = cirq.Circuit(moments)
    with pytest.raises(BadlyStructuredCircuitError) as excinfo:
        validate_well_structured(circuit)
    assert excinfo.match('Measurements must be terminal')
class Effect8227(BaseEffect):
    """Ship effect: ore-mining-yield bonus for Mining Barge hulls.

    Boosts the 'miningAmount' attribute of fitted modules requiring the
    Mining skill, scaled by the hull's mining-barge yield bonus attribute
    and the pilot's Mining Barge skill level.
    """

    # Passive effect: always active, no user activation.
    type = 'passive'

    # NOTE(review): `handler` takes no `self`; in these generated effect
    # files it is conventionally a plain function (or carries a decorator
    # that may have been lost in extraction) — confirm against upstream.
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Mining')), 'miningAmount', ship.getModifiedItemAttr('miningBargeBonusOreMiningYield'), skill='Mining Barge', **kwargs)
def find_vertical_start(horizontal_start, num_sides):
    """Return the first regular-polygon vertex angle at or past vertical.

    Vertex angles start 90 degrees past `horizontal_start` and advance by
    360/num_sides. The first angle reaching 270 degrees is returned
    normalized by -360; if none reaches it (e.g. num_sides <= 1), the first
    vertex angle itself is returned.
    """
    # Default: the vertex 90 degrees past the horizontal start.
    vertical_start = horizontal_start + 90
    # (Removed an unused `horizontal_angles = []` local from the original.)
    for i in range(1, num_sides):
        angle = (horizontal_start + 90) + (i * 360.0) / num_sides
        if angle >= 270:
            # Wrap into the negative range used by callers.
            vertical_start = angle - 360
            break
    return vertical_start
def Inference(loader, test_loader, model, global_step):
    """Greedy-decode captions on the validation and test loaders and score them.

    Returns the validation metric dict with test metrics merged in under
    'test_<metric>' keys; also dumps the validation candidates to a
    JSON-lines file named iter_<global_step>.json under val_sent_path.
    """
    print('Starting Inference...')
    start_time = time.time()
    model.eval()
    candidates = {}
    references = {}
    cand_lists = []
    ref_lists = []
    # --- validation split ---
    with torch.no_grad():
        for (bi, batch) in enumerate(loader):
            (img1, img2, gts, ImgID) = batch
            # Greedy decoding produces token-id sequences per image pair.
            ids = model.greedy(img1, img2).data.tolist()
            for i in range(len(img1.data)):
                id = ids[i]
                sentences = transform(id)
                # Print a few example captions from the first batch only.
                if ((bi == 0) and (i < 3)):
                    print(' '.join(sentences))
                candidates[ImgID[i]] = [' '.join(sentences)]
                references[ImgID[i]] = gts[i]
                cand_lists.append(' '.join(sentences))
                ref_lists.append(gts[i])
    evaluator = Evaluator(references, candidates)
    score = evaluator.evaluate()
    print(args.main_metric, score[args.main_metric])
    print('evaluting time:', (time.time() - start_time))
    # Dump validation candidates as JSON lines for later inspection.
    with open(os.path.join(val_sent_path, 'iter_{}.json'.format(global_step)), 'w', encoding='utf-8') as fout:
        for ImgID in candidates.keys():
            sample = {'ImgId': ImgID, 'candidates': candidates[ImgID]}
            jterm = json.dumps(sample, ensure_ascii=False)
            fout.write((jterm + '\n'))
    # --- test split: same procedure with fresh accumulators ---
    candidates = {}
    references = {}
    cand_lists = []
    ref_lists = []
    with torch.no_grad():
        for (bi, batch) in enumerate(test_loader):
            (img1, img2, gts, ImgID) = batch
            ids = model.greedy(img1, img2).data.tolist()
            for i in range(len(img1.data)):
                id = ids[i]
                sentences = transform(id)
                candidates[ImgID[i]] = [' '.join(sentences)]
                references[ImgID[i]] = gts[i]
                cand_lists.append(' '.join(sentences))
                ref_lists.append(gts[i])
    test_score = {}
    evaluator = Evaluator(references, candidates)
    test_score = evaluator.evaluate()
    # Merge test metrics into the validation score dict under 'test_' keys.
    for key in test_score.keys():
        score[('test_' + key)] = test_score[key]
    return score
class NetworkImageNet(nn.Module):
    """DARTS-style ImageNet network assembled from a discovered genotype."""

    def __init__(self, C, num_classes, layers, auxiliary, genotype):
        super(NetworkImageNet, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary
        # Drop-path probability; scheduled externally during training.
        self.drop_path_prob = 0
        # Two-stage stem; each stage halves the spatial resolution.
        self.stem0 = nn.Sequential(nn.Conv2d(3, (C // 2), kernel_size=3, stride=2, padding=1, bias=False), nn.BatchNorm2d((C // 2)), nn.ReLU(inplace=True), nn.Conv2d((C // 2), C, 3, stride=2, padding=1, bias=False), nn.BatchNorm2d(C))
        self.stem1 = nn.Sequential(nn.ReLU(inplace=True), nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False), nn.BatchNorm2d(C))
        (C_prev_prev, C_prev, C_curr) = (C, C, C)
        self.cells = nn.ModuleList()
        reduction_prev = True
        # BUG FIX: `xrange` is Python 2 only; replaced with range.
        for i in range(layers):
            # Reduction cells at 1/3 and 2/3 depth double the channel count.
            if (i in [(layers // 3), ((2 * layers) // 3)]):
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            (C_prev_prev, C_prev) = (C_prev, (cell.multiplier * C_curr))
            if (i == ((2 * layers) // 3)):
                # Channel count feeding the auxiliary head tap point.
                C_to_auxiliary = C_prev
        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadImageNet(C_to_auxiliary, num_classes)
        self.global_pooling = nn.AvgPool2d(7)
        self.classifier = nn.Linear(C_prev, num_classes)

    def forward(self, input):
        """Return (logits, logits_aux); logits_aux is None unless training with the auxiliary head."""
        logits_aux = None
        s0 = self.stem0(input)
        s1 = self.stem1(s0)
        for (i, cell) in enumerate(self.cells):
            (s0, s1) = (s1, cell(s0, s1, self.drop_path_prob))
            # Auxiliary classifier taps the features at 2/3 depth.
            if (i == ((2 * self._layers) // 3)):
                if (self._auxiliary and self.training):
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), (- 1)))
        return (logits, logits_aux)
class BeatServer(StatelessServer):
    """Channels server that emits scheduled 'beat' messages and dispatches them.

    beat_config maps channel names to one or more emitter configs, each a
    dict with a 'schedule' (timedelta or cron string), a 'type' and a
    'message'.
    """

    def __init__(self, application, channel_layer, beat_config, max_applications=1000):
        super().__init__(application, max_applications)
        self.channel_layer = channel_layer
        if (self.channel_layer is None):
            raise ValueError('Channel layer is not valid')
        self.beat_config = beat_config

    async def handle(self):
        """Spawn one listener per channel and one emitter per schedule, then wait on both groups."""
        listeners = []
        for key in self.beat_config.keys():
            listeners.append(asyncio.ensure_future(self.listener(key)))
        emitters = []
        for (key, value) in self.beat_config.items():
            # A channel may carry a single config or a list/tuple of them.
            if isinstance(value, (list, tuple)):
                for v in value:
                    emitters.append(asyncio.ensure_future(self.emitters(key, v)))
            else:
                emitters.append(asyncio.ensure_future(self.emitters(key, value)))
        (await asyncio.wait(emitters))
        (await asyncio.wait(listeners))

    async def emitters(self, key, value):
        """Forever send value['message'] to channel `key`, sleeping per its schedule.

        Note: the message is sent first, then the sleep — so one message is
        emitted immediately on startup.
        """
        while True:
            schedule = value['schedule']
            if isinstance(schedule, timedelta):
                sleep_seconds = schedule.total_seconds()
            else:
                # Cron expression: sleep until its next fire time.
                sleep_seconds = (croniter(schedule).next() - time.time())
            (await self.channel_layer.send(key, {'type': value['type'], 'message': value['message']}))
            (await asyncio.sleep(sleep_seconds))

    async def listener(self, channel):
        """Receive messages on `channel` and feed them to the application instance."""
        while True:
            message = (await self.channel_layer.receive(channel))
            if (not message.get('type', None)):
                raise ValueError('Worker received message with no type.')
            # One application instance per channel, keyed by channel name.
            scope = {'type': 'channel', 'channel': channel}
            instance_queue = self.get_or_create_application_instance(channel, scope)
            (await instance_queue.put(message))
def test_comprehension():
    """Allocation workload exercising dict/list/set comprehensions.

    NOTE(review): the sizes, immediate deletes and discarded first `val`
    binding look deliberate (a memory-profiler fixture), so the unusual
    structure is preserved as-is.
    """
    # ~12 large list values keyed by squared-number strings.
    d_comp = dict(((str((k * k)), ([v] * (1 << 17))) for (v, k) in enumerate(range(99, 111))))
    l_comp = [([i] * (i << 9)) for i in range(99)]
    del l_comp
    del d_comp

    def hh(x=1):
        # Set of 'Z'-tuples of increasing size, parameterized by x.
        s_comp = set(((('Z',) * (k << 13)) for k in range(x, (19 + (2 * x)))))
        return s_comp
    # First binding is intentionally discarded by the rebind below.
    val = [range(1, 4), max(1, 4), (42 + len(hh()))]
    val = (hh() | hh(4))
    val.add(40)
    l1_comp = [([(1, i)] * (i << 9)) for i in range(99)]
    l2_comp = [([(3, i)] * (i << 9)) for i in range(99)]
    return val
def _get_interpreters_posix():
    """Collect candidate Python interpreter executables on a POSIX system.

    Scans common bin directories, python/pypy/conda installs under the home
    directory and /usr/local, and the PYZO_DEFAULT_SHELL_PYTHON_EXE
    environment variable. Returns a de-duplicated set of real paths, with
    'pythonXm'/'pythonXw' variants dropped when the plain executable exists.
    """
    candidates = []

    def _looks_like_python_exe(path):
        # Name must start with python/pypy, not be a -config helper,
        # be reasonably short, and point at an existing regular file.
        basename = os.path.split(path)[1]
        return (
            basename.startswith(('python', 'pypy'))
            and not basename.count('config')
            and len(basename) < 16
            and os.path.isfile(path)
        )

    # 1) Well-known system bin directories.
    for bin_dir in ('/usr/bin', '/usr/local/bin', '/opt/local/bin'):
        bin_dir = os.path.expanduser(bin_dir)
        try:
            entries = os.listdir(bin_dir)
        except Exception:
            continue
        for entry in entries:
            real_path = os.path.realpath(os.path.join(bin_dir, entry))
            if _looks_like_python_exe(real_path):
                candidates.append(real_path)

    # 2) Per-user and /usr/local python/pypy/conda installation roots.
    for root in ('~', '/usr/local'):
        root = os.path.expanduser(root)
        if not os.path.isdir(root):
            continue
        for child in os.listdir(root):
            if child.lower().startswith(('python', 'pypy', 'miniconda', 'anaconda')):
                for rel in ('bin/python', 'bin/pypy'):
                    exe = os.path.join(root, child, rel)
                    if os.path.isfile(exe):
                        candidates.append(exe)

    # 3) Explicit override via environment variable.
    if 'PYZO_DEFAULT_SHELL_PYTHON_EXE' in os.environ:
        env_exe = os.path.realpath(os.environ['PYZO_DEFAULT_SHELL_PYTHON_EXE'])
        if _looks_like_python_exe(env_exe):
            candidates.append(env_exe)

    # De-duplicate, then drop 'm'/'w' ABI variants shadowed by the base exe.
    unique = set(candidates)
    for exe in list(unique):
        if exe.endswith(('m', 'w')) and exe[:-1] in unique:
            unique.discard(exe)
    return set(unique)
class FillPoly(rq.Request):
    """X11 FillPoly request (opcode 69): fill a polygon on a drawable.

    Declarative wire format: drawable, GC, shape hint (Complex/Nonconvex/
    Convex), coordinate mode (origin- or previous-relative), then the list
    of points.
    """
    _request = rq.Struct(rq.Opcode(69), rq.Pad(1), rq.RequestLength(), rq.Drawable('drawable'), rq.GC('gc'), rq.Set('shape', 1, (X.Complex, X.Nonconvex, X.Convex)), rq.Set('coord_mode', 1, (X.CoordModeOrigin, X.CoordModePrevious)), rq.Pad(2), rq.List('points', structs.Point))
def compute_ne(ce_sum: torch.Tensor, weighted_num_samples: torch.Tensor, pos_labels: torch.Tensor, neg_labels: torch.Tensor, num_groups: int, eta: float) -> torch.Tensor:
    """Compute normalized entropy (NE) per group.

    NE for a group is its cross-entropy sum divided by the cross-entropy of
    a constant predictor that always outputs the group's mean label.
    """
    result_ne = torch.zeros(num_groups)
    for group_idx in range(num_groups):
        # Mean label = weighted positives / total weighted samples.
        group_mean_label = pos_labels[group_idx] / weighted_num_samples[group_idx]
        normalizer = _compute_cross_entropy_norm(group_mean_label, pos_labels[group_idx], neg_labels[group_idx], eta)
        result_ne[group_idx] = ce_sum[group_idx] / normalizer
    return result_ne
def _unwrap_value_from_subclass(result: Value, ctx: AttrContext) -> Value:
    """Post-process an attribute value that was looked up on a subclass.

    Function-like values pass through unchanged; unknown descriptors degrade
    to Any; configured class attributes may be treated as Any or transformed.
    """
    # Only concrete KnownValues need unwrapping (and only when not skipped).
    if ((not isinstance(result, KnownValue)) or ctx.skip_unwrap):
        return result
    cls_val = result.val
    # Function-/method-like objects are returned as-is. The _static_hasattr
    # clause appears to recognize decorator objects carrying an `instance`
    # attribute that is a class (not an instance) — presumably asynq-style
    # bound decorators; confirm against the project's decorator model.
    if (qcore.inspection.is_classmethod(cls_val) or inspect.ismethod(cls_val) or inspect.isfunction(cls_val) or isinstance(cls_val, (MethodDescriptorType, SlotWrapperType)) or (_static_hasattr(cls_val, 'decorator') and _static_hasattr(cls_val, 'instance') and (not isinstance(cls_val.instance, type))) or asynq.is_async_fn(cls_val)):
        return KnownValue(cls_val)
    elif _static_hasattr(cls_val, '__get__'):
        # Unrecognized descriptor: its __get__ result cannot be predicted.
        return AnyValue(AnySource.inference)
    elif TreatClassAttributeAsAny.should_treat_as_any(cls_val, ctx.options):
        return AnyValue(AnySource.error)
    else:
        # Allow a configured transformer to rewrite the attribute value.
        transformed = ClassAttributeTransformer.transform_attribute(cls_val, ctx.options)
        if (transformed is not None):
            return transformed
        return KnownValue(cls_val)
class Migration(migrations.Migration):
    """Alter Condition fields: relation choices/help text, source FK to
    domain.Attribute (SET_NULL, no db constraint), and the URI field."""

    dependencies = [('conditions', '0015_move_attribute_to_attributeentity')]

    # Three AlterField operations on the `condition` model.
    operations = [migrations.AlterField(model_name='condition', name='relation', field=models.CharField(choices=[('eq', 'is equal to (==)'), ('neq', 'is not equal to (!=)'), ('contains', 'contains'), ('gt', 'is greater than (>)'), ('gte', 'is greater than or equal (>=)'), ('lt', 'is lesser than (<)'), ('lte', 'is lesser than or equal (<=)'), ('empty', 'is empty'), ('notempty', 'is not empty')], help_text='The relation this condition is using.', max_length=8, verbose_name='Relation')), migrations.AlterField(model_name='condition', name='source', field=models.ForeignKey(blank=True, db_constraint=False, help_text='The attribute of the value for this condition.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='domain.Attribute', verbose_name='Source')), migrations.AlterField(model_name='condition', name='uri', field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this condition (auto-generated).', max_length=640, null=True, verbose_name='URI'))]
def install_pip(home):
    """Ensure pip is installed in the embedded Python at `home` (Windows layout)."""
    pip_path = home + '/Scripts/pip.exe'
    python_path = home + '/python.exe'
    if exists(pip_path):
        print('pip already installed.')
        return
    print('Installing pip...')
    # Bootstrap pip via the official get-pip.py script.
    download_file(GET_PIP_URL, GET_PIP_PATH)
    print('Executing:', python_path, GET_PIP_PATH)
    check_call([python_path, GET_PIP_PATH])
def parse_args():
    """Build and parse command-line arguments for KEBERT training.

    After parsing, any argument still None whose name appears in
    get_default_params() is back-filled with that default.

    Returns:
        argparse.Namespace: the fully-resolved training configuration.

    Fix: corrected the '--mrdef-csv-file' help text typo ('filewith').
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--mrdef-csv-file', type=str, default='./data/MRDEF_name.csv', help='Path to json file with training data')
    parser.add_argument('--umls-kg-file', type=str, default='./data/umls_kg.csv', help='Path to json file with validation data')
    parser.add_argument('--umls-cui-file', type=str, default='./data/umls_cui.csv', help='Path to json file with validation data')
    parser.add_argument('--dataset-resampled', default=False, action='store_true', help='Whether to use sampling with replacement for webdataset shard selection.')
    parser.add_argument('--max_length', type=int, default=128, help='Number of max length input token.')
    parser.add_argument('--csv-separator', type=str, default='\t', help='For csv-like datasets, which separator to use.')
    parser.add_argument('--output_dir', type=str, default='../MODEL/A2_KEBERT/', help='Where to store tensorboard logs. Use None to avoid storing logs.')
    parser.add_argument('--aws_output_dir', type=str, default='../MODEL/A2_KEBERT/', help='Where to store tensorboard logs. Use None to avoid storing logs.')
    parser.add_argument('--logs', type=str, default='logs_mlp', help='Where to store tensorboard logs. Use None to avoid storing logs.')
    parser.add_argument('--log_local', action='store_true', default=False, help='log files on local master, otherwise global master only.')
    parser.add_argument('--name', type=str, default=None, help='Optional identifier for the experiment when storing logs. Otherwise use current time.')
    parser.add_argument('--workers', type=int, default=8, help='Number of dataloader workers per GPU.')
    parser.add_argument('--batch-size', type=int, default=64, help='Batch size per GPU.')
    parser.add_argument('--epochs', type=int, default=100, help='Number of epochs to train for.')
    parser.add_argument('--lr', type=float, default=None, help='Learning rate.')
    parser.add_argument('--beta1', type=float, default=None, help='Adam beta 1.')
    parser.add_argument('--beta2', type=float, default=None, help='Adam beta 2.')
    parser.add_argument('--eps', type=float, default=None, help='Adam epsilon.')
    parser.add_argument('--wd', type=float, default=0.2, help='Weight decay.')
    parser.add_argument('--warmup', type=int, default=10000, help='Number of steps to warmup for.')
    parser.add_argument('--use-bn-sync', default=False, action='store_true', help='Whether to use batch norm sync.')
    parser.add_argument('--skip-scheduler', action='store_true', default=False, help='Use this flag to skip the learning rate decay.')
    parser.add_argument('--save-frequency', type=int, default=50, help='How often to save checkpoints.')
    parser.add_argument('--save-most-recent', action='store_true', default=True, help='Always save the most recent model trained to epoch_latest.pt.')
    parser.add_argument('--zeroshot-frequency', type=int, default=2, help='How often to run zero shot.')
    parser.add_argument('--val-frequency', type=int, default=1, help='How often to run evaluation with val data.')
    parser.add_argument('--resume', default=None, type=str, help='path to latest checkpoint (default: none)')
    parser.add_argument('--precision', choices=['amp', 'fp16', 'fp32'], default='amp', help='Floating point precision.')
    parser.add_argument('--pretrained', default='microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext', type=str, help='Use a pretrained CLIP model weights with the specified tag or file path.')
    parser.add_argument('--evaluate_perbatch', default=False, action='store_true', help='Use clinical BERT.')
    parser.add_argument('--pretrained-image', default=False, action='store_true', help='Load imagenet pretrained weights for image tower backbone if available.')
    parser.add_argument('--lock-image', default=False, action='store_true', help='Lock full image tower by disabling gradients.')
    parser.add_argument('--lock-image-unlocked-groups', type=int, default=0, help='Leave last n image tower layer groups unlocked.')
    parser.add_argument('--lock-image-freeze-bn-stats', default=False, action='store_true', help='Freeze BatchNorm running stats in image tower for any locked layers.')
    parser.add_argument('--grad-checkpointing', default=False, action='store_true', help='Enable gradient checkpointing.')
    parser.add_argument('--local-loss', default=False, action='store_true', help='calculate loss w/ local features global (instead of realizing full global global matrix)')
    parser.add_argument('--gather-with-grad', default=False, action='store_true', help='enable full distributed gradient for feature gather')
    parser.add_argument('--force-quick-gelu', default=False, action='store_true', help='Force use of QuickGELU activation for non-OpenAI transformer models.')
    parser.add_argument('--torchscript', default=False, action='store_true', help="torch.jit.script the model, also uses jit version of OpenAI models if pretrained=='openai'")
    parser.add_argument('--trace', default=False, action='store_true', help='torch.jit.trace the model for inference / eval only')
    parser.add_argument('--dist-url', default='env://', type=str, help='url used to set up distributed training')
    parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend')
    parser.add_argument('--report-to', default='tensorboard', type=str, help="Options are ['wandb', 'tensorboard', 'wandb,tensorboard']")
    parser.add_argument('--wandb-notes', default='', type=str, help='Notes if logging with wandb')
    parser.add_argument('--debug', default=False, action='store_true', help='If true, more information is logged.')
    parser.add_argument('--copy-codebase', default=False, action='store_true', help='If true, we copy the entire base on the log diretory, and execute from there.')
    parser.add_argument('--horovod', default=False, action='store_true', help='Use horovod for distributed training.')
    parser.add_argument('--ddp-static-graph', default=False, action='store_true', help='Enable static graph optimization for DDP in PyTorch >= 1.11.')
    parser.add_argument('--no-set-device-rank', default=False, action='store_true', help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).")
    parser.add_argument('--seed', type=int, default=0, help='Default random seed.')
    parser.add_argument('--norm_gradient_clip', type=float, default=None, help='Gradient clip.')
    args = parser.parse_args()
    # Back-fill model-specific defaults for any option left at None.
    default_params = get_default_params()
    for (name, val) in default_params.items():
        if (getattr(args, name) is None):
            setattr(args, name, val)
    return args
class TestMetaLabels(object):
    """Unit tests for pysat.MetaLabels behavior.

    NOTE(review): the bare ".parametrize(...)" lines below look like
    "@pytest.mark.parametrize" decorators whose "@pytest.mark" prefix was
    lost in extraction -- confirm against the original repository.
    """
    def setup_method(self):
        """Create a fresh test Instrument's labels and an empty Meta per test."""
        testInst = pysat.Instrument('pysat', 'testing')
        self.meta_labels = testInst.meta.labels
        self.meta = pysat.Meta()
        return
    def teardown_method(self):
        """Drop per-test attributes."""
        del self.meta, self.meta_labels
        return
    def test_default_label_value_raises_error(self):
        """An unknown label attribute must raise ValueError."""
        testing.eval_bad_input(self.meta_labels.default_values_from_attr, ValueError, 'unknown label attribute', ['not_an_attr'])
        return
    .parametrize('iter_type', [list, dict, set, tuple, np.ndarray])
    def test_set_bad_type(self, iter_type):
        """Constructing MetaLabels with an iterable label type must raise TypeError."""
        testing.eval_bad_input(pysat.MetaLabels, TypeError, 'iterable types like', input_kwargs={'value_range': ('val_range', iter_type)})
        return
    .parametrize('iter_type', [list, dict, set, tuple, np.ndarray])
    def test_update_bad_type(self, iter_type):
        """Updating a label to an iterable type must raise TypeError."""
        testing.eval_bad_input(self.meta_labels.update, TypeError, 'iterable types like', input_args=['value_range', 'val_range', iter_type])
        return
    .parametrize('in_val', [1.0, 1, {}, None, []])
    def test_default_value_from_type_unexpected_input(self, in_val, caplog):
        """Unmatched types must log an informational 'no type match' message."""
        with caplog.at_level(logging.INFO, logger='pysat'):
            self.meta_labels.default_values_from_type(in_val)
        captured = caplog.text
        test_str = 'No type match found for '
        assert (captured.find(test_str) >= 0)
        return
    def test_repr(self):
        """repr() must be a string naming pysat.MetaLabels."""
        out = self.meta_labels.__repr__()
        assert isinstance(out, str)
        assert (out.find('pysat.MetaLabels(') >= 0)
        return
    .parametrize('val_type', [int, float, type(None), str, bytes, bool, np.float32, np.float64, np.int32, np.int64, np.datetime64, dt.datetime, dt.timedelta])
    def test_eval_label_type_true(self, val_type):
        """Scalar-like types are accepted by _eval_label_type."""
        assert self.meta_labels._eval_label_type(val_type)
        return
    .parametrize('val_type', [list, dict, set, tuple, np.ndarray])
    def test_eval_label_type_false(self, val_type):
        """Iterable types are rejected by _eval_label_type."""
        assert (not self.meta_labels._eval_label_type(val_type))
        return
    .parametrize('in_val', [float, np.float16, np.float32, np.float64])
    def test_default_value_from_type_float_inputs(self, in_val):
        """Float types default to NaN."""
        out = self.meta.labels.default_values_from_type(in_val)
        assert np.isnan(out)
        return
    .parametrize('in_val, comp_val', [(int, (- 1)), (np.int8, (- 1)), (np.int16, (- 1)), (np.int32, (- 1)), (np.int64, (- 1)), (str, '')])
    def test_default_value_from_type_int_inputs(self, in_val, comp_val):
        """Int types default to -1; str defaults to the empty string."""
        out = self.meta.labels.default_values_from_type(in_val)
        assert (out == comp_val)
        return
    def test_update(self):
        """update() must add the label attribute, its name, and its type."""
        self.meta_labels.update('new_label', 'new_name', int)
        assert hasattr(self.meta_labels, 'new_label')
        assert (self.meta_labels.new_label == 'new_name')
        assert (self.meta_labels.label_type['new_label'] == int)
        return
    def test_change_case_of_meta_labels(self):
        """Renaming labels to a new case must rename the stored attributes."""
        self.meta_labels = {'units': ('units', str), 'name': ('long_name', str)}
        self.meta = pysat.Meta(labels=self.meta_labels)
        self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
        self.meta['new2'] = {'units': 'hey2', 'long_name': 'boo2'}
        self.meta.labels.units = 'Units'
        self.meta.labels.name = 'Long_Name'
        assert (self.meta['new'].Units == 'hey')
        assert (self.meta['new'].Long_Name == 'boo')
        assert (self.meta['new2'].Units == 'hey2')
        assert (self.meta['new2'].Long_Name == 'boo2')
        return
    def test_case_change_of_meta_labels_w_ho(self):
        """Label case changes must propagate into higher-order (child) metadata."""
        self.meta_labels = {'units': ('units', str), 'name': ('long_Name', str)}
        self.meta = pysat.Meta(labels=self.meta_labels)
        meta2 = pysat.Meta(labels=self.meta_labels)
        meta2['new21'] = {'units': 'hey2', 'long_name': 'boo2'}
        self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
        self.meta['new2'] = meta2
        self.meta.labels.units = 'Units'
        self.meta.labels.name = 'Long_Name'
        assert (self.meta['new'].Units == 'hey')
        assert (self.meta['new'].Long_Name == 'boo')
        assert (self.meta['new2'].children['new21'].Units == 'hey2')
        assert (self.meta['new2'].children['new21'].Long_Name == 'boo2')
        return
class GetCommonChats():
    async def get_common_chats(self: 'pyrogram.Client', user_id: Union[(int, str)]) -> List['types.Chat']:
        """Return the list of chats shared with the given user.

        Raises ValueError when `user_id` does not resolve to a user peer.
        """
        peer = await self.resolve_peer(user_id)
        # Only user peers have "common chats"; reject anything else up front.
        if not isinstance(peer, raw.types.InputPeerUser):
            raise ValueError(f"""The user_id "{user_id}" doesn't belong to a user""")
        request = raw.functions.messages.GetCommonChats(user_id=peer, max_id=0, limit=100)
        response = await self.invoke(request)
        return types.List([types.Chat._parse_chat(self, chat) for chat in response.chats])
def truncate_to_first_stop_token(tokens: torch.LongTensor, stop_ids: List[Union[(int, List[int])]]) -> torch.LongTensor:
    """Truncate `tokens` before the first occurrence of any stop sequence.

    Args:
        tokens: 1-D LongTensor of token ids.
        stop_ids: stop markers, each either a single id or a list of ids
            (a multi-token stop sequence).

    Returns:
        `tokens[:i]` where `i` is the start of the earliest stop match, or
        `tokens` unchanged when no stop sequence occurs (or `stop_ids` is empty).
    """
    if not stop_ids:
        return tokens
    # Normalize every stop id to a tensor on tokens' device ONCE, instead of
    # re-moving each stop tensor to the device at every scanned position.
    stop_tensors = [torch.LongTensor(stop_id if isinstance(stop_id, list) else [stop_id]).to(tokens.device) for stop_id in stop_ids]
    n = len(tokens)
    for i in range(n):
        for stop in stop_tensors:
            if (n - i) >= len(stop) and tokens[i:i + len(stop)].equal(stop):
                return tokens[:i]
    return tokens
def build_word_vec(word_list, model_word2vec):
    """Look up a vector for each word, substituting random vectors for OOV words.

    Args:
        word_list: iterable of words to embed.
        model_word2vec: mapping word -> vector (e.g. gensim KeyedVectors);
            a missing word must raise KeyError.

    Returns:
        (matrix_word2vec, ignore_list): the list of vectors (random float32
        300-d vectors for OOV words) and the list of OOV words.

    Fixes: catch KeyError instead of a bare except, and drop the
    pdb.set_trace() debugging residue from the print-failure fallback.
    """
    matrix_word2vec = []
    ignore_list = []
    for word in word_list:
        try:
            matrix_word2vec.append(model_word2vec[word])
        except KeyError:
            # OOV: remember the word and substitute a random 300-d vector so
            # the output matrix stays aligned with word_list.
            ignore_list.append(word)
            matrix_word2vec.append(np.random.rand(300).astype('float32'))
            try:
                print('%s is not in the vocabulary' % word)
            except Exception:
                # Printing may fail on oddly-encoded words; stay best-effort.
                print('fail to print the word!')
    return (matrix_word2vec, ignore_list)
(bdd.parsers.parse('the per-domain option {option} should be set to {value} for {pattern}'))
def check_option_per_domain(quteproc, option, value, pattern, server):
    """Assert a per-domain setting resolves to the expected value.

    The literal '(port)' placeholder in the URL pattern is substituted
    with the test server's actual port before the lookup.
    """
    resolved = pattern.replace('(port)', str(server.port))
    assert quteproc.get_setting(option, pattern=resolved) == value
def migrate_old_config():
    """Fold the four legacy per-type plugin lists into 'active_plugins'.

    Each legacy key that exists is read, its lines collected, and the key
    removed; the merged list is written back only if anything was found.
    """
    legacy_keys = ('active_songsmenuplugins', 'active_eventplugins', 'active_editingplugins', 'active_playorderplugins')
    active = []
    for key in legacy_keys:
        try:
            value = config.get('plugins', key)
        except config.Error:
            continue
        active.extend(value.splitlines())
        config._config.remove_option('plugins', key)
    if active:
        config.set('plugins', 'active_plugins', '\n'.join(active))
class TestMLTGWD(TestMLT):
    """MLT evaluation runner backed by the GWD detection network."""

    def eval(self):
        """Run detection over the test images; remove the result txt unless boxes are shown."""
        result_txt = '{}.txt'.format(self.cfgs.VERSION)
        images = self.get_test_image()
        detector = build_whole_network.DetectionNetworkGWD(cfgs=self.cfgs, is_training=False)
        self.test_mlt(det_net=detector, real_test_img_list=images, txt_name=result_txt)
        if not self.args.show_box:
            os.remove(result_txt)
def accumulate_cv_results(trained_model_folder: str, merged_output_folder: str, folds: Union[(List[int], Tuple[(int, ...)])], num_processes: int=default_num_processes, overwrite: bool=True):
    """Merge per-fold validation predictions into one folder and score them.

    Copies every fold's validation predictions into `merged_output_folder`
    (together with dataset.json/plans.json) and recomputes summary.json when
    anything changed or no summary exists yet.

    Raises RuntimeError when a requested fold has no validation folder, or
    when two folds produced a prediction for the same case.
    """
    # With overwrite=True start from a clean output folder.
    if (overwrite and isdir(merged_output_folder)):
        shutil.rmtree(merged_output_folder)
    maybe_mkdir_p(merged_output_folder)
    dataset_json = load_json(join(trained_model_folder, 'dataset.json'))
    plans_manager = PlansManager(join(trained_model_folder, 'plans.json'))
    rw = plans_manager.image_reader_writer_class()
    shutil.copy(join(trained_model_folder, 'dataset.json'), join(merged_output_folder, 'dataset.json'))
    shutil.copy(join(trained_model_folder, 'plans.json'), join(merged_output_folder, 'plans.json'))
    did_we_copy_something = False
    for f in folds:
        expected_validation_folder = join(trained_model_folder, f'fold_{f}', 'validation')
        if (not isdir(expected_validation_folder)):
            raise RuntimeError(f'fold {f} of model {trained_model_folder} is missing. Please train it!')
        predicted_files = subfiles(expected_validation_folder, suffix=dataset_json['file_ending'], join=False)
        for pf in predicted_files:
            # The output dir was cleared above when overwrite=True, so an
            # existing file here means an earlier fold already predicted it.
            if (overwrite and isfile(join(merged_output_folder, pf))):
                raise RuntimeError(f'More than one of your folds has a prediction for case {pf}')
            if (overwrite or (not isfile(join(merged_output_folder, pf)))):
                shutil.copy(join(expected_validation_folder, pf), join(merged_output_folder, pf))
                did_we_copy_something = True
    # Re-score only when new files arrived or no summary exists yet.
    if (did_we_copy_something or (not isfile(join(merged_output_folder, 'summary.json')))):
        label_manager = plans_manager.get_label_manager(dataset_json)
        compute_metrics_on_folder(join(nnUNet_raw, plans_manager.dataset_name, 'labelsTr'), merged_output_folder, join(merged_output_folder, 'summary.json'), rw, dataset_json['file_ending'], (label_manager.foreground_regions if label_manager.has_regions else label_manager.foreground_labels), label_manager.ignore_label, num_processes)
def test_unused_udp_port_selects_unused_port(pytester: Pytester):
    """Run an inner pytest asserting that `unused_udp_port` yields a free port:
    a first UDP bind on it succeeds and a second bind (reuse_port=False) raises.

    NOTE(review): the leading " .asyncio" in the generated module below looks
    like a mangled "@pytest.mark.asyncio" decorator -- confirm upstream.
    """
    pytester.makepyfile(dedent(' .asyncio\n async def test_unused_udp_port_fixture(unused_udp_port):\n class Closer:\n def connection_made(self, transport):\n pass\n\n def connection_lost(self, *arg, **kwd):\n pass\n\n event_loop = asyncio.get_running_loop()\n transport1, _ = await event_loop.create_datagram_endpoint(\n Closer,\n local_addr=("127.0.0.1", unused_udp_port),\n reuse_port=False,\n )\n\n with pytest.raises(IOError):\n await event_loop.create_datagram_endpoint(\n Closer,\n local_addr=("127.0.0.1", unused_udp_port),\n reuse_port=False,\n )\n\n transport1.abort()\n '))
def downloadUUID(accessurl, uuid):
    """Download one scan's mesh (.dam) plus its high/low-res texture tiles.

    The texture loop probes sequentially numbered jpgs; the server erroring
    past the last tile terminates it, so that exception is expected and only
    logged (best-effort by design).

    Fixes: removed the dead `pass` after the warning and deduplicated the
    copy-pasted high/low download statements.
    """
    downloadFile(accessurl.format(filename=f'{uuid}_50k.dam'), f'{uuid}_50k.dam')
    shutil.copy(f'{uuid}_50k.dam', f'..{os.path.sep}{uuid}_50k.dam')
    cur_file = ''
    try:
        for i in range(1000):
            for quality in ('high', 'low'):
                name = f'{uuid}_50k_texture_jpg_{quality}/{uuid}_50k_{i:03d}.jpg'
                cur_file = accessurl.format(filename=name)
                downloadFile(cur_file, name)
    except Exception as ex:
        logging.warning(f'Exception downloading file: {cur_file} of: {str(ex)}')
def main(args):
    """Change the current target's project by rewriting project.txt in LOGDIR.

    Args:
        args: command arguments; exactly one element, the new project name
            (upper-cased before writing).

    Returns:
        1 on success, 0 on usage error.

    Fix: use a `with` block so the file handle is closed even if the write fails.
    """
    if len(args) != 1:
        dsz.ui.Echo('Usage: reproject <project>', dsz.ERROR)
        return 0
    project = args[0].upper()
    with open(os.path.join(ops.LOGDIR, 'project.txt'), 'w') as f:
        f.write(project)
    dsz.ui.Echo("Target %s's project has been changed to %s" % (ops.TARGET_ADDR, project))
    return 1
class _ArcIteratorBase(object):
    """Pythonic wrapper over a low-level FST arc iterator.

    Each public method is a thin delegate to the underscore-prefixed
    implementation supplied by the concrete base class; `__iter__` adds
    Python-iterator semantics on top of the done()/value()/next() protocol.
    """
    def __init__(self, fst, state):
        # Validate the state id before handing off to the C-level iterator.
        if (not fst._valid_state_id(state)):
            raise IndexError('State index out of range')
        super(_ArcIteratorBase, self).__init__(fst, state)
    def __iter__(self):
        """Yield each arc of the state in order."""
        while (not self._done()):
            (yield self._value())
            self._next()
    def next(self):
        """Advance to the next arc."""
        self._next()
    def done(self):
        """Return True when iteration is exhausted."""
        return self._done()
    def flags(self):
        """Return the iterator's current flags."""
        return self._flags()
    def position(self):
        """Return the current arc position."""
        return self._position()
    def reset(self):
        """Rewind to the first arc."""
        self._reset()
    def seek(self, a):
        """Jump to arc position `a`."""
        self._seek(a)
    def set_flags(self, flags, mask):
        """Set iterator flags under the given mask."""
        self._set_flags(flags, mask)
    def value(self):
        """Return the current arc."""
        return self._value()
def tasklist_manager(request, manager_nospawn, override_xdg, monkeypatch):
    """Yield a running qtile manager whose bar contains a TestTaskList widget.

    NOTE(review): this reads as a pytest fixture whose decorator was lost in
    extraction -- confirm upstream. Widget kwargs can be injected through
    indirect parametrization (request.param).
    """
    # Force the widget's xdg support flag to the parametrized value.
    monkeypatch.setattr('libqtile.widget.tasklist.has_xdg', override_xdg)
    config = getattr(request, 'param', dict())
    class TasklistConfig(Config):
        # Minimal two-group, single-screen config with the widget under test.
        auto_fullscreen = True
        groups = [libqtile.config.Group('a'), libqtile.config.Group('b')]
        layouts = [layout.Stack()]
        floating_layout = libqtile.resources.default_config.floating_layout
        keys = []
        mouse = []
        screens = [Screen(top=bar.Bar([TestTaskList(name='tasklist', **config)], 28))]
    manager_nospawn.start(TasklistConfig)
    (yield manager_nospawn)
class _ArchX86(Arch):
    """Architecture description for 32-bit x86."""
    NAME = 'x86'
    # Instruction/stack pointer register names for this architecture.
    INS_PTR = Reg('eip')
    STK_PTR = Reg('esp')
    # Shared capstone disassembler configured for 32-bit x86 mode.
    _CSD = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32)
    nop_instruction = b'\x90'  # single-byte NOP opcode
    class optypes(IntEnum):
        """Capstone x86 operand-type constants."""
        INVALID = x86_const.X86_OP_INVALID
        IMM = x86_const.X86_OP_IMM
        REG = x86_const.X86_OP_REG
        MEM = x86_const.X86_OP_MEM
(repr=True, frozen=True)
class BatchTensorDescriptor(TensorDescriptor):
    """TensorDescriptor whose leading (batch) dimension is left unspecified (None).

    NOTE(review): the bare "(repr=True, frozen=True)" line above and the
    `cls` first parameter of `from_tensor` suggest stripped decorators
    (likely @dataclass(...) and @classmethod) -- confirm upstream.
    """
    def __init__(self, *instance_size, **kwargs):
        # Accept either a single sequence of dims or the dims as varargs.
        if ((len(instance_size) == 1) and isinstance(instance_size[0], (list, tuple, torch.Size))):
            instance_size = instance_size[0]
        # Prepend None so shape[0] (the batch axis) stays unspecified.
        super().__init__((None, *instance_size), **kwargs)
    def from_tensor(cls, tensor: torch.Tensor, compression=CompressionType.NONE) -> BatchTensorDescriptor:
        """Build a descriptor from a sample tensor, dropping its batch dim;
        compression is only kept for floating-point tensors."""
        return cls(*tensor.shape[1:], dtype=tensor.dtype, layout=tensor.layout, device=tensor.device, requires_grad=tensor.requires_grad, pin_memory=_safe_check_pinned(tensor), compression=(compression if tensor.is_floating_point() else CompressionType.NONE))
    def make_empty(self, *batch_size: int, **kwargs) -> torch.Tensor:
        """Allocate an uninitialized tensor of shape (*batch_size, *instance dims)."""
        assert (self.shape[0] is None), 'Make sure 0-th dimension is not specified (set to None)'
        return super().make_empty(size=(*batch_size, *self.shape[1:]), **kwargs)
def _parse_constraint(constraints: str, *, is_marker_constraint: bool=False) -> VersionConstraint:
    """Parse a compound version-constraint string into a VersionConstraint.

    '||' (or '|') separates OR groups; commas/spaces within a group separate
    AND-ed constraints that get intersected. '*' means "any version".

    Fixes: the OR-group loop variable shadowed the `constraints` parameter,
    and the len==1 branch duplicated the general intersection path.
    """
    if (constraints == '*'):
        from poetry.core.constraints.version.version_range import VersionRange
        return VersionRange()
    or_constraints = re.split('\\s*\\|\\|?\\s*', constraints.strip())
    or_groups = []
    for or_part in or_constraints:
        or_part = or_part.rstrip(',').rstrip()
        # Split on commas/spaces that act as AND separators (the lookarounds
        # avoid splitting inside operators, hyphen ranges, and at the edges).
        and_constraints = re.split('(?<!^)(?<![\\^~=>< ,]) *(?<!-)[, ](?!-) *(?!,|$)', or_part)
        # Intersect every AND-ed constraint in this group (a single
        # constraint is just itself -- no special case needed).
        constraint = parse_single_constraint(and_constraints[0], is_marker_constraint=is_marker_constraint)
        for part in and_constraints[1:]:
            constraint = constraint.intersect(parse_single_constraint(part, is_marker_constraint=is_marker_constraint))
        or_groups.append(constraint)
    if (len(or_groups) == 1):
        return or_groups[0]
    from poetry.core.constraints.version.version_union import VersionUnion
    return VersionUnion.of(*or_groups)
def main(_):
    """Create BERT pre-training instances from raw text and write TFRecords.

    Reads the files matching FLAGS.input_file (comma-separated globs),
    builds masked-LM training instances, and writes them to the
    comma-separated FLAGS.output_file paths.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
    # Expand the comma-separated glob patterns into concrete input paths.
    input_files = []
    for input_pattern in FLAGS.input_file.split(','):
        input_files.extend(tf.gfile.Glob(input_pattern))
    tf.logging.info('*** Reading from input files ***')
    for input_file in input_files:
        tf.logging.info('  %s', input_file)
    # Fixed seed makes instance creation (shuffling/masking) reproducible.
    rng = random.Random(FLAGS.random_seed)
    instances = create_training_instances(input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor, FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq, rng)
    output_files = FLAGS.output_file.split(',')
    tf.logging.info('*** Writing to output files ***')
    for output_file in output_files:
        tf.logging.info('  %s', output_file)
    write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length, FLAGS.max_predictions_per_seq, output_files)
class RealFsTestCase(fake_filesystem_unittest.TestCase, RealFsTestMixin):
    """Test case that runs either against pyfakefs or the real filesystem.

    When use_real_fs() is False, pyfakefs is installed and self.filesystem /
    self.os / self.open point at the fake implementations; otherwise the
    mixin's real-filesystem setup applies.
    """
    def __init__(self, methodName='runTest'):
        fake_filesystem_unittest.TestCase.__init__(self, methodName)
        RealFsTestMixin.__init__(self)
    def setUp(self):
        RealFsTestMixin.setUp(self)
        self.cwd = os.getcwd()
        # Run as a fixed non-root uid so permission checks behave
        # consistently; restored in tearDown.
        self.uid = get_uid()
        set_uid(1000)
        if (not self.use_real_fs()):
            self.setUpPyfakefs()
            self.filesystem = self.fs
            self.os = os
            self.open = open
            self.create_basepath()
            # Constrain fake disk usage under the base path.
            self.fs.set_disk_usage(1000, self.base_path)
    def tearDown(self):
        set_uid(self.uid)
        RealFsTestMixin.tearDown(self)
    def is_windows_fs(self):
        """Return whether the filesystem under test has Windows semantics."""
        if self.use_real_fs():
            return (sys.platform == 'win32')
        return self.filesystem.is_windows_fs
class ServerAction(actions.BaseAction):
    """rdiff-backup 'server' action: serve requests over the stdin/stdout pipe.

    NOTE(review): `add_action_subparser` takes `cls` as first parameter,
    suggesting a stripped @classmethod decorator -- confirm upstream.
    """
    name = 'server'
    security = 'server'
    parent_parsers = [actions.RESTRICT_PARSER]
    def add_action_subparser(cls, sub_handler):
        """Extend the base subparser with a --debug flag for remote rpdb debugging."""
        subparser = super().add_action_subparser(sub_handler)
        subparser.add_argument('--debug', action='store_true', help='Allow for remote python debugging (rpdb) using netcat')
        return subparser
    def __init__(self, values):
        super().__init__(values)
        # Arm the remote debugger as early as possible when requested.
        if (('debug' in self.values) and self.values.debug):
            self._set_breakpoint()
    def connect(self):
        """Connect, then initialize server-side security restrictions."""
        conn_value = super().connect()
        if conn_value.is_connection_ok():
            Security.initialize(self.get_security_class(), [], security_level=self.values.restrict_mode, restrict_path=self.values.restrict_path)
        return conn_value
    def run(self):
        """Run the base action, then serve the stdin/stdout pipe connection."""
        ret_code = super().run()
        if (ret_code & Globals.RET_CODE_ERR):
            return ret_code
        ret_code |= connection.PipeConnection(sys.stdin.buffer, sys.stdout.buffer).Server()
        return ret_code
    def _set_breakpoint(self):
        """Start an rpdb breakpoint, honoring RDIFF_BACKUP_DEBUG='[addr][:port]'.

        Falls back to 127.0.0.1 and rpdb's default port 4444 when parts are
        missing; only warns when rpdb is not installed.
        """
        try:
            import rpdb
            debug_values = os.getenv('RDIFF_BACKUP_DEBUG', '').split(':')
            if (debug_values != ['']):
                if debug_values[0]:
                    debug_addr = debug_values[0]
                else:
                    debug_addr = '127.0.0.1'
                if (len(debug_values) > 1):
                    debug_port = int(debug_values[1])
                else:
                    debug_port = 4444
                rpdb.set_trace(addr=debug_addr, port=debug_port)
            else:
                rpdb.set_trace()
        except ImportError:
            log.Log('Remote debugging impossible, please install rpdb', log.Log.WARNING)
def cache_only(func):
    """Decorator: run `func` with pynag's ObjectFetcher in cache-only mode.

    The flag is set before the call and always restored in `finally`, even
    when `func` raises.

    Fix: use functools.wraps instead of manually copying __name__ and
    __module__ (wraps also preserves __doc__, __qualname__, __wrapped__).
    """
    import functools
    import pynag.Model

    @functools.wraps(func)
    def wrap(*args, **kwargs):
        pynag.Model.ObjectFetcher._cache_only = True
        try:
            return func(*args, **kwargs)
        finally:
            # Always restore normal (non-cached) fetching.
            pynag.Model.ObjectFetcher._cache_only = False
    return wrap
_flags(floatX='float64')
def test_debugprint_sitsot():
    """debugprint of a sit-sot Scan graph must match the expected trace line-for-line.

    NOTE(review): the bare "_flags(...)" line above looks like a mangled
    config-override decorator -- confirm upstream.
    """
    # Cumulative product of A over k steps via scan; take the final value.
    k = iscalar('k')
    A = dvector('A')
    (result, updates) = pytensor.scan(fn=(lambda prior_result, A: (prior_result * A)), outputs_info=pt.ones_like(A), non_sequences=A, n_steps=k)
    final_result = result[(- 1)]
    output_str = debugprint(final_result, file='str', print_op_info=True)
    lines = output_str.split('\n')
    expected_output = "Subtensor{i} [id A]\n Subtensor{start:} [id B]\n Scan{scan_fn, while_loop=False, inplace=none} [id C] (outer_out_sit_sot-0)\n k [id D] (n_steps)\n SetSubtensor{:stop} [id E] (outer_in_sit_sot-0)\n AllocEmpty{dtype='float64'} [id F]\n Add [id G]\n k [id D]\n Subtensor{i} [id H]\n Shape [id I]\n Unbroadcast{0} [id J]\n ExpandDims{axis=0} [id K]\n Second [id L]\n A [id M]\n ExpandDims{axis=0} [id N]\n 1.0 [id O]\n 0 [id P]\n Subtensor{i} [id Q]\n Shape [id R]\n Unbroadcast{0} [id J]\n \n 1 [id S]\n Unbroadcast{0} [id J]\n \n ScalarFromTensor [id T]\n Subtensor{i} [id H]\n \n A [id M] (outer_in_non_seqs-0)\n 1 [id U]\n -1 [id V]\n\n Inner graphs:\n\n Scan{scan_fn, while_loop=False, inplace=none} [id C]\n Mul [id W] (inner_out_sit_sot-0)\n *0-<Vector(float64, shape=(?,))> [id X] -> [id E] (inner_in_sit_sot-0)\n *1-<Vector(float64, shape=(?,))> [id Y] -> [id M] (inner_in_non_seqs-0)"
    # Compare per line, ignoring leading/trailing whitespace differences.
    for (truth, out) in zip(expected_output.split('\n'), lines):
        assert (truth.strip() == out.strip())
class BeaverConfig():
    def __init__(self, args, logger=None):
        """Parse the beaver portion of the config file referenced by `args.config`.

        Builds per-section and main-section defaults (many overridable via
        environment variables), parses the file, logs the resulting config,
        resolves file globs, and warns about deprecated usage.
        """
        self._logger = (logger or logging.getLogger(__name__))
        self._logger.debug(('Processing beaver portion of config file %s' % args.config))
        # Defaults applied to every per-file config section.
        self._section_defaults = {'add_field': '', 'add_field_env': '', 'debug': '0', 'discover_interval': '15', 'encoding': 'utf_8', 'exclude': '', 'format': '', 'ignore_empty': '0', 'ignore_truncate': '0', 'delimiter': '\n', 'size_limit': '', 'multiline_regex_after': '', 'multiline_regex_before': '', 'message_format': '', 'sincedb_write_interval': '15', 'stat_interval': '1', 'start_position': 'end', 'tags': '', 'tail_lines': '0', 'type': '', 'redis_namespace': ''}
        # Defaults for the main section; many fall back to environment variables.
        self._main_defaults = {'kafka_client_id': os.environ.get('KAFKA_CLIENT_ID', 'beaver-kafka'), 'kafka_hosts': os.environ.get('KAFKA_HOSTS', 'localhost:9092'), 'kafka_async': os.environ.get('KAFKA_ASYNC', True), 'kafka_topic': os.environ.get('KAFKA_TOPIC', 'logstash-topic'), 'kafka_key': os.environ.get('KAFKA_KEY'), 'kafka_codec': os.environ.get('KAFKA_CODEC'), 'kafka_ack_timeout': os.environ.get('KAFKA_ACK_TIMEOUT', 2000), 'kafka_batch_n': os.environ.get('KAFKA_BATCH_N', 10), 'kafka_batch_t': os.environ.get('KAFKA_BATCH_T', 10), 'kafka_round_robin': os.environ.get('KAFKA_ROUND_ROBIN', False), 'mqtt_clientid': 'paho', 'mqtt_host': 'localhost', 'mqtt_port': '1883', 'mqtt_topic': '/logstash', 'mqtt_keepalive': '60', 'rabbitmq_host': os.environ.get('RABBITMQ_HOST', 'localhost'), 'rabbitmq_port': os.environ.get('RABBITMQ_PORT', '5672'), 'rabbitmq_ssl': '0', 'rabbitmq_ssl_key': '', 'rabbitmq_ssl_cert': '', 'rabbitmq_ssl_cacert': '', 'rabbitmq_vhost': os.environ.get('RABBITMQ_VHOST', '/'), 'rabbitmq_username': os.environ.get('RABBITMQ_USERNAME', 'guest'), 'rabbitmq_password': os.environ.get('RABBITMQ_PASSWORD', 'guest'), 'rabbitmq_queue': os.environ.get('RABBITMQ_QUEUE', 'logstash-queue'), 'rabbitmq_exchange_type': os.environ.get('RABBITMQ_EXCHANGE_TYPE', 'direct'), 'rabbitmq_exchange_durable': os.environ.get('RABBITMQ_EXCHANGE_DURABLE', '0'), 'rabbitmq_queue_durable': os.environ.get('RABBITMQ_QUEUE_DURABLE', '0'), 'rabbitmq_ha_queue': os.environ.get('RABBITMQ_HA_QUEUE', '0'), 'rabbitmq_arguments': os.environ.get('RABBITMQ_ARGUMENTS', {}), 'rabbitmq_key': os.environ.get('RABBITMQ_KEY', 'logstash-key'), 'rabbitmq_exchange': os.environ.get('RABBITMQ_EXCHANGE', 'logstash-exchange'), 'rabbitmq_timeout': '1', 'rabbitmq_delivery_mode': 1, 'redis_url': os.environ.get('REDIS_URL', 'redis://localhost:6379/0'), 'redis_namespace': os.environ.get('REDIS_NAMESPACE', 'logstash:beaver'), 'redis_data_type': os.environ.get('REDIS_DATA_TYPE', 'list'), 'redis_password': '', 
        'sns_aws_access_key': '', 'sns_aws_secret_key': '', 'sns_aws_profile_name': '', 'sns_aws_region': 'us-east-1', 'sns_aws_topic_arn': '', 'sqs_aws_access_key': '', 'sqs_aws_secret_key': '', 'sqs_aws_profile_name': '', 'sqs_aws_region': 'us-east-1', 'sqs_aws_queue': '', 'sqs_aws_queue_owner_acct_id': '', 'sqs_bulk_lines': False, 'kinesis_aws_access_key': '', 'kinesis_aws_secret_key': '', 'kinesis_aws_region': 'us-east-1', 'kinesis_aws_stream': '', 'kinesis_aws_batch_size_max': '512000', 'tcp_host': '127.0.0.1', 'tcp_port': '9999', 'tcp_ssl_enabled': '0', 'tcp_ssl_verify': '0', 'tcp_ssl_cacert': '', 'tcp_ssl_cert': '', 'tcp_ssl_key': '', 'udp_host': os.environ.get('UDP_HOST', '127.0.0.1'), 'udp_port': os.environ.get('UDP_PORT', '9999'), 'zeromq_address': os.environ.get('ZEROMQ_ADDRESS', 'tcp://localhost:2120'), 'zeromq_pattern': 'push', 'zeromq_hwm': os.environ.get('ZEROMQ_HWM', ''), 'stomp_host': 'localhost', 'stomp_port': '61613', 'stomp_user': 'user', 'stomp_password': None, 'stomp_queue': 'queue/logstash', 'respawn_delay': '3', 'max_failure': '7', 'number_of_consumer_processes': '1', 'max_queue_size': '100', 'update_file_mapping_time': '', 'discover_interval': '15', 'queue_timeout': '60', 'refresh_worker_process': '', 'wait_timeout': '5', 'sincedb_path': '', 'logstash_version': '', 'ssh_key_file': '', 'ssh_tunnel': '', 'ssh_tunnel_port': '', 'ssh_remote_host': '', 'ssh_remote_port': '', 'ssh_options': '', 'subprocess_poll_sleep': '1', 'zeromq_bind': os.environ.get('BEAVER_MODE', ('bind' if os.environ.get('BIND', False) else 'connect')), 'files': os.environ.get('BEAVER_FILES', ''), 'format': os.environ.get('BEAVER_FORMAT', 'json'), 'fqdn': '0', 'hostname': '', 'output': '', 'path': os.environ.get('BEAVER_PATH', '/var/log'), 'transport': os.environ.get('BEAVER_TRANSPORT', 'stdout'), 'confd_path': '/etc/beaver/conf.d', 'config': '/dev/null', 'debug': '0', 'daemonize': '0', 'pid': '', 'ignore_old_files': 0}
        self._configfile = args.config
        self._config_parser = GlobSafeConfigParser
        self._globbed = []
        self._parse(args)
        # Dump the resolved configuration for debugging.
        for key in self._beaver_config:
            self._logger.debug('[CONFIG] "{0}" => "{1}"'.format(key, self._beaver_config.get(key)))
        self._update_files()
        self._check_for_deprecated_usage()
    def beaver_config(self):
        """Return the parsed main-section config dict.

        NOTE(review): the bare name suggests a stripped @property decorator
        -- confirm upstream.
        """
        return self._beaver_config
    def get(self, key, default=None):
        """Return the main-section value for `key`, or `default` when unset."""
        return self._beaver_config.get(key, default)
    def set(self, key, value):
        """Set a main-section config value."""
        self._beaver_config[key] = value
    def get_field(self, field, filename):
        """Return `field` from the per-file config for `filename` (realpath-keyed),
        falling back to the section defaults when the file is unknown."""
        return self._files.get(os.path.realpath(filename), self._section_defaults)[field]
def addglob(self, globname, globbed):
if (globname not in self._globbed):
self._logger.debug('Adding glob {0}'.format(globname))
config = self._file_config[globname]
self._file_config[globname] = config
for key in config:
self._logger.debug('Config: "{0}" => "{1}"'.format(key, config[key]))
else:
config = self._file_config.get(globname)
for filename in globbed:
self._files[filename] = config
self._globbed.append(globname)
    def getfilepaths(self):
        """Return the tracked file paths (a dict keys view on Python 3)."""
        return self._files.keys()
def getglobs(self):
globs = []
[globs.extend([name, self._file_config[name].get('exclude')]) for name in self._file_config]
return dict(zip(globs[0::2], globs[1::2]))
def use_ssh_tunnel(self):
required = ['ssh_key_file', 'ssh_tunnel', 'ssh_tunnel_port', 'ssh_remote_host', 'ssh_remote_port']
has = len(filter((lambda x: (self.get(x) is not None)), required))
if ((has > 0) and (has != len(required))):
self._logger.warning('Missing {0} of {1} required config variables for ssh'.format((len(required) - has), len(required)))
return (has == len(required))
def _check_for_deprecated_usage(self):
env_vars = ['RABBITMQ_ARGUMENTSRABBITMQ_HOST', 'RABBITMQ_PORT', 'RABBITMQ_VHOST', 'RABBITMQ_USERNAME', 'RABBITMQ_PASSWORD', 'RABBITMQ_QUEUE', 'RABBITMQ_EXCHANGE_TYPE', 'RABBITMQ_EXCHANGE_DURABLE', 'RABBITMQ_KEY', 'RABBITMQ_EXCHANGE', 'REDIS_URL', 'REDIS_NAMESPACE', 'UDP_HOST', 'UDP_PORT', 'ZEROMQ_ADDRESS', 'BEAVER_FILES', 'BEAVER_FORMAT', 'BEAVER_MODE', 'BEAVER_PATH', 'BEAVER_TRANSPORT']
deprecated_env_var_usage = []
for e in env_vars:
v = os.environ.get(e, None)
if (v is not None):
deprecated_env_var_usage.append(e)
if (len(deprecated_env_var_usage) > 0):
warnings.simplefilter('default')
warnings.warn('ENV Variable support will be removed by version 20. Stop using: {0}'.format(', '.join(deprecated_env_var_usage)), DeprecationWarning)
update_file_mapping_time = self.get('update_file_mapping_time')
if update_file_mapping_time:
self.set('discover_interval', update_file_mapping_time)
warnings.simplefilter('default')
warnings.warn('"update_file_mapping_time" has been supersceded by "discover_interval". Stop using: "update_file_mapping_time', DeprecationWarning)
    def _parse(self, args):
        """Build beaver/per-file configuration from CLI args and config files.

        Populates self._beaver_config (main section), self._file_config
        (per-glob sections) and self._files (realpath -> section config).
        NOTE(review): contains Python-2-only idioms (list-returning filter(),
        str.decode('string-escape')) -- this module targets Python 2.
        """
        def _main_parser(config):
            """Coerce, validate and normalize the main config dict in place."""
            # CLI options that may override values read from the config file.
            transpose = ['config', 'confd_path', 'debug', 'daemonize', 'files', 'format', 'fqdn', 'hostname', 'path', 'pid', 'transport']
            namspace_dict = vars(args)
            for key in transpose:
                if ((key not in namspace_dict) or (namspace_dict[key] is None) or (namspace_dict[key] == '')):
                    continue
                config[key] = namspace_dict[key]
            # The CLI 'mode' flag maps onto the zeromq bind/connect setting.
            if args.mode:
                config['zeromq_bind'] = args.mode
            # Treat empty strings as unset.
            for key in config:
                if (config[key] == ''):
                    config[key] = None
            require_bool = ['debug', 'daemonize', 'fqdn', 'rabbitmq_exchange_durable', 'rabbitmq_queue_durable', 'rabbitmq_ha_queue', 'rabbitmq_ssl', 'tcp_ssl_enabled', 'tcp_ssl_verify']
            # NOTE(review): unlike the int/float coercions below there is no
            # None guard here, so every require_bool key needs a default.
            for key in require_bool:
                config[key] = bool(int(config[key]))
            require_int = ['max_failure', 'max_queue_size', 'queue_timeout', 'rabbitmq_port', 'rabbitmq_timeout', 'rabbitmq_delivery_mode', 'respawn_delay', 'subprocess_poll_sleep', 'refresh_worker_process', 'tcp_port', 'udp_port', 'wait_timeout', 'zeromq_hwm', 'logstash_version', 'kafka_batch_n', 'kafka_batch_t', 'kafka_ack_timeout', 'number_of_consumer_processes', 'ignore_old_files']
            for key in require_int:
                if (config[key] is not None):
                    config[key] = int(config[key])
            require_float = ['update_file_mapping_time', 'discover_interval']
            for key in require_float:
                if (config[key] is not None):
                    config[key] = float(config[key])
            # 'null' is a legacy alias for the raw formatter.
            if (config.get('format') == 'null'):
                config['format'] = 'raw'
            # Comma-separated file list on the CLI -> list of paths.
            if ((config['files'] is not None) and (type(config['files']) == str)):
                config['files'] = config['files'].split(',')
            if (config['path'] is not None):
                config['path'] = os.path.realpath(config['path'])
                if (not os.path.isdir(config['path'])):
                    raise LookupError('{0} does not exist'.format(config['path']))
            # Derive a hostname when one is not configured explicitly.
            if (config.get('hostname') is None):
                if (config.get('fqdn') is True):
                    config['hostname'] = socket.getfqdn()
                else:
                    config['hostname'] = socket.gethostname()
            if config.get('sincedb_path'):
                config['sincedb_path'] = os.path.realpath(config.get('sincedb_path'))
            # zeromq_address: CSV string -> list of stripped addresses.
            if (config['zeromq_address'] and (type(config['zeromq_address']) == str)):
                config['zeromq_address'] = [x.strip() for x in config.get('zeromq_address').split(',')]
            # ssh_options: CSV string -> list of '-o <opt>' arguments.
            if (config.get('ssh_options') is not None):
                csv = config.get('ssh_options')
                config['ssh_options'] = []
                if (type(csv) == str):
                    for opt in csv.split(','):
                        config['ssh_options'].append(('-o %s' % opt.strip()))
            else:
                config['ssh_options'] = []
            config['globs'] = {}
            return config
        def _section_parser(config, raise_exceptions=True):
            """Coerce and normalize a per-glob [section] config dict in place.

            With raise_exceptions=False (used for the defaults), malformed
            add_field/add_field_env values are tolerated silently.
            """
            # add_field: flat CSV of key,value pairs -> {key: [value]}.
            fields = config.get('add_field', '')
            if (type(fields) != dict):
                try:
                    if (type(fields) == str):
                        fields = filter(None, fields.split(','))
                    if (len(fields) == 0):
                        config['fields'] = {}
                    elif ((len(fields) % 2) == 1):
                        if raise_exceptions:
                            raise Exception('Wrong number of values for add_field')
                    else:
                        fieldkeys = fields[0::2]
                        fieldvalues = [[x] for x in fields[1::2]]
                        config['fields'] = dict(zip(fieldkeys, fieldvalues))
                except TypeError:
                    config['fields'] = {}
            if ('add_field' in config):
                del config['add_field']
            # add_field_env: like add_field but each value names an env var
            # whose current value is substituted in.
            envFields = config.get('add_field_env', '')
            if (type(envFields) != dict):
                try:
                    if (type(envFields) == str):
                        envFields = envFields.replace(' ', '')
                        envFields = filter(None, envFields.split(','))
                    if (len(envFields) == 0):
                        config['envFields'] = {}
                    elif ((len(envFields) % 2) == 1):
                        if raise_exceptions:
                            raise Exception('Wrong number of values for add_field_env')
                    else:
                        envFieldkeys = envFields[0::2]
                        envFieldvalues = []
                        for x in envFields[1::2]:
                            envFieldvalues.append(os.environ.get(x))
                        config['fields'].update(dict(zip(envFieldkeys, envFieldvalues)))
                except TypeError:
                    config['envFields'] = {}
            if ('add_field_env' in config):
                del config['add_field_env']
            # tags: CSV string -> list (empty on malformed input).
            try:
                tags = config.get('tags', '')
                if (type(tags) == str):
                    tags = filter(None, tags.split(','))
                if (len(tags) == 0):
                    tags = []
                config['tags'] = tags
            except TypeError:
                config['tags'] = []
            if (config.get('format') == 'null'):
                config['format'] = 'raw'
            file_type = config.get('type', None)
            if (not file_type):
                config['type'] = 'file'
            require_bool = ['debug', 'ignore_empty', 'ignore_truncate']
            for k in require_bool:
                config[k] = bool(int(config[k]))
            # Interpret escape sequences like '\n' in the delimiter
            # (Python 2 only codec).
            config['delimiter'] = config['delimiter'].decode('string-escape')
            # Pre-compile multiline regexes once so per-line matching is cheap.
            if config['multiline_regex_after']:
                config['multiline_regex_after'] = re.compile(config['multiline_regex_after'])
            if config['multiline_regex_before']:
                config['multiline_regex_before'] = re.compile(config['multiline_regex_before'])
            require_int = ['sincedb_write_interval', 'stat_interval', 'tail_lines']
            for k in require_int:
                config[k] = int(config[k])
            return config
        conf = Configuration(name='beaver', path=self._configfile, main_defaults=self._main_defaults, section_defaults=self._section_defaults, main_parser=_main_parser, section_parser=_section_parser, path_from_main='confd_path', config_parser=self._config_parser)
        config = conf.raw()
        self._beaver_config = config['beaver']
        self._file_config = config['sections']
        # Parse the defaults too so later lookups see coerced values.
        self._main_parser = _main_parser(self._main_defaults)
        self._section_defaults = _section_parser(self._section_defaults, raise_exceptions=False)
        self._files = {}
        # Expand each section's glob (honoring its exclude pattern) into the
        # realpath -> section-config map used by the workers.
        for section in config['sections']:
            globs = eglob(section, config['sections'][section].get('exclude', ''))
            if (not globs):
                self._logger.debug(('Skipping glob due to no files found: %s' % section))
                continue
            for globbed_file in globs:
                self._files[os.path.realpath(globbed_file)] = config['sections'][section]
    def _update_files(self):
        """Merge the main-config 'files' setting with per-section file config.

        Rebuilds the 'globs' and 'files' settings from the configured file
        list plus every per-section glob, and makes sure each plain file path
        has a section config (falling back to the section defaults).
        """
        # Both start from the same 'files' setting: plain paths double as
        # globs with no exclude pattern.
        globs = self.get('files', default=[])
        files = self.get('files', default=[])
        if globs:
            # path -> exclude-pattern map; plain CLI files have no exclude.
            globs = dict(zip(globs, ([None] * len(globs))))
        else:
            globs = {}
        try:
            # Fold in paths/globs discovered from config-file sections.
            files.extend(self.getfilepaths())
            globs.update(self.getglobs())
        except AttributeError:
            # No section config parsed yet -- use the section data alone.
            files = self.getfilepaths()
            globs = self.getglobs()
        self.set('globs', globs)
        self.set('files', files)
        # Any file without an explicit section falls back to the defaults.
        for f in files:
            if (f not in self._file_config):
                self._file_config[f] = self._section_defaults
def _check_sym_funcs():
    """Sanity-check every registered symmetry function in _symm_funcs.

    For each function: feed it a query of len(name) distinct integers and
    assert that (a) its sorted output is unique across all functions,
    (b) every term is a permutation of the query, and (c) no term repeats.
    """
    distinct_outputs = set()
    for name, func in _symm_funcs.items():
        arity = len(name)
        probe = list(range(4, 4 + arity))
        terms = func(probe)
        signature = tuple(sorted(terms))
        assert signature not in distinct_outputs, signature
        distinct_outputs.add(signature)
        unique_terms = set()
        for term in terms:
            assert len(term) == arity, (probe, term)
            assert set(term) == set(probe), (probe, term)
            assert term not in unique_terms
            unique_terms.add(term)
def test_private(hatch, helpers, temp_dir_data, path_append, dist_name, mocker):
    """Installing a Python with --private must not append it to PATH."""
    dist_dir = (((temp_dir_data / 'data') / 'pythons') / dist_name)
    python_path = (dist_dir / get_distribution(dist_name).python_path)
    # Stub out the actual download/install; we only exercise the CLI flow.
    install = mocker.patch('hatch.python.core.PythonManager.install', return_value=mocker.MagicMock(path=dist_dir, python_path=python_path))
    result = hatch('python', 'install', '--private', dist_name)
    assert (result.exit_code == 0), result.output
    assert (result.output == helpers.dedent(f'''
            Installing {dist_name}
            Installed {dist_name} {dist_dir}
            '''))
    install.assert_called_once_with(dist_name)
    # The key assertion: --private skips PATH modification.
    path_append.assert_not_called()
def measure_UIQMs(dir_name, file_ext=None):
    """Compute UIQM scores for every image in a directory.

    Images are resized to the module-level (im_w, im_h) before scoring.
    When file_ext is given, only paths ending with that extension are used.
    Returns a numpy array of per-image UIQM values.
    """
    image_paths = sorted(glob(join(dir_name, '*.*')))
    if file_ext:
        image_paths = [path for path in image_paths if path.endswith(file_ext)]
    scores = [getUIQM(np.array(Image.open(path).resize((im_w, im_h)))) for path in image_paths]
    return np.array(scores)
def scale_linestrength_eq(df, Tref, Tgas):
    """Rescale equilibrium linestrengths in df from Tref to Tgas.

    Stores the rescaled strength in a new 'S' column and returns df.
    Assumes all lines in df belong to a single molecule (only the first
    unique 'id' is used) -- TODO confirm callers guarantee this.
    """
    print('Scaling equilibrium linestrength')
    def _calc_Q(molecule, iso, T_ref, T_gas):
        # Partition functions at the reference and target temperatures.
        Qref = get_Qgas(molecule, iso, T_ref)
        Qgas = get_Qgas(molecule, iso, T_gas)
        return (Qref, Qgas)
    id_set = df.id.unique()
    # NOTE: shadows the builtin id(); single-molecule assumption (see above).
    id = list(id_set)[0]
    molecule = get_molecule(id)
    iso_set = set(df.iso)
    # Precompute the Qref/Qgas ratio per isotopologue, then map it onto rows.
    Qref_Qgas_ratio = {}
    for iso in iso_set:
        (Qref, Qgas) = _calc_Q(molecule, iso, Tref, Tgas)
        Qref_Qgas_ratio[iso] = (Qref / Qgas)
    line_strength = (df.int * df['iso'].map(Qref_Qgas_ratio))
    # Boltzmann population factor for the lower-state energy El.
    line_strength *= exp((((- hc_k) * df.El) * ((1 / Tgas) - (1 / Tref))))
    # Stimulated-emission correction at the line wavenumber.
    line_strength *= ((1 - exp((((- hc_k) * df.wav) / Tgas))) / (1 - exp((((- hc_k) * df.wav) / Tref))))
    df['S'] = line_strength
    assert ('S' in df)
    return df
class MobileNetFeaturePyramidExtractor(SSDMobileNetV1FeatureExtractor):
    """MobileNetV1 feature extractor with a top-down feature pyramid (FPN)."""
    def extract_features(self, preprocessed_inputs, init_extraction=False):
        """Extract features from a batch of preprocessed images.

        With init_extraction=True, returns a single pooled head tensor;
        otherwise returns the values of the top-down pyramid feature maps.
        Inputs must be a rank-4 tensor at least 33x33 spatially.
        """
        if init_extraction:
            preprocessed_inputs.get_shape().assert_has_rank(4)
            shape_assert = tf.Assert(tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), ['image size must at least be 33 in both height and width.'])
            with tf.control_dependencies([shape_assert]):
                with slim.arg_scope(self._conv_hyperparams):
                    with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope:
                        (_, image_features) = mobilenet_v1.mobilenet_v1_base(preprocessed_inputs, final_endpoint='Conv2d_13_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, scope=scope)
                        feature_head = image_features['Conv2d_13_pointwise']
                        feature_head = slim.conv2d(feature_head, 512, [3, 3], stride=1, padding='SAME', scope='Conv2d_Append_1x1_256')
                        # Spatial average pooling of the appended head.
                        feature_head = tf.nn.avg_pool(feature_head, strides=[1, 1, 1, 1], ksize=[1, 4, 4, 1], padding='VALID')
                        return feature_head
        else:
            preprocessed_inputs.get_shape().assert_has_rank(4)
            shape_assert = tf.Assert(tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), ['image size must at least be 33 in both height and width.'])
            # Endpoints that feed lateral connections of the pyramid.
            bottomup_features_names = ['Conv2d_11_pointwise', 'Conv2d_13_pointwise']
            num_appended_layers = 0
            appended_channel_num = [512]
            with tf.control_dependencies([shape_assert]):
                with slim.arg_scope(self._conv_hyperparams):
                    with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope:
                        (_, image_features) = mobilenet_v1.mobilenet_v1_base(preprocessed_inputs, final_endpoint='Conv2d_13_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, scope=scope)
                        topdown_features = self._topdown_feature_maps(image_features, bottomup_features_names=bottomup_features_names, num_appended_layers=num_appended_layers, appended_channel_num=appended_channel_num)
                        return topdown_features.values()
    def _topdown_feature_maps(self, image_features, bottomup_features_names, num_appended_layers=2, appended_channel_num=256, stride=2, topdown_channel_num=512):
        """Build an FPN-style top-down pathway over the bottom-up endpoints.

        Optionally appends extra stride-`stride` conv layers on top of the
        last endpoint, then walks back down adding 1x1 lateral projections
        plus (nearest-neighbor upsampled) top-down features. Returns an
        OrderedDict from coarsest to finest level.
        """
        feature_head = image_features[bottomup_features_names[(- 1)]]
        appended_features = dict()
        appended_features_names = list()
        # Optional extra bottom-up layers stacked on the last endpoint.
        for index in range(num_appended_layers):
            if isinstance(appended_channel_num, list):
                num_channel = appended_channel_num[index]
            else:
                num_channel = appended_channel_num
            layer_name = 'Append_{}_Conv2d_3x3_{}'.format(index, num_channel)
            feature_head = slim.conv2d(feature_head, num_channel, [3, 3], stride=stride, padding='SAME', scope=layer_name)
            appended_features[layer_name] = feature_head
            appended_features_names.append(layer_name)
        # NOTE: mutates the caller-supplied name list and feature dict.
        bottomup_features_names += appended_features_names
        image_features.update(appended_features)
        topdown_features = list()
        topdown_features_names = list()
        level_ind = (len(bottomup_features_names) - 1)
        # Coarsest pyramid level comes straight from the (appended) head.
        layer_name = 'TopDown_{}_Conv2d_3x3_{}'.format(level_ind, topdown_channel_num)
        feature_head = slim.conv2d(feature_head, topdown_channel_num, [3, 3], stride=1, padding='SAME', scope=layer_name)
        topdown_features.append(feature_head)
        topdown_features_names.append(layer_name)
        level_ind -= 1
        # Walk the remaining endpoints from second-coarsest to finest.
        for bottomup_feature_name in bottomup_features_names[(- 2)::(- 1)]:
            layer_name = 'Lateral_{}_Conv2d_1x1_{}'.format(level_ind, topdown_channel_num)
            lateral_feature = slim.conv2d(image_features[bottomup_feature_name], topdown_channel_num, [1, 1], padding='SAME', scope=layer_name)
            output_size = lateral_feature.get_shape().as_list()[1:3]
            # Upsample the coarser top-down map when spatial sizes differ.
            if (output_size[0] != feature_head.get_shape().as_list()[1]):
                feature_head = tf.image.resize_images(feature_head, output_size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
            feature_head = slim.conv2d(feature_head, topdown_channel_num, [3, 3], padding='SAME', scope='TopDown_{}_Conv2d_3x3_{}'.format(level_ind, topdown_channel_num))
            layer_name = 'TopDown_{}_Add_{}'.format(level_ind, topdown_channel_num)
            feature_head = (feature_head + lateral_feature)
            topdown_features.append(feature_head)
            topdown_features_names.append(layer_name)
            level_ind -= 1
        # Reverse so the returned dict runs from finest to coarsest.
        return collections.OrderedDict([(x, y) for (x, y) in zip(topdown_features_names[(- 1)::(- 1)], topdown_features[(- 1)::(- 1)])])
# NOTE(review): the two lines below look like stripped pytest decorators
# ('@pytest.mark.unit()' and '@pytest.mark.xfail(...)') -- confirm upstream.
.unit()
.xfail(reason='See #377.')
def test_live_execution_skips_do_not_crowd_out_displayed_tasks(capsys, tmp_path):
    """Skipped tasks must not push running/succeeded tasks out of the table."""
    path = tmp_path.joinpath('task_module.py')
    task = Task(base_name='task_example', path=path, function=(lambda x: x))
    task.name = 'task_module.py::task_example'
    live_manager = LiveManager()
    live = LiveExecution(live_manager=live_manager, n_entries_in_table=20, verbose=1, editor_url_scheme='no_link')
    # First render: a single running task is displayed.
    live_manager.start()
    live.update_running_tasks(task)
    live_manager.stop()
    captured = capsys.readouterr()
    assert ('Task' in captured.out)
    assert ('Outcome' in captured.out)
    assert ('task_module.py::task_example' in captured.out)
    assert ('running' in captured.out)
    successful_task = Task(base_name='task_success', path=path, function=(lambda x: x))
    successful_task.name = 'task_module.py::task_success'
    # More tasks than table entries (25 > 20) to force crowding behavior.
    tasks = []
    for i in range(25):
        skipped_task = Task(base_name=f'task_skip_{i}', path=path, function=(lambda x: x))
        skipped_task.name = f'task_module.py::task_skip_{i}'
        tasks.append(skipped_task)
    live_manager.start()
    live.update_running_tasks(successful_task)
    for task in tasks:
        live.update_running_tasks(task)
    live_manager.stop()
    captured = capsys.readouterr()
    # While running, all tasks (including future skips) are shown.
    assert ('running' in captured.out)
    assert ('task_success' in captured.out)
    for i in range(25):
        assert (f'task_skip_{i}' in captured.out)
    live_manager.resume()
    report = ExecutionReport(task=successful_task, outcome=TaskOutcome.SUCCESS, exc_info=None)
    live.update_reports(report)
    for task in tasks:
        report = ExecutionReport(task=task, outcome=TaskOutcome.SKIP, exc_info=None)
        live.update_reports(report)
    live_manager.stop()
    captured = capsys.readouterr()
    # After reporting, skips are dropped from the table but the still-running
    # and the successful task remain visible.
    assert ('Task' in captured.out)
    assert ('Outcome' in captured.out)
    assert ('task_module.py::task_example' in captured.out)
    assert ('task_module.py::task_success' in captured.out)
    assert ('running' in captured.out)
    assert (TaskOutcome.SUCCESS.symbol in captured.out)
    assert ('task_skip' not in captured.out)
class TestNumaCollector(CollectorTestCase):
    """Tests for NumaCollector using a canned 'numactl' fixture output."""
    def setUp(self):
        """Build a collector with a no-op binary so collect() is harmless."""
        config = get_collector_config('NumaCollector', {'interval': 10, 'bin': 'true'})
        self.collector = NumaCollector(config, None)
    def test_import(self):
        self.assertTrue(NumaCollector)
    # NOTE(review): the line below looks like a stripped
    # '@patch.object(Collector, 'publish')' decorator -- confirm upstream.
    (Collector, 'publish')
    def test(self, publish_mock):
        """collect() should publish the node size/free metrics from fixture."""
        self.collector.collect()
        metrics = {'node_0_free_MB': 342, 'node_0_size_MB': 15976}
        # Replace the subprocess output with the single-node fixture.
        patch_communicate = patch('subprocess.Popen.communicate', Mock(return_value=(self.getFixture('single_node.txt').getvalue(), '')))
        patch_communicate.start()
        self.collector.collect()
        patch_communicate.stop()
        self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
# NOTE(review): the bare tuple/call lines in this class look like stripped
# decorators ('@tag(...)' and '@pytest.fixture(autouse=True)') -- confirm.
('tag1', 'tag2')
class TestDjangoTagsToPytestMarkers(SimpleTestCase):
    """Django @tag values should surface as pytest markers on each test."""
    (autouse=True)
    def gimme_my_markers(self, request: pytest.FixtureRequest) -> None:
        # Collect the marker names applied to the current test node.
        self.markers = {m.name for m in request.node.iter_markers()}
    ('tag3', 'tag4')
    def test_1(self) -> None:
        # Class-level tags plus method-level tags.
        assert (self.markers == {'tag1', 'tag2', 'tag3', 'tag4'})
    def test_2(self) -> None:
        # Class-level tags only.
        assert (self.markers == {'tag1', 'tag2'})
    ('tag5')
    def test_3(self) -> None:
        assert (self.markers == {'tag1', 'tag2', 'tag5'})
def visualize_batch(batch):
    """Display a batch of images or optical-flow fields with OpenCV.

    A rank-4 batch is shown as one horizontal strip; a rank-5 batch as a
    grid (one row per set). A trailing channel count of 2 is treated as
    optical flow and converted via flow_to_image. Blocks until a key press.
    """
    if len(batch.shape) == 4:
        if batch.shape[3] == 2:
            frames = [flow_to_image(batch[i]) for i in range(batch.shape[0])]
            cv2.imshow('Optical flow set', np.hstack(frames))
        else:
            frames = [batch[i] for i in range(batch.shape[0])]
            cv2.imshow('Image sets', np.hstack(frames))
        cv2.waitKey(0)
    else:
        rows = []
        if batch.shape[4] == 2:
            for j in range(batch.shape[0]):
                rows.append(np.hstack([flow_to_image(batch[j][i]) for i in range(batch[j].shape[0])]))
            cv2.imshow('Optical flow set', np.vstack(rows))
        else:
            for j in range(batch.shape[0]):
                rows.append(np.hstack([batch[j][i] for i in range(batch[j].shape[0])]))
            cv2.imshow('Image sets', np.vstack(rows))
        cv2.waitKey(0)
def omniglotfs():
    """Build Omniglot few-shot data loaders for base/val/novel splits.

    Returns ((train_loader, train_clean, val_loader, test_loader),
    input_shape, (n_base, n_val, n_novel, samples_per_novel_class),
    few_shot_flag, top5_flag) -- interface unchanged.
    """
    def _load_split(filename):
        """Load one split tensor, flatten to samples and build class targets.

        Each split is stored as (classes, samples, C, H, W); flatten to
        (classes*samples, C, H, W) and label each sample with its class row.
        """
        raw = torch.load((args.dataset_path + 'omniglot/' + filename))
        data = raw.reshape((- 1), raw.shape[2], raw.shape[3], raw.shape[4]).float()
        targets = torch.arange(raw.shape[0]).unsqueeze(1).repeat(1, raw.shape[1]).reshape((- 1))
        return (raw, data, targets)
    (base, base_data, base_targets) = _load_split('base.pt')
    (val, val_data, val_targets) = _load_split('val.pt')
    (novel, novel_data, novel_targets) = _load_split('novel.pt')
    train_transforms = torch.nn.Sequential(transforms.RandomCrop(100, padding=4), transforms.Normalize(0.0782, 0.2685))
    # Deterministic eval transform unless extra sample augmentation is on.
    all_transforms = (torch.nn.Sequential(transforms.CenterCrop(100), transforms.Normalize(0.0782, 0.2685)) if (args.sample_aug == 1) else torch.nn.Sequential(transforms.RandomCrop(100, padding=4), transforms.Normalize(0.0782, 0.2685)))
    if args.episodic:
        train_loader = episodic_iterator(base_data, base.shape[0], transforms=train_transforms)
    else:
        train_loader = iterator(base_data, base_targets, transforms=train_transforms)
    train_clean = iterator(base_data, base_targets, transforms=all_transforms, shuffle=False)
    val_loader = iterator(val_data, val_targets, transforms=all_transforms, shuffle=False)
    test_loader = iterator(novel_data, novel_targets, transforms=all_transforms, shuffle=False)
    return ((train_loader, train_clean, val_loader, test_loader), [1, 100, 100], (base.shape[0], val.shape[0], novel.shape[0], novel.shape[1]), True, False)
class TestPegasosQSVC(QiskitMachineLearningTestCase):
    """Tests for PegasosQSVC on small, linearly separable blob datasets."""
    def setUp(self):
        """Create 2D and 4D blob datasets scaled to [0, pi] plus feature maps."""
        super().setUp()
        algorithm_globals.random_seed = 10598
        # 2D problem: 2 features, 100 Pegasos steps.
        self.q = 2
        self.tau = 100
        self.feature_map = ZFeatureMap(feature_dimension=self.q, reps=1)
        (sample, label) = make_blobs(n_samples=20, n_features=2, centers=2, random_state=3, shuffle=True)
        sample = MinMaxScaler(feature_range=(0, np.pi)).fit_transform(sample)
        # 15 train / 5 test split.
        self.sample_train = sample[:15]
        self.label_train = label[:15]
        self.sample_test = sample[15:]
        self.label_test = label[15:]
        # 4D variant of the same setup.
        self.q_4d = 4
        self.feature_map_4d = ZFeatureMap(feature_dimension=self.q_4d, reps=1)
        (sample_4d, label_4d) = make_blobs(n_samples=20, n_features=self.q_4d, centers=2, random_state=3, shuffle=True)
        sample_4d = MinMaxScaler(feature_range=(0, np.pi)).fit_transform(sample_4d)
        self.sample_train_4d = sample_4d[:15]
        self.label_train_4d = label_4d[:15]
        self.sample_test_4d = sample_4d[15:]
        self.label_test_4d = label_4d[15:]
    def test_qsvc(self):
        """Fit/score with a fidelity kernel reaches perfect test accuracy."""
        qkernel = FidelityQuantumKernel(feature_map=self.feature_map)
        pegasos_qsvc = PegasosQSVC(quantum_kernel=qkernel, C=1000, num_steps=self.tau)
        pegasos_qsvc.fit(self.sample_train, self.label_train)
        score = pegasos_qsvc.score(self.sample_test, self.label_test)
        self.assertEqual(score, 1.0)
    def test_decision_function(self):
        """Decision-function sign must agree with the class labels."""
        qkernel = FidelityQuantumKernel(feature_map=self.feature_map)
        pegasos_qsvc = PegasosQSVC(quantum_kernel=qkernel, C=1000, num_steps=self.tau)
        pegasos_qsvc.fit(self.sample_train, self.label_train)
        decision_function = pegasos_qsvc.decision_function(self.sample_test)
        self.assertTrue(np.all(((decision_function > 0) == (self.label_test == 0))))
    def test_qsvc_4d(self):
        """Same as test_qsvc but on the 4-feature dataset."""
        qkernel = FidelityQuantumKernel(feature_map=self.feature_map_4d)
        pegasos_qsvc = PegasosQSVC(quantum_kernel=qkernel, C=1000, num_steps=self.tau)
        pegasos_qsvc.fit(self.sample_train_4d, self.label_train_4d)
        score = pegasos_qsvc.score(self.sample_test_4d, self.label_test_4d)
        self.assertEqual(score, 1.0)
    def test_precomputed_kernel(self):
        """Fit/score on precomputed kernel matrices instead of raw samples."""
        qkernel = FidelityQuantumKernel(feature_map=self.feature_map)
        pegasos_qsvc = PegasosQSVC(C=1000, num_steps=self.tau, precomputed=True)
        kernel_matrix_train = qkernel.evaluate(self.sample_train, self.sample_train)
        pegasos_qsvc.fit(kernel_matrix_train, self.label_train)
        kernel_matrix_test = qkernel.evaluate(self.sample_test, self.sample_train)
        score = pegasos_qsvc.score(kernel_matrix_test, self.label_test)
        self.assertEqual(score, 1.0)
    def test_change_kernel(self):
        """Assigning quantum_kernel after construction works."""
        qkernel = FidelityQuantumKernel(feature_map=self.feature_map)
        pegasos_qsvc = PegasosQSVC(C=1000, num_steps=self.tau)
        pegasos_qsvc.quantum_kernel = qkernel
        pegasos_qsvc.fit(self.sample_train, self.label_train)
        score = pegasos_qsvc.score(self.sample_test, self.label_test)
        self.assertEqual(score, 1)
    def test_labels(self):
        """Non-{0,1} class labels are handled transparently."""
        qkernel = FidelityQuantumKernel(feature_map=self.feature_map)
        pegasos_qsvc = PegasosQSVC(quantum_kernel=qkernel, C=1000, num_steps=self.tau)
        label_train_temp = self.label_train.copy()
        label_train_temp[(self.label_train == 0)] = 2
        label_train_temp[(self.label_train == 1)] = 3
        label_test_temp = self.label_test.copy()
        label_test_temp[(self.label_test == 0)] = 2
        label_test_temp[(self.label_test == 1)] = 3
        pegasos_qsvc.fit(self.sample_train, label_train_temp)
        score = pegasos_qsvc.score(self.sample_test, label_test_temp)
        self.assertEqual(score, 1.0)
    def test_constructor(self):
        """Constructor defaults, kernel/precomputed combos and validation."""
        with self.subTest('Default parameters'):
            pegasos_qsvc = PegasosQSVC()
            self.assertIsInstance(pegasos_qsvc.quantum_kernel, FidelityQuantumKernel)
            self.assertFalse(pegasos_qsvc.precomputed)
            self.assertEqual(pegasos_qsvc.num_steps, 1000)
        with self.subTest('PegasosQSVC with QuantumKernel'):
            qkernel = FidelityQuantumKernel(feature_map=self.feature_map)
            pegasos_qsvc = PegasosQSVC(quantum_kernel=qkernel)
            self.assertIsInstance(pegasos_qsvc.quantum_kernel, FidelityQuantumKernel)
            self.assertFalse(pegasos_qsvc.precomputed)
        with self.subTest('PegasosQSVC with precomputed kernel'):
            pegasos_qsvc = PegasosQSVC(precomputed=True)
            self.assertIsNone(pegasos_qsvc.quantum_kernel)
            self.assertTrue(pegasos_qsvc.precomputed)
        with self.subTest('PegasosQSVC with wrong parameters'):
            qkernel = FidelityQuantumKernel(feature_map=self.feature_map)
            with self.assertRaises(ValueError):
                _ = PegasosQSVC(quantum_kernel=qkernel, precomputed=True)
        with self.subTest('Both kernel and precomputed are passed'):
            qkernel = FidelityQuantumKernel(feature_map=self.feature_map)
            self.assertRaises(ValueError, PegasosQSVC, quantum_kernel=qkernel, precomputed=True)
    def test_change_kernel_types(self):
        """Switching from precomputed mode to a kernel object re-fits cleanly."""
        qkernel = FidelityQuantumKernel(feature_map=self.feature_map)
        pegasos_qsvc = PegasosQSVC(C=1000, num_steps=self.tau, precomputed=True)
        kernel_matrix_train = qkernel.evaluate(self.sample_train, self.sample_train)
        pegasos_qsvc.fit(kernel_matrix_train, self.label_train)
        # Now train the same estimator with a kernel object instead.
        pegasos_qsvc.quantum_kernel = FidelityQuantumKernel(feature_map=self.feature_map)
        pegasos_qsvc.fit(self.sample_train, self.label_train)
        score = pegasos_qsvc.score(self.sample_test, self.label_test)
        self.assertEqual(score, 1.0)
    def test_save_load(self):
        """A saved model reloads with identical predictions; wrong class errors."""
        features = np.array([[0, 0], [0.1, 0.2], [1, 1], [0.9, 0.8]])
        labels = np.array([0, 0, 1, 1])
        qkernel = FidelityQuantumKernel(feature_map=self.feature_map)
        regressor = PegasosQSVC(quantum_kernel=qkernel, C=1000, num_steps=self.tau)
        regressor.fit(features, labels)
        test_features = np.array([[0.5, 0.5]])
        original_predicts = regressor.predict(test_features)
        file_name = os.path.join(tempfile.gettempdir(), 'pegasos.model')
        regressor.save(file_name)
        try:
            regressor_load = PegasosQSVC.load(file_name)
            loaded_model_predicts = regressor_load.predict(test_features)
            np.testing.assert_array_almost_equal(original_predicts, loaded_model_predicts)
            # Loading with an unrelated model class must raise TypeError.
            class FakeModel(SerializableModelMixin):
                pass
            with self.assertRaises(TypeError):
                FakeModel.load(file_name)
        finally:
            os.remove(file_name)
def set_application_info(app):
    """Register the application's identity (name, id, icon) with GTK/GLib.

    Must run after quodlibet's init; asserts every piece of identity data
    is present before applying it, so misconfiguration fails loudly.
    """
    from quodlibet._init import is_init
    assert is_init()
    from gi.repository import Gtk, GLib
    assert app.process_name
    set_process_title(app.process_name)
    # Re-apply the title from the main loop: some wrappers (e.g. gdb/Xvfb
    # startup paths) reset it after we've set it once.
    GLib.idle_add(set_process_title, app.process_name)
    assert app.id
    GLib.set_prgname(app.id)
    assert app.name
    GLib.set_application_name(app.name)
    assert app.icon_name
    theme = Gtk.IconTheme.get_default()
    # Fail fast when the icon is missing from the theme.
    assert theme.has_icon(app.icon_name)
    Gtk.Window.set_default_icon_name(app.icon_name)
class GroupbySize(GroupbyAggregation):
    """Incremental groupby-size aggregation.

    on_new adds the incoming chunk's per-group counts to the accumulator;
    on_old subtracts a retired chunk's counts. Counts are kept as ints and
    the accumulator's index name is preserved.
    """
    def on_new(self, acc, new, grouper=None):
        counts = self.grouped(new, grouper=grouper).size()
        updated = acc.add(counts, fill_value=0).astype(int)
        updated.index.name = acc.index.name
        return (updated, updated)
    def on_old(self, acc, old, grouper=None):
        counts = self.grouped(old, grouper=grouper).size()
        updated = acc.sub(counts, fill_value=0).astype(int)
        updated.index.name = acc.index.name
        return (updated, updated)
    def initial(self, new, grouper=None):
        """Return an empty (zero-row) size Series with the right structure."""
        # Slice the grouper down to zero rows to match the empty frame.
        if hasattr(grouper, 'iloc'):
            grouper = grouper.iloc[:0]
        if isinstance(grouper, np.ndarray) or is_index_like(grouper):
            grouper = grouper[:0]
        return self.grouped(new.iloc[:0], grouper=grouper).size()
def clean_paragraphs(document):
    """Strip anchor tags from a document's paragraphs.

    Skips blacklisted documents and the first text block (presumably the
    title paragraph -- TODO confirm). Empty sentences and empty paragraphs
    are dropped. Returns the cleaned record, or None when nothing remains.
    """
    if document['id'] in BLACKLIST:
        return None
    anchor_pattern = re.compile('(</a>)|(<a.*?href.*?>)')
    cleaned_pars = []
    for par_sentences in document['text'][1:]:
        stripped = [text for text in (anchor_pattern.sub('', s) for s in par_sentences) if text]
        if stripped:
            cleaned_pars.append(stripped)
    if not cleaned_pars:
        return None
    return {'id': document['id'], 'title': document['title'], 'url': document['url'], 'paragraphs': cleaned_pars, 'charoffset': document['charoffset']}
class MemoryDB(BaseDB):
    """In-memory key/value database backed by a plain dict.

    When a dict is supplied it is used directly (not copied), so external
    mutations of that dict are visible through this DB and vice versa.
    """
    # bytes -> bytes backing store; set per-instance in __init__.
    kv_store: Dict[(bytes, bytes)] = None
    def __init__(self, kv_store: Dict[(bytes, bytes)]=None) -> None:
        if (kv_store is None):
            self.kv_store = {}
        else:
            self.kv_store = kv_store
    def __getitem__(self, key: bytes) -> bytes:
        return self.kv_store[key]
    def __setitem__(self, key: bytes, value: bytes) -> None:
        self.kv_store[key] = value
    def _exists(self, key: bytes) -> bool:
        return (key in self.kv_store)
    def __delitem__(self, key: bytes) -> None:
        del self.kv_store[key]
    def __iter__(self) -> Iterator[bytes]:
        return iter(self.kv_store)
    def __len__(self) -> int:
        return len(self.kv_store)
    def __repr__(self) -> str:
        return f'MemoryDB({self.kv_store!r})'
def upgrade_state_dict_for_deltalm(state_dict: Dict[(str, Any)], pretrained_deltalm_checkpoint: str, is_encoder=True) -> Dict[(str, Any)]:
    """Load a pretrained DeltaLM checkpoint into a model state dict.

    Strips the encoder/decoder prefixes from the checkpoint keys, remaps
    DeltaLM-specific layer names onto the target layout, and copies weights
    in place. Embedding tables are truncated or partially overwritten when
    vocabulary/position sizes differ. Raises IOError when the checkpoint
    file is missing. Returns the (mutated) state_dict.
    """
    if (not os.path.exists(pretrained_deltalm_checkpoint)):
        raise IOError('Model file not found: {}'.format(pretrained_deltalm_checkpoint))
    with open(pretrained_deltalm_checkpoint, 'rb') as f:
        state = torch.load(f, map_location=torch.device('cpu'))
    # Checkpoints come in several layouts; find the actual weights dict.
    if ('weights' in state):
        deltalm_state_dict = state['weights']
    elif ('model' in state):
        deltalm_state_dict = state['model']
    else:
        deltalm_state_dict = state
    # Keep only the side (encoder or decoder) we are upgrading and strip
    # its prefix. NOTE(review): str.replace removes the prefix anywhere in
    # the key, not just at the start -- presumably safe for these key names.
    new_deltalm_state_dict = {}
    for key in deltalm_state_dict.keys():
        if is_encoder:
            if (key.startswith('encoder.') or key.startswith('src_embedding.')):
                new_key = key.replace('encoder.', '')
                new_key = new_key.replace('src_embedding.', '')
                new_deltalm_state_dict[new_key] = deltalm_state_dict[key]
        elif (key.startswith('decoder.') or key.startswith('tgt_embedding.')):
            new_key = key.replace('decoder.', '')
            new_key = new_key.replace('tgt_embedding.', '')
            new_deltalm_state_dict[new_key] = deltalm_state_dict[key]
    deltalm_state_dict = new_deltalm_state_dict
    for key in deltalm_state_dict.keys():
        # Output projection is tied/initialized elsewhere; skip it.
        if ('output_projection' in key):
            continue
        # Map DeltaLM FFN naming onto the target module names.
        map_key = key
        map_key = map_key.replace('.ffn_1.fc1', '.fc3')
        map_key = map_key.replace('.ffn_1.fc2', '.fc4')
        map_key = map_key.replace('.ffn_2', '')
        map_key = map_key.replace('.ffn.', '.')
        map_key = map_key.replace('emb_layer_norm', 'layernorm_embedding')
        assert (map_key in state_dict), map_key
        if (('embed_positions' in key) or ('embed_tokens' in key)):
            # Embedding tables may differ in vocab / max-position size:
            # copy the overlapping rows only.
            left_size = state_dict[map_key].size(0)
            right_size = deltalm_state_dict[key].size(0)
            if (left_size <= right_size):
                state_dict[map_key] = deltalm_state_dict[key][:left_size]
            else:
                state_dict[map_key][:right_size] = deltalm_state_dict[key]
        else:
            state_dict[map_key] = deltalm_state_dict[key]
    return state_dict
def infer_constraints_if_possible(template: Type, actual: Type, direction: int) -> (list[Constraint] | None):
    """Infer constraints, unless the (erased) types are trivially unrelated.

    Returns None when the requested subtype relation cannot hold even after
    erasing type variables (including the type-variable upper bound in the
    SUPERTYPE_OF direction); otherwise delegates to infer_constraints.
    """
    if direction == SUBTYPE_OF:
        if not mypy.subtypes.is_subtype(erase_typevars(template), actual):
            return None
    if direction == SUPERTYPE_OF:
        if not mypy.subtypes.is_subtype(actual, erase_typevars(template)):
            return None
        if isinstance(template, TypeVarType) and not mypy.subtypes.is_subtype(actual, erase_typevars(template.upper_bound)):
            return None
    return infer_constraints(template, actual, direction)
_loss('bce')
class BinaryCrossEntropyLoss(nn.Module):
    """Mean binary cross-entropy scaled by the number of target classes.

    Reads predictions from model_output['scores'] and labels from
    sample_list['targets']; the mean-reduced BCE is multiplied by
    targets.size(1) so the magnitude matches a per-sample sum over classes.
    """
    def __init__(self):
        super().__init__()
    def forward(self, sample_list, model_output):
        predictions = model_output['scores']
        labels = sample_list['targets']
        mean_bce = F.binary_cross_entropy(predictions, labels, reduction='mean')
        return mean_bce * labels.size(1)
def test_buffer_position(buffer_):
    """Positions round-trip on 3D buffers; non-3D buffers stay at the origin."""
    for _ in range(10):
        # Random position within a +/-10 cube on each axis.
        position = [random.uniform((- 10.0), 10.0) for _ in range(3)]
        buffer_.position = position
        if buffer_.is3d:
            assert iter_almost_equal(buffer_.position, position)
        else:
            # Non-3D buffers silently ignore position updates.
            assert (buffer_.position == (0, 0, 0))
class Prepared():
    """Normalized variants of a distribution name for lookup.

    Mirrors importlib.metadata's Prepared: 'normalized' applies PEP 503
    normalization with '-' folded to '_'; 'legacy_normalized' only
    lowercases and replaces dashes. Both stay None when name is None.
    Falsy when constructed without a name.
    """
    normalized = None
    legacy_normalized = None
    def __init__(self, name):
        self.name = name
        if (name is None):
            return
        self.normalized = self.normalize(name)
        self.legacy_normalized = self.legacy_normalize(name)
    # Bug fix: without @staticmethod, self.normalize(name) passed the
    # instance as 'name' and raised TypeError (the decorators were lost).
    @staticmethod
    def normalize(name):
        """PEP 503 normalization, then dashes folded to underscores."""
        return re.sub('[-_.]+', '-', name).lower().replace('-', '_')
    @staticmethod
    def legacy_normalize(name):
        """Legacy normalization: lowercase with dashes as underscores."""
        return name.lower().replace('-', '_')
    def __bool__(self):
        return bool(self.name)
def set_up_clipboard(is_input):
    """Build the command line for reading from / writing to the clipboard.

    On Linux prefers xclip over xsel; on macOS uses pbcopy/pbpaste.
    is_input=True selects the copy (write) direction, False the paste
    (read) direction. Raises NameError when no clipboard tool is found.
    """
    command = []
    if sys.platform.startswith('linux'):
        if cmd_exists('xclip'):
            command = ['xclip', '-selection', 'c']
            if not is_input:
                command.append('-o')
        elif cmd_exists('xsel'):
            command = ['xsel', '-b', ('-i' if is_input else '-o')]
    elif sys.platform == 'darwin':
        # The trailing 'w'/'r' marks the pipe direction for the caller.
        command = (['pbcopy', 'w'] if is_input else ['pbpaste', 'r'])
    if not command:
        raise NameError('No clipboard manager')
    return command
# NOTE(review): the two lines below look like stripped pytest decorators
# ('@pytest.mark.django_db' and '@pytest.mark.parametrize') -- confirm.
_db
('score_index', [1, 2, 3, 4])
def test_submit_vote(graphql_client, user, conference_factory, submission_factory, score_index, requests_mock):
    """A ticket-holding user can vote any allowed score on a submission."""
    graphql_client.force_login(user)
    conference = conference_factory(active_voting=True)
    submission = submission_factory(conference=conference)
    # Pretend the user holds an admission ticket in Pretix.
    requests_mock.post(f'{settings.PRETIX_API}organizers/{conference.pretix_organizer_id}/events/{conference.pretix_event_id}/tickets/attendee-has-ticket/', json={'user_has_admission_ticket': True})
    (resp, variables) = _submit_vote(graphql_client, submission, value_index=score_index)
    assert (resp['data']['sendVote']['__typename'] == 'VoteType')
    # The vote is persisted with the submitted value, submission and user.
    vote = Vote.objects.get(id=resp['data']['sendVote']['id'])
    assert (vote.value == score_index)
    assert (vote.submission.hashid == variables['submission'])
    assert (vote.user_id == user.id)
# NOTE(review): the two lines below look like stripped
# '@pytest.mark.parametrize' decorators -- confirm upstream.
.parametrize('username,password', users)
.parametrize('export_format', export_formats)
def test_detail_export(db, client, username, password, export_format):
    """Detail export endpoint honors per-user permissions and, for XML,
    produces an <rdmo> document of page/questionset/question elements."""
    client.login(username=username, password=password)
    instance = Page.objects.first()
    url = ((reverse(urlnames['detail_export'], args=[instance.pk]) + export_format) + '/')
    response = client.get(url)
    assert (response.status_code == status_map['detail'][username]), response.content
    if ((response.status_code == 200) and (export_format == 'xml')):
        root = et.fromstring(response.content)
        assert (root.tag == 'rdmo')
        for child in root:
            assert (child.tag in ['page', 'questionset', 'question'])
def _dtype_to_pytorch_dtype(dtype: dt.DType) -> torch.dtype:
torch_dtype_name = ('bool' if (dtype.name == 'boolean') else dtype.name)
if (not hasattr(torch, torch_dtype_name)):
raise ValueError(f"Can't convert {dtype} to PyTorch")
torch_dtype = getattr(torch, torch_dtype_name)
return torch_dtype |
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for (data, target) in test_loader:
(data, target) = (data.to(device), target.to(device))
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(test_loss, correct, len(test_loader.dataset), ((100.0 * correct) / len(test_loader.dataset)))) |
class BatchEncoding(UserDict):
    """Dict-like holder for tokenizer output (``input_ids``, ``attention_mask``, ...).

    When a fast (Rust-backed) tokenizer produced the data, the per-sample
    ``EncodingFast`` objects are kept as well; they power the
    token/word/character alignment helpers below.  With a pure-Python
    tokenizer ``self._encodings`` is ``None`` and those helpers raise.

    NOTE(review): several accessors here (``n_sequences``, ``is_fast``,
    ``encodings``) read like properties but carry no ``@property`` decorator
    in this copy — decorators appear stripped; confirm intended call style
    against upstream.
    """
    def __init__(self, data: Optional[Dict[(str, Any)]]=None, encoding: Optional[Union[(EncodingFast, Sequence[EncodingFast])]]=None, tensor_type: Union[(None, str, TensorType)]=None, prepend_batch_axis: bool=False, n_sequences: Optional[int]=None):
        """Store ``data``, normalise ``encoding`` to a list, and optionally
        convert all values to framework tensors straight away."""
        super().__init__(data)
        if isinstance(encoding, EncodingFast):
            # a single encoding is accepted for convenience; keep a list internally
            encoding = [encoding]
        self._encodings = encoding
        if ((n_sequences is None) and (encoding is not None) and len(encoding)):
            # infer from the first sample when not given explicitly
            n_sequences = encoding[0].n_sequences
        self._n_sequences = n_sequences
        self.convert_to_tensors(tensor_type=tensor_type, prepend_batch_axis=prepend_batch_axis)
    def n_sequences(self) -> Optional[int]:
        """Number of sequences per sample (e.g. 1 for single, 2 for pairs), if known."""
        return self._n_sequences
    def is_fast(self) -> bool:
        """Whether the batch came from a fast tokenizer (alignment info present)."""
        return (self._encodings is not None)
    def __getitem__(self, item: Union[(int, str)]) -> Union[(Any, EncodingFast)]:
        """String keys index the data dict; integer keys index the per-sample encodings."""
        if isinstance(item, str):
            return self.data[item]
        elif (self._encodings is not None):
            return self._encodings[item]
        else:
            raise KeyError('Indexing with integers (to access backend Encoding for a given batch index) is not available when using Python based tokenizers')
    def __getattr__(self, item: str):
        # attribute-style access to the data dict, e.g. ``batch.input_ids``
        try:
            return self.data[item]
        except KeyError:
            raise AttributeError
    def __getstate__(self):
        # pickle only the plain data and the encodings
        return {'data': self.data, 'encodings': self._encodings}
    def __setstate__(self, state):
        if ('data' in state):
            self.data = state['data']
        if ('encodings' in state):
            self._encodings = state['encodings']
    def keys(self):
        return self.data.keys()
    def values(self):
        return self.data.values()
    def items(self):
        return self.data.items()
    def encodings(self) -> Optional[List[EncodingFast]]:
        """The underlying ``EncodingFast`` list, or ``None`` for slow tokenizers."""
        return self._encodings
    def tokens(self, batch_index: int=0) -> List[str]:
        """Token strings for one sample (fast tokenizers only)."""
        if (not self._encodings):
            raise ValueError('tokens() is not available when using Python-based tokenizers')
        return self._encodings[batch_index].tokens
    def sequence_ids(self, batch_index: int=0) -> List[Optional[int]]:
        """Per-token sequence index (None for special tokens), fast tokenizers only."""
        if (not self._encodings):
            raise ValueError('sequence_ids() is not available when using Python-based tokenizers')
        return self._encodings[batch_index].sequence_ids
    def words(self, batch_index: int=0) -> List[Optional[int]]:
        """Deprecated alias for :meth:`word_ids`."""
        if (not self._encodings):
            raise ValueError('words() is not available when using Python-based tokenizers')
        warnings.warn('`BatchEncoding.words()` property is deprecated and should be replaced with the identical, but more self-explanatory `BatchEncoding.word_ids()` property.', FutureWarning)
        return self.word_ids(batch_index)
    def word_ids(self, batch_index: int=0) -> List[Optional[int]]:
        """Per-token word index (None for special tokens), fast tokenizers only."""
        if (not self._encodings):
            raise ValueError('word_ids() is not available when using Python-based tokenizers')
        return self._encodings[batch_index].word_ids
    def token_to_sequence(self, batch_or_token_index: int, token_index: Optional[int]=None) -> int:
        """Map a token position to the index of the sequence it belongs to.

        With one argument, the batch index defaults to 0 and the argument is
        the token index; with two, they are (batch_index, token_index).

        NOTE(review): the negative-index branches read ``self._batch_size``
        and ``self._seq_len``, which are never assigned anywhere in this
        class — negative indices would raise AttributeError; confirm upstream.
        """
        if (not self._encodings):
            raise ValueError('token_to_sequence() is not available when using Python based tokenizers')
        if (token_index is not None):
            batch_index = batch_or_token_index
        else:
            batch_index = 0
            token_index = batch_or_token_index
        if (batch_index < 0):
            batch_index = (self._batch_size + batch_index)
        if (token_index < 0):
            token_index = (self._seq_len + token_index)
        return self._encodings[batch_index].token_to_sequence(token_index)
    def token_to_word(self, batch_or_token_index: int, token_index: Optional[int]=None) -> int:
        """Map a token position to the index of the word it is part of.

        Same one/two-argument convention as :meth:`token_to_sequence`; the
        same ``_batch_size``/``_seq_len`` caveat applies to negative indices.
        """
        if (not self._encodings):
            raise ValueError('token_to_word() is not available when using Python based tokenizers')
        if (token_index is not None):
            batch_index = batch_or_token_index
        else:
            batch_index = 0
            token_index = batch_or_token_index
        if (batch_index < 0):
            batch_index = (self._batch_size + batch_index)
        if (token_index < 0):
            token_index = (self._seq_len + token_index)
        return self._encodings[batch_index].token_to_word(token_index)
    def word_to_tokens(self, batch_or_word_index: int, word_index: Optional[int]=None, sequence_index: int=0) -> Optional[TokenSpan]:
        """Span of token indices covering a word, or None if the word has no tokens.

        Same one/two-argument convention; ``_batch_size``/``_seq_len`` caveat
        applies to negative indices (see :meth:`token_to_sequence`).
        """
        if (not self._encodings):
            raise ValueError('word_to_tokens() is not available when using Python based tokenizers')
        if (word_index is not None):
            batch_index = batch_or_word_index
        else:
            batch_index = 0
            word_index = batch_or_word_index
        if (batch_index < 0):
            batch_index = (self._batch_size + batch_index)
        if (word_index < 0):
            word_index = (self._seq_len + word_index)
        span = self._encodings[batch_index].word_to_tokens(word_index, sequence_index)
        return (TokenSpan(*span) if (span is not None) else None)
    def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int]=None) -> CharSpan:
        """Character span in the original string covered by a token."""
        if (not self._encodings):
            raise ValueError('token_to_chars() is not available when using Python based tokenizers')
        if (token_index is not None):
            batch_index = batch_or_token_index
        else:
            batch_index = 0
            token_index = batch_or_token_index
        return CharSpan(*self._encodings[batch_index].token_to_chars(token_index))
    def char_to_token(self, batch_or_char_index: int, char_index: Optional[int]=None, sequence_index: int=0) -> int:
        """Index of the token containing a character of the original string."""
        if (not self._encodings):
            raise ValueError('char_to_token() is not available when using Python based tokenizers')
        if (char_index is not None):
            batch_index = batch_or_char_index
        else:
            batch_index = 0
            char_index = batch_or_char_index
        return self._encodings[batch_index].char_to_token(char_index, sequence_index)
    def word_to_chars(self, batch_or_word_index: int, word_index: Optional[int]=None, sequence_index: int=0) -> CharSpan:
        """Character span in the original string covered by a word."""
        if (not self._encodings):
            raise ValueError('word_to_chars() is not available when using Python based tokenizers')
        if (word_index is not None):
            batch_index = batch_or_word_index
        else:
            batch_index = 0
            word_index = batch_or_word_index
        return CharSpan(*self._encodings[batch_index].word_to_chars(word_index, sequence_index))
    def char_to_word(self, batch_or_char_index: int, char_index: Optional[int]=None, sequence_index: int=0) -> int:
        """Index of the word containing a character of the original string."""
        if (not self._encodings):
            raise ValueError('char_to_word() is not available when using Python based tokenizers')
        if (char_index is not None):
            batch_index = batch_or_char_index
        else:
            batch_index = 0
            char_index = batch_or_char_index
        return self._encodings[batch_index].char_to_word(char_index, sequence_index)
    def convert_to_tensors(self, tensor_type: Optional[Union[(str, TensorType)]]=None, prepend_batch_axis: bool=False):
        """Convert all values in-place to tensors of the requested framework.

        ``tensor_type`` of None is a no-op; otherwise tensorflow / pytorch /
        jax / numpy constructors are selected.  Values already recognised as
        tensors are left untouched.  Returns self for chaining.
        """
        if (tensor_type is None):
            return self
        if (not isinstance(tensor_type, TensorType)):
            tensor_type = TensorType(tensor_type)
        # pick the framework-specific constructor and tensor check
        if (tensor_type == TensorType.TENSORFLOW):
            if (not is_tf_available()):
                raise ImportError('Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.')
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif (tensor_type == TensorType.PYTORCH):
            if (not is_torch_available()):
                raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif (tensor_type == TensorType.JAX):
            if (not is_flax_available()):
                raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.')
            import jax.numpy as jnp
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        for (key, value) in self.items():
            try:
                if prepend_batch_axis:
                    # wrap the single sample in a one-element batch dimension
                    value = [value]
                if (not is_tensor(value)):
                    tensor = as_tensor(value)
                    self[key] = tensor
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
            # conversion failure (usually ragged lists) is re-reported below.
            except:
                if (key == 'overflowing_tokens'):
                    raise ValueError('Unable to create tensor returning overflowing tokens of different lengths. Please see if a fast version of this tokenizer is available to have this feature available.')
                raise ValueError("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' 'truncation=True' to have batched tensors with the same length.")
        return self
    # NOTE(review): stray token — looks like a stripped decorator (upstream
    # uses @torch_required on to()); confirm against the original source.
    _required
    def to(self, device: Union[(str, 'torch.device')]) -> 'BatchEncoding':
        """Move all tensor values to ``device`` (PyTorch only); returns self."""
        if (isinstance(device, str) or _is_torch_device(device) or isinstance(device, int)):
            self.data = {k: v.to(device=device) for (k, v) in self.data.items()}
        else:
            logger.warning(f'Attempting to cast a BatchEncoding to type {str(device)}. This is not supported.')
        return self
def substitute_variables(quadratic_program: QuadraticProgram, constants: Optional[Dict[(Union[(str, int)], float)]]=None, variables: Optional[Dict[(Union[(str, int)], Tuple[(Union[(str, int)], float)])]]=None) -> QuadraticProgram:
    """Substitute variables of a quadratic program.

    Args:
        quadratic_program: the program whose variables are substituted.
        constants: maps a variable (name or index) to the constant replacing it.
        variables: maps a variable (name or index) to ``(other, coeff)``,
            meaning ``x <- coeff * other``.

    Returns:
        A new QuadraticProgram with all substitutions applied.

    Raises:
        QiskitOptimizationError: for zero coefficients, self-substitution,
            substituting a variable twice, or substituting by a variable
            that is itself substituted.
    """
    substitutions = {}
    if constants:
        for var, value in constants.items():
            name = quadratic_program.get_variable(var).name
            if name in substitutions:
                raise QiskitOptimizationError(f'Cannot substitute the same variable twice: {var} <- {value}')
            substitutions[name] = SubstitutionExpression(const=value)
    if variables:
        for var, (other, coeff) in variables.items():
            if coeff == 0:
                raise QiskitOptimizationError(f'coefficient must be non-zero: {var} {other} {coeff}')
            name = quadratic_program.get_variable(var).name
            other_name = quadratic_program.get_variable(other).name
            if name == other_name:
                raise QiskitOptimizationError(f'Cannot substitute the same variable: {var} <- {other} {coeff}')
            if name in substitutions:
                raise QiskitOptimizationError(f'Cannot substitute the same variable twice: {var} <- {other} {coeff}')
            if other_name in substitutions:
                raise QiskitOptimizationError(f'Cannot substitute by variable that gets substituted itself: {var} <- {other} {coeff}')
            substitutions[name] = SubstitutionExpression(variable=other_name, coeff=coeff)
    return _SubstituteVariables().substitute_variables(quadratic_program, substitutions)
def check_bitdepth_rescale(palette, bitdepth, transparent, alpha, greyscale):
    """Validate requested bit depth(s) for the given colour mode and compute
    any rescaling needed to reach a PNG-legal depth.

    Returns ``(target_bitdepth, rescale)`` where ``rescale`` is ``None`` when
    no rescaling is required, otherwise a list of ``(source, target)`` pairs
    (one per channel).  Raises ProtocolError for illegal combinations.
    """
    if palette:
        # Palette images: a single index channel at 1/2/4/8 bits; the
        # transparent/alpha/greyscale options are mutually exclusive with it.
        if len(bitdepth) != 1:
            raise ProtocolError('with palette, only a single bitdepth may be used')
        (depth,) = bitdepth
        if depth not in (1, 2, 4, 8):
            raise ProtocolError('with palette, bitdepth must be 1, 2, 4, or 8')
        if transparent is not None:
            raise ProtocolError('transparent and palette not compatible')
        if alpha:
            raise ProtocolError('alpha and palette not compatible')
        if greyscale:
            raise ProtocolError('greyscale and palette not compatible')
        return depth, None
    if greyscale and not alpha:
        # Plain greyscale supports 1/2/4/8/16 natively; 3 -> 4, 5..7 -> 8,
        # and anything above 8 -> 16.
        (depth,) = bitdepth
        if depth in (1, 2, 4, 8, 16):
            return depth, None
        if depth > 8:
            target = 16
        elif depth == 3:
            target = 4
        else:
            assert depth in (5, 6, 7)
            target = 8
        return target, [(depth, target)]
    # Colour and/or alpha images: every channel must end up at 8 or 16 bits.
    assert alpha or not greyscale
    distinct = tuple(set(bitdepth))
    if distinct in [(8,), (16,)]:
        (depth,) = distinct
        return depth, None
    target = 16 if max(bitdepth) > 8 else 8
    return target, [(b, target) for b in bitdepth]
class ContrastiveLoss(nn.Module):
    """Contrastive loss over sentence-embedding pairs.

    For each pair: label 1 contributes the squared distance (pulls the pair
    together), label 0 contributes the squared hinge ``relu(margin - d)``
    (pushes the pair apart beyond ``margin``).
    """

    def __init__(self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float=0.5, size_average: bool=True):
        super().__init__()
        self.model = model
        self.distance_metric = distance_metric
        self.margin = margin
        self.size_average = size_average

    def forward(self, sentence_features: Iterable[Dict[(str, Tensor)]], labels: Tensor):
        """Embed both sides of each pair and return the mean (or sum) loss."""
        embeddings = [self.model(features)['sentence_embedding'] for features in sentence_features]
        assert len(embeddings) == 2
        anchor, other = embeddings
        dist = self.distance_metric(anchor, other)
        positive_term = labels.float() * dist.pow(2)
        negative_term = (1 - labels).float() * F.relu(self.margin - dist).pow(2)
        losses = 0.5 * (positive_term + negative_term)
        if self.size_average:
            return losses.mean()
        return losses.sum()
class Story(NameSlugModel, ContentManageable):
    """A success story published on the site.

    Name/slug handling comes from NameSlugModel; authorship/timestamps
    presumably from ContentManageable (not visible here — confirm).
    """
    company_name = models.CharField(max_length=500)  # free-text fallback, see get_company_name()
    company_url = models.URLField(verbose_name='Company URL')  # free-text fallback, see get_company_url()
    company = models.ForeignKey(Company, related_name='success_stories', blank=True, null=True, on_delete=models.CASCADE)
    category = models.ForeignKey(StoryCategory, related_name='success_stories', on_delete=models.CASCADE)
    author = models.CharField(max_length=500, help_text='Author of the content')
    author_email = models.EmailField(max_length=100, blank=True, null=True)
    pull_quote = models.TextField()
    content = MarkupField(default_markup_type=DEFAULT_MARKUP_TYPE)
    is_published = models.BooleanField(default=False, db_index=True)
    featured = models.BooleanField(default=False, help_text='Set to use story in the supernav')
    image = models.ImageField(upload_to='successstories', blank=True, null=True)
    submitted_by = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.SET_NULL)
    objects = StoryManager()
    class Meta():
        # newest first; 'created' presumably defined by ContentManageable — confirm
        ordering = ('-created',)
        verbose_name = 'story'
        verbose_name_plural = 'stories'
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        """Public detail-page URL for this story."""
        return reverse('success_story_detail', kwargs={'slug': self.slug})
    def get_admin_url(self):
        """Django admin change-form URL for this story."""
        return reverse('admin:successstories_story_change', args=(self.id,))
    def get_company_name(self):
        """Prefer the linked Company's name; fall back to the free-text field."""
        if self.company:
            return self.company.name
        else:
            return self.company_name
    def get_company_url(self):
        """Prefer the linked Company's URL; fall back to the free-text field."""
        if self.company:
            return self.company.url
        else:
            return self.company_url
def save_json_predictions(opts, cost, sample_idx, k_low, features, cls_list, cls_names, img_ids):
    """Score ``features`` with every per-class low-shot SVM and dump the
    per-image decision values to a JSON file.

    Args:
        opts: options object; ``opts.output_path`` is the destination directory.
        cost: SVM cost used to locate the trained model files.
        sample_idx, k_low: identify the low-shot sample / k-shot setting.
        features: feature matrix scored by each classifier.
        cls_list: class indices; one saved model per class is loaded.
        cls_names: display names, aligned with ``cls_list`` positions.
        img_ids: image identifiers aligned with rows of ``features``.

    Writes ``test_<suffix>_json_preds.json`` mapping
    ``img_id -> {cls_name: decision_value}``.
    """
    num_classes = len(cls_list)
    json_predictions = {}
    # The suffix depends only on the sample/k-shot setting, not on the class:
    # compute it once.  This also fixes a NameError on the output-file line
    # below when cls_list is empty (the loop never ran, suffix was unbound).
    suffix = 'sample{}_k{}'.format(sample_idx + 1, k_low)
    for cls in range(num_classes):
        model_file = svm_helper.get_low_shot_output_file(opts, cls, cost, suffix)
        with open(model_file, 'rb') as fopen:
            if six.PY2:
                model = pickle.load(fopen)
            else:
                # models were pickled under Python 2; latin1 keeps numpy arrays intact
                model = pickle.load(fopen, encoding='latin1')
        prediction = model.decision_function(features)
        cls_name = cls_names[cls]
        for idx in range(len(prediction)):
            img_id = img_ids[idx]
            # accumulate one {cls_name: score} dict per image
            json_predictions.setdefault(img_id, {})[cls_name] = prediction[idx]
    output_file = os.path.join(opts.output_path, 'test_{}_json_preds.json'.format(suffix))
    with open(output_file, 'w') as fp:
        json.dump(json_predictions, fp)
def rxn_template(rxn_smiles, templates):
    """Find a template matching every reactant, agent, and product of a reaction.

    Templates are tried in order and the LAST one whose reactant, agent and
    product patterns all match wins (matching the original scan-to-the-end
    behaviour).  Returns ``templates[match]`` for the winning template
    (``templates`` is presumably a mapping keyed by template — confirm with
    callers), or ``None`` when nothing matches.
    """
    parts = split_rxn_parts(rxn_smiles)
    reactants, agents, products = parts[0], parts[1], parts[2]
    matched = None
    for template in templates:
        if not all(template.IsMoleculeReactant(r) for r in reactants):
            continue
        if not all(template.IsMoleculeAgent(a) for a in agents):
            continue
        if all(template.IsMoleculeProduct(p) for p in products):
            matched = template
    if not matched:
        return matched
    return templates[matched]
def orders_to_selection(orders, pad=1.0):
    """Turn download orders into a combined, padded trace selection.

    Each order contributes a ``(net, sta, loc, cha, tmin, tmax)`` entry;
    overlapping entries are merged by ``combine_selections`` and every
    resulting window is widened by ``pad`` samples (of that channel's
    sampling interval) on each side.
    """
    deltats = {}
    spans = []
    for order in sorted(orders, key=orders_sort_key):
        spans.append(order.codes.nslc + (order.tmin, order.tmax))
        deltats[order.codes.nslc] = order.deltat
    padded = []
    for net, sta, loc, cha, tmin, tmax in combine_selections(spans):
        deltat = deltats[net, sta, loc, cha]
        padded.append((net, sta, loc, cha, tmin - pad * deltat, tmax + pad * deltat))
    return padded
class Scenario(ScenarioGenerator):
    """Two connected roads: a fixed-geometry road (100 m line + 100 m arc)
    followed by a plain 100 m straight road."""

    def __init__(self):
        super().__init__()

    def road(self, **kwargs):
        """Build and return the OpenDRIVE network for this scenario."""
        # Fixed geometry: straight segment from the origin, then an arc.
        pv = xodr.PlanView()
        pv.add_fixed_geometry(xodr.Line(100), 0, 0, 0)
        pv.add_fixed_geometry(xodr.Arc(0.01, length=100), 100, 0, 0)
        # One lane section with solid road marks on both outer lanes.
        section = xodr.LaneSection(0, xodr.standard_lane())
        section.add_left_lane(xodr.standard_lane(rm=xodr.STD_ROADMARK_SOLID))
        section.add_right_lane(xodr.standard_lane(rm=xodr.STD_ROADMARK_SOLID))
        lanes = xodr.Lanes()
        lanes.add_lanesection(section)
        first_road = xodr.Road(0, pv, lanes)
        second_road = xodr.create_road(xodr.Line(100), 1)
        # Link road 0's end to road 1's start.
        first_road.add_successor(xodr.ElementType.road, 1, xodr.ContactPoint.start)
        second_road.add_predecessor(xodr.ElementType.road, 0, xodr.ContactPoint.end)
        odr = xodr.OpenDrive('my_road')
        odr.add_road(first_road)
        odr.add_road(second_road)
        odr.adjust_roads_and_lanes()
        return odr
def lazy_apply(module: torch.nn.Module, fn: Callable[([torch.nn.Module], None)]) -> torch.nn.Module:
    """Queue ``fn`` to run on ``module`` after its first forward pass.

    The queue and the forward hook are created lazily on first use; repeated
    calls just append to the existing queue.  Returns ``module`` for chaining.
    """
    queue = getattr(module, '_functions_to_lazy_apply', None)
    if queue is None:
        queue = []
        module._functions_to_lazy_apply = queue
    if not hasattr(module, '_lazy_apply_hook'):
        # registered once; the hook drains the queue after the first forward
        module._lazy_apply_hook = module.register_forward_hook(_apply_functions_after_first_forward)
    queue.append(fn)
    return module
class Effect6233(BaseEffect):
    """Passive hull bonus: boosts energy-neutralizer falloff effectiveness,
    scaled by the 'Recon Ships' skill."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        """Apply the ship's eliteBonusReconShip3 attribute as a
        'falloffEffectiveness' boost to all Energy Neutralizer modules."""
        def is_energy_neutralizer(mod):
            return mod.item.group.name == 'Energy Neutralizer'
        bonus = src.getModifiedItemAttr('eliteBonusReconShip3')
        fit.modules.filteredItemBoost(is_energy_neutralizer, 'falloffEffectiveness', bonus, skill='Recon Ships', **kwargs)
def generate_summaries_or_translations(examples: List[str], out_file: str, model_name: str, batch_size: int=8, device: str=DEFAULT_DEVICE, fp16=False, task='summarization', prefix=None, **generate_kwargs) -> Dict:
    """Run a seq2seq model over ``examples`` and write one generated line per
    input to ``out_file``.

    Args:
        examples: input texts, one per line to generate.
        out_file: destination path (written UTF-8, one hypothesis per line).
        model_name: model identifier/path for AutoModelForSeq2SeqLM.
        batch_size: number of examples generated per forward pass.
        device: torch device string.
        fp16: cast the model to half precision.
        task: passed to use_task_specific_params to pick generation defaults.
        prefix: text prepended to every example; defaults to the model
            config's prefix (if any).
        **generate_kwargs: forwarded to ``model.generate``.

    Returns:
        dict with ``n_obs``, ``runtime`` (s, int) and ``seconds_per_sample``.
    """
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f'Inferred tokenizer type: {tokenizer.__class__}')
    start_time = time.time()
    use_task_specific_params(model, task)
    if prefix is None:
        # fall back to the model's configured prefix, if any
        prefix = getattr(model.config, 'prefix', '') or ''
    # Open the output after model/tokenizer setup and inside a context manager
    # so the handle cannot leak if loading or generation raises (previously
    # the file was opened eagerly and never closed on error).
    with Path(out_file).open('w', encoding='utf-8') as fout:
        for examples_chunk in tqdm(list(chunks(examples, batch_size))):
            examples_chunk = [prefix + text for text in examples_chunk]
            batch = tokenizer(examples_chunk, return_tensors='pt', truncation=True, padding='longest').to(device)
            summaries = model.generate(input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs)
            dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
            for hypothesis in dec:
                fout.write(hypothesis + '\n')
            # flush per chunk so partial progress is visible on disk
            fout.flush()
    runtime = int(time.time() - start_time)
    n_obs = len(examples)
    # guard against empty input (previously raised ZeroDivisionError)
    seconds_per_sample = round(runtime / n_obs, 4) if n_obs else 0.0
    return {'n_obs': n_obs, 'runtime': runtime, 'seconds_per_sample': seconds_per_sample}
class ProjectMergeRequestApprovalRule(SaveMixin, ObjectDeleteMixin, RESTObject):
    """An approval rule attached to a specific merge request of a project."""
    _repr_attr = 'name'
    # Identifying attributes populated from the API payload.
    id: int
    approval_rule_id: int
    merge_request_iid: int
    _  # NOTE(review): stray token — looks like a truncated attribute/decorator; confirm against the upstream python-gitlab source
    def save(self, **kwargs: Any) -> None:
        """Persist changes to the rule via SaveMixin.save().

        The identifying attributes are rearranged first so the manager builds
        the right endpoint path.  NOTE(review): this mutates the object in
        place — after calling save(), ``self.id`` holds the project id and
        ``self.approval_rule_id`` the original rule id.
        """
        self.approval_rule_id = self.id
        self.merge_request_iid = self._parent_attrs['mr_iid']
        self.id = self._parent_attrs['project_id']
        SaveMixin.save(self, **kwargs)
_rewriter([GenGammaRV])
def generalized_gamma_from_gamma(fgraph, node):
    """Rewrite a GenGamma node as a transformed Gamma draw.

    Draws G ~ Gamma(alpha / p, 1) using the node's shared inputs, then
    returns lambd * G**(1/p), cast back to the node's output dtype.
    Returns [next_rng, replacement_output] as rewriter convention requires.
    """
    *shared_inputs, alpha, p, lambd = node.inputs
    next_rng, gamma_draw = _gamma.make_node(*shared_inputs, alpha / p, ones_like(lambd)).outputs
    transformed = (gamma_draw ** reciprocal(p)) * lambd
    return [next_rng, cast(transformed, dtype=node.default_output().dtype)]
def test_classify():
    """Validate every saved classification result under scratch_classify:
    shapes/finiteness of the CV results and metric estimates within [0, 1]."""
    results_dir = this_dir / 'scratch_classify'
    for result_path in results_dir.glob('**/{}'.format(cfg.results_file_name)):
        full_results = load_results(result_path)
        print('testing results from {}'.format(result_path))
        num_datasets = len(full_results['user_options']['user_feature_paths'])
        num_targets = len(full_results['_target_set'])
        clf_res = ClassifyCVResults(path=result_path)
        _test_shape_finiteness(clf_res, full_results['num_rep_cv'], num_datasets)
        metric_pairs = (('balanced_accuracy_score', 'balanced accuracy'), ('area_under_roc', 'AUC'))
        for metric_id, metric_label in metric_pairs:
            # AUC is undefined for multi-class problems here
            if num_targets > 2 and 'AUC' in metric_label:
                continue
            if metric_id not in clf_res.metric_set:
                print('metric {} does not seem to exist in {}'.format(metric_id, result_path))
                continue
            estimates, ds_ids = clf_res.to_array(metric_id)
            if estimates.min() < 0.0 or estimates.max() > 1.0:
                raise ValueError('some estimates of {} are out of bounds i.e. <0.0 or >1.0'.format(metric_label))
class HistoricalRestrictions(Restrictions):
    """Asset restrictions whose state changes over time.

    Each asset carries a history of (effective_date, state) records; an
    asset is considered restricted at ``dt`` when the most recent record
    effective at or before ``dt`` is FROZEN.
    """

    def __init__(self, restrictions):
        # Group the flat record list by asset and order each asset's history
        # by when the record takes effect.
        grouped = groupby((lambda rec: rec.asset), restrictions)
        self._restrictions_by_asset = {
            asset: sorted(history, key=(lambda rec: rec.effective_date))
            for (asset, history) in iteritems(grouped)
        }

    def is_restricted(self, assets, dt):
        """Bool for a single Asset; otherwise a boolean Series indexed by asset."""
        if isinstance(assets, Asset):
            return self._is_restricted_for_asset(assets, dt)
        check = partial(self._is_restricted_for_asset, dt=dt)
        return pd.Series(index=pd.Index(assets), data=vectorize(check, otypes=[bool])(assets))

    def _is_restricted_for_asset(self, asset, dt):
        # Walk the history up to dt; the latest applicable state wins.
        state = RESTRICTION_STATES.ALLOWED
        for record in self._restrictions_by_asset.get(asset, ()):
            if record.effective_date > dt:
                break
            state = record.state
        return state == RESTRICTION_STATES.FROZEN
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.