code stringlengths 281 23.7M |
|---|
def test_topics(gl, gitlab_version):
    """End-to-end CRUD + merge test for GitLab topics."""
    assert not gl.topics.list()

    # GitLab >= 15 requires a title when creating a topic.
    attrs = {'name': 'my-topic', 'description': 'My Topic'}
    if gitlab_version.major >= 15:
        attrs['title'] = 'my topic title'
    topic = gl.topics.create(attrs)
    assert topic.name == 'my-topic'
    if gitlab_version.major >= 15:
        assert topic.title == 'my topic title'
    assert gl.topics.list()

    # Update round-trip.
    topic.description = 'My Updated Topic'
    topic.save()
    assert gl.topics.get(topic.id).description == topic.description

    attrs = {'name': 'my-second-topic', 'description': 'My Second Topic'}
    if gitlab_version.major >= 15:
        attrs['title'] = 'my second topic title'
    other = gl.topics.create(attrs)

    # Merging folds the first topic into the second.
    merged = gl.topics.merge(topic.id, other.id)
    assert merged['id'] == other.id
    other.delete()
    assert not gl.topics.list()
class SubtypeVisitor(RTypeVisitor[bool]):
    """Visitor answering: is the visited (left) RType a subtype of ``self.right``?

    Used via double dispatch: ``left.accept(SubtypeVisitor(right))``.
    """

    def __init__(self, right: RType) -> None:
        self.right = right

    def visit_rinstance(self, left: RInstance) -> bool:
        # An instance type is a subtype of every class in its MRO.
        return (isinstance(self.right, RInstance) and (self.right.class_ir in left.class_ir.mro))

    def visit_runion(self, left: RUnion) -> bool:
        # A union is a subtype iff every member is.
        return all((is_subtype(item, self.right) for item in left.items))

    def visit_rprimitive(self, left: RPrimitive) -> bool:
        right = self.right
        # Widening chains between primitive representations:
        # bit -> bool -> tagged/fixed-width int; short_int -> int; fixed-width -> int.
        if is_bool_rprimitive(left):
            if (is_tagged(right) or is_fixed_width_rtype(right)):
                return True
        elif is_bit_rprimitive(left):
            if (is_bool_rprimitive(right) or is_tagged(right) or is_fixed_width_rtype(right)):
                return True
        elif is_short_int_rprimitive(left):
            if is_int_rprimitive(right):
                return True
        elif is_fixed_width_rtype(left):
            if is_int_rprimitive(right):
                return True
        # Otherwise primitives are only subtypes of themselves (identity compare).
        return (left is right)

    def visit_rtuple(self, left: RTuple) -> bool:
        # Any RTuple is a subtype of the generic tuple primitive.
        if is_tuple_rprimitive(self.right):
            return True
        if isinstance(self.right, RTuple):
            # Same arity and element-wise subtyping.
            return ((len(self.right.types) == len(left.types)) and all((is_subtype(t1, t2) for (t1, t2) in zip(left.types, self.right.types))))
        return False

    def visit_rstruct(self, left: RStruct) -> bool:
        # Structs compare nominally, by name.
        return (isinstance(self.right, RStruct) and (self.right.name == left.name))

    def visit_rarray(self, left: RArray) -> bool:
        return (left == self.right)

    def visit_rvoid(self, left: RVoid) -> bool:
        return isinstance(self.right, RVoid)
class ValidateTest(TestCase):
    """Regression tests guarding against schema mutation during validation."""

    def test_validate_does_not_mutate_schema_adding_nullable_key(self):
        """validate() must not inject a 'nullable' key into the caller's schema."""
        schema = {'type': 'object', 'properties': {'email': {'type': 'string'}, 'enabled': {'type': 'boolean'}}, 'example': {'enabled': False, 'email': ''}}
        validate({'email': ''}, schema)
        # assertNotIn gives a clearer failure message than assertTrue(... not in ...).
        self.assertNotIn('nullable', schema['properties']['email'])
def _generate_indices(left_index: np.ndarray, right_index: np.ndarray, conditions: list[tuple[(pd.Series, pd.Series, str)]]) -> tuple:
    """Progressively filter paired row indices by a list of join conditions.

    Each condition is ``(left_series, right_series, op_name)``; ``op_name`` is
    looked up in the module-level ``operator_map``.  Returns the surviving
    ``(left_index, right_index)`` pair, or ``None`` as soon as a condition
    matches no rows.
    """
    for condition in conditions:
        (left, right, op) = condition
        # Align both sides on the current candidate row positions.
        left = left._values[left_index]
        right = right._values[right_index]
        op = operator_map[op]
        mask = op(left, right)
        if (not mask.any()):
            # No pair satisfies this condition: the join result is empty.
            return None
        if is_extension_array_dtype(mask):
            # Nullable boolean mask: missing comparisons count as False.
            mask = mask.to_numpy(dtype=bool, na_value=False)
        if (not mask.all()):
            # Narrow the index pair before evaluating the next condition.
            left_index = left_index[mask]
            right_index = right_index[mask]
    return (left_index, right_index)
class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
    """Model output container with pooled output, cached key/values and
    (cross-)attention weights.  All fields default to None; which ones are
    populated depends on the model's forward-time configuration.
    """

    # Hidden states of the last layer.
    last_hidden_state: torch.FloatTensor = None
    # Pooled representation produced by the pooler head.
    pooler_output: torch.FloatTensor = None
    # Per-layer hidden states, when requested.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Cached key/value tensors for incremental decoding.
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    # Per-layer self-attention weights, when requested.
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer cross-attention weights (encoder-decoder setups).
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
class DiracConv(nn.Module):
    """Pre-activation convolution block: ReLU followed by a biased 2D conv."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(DiracConv, self).__init__()
        # Activation precedes the convolution (pre-activation ordering).
        self.activ = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=True)

    def forward(self, x):
        return self.conv(self.activ(x))
@pytest.fixture(name='module', params=[DataclassModule(dataclass=attr.dataclass, fields=attr.fields, field=attr.ib), DataclassModule(dataclass=dataclasses.dataclass, fields=dataclasses.fields, field=dataclasses.field)], ids=['attrs', 'dataclasses'])
def dataclass_param(request: _pytest.fixtures.SubRequest) -> DataclassModule:
    """Parametrized fixture yielding the attrs- and dataclasses-backed shims.

    Restored the ``@pytest.fixture(...)`` decorator that was truncated to a
    bare argument tuple in this copy.
    """
    module = t.cast(DataclassModule, request.param)
    return module
class InstanceL2Norm(nn.Module):
    """Per-sample L2 normalisation scaled by a constant factor.

    With ``size_average`` the norm is averaged over the number of elements so
    the output magnitude is independent of the feature-map size.
    """

    def __init__(self, size_average=True, eps=1e-05, scale=1.0):
        super().__init__()
        self.size_average = size_average
        self.eps = eps
        self.scale = scale

    def forward(self, input):
        # Per-sample sum of squares over all channel/spatial elements.
        sq_sum = torch.sum((input * input).view(input.shape[0], 1, 1, (- 1)), dim=3, keepdim=True) + self.eps
        if self.size_average:
            n_elem = input.shape[1] * input.shape[2] * input.shape[3]
            return input * (self.scale * (n_elem / sq_sum).sqrt())
        return input * (self.scale / sq_sum.sqrt())
class ReductionCell0(nn.Module):
    """NASNet-style reduction cell: five branch combinations of separable convs
    and pooling applied to the current (x) and previous (x_prev) feature maps,
    reducing spatial resolution."""

    def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):
        super(ReductionCell0, self).__init__()
        # 1x1 projections bring both inputs to the cell's working widths.
        self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type)
        self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type)
        # Branch pairs per the NASNet search-space wiring (kernel, stride).
        self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type)
        self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type)
        self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type)
        self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type)
        self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type)
        self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type)
        self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
        self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
        self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type)

    def forward(self, x, x_prev):
        """x: current features; x_prev: features from the preceding cell."""
        x_left = self.conv_prev_1x1(x_prev)
        x_right = self.conv_1x1(x)
        # Each comb output is the sum of its left/right branches.
        x_comb_iter_0_left = self.comb_iter_0_left(x_right)
        x_comb_iter_0_right = self.comb_iter_0_right(x_left)
        x_comb_iter_0 = (x_comb_iter_0_left + x_comb_iter_0_right)
        x_comb_iter_1_left = self.comb_iter_1_left(x_right)
        x_comb_iter_1_right = self.comb_iter_1_right(x_left)
        x_comb_iter_1 = (x_comb_iter_1_left + x_comb_iter_1_right)
        x_comb_iter_2_left = self.comb_iter_2_left(x_right)
        x_comb_iter_2_right = self.comb_iter_2_right(x_left)
        x_comb_iter_2 = (x_comb_iter_2_left + x_comb_iter_2_right)
        # Combs 3 and 4 consume intermediate comb outputs.
        x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
        x_comb_iter_3 = (x_comb_iter_3_right + x_comb_iter_1)
        x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
        x_comb_iter_4_right = self.comb_iter_4_right(x_right)
        x_comb_iter_4 = (x_comb_iter_4_left + x_comb_iter_4_right)
        # Branches 1-4 are concatenated along the channel dimension.
        x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class Unet(SegmentationModel):
    """U-Net segmentation model: an encoder backbone plus a skip-connected
    decoder and a segmentation head.  When ``aux_params`` is given, an extra
    classification head is attached to the deepest encoder feature."""

    def __init__(self, encoder_name: str='resnet34', encoder_depth: int=5, encoder_weights: Optional[str]='imagenet', decoder_use_batchnorm: bool=True, decoder_channels: List[int]=(256, 128, 64, 32, 16), decoder_attention_type: Optional[str]=None, in_channels: int=3, classes: int=1, activation: Optional[Union[(str, callable)]]=None, aux_params: Optional[dict]=None):
        super().__init__()
        self.encoder = get_encoder(encoder_name, in_channels=in_channels, depth=encoder_depth, weights=encoder_weights)
        # VGG encoders get a decoder "center" block (no initial pooled stage).
        self.decoder = UnetDecoder(encoder_channels=self.encoder.out_channels, decoder_channels=decoder_channels, n_blocks=encoder_depth, use_batchnorm=decoder_use_batchnorm, center=(True if encoder_name.startswith('vgg') else False), attention_type=decoder_attention_type)
        self.segmentation_head = SegmentationHead(in_channels=decoder_channels[(- 1)], out_channels=classes, activation=activation, kernel_size=3)
        if (aux_params is not None):
            self.classification_head = ClassificationHead(in_channels=self.encoder.out_channels[(- 1)], **aux_params)
        else:
            self.classification_head = None
        self.name = 'u-{}'.format(encoder_name)
        # Base-class weight initialization for decoder and heads.
        self.initialize()
class DataloaderSkipNoneWrapper(DataloaderWrapper):
    """Wrap a dataloader and transparently skip batches that are ``None``."""

    def __init__(self, dataloader: Iterable) -> None:
        super().__init__(dataloader)

    def __iter__(self) -> Iterator[Any]:
        # Start a fresh pass over the wrapped dataloader.
        self._iter = iter(self.dataloader)
        return self

    def __next__(self) -> Any:
        # Keep pulling until a non-None batch appears; StopIteration propagates.
        batch = next(self._iter)
        while batch is None:
            batch = next(self._iter)
        return batch
_info
def Hawkeye_file(filename):
    """Read URLs (one per line) from *filename* and scan them with a worker pool.

    Returns the shared results list on success, or None when the file is
    empty or cannot be processed.
    """
    res = Manager().list([])
    p = Pool(30)
    q = Manager().Queue()
    try:
        # with-statement guarantees the handle is closed even on error
        # (the original left the file open when readlines() raised).
        with open(filename, 'r') as f:
            urls = f.readlines()
        print('~:{}'.format(len(urls)))
        if urls:
            for i in urls:
                p.apply_async(Get_tile_file, args=(urlcheck(i.replace('\n', '')), res, q))
            p.close()
            p.join()
            return res
        else:
            print(',w()w')
            return None
    except Exception:
        # Narrowed from a bare except (which also swallowed KeyboardInterrupt);
        # kept broad intentionally — this is a best-effort batch scan.
        print(' w()w')
        return None
class lazy_dict(Dict[(_K, _V)]):
    """Dict that computes absent values on demand via ``f_value`` and caches them."""

    def __init__(self, d, f_value):
        dict.__init__(self, d)
        self.f_value = f_value

    def __missing__(self, key: _K) -> _V:
        # Compute, memoise, then return the value for an absent key.
        computed = self.f_value(key)
        self[key] = computed
        return computed

    def __repr__(self):
        return '{}({}, f_value={!r})'.format(type(self).__qualname__, dict.__repr__(self), self.f_value)

    def _repr_pretty_(self, p, cycle):
        # IPython pretty-printer hook: renders as ClassName({...}, f_value=...).
        opener = type(self).__qualname__ + '('
        with p.group(len(opener), opener, ')'):
            p.type_pprinters[dict](self, p, cycle)
            p.text(',')
            p.breakable()
            p.text('f_value={}'.format(self.f_value))
def test_mouse_release_event_when_rotate_action_zero(view, item):
    """A rotation gesture that ends with zero net rotation must not push an
    undo command onto the scene's undo stack."""
    view.scene.addItem(item)
    event = MagicMock()
    event.scenePos.return_value = QtCore.QPointF(15, 25)
    # Put the item into an active rotation whose net angle change is zero.
    item.rotate_active = True
    item.rotate_orig_degrees = 0
    item.rotate_start_angle = (- 45)
    item.event_anchor = QtCore.QPointF(10, 20)
    view.scene.undo_stack = MagicMock(push=MagicMock())
    item.mouseReleaseEvent(event)
    # No-op rotation: nothing recorded, rotation mode cleared, event consumed.
    view.scene.undo_stack.push.assert_not_called()
    assert (item.rotate_active is False)
    event.accept.assert_called_once_with()
class CGBlock(dict):
    """MDF4 channel-group (##CG) block parsed into a dict of its fields."""

    def __init__(self, fid=None, pointer=None):
        # fid may be omitted to build an empty block purely for writing.
        if (fid is not None):
            self.read_cg(fid, pointer)

    def read_cg(self, fid, pointer):
        """Read and parse the ##CG block found at *pointer* in open file *fid*."""
        fid.seek(pointer)
        self['pointer'] = pointer
        # Fixed header plus link section (72 bytes): id, reserved, length,
        # link_count and six link pointers.
        (self['id'], self['reserved'], self['length'], self['link_count'], self['cg_cg_next'], self['cg_cn_first'], self['cg_tx_acq_name'], self['cg_si_acq_source'], self['cg_sr_first'], self['cg_md_comment']) = _CGStruct1.unpack(fid.read(72))
        if (self['link_count'] > 6):
            # Extra 7th link: presumably the master channel-group reference —
            # TODO confirm against the MDF 4.x specification.
            self['cg_cg_master'] = unpack(_LINK, fid.read(8))[0]
        # Data section (32 bytes).
        (self['cg_record_id'], self['cg_cycle_count'], self['cg_flags'], self['cg_path_separator'], self['cg_reserved'], self['cg_data_bytes'], self['cg_invalid_bytes']) = _CGStruct2.unpack(fid.read(32))
        if self['cg_md_comment']:
            # Attached metadata/comment block.
            self['Comment'] = CommentBlock()
            self['Comment'].read_cm_cg(fid=fid, pointer=self['cg_md_comment'])
        if self['cg_tx_acq_name']:
            # Acquisition name stored in a TX block.
            self['acq_name'] = {}
            comment = CommentBlock()
            comment.read_tx(fid=fid, pointer=self['cg_tx_acq_name'])
            self['acq_name'].update(comment)
        if self['cg_si_acq_source']:
            # Acquisition source information (SI block).
            self['acq_source'] = {}
            si = SIBlock()
            si.read_si(fid=fid, pointer=self['cg_si_acq_source'])
            self['acq_source'].update(si)

    def write(self, fid):
        """Serialize this block to *fid*.

        NOTE(review): read_cg stores 'cg_invalid_bytes' while write() reads
        'cg_inval_bytes' (and 'CN'), so callers must set those keys before
        writing — confirm this asymmetry is intended.
        """
        if (('cg_cg_master' in self) and (self['cg_cg_master'] is not None)):
            # 7-link variant including the master channel-group link.
            data_bytes = (b'##CG', 0, self['length'], 7, 0, self['CN'], 0, 0, 0, 0, self['cg_cg_master'], 0, self['cg_cycle_count'], 8, 0, (b'\x00' * 4), self['cg_data_bytes'], self['cg_inval_bytes'])
            fid.write(pack('<4sI2Q9Q2H4s2I', *data_bytes))
        else:
            # Standard 6-link variant.
            data_bytes = (b'##CG', 0, self['length'], 6, 0, self['CN'], 0, 0, 0, 0, 0, self['cg_cycle_count'], 0, 0, (b'\x00' * 4), self['cg_data_bytes'], self['cg_inval_bytes'])
            fid.write(pack('<4sI2Q8Q2H4s2I', *data_bytes))
class Stem(nn.Sequential):
    """Convolutional stem: a short stack of stride-2/1 convolutions, optionally
    followed by a max pool, reducing input resolution by *stride* overall and
    recording intermediate feature taps in ``feature_info``."""

    def __init__(self, in_chs, out_chs, kernel_size=3, stride=4, pool='maxpool', num_rep=3, num_act=None, chs_decay=0.5, layers: LayerFn=None):
        super().__init__()
        assert (stride in (2, 4))
        layers = (layers or LayerFn())
        if isinstance(out_chs, (list, tuple)):
            # Explicit per-conv widths; the list length dictates the rep count.
            num_rep = len(out_chs)
            stem_chs = out_chs
        else:
            # Geometrically decaying widths, ending at out_chs.
            stem_chs = [round((out_chs * (chs_decay ** i))) for i in range(num_rep)][::(- 1)]
        self.stride = stride
        self.feature_info = []
        prev_feat = ''
        # First conv always strides; without a pool the last conv strides too
        # so the overall reduction still reaches `stride`.
        stem_strides = ([2] + ([1] * (num_rep - 1)))
        if ((stride == 4) and (not pool)):
            stem_strides[(- 1)] = 2
        # Only the last num_act convs carry norm+activation.
        num_act = (num_rep if (num_act is None) else num_act)
        stem_norm_acts = (([False] * (num_rep - num_act)) + ([True] * num_act))
        prev_chs = in_chs
        curr_stride = 1
        for (i, (ch, s, na)) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)):
            layer_fn = (layers.conv_norm_act if na else create_conv2d)
            conv_name = f'conv{(i + 1)}'
            if ((i > 0) and (s > 1)):
                # Record a feature tap just before each extra downsampling step.
                self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
            self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s))
            prev_chs = ch
            curr_stride *= s
            prev_feat = conv_name
        if (pool and ('max' in pool.lower())):
            self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
            self.add_module('pool', nn.MaxPool2d(3, 2, 1))
            curr_stride *= 2
            prev_feat = 'pool'
        # Final tap describes the stem output.
        self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
        assert (curr_stride == stride)
def is_boolean(value, arg_name, logger=None):
    """Return True if *value* is a bool; otherwise report an error and return False.

    The error goes to *logger* when one is supplied, else to stdout.
    """
    # Guard clause: valid input needs no reporting.
    if isinstance(value, bool):
        return True
    if logger:
        logger.error(f'''Invalid value for the argument '{arg_name}': {value}. Specify a boolean.
''')
    else:
        print(f'''ERROR: Invalid value for the argument '{arg_name}': {value}. Specify a boolean.
''')
    return False
class PublisherFallbackAdsView(FallbackAdsMixin, PublisherAccessMixin, UserPassesTestMixin, DetailView):
    """List the fallback (publisher house-campaign) ads for a publisher."""

    model = Flight
    template_name = 'adserver/publisher/fallback-ads-list.html'

    def dispatch(self, request, *args, **kwargs):
        # Resolve the publisher and its house-campaign flight up front so the
        # other handlers can rely on them.
        self.publisher = get_object_or_404(Publisher, slug=self.kwargs['publisher_slug'])
        self.advertiser = Advertiser.objects.filter(publisher=self.publisher).first()
        self.flight = Flight.objects.filter(campaign__advertiser=self.advertiser, campaign__campaign_type=PUBLISHER_HOUSE_CAMPAIGN).first()
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({'publisher': self.publisher, 'advertisement_list': self.flight.advertisements.order_by('-live', 'name')})
        return context

    def get_object(self, queryset=None):
        if ((not self.advertiser) or (not self.flight)):
            # Fixed: the format string had a %s placeholder but no argument.
            log.error('Publisher %s is not set up correctly for fallback ads.', self.publisher)
            raise Http404
        return self.flight
class ISSampler(BatchSampler):
    """Batch sampler that alternates plain rollouts with importance-sampled
    reuse of paths gathered under earlier policy snapshots."""

    def __init__(self, algo, n_backtrack='all', n_is_pretrain=0, init_is=0, skip_is_itrs=False, hist_variance_penalty=0.0, max_is_ratio=0, ess_threshold=0):
        # n_backtrack: how many historical snapshots to reuse ('all' = every one).
        # n_is_pretrain: number of initial iterations forced to use IS samples.
        # init_is: starting phase (0/1) of the IS/fresh-sampling alternation.
        # hist_variance_penalty: inflates historical log_std for weight stability.
        # max_is_ratio: clip on individual importance weights (0 disables clipping).
        # ess_threshold: reject reweighted batches with too-low effective sample size.
        self.n_backtrack = n_backtrack
        self.n_is_pretrain = n_is_pretrain
        self.skip_is_itrs = skip_is_itrs
        self.hist_variance_penalty = hist_variance_penalty
        self.max_is_ratio = max_is_ratio
        self.ess_threshold = ess_threshold
        self._hist = []
        self._is_itr = init_is
        super(ISSampler, self).__init__(algo)

    def history(self):
        """Return the stored (policy_distribution, paths) snapshots."""
        return self._hist

    def add_history(self, policy_distribution, paths):
        """Record a snapshot of the sampling policy and the paths it produced."""
        self._hist.append((policy_distribution, paths))

    def get_history_list(self, n_past='all'):
        """Return the most recent *n_past* snapshots (all by default)."""
        if (n_past == 'all'):
            return self._hist
        return self._hist[(- min(n_past, len(self._hist))):]

    def obtain_samples(self, itr):
        """Return paths for iteration *itr*, alternating IS and fresh sampling."""
        if (itr < self.n_is_pretrain):
            # Pretraining phase: always reuse history with importance weights.
            paths = self.obtain_is_samples(itr)
            return paths
        if (self._is_itr and (not self.skip_is_itrs)):
            paths = self.obtain_is_samples(itr)
        else:
            paths = super(ISSampler, self).obtain_samples(itr)
            if (not self.skip_is_itrs):
                # Remember fresh paths and the policy that generated them.
                self.add_history(self.algo.policy.distribution, paths)
        # Flip the IS/fresh phase for the next iteration.
        self._is_itr = ((self._is_itr + 1) % 2)
        return paths

    def obtain_is_samples(self, itr):
        """Build a batch by importance-reweighting paths drawn from history."""
        paths = []
        for (hist_policy_distribution, hist_paths) in self.get_history_list(self.n_backtrack):
            h_paths = self.sample_isweighted_paths(policy=self.algo.policy, hist_policy_distribution=hist_policy_distribution, max_samples=self.algo.batch_size, max_path_length=self.algo.max_path_length, paths=hist_paths, hist_variance_penalty=self.hist_variance_penalty, max_is_ratio=self.max_is_ratio, ess_threshold=self.ess_threshold)
            paths.extend(h_paths)
        if (len(paths) > self.algo.batch_size):
            # Subsample down to one batch worth of paths.
            paths = random.sample(paths, self.algo.batch_size)
        if self.algo.whole_paths:
            return paths
        else:
            paths_truncated = parallel_sampler.truncate_paths(paths, self.algo.batch_size)
            return paths_truncated

    def sample_isweighted_paths(self, policy, hist_policy_distribution, max_samples, max_path_length=100, paths=None, randomize_draw=False, hist_variance_penalty=0.0, max_is_ratio=10, ess_threshold=0):
        """Reweight a draw of historical *paths* by the likelihood ratio between
        the current policy and the historical one; optionally clip weights and
        reject batches whose effective sample size falls below *ess_threshold*."""
        if (not paths):
            return []
        n_paths = len(paths)
        n_samples = min(len(paths), max_samples)
        if randomize_draw:
            samples = random.sample(paths, n_samples)
        elif paths:
            if (n_samples == len(paths)):
                samples = paths
            else:
                # Contiguous random slice keeps the draw's temporal ordering.
                start = random.randint(0, (len(paths) - n_samples))
                samples = paths[start:(start + n_samples)]
        # Deep copy so the in-place reward reweighting below does not corrupt
        # the stored history.
        samples = copy.deepcopy(samples)
        if (ess_threshold > 0):
            is_weights = []
        dist1 = policy.distribution
        dist2 = hist_policy_distribution
        for path in samples:
            (_, agent_infos) = policy.get_actions(path['observations'])
            hist_agent_infos = path['agent_infos']
            if (hist_variance_penalty > 0):
                # Widen the historical distribution to damp extreme ratios.
                hist_agent_infos['log_std'] += log((1.0 + hist_variance_penalty))
            path['agent_infos'] = agent_infos
            loglike_p = dist1.log_likelihood(path['actions'], agent_infos)
            loglike_hp = dist2.log_likelihood(path['actions'], hist_agent_infos)
            # Path-level importance weight: exp of the summed log-ratio.
            is_ratio = exp((sum(loglike_p) - sum(loglike_hp)))
            if (max_is_ratio > 0):
                is_ratio = min(is_ratio, max_is_ratio)
            if (ess_threshold > 0):
                is_weights.append(is_ratio)
            path['rewards'] *= is_ratio
        if ess_threshold:
            # Effective-sample-size test over the collected weights.
            if (kong_ess(is_weights) < ess_threshold):
                return []
        return samples
def add_model_args(parser: ArgumentParser) -> None:
    """Register surrogate-model CLI options on *parser*: model family choice
    plus hyperparameters for the RF, GP and NN/MPNN variants."""
    parser.add_argument('--model', choices=('rf', 'gp', 'nn', 'mpn'), default='rf', help='the model type to use')
    parser.add_argument('--test-batch-size', type=int, help='the size of batch of predictions during model inference. NOTE: This has nothing to do with model training/performance and might only affect the timing of the inference step. It is only useful to play with this parameter if performance is absolutely critical.')
    parser.add_argument('--retrain-from-scratch', action='store_true', default=False, help='whether the model should be retrained from scratch at each iteration as opposed to retraining online.')
    parser.add_argument('--model-seed', type=int, help='the random seed to use for model initialization. Not specifying will result in random model initializations each time the model is trained.')
    # Random-forest hyperparameters.
    parser.add_argument('--n-estimators', type=int, default=100, help='the number of trees in the forest')
    parser.add_argument('--max-depth', nargs='?', type=int, const=None, default=8, help='the maximum depth of the tree. Not specifying this argument at all will default to 8. Adding the flag without specifying number a number will default to an unlimited depth')
    parser.add_argument('--min-samples-leaf', type=int, default=1, help='the minimum number of samples required to be at a leaf node')
    # Gaussian-process hyperparameters.
    parser.add_argument('--gp-kernel', choices={'dotproduct'}, default='dotproduct', help='Kernel to use for Gaussian Process model')
    # NN/MPNN training options.
    parser.add_argument('--init-lr', type=float, default=0.0001, help='the initial learning rate for the MPNN model')
    parser.add_argument('--max-lr', type=float, default=0.001, help='the maximum learning rate for the MPNN model')
    parser.add_argument('--final-lr', type=float, default=0.0001, help='the final learning rate for the MPNN model')
    parser.add_argument('--conf-method', default='none', choices={'ensemble', 'twooutput', 'mve', 'dropout', 'none'}, help='Confidence estimation method for NN/MPNN models')
    parser.add_argument('--ddp', action='store_true', default=False, help='Whether to perform distributed MPN training over a multi-GPU setup via PyTorch DDP. Currently only works with CUDA >= 11.0')
    parser.add_argument('--precision', type=int, default=32, choices=(16, 32), help='the precision to use when training PyTorch models in number of bits. Native precision is 32, but 16-bit precision can lead to lower memory footprint during training and faster training times on Volta GPUs. DO NOT use 16-bit precision on non-Volta GPUs. Currently only supported for single-GPU training (i.e., ddp=False)')
def makeItemTree(stack, title):
    """Build a QTreeWidgetItem tree: a bold top-level item titled *title* with
    one child per *stack* entry; returns the flat list [top, child, ...].

    Entries may be ready-made QTreeWidgetItems or (text, frame) tuples.
    """
    root = QtWidgets.QTreeWidgetItem([title])
    root.frame = None
    # Emphasize the top-level item.
    bold_font = root.font(0)
    bold_font.setWeight(bold_font.Weight.Bold)
    root.setFont(0, bold_font)

    result = [root]
    for entry in stack:
        if isinstance(entry, QtWidgets.QTreeWidgetItem):
            child = entry
        else:
            text, frame = entry
            child = QtWidgets.QTreeWidgetItem([text.rstrip()])
            child.frame = frame
        root.addChild(child)
        result.append(child)
    return result
def update():
    """Run 100 training episodes against the module-level env and RL agent."""
    for episode in range(100):
        observation = env.reset()
        while True:
            env.render()
            # Choose an action for the current (stringified) state.
            action = RL.choose_action(str(observation))
            (observation_, reward, done) = env.step(action)
            # Update the agent from the observed transition.
            RL.learn(str(observation), action, reward, str(observation_))
            observation = observation_
            if done:
                break
    print('game over')
    env.destroy()
class RecentFilesView(QtWidgets.QListView):
    """Clickable list of recently opened files; a click opens the file in *view*."""

    def __init__(self, parent, view, files=None):
        super().__init__(parent)
        self.view = view
        self.files = (files or [])
        self.clicked.connect(self.on_clicked)
        self.setModel(RecentFilesModel(self.files))
        # Needed so mouseMoveEvent fires without a button held down.
        self.setMouseTracking(True)

    def on_clicked(self, index):
        """Open the file corresponding to the clicked row."""
        self.view.open_from_file(self.files[index.row()])

    def update_files(self, files):
        """Replace the backing file list and refresh the model/view."""
        self.files = files
        self.model().files = files
        self.reset()

    def sizeHint(self):
        """Size the widget to exactly fit all rows and the widest column."""
        size = QtCore.QSize()
        # +2 px padding per row and on the overall width.
        height = sum(((self.sizeHintForRow(i) + 2) for i in range(len(self.files))))
        width = max((self.sizeHintForColumn(i) for i in range(len(self.files))))
        size.setHeight(height)
        size.setWidth((width + 2))
        return size

    def mouseMoveEvent(self, event):
        # Show a pointing-hand cursor while hovering over an entry.
        index = self.indexAt(QtCore.QPoint(int(event.position().x()), int(event.position().y())))
        if index.isValid():
            self.setCursor(Qt.CursorShape.PointingHandCursor)
        else:
            self.setCursor(Qt.CursorShape.ArrowCursor)
        super().mouseMoveEvent(event)
class Post(ContentManageable):
    """Community post with a media type and a publication status."""

    title = models.CharField(max_length=200, blank=True, null=True)
    content = MarkupField(default_markup_type=DEFAULT_MARKUP_TYPE)
    abstract = models.TextField(blank=True, null=True)

    # Media type of the post's primary content.
    MEDIA_TEXT = 1
    MEDIA_PHOTO = 2
    MEDIA_VIDEO = 3
    MEDIA_LINK = 4
    MEDIA_CHOICES = ((MEDIA_TEXT, 'text'), (MEDIA_PHOTO, 'photo'), (MEDIA_VIDEO, 'video'), (MEDIA_LINK, 'link'))
    media_type = models.IntegerField(choices=MEDIA_CHOICES, default=MEDIA_TEXT)
    # Origin URL of linked/embedded content.
    source_url = models.URLField(max_length=1000, blank=True)
    meta = JSONField(blank=True, default=dict)

    # Visibility: posts start private until made public.
    STATUS_PRIVATE = 1
    STATUS_PUBLIC = 2
    STATUS_CHOICES = ((STATUS_PRIVATE, 'private'), (STATUS_PUBLIC, 'public'))
    status = models.IntegerField(choices=STATUS_CHOICES, default=STATUS_PRIVATE, db_index=True)
    objects = PostQuerySet.as_manager()

    class Meta():
        verbose_name = _('post')
        verbose_name_plural = _('posts')
        get_latest_by = 'created'
        ordering = ['-created']

    def __str__(self):
        return f'Post {self.get_media_type_display()} ({self.pk})'

    def get_absolute_url(self):
        """Canonical URL of this post's detail page."""
        return reverse('community:post_detail', kwargs={'pk': self.pk})
class DrawMav():
    """Draws a simplified MAV body as a pyqtgraph GL mesh and updates its pose.

    Fixed: the matrix products were written as ``(R translated_points)`` /
    ``(R points)`` — invalid syntax missing the ``@`` matmul operator.
    """

    def __init__(self, state, window):
        (self.mav_points, self.mav_meshColors) = self.get_points()
        # NED position; altitude is negated to get the 'down' component.
        mav_position = np.array([[state.north], [state.east], [(- state.altitude)]])
        R = Euler2Rotation(state.phi, state.theta, state.psi)
        rotated_points = self.rotate_points(self.mav_points, R)
        translated_points = self.translate_points(rotated_points, mav_position)
        # Axis swap/flip to match the GL view's coordinate convention.
        R = np.array([[0, 1, 0], [1, 0, 0], [0, 0, (- 1)]])
        translated_points = R @ translated_points  # fixed: missing '@'
        mesh = self.points_to_mesh(translated_points)
        self.mav_body = gl.GLMeshItem(vertexes=mesh, vertexColors=self.mav_meshColors, drawEdges=True, smooth=False, computeNormals=False)
        window.addItem(self.mav_body)

    def update(self, state):
        """Recompute the mesh for a new vehicle state and push it to the GL item."""
        mav_position = np.array([[state.north], [state.east], [(- state.altitude)]])
        R = Euler2Rotation(state.phi, state.theta, state.psi)
        rotated_points = self.rotate_points(self.mav_points, R)
        translated_points = self.translate_points(rotated_points, mav_position)
        R = np.array([[0, 1, 0], [1, 0, 0], [0, 0, (- 1)]])
        translated_points = R @ translated_points  # fixed: missing '@'
        mesh = self.points_to_mesh(translated_points)
        self.mav_body.setMeshData(vertexes=mesh, vertexColors=self.mav_meshColors)

    def rotate_points(self, points, R):
        """Rotate 3xN column points by rotation matrix R."""
        rotated_points = R @ points  # fixed: missing '@'
        return rotated_points

    def translate_points(self, points, translation):
        """Add a 3x1 translation to every column of the 3xN point array."""
        translated_points = (points + np.dot(translation, np.ones([1, points.shape[1]])))
        return translated_points

    def get_points(self):
        """Return placeholder geometry (3 scaled points) and per-face colors."""
        points = np.array([[0, 0, 0], [1, 1, 1], [1, 1, 0]]).T
        scale = 50
        points = (scale * points)
        red = np.array([1.0, 0.0, 0.0, 1])
        green = np.array([0.0, 1.0, 0.0, 1])
        blue = np.array([0.0, 0.0, 1.0, 1])
        yellow = np.array([1.0, 1.0, 0.0, 1])
        # 13 faces x 3 vertices x RGBA; only face 0 is assigned a color here.
        meshColors = np.empty((13, 3, 4), dtype=np.float32)
        meshColors[0] = yellow
        return (points, meshColors)

    def points_to_mesh(self, points):
        """Group columns of the 3xN point array into triangle vertex triples."""
        points = points.T
        mesh = np.array([[points[0], points[1], points[2]]])
        return mesh
@cmdutils.register(name='print')
@cmdutils.argument('tab', value=cmdutils.Value.count_tab)
@cmdutils.argument('pdf', flag='f', metavar='file')
def printpage(tab: Optional[apitypes.Tab], preview: bool=False, *, pdf: Optional[pathlib.Path]=None) -> None:
    """Print the current page, optionally as a preview or straight to PDF.

    Restored the command-registration decorators that were truncated to bare
    argument tuples in this copy.
    """
    if (tab is None):
        return
    try:
        if preview:
            _print_preview(tab)
        elif pdf:
            _print_pdf(tab, pdf)
        else:
            tab.printing.show_dialog()
    except apitypes.WebTabError as e:
        # Surface tab-level printing failures as command errors.
        raise cmdutils.CommandError(e)
class ChannelStateWaiter():
    """Polls the chain state until the channel with *partner_address* satisfies
    a given condition.

    NOTE(review): the attributes below are bare annotations with no __init__
    visible here — this class is presumably decorated as a dataclass where it
    is originally defined; confirm before constructing it directly.
    """

    raiden: 'RaidenService'
    retry_timeout: float
    token_network_registry_address: TokenNetworkRegistryAddress
    token_address: TokenAddress
    partner_address: Address

    def _get_channel_state(self, chain_state: ChainState) -> Optional[NettingChannelState]:
        # Resolve our channel with the partner within this chain-state snapshot.
        return _get_channel_state_by_partner_address(chain_state, self.token_network_registry_address, self.token_address, self.partner_address)

    def wait(self, condition: ChannelStateCondition) -> None:
        """Block, re-polling every *retry_timeout* seconds, until *condition* holds."""
        chain_state = views.state_from_raiden(self.raiden)
        while (not condition(chain_state, self._get_channel_state(chain_state))):
            # Fail loudly if the node or its alarm task died while waiting.
            assert self.raiden.is_running(), ALARM_TASK_ERROR_MSG
            assert self.raiden.alarm.is_running(), ALARM_TASK_ERROR_MSG
            log.debug('Waiting on channel', node=to_checksum_address(self.raiden.address), partner_address=to_checksum_address(self.partner_address), condition=condition)
            gevent.sleep(self.retry_timeout)
            chain_state = views.state_from_raiden(self.raiden)
def test(env, pg_reinforce, n=50):
    """Run *n* evaluation dialogs, returning per-dialog reward, length and success lists."""
    rewards = []
    lengths = []
    successes = []
    for _ in tqdm(range(n)):
        # The agent's reward buffer must be drained between dialogs.
        assert len(pg_reinforce.reward_buffer) == 0
        reward, dialog_len, success = run_one_dialog(env, pg_reinforce)
        assert success is not None
        rewards.append(reward)
        lengths.append(dialog_len)
        successes.append(int(success))
    return (rewards, lengths, successes)
def _create_splits(features_paths: List[Path], labels_dir_dict: Dict[(Task, Path)], splits_path: Path) -> None:
    """Partition sample names into train/validation/test/unlabeled splits and write them.

    Samples with no labels go to the unlabeled split; labeled samples are
    shuffled and divided by the module-level FRACTION_TRAIN / FRACTION_VALIDATION.
    """
    labels_path_dicts = _create_labels_path_dicts(features_paths, labels_dir_dict)
    # Fixed: the comprehension variable previously shadowed this list's name.
    has_labels_flags = [_any_labels_exist(labels_path_dict) for labels_path_dict in labels_path_dicts]
    all_names = [name_from_path(features_path) for features_path in features_paths]
    unlabeled_split = [name for (name, has_labels) in zip(all_names, has_labels_flags) if (not has_labels)]
    labeled_names = [name for (name, has_labels) in zip(all_names, has_labels_flags) if has_labels]
    n_labeled = len(labeled_names)
    # NOTE(review): seeding without an argument makes the split non-reproducible;
    # confirm whether a fixed seed is wanted here.
    random.seed()
    shuffled_labeled_names = random.sample(labeled_names, n_labeled)
    n_train = round((FRACTION_TRAIN * n_labeled))
    n_validation = round((FRACTION_VALIDATION * n_labeled))
    validation_end = (n_train + n_validation)
    train_split = sorted(shuffled_labeled_names[:n_train])
    validation_split = sorted(shuffled_labeled_names[n_train:validation_end])
    test_split = sorted(shuffled_labeled_names[validation_end:])
    splits = {SPLIT_KEY_TRAIN: train_split, SPLIT_KEY_VALIDATION: validation_split, SPLIT_KEY_TEST: test_split, SPLIT_KEY_UNLABELED: unlabeled_split}
    _write_splits(splits_path, splits)
class ServerLoggingFormatter(logging.Formatter):
    """Formatter that annotates records with request context: who made the
    request, where it went, and whether it arrived via Flask or SocketIO."""

    # Render log timestamps in UTC.
    converter = time.gmtime

    def format(self, record):
        if not flask.has_request_context():
            # Background / non-request logging.
            record.who = None
            record.where = None
            record.context = 'Free'
            return super().format(record)

        who = flask.request.remote_addr
        where = flask.request.url
        if hasattr(flask.request, 'sid'):
            # SocketIO request: prefer the authenticated user and message.
            record.context = 'SocketIO'
            user = getattr(flask.request, 'current_user', None)
            if user is not None:
                who = user.name
                where = getattr(flask.request, 'message', where)
        else:
            record.context = 'Flask'
        record.who = who
        record.where = where
        return super().format(record)
def upgrade(saveddata_engine):
    """Schema migration: probe for new columns and add them when missing (EAFP)."""
    try:
        # A DatabaseError on this probe means the columns do not exist yet.
        saveddata_engine.execute('SELECT defaultChar, chars FROM characters LIMIT 1')
    except sqlalchemy.exc.DatabaseError:
        saveddata_engine.execute('ALTER TABLE characters ADD COLUMN defaultChar INTEGER')
        saveddata_engine.execute('ALTER TABLE characters ADD COLUMN chars VARCHAR')
    try:
        saveddata_engine.execute('SELECT booster FROM fits LIMIT 1')
    except sqlalchemy.exc.DatabaseError:
        saveddata_engine.execute('ALTER TABLE fits ADD COLUMN booster BOOLEAN')
def run_pip(venv_dir, *args, quiet=False, **kwargs):
    """Run ``pip <args>`` with the virtualenv's interpreter, echoing the command.

    Extra **kwargs are forwarded to subprocess.run; raises on non-zero exit.
    """
    pip_args = list(args)
    if quiet:
        # Insert -q right after the pip subcommand (e.g. `pip install -q ...`).
        pip_args.insert(1, '-q')
    arg_str = ' '.join((str(arg) for arg in pip_args))
    utils.print_col('venv$ pip {}'.format(arg_str), 'blue')
    venv_python = get_venv_python(venv_dir)
    return subprocess.run(([venv_python, '-m', 'pip'] + pip_args), check=True, **kwargs)
@pytest.mark.timeout(60)
def test_upload_collection_generators(local_client, remote_client):
    """Upload the same generator-fed collection to both clients and compare.

    Restored the timeout marker that was truncated to a bare ``.timeout(60)``
    in this copy.
    """
    records = generate_fixtures(UPLOAD_NUM_VECTORS)
    vectors = []
    payload = []
    for record in records:
        vectors.append(record.vector)
        payload.append(record.payload)
    # Cycle so both uploads can consume the payload as an endless generator.
    payload = itertools.cycle(payload)
    local_client.upload_collection(COLLECTION_NAME, vectors, payload, ids=itertools.count())
    remote_client.upload_collection(COLLECTION_NAME, vectors, payload, ids=itertools.count())
    compare_collections(local_client, remote_client, UPLOAD_NUM_VECTORS)
class ElectronicStructureResult(EigenstateResult):
    """Electronic-structure calculation result: energies and dipole moments.

    Restored the ``@property`` / ``@<name>.setter`` decorators that were
    mangled in this copy (setter lines appeared as e.g. ``_fock_energy.setter``).
    """

    @property
    def hartree_fock_energy(self) -> float:
        """Return the Hartree-Fock reference energy."""
        return self.get('hartree_fock_energy')

    @hartree_fock_energy.setter
    def hartree_fock_energy(self, value: float) -> None:
        self.data['hartree_fock_energy'] = value

    @property
    def nuclear_repulsion_energy(self) -> Optional[float]:
        """Return the nuclear repulsion energy, if available."""
        return self.get('nuclear_repulsion_energy')

    @nuclear_repulsion_energy.setter
    def nuclear_repulsion_energy(self, value: float) -> None:
        self.data['nuclear_repulsion_energy'] = value

    @property
    def nuclear_dipole_moment(self) -> Optional[DipoleTuple]:
        """Return the nuclear dipole moment, if available."""
        return self.get('nuclear_dipole_moment')

    @nuclear_dipole_moment.setter
    def nuclear_dipole_moment(self, value: DipoleTuple) -> None:
        self.data['nuclear_dipole_moment'] = value

    @property
    def total_energies(self) -> np.ndarray:
        """Electronic energies plus nuclear repulsion (0 when repulsion is unknown)."""
        nre = (self.nuclear_repulsion_energy if (self.nuclear_repulsion_energy is not None) else 0)
        return (self.electronic_energies + nre)

    @property
    def electronic_energies(self) -> np.ndarray:
        """Computed energies plus particle-hole and frozen-core extracted parts."""
        return ((self.computed_energies + self.ph_extracted_energy) + self.frozen_extracted_energy)

    @property
    def computed_energies(self) -> np.ndarray:
        return self.get('computed_energies')

    @computed_energies.setter
    def computed_energies(self, value: np.ndarray) -> None:
        self.data['computed_energies'] = value

    @property
    def ph_extracted_energy(self) -> float:
        return self.get('ph_extracted_energy')

    @ph_extracted_energy.setter
    def ph_extracted_energy(self, value: float) -> None:
        self.data['ph_extracted_energy'] = value

    @property
    def frozen_extracted_energy(self) -> float:
        return self.get('frozen_extracted_energy')

    @frozen_extracted_energy.setter
    def frozen_extracted_energy(self, value: float) -> None:
        self.data['frozen_extracted_energy'] = value

    def has_dipole(self) -> bool:
        """True when both nuclear and electronic dipole moments are present."""
        return ((self.nuclear_dipole_moment is not None) and (self.electronic_dipole_moment is not None))

    @property
    def reverse_dipole_sign(self) -> bool:
        return self.get('reverse_dipole_sign')

    @reverse_dipole_sign.setter
    def reverse_dipole_sign(self, value: bool) -> None:
        self.data['reverse_dipole_sign'] = value

    @property
    def total_dipole_moment(self) -> Optional[List[float]]:
        """Magnitude of the total dipole per step; None where a component is unknown."""
        if (self.dipole_moment is None):
            return None
        tdm: List[float] = []
        for dip in self.dipole_moment:
            if np.any(np.equal(list(dip), None)):
                # Any missing component makes the magnitude undefined.
                tdm.append(None)
            else:
                tdm.append(np.sqrt(np.sum(np.power(list(dip), 2))))
        return tdm

    @property
    def total_dipole_moment_in_debye(self) -> Optional[List[float]]:
        """Total dipole magnitudes converted from atomic units to Debye."""
        tdm = self.total_dipole_moment
        if (tdm is None):
            return None
        return [(dip / QMolecule.DEBYE) for dip in tdm]

    @property
    def dipole_moment(self) -> Optional[List[DipoleTuple]]:
        """Nuclear plus (optionally sign-reversed) electronic dipole per step."""
        edm = self.electronic_dipole_moment
        if self.reverse_dipole_sign:
            edm = [cast(DipoleTuple, tuple(((((- 1) * x) if (x is not None) else None) for x in dip))) for dip in edm]
        return [_dipole_tuple_add(dip, self.nuclear_dipole_moment) for dip in edm]

    @property
    def dipole_moment_in_debye(self) -> Optional[List[DipoleTuple]]:
        """Dipole moments converted component-wise from atomic units to Debye."""
        dipm = self.dipole_moment
        if (dipm is None):
            return None
        dipmd = []
        for dip in dipm:
            dipmd0 = ((dip[0] / QMolecule.DEBYE) if (dip[0] is not None) else None)
            dipmd1 = ((dip[1] / QMolecule.DEBYE) if (dip[1] is not None) else None)
            dipmd2 = ((dip[2] / QMolecule.DEBYE) if (dip[2] is not None) else None)
            dipmd += [(dipmd0, dipmd1, dipmd2)]
        return dipmd

    @property
    def electronic_dipole_moment(self) -> Optional[List[DipoleTuple]]:
        """Computed plus particle-hole and frozen-core extracted dipoles per step."""
        return [_dipole_tuple_add(comp_dip, _dipole_tuple_add(ph_dip, frozen_dip)) for (comp_dip, ph_dip, frozen_dip) in zip(self.computed_dipole_moment, self.ph_extracted_dipole_moment, self.frozen_extracted_dipole_moment)]

    @property
    def computed_dipole_moment(self) -> Optional[List[DipoleTuple]]:
        return self.get('computed_dipole_moment')

    @computed_dipole_moment.setter
    def computed_dipole_moment(self, value: List[DipoleTuple]) -> None:
        self.data['computed_dipole_moment'] = value

    @property
    def ph_extracted_dipole_moment(self) -> Optional[List[DipoleTuple]]:
        return self.get('ph_extracted_dipole_moment')

    @ph_extracted_dipole_moment.setter
    def ph_extracted_dipole_moment(self, value: List[DipoleTuple]) -> None:
        self.data['ph_extracted_dipole_moment'] = value

    @property
    def frozen_extracted_dipole_moment(self) -> Optional[List[DipoleTuple]]:
        return self.get('frozen_extracted_dipole_moment')

    @frozen_extracted_dipole_moment.setter
    def frozen_extracted_dipole_moment(self, value: List[DipoleTuple]) -> None:
        self.data['frozen_extracted_dipole_moment'] = value

    def has_observables(self):
        """True when any auxiliary observable (angular momentum, particle
        number, magnetization) is present."""
        return ((self.total_angular_momentum is not None) or (self.num_particles is not None) or (self.magnetization is not None))
def total_angular_momentum(self) -> Optional[List[float]]:
return self.get('total_angular_momentum')
_angular_momentum.setter
def total_angular_momentum(self, value: List[float]) -> None:
self.data['total_angular_momentum'] = value
def spin(self) -> Optional[List[float]]:
if (self.total_angular_momentum is None):
return None
spin = []
for total_angular_momentum in self.total_angular_momentum:
spin.append((((- 1.0) + np.sqrt((1 + (4 * total_angular_momentum)))) / 2))
return spin
def num_particles(self) -> Optional[List[float]]:
return self.get('num_particles')
_particles.setter
def num_particles(self, value: List[float]) -> None:
self.data['num_particles'] = value
def magnetization(self) -> Optional[List[float]]:
return self.get('magnetization')
def magnetization(self, value: List[float]) -> None:
self.data['magnetization'] = value
def __str__(self) -> str:
return '\n'.join(self.formatted)
def formatted(self) -> List[str]:
lines = []
lines.append('=== GROUND STATE ENERGY ===')
lines.append(' ')
lines.append('* Electronic ground state energy (Hartree): {}'.format(round(self.electronic_energies[0], 12)))
lines.append(' - computed part: {}'.format(round(self.computed_energies[0], 12)))
lines.append(' - frozen energy part: {}'.format(round(self.frozen_extracted_energy, 12)))
lines.append(' - particle hole part: {}'.format(round(self.ph_extracted_energy, 12)))
if (self.nuclear_repulsion_energy is not None):
lines.append('~ Nuclear repulsion energy (Hartree): {}'.format(round(self.nuclear_repulsion_energy, 12)))
lines.append('> Total ground state energy (Hartree): {}'.format(round(self.total_energies[0], 12)))
if (len(self.computed_energies) > 1):
lines.append(' ')
lines.append('=== EXCITED STATE ENERGIES ===')
lines.append(' ')
for (idx, (elec_energy, total_energy)) in enumerate(zip(self.electronic_energies[1:], self.total_energies[1:])):
lines.append('{: 3d}: '.format((idx + 1)))
lines.append('* Electronic excited state energy (Hartree): {}'.format(round(elec_energy, 12)))
lines.append('> Total excited state energy (Hartree): {}'.format(round(total_energy, 12)))
if self.has_observables():
lines.append(' ')
lines.append('=== MEASURED OBSERVABLES ===')
lines.append(' ')
for (idx, (num_particles, spin, total_angular_momentum, magnetization)) in enumerate(zip(self.num_particles, self.spin, self.total_angular_momentum, self.magnetization)):
line = '{: 3d}: '.format(idx)
if (num_particles is not None):
line += ' # Particles: {:.3f}'.format(num_particles)
if (spin is not None):
line += ' S: {:.3f}'.format(spin)
if (total_angular_momentum is not None):
line += ' S^2: {:.3f}'.format(total_angular_momentum)
if (magnetization is not None):
line += ' M: {:.3f}'.format(magnetization)
lines.append(line)
if self.has_dipole():
lines.append(' ')
lines.append('=== DIPOLE MOMENTS ===')
lines.append(' ')
if (self.nuclear_dipole_moment is not None):
lines.append('~ Nuclear dipole moment (a.u.): {}'.format(_dipole_to_string(self.nuclear_dipole_moment)))
lines.append(' ')
for (idx, (elec_dip, comp_dip, frozen_dip, ph_dip, dip, tot_dip, dip_db, tot_dip_db)) in enumerate(zip(self.electronic_dipole_moment, self.computed_dipole_moment, self.frozen_extracted_dipole_moment, self.ph_extracted_dipole_moment, self.dipole_moment, self.total_dipole_moment, self.dipole_moment_in_debye, self.total_dipole_moment_in_debye)):
lines.append('{: 3d}: '.format(idx))
lines.append(' * Electronic dipole moment (a.u.): {}'.format(_dipole_to_string(elec_dip)))
lines.append(' - computed part: {}'.format(_dipole_to_string(comp_dip)))
lines.append(' - frozen energy part: {}'.format(_dipole_to_string(frozen_dip)))
lines.append(' - particle hole part: {}'.format(_dipole_to_string(ph_dip)))
if (self.nuclear_dipole_moment is not None):
lines.append(' > Dipole moment (a.u.): {} Total: {}'.format(_dipole_to_string(dip), _float_to_string(tot_dip)))
lines.append(' (debye): {} Total: {}'.format(_dipole_to_string(dip_db), _float_to_string(tot_dip_db)))
lines.append(' ')
return lines |
class TestMimicTPW2Reader(unittest.TestCase):
    """Tests for the MIMIC TPW v2 reader, run against a faked NetCDF handler."""

    yaml_file = 'mimicTPW2_comp.yaml'

    def setUp(self):
        """Swap the handler's base class for the fake so no real file is read."""
        from satpy._config import config_search_paths
        from satpy.readers.mimic_TPW2_nc import MimicTPW2FileHandler
        self.reader_configs = config_search_paths(os.path.join('readers', self.yaml_file))
        self.p = mock.patch.object(MimicTPW2FileHandler, '__bases__', (FakeNetCDF4FileHandlerMimic,))
        self.fake_handler = self.p.start()
        self.p.is_local = True

    def tearDown(self):
        """Undo the base-class patch."""
        self.p.stop()

    def test_init(self):
        """The reader recognises the composite filename and builds handlers."""
        from satpy.readers import load_reader
        reader = load_reader(self.reader_configs)
        files = reader.select_files_from_pathnames(['comp.130000.nc'])
        assert len(files) == 1
        reader.create_filehandlers(files)
        assert reader.file_handlers

    def test_load_mimic(self):
        """Loading 'tpwGrid' yields a single dataset with expected metadata."""
        from satpy.readers import load_reader
        reader = load_reader(self.reader_configs)
        with mock.patch('satpy.readers.mimic_TPW2_nc.netCDF4.Variable', xr.DataArray):
            files = reader.select_files_from_pathnames(['comp.130000.nc'])
            reader.create_filehandlers(files)
            datasets = reader.load(['tpwGrid'])
            assert len(datasets) == 1
            for dataset in datasets.values():
                assert dataset.attrs['platform_shortname'] == 'aggregated microwave'
                assert dataset.attrs['sensor'] == 'mimic'
                assert 'area' in dataset.attrs
                assert 'units' in dataset.attrs
                assert dataset.attrs['area'] is not None
class MultiHeadAttention(nn.Module):
    """Multi-head self-attention over sequences shaped (batch, tokens, emb_size).

    Separate linear projections produce queries/keys/values which are split
    into ``num_heads`` heads via einops ``rearrange``.
    """

    def __init__(self, emb_size, num_heads, dropout):
        """Create projections and dropout; ``emb_size`` must divide by ``num_heads``."""
        super().__init__()
        self.emb_size = emb_size
        self.num_heads = num_heads
        self.keys = nn.Linear(emb_size, emb_size)
        self.queries = nn.Linear(emb_size, emb_size)
        self.values = nn.Linear(emb_size, emb_size)
        self.att_drop = nn.Dropout(dropout)
        self.projection = nn.Linear(emb_size, emb_size)

    def forward(self, x, mask=None):
        """Apply attention; ``mask`` marks positions to keep (False = masked out)."""
        queries = rearrange(self.queries(x), 'b n (h d) -> b h n d', h=self.num_heads)
        keys = rearrange(self.keys(x), 'b n (h d) -> b h n d', h=self.num_heads)
        values = rearrange(self.values(x), 'b n (h d) -> b h n d', h=self.num_heads)
        # Raw attention scores: query/key dot products per head.
        energy = torch.einsum('bhqd, bhkd -> bhqk', queries, keys)
        if (mask is not None):
            fill_value = torch.finfo(torch.float32).min
            # BUG FIX: Tensor has no `mask_fill` method (AttributeError), and
            # the out-of-place result was discarded. Use masked_fill and keep
            # the result so masked positions get ~-inf before softmax.
            energy = energy.masked_fill((~ mask), fill_value)
        # NOTE: scales by sqrt(emb_size) (full embedding), not per-head dim,
        # matching the original implementation.
        scaling = (self.emb_size ** (1 / 2))
        att = F.softmax((energy / scaling), dim=(- 1))
        att = self.att_drop(att)
        out = torch.einsum('bhal, bhlv -> bhav ', att, values)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.projection(out)
        return out
class VolGroup_TestCase(unittest.TestCase):
    """Checks VolGroupData defaults, equality, and dash/underscore kwargs."""

    def runTest(self):
        first = FC3_VolGroupData()
        second = FC3_VolGroupData()

        # Version-specific defaults.
        self.assertEqual(first.format, True)
        self.assertEqual(first.pesize, 32768)
        self.assertEqual(first.preexist, False)
        self.assertEqual(F21_VolGroupData().pesize, 0)

        # Equality compares attribute values; never equal to None.
        self.assertEqual(first, second)
        self.assertNotEqual(first, None)

        # Differing vgname makes the objects unequal in both directions.
        for attr in ['vgname']:
            setattr(first, attr, '')
            setattr(second, attr, 'test')
            self.assertNotEqual(first, second)
            self.assertNotEqual(second, first)
            setattr(first, attr, '')
            setattr(second, attr, '')

        # Dashed kwargs alias the underscored attributes; dash wins when both given.
        for attr_dash in ['reserved-space', 'reserved-percent']:
            attr_under = attr_dash.replace('-', '_')
            for (dash_value, under_value) in [(1, None), (None, 2), (1, 2)]:
                kwargs = {attr_dash: dash_value, attr_under: under_value}
                data = F16_VolGroupData(**kwargs)
                self.assertEqual(getattr(data, attr_under), (dash_value or under_value))
class State(ViewColumn):
    """Fitting-view column that shows an item's state as a small icon.

    Renders no text; the image index encodes active/offline state for
    drones, modules, projected fits, and implants.
    """

    name = 'State'

    def __init__(self, fittingView, params):
        # `params` is accepted for the ViewColumn interface but unused here.
        ViewColumn.__init__(self, fittingView)
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()
        self.resizable = False
        # Fixed 16px icon-only column.
        self.size = 16
        self.maxsize = self.size
        self.mask = wx.LIST_MASK_IMAGE

    def getText(self, mod):
        """Icon-only column: never show text."""
        return ''

    def getToolTip(self, mod):
        """Tooltip is the module state name; implicitly None for other items."""
        if (isinstance(mod, Module) and (not mod.isEmpty)):
            return State_(mod.state).name.title()

    def getImageId(self, stuff):
        """Return the image-list index for `stuff`'s state, or -1 for no icon."""
        # Generic on/off icons reused by several branches below.
        generic_active = self.fittingView.imageList.GetImageIndex(('state_%s_small' % State_.ACTIVE.name.lower()), 'gui')
        generic_inactive = self.fittingView.imageList.GetImageIndex(('state_%s_small' % State_.OFFLINE.name.lower()), 'gui')
        if isinstance(stuff, Drone):
            if (stuff.amountActive > 0):
                return generic_active
            else:
                return generic_inactive
        elif isinstance(stuff, Rack):
            return (- 1)
        elif isinstance(stuff, Module):
            if stuff.isEmpty:
                return (- 1)
            else:
                # Modules get the icon for their exact state, not the generic pair.
                return self.fittingView.imageList.GetImageIndex(('state_%s_small' % State_(stuff.state).name.lower()), 'gui')
        elif isinstance(stuff, Fit):
            # Projected/command fits: look up whether the link to the active fit
            # is enabled; the info source depends on which view hosts the column.
            fitID = self.mainFrame.getActiveFit()
            if (self.fittingView.__class__.__name__ == 'CommandView'):
                info = stuff.getCommandInfo(fitID)
            else:
                info = stuff.getProjectionInfo(fitID)
            if (info is None):
                return (- 1)
            if info.active:
                return generic_active
            return generic_inactive
        elif (isinstance(stuff, Implant) and stuff.character):
            return self.fittingView.imageList.GetImageIndex(('state_%s_small' % State_.ONLINE.name.lower()), 'gui')
        else:
            # Fallback: honor a generic `active` flag when present.
            active = getattr(stuff, 'active', None)
            if (active is None):
                return (- 1)
            if active:
                return generic_active
            return generic_inactive
def add_computed_time(t):
    """Parse action: attach a ``datetime.time`` to the parse results ``t``.

    ``t`` appears to be a pyparsing ``ParseResults`` carrying either a single
    keyword token or ``HH``/``MM``/``SS`` plus ``ampm`` fields -- TODO confirm
    against the grammar that produces it.
    """
    # Keyword forms map directly to fixed times of day.
    if (t[0] in 'now noon midnight'.split()):
        t['computed_time'] = {'now': datetime.now().time().replace(microsecond=0), 'noon': time(hour=12), 'midnight': time()}[t[0]]
    else:
        # Normalize a 12-hour clock reading to 24-hour.
        # NOTE(review): raises KeyError when t.ampm is neither 'am' nor 'pm'
        # (e.g. absent) -- confirm the grammar guarantees it is present.
        t['HH'] = {'am': (int(t['HH']) % 12), 'pm': ((int(t['HH']) % 12) + 12)}[t.ampm]
        t['computed_time'] = time(hour=t.HH, minute=t.MM, second=t.SS)
def test_init_false(converter: BaseConverter) -> None:
    """Fields declared with ``init=False`` are ignored when unstructuring and
    structuring, except those with defaults, which are applied.

    NOTE(review): ``class A`` carries no class decorator, yet ``A(1)`` relies
    on a generated ``__init__`` and ``field(init=False)`` is used on
    class-level attributes -- a decorator (presumably ``@define`` or
    ``@dataclass``) appears to have been stripped; confirm against the
    original test suite before running.
    """
    class A():
        a: int
        b: int = field(init=False)
        _c: int = field(init=False)
        d: int = field(init=False, default=4)
    converter.register_unstructure_hook(A, make_dict_unstructure_fn(A, converter))
    a = A(1)
    a.b = 2
    a._c = 3
    # Only the init-able field survives unstructuring.
    assert (converter.unstructure(a) == {'a': 1})
    converter.register_structure_hook(A, make_dict_structure_fn(A, converter, _cattrs_detailed_validation=converter.detailed_validation))
    structured = converter.structure({'a': 1}, A)
    # init=False fields without defaults are left unset; defaults are applied.
    assert (not hasattr(structured, 'b'))
    assert (not hasattr(structured, '_c'))
    assert (structured.d == 4)
    assert (structured.a == 1)
def columnize(array, displaywidth=80, colsep='  ', arrange_vertical=True, ljust=True, lineprefix='', opts={}):
    """Format a list of strings into aligned columns fitting ``displaywidth``.

    When ``arrange_vertical`` is true, entries run down each column first;
    otherwise across each row. ``opts`` (when non-empty) overrides all other
    keyword arguments via the module-level ``default_opts``/``get_option``
    helpers, and its ``arrange_array`` mode wraps the output in brackets.

    NOTE(review): ``opts={}`` is a mutable default argument -- it is only
    read here, but consider ``opts=None`` for safety.
    """
    if (not isinstance(array, (list, tuple))):
        raise TypeError('array needs to be an instance of a list or a tuple')
    # Build the effective option dict: either entirely from `opts`, or from
    # the explicit keyword arguments layered over the defaults.
    if (len(opts.keys()) > 0):
        o = {key: get_option(key, opts) for key in default_opts}
        if o['arrange_array']:
            o.update({'array_prefix': '[', 'lineprefix': ' ', 'linesuffix': ',\n', 'array_suffix': ']\n', 'colsep': ', ', 'arrange_vertical': False})
    else:
        o = default_opts.copy()
        o.update({'displaywidth': displaywidth, 'colsep': colsep, 'arrange_vertical': arrange_vertical, 'ljust': ljust, 'lineprefix': lineprefix})
    # Stringify all cells up front (optionally through a % format).
    if o['colfmt']:
        array = [(o['colfmt'] % i) for i in array]
    else:
        array = [str(i) for i in array]
    size = len(array)
    if (0 == size):
        return '<empty>\n'
    elif (size == 1):
        return ('%s%s%s\n' % (o['array_prefix'], str(array[0]), o['array_suffix']))
    # Reserve room for the line prefix, keeping at least 4 usable columns.
    # NOTE(review): the `max(...)` line below subtracts len(lineprefix) a
    # second time after the if/else already accounted for it -- looks like a
    # leftover from a refactor; confirm against the original columnize package.
    if ((o['displaywidth'] - len(o['lineprefix'])) < 4):
        o['displaywidth'] = (len(o['lineprefix']) + 4)
    else:
        o['displaywidth'] -= len(o['lineprefix'])
    o['displaywidth'] = max(4, (o['displaywidth'] - len(o['lineprefix'])))
    if o['arrange_vertical']:
        # Entries run down columns: cell (row, col) is array[nrows*col + row].
        def array_index(nrows, row, col):
            return ((nrows * col) + row)
        # Try ever-taller layouts until one fits the display width.
        for nrows in range(1, size):
            ncols = (((size + nrows) - 1) // nrows)
            colwidths = []
            totwidth = (- len(o['colsep']))
            for col in range(ncols):
                colwidth = 0
                for row in range(nrows):
                    i = array_index(nrows, row, col)
                    if (i >= size):
                        break
                    x = array[i]
                    colwidth = max(colwidth, len(x))
                colwidths.append(colwidth)
                totwidth += (colwidth + len(o['colsep']))
                if (totwidth > o['displaywidth']):
                    break
            if (totwidth <= o['displaywidth']):
                break
        # Emit row by row, padding each cell to its column width.
        s = ''
        for row in range(nrows):
            texts = []
            for col in range(ncols):
                i = (row + (nrows * col))
                if (i >= size):
                    x = ''
                else:
                    x = array[i]
                texts.append(x)
            # Drop trailing empty cells so lines end cleanly.
            while (texts and (not texts[(- 1)])):
                del texts[(- 1)]
            for col in range(len(texts)):
                if o['ljust']:
                    texts[col] = texts[col].ljust(colwidths[col])
                else:
                    texts[col] = texts[col].rjust(colwidths[col])
            s += ('%s%s%s' % (o['lineprefix'], str(o['colsep'].join(texts)), o['linesuffix']))
        return s
    else:
        # Horizontal arrangement: cell (row, col) is array[ncols*(row-1) + col]
        # (rows are 1-based here).
        def array_index(ncols, row, col):
            return ((ncols * (row - 1)) + col)
        colwidths = []
        # Try ever-narrower layouts (fewer columns) until one fits.
        for ncols in range(size, 0, (- 1)):
            min_rows = (((size + ncols) - 1) // ncols)
            nrows = (min_rows - 1)
            while (nrows < size):
                nrows += 1
                rounded_size = (nrows * ncols)
                colwidths = []
                totwidth = (- len(o['colsep']))
                for col in range(ncols):
                    colwidth = 0
                    for row in range(1, (nrows + 1)):
                        i = array_index(ncols, row, col)
                        if (i >= rounded_size):
                            break
                        elif (i < size):
                            x = array[i]
                            colwidth = max(colwidth, len(x))
                    colwidths.append(colwidth)
                    totwidth += (colwidth + len(o['colsep']))
                    if (totwidth >= o['displaywidth']):
                        break
                if ((totwidth <= o['displaywidth']) and (i >= (rounded_size - 1))):
                    # Fits and all cells placed: accept this row count.
                    nrows = row
                    break
                elif (totwidth >= o['displaywidth']):
                    break
            if ((totwidth <= o['displaywidth']) and (i >= (rounded_size - 1))):
                break
        s = ''
        # First line may carry the array prefix ('[' in arrange_array mode).
        if (len(o['array_prefix']) != 0):
            prefix = o['array_prefix']
        else:
            prefix = o['lineprefix']
        for row in range(1, (nrows + 1)):
            texts = []
            for col in range(ncols):
                i = array_index(ncols, row, col)
                if (i >= size):
                    break
                else:
                    x = array[i]
                texts.append(x)
            for col in range(len(texts)):
                if o['ljust']:
                    texts[col] = texts[col].ljust(colwidths[col])
                else:
                    texts[col] = texts[col].rjust(colwidths[col])
            s += ('%s%s%s' % (prefix, str(o['colsep'].join(texts)), o['linesuffix']))
            prefix = o['lineprefix']
        if o['arrange_array']:
            # Replace a trailing column separator with the closing bracket.
            colsep = o['colsep'].rstrip()
            colsep_pos = (- (len(colsep) + 1))
            if (s[colsep_pos:] == (colsep + '\n')):
                s = ((s[:colsep_pos] + o['array_suffix']) + '\n')
            else:
                s += o['array_suffix']
        return s
def test_non_unittest_no_setupclass_support(pytester: Pytester) -> None:
    """A plain class (not a unittest.TestCase) gets no setUpClass/tearDownClass
    treatment: neither classmethod is invoked, so ``x`` stays 0 in both tests.

    FIX: the embedded source had its ``@classmethod`` decorators stripped
    (leaving stray near-blank lines) and its indentation collapsed; restored
    as a dedent-able triple-quoted block.
    """
    testpath = pytester.makepyfile(
        """
        class TestFoo(object):
            x = 0

            @classmethod
            def setUpClass(cls):
                cls.x = 1

            def test_method1(self):
                assert self.x == 0

            @classmethod
            def tearDownClass(cls):
                cls.x = 1

        def test_not_teareddown():
            assert TestFoo.x == 0
        """
    )
    reprec = pytester.inline_run(testpath)
    reprec.assertoutcome(passed=2)
def test_method_const_instance_attr_same_method() -> None:
    """Inferring ``self.x`` sees every assignment made across all methods."""
    node = builder.extract_node('\n    class A:\n        def __init__(self, x):\n            if x:\n                self.x = 1\n            else:\n                self.x = 2\n\n        def set_x(self):\n            self.x = 3\n\n        def get_x(self):\n            self.x = 4\n            return self.x\n\n    A().get_x() #\n    ')
    assert isinstance(node, nodes.NodeNG)
    results = node.inferred()
    assert len(results) == 4
    assert all(isinstance(result, nodes.Const) for result in results)
    assert {result.value for result in results} == {1, 2, 3, 4}
def test_adr_invalid_and_night(sam_data):
    """ADR inverter model: NaN inputs propagate and night-time power clips."""
    inverter_params = sam_data['adrinverter']['Zigor__Sunzet_3_TL_US_240V__CEC_2011_']
    dc_voltages = np.array([39.873036, 0.0, np.nan, 420])
    dc_powers = np.array([188.09182, 0.0, 420, np.nan])
    ac_powers = inverter.adr(dc_voltages, dc_powers, inverter_params)
    expected = np.array([np.nan, -0.25, np.nan, np.nan])
    assert_allclose(ac_powers, expected)
class TestChangeKeyboardControl(EndianTest):
    """Round-trips a ChangeKeyboardControl request against a reference encoding."""

    def setUp(self):
        # Reference request arguments and the expected on-the-wire bytes.
        self.req_args_0 = {'attrs': {'led': 196, 'auto_repeat_mode': 0, 'bell_pitch': (- 2303), 'bell_percent': (- 5), 'key_click_percent': (- 59), 'key': 190, 'bell_duration': (- 4223), 'led_mode': 1}}
        self.req_bin_0 = b'f\x00\x00\n\x00\x00\x00\xff\xc5\x00\x00\x00\xfb\x00\x00\x00\xf7\x01\x00\x00\xef\x81\x00\x00\xc4\x00\x00\x00\x01\x00\x00\x00\xbe\x00\x00\x00\x00\x00\x00\x00'

    def testPackRequest0(self):
        """Packing the reference arguments must produce the reference bytes."""
        # Renamed local `bin` -> `binary`: it shadowed the builtin bin().
        binary = request.ChangeKeyboardControl._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(binary, self.req_bin_0)

    def testUnpackRequest0(self):
        """Parsing the reference bytes must yield the arguments and no remainder."""
        (args, remain) = request.ChangeKeyboardControl._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
def concept_preparation(train_captions, dataset, source_lang=configs.main_lang, target_lang=None, topk=1000):
    """Extract the ``topk`` most frequent noun chunks and verbs from captions.

    Parses ``train_captions`` with spaCy (or reuses cached statistics under
    ``configs.concepts_root/<dataset>/<source_lang>``), then writes ranked
    concept/verb/mixed lists as text files under the ``target_lang`` folder.

    NOTE(review): captions are parsed with 'en_core_web_sm' regardless of
    ``source_lang`` -- presumably captions are always English; confirm.
    """
    if (target_lang is None):
        target_lang = source_lang
    print(f'Parse English captions to get the most frequent {topk} concepts')
    save_path = os.path.join(configs.concepts_root, dataset, target_lang)
    os.makedirs(save_path, exist_ok=True)
    read_path = os.path.join(configs.concepts_root, dataset, source_lang)
    os.makedirs(read_path, exist_ok=True)
    statistics_nouns_path = os.path.join(read_path, f'statistics_nouns.json')
    statistics_verbs_path = os.path.join(read_path, f'statistics_verbs.json')
    if (os.path.exists(statistics_nouns_path) and os.path.exists(statistics_verbs_path)):
        # Cached counts exist: skip the (slow) spaCy pass entirely.
        print(f'Load existing statistics files from {statistics_nouns_path} and {statistics_verbs_path}')
        statistics_nouns = json.load(open(statistics_nouns_path))
        statistics_verbs = json.load(open(statistics_verbs_path))
    else:
        # Count noun chunks and verbs over all lowercased captions.
        all_noun_chunks = []
        all_verbs = []
        nlp = spacy.load('en_core_web_sm')
        for caption in tqdm(train_captions):
            doc = nlp(caption.lower())
            for nc in doc.noun_chunks:
                all_noun_chunks.append(nc.text)
            verbs = [token.text for token in doc if (token.pos_ == 'VERB')]
            all_verbs.extend(verbs)
        statistics_nouns = dict(Counter(all_noun_chunks))
        statistics_verbs = dict(Counter(all_verbs))
        # Cache the raw counts for future runs.
        print(f'Save statistics files to {statistics_nouns_path} and {statistics_verbs_path}')
        json.dump(statistics_nouns, open(statistics_nouns_path, 'w'))
        json.dump(statistics_verbs, open(statistics_verbs_path, 'w'))
    # Rank by descending frequency and keep the top-k of each category.
    candidate_nouns = sorted(statistics_nouns.items(), key=(lambda x: (- x[1])))
    candidate_nouns = [item[0] for item in candidate_nouns[:topk]]
    print('-- Noun Phrases')
    print(candidate_nouns[:10])
    candidate_verbs = sorted(statistics_verbs.items(), key=(lambda x: (- x[1])))
    candidate_verbs = [item[0] for item in candidate_verbs[:topk]]
    print('-- Verbs')
    print(candidate_verbs[:10])
    # Merged ranking: Counter addition sums counts for words in both maps.
    candidate_mix = (Counter(statistics_nouns) + Counter(statistics_verbs))
    candidate_mix = sorted(candidate_mix.items(), key=(lambda x: (- x[1])))
    candidate_mix = [item[0] for item in candidate_mix[:topk]]
    print('-- Noun Phrases + Verbs')
    print(candidate_mix[:10])
    cp = os.path.join(save_path, 'concepts.txt')
    vp = os.path.join(save_path, 'verbs.txt')
    mp = os.path.join(save_path, 'concepts_with_verbs.txt')
    print(f'''Save results to
  {cp}
  {vp}
  {mp}''')
    with open(cp, 'w') as f:
        f.write('\n'.join(candidate_nouns))
    with open(vp, 'w') as f:
        f.write('\n'.join(candidate_verbs))
    with open(mp, 'w') as f:
        f.write('\n'.join(candidate_mix))
class TokenAddLayout(QGridLayout):
    """Dialog layout for registering a QRC20 token: contract address + owner address.

    On save, validates the inputs and hands them to ``callback``; the dialog
    is always closed via ``reject`` afterwards.
    """

    def __init__(self, dialog, callback):
        QGridLayout.__init__(self)
        self.setSpacing(8)
        self.setColumnStretch(3, 1)
        self.callback = callback
        self.dialog = dialog
        self.addresses = self.dialog.parent().wallet.get_addresses_sort_by_balance()
        # QRC20 tokens can only bind to P2PKH addresses; the first (richest)
        # wallet address is used as the representative check.
        (addr_type, __) = b58_address_to_hash160(self.addresses[0])
        if (not (addr_type == constants.net.ADDRTYPE_P2PKH)):
            self.dialog.show_message(_('only P2PKH address supports QRC20 Token'))
            self.dialog.reject()
            return
        address_lb = QLabel(_('Contract Address:'))
        self.contract_addr_e = ButtonsLineEdit()
        self.addWidget(address_lb, 1, 0)
        self.addWidget(self.contract_addr_e, 1, 1, 1, (- 1))
        address_lb = QLabel(_('My Address:'))
        self.address_combo = QComboBox()
        self.address_combo.setMinimumWidth(300)
        self.address_combo.setEditable(True)
        self.address_combo.addItems(self.addresses)
        self.addWidget(address_lb, 2, 0)
        self.addWidget(self.address_combo, 2, 1, 1, (- 1))
        self.cancel_btn = CancelButton(dialog)
        self.save_btn = QPushButton(_('Save'))
        self.save_btn.setDefault(True)
        self.save_btn.clicked.connect(self.save_input)
        buttons = Buttons(*[self.cancel_btn, self.save_btn])
        buttons.addStretch()
        self.addLayout(buttons, 3, 2, 2, (- 1))

    def save_input(self):
        """Validate user input and invoke the registration callback.

        Any failure (including validation errors raised here) is shown to the
        user instead of propagating.
        """
        try:
            contract_addr = self.contract_addr_e.text().strip()
            bind_addr = self.address_combo.currentText().strip()
            if (bind_addr not in self.addresses):
                raise Exception('invalid bind address')
            if (not is_hash160(contract_addr)):
                raise Exception('invalid contract address:{}'.format(contract_addr))
            self.callback(contract_addr, bind_addr)
            self.dialog.reject()
        # NOTE(review): BaseException is very broad (catches KeyboardInterrupt
        # too); narrowing to Exception would likely suffice -- confirm intent.
        except (BaseException,) as e:
            import traceback, sys
            traceback.print_exc(file=sys.stderr)
            self.dialog.show_message(str(e))
def is_special_target(right: ProperType) -> bool:
    """Whether ``right`` names one of the special-cased mypy/builtin types
    (or is a tuple composed entirely of such types)."""
    if isinstance(right, FunctionLike) and right.is_type_obj():
        fullname = right.type_object().fullname
        if fullname == 'builtins.tuple':
            # Used with Instance.
            return True
        if fullname in ('mypy.types.Type', 'mypy.types.ProperType', 'mypy.types.TypeAliasType'):
            return True
        if fullname in ('mypy.types.UnboundType', 'mypy.types.TypeVarLikeType', 'mypy.types.TypeVarType', 'mypy.types.UnpackType', 'mypy.types.TypeVarTupleType', 'mypy.types.ParamSpecType', 'mypy.types.Parameters', 'mypy.types.RawExpressionType', 'mypy.types.EllipsisType', 'mypy.types.StarType', 'mypy.types.TypeList', 'mypy.types.CallableArgument', 'mypy.types.PartialType', 'mypy.types.ErasedType', 'mypy.types.DeletedType', 'mypy.types.RequiredType'):
            return True
    elif isinstance(right, TupleType):
        # A tuple is special iff every member is.
        return all(is_special_target(t) for t in get_proper_types(right.items))
    return False
def load_op_library(path):
    """Load a TensorFlow op library from ``path``.

    On Windows, a missing ``.so`` is retried as ``.dll``; returns None when
    neither exists there. Asserts that loading succeeded elsewhere.
    """
    if os.name == 'nt':
        # Windows builds ship .dll files; fall back when the .so name is absent.
        if not os.path.exists(path):
            path = re.sub('\\.so$', '.dll', path)
        if not os.path.exists(path):
            return None
    resolved = resource_loader.get_path_to_datafile(path)
    library = load_library.load_op_library(resolved)
    assert library, ('Could not load %s' % resolved)
    return library
def test_pre_greedy_node_rewriter():
    """pre_greedy_node_rewriter folds constants, reuses folded results, and
    leaves variables owned by the given FunctionGraph untouched."""
    fgraph_empty = FunctionGraph([], [])
    x = MyVariable('x')
    y = MyVariable('y')
    const1 = Constant(MyType(), 1, 'c1')
    const2 = Constant(MyType(), 2, 'c2')
    foldable = op2(const1, const2)
    unfoldable = op1(const1, y)
    graph_out = op1(foldable, const2, x, unfoldable, foldable)
    # Before rewriting, both occurrences of the foldable input have owners.
    assert graph_out.owner.inputs[0].owner is not None
    assert graph_out.owner.inputs[4].owner is not None
    rewritten = pre_greedy_node_rewriter(fgraph_empty, [constant_folding], graph_out)
    # The foldable input became a constant (no owner); others are untouched,
    # and the folded constant is shared between its two occurrences.
    assert rewritten.owner.inputs[0].owner is None
    assert rewritten.owner.inputs[1] is const2
    assert rewritten.owner.inputs[2] is x
    assert rewritten.owner.inputs[3] is unfoldable
    assert rewritten.owner.inputs[4] is rewritten.owner.inputs[0]
    # When the foldable variable belongs to the FunctionGraph, it is preserved.
    fgraph_owned = FunctionGraph([], [foldable], clone=False)
    graph_out = op1(foldable, const2, x, unfoldable, foldable)
    rewritten = pre_greedy_node_rewriter(fgraph_owned, [constant_folding], graph_out)
    assert rewritten.owner.inputs[0] is foldable
    assert rewritten.owner.inputs[4] is rewritten.owner.inputs[0]
    # Folding a MakeSlice produces a hashable SliceConstant.
    slice_out = MakeSlice()(1)
    rewritten = pre_greedy_node_rewriter(fgraph_empty, [constant_folding], slice_out)
    assert isinstance(rewritten, SliceConstant)
    assert isinstance(hash(rewritten.signature()), int)
def test_resolve_module_exports_from_file_log_on_unknown_file_location(caplog, tmp_path):
    """A re-export pointing at a nonexistent file is logged, not raised."""
    source_file = tmp_path / 'some.js'
    source_file.write_text("export * from './does-not-exist.js';")
    resolve_module_exports_from_file(source_file, 2)
    records = caplog.records
    assert len(records) == 1
    assert records[0].message.startswith('Did not resolve exports for unknown file')
# Restored decorator: the original `_datapipe('zip_longest')` was a mangled
# `@functional_datapipe('zip_longest')` -- as a bare expression it would raise
# NameError and the datapipe would never be registered.
@functional_datapipe('zip_longest')
class ZipperLongestIterDataPipe(IterDataPipe):
    """Zip multiple IterDataPipes like itertools.zip_longest.

    Iteration continues until every input is exhausted; exhausted inputs
    contribute ``fill_value`` (default ``None``) to each emitted tuple.
    """

    datapipes: Tuple[IterDataPipe]
    length: Optional[int]
    fill_value: Any

    def __init__(self, *datapipes: IterDataPipe, fill_value: Any = None):
        if (not all((isinstance(dp, IterDataPipe) for dp in datapipes))):
            raise TypeError('All inputs are required to be `IterDataPipe` for `ZipperLongestIterDataPipe`.')
        super().__init__()
        self.datapipes = datapipes
        self.fill_value = fill_value

    def __iter__(self) -> Iterator[Tuple]:
        iterators = [iter(x) for x in self.datapipes]
        finished: Set[int] = set()
        while (len(finished) < len(iterators)):
            values: List[Any] = []
            for i in range(len(iterators)):
                value = self.fill_value
                if (i not in finished):
                    try:
                        value = next(iterators[i])
                    except StopIteration:
                        finished.add(i)
                        # Stop without emitting a tuple made purely of fill values.
                        if (len(finished) == len(iterators)):
                            return
                values.append(value)
            (yield tuple(values))

    def __len__(self) -> int:
        """Length is that of the longest input; raises if any input is unsized."""
        if all((isinstance(dp, Sized) for dp in self.datapipes)):
            return max((len(dp) for dp in self.datapipes))
        else:
            raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
class AttributeTestModel(Model):
    """PynamoDB model exercising every attribute type used by the test suite."""

    class Meta():
        # BUG FIX: the original line was an unterminated string literal
        # (`host = '`) -- the endpoint URL had been stripped, leaving a
        # SyntaxError. Restored to the conventional local DynamoDB endpoint
        # used by these tests; confirm against the original source.
        host = 'http://localhost:8000'
        table_name = 'test'

    binary_attr = BinaryAttribute(hash_key=True, legacy_encoding=False)
    binary_set_attr = BinarySetAttribute(legacy_encoding=False)
    number_attr = NumberAttribute()
    number_set_attr = NumberSetAttribute()
    unicode_attr = UnicodeAttribute()
    unicode_set_attr = UnicodeSetAttribute()
    datetime_attr = UTCDateTimeAttribute()
    bool_attr = BooleanAttribute()
    json_attr = JSONAttribute()
    map_attr = AttributeTestMapAttribute()
    raw_map_attr = MapAttribute()
    ttl_attr = TTLAttribute()
    null_attr = NullAttribute(null=True)
def is_transaction_pending(chain_state: ChainState, transaction: ContractSendEvent, state_change: StateChange) -> bool:
    """A transaction is pending unless its effect is already satisfied, it was
    invalidated by the state change, or it expired at the current block."""
    # De Morgan form of the original; short-circuit evaluation order preserved.
    return (
        not is_transaction_effect_satisfied(chain_state, transaction, state_change)
        and not is_transaction_invalidated(transaction, state_change)
        and not is_transaction_expired(transaction, chain_state.block_number)
    )
class PrimeCosmeticPatchesDialog(BaseCosmeticPatchesDialog, Ui_PrimeCosmeticPatchesDialog):
_cosmetic_patches: PrimeCosmeticPatches
def __init__(self, parent: (QtWidgets.QWidget | None), current: PrimeCosmeticPatches):
super().__init__(parent)
self.setupUi(self)
self._cosmetic_patches = current
self.field_to_slider_mapping = {'screen_brightness': self.screen_brightness_slider, 'screen_x_offset': self.screen_x_offset_slider, 'screen_y_offset': self.screen_y_offset_slider, 'screen_stretch': self.screen_stretch_slider, 'sfx_volume': self.sfx_volume_slider, 'music_volume': self.music_volume_slider, 'hud_alpha': self.hud_alpha_slider, 'helmet_alpha': self.helmet_alpha_slider}
self.field_to_check_mapping = {'hud_lag': self.hud_lag_check, 'invert_y_axis': self.invert_y_axis_check, 'rumble': self.rumble_check, 'swap_beam_controls': self.swap_beam_controls_check}
for sound_mode in SoundMode:
self.sound_mode_combo.addItem(sound_mode.name, sound_mode)
suit_layouts = [self.power_suit_color_layout, self.varia_suit_color_layout, self.gravity_suit_color_layout, self.phazon_suit_color_layout]
self.suit_color_preview_squares = []
for (suit_layout, suit_colors) in zip(suit_layouts, SUIT_DEFAULT_COLORS):
self.suit_color_preview_squares.append([self._add_preview_color_square_to_layout(suit_layout, color) for color in suit_colors])
fields = {field.name: field for field in dataclasses.fields(PrimeUserPreferences)}
for (field_name, slider) in self.field_to_slider_mapping.items():
field = fields[field_name]
slider.setMinimum(field.metadata['min'])
slider.setMaximum(field.metadata['max'])
value_label: QtWidgets.QLabel = getattr(self, f'{field_name}_value_label')
updater = slider_updater.create_label_slider_updater(value_label, field.metadata['display_as_percentage'])
updater(slider)
setattr(self, f'{field_name}_label_updater', updater)
self.connect_signals()
self.on_new_cosmetic_patches(current)
self._update_color_squares()
def connect_signals(self) -> None:
super().connect_signals()
self._persist_check_field(self.open_map_check, 'open_map')
self._persist_check_field(self.pickup_markers_check, 'pickup_markers')
self._persist_check_field(self.force_fusion_check, 'force_fusion')
self._persist_check_field(self.custom_hud_color, 'use_hud_color')
self.power_suit_rotation_field.valueChanged.connect(self._persist_suit_color_rotations)
self.varia_suit_rotation_field.valueChanged.connect(self._persist_suit_color_rotations)
self.gravity_suit_rotation_field.valueChanged.connect(self._persist_suit_color_rotations)
self.phazon_suit_rotation_field.valueChanged.connect(self._persist_suit_color_rotations)
self.custom_hud_color_button.clicked.connect(self._open_color_picker)
self.sound_mode_combo.currentIndexChanged.connect(self._on_sound_mode_update)
for (field_name, slider) in self.field_to_slider_mapping.items():
slider.valueChanged.connect(partial(self._on_slider_update, slider, field_name))
for (field_name, check) in self.field_to_check_mapping.items():
check.stateChanged.connect(partial(self._on_check_update, check, field_name))
def on_new_cosmetic_patches(self, patches: PrimeCosmeticPatches) -> None:
self.open_map_check.setChecked(patches.open_map)
self.pickup_markers_check.setChecked(patches.pickup_markers)
self.force_fusion_check.setChecked(patches.force_fusion)
self.custom_hud_color.setChecked(patches.use_hud_color)
self.power_suit_rotation_field.setValue(patches.suit_color_rotations[0])
self.varia_suit_rotation_field.setValue(patches.suit_color_rotations[1])
self.gravity_suit_rotation_field.setValue(patches.suit_color_rotations[2])
self.phazon_suit_rotation_field.setValue(patches.suit_color_rotations[3])
self.on_new_user_preferences(patches.user_preferences)
def on_new_user_preferences(self, user_preferences: PrimeUserPreferences) -> None:
set_combo_with_value(self.sound_mode_combo, user_preferences.sound_mode)
for field in dataclasses.fields(user_preferences):
if (field.name in self.field_to_slider_mapping):
slider = self.field_to_slider_mapping[field.name]
slider.setValue(getattr(user_preferences, field.name))
elif (field.name in self.field_to_check_mapping):
check = self.field_to_check_mapping[field.name]
check.setChecked(getattr(user_preferences, field.name))
def _persist_suit_color_rotations(self) -> None:
suit_color_rotations_tuple = (self.power_suit_rotation_field.value(), self.varia_suit_rotation_field.value(), self.gravity_suit_rotation_field.value(), self.phazon_suit_rotation_field.value())
self._cosmetic_patches = dataclasses.replace(self._cosmetic_patches, suit_color_rotations=suit_color_rotations_tuple)
self._update_color_squares()
def _open_color_picker(self) -> None:
init_color = self._cosmetic_patches.hud_color
color = QtWidgets.QColorDialog.getColor(QtGui.QColor(*init_color))
if color.isValid():
color_tuple = (color.red(), color.green(), color.blue())
estimated_ingame_alpha = max(color_tuple)
if (estimated_ingame_alpha < 150):
QtWidgets.QMessageBox.warning(self, 'Dangerous preset', 'Be careful, desaturated colors like this one tend to produce a transparent HUD.\nUse at your own risk.')
self._cosmetic_patches = dataclasses.replace(self._cosmetic_patches, hud_color=color_tuple)
self._update_color_squares()
def _update_color_squares(self) -> None:
color = self._cosmetic_patches.hud_color
style = 'background-color: rgb({},{},{})'.format(*color)
self.custom_hud_color_square.setStyleSheet(style)
for (i, suit_colors) in enumerate(SUIT_DEFAULT_COLORS):
for (j, color) in enumerate(suit_colors):
color = hue_rotate_color(color, self._cosmetic_patches.suit_color_rotations[i])
style = 'background-color: rgb({},{},{})'.format(*color)
self.suit_color_preview_squares[i][j].setStyleSheet(style)
def _add_preview_color_square_to_layout(self, layout: QtWidgets.QLayout, default_color: tuple[int, int, int]) -> QtWidgets.QFrame:
    """Create a fixed-size 22x22 color swatch, add it to *layout*, return it."""
    color_square = QtWidgets.QFrame(self.game_changes_box)
    color_square.setMinimumSize(QtCore.QSize(22, 22))
    color_square.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed, QtWidgets.QSizePolicy.Policy.Fixed))
    color_square.setStyleSheet('background-color: rgb({},{},{})'.format(*default_color))
    layout.addWidget(color_square)
    return color_square
def cosmetic_patches(self) -> PrimeCosmeticPatches:
    """Return the cosmetic patches currently being edited by this dialog."""
    # NOTE(review): read-only accessor of ``self._cosmetic_patches`` — looks
    # like a stripped ``@property``; confirm against the original module.
    return self._cosmetic_patches
def preferences(self) -> PrimeUserPreferences:
    """Return the user preferences stored inside the cosmetic patches."""
    # NOTE(review): likely a stripped ``@property`` (paired with the setter
    # of the same name below); confirm against the original module.
    return self._cosmetic_patches.user_preferences
def preferences(self, value: PrimeUserPreferences) -> None:
    """Replace the user preferences inside the (immutable) cosmetic patches."""
    # NOTE(review): likely a stripped ``@preferences.setter``; confirm upstream.
    self._cosmetic_patches = dataclasses.replace(self._cosmetic_patches, user_preferences=value)
def _on_sound_mode_update(self) -> None:
    """Persist the combo box's current sound mode into the preferences."""
    self.preferences = dataclasses.replace(self.preferences, sound_mode=self.sound_mode_combo.currentData())
def _on_slider_update(self, slider: QtWidgets.QSlider, field_name: str, _: None) -> None:
    """Persist *slider*'s value into the preference field *field_name* and refresh its label."""
    self.preferences = dataclasses.replace(self.preferences, **{field_name: slider.value()})
    # Each slider has a companion ``<field>_label_updater`` method that
    # re-renders its display label.
    getattr(self, f'{field_name}_label_updater')(slider)
def _on_check_update(self, check: QtWidgets.QCheckBox, field_name: str, _: None) -> None:
    """Persist *check*'s state into the boolean preference field *field_name*."""
    self.preferences = dataclasses.replace(self.preferences, **{field_name: check.isChecked()})
def reset(self) -> None:
    """Reset the dialog to default cosmetic patches and refresh all widgets."""
    self.on_new_cosmetic_patches(PrimeCosmeticPatches())
class WorkQueue(object):
    """A database-backed work queue over the ``QueueItem`` peewee model.

    Items are addressed by slash-delimited "canonical names" that always start
    with the queue's own name; scoping to a queue (or sub-namespace) is done
    with SQL prefix matching via peewee's ``**`` (LIKE) operator. Claiming an
    item is race-safe through an optimistic compare-and-swap on ``state_id``.
    """

    def __init__(self, queue_name, transaction_factory, canonical_name_match_list=None, has_namespace=False):
        """Create a queue view named *queue_name*.

        *transaction_factory* is a callable yielding a DB transaction context;
        *canonical_name_match_list* optionally narrows all queries to a
        sub-path; *has_namespace* enables ``delete_namespaced_items``.
        """
        self._queue_name = queue_name
        self._transaction_factory = transaction_factory
        self._currently_processing = False
        self._has_namespaced_items = has_namespace
        if (canonical_name_match_list is None):
            self._canonical_name_match_list = []
        else:
            self._canonical_name_match_list = canonical_name_match_list

    # NOTE(review): this and several helpers below take no ``self`` (or take
    # ``cls``/bare args) — they look like @staticmethod/@classmethod helpers
    # whose decorators were lost in this dump; confirm against the original.
    def _canonical_name(name_list):
        # Join name parts into a slash-delimited prefix ending with '/'.
        return ('/'.join(name_list) + '/')

    def _running_jobs(cls, now, name_match_query):
        # Queue names of items currently claimed (and unexpired) under the prefix.
        return cls._running_jobs_where(QueueItem.select(QueueItem.queue_name), now).where((QueueItem.queue_name ** name_match_query))

    def _available_jobs(cls, now, name_match_query):
        # Items claimable right now under the prefix.
        return cls._available_jobs_where(QueueItem.select(), now).where((QueueItem.queue_name ** name_match_query))

    def _running_jobs_where(query, now):
        # Claimed (available == False) and the processing lease has not expired.
        return query.where((QueueItem.available == False), (QueueItem.processing_expires > now))

    def _available_jobs_where(query, now):
        # Due, either never claimed or lease expired, and retries remaining.
        return query.where((QueueItem.available_after <= now), ((QueueItem.available == True) | (QueueItem.processing_expires <= now)), (QueueItem.retries_remaining > 0))

    def _available_jobs_not_running(cls, now, name_match_query, running_query):
        # Available items whose queue_name is not already being processed
        # (prevents two workers handling the same logical item name).
        return cls._available_jobs(now, name_match_query).where((~ (QueueItem.queue_name << running_query)))

    def num_alive_jobs(self, canonical_name_list):
        """Count items under the given name prefix that still have retries left."""
        def strip_slash(name):
            return name.lstrip('/')
        canonical_name_list = list(map(strip_slash, canonical_name_list))
        canonical_name_query = ('/'.join(([self._queue_name] + canonical_name_list)) + '%')
        return QueueItem.select().where((QueueItem.queue_name ** canonical_name_query)).where((QueueItem.retries_remaining > 0)).count()

    def num_available_jobs_between(self, available_min_time, available_max_time, canonical_name_list):
        """Count claimable items whose ``available_after`` falls in [min, max]."""
        def strip_slash(name):
            return name.lstrip('/')
        canonical_name_list = list(map(strip_slash, canonical_name_list))
        available = self._available_jobs(available_max_time, ('/'.join(([self._queue_name] + canonical_name_list)) + '%'))
        return available.where((QueueItem.available_after >= available_min_time)).count()

    def _name_match_query(self):
        # SQL LIKE pattern covering this queue's whole (optionally narrowed) namespace.
        return ('%s%%' % self._canonical_name(([self._queue_name] + self._canonical_name_match_list)))

    def _item_by_id_for_update(queue_id):
        # SELECT ... FOR UPDATE so the row is locked for the enclosing transaction.
        return db_for_update(QueueItem.select().where((QueueItem.id == queue_id))).get()

    def get_metrics(self):
        """Return (running, available-but-not-running, available) distinct counts."""
        now = datetime.utcnow()
        name_match_query = self._name_match_query()
        running_query = self._running_jobs(now, name_match_query)
        running_count = running_query.distinct().count()
        available_query = self._available_jobs(now, name_match_query)
        available_count = available_query.select(QueueItem.queue_name).distinct().count()
        available_not_running_query = self._available_jobs_not_running(now, name_match_query, running_query)
        available_not_running_count = available_not_running_query.select(QueueItem.queue_name).distinct().count()
        return (running_count, available_not_running_count, available_count)

    def update_metrics(self):
        """Push the current queue gauges to Prometheus."""
        (running_count, available_not_running_count, available_count) = self.get_metrics()
        queue_items_locked.labels(self._queue_name).set(running_count)
        queue_items_available.labels(self._queue_name).set(available_count)
        queue_items_available_unlocked.labels(self._queue_name).set(available_not_running_count)

    def has_retries_remaining(self, item_id):
        """True if the item exists and can still be retried."""
        with self._transaction_factory(db):
            try:
                return (QueueItem.get(id=item_id).retries_remaining > 0)
            except QueueItem.DoesNotExist:
                return False

    def delete_namespaced_items(self, namespace, subpath=None):
        """Delete all items under ``<queue>/<namespace>/[subpath/]``.

        Returns False when this queue is not namespaced, otherwise the number
        of rows deleted.
        """
        if (not self._has_namespaced_items):
            return False
        subpath_query = (('%s/' % subpath) if subpath else '')
        queue_prefix = ('%s/%s/%s%%' % (self._queue_name, namespace, subpath_query))
        return QueueItem.delete().where((QueueItem.queue_name ** queue_prefix)).execute()

    def alive(self, canonical_name_list):
        """True if an item with this exact canonical name is available or running."""
        canonical_name = self._canonical_name(([self._queue_name] + canonical_name_list))
        try:
            select_query = QueueItem.select().where((QueueItem.queue_name == canonical_name))
            now = datetime.utcnow()
            # Union of "claimable" and "currently processing" states.
            overall_query = (self._available_jobs_where(select_query.clone(), now) | self._running_jobs_where(select_query.clone(), now))
            overall_query.get()
            return True
        except QueueItem.DoesNotExist:
            return False

    def _queue_dict(self, canonical_name_list, message, available_after, retries_remaining):
        # Row dict for inserting a new queue item; available_after is seconds from now.
        return dict(queue_name=self._canonical_name(([self._queue_name] + canonical_name_list)), body=message, retries_remaining=retries_remaining, available_after=(datetime.utcnow() + timedelta(seconds=(available_after or 0))))

    def batch_insert(self, batch_size=DEFAULT_BATCH_SIZE):
        """Context generator that collects puts and inserts them in batches.

        NOTE(review): a generator used via ``yield`` — presumably wrapped by a
        stripped ``@contextmanager``; confirm against the original module.
        """
        items_to_insert = []
        def batch_put(canonical_name_list, message, available_after=0, retries_remaining=5):
            # Queue up a row; nothing is written until the context exits.
            items_to_insert.append(self._queue_dict(canonical_name_list, message, available_after, retries_remaining))
        (yield batch_put)
        remaining = list(items_to_insert)
        while remaining:
            current_batch = remaining[0:batch_size]
            QueueItem.insert_many(current_batch).execute()
            queue_item_puts.labels(self._queue_name).inc(len(current_batch))
            remaining = remaining[batch_size:]

    def put(self, canonical_name_list, message, available_after=0, retries_remaining=5):
        """Insert a single item and return its id as a string."""
        item = QueueItem.create(**self._queue_dict(canonical_name_list, message, available_after, retries_remaining))
        queue_item_puts.labels(self._queue_name).inc()
        return str(item.id)

    def _select_available_item(self, ordering_required, now):
        """Pick a candidate item, FIFO when ordering is required, else randomly.

        The random path samples from (up to) 50 available rows to reduce
        contention between concurrent workers.
        """
        name_match_query = self._name_match_query()
        try:
            if ordering_required:
                running = self._running_jobs(now, name_match_query)
                avail = self._available_jobs_not_running(now, name_match_query, running)
                return avail.order_by(QueueItem.id).get()
            else:
                subquery = self._available_jobs(now, name_match_query).limit(50).alias('j1')
                return QueueItem.select().join(subquery, on=(QueueItem.id == subquery.c.id)).order_by(db_random_func()).get()
        except QueueItem.DoesNotExist:
            return None

    def _attempt_to_claim_item(self, db_item, now, processing_time):
        """Optimistically claim *db_item*; True iff we won the race.

        The WHERE clause on the previously-read ``state_id`` makes this a
        compare-and-swap: a concurrent claimer changes state_id, so our
        UPDATE matches zero rows.
        """
        set_unavailable_query = QueueItem.update(available=False, processing_expires=(now + timedelta(seconds=processing_time)), retries_remaining=(QueueItem.retries_remaining - 1), state_id=str(uuid.uuid4())).where((QueueItem.id == db_item.id), (QueueItem.state_id == db_item.state_id))
        changed = set_unavailable_query.execute()
        return (changed == 1)

    def get(self, processing_time=300, ordering_required=False):
        """Claim and return the next work item, or None when none is claimable.

        The claimed item is leased for *processing_time* seconds; the returned
        AttrDict exposes id, body and the post-claim retries_remaining.
        """
        now = datetime.utcnow()
        db_item = self._select_available_item(ordering_required, now)
        if (db_item is None):
            self._currently_processing = False
            queue_item_gets.labels(self._queue_name, 'nonexistant').inc()
            return None
        was_claimed = self._attempt_to_claim_item(db_item, now, processing_time)
        if (not was_claimed):
            # Lost the claim race to another worker.
            self._currently_processing = False
            queue_item_gets.labels(self._queue_name, 'claimed').inc()
            return None
        self._currently_processing = True
        queue_item_gets.labels(self._queue_name, 'acquired').inc()
        return AttrDict({'id': db_item.id, 'body': db_item.body, 'retries_remaining': (db_item.retries_remaining - 1)})

    def cancel(self, item_id):
        """Delete an item outright; True iff a row was removed."""
        count_removed = QueueItem.delete().where((QueueItem.id == item_id)).execute()
        return (count_removed > 0)

    def complete(self, completed_item):
        """Mark a claimed item as done by deleting it."""
        self._currently_processing = (not self.cancel(completed_item.id))

    def incomplete(self, incomplete_item, retry_after=300, restore_retry=False):
        """Return a claimed item to the queue, optionally refunding its retry.

        Returns True iff the item will be retried (retries remain).
        """
        with self._transaction_factory(db):
            retry_date = (datetime.utcnow() + timedelta(seconds=retry_after))
            try:
                incomplete_item_obj = self._item_by_id_for_update(incomplete_item.id)
                incomplete_item_obj.available_after = retry_date
                incomplete_item_obj.available = True
                if restore_retry:
                    incomplete_item_obj.retries_remaining += 1
                incomplete_item_obj.save()
                self._currently_processing = False
                return (incomplete_item_obj.retries_remaining > 0)
            except QueueItem.DoesNotExist:
                return False

    def extend_processing(self, item, seconds_from_now, minimum_extension=MINIMUM_EXTENSION, updated_data=None):
        """Extend an item's processing lease and/or update its body.

        The lease is only bumped when the extension exceeds
        *minimum_extension* (avoids write churn). Returns True iff the row
        was modified.
        """
        with self._transaction_factory(db):
            try:
                queue_item = self._item_by_id_for_update(item.id)
                new_expiration = (datetime.utcnow() + timedelta(seconds=seconds_from_now))
                has_change = False
                if ((new_expiration - queue_item.processing_expires) > minimum_extension):
                    queue_item.processing_expires = new_expiration
                    has_change = True
                if ((updated_data is not None) and (queue_item.body != updated_data)):
                    queue_item.body = updated_data
                    has_change = True
                if has_change:
                    queue_item.save()
                return has_change
            except QueueItem.DoesNotExist:
                return False
class CalcCriteriaTestCase(unittest.TestCase):
    """Verify the dense pairwise metrics agree with their sparse counterparts."""

    def setUp(self):
        # Two small binary label matrices (5 samples x 4 labels).
        # FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
        # it was only ever an alias for the builtin ``int``, so using ``int``
        # preserves the exact dtype while working on modern NumPy.
        self.Z = np.array([[0, 1, 1, 0], [1, 0, 1, 0], [0, 1, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]], dtype=int)
        self.Y = np.array([[0, 1, 1, 0], [1, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 0, 0]], dtype=int)

    def test_hamming_loss(self):
        """Dense vs. CSR hamming loss must match elementwise."""
        assert_array_equal(pairwise_hamming_loss(self.Z, self.Y), sparse_pairwise_hamming_loss(ss.csr_matrix(self.Z), ss.csr_matrix(self.Y)))

    def test_rank_loss(self):
        """Dense vs. CSR rank loss must match elementwise."""
        assert_array_equal(pairwise_rank_loss(self.Z, self.Y), sparse_pairwise_rank_loss(ss.csr_matrix(self.Z), ss.csr_matrix(self.Y)))

    def test_f1_score(self):
        """Dense vs. CSR F1 score must match elementwise."""
        assert_array_equal(pairwise_f1_score(self.Z, self.Y), sparse_pairwise_f1_score(ss.csr_matrix(self.Z), ss.csr_matrix(self.Y)))

    def test_accuracy_score(self):
        """Dense vs. CSR accuracy score must match elementwise."""
        assert_array_equal(pairwise_accuracy_score(self.Z, self.Y), sparse_pairwise_accuracy_score(ss.csr_matrix(self.Z), ss.csr_matrix(self.Y)))
class CacheClearCommand(Command):
    """``poetry cache clear`` — delete entries from a named repository cache.

    Cache keys take the form ``<repo>``, ``<repo>:<package>`` or
    ``<repo>:<package>:<version>``.
    """

    name = 'cache clear'
    description = 'Clears a Poetry cache by name.'
    arguments = [argument('cache', description='The name of the cache to clear.')]
    options = [option('all', description='Clear all entries in the cache.')]

    def handle(self) -> int:
        """Clear the whole cache (with --all) or one package:version entry.

        Returns 0 on success/no-op; raises on invalid keys or missing --all.
        """
        cache = self.argument('cache')
        parts = cache.split(':')
        root = parts[0]
        config = Config.create()
        cache_dir = (config.repository_cache_directory / root)
        # Guard against path traversal (e.g. ``../../etc`` as the repo name):
        # relative_to raises ValueError when cache_dir escapes the cache root.
        try:
            cache_dir.relative_to(config.repository_cache_directory)
        except ValueError:
            raise ValueError(f'{root} is not a valid repository cache')
        # Note: ``cache`` is rebound from the CLI argument string to the
        # FileCache object from here on.
        cache = FileCache(cache_dir)
        if (len(parts) == 1):
            # Whole-repository clear requires an explicit --all.
            if (not self.option('all')):
                raise RuntimeError(f'Add the --all option if you want to clear all {parts[0]} caches')
            if (not cache_dir.exists()):
                self.line(f'No cache entries for {parts[0]}')
                return 0
            # Count files recursively to show the user what will be removed.
            entries_count = sum((len(files) for (_path, _dirs, files) in os.walk(str(cache_dir))))
            delete = self.confirm(f'<question>Delete {entries_count} entries?</>', True)
            if (not delete):
                return 0
            cache.flush()
        elif (len(parts) == 2):
            raise RuntimeError('Only specifying the package name is not yet supported. Add a specific version to clear')
        elif (len(parts) == 3):
            # repo:package:version — clear exactly one cached entry.
            package = canonicalize_name(parts[1])
            version = parts[2]
            if (not cache.has(f'{package}:{version}')):
                self.line(f'No cache entries for {package}:{version}')
                return 0
            delete = self.confirm(f'Delete cache entry {package}:{version}', True)
            if (not delete):
                return 0
            cache.forget(f'{package}:{version}')
        else:
            raise ValueError('Invalid cache key')
        return 0
def ArrayDataStrategy(draw, id_, n_dim, subtype):
    """Recursively build example data for a (possibly multi-dimensional) array.

    When ``n_dim`` is empty the element itself is drawn — either port data or
    interface data depending on ``subtype``. Otherwise one entry is generated
    per index of the leading dimension, with ``[i]`` appended to the id, and
    the per-element dicts are merged into a single result dict.
    """
    if n_dim:
        merged = {}
        for index in range(n_dim[0]):
            element_id = f'{id_}[{index}]'
            merged.update(draw(ArrayDataStrategy(element_id, n_dim[1:], subtype)))
        return merged
    if isinstance(subtype, rt.Port):
        return draw(InPortDataStrategy(id_, subtype))
    return draw(InterfaceDataStrategy(id_, subtype))
class FreeTypeError(FontException):
    """Font exception carrying a FreeType numeric error code.

    ``_ft_errors`` maps FreeType's FT_Error codes to human-readable strings
    used when rendering the exception.
    """

    def __init__(self, message, errcode):
        self.message = message
        self.errcode = errcode

    def __str__(self):
        # Unknown codes fall back to 'unknown error' rather than raising.
        return ('%s: %s (%s)' % (self.__class__.__name__, self.message, self._ft_errors.get(self.errcode, 'unknown error')))

    # NOTE(review): takes ``cls`` — looks like a stripped ``@classmethod``;
    # confirm against the original module.
    def check_and_raise_on_error(cls, errcode):
        """Raise this exception type if *errcode* is non-zero (FT success is 0)."""
        if (errcode != 0):
            raise cls(None, errcode)

    # FreeType FT_Error code -> description table.
    _ft_errors = {0: 'no error', 1: 'cannot open resource', 2: 'unknown file format', 3: 'broken file', 4: 'invalid FreeType version', 5: 'module version is too low', 6: 'invalid argument', 7: 'unimplemented feature', 8: 'broken table', 9: 'broken offset within table', 16: 'invalid glyph index', 17: 'invalid character code', 18: 'unsupported glyph image format', 19: 'cannot render this glyph format', 20: 'invalid outline', 21: 'invalid composite glyph', 22: 'too many hints', 23: 'invalid pixel size', 32: 'invalid object handle', 33: 'invalid library handle', 34: 'invalid module handle', 35: 'invalid face handle', 36: 'invalid size handle', 37: 'invalid glyph slot handle', 38: 'invalid charmap handle', 39: 'invalid cache manager handle', 40: 'invalid stream handle', 48: 'too many modules', 49: 'too many extensions', 64: 'out of memory', 65: 'unlisted object', 81: 'cannot open stream', 82: 'invalid stream seek', 83: 'invalid stream skip', 84: 'invalid stream read', 85: 'invalid stream operation', 86: 'invalid frame operation', 87: 'nested frame access', 88: 'invalid frame read', 96: 'raster uninitialized', 97: 'raster corrupted', 98: 'raster overflow', 99: 'negative height while rastering', 112: 'too many registered caches', 128: 'invalid opcode', 129: 'too few arguments', 130: 'stack overflow', 131: 'code overflow', 132: 'bad argument', 133: 'division by zero', 134: 'invalid reference', 135: 'found debug opcode', 136: 'found ENDF opcode in execution stream', 137: 'nested DEFS', 138: 'invalid code range', 139: 'execution context too long', 140: 'too many function definitions', 141: 'too many instruction definitions', 142: 'SFNT font table missing', 143: 'horizontal header (hhea, table missing', 144: 'locations (loca, table missing', 145: 'name table missing', 146: 'character map (cmap, table missing', 147: 'horizontal metrics (hmtx, table missing', 148: 'PostScript (post, table missing', 149: 'invalid horizontal metrics', 150: 'invalid character map (cmap, format', 151:
    'invalid ppem value', 152: 'invalid vertical metrics', 153: 'could not find context', 154: 'invalid PostScript (post, table format', 155: 'invalid PostScript (post, table', 160: 'opcode syntax error', 161: 'argument stack underflow', 162: 'ignore', 176: "`STARTFONT' field missing", 177: "`FONT' field missing", 178: "`SIZE' field missing", 179: "`CHARS' field missing", 180: "`STARTCHAR' field missing", 181: "`ENCODING' field missing", 182: "`BBX' field missing", 183: "`BBX' too big"}
class Processor():
    """Facade that post-processes captured frames through a pluggable backend.

    Currently only the NumPy backend is implemented; the backend is imported
    lazily so the dependency is only paid when actually selected.
    """

    def __init__(self, backend=ProcessorBackends.NUMPY, output_color: str='RGB'):
        self.color_mode = output_color
        self.backend = self._initialize_backend(backend)

    def process(self, rect, width, height, region, rotation_angle):
        """Delegate frame post-processing to the selected backend."""
        return self.backend.process(rect, width, height, region, rotation_angle)

    def _initialize_backend(self, backend):
        """Instantiate the concrete processor for *backend*.

        Raises:
            ValueError: for backends that are not (yet) supported.
        """
        if (backend == ProcessorBackends.NUMPY):
            # Lazy import keeps module import cheap when unused.
            from dxcam.processor.numpy_processor import NumpyProcessor
            return NumpyProcessor(self.color_mode)
        # FIX: previously an unknown backend silently fell through, leaving
        # ``self.backend = None`` and producing a confusing AttributeError
        # on the first call to ``process``; fail fast instead.
        raise ValueError(f'Unsupported processor backend: {backend!r}')
def batch_bounds_for_packing(lengths):
    """Return, per packed time step, how many sequences are still active.

    ``lengths`` must be positive and sorted in decreasing order (as required
    for packed sequences); the result has ``max(lengths)`` entries, where
    entry ``t`` is the number of sequences of length > ``t``.

    Raises:
        ValueError: when ``lengths`` is not decreasing/positive.
    """
    bounds = []
    remaining = len(lengths)
    previous_length = 0
    # Walking the reversed (i.e. non-decreasing) lengths groups equal values
    # together, so each group marks where some sequences stop contributing.
    for step, (length, run) in enumerate(itertools.groupby(reversed(lengths))):
        if step and length <= previous_length:
            raise ValueError('lengths must be decreasing and positive')
        # All ``remaining`` sequences cover steps previous_length..length-1.
        bounds += [remaining] * (length - previous_length)
        remaining -= len(list(run))
        previous_length = length
    return bounds
def iter_12(cc_or_eom, k):
    """Yield padding-index tuples for k-point CCSD quantities at k-point *k*.

    Accepts either a ``kccsd_rhf.RCCSD`` object directly or an EOM wrapper
    that holds one in ``_cc``. First yields the occupied padding indices at
    ``k`` (the 1-particle block); then, for every (ki, kj) pair, yields the
    index tuple for the corresponding 2-particle block, where ``kb`` is fixed
    by momentum conservation via ``kconserv``.
    """
    if isinstance(cc_or_eom, kccsd_rhf.RCCSD):
        cc = cc_or_eom
    else:
        # EOM objects carry their parent CCSD object in ``_cc``.
        cc = cc_or_eom._cc
    # Occupied/virtual padding index lists, one entry per k-point.
    (o, v) = padding_k_idx(cc, kind='split')
    kconserv = cc.khelper.kconserv
    (yield (o[k],))
    for ki in range(cc.nkpts):
        for kj in range(cc.nkpts):
            # kb is determined by crystal-momentum conservation for (ki, k, kj).
            kb = kconserv[(ki, k, kj)]
            (yield ((ki,), (kj,), o[ki], o[kj], v[kb]))
def test_wrapper_name():
    """``get_name`` should see through ``Wrapper`` to the wrapped object's name.

    Covers builtin instances (reported by type name), classes, functions,
    and instances of user classes (``bar()`` evidently reports ``'baz'``).
    """
    assert (get_name(Wrapper(42)) == 'int')
    assert (get_name(Wrapper('eat at joe.')) == 'str')
    assert (get_name(Wrapper(str)) == 'str')
    assert (get_name(Wrapper(object)) == 'object')
    assert (get_name(Wrapper(foo)) == 'foo')
    assert (get_name(Wrapper(foo())) == 'foo')
    assert (get_name(Wrapper(bar)) == 'bar')
    # NOTE(review): ``bar()`` instances apparently carry the name 'baz' —
    # presumably set on the class elsewhere; confirm against ``bar``'s definition.
    assert (get_name(Wrapper(bar())) == 'baz')
    assert (get_name(Wrapper(get_name)) == 'get_name')
def _set_tensor_dict(module_dict, hooks, module, name: str, tensor: torch.Tensor) -> None:
was_buffer = False
out = module_dict['_parameters'].pop(name, None)
if (out is None):
out = module_dict['_buffers'].pop(name, None)
was_buffer = (out is not None)
if (out is None):
out = module_dict.pop(name)
if isinstance(tensor, torch.nn.Parameter):
for hook in hooks:
output = hook(module, name, tensor)
if (output is not None):
tensor = output
module_dict['_parameters'][name] = tensor
elif (was_buffer and isinstance(tensor, torch.Tensor)):
module_dict['_buffers'][name] = tensor
else:
module_dict[name] = tensor
return out |
class OutputsCallback(Callback):
    """Lightning callback that streams prediction outputs to per-rank HDF5 files.

    Depending on the flags, it collects attention maps in memory and writes
    logits and selected hidden-state layers ("embeddings") to HDF5 as batches
    are produced, trimming each sequence to its true length.
    """

    def __init__(self, save_dir: Path=Path('./outputs'), layers: List[int]=[(- 1)], output_embeddings: bool=True, output_attentions: bool=False, output_logits: bool=False) -> None:
        # Unique label so multiple ranks can write side-by-side files.
        self.rank_label = uuid.uuid4()
        self.output_attentions = output_attentions
        self.output_logits = output_logits
        self.output_embeddings = output_embeddings
        self.save_dir = save_dir
        self.save_dir.mkdir(parents=True, exist_ok=True)
        # FIX: copy the list. ``layers`` has a mutable default ([-1]) and
        # ``on_predict_start`` rewrites its entries in place; without this
        # copy, every later instance created with the default argument would
        # inherit the already-resolved layer numbers of the first instance.
        self.layers = list(layers)
        (self.attentions, self.indices, self.na_hashes) = ([], [], [])
        # Lazily-opened HDF5 file per embedding layer.
        self.h5embeddings_open: Dict[(int, h5py.File)] = {}
        self.h5logit_file = None
        self.h5_kwargs = {}
        # Cumulative seconds spent in HDF5 writes, reported at the end.
        self.io_time = 0

    def on_predict_start(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule') -> None:
        """Resolve negative layer indices and open the logits file if needed."""
        # +1 because hidden_states includes the input embedding layer.
        num_hidden_layers = (pl_module.model.model.config.num_hidden_layers + 1)
        for ind in range(len(self.layers)):
            layer_num = self.layers[ind]
            if (layer_num < 0):
                # Translate negative (from-the-end) indices to absolute ones.
                self.layers[ind] = (num_hidden_layers + layer_num)
        if self.output_logits:
            self.h5logit_file = h5py.File((self.save_dir / f'logits-{self.rank_label}.h5'), 'w')
            self.h5logit_file.create_group('logits')

    def on_predict_batch_end(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule', outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
        """Persist this batch's requested outputs, trimmed to true sequence lengths."""
        seq_lens = batch['seq_lens'].detach().cpu().numpy().reshape((- 1))
        fasta_inds = batch['indices'].detach().cpu().numpy().reshape((- 1))
        if self.output_attentions:
            # Sum over heads; kept in memory rather than written to disk.
            attend = torch.sum(outputs.attentions[0].detach().cpu().squeeze(), dim=0)
            self.attentions.append(attend)
        if self.output_logits:
            start = time.time()
            logits = outputs.logits.detach().cpu().numpy()
            for (logit, seq_len, fasta_ind) in zip(logits, seq_lens, fasta_inds):
                # One dataset per sequence, keyed by its fasta index.
                self.h5logit_file['logits'].create_dataset(f'{fasta_ind}', data=logit[:seq_len], **self.h5_kwargs)
            self.io_time += (time.time() - start)
        if self.output_embeddings:
            start = time.time()
            for (layer, embeddings) in enumerate(outputs.hidden_states):
                if (layer not in self.layers):
                    continue
                # Open the per-layer file on first use.
                h5_file = self.h5embeddings_open.get(layer)
                if (h5_file is None):
                    name = (self.save_dir / f'embeddings-layer-{layer}-{self.rank_label}.h5')
                    h5_file = h5py.File(name, 'w')
                    h5_file.create_group('embeddings')
                    self.h5embeddings_open[layer] = h5_file
                embed = embeddings.detach().cpu().numpy()
                for (emb, seq_len, fasta_ind) in zip(embed, seq_lens, fasta_inds):
                    h5_file['embeddings'].create_dataset(f'{fasta_ind}', data=emb[:seq_len], **self.h5_kwargs)
                h5_file.flush()
            self.io_time += (time.time() - start)
        self.na_hashes.extend(batch['na_hash'])
        self.indices.append(batch['indices'].detach().cpu())

    def on_predict_end(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule') -> None:
        """Write index/hash metadata, close all files and report I/O time."""
        self.indices = torch.cat(self.indices).numpy().reshape((- 1))
        if self.output_logits:
            start = time.time()
            self.h5logit_file.create_dataset('fasta-indices', data=self.indices, **self.h5_kwargs)
            print(self.na_hashes, flush=True)
            self.h5logit_file.create_dataset('na-hashes', data=self.na_hashes, **self.h5_kwargs)
            self.h5logit_file.close()
            self.io_time += (time.time() - start)
        if self.output_embeddings:
            start = time.time()
            for h5_file in self.h5embeddings_open.values():
                h5_file.create_dataset('fasta-indices', data=self.indices, **self.h5_kwargs)
                h5_file.create_dataset('na-hashes', data=self.na_hashes, **self.h5_kwargs)
            for h5_file in self.h5embeddings_open.values():
                h5_file.close()
            self.io_time += (time.time() - start)
        print('IO time:\t', self.io_time)
def _get_in_video_path(input_videos_dir: Path, video_relative_path: Path) -> Path:
    """Locate the unique input video matching *video_relative_path*.

    Searches the mirrored directory under *input_videos_dir* for video files
    whose name starts with the relative path's file name (any extension), and
    raises ``ValueError`` unless exactly one match is found.
    """
    search_dir = input_videos_dir / video_relative_path.parent
    glob_pattern = video_relative_path.name + '*'
    in_video_paths = [p for p in search_dir.glob(glob_pattern) if is_video_path(p)]
    if len(in_video_paths) != 1:
        raise ValueError(f"There should be exactly one input video path matching the video's relative path. The video_relative_path is: {video_relative_path}, input paths: {in_video_paths}, input video dir: {input_videos_dir}")
    return in_video_paths[0]
def add_subcommand(subparsers, parents):
    """Register the ``migrate`` subcommand on *subparsers*.

    Argument registration order is preserved deliberately — it defines the
    order shown in ``--help`` output.
    """
    parser = subparsers.add_parser('migrate', parents=parents, help='Migrate a configuration file to the current API.')
    parser.add_argument('-c', '--config', action='store', default=get_config_file(), help='Use the specified configuration file (migrates every .py file in this directory).')
    parser.add_argument('--yes', action='store_true', help='Automatically apply diffs with no confirmation.')
    parser.add_argument('--show-diff', action='store_true', help='When used with --yes, will still output diff.')
    parser.add_argument('--lint', action='store_true', help="Providing linting output but don't update config.")
    parser.add_argument('--list-migrations', action='store_true', help='List available migrations.')
    parser.add_argument('--info', metavar='ID', help='Show detailed info for the migration with the given ID')
    parser.add_argument('--after-version', metavar='VERSION', type=version_tuple, help='Run migrations introduced after VERSION.')
    # Comma-separated list parsed into a Python list at argparse time.
    parser.add_argument('-r', '--run-migrations', type=(lambda value: value.split(',')), metavar='ID', help='Run named migration[s]. Comma separated list for multiple migrations')
    parser.add_argument('--no-colour', action='store_true', help='Do not use colour in diff output.')
    parser.add_argument('-v', '--verbose', action='store_true', help='Increase output verbosity')
    # The callable instance dispatches the actual migration when invoked.
    parser.set_defaults(func=QtileMigrate())
class ScratchPad(group._Group):
    """A special qtile group hosting spawn-on-demand "dropdown" windows.

    Dropdown clients are spawned lazily, matched back to their config when the
    client appears (via ``client_new``), and toggled in/out of view. Hook
    subscriptions are added only while there is something to track and removed
    again when the last dropdown disappears.
    """

    def __init__(self, name='scratchpad', dropdowns: (list[config.DropDown] | None)=None, label='', single=False):
        group._Group.__init__(self, name, label=label)
        # name -> DropDown config, as declared by the user.
        self._dropdownconfig = ({dd.name: dd for dd in dropdowns} if (dropdowns is not None) else {})
        # name -> live toggler for dropdowns whose client currently exists.
        self.dropdowns: dict[(str, DropDownToggler)] = {}
        # name -> Match used to recognize a client we spawned but haven't seen yet.
        self._spawned: dict[(str, Match)] = {}
        # names whose window should be hidden as soon as it is (re)captured.
        self._to_hide: list[str] = []
        # When True, showing one dropdown hides all the others.
        self._single = single

    def _check_unsubscribe(self):
        # Drop the lifecycle hooks once no dropdown windows remain.
        if (not self.dropdowns):
            hook.unsubscribe.client_killed(self.on_client_killed)
            hook.unsubscribe.float_change(self.on_float_change)

    def _spawn(self, ddconfig):
        """Launch the dropdown's command unless a spawn is already pending."""
        name = ddconfig.name
        if (name not in self._spawned):
            if (not self._spawned):
                # First pending spawn: start watching for new clients.
                hook.subscribe.client_new(self.on_client_new)
            pid = self.qtile.spawn(ddconfig.command)
            # Match by explicit config match if given, else by the child's pid.
            self._spawned[name] = (ddconfig.match or Match(net_wm_pid=pid))

    def on_client_new(self, client, *args, **kwargs):
        """Claim a newly-created client if it matches a pending spawn."""
        name = None
        for (n, match) in self._spawned.items():
            if match.compare(client):
                name = n
                break
        if (name is not None):
            self._spawned.pop(name)
            if (not self._spawned):
                # No more pending spawns to watch for.
                hook.unsubscribe.client_new(self.on_client_new)
            self.dropdowns[name] = DropDownToggler(client, self.name, self._dropdownconfig[name])
            if self._single:
                # Exclusive mode: only the new dropdown stays visible.
                for (n, d) in self.dropdowns.items():
                    if (n != name):
                        d.hide()
            if (name in self._to_hide):
                # Restored-from-state windows may need to start hidden.
                self.dropdowns[name].hide()
                self._to_hide.remove(name)
            if (len(self.dropdowns) == 1):
                # First live dropdown: start tracking kill/float events.
                hook.subscribe.client_killed(self.on_client_killed)
                hook.subscribe.float_change(self.on_float_change)

    def on_client_killed(self, client, *args, **kwargs):
        """Forget the dropdown whose client was destroyed."""
        name = None
        for (name, dd) in self.dropdowns.items():
            if (dd.window is client):
                # Safe: we break immediately after mutating during iteration.
                del self.dropdowns[name]
                break
        self._check_unsubscribe()

    def on_float_change(self, *args, **kwargs):
        """Release any dropdown that stopped floating (or left this group)."""
        name = None
        for (name, dd) in self.dropdowns.items():
            if (not dd.window.floating):
                if (dd.window.group is not self):
                    dd.unsubscribe()
                    del self.dropdowns[name]
                    break
        self._check_unsubscribe()

    # NOTE(review): the bare ``_command()`` calls before the methods below look
    # like stripped ``@expose_command()`` decorators; confirm upstream.
    _command()
    def dropdown_toggle(self, name):
        """Show/hide the named dropdown, spawning its client if needed."""
        if self._single:
            for (n, d) in self.dropdowns.items():
                if (n != name):
                    d.hide()
        if (name in self.dropdowns):
            self.dropdowns[name].toggle()
        elif (name in self._dropdownconfig):
            self._spawn(self._dropdownconfig[name])

    _command()
    def hide_all(self):
        """Hide every visible dropdown."""
        for d in self.dropdowns.values():
            d.hide()

    _command()
    def dropdown_reconfigure(self, name, **kwargs):
        """Update attributes of the named dropdown's config in place."""
        if (name not in self._dropdownconfig):
            return
        dd = self._dropdownconfig[name]
        for (attr, value) in kwargs.items():
            # Silently ignore unknown attribute names.
            if hasattr(dd, attr):
                setattr(dd, attr, value)

    _command()
    def dropdown_info(self, name=None):
        """Describe one dropdown (live state preferred) or list all names."""
        if (name is None):
            return {'dropdowns': [ddname for ddname in self._dropdownconfig]}
        elif (name in self.dropdowns):
            return self.dropdowns[name].info()
        elif (name in self._dropdownconfig):
            return self._dropdownconfig[name].info()
        else:
            raise ValueError(('No DropDown named "%s".' % name))

    def get_state(self):
        """Serialize live dropdowns as (name, window id, visible) tuples."""
        state = []
        for (name, dd) in self.dropdowns.items():
            client_wid = dd.window.wid
            state.append((name, client_wid, dd.visible))
        return state

    def restore_state(self, state, restart: bool) -> list[int]:
        """Re-adopt dropdown windows after restart/reload.

        On restart the windows are matched by wid when they re-announce
        themselves; on reload they are wrapped immediately from
        ``qtile.windows_map``. Returns wids that no longer have a config
        entry ("orphans") for the caller to dispose of.
        """
        orphans = []
        for (name, wid, visible) in state:
            if (name in self._dropdownconfig):
                if restart:
                    self._spawned[name] = Match(wid=wid)
                    if (not visible):
                        self._to_hide.append(name)
                else:
                    self.dropdowns[name] = DropDownToggler(self.qtile.windows_map[wid], self.name, self._dropdownconfig[name])
                    if (not visible):
                        self.dropdowns[name].hide()
            else:
                orphans.append(wid)
        if self._spawned:
            # Pending wid-matches only make sense across a restart.
            assert restart
            hook.subscribe.client_new(self.on_client_new)
        if ((not restart) and self.dropdowns):
            hook.subscribe.client_killed(self.on_client_killed)
            hook.subscribe.float_change(self.on_float_change)
        return orphans
def load_inferred_feature(feature_path: str, banning_gifs: set=frozenset()):
    """Load precomputed GIF feature vectors from a CSV file.

    Args:
        feature_path: CSV with at least ``gif_id`` and ``gif_feature``
            columns, where ``gif_feature`` holds the textual repr of a list.
        banning_gifs: GIF ids to exclude from the result.
            (FIX: default changed from the mutable ``set()`` to an immutable
            ``frozenset()`` — behaviorally identical for membership tests,
            but immune to the shared-mutable-default pitfall.)

    Returns:
        dict with ``gif_features`` (2-D ``np.ndarray``, one row per kept gif),
        ``gif_index_to_id`` (row index -> id) and ``gif_id_to_index``
        (id -> row index).
    """
    _gif_ds = pd.read_csv(feature_path)
    # Parse the stringified feature lists safely (literal_eval, never eval).
    _gif_ds['gif_feature'] = _gif_ds['gif_feature'].apply(ast.literal_eval).apply(np.array)
    # Vectorized exclusion filter (replaces a per-row Python lambda).
    _gif_ds = _gif_ds[~_gif_ds['gif_id'].isin(banning_gifs)]
    gif_index_to_id = _gif_ds['gif_id'].to_list()
    return {
        'gif_features': np.stack(_gif_ds['gif_feature'].to_list()),
        'gif_index_to_id': gif_index_to_id,
        'gif_id_to_index': {gif_id: idx for (idx, gif_id) in enumerate(gif_index_to_id)},
    }
# NOTE(review): the bare string below looks like the argument of a stripped
# ``@patch('PyQt6...mousePressEvent')`` decorator supplying ``mouse_mock``;
# confirm against the original test module.
('PyQt6.QtWidgets.QGraphicsScene.mousePressEvent')
def test_mouse_press_event_when_left_click_over_diff_item_in_edit_mode(mouse_mock, view, item):
    """Left-clicking a different item while a text item is in edit mode should
    exit edit mode and fall through to normal move handling."""
    txtitem = BeeTextItem('foo bar')
    txtitem.exit_edit_mode = MagicMock()
    view.scene.addItem(txtitem)
    view.scene.edit_item = txtitem
    # The click lands on ``item``, not on the text item being edited.
    view.scene.itemAt = MagicMock(return_value=item)
    event = MagicMock(button=MagicMock(return_value=Qt.MouseButton.LeftButton))
    view.scene.mousePressEvent(event)
    # Event must propagate to the base class handler, not be swallowed.
    event.accept.assert_not_called()
    mouse_mock.assert_called_once_with(event)
    txtitem.exit_edit_mode.assert_called_once_with()
    assert (view.scene.move_active is True)
    assert (view.scene.rubberband_active is False)
# NOTE(review): the bare ``_db`` below looks like residue of a stripped
# ``@pytest.mark.django_db`` marker; confirm against the original test module.
_db
def test_submit_talk_with_not_valid_conf_topic(graphql_client, user, conference_factory, topic_factory):
    """Submitting a talk with a topic not configured on the conference
    must produce a validation error, not a created submission."""
    graphql_client.force_login(user)
    conference = conference_factory(topics=('my-topic',), languages=('it',), submission_types=('talk',), active_cfp=True, durations=('50',), audience_levels=('Beginner',))
    # Deliberately NOT one of the conference's topics.
    topic = topic_factory(name='random topic')
    (resp, _) = _submit_talk(graphql_client, conference, topic=topic.id)
    assert (resp['data']['sendSubmission']['__typename'] == 'SendSubmissionErrors')
    assert (resp['data']['sendSubmission']['errors']['validationTopic'] == ['Not a valid topic'])
def test_contextvar_support() -> None:
    """A task runs in a copy of the caller's context: it sees values set
    before the run, its own mutations are visible inside the task, and the
    outer context is untouched afterwards."""
    var: contextvars.ContextVar[str] = contextvars.ContextVar('test')
    var.set('before')
    assert (var.get() == 'before')
    async def inner() -> None:
        task = _core.current_task()
        # The task's context inherited the value set before the run started.
        assert (task.context.get(var) == 'before')
        assert (var.get() == 'before')
        var.set('after')
        assert (var.get() == 'after')
        assert (var in task.context)
        assert (task.context.get(var) == 'after')
    _core.run(inner)
    # Mutations inside the task's context copy do not leak out.
    assert (var.get() == 'before')
def parse_args():
    """Build and parse the command-line arguments of the translator CLI.

    Returns the populated ``argparse.Namespace``; all options except
    ``--parameters`` are required.
    """
    parser = argparse.ArgumentParser(
        description='Translate using existing NMT models',
        usage='translator.py [<args>] [-h | --help]')
    add = parser.add_argument
    add('--input', type=str, required=True, nargs=2, help='Path of input file')
    add('--output', type=str, required=True, help='Path of output file')
    add('--checkpoint', type=str, required=True, help='Path of trained models')
    add('--vocabulary', type=str, nargs=2, required=True, help='Path of source and target vocabulary')
    add('--model', type=str, required=True, help='Name of the model')
    add('--parameters', type=str, help='Additional hyper parameters')
    return parser.parse_args()
def handle_netif_receive_skb(event_info):
    """perf-script handler for the ``netif_receive_skb`` tracepoint.

    Records the skb event both in the per-CPU event list (only for CPUs
    already registered in ``net_rx_dic``) and in the global bounded
    ``rx_skb_list`` ring; overflow is counted in ``of_count_rx_skb_list``.
    """
    global of_count_rx_skb_list
    (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info
    if (cpu in net_rx_dic.keys()):
        rec_data = {'event_name': 'netif_receive_skb', 'event_t': time, 'skbaddr': skbaddr, 'len': skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        # Newest-first ring buffer bounded by ``buffer_budget``.
        rx_skb_list.insert(0, rec_data)
        if (len(rx_skb_list) > buffer_budget):
            rx_skb_list.pop()
            of_count_rx_skb_list += 1
class MobileNet(nn.Module):
    """MobileNet-v1 classifier built from depthwise-separable convolutions.

    ``width_multiplier`` (alpha) uniformly scales every channel count;
    ``class_num`` sets the size of the final linear layer.
    """

    def __init__(self, width_multiplier=1, class_num=100):
        super().__init__()
        alpha = width_multiplier

        def scaled(channels):
            # Apply the width multiplier to a base channel count.
            return int(channels * alpha)

        self.stem = nn.Sequential(
            BasicConv2d(3, scaled(32), 3, padding=1, bias=False),
            DepthSeperabelConv2d(scaled(32), scaled(64), 3, padding=1, bias=False),
        )
        self.conv1 = nn.Sequential(
            DepthSeperabelConv2d(scaled(64), scaled(128), 3, stride=2, padding=1, bias=False),
            DepthSeperabelConv2d(scaled(128), scaled(128), 3, padding=1, bias=False),
        )
        self.conv2 = nn.Sequential(
            DepthSeperabelConv2d(scaled(128), scaled(256), 3, stride=2, padding=1, bias=False),
            DepthSeperabelConv2d(scaled(256), scaled(256), 3, padding=1, bias=False),
        )
        # One strided block followed by five identity-shaped blocks at 512 ch.
        self.conv3 = nn.Sequential(
            DepthSeperabelConv2d(scaled(256), scaled(512), 3, stride=2, padding=1, bias=False),
            *[DepthSeperabelConv2d(scaled(512), scaled(512), 3, padding=1, bias=False) for _ in range(5)],
        )
        self.conv4 = nn.Sequential(
            DepthSeperabelConv2d(scaled(512), scaled(1024), 3, stride=2, padding=1, bias=False),
            DepthSeperabelConv2d(scaled(1024), scaled(1024), 3, padding=1, bias=False),
        )
        self.fc = nn.Linear(scaled(1024), class_num)
        self.avg = nn.AdaptiveAvgPool2d(1)

    def forward(self, x):
        """Run the network; returns (batch, class_num) logits."""
        features = self.stem(x)
        features = self.conv1(features)
        features = self.conv2(features)
        features = self.conv3(features)
        features = self.conv4(features)
        pooled = self.avg(features)
        flattened = pooled.view(pooled.size(0), (- 1))
        return self.fc(flattened)
def sphinx_build(test_dir, confoverrides=None):
    """Build the Sphinx project under ``tests/<test_dir>`` with the text builder.

    Generator-style helper: yields once after a forced full build, then
    always removes ``_build`` and restores the working directory on exit.
    NOTE(review): the ``yield`` + try/finally shape suggests this was
    decorated with ``@contextmanager`` upstream -- confirm at the call site.
    """
    os.chdir('tests/{0}'.format(test_dir))
    try:
        builder = Sphinx(
            srcdir='.',
            confdir='.',
            outdir='_build/text',
            doctreedir='_build/.doctrees',
            buildername='text',
            confoverrides=confoverrides,
        )
        builder.build(force_all=True)
        yield
    finally:
        # Clean up build artifacts and return to the repository root.
        if os.path.exists('_build'):
            shutil.rmtree('_build')
        os.chdir('../..')
class CosineLrUpdaterHook(LrUpdaterHook):
    """LR hook that anneals the learning rate along a cosine curve.

    The rate decays from ``base_lr`` down to ``target_lr`` across the whole
    run, measured in epochs or iterations depending on ``by_epoch``.
    """

    def __init__(self, target_lr=0, **kwargs):
        # Final learning rate reached at the end of training.
        self.target_lr = target_lr
        super(CosineLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        """Return the cosine-annealed LR for the runner's current progress."""
        if self.by_epoch:
            current, total = runner.epoch, runner.max_epochs
        else:
            current, total = runner.iter, runner.max_iters
        decay = 0.5 * (base_lr - self.target_lr)
        return self.target_lr + decay * (1 + cos(pi * (current / total)))
def filter_by_size(indices, dataset, max_positions, raise_exception=False):
    """Filter out sample indices whose size exceeds ``max_positions``.

    Args:
        indices (np.ndarray): candidate sample indices.
        dataset: dataset exposing either a ``sizes`` array/list or a
            ``size(index)`` callable.
        max_positions: maximum allowed sample size; non-numeric limits are
            delegated to ``_filter_by_size_dynamic``.
        raise_exception (bool): if True, raise when any sample is filtered.

    Returns:
        np.ndarray: the indices whose sizes fit within ``max_positions``.

    Raises:
        Exception: if ``raise_exception`` and at least one sample was ignored.
    """
    if isinstance(max_positions, (float, int)):
        if hasattr(dataset, 'sizes') and isinstance(dataset.sizes, np.ndarray):
            # Fast vectorized path over a flat numpy sizes array.
            ignored = indices[dataset.sizes[indices] > max_positions].tolist()
            indices = indices[dataset.sizes[indices] <= max_positions]
        elif hasattr(dataset, 'sizes') and isinstance(dataset.sizes, list) and (len(dataset.sizes) == 1):
            # Single-source datasets store sizes as a one-element list.
            ignored = indices[dataset.sizes[0][indices] > max_positions].tolist()
            indices = indices[dataset.sizes[0][indices] <= max_positions]
        else:
            (indices, ignored) = _filter_by_size_dynamic(indices, dataset.size, max_positions)
    else:
        # Structured limits (e.g. per-position tuples) need the dynamic check.
        (indices, ignored) = _filter_by_size_dynamic(indices, dataset.size, max_positions)
    if len(ignored) > 0 and raise_exception:
        raise Exception(
            'Size of sample #{} is invalid (={}) since max_positions={}, '
            'skip this example with --skip-invalid-size-inputs-valid-test'.format(
                ignored[0], dataset.size(ignored[0]), max_positions
            )
        )
    if len(ignored) > 0:
        # Logger.warn is a deprecated alias for Logger.warning.
        logger.warning(
            '{} samples have invalid sizes and will be skipped, max_positions={}, '
            'first few sample ids={}'.format(len(ignored), max_positions, ignored[:10])
        )
    return indices
def build_dataloader(dataset, imgs_per_gpu, workers_per_gpu, num_gpus=1, dist=True, **kwargs):
    """Create a DataLoader for (optionally distributed) training.

    Args:
        dataset: dataset to load; group-aware samplers read its metadata.
        imgs_per_gpu (int): samples placed on each GPU per batch.
        workers_per_gpu (int): dataloader worker processes per GPU.
        num_gpus (int): GPU count (non-distributed only); scales batch size.
        dist (bool): whether to use distributed samplers.
        **kwargs: forwarded to DataLoader (``shuffle`` is consumed here).

    Returns:
        torch.utils.data.DataLoader: the configured loader.
    """
    # Consume 'shuffle' instead of forwarding it: DataLoader raises a
    # ValueError when both a sampler and shuffle=True are supplied.
    shuffle = kwargs.pop('shuffle', True)
    if dist:
        (rank, world_size) = get_dist_info()
        if shuffle:
            sampler = DistributedGroupSampler(dataset, imgs_per_gpu, world_size, rank)
        else:
            sampler = DistributedSampler(dataset, world_size, rank, shuffle=False)
        batch_size = imgs_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = GroupSampler(dataset, imgs_per_gpu) if shuffle else None
        # A single process feeds all GPUs, so scale batch size and workers.
        batch_size = num_gpus * imgs_per_gpu
        num_workers = num_gpus * workers_per_gpu
    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),
        pin_memory=False,
        **kwargs,
    )
    return data_loader
def create_font(n, param):
    """Emit the C source table for font number ``n``.

    ``param`` is ``(characters, point_size)``: each character is rendered
    from ``font.ttf`` at the given size via ``create_character`` and then
    printed as an entry of a PROGMEM ``font_character`` array.
    """
    face = ImageFont.truetype('font.ttf', param[1])
    chars = param[0]
    metrics = {ch: create_character(n, ch, face) for ch in chars}
    print('const PROGMEM struct font_character font%d[] = {' % n)
    for ch in chars:
        entry = metrics[ch]
        # entry[0:3] are the per-character values produced by
        # create_character -- presumably width/height/offset; confirm there.
        print('{%d, %d, %d, %d, font%d_%02x},' % (ord(ch), entry[0], entry[1], entry[2], n, ord(ch)))
    print('};')
()
('-c', '--checkpoint', required=True)
('-o', '--output_dir', required=True)
('-d', '--device', default='cuda:0')
def main(checkpoint, output_dir, device):
    """Evaluate a trained policy checkpoint and dump metrics to JSON.

    Loads the pickled workspace from *checkpoint*, restores the policy
    (the EMA copy when training used EMA), runs the task's env runner on
    *device*, and writes ``<output_dir>/eval_log.json``.
    """
    if os.path.exists(output_dir):
        # Guard against silently clobbering a previous evaluation run.
        click.confirm(f'Output path {output_dir} already exists! Overwrite?', abort=True)
    pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
    # dill is required because the checkpoint pickles non-trivial objects.
    payload = torch.load(open(checkpoint, 'rb'), pickle_module=dill)
    cfg = payload['cfg']
    cls = hydra.utils.get_class(cfg._target_)
    workspace = cls(cfg, output_dir=output_dir)
    workspace: BaseWorkspace
    workspace.load_payload(payload, exclude_keys=None, include_keys=None)
    policy = workspace.model
    if cfg.training.use_ema:
        # Prefer the exponential-moving-average weights when available.
        policy = workspace.ema_model
    device = torch.device(device)
    policy.to(device)
    policy.eval()
    env_runner = hydra.utils.instantiate(cfg.task.env_runner, output_dir=output_dir)
    runner_log = env_runner.run(policy)
    json_log = dict()
    for (key, value) in runner_log.items():
        if isinstance(value, wandb.sdk.data_types.video.Video):
            # Videos are not JSON-serializable; record their file path instead.
            json_log[key] = value._path
        else:
            json_log[key] = value
    out_path = os.path.join(output_dir, 'eval_log.json')
    json.dump(json_log, open(out_path, 'w'), indent=2, sort_keys=True)
def decrypt_object(d):
    """Recursively collect named object descriptors from a nested dict.

    Walks ``d`` and every dict-valued property value beneath it, returning
    a mapping of object name -> descriptor.  Entries named
    'Object'/'object' and entries whose 'type' is listed in
    ``utils.TYPE_CORRESPONDENCE`` are skipped.

    Args:
        d (dict | None): descriptor with 'name', 'type' and 'props' keys.

    Returns:
        dict: name -> descriptor for every discovered object.
    """
    objects = dict()
    if d is None:
        return objects
    if d.get('name') in ['Object', 'object']:
        return objects
    if d.get('type') in utils.TYPE_CORRESPONDENCE:
        return objects
    objects.update({d.get('name'): d})
    # Guard against descriptors that carry no 'props' mapping at all
    # (the original crashed with AttributeError on None.items()).
    for prop_name, prop_value in (d.get('props') or {}).items():
        value = prop_value.get('value')
        if value is None:
            continue
        if not isinstance(value, dict):
            continue
        if value.get('name') in ['Object', 'object']:
            continue
        objects.update(decrypt_object(value))
    return objects
class SegmentSequence():
    """An ordered collection of FinTS3 segments with search and dump helpers."""
    def __init__(self, segments=None):
        # Raw wire bytes are exploded and parsed into segment objects;
        # anything else is treated as an iterable of parsed segments.
        if isinstance(segments, bytes):
            from .parser import FinTS3Parser
            parser = FinTS3Parser()
            data = parser.explode_segments(segments)
            segments = [parser.parse_segment(segment) for segment in data]
        self.segments = (list(segments) if segments else [])
    def render_bytes(self) -> bytes:
        """Serialize the whole sequence back to the FinTS3 wire format."""
        from .parser import FinTS3Serializer
        return FinTS3Serializer().serialize_message(self)
    def __repr__(self):
        return '{}.{}({!r})'.format(self.__class__.__module__, self.__class__.__name__, self.segments)
    def print_nested(self, stream=None, level=0, indent='    ', prefix='', first_level_indent=True, trailer='', print_doc=True, first_line_suffix=''):
        """Pretty-print the sequence as nested constructor calls.

        Each contained segment is asked to print itself one level deeper;
        with ``print_doc`` the first docstring line of a segment is
        appended as a trailing ``#`` comment.
        """
        import sys
        stream = (stream or sys.stdout)
        stream.write((((((prefix + (level * indent)) if first_level_indent else '') + '{}.{}(['.format(self.__class__.__module__, self.__class__.__name__)) + first_line_suffix) + '\n'))
        for segment in self.segments:
            docstring = (print_doc and segment.__doc__)
            if docstring:
                docstring = docstring.splitlines()[0].strip()
            if docstring:
                docstring = ' # {}'.format(docstring)
            else:
                docstring = ''
            segment.print_nested(stream=stream, level=(level + 1), indent=indent, prefix=prefix, first_level_indent=True, trailer=',', print_doc=print_doc, first_line_suffix=docstring)
        stream.write(((prefix + (level * indent)) + ']){}\n'.format(trailer)))
    def find_segments(self, query=None, version=None, callback=None, recurse=True, throw=False):
        """Yield segments matching ``query``/``version``/``callback``.

        ``query`` entries may be segment classes (isinstance match) or
        header-type strings; ``version`` filters on header version.  With
        ``recurse`` the search descends into any field value that itself
        supports ``find_segments``.  With ``throw``, raises
        FinTSNoResponseError when nothing matched at all.
        """
        found_something = False
        # Normalize query/version/callback to uniform filter collections.
        if (query is None):
            query = []
        elif (isinstance(query, str) or (not isinstance(query, (list, tuple, Iterable)))):
            query = [query]
        if (version is None):
            version = []
        elif (not isinstance(version, (list, tuple, Iterable))):
            version = [version]
        if (callback is None):
            callback = (lambda s: True)
        for s in self.segments:
            if (((not query) or any(((isinstance(s, t) if isinstance(t, type) else (s.header.type == t)) for t in query))) and ((not version) or any(((s.header.version == v) for v in version))) and callback(s)):
                (yield s)
                found_something = True
            if recurse:
                # Also search nested sequences stored in this segment's fields.
                for (name, field) in s._fields.items():
                    val = getattr(s, name)
                    if (val and hasattr(val, 'find_segments')):
                        for v in val.find_segments(query=query, version=version, callback=callback, recurse=recurse):
                            (yield v)
                            found_something = True
        if (throw and (not found_something)):
            raise FinTSNoResponseError("The bank's response did not contain a response to your request, please inspect debug log.")
    def find_segment_first(self, *args, **kwargs):
        """Return the first matching segment, or None."""
        for m in self.find_segments(*args, **kwargs):
            return m
        return None
    def find_segment_highest_version(self, query=None, version=None, callback=None, recurse=True, default=None):
        """Return the matching segment with the highest header version.

        ``default`` is returned when nothing matched.
        """
        retval = None
        for s in self.find_segments(query=query, version=version, callback=callback, recurse=recurse):
            if ((not retval) or (s.header.version > retval.header.version)):
                retval = s
        if (retval is None):
            return default
        return retval
def get_synthesizability(molecule):
    """Score how synthesizable *molecule* (a SMILES string) is, in [0, 1].

    Buyable molecules score 1.0 outright.  Otherwise the ASKCOS tree
    builder is queried (with retries); on success its result is scored,
    and on failure the SA-score heuristic is used as a fallback.
    """
    buyable = get_buyability(molecule)
    if buyable:
        return 1.0
    # NOTE(review): the HOST literal was truncated in this copy of the file
    # ("HOST = '"); restored to the public ASKCOS server -- TODO confirm
    # the URL actually used by this project.
    HOST = 'https://askcos.mit.edu'
    params = {'smiles': molecule, 'max_depth': 5, 'max_branching': 25, 'expansion_time': 60, 'max_ppg': 100, 'template_count': 1000, 'max_cum_prob': 0.999, 'chemical_property_logic': 'none', 'max_chemprop_c': 0, 'max_chemprop_n': 0, 'max_chemprop_o': 0, 'max_chemprop_h': 0, 'chemical_popularity_logic': 'none', 'min_chempop_reactants': 5, 'min_chempop_products': 5, 'filter_threshold': 0.1, 'return_first': 'true'}
    # Retry a few times; the tree builder reports transient failures via an
    # 'error' key in the JSON payload.
    for _ in range(15):
        resp = requests.get(HOST + '/api/treebuilder/', params=params)
        if 'error' not in resp.json().keys():
            break
    # NOTE(review): the original condition was inverted ('error' NOT in),
    # which routed every successful response to the SA-score fallback and
    # crashed on error responses (missing 'trees' key).  Fall back only when
    # the request failed or produced no synthesis trees.
    if ('error' in resp.json().keys()) or (len(resp.json()['trees']) == 0):
        sa_score = sascorer.calculateScore(molecule)
        return sa_gaussian_wrapper(sa_score)
    return synthesizability_wrapper(resp.json())
class PipInstall(metaclass=ABCMeta):
    """Install a wheel into a virtual environment from an extracted image.

    The wheel is unpacked once into ``image_folder``; installs then sync
    that image into the environment's purelib and generate console entry
    points.  Subclasses decide how files are synced (copy vs. symlink) by
    implementing ``_sync`` and ``_fix_records``.
    """

    def __init__(self, wheel, creator, image_folder) -> None:
        self._wheel = wheel
        self._creator = creator
        self._image_dir = image_folder
        self._extracted = False
        self.__dist_info = None
        self._console_entry_points = None

    def _sync(self, src, dst):
        """Transfer ``src`` into ``dst``; must be implemented by subclasses."""
        raise NotImplementedError

    def install(self, version_info):
        """Sync the extracted image into the environment and create scripts."""
        self._extracted = True
        self._uninstall_previous_version()
        # Mirror every top-level entry of the image into site-packages.
        for filename in self._image_dir.iterdir():
            into = self._creator.purelib / filename.name
            self._sync(filename, into)
        consoles = set()
        script_dir = self._creator.script_dir
        for name, module in self._console_scripts.items():
            consoles.update(self._create_console_entry_point(name, module, script_dir, version_info))
        logging.debug('generated console scripts %s', ' '.join(i.name for i in consoles))

    def build_image(self):
        """Extract the wheel into the image folder and fix up its RECORD."""
        logging.debug('build install image for %s to %s', self._wheel.name, self._image_dir)
        with zipfile.ZipFile(str(self._wheel)) as zip_ref:
            self._shorten_path_if_needed(zip_ref)
            zip_ref.extractall(str(self._image_dir))
            self._extracted = True
        new_files = self._generate_new_files()
        self._fix_records(new_files)

    def _shorten_path_if_needed(self, zip_ref):
        """On Windows, shorten the target folder to dodge the 260-char limit."""
        if os.name == 'nt':
            to_folder = str(self._image_dir)
            zip_max_len = max(len(i) for i in zip_ref.namelist())
            path_len = zip_max_len + len(to_folder)
            if path_len > 260:
                self._image_dir.mkdir(exist_ok=True)
                from virtualenv.util.path import get_short_path_name
                to_folder = get_short_path_name(to_folder)
                self._image_dir = Path(to_folder)

    def _records_text(self, files):
        """Render RECORD rows (path,hash,size) with empty hash/size fields."""
        return '\n'.join(f'{os.path.relpath(str(rec), str(self._image_dir))},,' for rec in files)

    def _generate_new_files(self):
        """Create INSTALLER/marker files plus console scripts; return them all."""
        new_files = set()
        installer = self._dist_info / 'INSTALLER'
        installer.write_text('pip\n', encoding='utf-8')
        new_files.add(installer)
        # Marker file flags the package as installed via a virtualenv image.
        marker = self._image_dir / f'{self._dist_info.stem}.virtualenv'
        marker.write_text('', encoding='utf-8')
        new_files.add(marker)
        folder = mkdtemp()
        try:
            to_folder = Path(folder)
            rel = os.path.relpath(str(self._creator.script_dir), str(self._creator.purelib))
            version_info = self._creator.interpreter.version_info
            for name, module in self._console_scripts.items():
                new_files.update(
                    Path(os.path.normpath(str(self._image_dir / rel / i.name)))
                    for i in self._create_console_entry_point(name, module, to_folder, version_info)
                )
        finally:
            safe_delete(folder)
        return new_files

    @property
    def _dist_info(self):
        """The extracted ``*.dist-info`` directory (None before extraction).

        NOTE: restored as a @property -- the class consistently uses it as
        an attribute (``self._dist_info / 'INSTALLER'``, ``.stem``).
        """
        if self._extracted is False:
            return None
        if self.__dist_info is None:
            files = []
            for filename in self._image_dir.iterdir():
                files.append(filename.name)
                if filename.suffix == '.dist-info':
                    self.__dist_info = filename
                    break
            else:
                msg = f"no .dist-info at {self._image_dir}, has {', '.join(files)}"
                raise RuntimeError(msg)
        return self.__dist_info

    def _fix_records(self, extra_record_data):
        """Rewrite RECORD for the synced files; implemented by subclasses."""
        raise NotImplementedError

    @property
    def _console_scripts(self):
        """Mapping of console script name -> entry point (None pre-extract).

        NOTE: restored as a @property -- callers iterate
        ``self._console_scripts.items()`` without calling it.
        """
        if self._extracted is False:
            return None
        if self._console_entry_points is None:
            self._console_entry_points = {}
            entry_points = self._dist_info / 'entry_points.txt'
            if entry_points.exists():
                parser = ConfigParser()
                with entry_points.open(encoding='utf-8') as file_handler:
                    parser.read_file(file_handler)
                if 'console_scripts' in parser.sections():
                    for name, value in parser.items('console_scripts'):
                        # Strip a trailing version suffix, e.g. "pip-3.8" -> "pip".
                        match = re.match('(.*?)-?\\d\\.?\\d*', name)
                        our_name = match.groups(1)[0] if match else name
                        self._console_entry_points[our_name] = value
        return self._console_entry_points

    def _create_console_entry_point(self, name, value, to_folder, version_info):
        """Write a console script for ``name = value`` into ``to_folder``."""
        result = []
        maker = ScriptMakerCustom(to_folder, version_info, self._creator.exe, name)
        specification = f'{name} = {value}'
        new_files = maker.make(specification)
        result.extend(Path(i) for i in new_files)
        return result

    def _uninstall_previous_version(self):
        """Remove any previously installed copy of this distribution."""
        dist_name = self._dist_info.stem.split('-')[0]
        in_folders = chain.from_iterable(i.iterdir() for i in (self._creator.purelib, self._creator.platlib))
        paths = (
            p
            for p in in_folders
            if p.stem.split('-')[0] == dist_name and p.suffix == '.dist-info' and p.is_dir()
        )
        existing_dist = next(paths, None)
        if existing_dist is not None:
            self._uninstall_dist(existing_dist)

    @staticmethod
    def _uninstall_dist(dist):
        """Delete a distribution: its RECORD-listed files plus top-level dirs.

        NOTE: restored as @staticmethod -- the method takes ``dist`` as its
        first parameter yet is invoked as ``self._uninstall_dist(...)``.
        """
        dist_base = dist.parent
        logging.debug('uninstall existing distribution %s from %s', dist.stem, dist_base)
        top_txt = dist / 'top_level.txt'
        paths = (
            {dist.parent / i.strip() for i in top_txt.read_text(encoding='utf-8').splitlines()}
            if top_txt.exists()
            else set()
        )
        paths.add(dist)
        base_dirs, record = paths.copy(), dist / 'RECORD'
        for name in ((i.split(',')[0] for i in record.read_text(encoding='utf-8').splitlines()) if record.exists() else ()):
            path = dist_base / name
            # Skip entries living inside a directory we delete wholesale.
            if not any(p in base_dirs for p in path.parents):
                paths.add(path)
        for path in sorted(paths):
            if path.exists():
                if path.is_dir() and not path.is_symlink():
                    safe_delete(path)
                else:
                    path.unlink()

    def clear(self):
        """Delete the extracted image folder, if present."""
        if self._image_dir.exists():
            safe_delete(self._image_dir)

    def has_image(self):
        """True when an extracted, non-empty image folder exists."""
        # Pass a default to next(): the original raised StopIteration on an
        # existing-but-empty image directory.
        return self._image_dir.exists() and next(self._image_dir.iterdir(), None) is not None
()
('bulk_file', type=click.Path(exists=True, dir_okay=False, readable=True, resolve_path=True))
('-restart', '--restart', type=stages, help='The stage the workflow should be restarted from.')
_options
def run(bulk_file: str, skip_stages: Optional[List[str]]=None, end: Optional[str]=None, restart: Optional[str]=None, config: Optional[str]=None, protocol: Optional[str]=None, cores: Optional[int]=None, memory: Optional[int]=None) -> None:
    """Run (or restart) the QUBEKit workflow for every molecule in a bulk CSV.

    Each row of ``bulk_file`` names a molecule plus optional per-molecule
    settings (smiles, config file, restart/end stages).  A failure in one
    molecule is reported and skipped so the rest of the batch continues.
    """
    import glob
    import os
    from qubekit.utils.helpers import mol_data_from_csv
    home = os.getcwd()
    bulk_data = mol_data_from_csv(bulk_file)
    for (name, mol_data) in bulk_data.items():
        print(f'Analysing: {name}')
        try:
            if ((restart is not None) or (mol_data['restart'] is not None)):
                # Restart: locate the existing QUBEKit_<name>_* folder and
                # resume from its stored workflow result.
                fname = name.split('.')[0]
                folder = glob.glob(f'QUBEKit_{fname}_*')[0]
                with folder_setup(folder):
                    results = WorkFlowResult.parse_file('workflow_result.json')
                    if (config is None):
                        workflow = prep_config(results=results, cores=cores, memory=memory, protocol=None)
                    else:
                        workflow = prep_config(config_file=config, cores=cores, memory=memory)
                    # CLI options take precedence over per-row CSV values.
                    workflow.restart_workflow(start=(restart or mol_data['restart']), skip_stages=skip_stages, end=(end or mol_data['end']), result=results)
            else:
                # Fresh run: build the molecule from SMILES or from a file.
                if (mol_data['smiles'] is not None):
                    molecule = Ligand.from_smiles(smiles_string=mol_data['smiles'], name=name)
                else:
                    molecule = Ligand.from_file(file_name=name)
                workflow = prep_config(config_file=(config or mol_data['config_file']), memory=memory, cores=cores, protocol=protocol)
                with folder_setup(f"QUBEKit_{molecule.name}_{datetime.now().strftime('%Y_%m_%d')}"):
                    molecule.to_file(file_name=f'{molecule.name}.pdb')
                    workflow.new_workflow(molecule=molecule, skip_stages=skip_stages, end=(end or mol_data['end']))
        except WorkFlowExecutionError:
            # Return to the batch root so the next molecule starts clean.
            os.chdir(home)
            print(f'An error was encountered while running {name} see folder for more info.')
            continue
class CsvDataset(Dataset):
    """Image/caption pairs loaded from a delimited text file.

    Each row supplies an image path (``img_key`` column) and a caption
    (``caption_key`` column); images run through ``transforms`` and
    captions through the tokenizer on access.
    """

    def __init__(self, input_filename, transforms, img_key, caption_key, sep='\t'):
        logging.debug(f'Loading csv data from {input_filename}.')
        frame = pd.read_csv(input_filename, sep=sep)
        self.images = frame[img_key].tolist()
        self.captions = frame[caption_key].tolist()
        self.transforms = transforms
        logging.debug('Done loading data.')

    def __len__(self):
        return len(self.captions)

    def __getitem__(self, idx):
        pixels = self.transforms(Image.open(str(self.images[idx])))
        tokens = tokenize([str(self.captions[idx])])[0]
        return (pixels, tokens)
class SnapshotsServicer(object):
    """Snapshots service stub; every RPC reports UNIMPLEMENTED."""

    def _fail_unimplemented(self, context):
        # Shared behavior for all stubbed RPC handlers.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Create(self, request, context):
        self._fail_unimplemented(context)

    def List(self, request, context):
        self._fail_unimplemented(context)

    def Delete(self, request, context):
        self._fail_unimplemented(context)

    def CreateFull(self, request, context):
        self._fail_unimplemented(context)

    def ListFull(self, request, context):
        self._fail_unimplemented(context)

    def DeleteFull(self, request, context):
        self._fail_unimplemented(context)
def check_version_info(conn, version_table, expected_version):
    """Ensure the database's stored version equals ``expected_version``.

    A missing version row is treated as version 0.

    Raises:
        AssetDBVersionError: when the stored and expected versions differ.
    """
    stored = conn.execute(sa.select((version_table.c.version,))).scalar()
    stored = 0 if stored is None else stored
    if stored != expected_version:
        raise AssetDBVersionError(db_version=stored, expected_version=expected_version)
class LatexyzInsertPairCommand(sublime_plugin.TextCommand):
    """Wrap the selection in \\left...\\right delimiters via a snippet."""

    def run(self, edit, arg):
        open_delim, close_delim = arg[0], arg[1]
        # Backslashes are doubled because the snippet engine unescapes them.
        left = '\\\\left' + open_delim.replace('\\', '\\\\')
        right = '\\\\right' + close_delim.replace('\\', '\\\\')
        settings = sublime.load_settings(lz_settings_file)
        # Field index 1 makes the selection an editable snippet field.
        field = 1 if settings.get('auto_create_fields', False) else 0
        contents = left + ('${%d:$SELECTION}' % field) + right
        self.view.run_command(
            'latexyz_insert_snippet',
            {'contents': contents, 'before': len(open_delim), 'after': len(close_delim)},
        )
.parametrize('username,password', users)
.parametrize('issue_id', issues)
.parametrize('project_id', projects)
def test_issue_send_post_email(db, client, username, password, project_id, issue_id):
    """POSTing the issue-send form emails the issue, subject to permissions.

    Expected outcomes:
      * issue exists, user has change permission -> redirect and one mail;
      * issue exists, authenticated but unauthorized -> 403, no mail;
      * issue exists, anonymous (empty password) -> redirect, no mail;
      * issue missing -> 404.
    """
    client.login(username=username, password=password)
    issue = Issue.objects.filter(project_id=project_id, id=issue_id).first()
    url = reverse('issue_send', args=[project_id, issue_id])
    data = {'subject': 'Subject', 'message': 'Message', 'recipients': ['']}
    response = client.post(url, data)
    if issue:
        if (project_id in change_issue_permission_map.get(username, [])):
            # Authorized: the view redirects and sends exactly one email.
            assert (response.status_code == 302)
            assert (len(mail.outbox) == 1)
            assert (mail.outbox[0].subject == '[example.com] Subject')
            assert (mail.outbox[0].body == 'Message')
        else:
            if password:
                # Authenticated but lacking permission.
                assert (response.status_code == 403)
            else:
                # Anonymous users are redirected (to login).
                assert (response.status_code == 302)
            assert (len(mail.outbox) == 0)
    else:
        assert (response.status_code == 404)
_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Test suite for the TensorFlow Funnel transformer model variants."""
    all_model_classes = ((TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification) if is_tf_available() else ())
    pipeline_model_mapping = ({'feature-extraction': (TFFunnelBaseModel, TFFunnelModel), 'fill-mask': TFFunnelForMaskedLM, 'question-answering': TFFunnelForQuestionAnswering, 'text-classification': TFFunnelForSequenceClassification, 'token-classification': TFFunnelForTokenClassification, 'zero-shot': TFFunnelForSequenceClassification} if is_tf_available() else {})
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_saved_model_creation(self):
        # Overridden as a no-op: the inherited common test is skipped here.
        pass
    def test_compile_tf_model(self):
        # Overridden as a no-op: the inherited common test is skipped here.
        pass
def _run_box(box, client, registry, ca_cert):
    """Exercise push/pull against a registry from inside a Vagrant box.

    Boots the given Vagrant ``box``, installs the container client, then
    tests login, push, pull and verification of a throwaway repository.
    The box is destroyed both before and after the run.
    """
    (vagrant, vagrant_scp) = _check_vagrant()
    if (not vagrant):
        print('vagrant command not found')
        return
    if (not vagrant_scp):
        print('vagrant-scp plugin not installed')
        return
    namespace = 'devtable'
    # Unique repo name per run so repeated runs don't collide.
    repo_name = ('testrepo%s' % int(time.time()))
    username = 'devtable'
    password = 'password'
    print(colored(('>>> Box: %s' % box), attrs=['bold']))
    print(colored('>>> Starting box', 'yellow'))
    # Destroy any leftover box/Vagrantfile from a previous run.
    _run_and_wait(['vagrant', 'destroy', '-f'], error_allowed=True)
    _run_and_wait(['rm', 'Vagrantfile'], error_allowed=True)
    _run_and_wait((['vagrant', 'init'] + box.split(' ')))
    _run_and_wait(['vagrant', 'up', '--provider', 'virtualbox'])
    _run_commands(_init_system(box))
    if ca_cert:
        print(colored(('>>> Setting up runtime with cert ' + ca_cert), 'yellow'))
        _run_commands(_load_ca(box, ca_cert))
        _run_commands(client.setup_client(registry, verify_tls=True))
    else:
        print(colored('>>> Setting up runtime with insecure HTTP(S)', 'yellow'))
        _run_commands(client.setup_client(registry, verify_tls=False))
    print(colored('>>> Client version', 'cyan'))
    runtime_version = _run_commands(client.print_version())
    print(_indent(runtime_version, 4))
    print(colored('>>> Populating test image', 'yellow'))
    _run_commands(client.populate_test_image(registry, namespace, repo_name))
    print(colored('>>> Testing login', 'cyan'))
    _run_commands(client.login(registry, username, password))
    print(colored('>>> Testing push', 'cyan'))
    _run_commands(client.push(registry, namespace, repo_name))
    # Remove local images so the pull below actually hits the registry.
    print(colored('>>> Removing all images', 'yellow'))
    _run_commands(client.pre_pull_cleanup(registry, namespace, repo_name))
    print(colored('>>> Testing pull', 'cyan'))
    _run_commands(client.pull(registry, namespace, repo_name))
    print(colored('>>> Verifying', 'cyan'))
    _run_commands(client.verify(registry, namespace, repo_name))
    print(colored('>>> Tearing down box', 'magenta'))
    _run_and_wait(['vagrant', 'destroy', '-f'], error_allowed=True)
    print(colored(('>>> Successfully tested box %s' % box), 'green'))
    print('')
def canonicalize_version(version: Union[(Version, str)], *, strip_trailing_zero: bool = True) -> str:
    """Return the canonical PEP 440 rendering of *version*.

    Strings that do not parse as versions are returned unchanged.  When
    ``strip_trailing_zero`` is true, trailing ``.0`` release components
    are dropped (``1.2.0`` -> ``1.2``).
    """
    if isinstance(version, str):
        try:
            parsed = Version(version)
        except InvalidVersion:
            # Not a valid PEP 440 version -- nothing to canonicalize.
            return version
    else:
        parsed = version

    pieces = []

    # Epoch is only rendered when non-zero.
    if parsed.epoch != 0:
        pieces.append(f'{parsed.epoch}!')

    release = '.'.join(str(part) for part in parsed.release)
    if strip_trailing_zero:
        release = re.sub('(\\.0)+$', '', release)
    pieces.append(release)

    if parsed.pre is not None:
        pieces.append(''.join(str(part) for part in parsed.pre))
    if parsed.post is not None:
        pieces.append(f'.post{parsed.post}')
    if parsed.dev is not None:
        pieces.append(f'.dev{parsed.dev}')
    if parsed.local is not None:
        pieces.append(f'+{parsed.local}')

    return ''.join(pieces)
(frozen=True)
class EventPickupNode(ResourceNode):
    """Derived node fusing an EventNode with the PickupNode that follows it.

    Used where triggering an event and collecting a pickup happen together;
    the combined node exposes the pickup's resource but yields both gains.
    """
    event_node: EventNode
    pickup_node: PickupNode

    @classmethod
    def create_from(cls, index: int, event_node: EventNode, next_node: PickupNode) -> EventPickupNode:
        """Build the combined node from its event and pickup halves.

        NOTE(review): restored the @classmethod decorator -- the ``cls``
        first parameter shows this factory was written as one.
        """
        return cls(event_node.identifier.renamed(f'EventPickup - {event_node.event.long_name} + {next_node.name}'), index, (event_node.heal or next_node.heal), next_node.location, f'''{event_node.description}
{next_node.description}''', event_node.layers, {'event': event_node.extra, 'pickup': next_node.extra}, False, event_node, next_node)

    def __repr__(self) -> str:
        return 'EventPickupNode({!r} -> {}+{})'.format(self.name, self.event_node.event.long_name, self.pickup_node.pickup_index.index)

    def is_resource_node(self) -> bool:
        """Always True: this node grants resources when collected."""
        return True

    def is_derived_node(self) -> bool:
        """Always True: this node is synthesized from two other nodes."""
        return True

    def resource(self, context: NodeContext) -> ResourceInfo:
        # The combined node is identified by the pickup's resource.
        return self.pickup_node.resource(context)

    def requirement_to_leave(self, context: NodeContext) -> Requirement:
        return ResourceRequirement.simple(self.pickup_node.resource(context))

    def can_collect(self, context: NodeContext) -> bool:
        """True if either half still has something to collect."""
        event_collect = self.event_node.can_collect(context)
        pickup_collect = self.pickup_node.can_collect(context)
        return (event_collect or pickup_collect)

    def is_collected(self, context: NodeContext) -> bool:
        """True only when both halves have been collected."""
        return (self.event_node.is_collected(context) and self.pickup_node.is_collected(context))

    def resource_gain_on_collect(self, context: NodeContext) -> ResourceGain:
        """Yield the gains of the event first, then those of the pickup."""
        (yield from self.event_node.resource_gain_on_collect(context))
        (yield from self.pickup_node.resource_gain_on_collect(context))
class VideoDatasetMultiClips(VideoDataset):
    """VideoDataset variant returning every clip of a video per item."""
    def __loading(self, path, video_frame_indices):
        # Load each clip, apply (re-randomized) spatial transforms, and
        # record the [start, end) frame segment it covers.
        clips = []
        segments = []
        for clip_frame_indices in video_frame_indices:
            clip = self.loader(path, clip_frame_indices)
            if (self.spatial_transform is not None):
                self.spatial_transform.randomize_parameters()
                clip = [self.spatial_transform(img) for img in clip]
            # Stack frames then move channels first: (C, T, H, W).
            clips.append(torch.stack(clip, 0).permute(1, 0, 2, 3))
            segments.append([min(clip_frame_indices), (max(clip_frame_indices) + 1)])
        return (clips, segments)
    def __getitem__(self, index):
        """Return (clips, targets): one target per clip of the video."""
        path = self.data[index]['video']
        video_frame_indices = self.data[index]['frame_indices']
        if (self.temporal_transform is not None):
            video_frame_indices = self.temporal_transform(video_frame_indices)
        (clips, segments) = self.__loading(path, video_frame_indices)
        if isinstance(self.target_type, list):
            target = [self.data[index][t] for t in self.target_type]
        else:
            target = self.data[index][self.target_type]
        if ('segment' in self.target_type):
            if isinstance(self.target_type, list):
                # Substitute each clip's own segment into a copy of the target.
                segment_index = self.target_type.index('segment')
                targets = []
                for s in segments:
                    targets.append(copy.deepcopy(target))
                    targets[(- 1)][segment_index] = s
            else:
                targets = segments
        else:
            # Same annotation target repeated for every clip.
            targets = [target for _ in range(len(segments))]
        return (clips, targets)
def lock_screen(request):
    """Lock-screen view: GET renders the lock page, POST tries to unlock.

    On GET the session is flagged as locked and the referring page is
    remembered so a successful unlock can return there.  On POST the
    JS-encrypted password is decrypted and re-authenticated; failure
    re-renders the lock page with an error marker.
    """
    crypt = CryptPwd()
    if (request.method == 'GET'):
        user = UserProfile.objects.get(username=request.user)
        # login_status == 3 marks the account as screen-locked.
        UserProfile.objects.filter(username=request.user).update(login_status=3)
        request.session['lock'] = 'lock'
        # Remember where to return after unlock.  Guard against a missing
        # Referer header: the original crashed with TypeError on None.
        if (request.META.get('HTTP_REFERER') and ('lock_screen' not in request.META.get('HTTP_REFERER'))):
            request.session['referer_url'] = request.META.get('HTTP_REFERER')
        public_key = crypt.gen_pri_pub_key
        # NOTE: locals() deliberately passes crypt/user/public_key to the template.
        return render(request, 'lockscreen.html', locals())
    elif (request.method == 'POST'):
        de_password = crypt.de_js_encrypt(request.POST.get('pwd'))
        user = auth.authenticate(username=request.session['username'], password=de_password)
        if user:
            del request.session['lock']
            referer_url = request.session.get('referer_url')
            return redirect(referer_url)
    # Fall through: failed authentication re-renders the lock page.
    return render(request, 'lockscreen.html', {'login_error_info': '!!', 'public_key': crypt.gen_pri_pub_key})
class Infraction(Enum):
    """Moderation actions that can be applied automatically via bot commands."""
    BAN = auto()
    KICK = auto()
    TIMEOUT = auto()
    VOICE_MUTE = auto()
    SUPERSTAR = auto()
    WARNING = auto()
    WATCH = auto()
    NOTE = auto()
    NONE = auto()
    def __str__(self) -> str:
        return self.name
    async def invoke(self, user: (Member | User), message: discord.Message, channel: (discord.abc.GuildChannel | discord.DMChannel), alerts_channel: discord.TextChannel, duration: InfractionDuration, reason: str) -> None:
        """Apply this infraction to ``user`` via the bot command of the same name.

        Sends an alert and bails out if the command is missing or the user
        cannot be resolved to a guild member.  KICK/WARNING/WATCH/NOTE
        commands take no duration; the others receive an absolute expiry.
        """
        command_name = self.name.lower()
        command = bot_module.instance.get_command(command_name)
        if (not command):
            (await alerts_channel.send(f':warning: Could not apply {command_name} to {user.mention}: command not found.'))
            log.warning(f':warning: Could not apply {command_name} to {user.mention}: command not found.')
            return
        if isinstance(user, discord.User):
            # Commands need a Member; try to resolve one from the guild.
            member = (await get_or_fetch_member(channel.guild, user.id))
            if member:
                user = member
            else:
                log.warning(f'The user {user} were set to receive an automatic {command_name}, but they were not found in the guild.')
                return
        ctx = FakeContext(message, channel, command)
        if (self.name in ('KICK', 'WARNING', 'WATCH', 'NOTE')):
            # These commands take no duration parameter.
            (await command(ctx, user, reason=(reason or None)))
        else:
            # Falsy duration.value yields duration=None -- presumably a
            # permanent infraction; confirm against the command's contract.
            duration = ((arrow.utcnow().datetime + duration.value) if duration.value else None)
            (await command(ctx, user, duration, reason=(reason or None)))
def test_omitting_none(converter: BaseConverter):
    """Overridden attributes omit missing values on a round trip.

    Unstructuring drops the uninitialized ``b``; structuring a payload
    without ``b`` leaves the attribute absent on the result.
    NOTE(review): ``field(init=False)`` implies ``A`` carried an attrs
    ``@define``-style decorator that this copy has lost -- confirm.
    """
    class A():
        a: int
        b: int = field(init=False)
    converter.register_unstructure_hook(A, make_dict_unstructure_fn(A, converter, a=override(), b=override()))
    # 'b' was never set, so it must not appear in the unstructured dict.
    assert (converter.unstructure(A(1)) == {'a': 1})
    converter.register_structure_hook(A, make_dict_structure_fn(A, converter, a=override(), b=override()))
    assert (converter.structure({'a': 2}, A).a == 2)
    # A missing 'b' key leaves the attribute absent rather than None.
    assert (not hasattr(converter.structure({'a': 2}, A), 'b'))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.