code stringlengths 281 23.7M |
|---|
class EditorWizardPage(BasePyzoWizardPage):
    """Wizard page that introduces the editor; the base class renders
    _title, _image_filename and _descriptions."""
    _title = translate('wizard', 'The editor is where you write your code')
    # Image file shown on the page; resolution/loading is handled by the base class.
    _image_filename = 'pyzo_editor.png'
    # Paragraphs displayed under the image; kept translatable via translate().
    _descriptions = [translate('wizard', 'In the *editor*, each open file is represented as a tab. By\n right-clicking on a tab, files can be run, saved, closed, etc.'), translate('wizard', 'The right mouse button also enables one to make a file the\n *main file* of a project. This file can be recognized by its star\n symbol, and it enables running the file more easily.')] |
_against_invalid_ecpoint
def CKD_pub(parent_pubkey: bytes, parent_chaincode: bytes, child_index: int) -> Tuple[(bytes, bytes)]:
    """Derive a non-hardened BIP32 child (pubkey, chaincode) from a parent public key.

    Raises ValueError for negative indices; hardened derivation is impossible
    from a public key alone, so indices with the hardened bit set are rejected.
    """
    if child_index < 0:
        raise ValueError('the bip32 index needs to be non-negative')
    if child_index & BIP32_PRIME:
        raise Exception('not possible to derive hardened child from parent pubkey')
    # BIP32 serializes the child index as 4 big-endian bytes.
    serialized_index = bfh(rev_hex(int_to_hex(child_index, 4)))
    return _CKD_pub(parent_pubkey=parent_pubkey, parent_chaincode=parent_chaincode, child_index=serialized_index)
class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores each query token as an entity start/end by
    matching it against boundary-token embeddings from a support set."""
    def __init__(self, pretrained_model_name_or_path='sayef/fsner-bert-base-uncased'):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        # NOTE(review): positional args mean dim=3, eps=1e-08; cos/softmax are
        # not used by forward() below — presumably kept for the Atten() helper.
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)
    def BERT(self, **inputs):
        # Last-layer hidden states of the wrapped BERT encoder.
        return self.bert(**inputs).last_hidden_state
    def VectorSum(self, token_embeddings):
        # Sum over axis 2, keeping the dimension for broadcasting.
        return token_embeddings.sum(2, keepdim=True)
    def Atten(self, q_rep, S_rep, T=1):
        # Temperature-scaled cosine attention (helper; unused in forward()).
        return self.softmax((T * self.cos(q_rep, S_rep)))
    def forward(self, W_query, W_supports):
        """Return (p_starts, p_ends): per-query-token probabilities over token
        positions of being an entity start/end, one row per query example."""
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        # Strip bookkeeping keys so the remaining dict is valid BERT input.
        del W_supports['sizes']
        del W_supports['start_token_id']
        del W_supports['end_token_id']
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        # Boolean masks marking the special boundary tokens in the support batch.
        start_token_masks = (W_supports['input_ids'] == start_token_id)
        end_token_masks = (W_supports['input_ids'] == end_token_id)
        for (i, size) in enumerate(support_sizes):
            if (i == 0):
                s = 0
            else:
                # NOTE(review): this offsets by the *previous* size only, not a
                # cumulative sum — assumes 'sizes' is already cumulative, or
                # only two groups exist; verify against the data pipeline.
                s = support_sizes[(i - 1)]
            # Embeddings of the start/end boundary tokens within this example's
            # slice of the support batch.
            s_start = S[s:(s + size)][start_token_masks[s:(s + size)]]
            s_end = S[s:(s + size)][end_token_masks[s:(s + size)]]
            # Similarity of every query token to the boundary embeddings,
            # summed over supports and normalized over token positions.
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if (p_starts is not None):
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return (p_starts, p_ends) |
class StarDelegate(QStyledItemDelegate):
    """Item delegate that paints and edits StarRating values, deferring to the
    base-class behavior for any other item data."""

    def paint(self, painter, option, index):
        rating = index.data()
        if not isinstance(rating, StarRating):
            super(StarDelegate, self).paint(painter, option, index)
            return
        # Keep the selection highlight visible behind the stars.
        if option.state & QStyle.State_Selected:
            painter.fillRect(option.rect, option.palette.highlight())
        rating.paint(painter, option.rect, option.palette, StarRating.ReadOnly)

    def sizeHint(self, option, index):
        rating = index.data()
        if not isinstance(rating, StarRating):
            return super(StarDelegate, self).sizeHint(option, index)
        return rating.sizeHint()

    def createEditor(self, parent, option, index):
        rating = index.data()
        if not isinstance(rating, StarRating):
            return super(StarDelegate, self).createEditor(parent, option, index)
        # Commit the rating as soon as the user finishes editing.
        editor = StarEditor(parent)
        editor.editingFinished.connect(self.commitAndCloseEditor)
        return editor

    def setEditorData(self, editor, index):
        rating = index.data()
        if not isinstance(rating, StarRating):
            super(StarDelegate, self).setEditorData(editor, index)
            return
        editor.setStarRating(rating)

    def setModelData(self, editor, model, index):
        rating = index.data()
        if not isinstance(rating, StarRating):
            super(StarDelegate, self).setModelData(editor, model, index)
            return
        model.setData(index, editor.starRating())

    def commitAndCloseEditor(self):
        # The editor that emitted editingFinished is the signal sender.
        editor = self.sender()
        self.commitData.emit(editor)
        self.closeEditor.emit(editor)
def unpool_with_argmax(pool, ind, name=None, ksize=(1, 2, 2, 1)):
    """Invert a max-pool using the argmax indices produced by
    tf.nn.max_pool_with_argmax.

    Args:
        pool: pooled feature map, shape [batch, h, w, channels].
        ind: flat argmax indices returned by max_pool_with_argmax (same shape).
        name: variable-scope name for the op.
        ksize: pooling kernel, [1, kh, kw, 1]; output spatial dims are scaled
            by kh/kw. Default is a tuple (not a list) to avoid the shared
            mutable-default-argument pitfall; only indexed reads are performed,
            so callers passing lists are unaffected.

    Returns:
        Unpooled tensor of shape [batch, h*kh, w*kw, channels] with pooled
        values scattered to their argmax positions and zeros elsewhere.
    """
    with tf.variable_scope(name):
        input_shape = pool.get_shape().as_list()
        output_shape = (input_shape[0], (input_shape[1] * ksize[1]), (input_shape[2] * ksize[2]), input_shape[3])
        flat_input_size = np.prod(input_shape)
        flat_output_shape = [output_shape[0], ((output_shape[1] * output_shape[2]) * output_shape[3])]
        pool_ = tf.reshape(pool, [flat_input_size])
        # Build a per-element batch index so scatter_nd can address
        # [batch, flat_position] pairs.
        batch_range = tf.reshape(tf.range(output_shape[0], dtype=ind.dtype), shape=[input_shape[0], 1, 1, 1])
        b = (tf.ones_like(ind) * batch_range)
        b = tf.reshape(b, [flat_input_size, 1])
        ind_ = tf.reshape(ind, [flat_input_size, 1])
        ind_ = tf.concat([b, ind_], 1)
        ret = tf.scatter_nd(ind_, pool_, shape=flat_output_shape)
        ret = tf.reshape(ret, output_shape)
        return ret
def DecodeAttr(datatype, value):
    """Decode a raw RADIUS attribute *value* according to its *datatype*.

    Raises ValueError for unrecognized datatypes.
    """
    # Table-driven dispatch: datatype name -> decoder callable.
    decoders = {
        'string': DecodeString,
        'octets': DecodeOctets,
        'integer': DecodeInteger,
        'ipaddr': DecodeAddress,
        'ipv6prefix': DecodeIPv6Prefix,
        'ipv6addr': DecodeIPv6Address,
        'abinary': DecodeAscendBinary,
        # Fixed-width integers use an explicit struct format.
        'signed': lambda v: DecodeInteger(v, '!i'),
        'short': lambda v: DecodeInteger(v, '!H'),
        'byte': lambda v: DecodeInteger(v, '!B'),
        'date': DecodeDate,
        'integer64': DecodeInteger64,
    }
    if datatype not in decoders:
        raise ValueError(('Unknown attribute type %s' % datatype))
    return decoders[datatype](value)
class CategoricalGRUPolicy(StochasticPolicy, LasagnePowered, Serializable):
    """GRU-based recurrent policy over a discrete action space; outputs a
    categorical distribution per timestep and keeps per-episode hidden state."""
    def __init__(self, env_spec, hidden_dim=32, feature_network=None, state_include_action=True, hidden_nonlinearity=NL.tanh):
        """Build the symbolic GRU network and compile the single-step function.

        feature_network, if given, preprocesses flattened inputs before the GRU;
        state_include_action appends the previous (one-hot) action to the
        observation at every step.
        """
        assert isinstance(env_spec.action_space, Discrete)
        Serializable.quick_init(self, locals())
        super(CategoricalGRUPolicy, self).__init__(env_spec)
        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim
        if state_include_action:
            input_dim = (obs_dim + action_dim)
        else:
            input_dim = obs_dim
        # Input is (batch, time, features); both leading dims are dynamic.
        l_input = L.InputLayer(shape=(None, None, input_dim), name='input')
        if (feature_network is None):
            feature_dim = input_dim
            l_flat_feature = None
            l_feature = l_input
        else:
            feature_dim = feature_network.output_layer.output_shape[(- 1)]
            l_flat_feature = feature_network.output_layer
            # Reshape the flat (batch*time, feature) output back to
            # (batch, time, feature) before feeding the GRU.
            l_feature = OpLayer(l_flat_feature, extras=[l_input], name='reshape_feature', op=(lambda flat_feature, input: TT.reshape(flat_feature, [input.shape[0], input.shape[1], feature_dim])), shape_op=(lambda _, input_shape: (input_shape[0], input_shape[1], feature_dim)))
        prob_network = GRUNetwork(input_shape=(feature_dim,), input_layer=l_feature, output_dim=env_spec.action_space.n, hidden_dim=hidden_dim, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=TT.nnet.softmax, name='prob_network')
        self.prob_network = prob_network
        self.feature_network = feature_network
        self.l_input = l_input
        self.state_include_action = state_include_action
        flat_input_var = TT.matrix('flat_input')
        if (feature_network is None):
            feature_var = flat_input_var
        else:
            feature_var = L.get_output(l_flat_feature, {feature_network.input_layer: flat_input_var})
        # Compiled one-step function: (flat input, prev hidden) -> (probs, hidden).
        self.f_step_prob = ext.compile_function([flat_input_var, prob_network.step_prev_hidden_layer.input_var], L.get_output([prob_network.step_output_layer, prob_network.step_hidden_layer], {prob_network.step_input_layer: feature_var}))
        self.input_dim = input_dim
        self.action_dim = action_dim
        self.hidden_dim = hidden_dim
        # Per-episode recurrent state; populated by reset()/get_action().
        self.prev_action = None
        self.prev_hidden = None
        self.dist = RecurrentCategorical(env_spec.action_space.n)
        out_layers = [prob_network.output_layer]
        if (feature_network is not None):
            out_layers.append(feature_network.output_layer)
        LasagnePowered.__init__(self, out_layers)
    def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic per-timestep action probabilities for a (batch, time, ...)
        observation tensor."""
        (n_batches, n_steps) = obs_var.shape[:2]
        obs_var = obs_var.reshape((n_batches, n_steps, (- 1)))
        if self.state_include_action:
            prev_action_var = state_info_vars['prev_action']
            all_input_var = TT.concatenate([obs_var, prev_action_var], axis=2)
        else:
            all_input_var = obs_var
        if (self.feature_network is None):
            return dict(prob=L.get_output(self.prob_network.output_layer, {self.l_input: all_input_var}))
        else:
            flat_input_var = TT.reshape(all_input_var, ((- 1), self.input_dim))
            return dict(prob=L.get_output(self.prob_network.output_layer, {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var}))
    def reset(self):
        # Start a new episode: clear the last action and reinitialize the
        # hidden state from the learned initial-hidden parameter.
        self.prev_action = None
        self.prev_hidden = self.prob_network.hid_init_param.get_value()
    def get_action(self, observation):
        """Sample one action for a single observation, advancing the recurrent
        state; returns (action, agent_info) with the step's probabilities."""
        if self.state_include_action:
            # First step of an episode uses a zero vector as the "previous" action.
            if (self.prev_action is None):
                prev_action = np.zeros((self.action_space.flat_dim,))
            else:
                prev_action = self.action_space.flatten(self.prev_action)
            all_input = np.concatenate([self.observation_space.flatten(observation), prev_action])
        else:
            all_input = self.observation_space.flatten(observation)
            prev_action = np.nan
        # Batch of one: unwrap the leading dimension of each output.
        (probs, hidden_vec) = [x[0] for x in self.f_step_prob([all_input], [self.prev_hidden])]
        action = special.weighted_sample(probs, range(self.action_space.n))
        self.prev_action = action
        self.prev_hidden = hidden_vec
        agent_info = dict(prob=probs)
        if self.state_include_action:
            agent_info['prev_action'] = prev_action
        return (action, agent_info)
    def recurrent(self):
        # NOTE(review): in the upstream API these accessors are usually
        # @property-decorated; decorators are not visible in this chunk.
        return True
    def distribution(self):
        return self.dist
    def state_info_keys(self):
        # 'prev_action' must be carried in the rollout state only when it is
        # part of the network input.
        if self.state_include_action:
            return ['prev_action']
        else:
            return [] |
.parametrize('dtype', ['U', 'S', 'u1', 'u2', 'i8', 'int'])
def test_read_bgen__contig_dtype(shared_datadir, dtype):
    """Check the dtype of variant_contig for each requested contig_dtype."""
    path = (shared_datadir / 'example.bgen')
    ds = read_bgen(path, contig_dtype=dtype)
    dtype = np.dtype(dtype)
    if (dtype.kind in {'U', 'S'}):
        # String contig dtypes are categorical-encoded, so the stored
        # variant_contig codes are int64 regardless of the requested dtype.
        assert (ds['variant_contig'].dtype == np.int64)
    else:
        assert (ds['variant_contig'].dtype == dtype) |
class TestConvertor(unittest.TestCase):
    """Tests for time.convert() unit conversions."""

    def test_basic(self):
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(time.convert(60, 's', 's'), 60.0)
        self.assertEqual(time.convert(60, 's', 'm'), 1.0)
        self.assertEqual(time.convert(60000, 'ms', 'm'), 1.0)
        # Unit names are case/alias-insensitive.
        self.assertEqual(time.convert(60000, 'MS', 'minutes'), 1.0)
        self.assertEqual(time.convert(((3600 * 1000) * 1.4), 'ms', 'h'), 1.4)
        self.assertEqual(time.convert(((86400 * 1000) * 2.5), 'ms', 'd'), 2.5)
        self.assertEqual(time.convert((((86400 * 1000) * 365) * 0.7), 'ms', 'y'), 0.7)
        self.assertEqual(time.convert(1000, 'ms', 'us'), 1000000)
        # Fixed: the expected value was missing here (1000 ms == 1e9 ns).
        self.assertEqual(time.convert(1000, 'ms', 'ns'), 1000000000)
        self.assertEqual(time.convert(1.5, 'y', 'ns'), ((((((1.5 * 365) * 24) * 3600) * 1000) * 1000) * 1000))

    def test_unrecognised_unit(self):
        # 'months' is intentionally unsupported (variable length).
        self.assertRaises(NotImplementedError, time.convert, 60, 's', 'months')
def _threshold_and_support(input, dim=0):
    """Compute the sparsemax threshold tau and support size along *dim*.

    Returns (tau, support_size) where tau is the value subtracted before
    clamping and support_size counts the entries that stay nonzero.
    """
    sorted_vals, _ = torch.sort(input, descending=True, dim=dim)
    # Running sum of the sorted values, shifted by the sparsemax constant.
    cumsum_minus_one = sorted_vals.cumsum(dim) - 1
    k = _make_ix_like(input, dim)
    # An entry is in the support while k * z_(k) exceeds the shifted cumsum.
    in_support = k * sorted_vals > cumsum_minus_one
    support_size = in_support.sum(dim=dim).unsqueeze(dim)
    # tau is the shifted cumsum at the last supported index, averaged.
    tau = cumsum_minus_one.gather(dim, support_size - 1)
    tau /= support_size.to(input.dtype)
    return tau, support_size
class AbstractNotificationAdapter(QObject):
    """Interface for backends that present web-page notifications."""
    # Human-readable backend name, set by subclasses.
    NAME: str
    # Emitted when a notification is closed / clicked, with its id.
    close_id = pyqtSignal(int)
    click_id = pyqtSignal(int)
    # Emitted with an error message when presenting fails.
    error = pyqtSignal(str)
    clear_all = pyqtSignal()
    def present(self, qt_notification: 'QWebEngineNotification', *, replaces_id: Optional[int]) -> int:
        """Show the notification; return the backend-assigned notification id."""
        raise NotImplementedError
    def _should_include_origin(self, origin: QUrl) -> bool:
        # Show the origin only when it has a host and the per-URL setting allows it.
        return bool((origin.host() and config.instance.get('content.notifications.show_origin', url=origin)))
    # NOTE(review): the next line looks like a truncated decorator,
    # presumably `@pyqtSlot(int)` — confirm against the original source.
    (int)
    def on_web_closed(self, notification_id: int) -> None:
        """Called when the web page closes the notification with this id."""
        raise NotImplementedError |
class StaticWrapper(hwndwrapper.HwndWrapper):
    """Wrapper for Win32 Static (label) controls."""
    friendlyclassname = 'Static'
    # Window-class patterns this wrapper matches (regexes included).
    windowclasses = ['Static', 'WindowsForms\\d*\\.STATIC\\..*', 'TPanel', '.*StaticText']
    # Static controls can act as labels for neighbouring controls.
    can_be_label = True
    def __init__(self, hwnd):
        super(StaticWrapper, self).__init__(hwnd)
    def _needs_image_prop(self):
        """True when the control displays image-like content (icon, bitmap,
        centered image or owner-drawn), so a screenshot property is needed."""
        if (self.is_visible() and (self.has_style(win32defines.SS_ICON) or self.has_style(win32defines.SS_BITMAP) or self.has_style(win32defines.SS_CENTERIMAGE) or self.has_style(win32defines.SS_OWNERDRAW))):
            return True
        else:
            return False
    # Backwards-compatible alias for the old camel-case API name.
    _NeedsImageProp = deprecated(_needs_image_prop, deprecated_name='_NeedsImageProp') |
class CrPVP(PVP):
    """Pattern-verbalizer pair for the CR sentiment task."""
    # Label -> verbalization word(s) inserted at the mask position.
    VERBALIZER = {'0': ['silly'], '1': ['solid']}
    def get_parts(self, example: InputExample) -> FilledPattern:
        """Build the prompt pieces and their trainable-block flags.

        Returns (string_list_a, string_list_b, block_flag_a, block_flag_b);
        a flag of 1 marks a position whose embedding is trainable.
        """
        text_a = self.shortenable(example.text_a)
        if (self.pattern_id == 1):
            string_list_a = [text_a, 'I', 'think', 'it', 'is', self.mask, '!']
            string_list_b = []
            # Only the 'I' slot is a trainable block for pattern 1.
            block_flag_a = [0, 1, 0, 0, 0, 0, 0]
            block_flag_b = []
            assert (len(string_list_a) == len(block_flag_a))
            assert (len(string_list_b) == len(block_flag_b))
            return (string_list_a, string_list_b, block_flag_a, block_flag_b)
        elif (self.pattern_id == 4):
            string_list_a = [text_a, 'I', 'think', 'it', 'is', self.mask, '!']
            string_list_b = []
            # Pattern 4 makes the whole 'I think it is' span trainable.
            block_flag_a = [0, 1, 1, 1, 1, 0, 0]
            block_flag_b = []
            assert (len(string_list_a) == len(block_flag_a))
            assert (len(string_list_b) == len(block_flag_b))
            return (string_list_a, string_list_b, block_flag_a, block_flag_b)
        else:
            raise ValueError('unknown pattern_id.')
    def verbalize(self, label) -> List[str]:
        """Map a label (int or str) to its verbalizer words."""
        if (label not in CrPVP.VERBALIZER.keys()):
            if (type(label) == int):
                label = str(label)
            elif (type(label) == str):
                # NOTE(review): VERBALIZER keys are strings, so converting an
                # unknown str label to int will raise KeyError below — confirm
                # whether this branch is ever reached for this task.
                label = int(label)
        return CrPVP.VERBALIZER[label] |
class ConvDropoutNormNonlin(nn.Module):
    """Building block applying conv -> (optional dropout) -> norm -> nonlinearity,
    with every operator and its kwargs configurable."""

    def __init__(self, input_channels, output_channels, conv_op=nn.Conv2d, conv_kwargs=None, norm_op=nn.BatchNorm2d, norm_op_kwargs=None, dropout_op=nn.Dropout2d, dropout_op_kwargs=None, nonlin=nn.LeakyReLU, nonlin_kwargs=None):
        super(ConvDropoutNormNonlin, self).__init__()
        # Fill in defaults for any kwargs dict the caller did not provide.
        nonlin_kwargs = {'negative_slope': 0.01, 'inplace': True} if nonlin_kwargs is None else nonlin_kwargs
        dropout_op_kwargs = {'p': 0.5, 'inplace': True} if dropout_op_kwargs is None else dropout_op_kwargs
        norm_op_kwargs = {'eps': 1e-05, 'affine': True, 'momentum': 0.1} if norm_op_kwargs is None else norm_op_kwargs
        conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True} if conv_kwargs is None else conv_kwargs
        # Keep the configuration on the instance for introspection/cloning.
        self.nonlin_kwargs = nonlin_kwargs
        self.nonlin = nonlin
        self.dropout_op = dropout_op
        self.dropout_op_kwargs = dropout_op_kwargs
        self.norm_op_kwargs = norm_op_kwargs
        self.conv_kwargs = conv_kwargs
        self.conv_op = conv_op
        self.norm_op = norm_op
        self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs)
        # Instantiate dropout only when it is enabled with a positive rate.
        wants_dropout = (self.dropout_op is not None
                         and self.dropout_op_kwargs['p'] is not None
                         and self.dropout_op_kwargs['p'] > 0)
        self.dropout = self.dropout_op(**self.dropout_op_kwargs) if wants_dropout else None
        self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)
        self.lrelu = self.nonlin(**self.nonlin_kwargs)

    def forward(self, x):
        """Run conv, optional dropout, then normalization and nonlinearity."""
        out = self.conv(x)
        if self.dropout is not None:
            out = self.dropout(out)
        return self.lrelu(self.instnorm(out))
class SGWinogradSchemaChallenge(Task):
    """SuperGLUE WSC task: decide whether a pronoun refers to a given noun,
    scored as a yes/no loglikelihood comparison."""
    VERSION = 0
    DATASET_PATH = 'super_glue'
    DATASET_NAME = 'wsc'
    def has_training_docs(self):
        return True
    def has_validation_docs(self):
        return True
    def has_test_docs(self):
        return False
    def training_docs(self):
        if self.has_training_docs():
            if (self._training_docs is None):
                # GPT-3 paper setup: train only on positive (label-true) examples.
                self._training_docs = [doc for doc in self.dataset['train'] if doc['label']]
            return self._training_docs
    def validation_docs(self):
        return self.dataset['validation']
    def doc_to_text(self, doc):
        """Render the passage with the pronoun span emphasized, plus the question."""
        raw_passage = doc['text']
        # Text up to (but excluding) the pronoun, by whitespace token index.
        pre = ' '.join(raw_passage.split()[:doc['span2_index']])
        # Everything after the pronoun occurrence in the raw passage.
        post = raw_passage[((len(pre) + len(doc['span2_text'])) + 1):]
        passage = general_detokenize(((pre + ' *{}*'.format(doc['span2_text'])) + post))
        noun = doc['span1_text']
        pronoun = doc['span2_text']
        text = ((f'''Passage: {passage}
''' + f'''Question: In the passage above, does the pronoun "*{pronoun}*" refer to "*{noun}*"?
''') + 'Answer:')
        return text
    def doc_to_target(self, doc):
        # Target is ' yes' / ' no' with the leading space the model must produce.
        return (' ' + yesno(doc['label']))
    def construct_requests(self, doc, ctx):
        # Score both candidate answers; higher loglikelihood wins.
        (ll_yes, _) = rf.loglikelihood(ctx, ' yes')
        (ll_no, _) = rf.loglikelihood(ctx, ' no')
        return (ll_yes, ll_no)
    def process_results(self, doc, results):
        (ll_yes, ll_no) = results
        gold = doc['label']
        acc = (1.0 if ((ll_yes > ll_no) == gold) else 0.0)
        return {'acc': acc}
    def higher_is_better(self):
        return {'acc': True}
    def aggregation(self):
        return {'acc': mean} |
def _group_keys_by_module(keys: List[str], original_names: Dict[(str, str)]):
    """Group state-dict keys that live in the same submodule.

    Returns a dict mapping each grouped key to the list of keys sharing its
    submodule prefix; a key is grouped only when its prefix holds more than
    one key and the members' original names share a non-empty common prefix.
    """
    def _prefix_of(key):
        # Submodule prefix up to and including the last dot, or None for
        # top-level keys without a dot.
        dot = key.rfind('.')
        return key[:dot + 1] if dot >= 0 else None

    # Shorter (more general) prefixes are considered first.
    prefixes = sorted((p for p in map(_prefix_of, keys) if p), key=len)
    result = {}
    for prefix in prefixes:
        members = [k for k in keys if k.startswith(prefix)]
        if len(members) <= 1:
            continue
        # Skip groups whose original names have nothing in common.
        if len(_longest_common_prefix_str([original_names[k] for k in members])) == 0:
            continue
        for k in members:
            # Keep the first (shortest-prefix) group assigned to each key.
            result.setdefault(k, members)
    return result
class DevServer(SuperHTTPServer):
    """Development HTTP server that remembers its base URL and serves until
    interrupted, always closing the socket on exit."""

    def __init__(self, base_url, *args, **kwargs):
        self.base_url = base_url
        super().__init__(*args, **kwargs)

    def run(self):
        """Serve requests forever; Ctrl-C stops cleanly, and the server socket
        is closed no matter how serving ends."""
        try:
            try:
                self.serve_forever()
            except KeyboardInterrupt:
                pass  # normal shutdown path for a dev server
        finally:
            self.server_close()
# NOTE(review): the next line looks like a truncated decorator, presumably
# `@with_fixtures(WebFixture)` — confirm against the original source.
_fixtures(WebFixture)
def test_basic_fixed_attributes(web_fixture):
    """HTMLElement attribute API: set, list-append, query and render order."""
    fixture = web_fixture
    widget = HTMLElement(fixture.view, 'x')
    tester = WidgetTester(widget)
    widget.set_attribute('attr1', 'value1')
    # List attributes accumulate values across calls.
    widget.add_to_attribute('listattr', ['one', 'two'])
    widget.add_to_attribute('listattr', ['three'])
    assert widget.has_attribute('attr1')
    assert widget.has_attribute('listattr')
    assert (not widget.has_attribute('notthere'))
    assert (widget.attributes.v['attr1'] == 'value1')
    # List attribute values render sorted, space-separated.
    assert (widget.attributes.v['listattr'] == 'one three two')
    rendered = tester.render_html()
    assert (rendered == '<x attr1="value1" listattr="one three two">')
    widget.set_attribute('id', '123')
    widget.add_to_attribute('class', ['z', 'b'])
    rendered = tester.render_html()
    # 'id' renders first; 'class' values are sorted as well.
    assert (rendered == '<x id="123" attr1="value1" listattr="one three two" class="b z">') |
class SendGrantInput(BaseGrantInput):
    """GraphQL input type for submitting a grant application."""
    name: str
    full_name: str
    conference: strawberry.ID
    age_group: AgeGroup
    gender: str
    occupation: Occupation
    grant_type: GrantType
    python_usage: str
    been_to_other_events: str
    community_contribution: str
    interested_in_volunteering: InterestedInVolunteering
    needs_funds_for_travel: bool
    need_visa: bool
    need_accommodation: bool
    why: str
    notes: str
    travelling_from: str
    website: str
    twitter_handle: str
    github_handle: str
    linkedin_url: str
    mastodon_handle: str
    def validate(self, conference: Conference, user: User) -> GrantErrors:
        """Run the base validation, then reject duplicate submissions.

        NOTE(review): the duplicate check filters on user_id only, not on the
        conference — confirm whether one grant per user across all conferences
        is intended.
        """
        errors = super().validate(conference=conference, user=user)
        if GrantModel.objects.filter(user_id=user.id).exists():
            errors.add_error('non_field_errors', 'Grant already submitted!')
        return errors |
def pretf_blocks():
    """Generator-style pretf config: yields Terraform blocks to create.

    Each `yield` hands a block definition to pretf and (for resources/helpers)
    receives back an object describing the created block.
    """
    bucket = (yield resource.aws_s3_bucket.test(bucket='pretf-example-aws-files', acl='private'))
    total_files = 0
    total_bytes = 0
    # Upload two source directories into the bucket, accumulating totals.
    for source in ('files', 'more-files'):
        objects = (yield aws_s3_bucket_objects(bucket=bucket, source=source))
        total_files += objects.total_files
        total_bytes += objects.total_bytes
    # Expose the accumulated counts as Terraform outputs.
    (yield output.total_files(value=total_files))
    (yield output.total_bytes(value=total_bytes)) |
class RelativeObjectPosition(_PositionType):
    """OpenSCENARIO position expressed relative to another entity, with dx/dy
    (and optional dz) offsets and an optional orientation."""

    def __init__(self, entity, dx, dy, dz=None, orientation=None):
        """Create a relative position.

        entity: name of the reference entity.
        dx, dy, dz: offsets (dz optional); converted via convert_float.
        orientation: an Orientation; defaults to a fresh Orientation().
            The previous default of `orientation=Orientation()` was evaluated
            once at definition time, sharing one instance across all calls —
            fixed by defaulting to None and constructing per call.
        """
        self.target = entity
        self.dx = convert_float(dx)
        self.dy = convert_float(dy)
        self.dz = convert_float(dz)
        if orientation is None:
            orientation = Orientation()
        if not isinstance(orientation, Orientation):
            raise TypeError('input orientation is not of type Orientation')
        self.orient = orientation

    def __eq__(self, other):
        if isinstance(other, RelativeObjectPosition):
            if ((self.get_attributes() == other.get_attributes()) and (self.orient == other.orient)):
                return True
        return False

    def parse(element):
        """Build a RelativeObjectPosition from a <Position> XML element
        containing a <RelativeObjectPosition> child."""
        position_element = element.find('RelativeObjectPosition')
        dx = convert_float(position_element.attrib['dx'])
        dy = convert_float(position_element.attrib['dy'])
        if 'dz' in position_element.attrib:
            dz = convert_float(position_element.attrib['dz'])
        else:
            dz = None
        entityref = position_element.attrib['entityRef']
        # `is not None` (not `!= None`): Element comparison should be identity.
        if position_element.find('Orientation') is not None:
            orientation = Orientation.parse(position_element.find('Orientation'))
        else:
            orientation = Orientation()
        return RelativeObjectPosition(entityref, dx, dy, dz, orientation)

    def get_attributes(self):
        """Return the XML attributes as a dict (dz only when set)."""
        retdict = {}
        retdict['entityRef'] = self.target
        retdict['dx'] = str(self.dx)
        retdict['dy'] = str(self.dy)
        if self.dz is not None:
            retdict['dz'] = str(self.dz)
        return retdict

    def get_element(self, elementname='Position'):
        """Serialize to an XML element; orientation is appended only if filled."""
        element = ET.Element(elementname)
        relpos = ET.SubElement(element, 'RelativeObjectPosition', attrib=self.get_attributes())
        if self.orient.is_filled():
            relpos.append(self.orient.get_element())
        return element
class Bottleneck(MetaModule):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1 conv) built from Meta* layers
    so its parameters can be replaced for meta-learning."""
    # Output channels are `expansion * planes`.
    expansion = 4
    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = MetaConv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = MetaBatchNorm2d(planes)
        # Only the 3x3 conv carries the stride.
        self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = MetaBatchNorm2d(planes)
        self.conv3 = MetaConv2d(planes, (self.expansion * planes), kernel_size=1, bias=False)
        self.bn3 = MetaBatchNorm2d((self.expansion * planes))
        # Identity shortcut unless the shape changes, then a projection conv.
        self.shortcut = nn.Sequential()
        if ((stride != 1) or (in_planes != (self.expansion * planes))):
            self.shortcut = nn.Sequential(MetaConv2d(in_planes, (self.expansion * planes), kernel_size=1, stride=stride, bias=False), MetaBatchNorm2d((self.expansion * planes)))
    def forward(self, x):
        """Standard pre-activation-free bottleneck: three conv-bn stages,
        residual add, then final ReLU."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out |
def dump_infos_gdb(parsed):
    """Extract the coredump for `parsed.pid` via coredumpctl and run gdb on it,
    printing thread info and full backtraces for all threads."""
    with tempfile.TemporaryDirectory() as tempdir:
        core_path = os.path.join(tempdir, 'dump')
        # Pull the core out of the systemd journal into a temp file.
        dump_cmd = ['coredumpctl', 'dump', '-o', core_path, str(parsed.pid)]
        subprocess.run(dump_cmd, check=True)
        gdb_cmd = [
            'gdb', parsed.exe, core_path,
            '-ex', 'info threads',
            '-ex', 'thread apply all bt full',
            '-ex', 'quit',
        ]
        subprocess.run(gdb_cmd, check=True)
# NOTE(review): the following four lines look like truncated decorators,
# presumably `@raise_on_failure` and `@pytest.mark.parametrize(...)` —
# confirm against the original source.
_on_failure
.parametrize('number_of_nodes', [2])
.parametrize('deposit', [1000])
.parametrize('enable_rest_api', [True])
def test_api_channel_withdraw(api_server_test_instance: APIServer, raiden_network: List[RaidenService], token_addresses, pfs_mock):
    """REST withdraw endpoint: zero and over-balance amounts conflict,
    a valid amount succeeds, and repeating the same total conflicts."""
    (_, app1) = raiden_network
    pfs_mock.add_apps(raiden_network)
    token_address = token_addresses[0]
    partner_address = app1.address
    # Withdraw of 0 is invalid.
    request = grequests.patch(api_url_for(api_server_test_instance, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address), json=dict(total_withdraw='0'))
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.CONFLICT)
    # Withdraw above the available channel balance is rejected.
    request = grequests.patch(api_url_for(api_server_test_instance, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address), json=dict(total_withdraw='1500'))
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.CONFLICT)
    # A valid total_withdraw succeeds.
    request = grequests.patch(api_url_for(api_server_test_instance, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address), json=dict(total_withdraw='750'))
    response = request.send().response
    assert_response_with_code(response, HTTPStatus.OK)
    # Re-submitting the same (unchanged) total_withdraw conflicts.
    request = grequests.patch(api_url_for(api_server_test_instance, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address), json=dict(total_withdraw='750'))
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.CONFLICT) |
class Rename():
    """Rename refactoring: renames a Python identifier at an offset, or a
    module/package resource when no offset is given."""
    def __init__(self, project, resource, offset=None):
        """Resolve what is being renamed.

        With `offset`, evaluate the name at that position in `resource`;
        without it, treat `resource` itself (module file or package folder)
        as the renamed object.
        """
        self.project = project
        self.resource = resource
        if (offset is not None):
            self.old_name = worder.get_name_at(self.resource, offset)
            this_pymodule = self.project.get_pymodule(self.resource)
            (self.old_instance, self.old_pyname) = evaluate.eval_location2(this_pymodule, offset)
            if (self.old_pyname is None):
                raise exceptions.RefactoringError('Rename refactoring should be performed on resolvable python identifiers.')
        else:
            # Renaming a package via its __init__.py means renaming the folder.
            if ((not resource.is_folder()) and (resource.name == '__init__.py')):
                resource = resource.parent
            dummy_pymodule = libutils.get_string_module(self.project, '')
            self.old_instance = None
            self.old_pyname = pynames.ImportedModule(dummy_pymodule, resource=resource)
            if resource.is_folder():
                self.old_name = resource.name
            else:
                # Strip the '.py' extension for module files.
                self.old_name = resource.name[:(- 3)]
    def get_old_name(self):
        return self.old_name
    def get_changes(self, new_name, in_file=None, in_hierarchy=False, unsure=None, docs=False, resources=None, task_handle=taskhandle.DEFAULT_TASK_HANDLE):
        """Compute the ChangeSet that renames all occurrences to `new_name`.

        `resources` limits the searched files; `docs` includes occurrences in
        strings/comments; `in_hierarchy` renames matching methods across the
        class hierarchy; `unsure` decides about ambiguous occurrences.
        """
        # Backward compatibility: a boolean `unsure` is wrapped in a function.
        if (unsure in (True, False)):
            warnings.warn('unsure parameter should be a function that returns True or False', DeprecationWarning, stacklevel=2)
            def unsure_func(value=unsure):
                return value
            unsure = unsure_func
        if (in_file is not None):
            warnings.warn('`in_file` argument has been deprecated; use `resources` instead. ', DeprecationWarning, stacklevel=2)
            if in_file:
                resources = [self.resource]
        # Local names can only occur in their defining file.
        if _is_local(self.old_pyname):
            resources = [self.resource]
        if (resources is None):
            resources = self.project.get_python_files()
        changes = ChangeSet(f'Renaming <{self.old_name}> to <{new_name}>')
        finder = occurrences.create_finder(self.project, self.old_name, self.old_pyname, unsure=unsure, docs=docs, instance=self.old_instance, in_hierarchy=(in_hierarchy and self.is_method()))
        job_set = task_handle.create_jobset('Collecting Changes', len(resources))
        for file_ in resources:
            job_set.started_job(file_.path)
            new_content = rename_in_module(finder, new_name, resource=file_)
            if (new_content is not None):
                changes.add_change(ChangeContents(file_, new_content))
            job_set.finished_job()
        # Renaming a module also moves/renames the file or folder itself.
        if self._is_renaming_a_module():
            resource = self.old_pyname.get_object().get_resource()
            if self._is_allowed_to_move(resources, resource):
                self._rename_module(resource, new_name, changes)
        return changes
    def _is_allowed_to_move(self, resources, resource):
        # A package may move only if its __init__.py was part of the search set.
        if resource.is_folder():
            try:
                return (resource.get_child('__init__.py') in resources)
            except exceptions.ResourceNotFoundError:
                return False
        else:
            return (resource in resources)
    def _is_renaming_a_module(self):
        return isinstance(self.old_pyname.get_object(), pyobjects.AbstractModule)
    def is_method(self):
        """True when the renamed name is a function defined inside a class."""
        pyname = self.old_pyname
        return (isinstance(pyname, pynames.DefinedName) and isinstance(pyname.get_object(), pyobjects.PyFunction) and isinstance(pyname.get_object().parent, pyobjects.PyClass))
    def _rename_module(self, resource, new_name, changes):
        # Module files keep their '.py' extension; folders are moved as-is.
        if (not resource.is_folder()):
            new_name = (new_name + '.py')
        parent_path = resource.parent.path
        if (parent_path == ''):
            new_location = new_name
        else:
            new_location = ((parent_path + '/') + new_name)
        changes.add_change(MoveResource(resource, new_location)) |
def setup_smoketest(*, eth_client: EthClient, print_step: StepPrinter, free_port_generator: Iterator[Port], debug: bool=False, stdout: IO=None, append_report: Callable=print) -> Iterator[RaidenTestSetup]:
    """Context-style generator: spin up testchain + matrix + raiden for a
    smoketest, yield the setup, then shut down the ethereum nodes cleanly.

    With debug=True, stdout is left alone; otherwise all output is redirected
    into the provided `stdout` stream.
    """
    make_requests_insecure()
    datadir = mkdtemp()
    testchain_manager = setup_testchain_for_smoketest(eth_client=eth_client, print_step=print_step, free_port_generator=free_port_generator, base_datadir=datadir, base_logdir=datadir)
    matrix_manager = setup_matrix_for_smoketest(print_step=print_step, free_port_generator=free_port_generator)
    if debug:
        stdout_manager = contextlib.nullcontext()
    else:
        # stdout is only required when not debugging.
        assert (stdout is not None)
        stdout_manager = contextlib.redirect_stdout(stdout)
    with stdout_manager, testchain_manager as testchain, matrix_manager as server_urls:
        # Set after setup_raiden succeeds, so the finally block knows whether
        # there is anything to tear down.
        ethereum_nodes = None
        try:
            raiden_setup = setup_raiden(matrix_server=server_urls[0][0], print_step=print_step, contracts_version=RAIDEN_CONTRACT_VERSION, eth_rpc_endpoint=testchain['eth_rpc_endpoint'], web3=testchain['web3'], base_datadir=testchain['base_datadir'], keystore=testchain['keystore'], free_port_generator=free_port_generator)
            ethereum_nodes = testchain['node_executors']
            assert all(ethereum_nodes)
            (yield raiden_setup)
        finally:
            # Graceful shutdown: SIGINT first, kill after a 10s grace period.
            if (ethereum_nodes is not None):
                for node_executor in ethereum_nodes:
                    node = node_executor.process
                    if (node is not None):
                        node.send_signal(signal.SIGINT)
                        try:
                            node.wait(10)
                        except TimeoutExpired:
                            print_step('Ethereum node shutdown unclean, check log!', error=True)
                            node.kill()
                    # Attach the node's log output to the smoketest report.
                    if isinstance(node_executor.stdio, tuple):
                        logfile = node_executor.stdio[1]
                        logfile.flush()
                        logfile.seek(0)
                        append_report('Ethereum Node log output', logfile.read()) |
class RemoveButton(QtWidgets.QToolButton):
    """Tool button that undoes annotations and shows a help tooltip on hover."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setText('Remove')
    def enterEvent(self, e):
        # Show the explanatory tooltip only when the window's help mode is on.
        if (self.window().showhelp is True):
            QtWidgets.QToolTip.showText(e.globalPos(), '<h3>Undo Annotations</h3>Successively undo previously added annotations.') |
def is_pathname_valid(pathname: str) -> bool:
    """Return True when `pathname` is a syntactically valid path for the
    current OS, probing each component with os.lstat against a known root."""
    try:
        # Reject non-strings and the empty string up front.
        if not isinstance(pathname, str) or not pathname:
            return False
        # Drop any drive specifier; only the path components are validated.
        _, pathname = os.path.splitdrive(pathname)
        probe_root = os.environ.get('HOMEDRIVE', 'C:') if IS_WINDOWS else os.path.sep
        assert os.path.isdir(probe_root)
        probe_root = probe_root.rstrip(os.path.sep) + os.path.sep
        for part in pathname.split(os.path.sep):
            try:
                os.lstat(probe_root + part)
            except OSError as exc:
                # Windows reports invalid names via winerror; POSIX via errno.
                if hasattr(exc, 'winerror'):
                    if exc.winerror == ERROR_INVALID_NAME:
                        return False
                elif exc.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
                    return False
    except TypeError:
        # os.lstat received a path type it cannot handle.
        return False
    else:
        # No component was rejected, so the pathname is valid.
        return True
def test_proto3_schema():
    """Parse a .proto schema string into a StructType with typed fields."""
    # NOTE(review): proto3 does not allow the `required` label (proto2 only) —
    # confirm whether the parser is meant to tolerate it or the fixture should
    # be updated.
    mock_proto3_schema_str = '\n    syntax = "proto3";\n\n    message User {\n      required string name = 1;\n      required int32 age = 2;\n    }\n    '
    client = FilesystemClient(scheme='file')
    # Bypass the filesystem: serve the schema string directly.
    with patch.object(client, '_read_file', return_value=mock_proto3_schema_str):
        result = client.schema('dummy_user.proto')
    assert isinstance(result, StructType)
    assert (len(result.fields) == 2)
    assert (result.fields[0].extra_attrs['name'] == 'name')
    assert isinstance(result.fields[0], StringType)
    assert (result.fields[1].extra_attrs['name'] == 'age')
    assert isinstance(result.fields[1], IntType) |
class TestEOH(QiskitAquaTestCase):
    """Evolution-of-Hamiltonian (EOH) algorithm smoke test."""
    # NOTE(review): the next line looks like a truncated parameterization
    # decorator, presumably ddt's `@data('initial_state', 'circuit')` —
    # confirm against the original source.
    ('initial_state', 'circuit')
    def test_eoh(self, mode):
        """Run EOH on random symmetric Hamiltonians, with the initial state
        given either as a deprecated InitialState or a QuantumCircuit."""
        size = 2
        aqua_globals.random_seed = 0
        # Random symmetric matrix -> Hermitian operator to measure.
        temp = aqua_globals.random.random(((2 ** size), (2 ** size)))
        h_1 = (temp + temp.T)
        qubit_op = MatrixOperator(matrix=h_1)
        # Second random symmetric matrix -> evolution operator.
        temp = aqua_globals.random.random(((2 ** size), (2 ** size)))
        h_1 = (temp + temp.T)
        evo_op = MatrixOperator(matrix=h_1)
        if (mode == 'initial_state'):
            # Deprecated Custom initial state; warning silenced deliberately.
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', category=DeprecationWarning)
                state_in = Custom(size, state='random')
        else:
            # Normalized random statevector loaded into a circuit.
            random_state = aqua_globals.random.random((2 ** size))
            random_state = (random_state / np.linalg.norm(random_state))
            state_in = QuantumCircuit(size)
            state_in.initialize(random_state, range(size))
        evo_time = 1
        num_time_slices = 100
        eoh = EOH(qubit_op, state_in, evo_op, evo_time=evo_time, num_time_slices=num_time_slices)
        backend = BasicAer.get_backend('statevector_simulator')
        # Statevector simulation needs only a single shot.
        quantum_instance = QuantumInstance(backend, shots=1)
        ret = eoh.run(quantum_instance)
        self.log.debug('Evaluation result: %s', ret) |
class MarkSheetTestCase(unittest.TestCase):
    """Visual/manual tests for the mark_sheet module on a sample image."""
    def setUp(self) -> None:
        # Sample triangle image, converted to a uint8 numpy array.
        img_path = '../../image_data/DIP3E_CH11_Original_Images/Fig1111(a)(triangle).tif'
        self.img = Image.open(img_path)
        self.img = np.asarray(self.img)
        self.img = self.img.astype(np.uint8)
    def test_get_centroid(self):
        # NOTE(review): plt.show() blocks and these tests assert nothing —
        # they are visual inspections rather than automated checks.
        centroid = mark_sheet.get_centroid(self.img)
        print(centroid)
        import matplotlib.pyplot as plt
        plt.imshow(self.img, cmap='gray')
        # centroid is (row, col); matplotlib plots (x, y) = (col, row).
        plt.plot([centroid[1]], [centroid[0]], linewidth='10', color='green')
        plt.show()
    def test_get_mark_sheet(self):
        r = mark_sheet.get_mark_sheet(self.img)
        import matplotlib.pyplot as plt
        plt.plot(r)
        plt.show() |
def main():
    """Fine-tune / evaluate a TF multiple-choice model on SWAG-style data.

    End-to-end pipeline: parse arguments, configure logging, detect a resume
    checkpoint, load and tokenize the dataset, build/compile the model inside
    the distribution-strategy scope, then train, evaluate and optionally push
    to the Hugging Face Hub.
    """
    # --- Argument parsing: a single .json argument overrides CLI flags. ---
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    output_dir = Path(training_args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    # --- Logging: route everything to stdout at the process log level. ---
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # --- Resume detection: a non-empty output dir either holds a usable
    # checkpoint (config + TF2 weights) or is an error without overwrite. ---
    checkpoint = None
    if ((len(os.listdir(training_args.output_dir)) > 0) and (not training_args.overwrite_output_dir)):
        if ((output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file()):
            checkpoint = output_dir
            logger.info(f'Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
        else:
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to continue regardless.')
    set_seed(training_args.seed)
    # --- Data: user-supplied csv/json files, or the public SWAG dataset. ---
    if ((data_args.train_file is not None) or (data_args.validation_file is not None)):
        data_files = {}
        if (data_args.train_file is not None):
            data_files['train'] = data_args.train_file
        if (data_args.validation_file is not None):
            data_files['validation'] = data_args.validation_file
        # NOTE(review): the loader type is derived from train_file only —
        # assumes train_file is set whenever validation_file is; confirm
        # upstream argument validation.
        extension = data_args.train_file.split('.')[(- 1)]
        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
    else:
        raw_datasets = load_dataset('swag', 'regular', cache_dir=model_args.cache_dir)
    # SWAG column layout: one context sentence, a question header, 4 endings.
    ending_names = [f'ending{i}' for i in range(4)]
    context_name = 'sent1'
    question_header_name = 'sent2'
    # --- Config / tokenizer: prefer the checkpoint's config when resuming. ---
    if (checkpoint is not None):
        config_path = training_args.output_dir
    elif model_args.config_name:
        config_path = model_args.config_name
    else:
        config_path = model_args.model_name_or_path
    config = AutoConfig.from_pretrained(config_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    # --- Sequence length: default to the tokenizer's limit, capped at 1024. ---
    if (data_args.max_seq_length is None):
        max_seq_length = tokenizer.model_max_length
        if (max_seq_length > 1024):
            logger.warning(f'The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). Picking 1024 instead. You can change that default value by passing --max_seq_length xxx.')
            max_seq_length = 1024
    else:
        if (data_args.max_seq_length > tokenizer.model_max_length):
            logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for themodel ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_function(examples):
        """Expand each example into its 4 (context, header+ending) pairs,
        tokenize them flat, then regroup the token lists in chunks of 4."""
        first_sentences = [([context] * 4) for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [[f'{header} {examples[end][i]}' for end in ending_names] for (i, header) in enumerate(question_headers)]
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True, max_length=max_seq_length)
        # Un-flatten: every 4 consecutive tokenized pairs belong to 1 example.
        data = {k: [v[i:(i + 4)] for i in range(0, len(v), 4)] for (k, v) in tokenized_examples.items()}
        return data

    # --- Dataset preparation (train and/or validation). ---
    if training_args.do_train:
        if ('train' not in raw_datasets):
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        non_label_columns = [feature for feature in train_dataset.features if (feature not in ('label', 'labels'))]
        if (data_args.max_train_samples is not None):
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache))
    if training_args.do_eval:
        if ('validation' not in raw_datasets):
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = raw_datasets['validation']
        if (not training_args.do_train):
            non_label_columns = [feature for feature in eval_dataset.features if (feature not in ('label', 'labels'))]
        if (data_args.max_eval_samples is not None):
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache))
    # Fixed-length batches can use the default collator; otherwise pad
    # dynamically per batch with the multiple-choice collator.
    if data_args.pad_to_max_length:
        data_collator = DefaultDataCollator(return_tensors='tf')
    else:
        data_collator = DataCollatorForMultipleChoice(tokenizer)
    # --- Model build, training and evaluation under the strategy scope. ---
    with training_args.strategy.scope():
        if (checkpoint is None):
            model_path = model_args.model_name_or_path
        else:
            model_path = checkpoint
        model = TFAutoModelForMultipleChoice.from_pretrained(model_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
        num_replicas = training_args.strategy.num_replicas_in_sync
        total_train_batch_size = (training_args.per_device_train_batch_size * num_replicas)
        total_eval_batch_size = (training_args.per_device_eval_batch_size * num_replicas)
        if training_args.do_train:
            total_train_steps = ((len(train_dataset) // total_train_batch_size) * int(training_args.num_train_epochs))
            (optimizer, lr_schedule) = create_optimizer(init_lr=training_args.learning_rate, num_train_steps=int(total_train_steps), num_warmup_steps=0)
        else:
            # Optimizer is irrelevant when only evaluating.
            optimizer = 'adam'
        model.compile(optimizer=optimizer, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy')])
        if training_args.do_train:
            dataset_exclude_cols = set((non_label_columns + ['label']))
            tf_train_dataset = train_dataset.to_tf_dataset(columns=[col for col in train_dataset.column_names if (col not in dataset_exclude_cols)], shuffle=True, batch_size=total_train_batch_size, collate_fn=data_collator, drop_remainder=True, label_cols=('label' if ('label' in train_dataset.column_names) else None))
            if training_args.do_eval:
                validation_data = eval_dataset.to_tf_dataset(columns=[col for col in eval_dataset.column_names if (col not in dataset_exclude_cols)], shuffle=False, batch_size=total_eval_batch_size, collate_fn=data_collator, drop_remainder=True, label_cols=('label' if ('label' in eval_dataset.column_names) else None))
            else:
                validation_data = None
            model.fit(tf_train_dataset, validation_data=validation_data, epochs=int(training_args.num_train_epochs), callbacks=[SavePretrainedCallback(output_dir=training_args.output_dir)])
        # Standalone evaluation (no training run in this invocation).
        if (training_args.do_eval and (not training_args.do_train)):
            dataset_exclude_cols = set((non_label_columns + ['label']))
            tf_eval_dataset = eval_dataset.to_tf_dataset(columns=[col for col in eval_dataset.column_names if (col not in dataset_exclude_cols)], shuffle=False, batch_size=total_eval_batch_size, collate_fn=data_collator, drop_remainder=True, label_cols=('label' if ('label' in eval_dataset.column_names) else None))
            model.evaluate(tf_eval_dataset)
        if training_args.push_to_hub:
            model.push_to_hub(finetuned_from=model_args.model_name_or_path, tasks='multiple-choice', dataset_tags='swag', dataset_args='regular', dataset='SWAG', language='en')
# NOTE(review): the '.parametrize(...)' line below is the remnant of a
# stripped '@pytest.mark.parametrize' decorator; as written it is not valid
# Python — restore the decorator from the original source.
.parametrize('blockysize', [1, 2, 3, 7, 61, 62])
def test_creation_untiled_blockysize(tmp_path, blockysize):
    """An untiled GTiff accepts any blockysize; the stored strip height is
    expected to be capped at the raster height (61 rows here)."""
    tmpfile = (tmp_path / 'test.tif')
    with rasterio.open(tmpfile, 'w', count=1, height=61, width=37, dtype='uint8', blockysize=blockysize, tiled=False) as dataset:
        pass
    with rasterio.open(tmpfile) as dataset:
        assert (not dataset.is_tiled)
        assert (dataset.profile['blockysize'] == min(blockysize, 61))
        assert (dataset.block_shapes[0][0] == min(blockysize, 61))
def uc_sepset(cg: CausalGraph, priority: int=3, background_knowledge: (BackgroundKnowledge | None)=None) -> CausalGraph:
    """Orient unshielded colliders (v-structures x -> y <- z) using sepsets.

    A triple (x, y, z) is oriented as a collider when y does not appear in any
    separating set of (x, z).  *priority* selects the conflict policy:
      0: overwrite any existing orientation of both edges,
      1: orient undirected endpoints; an existing arrow toward x/z becomes a
         bi-directed edge,
      2: orient only if neither y->x nor y->z is already fully directed,
      3/4: defer all candidates, score each with cg_new.ci_test over the
         conditioning sets with / without the midpoint, then orient in sorted
         order (3: ascending, 4: descending).

    Works on a deep copy; the input graph *cg* is not modified.
    """
    assert (priority in [0, 1, 2, 3, 4])
    cg_new = deepcopy(cg)
    R0 = []  # triples deferred to the scoring phase (priority 3/4)
    UC_dict = {}
    # Unshielded triples, deduplicated by requiring i < k.
    UT = [(i, j, k) for (i, j, k) in cg_new.find_unshielded_triples() if (i < k)]
    for (x, y, z) in UT:
        # Skip orientations that would contradict background knowledge.
        if ((background_knowledge is not None) and (background_knowledge.is_forbidden(cg_new.G.nodes[x], cg_new.G.nodes[y]) or background_knowledge.is_forbidden(cg_new.G.nodes[z], cg_new.G.nodes[y]) or background_knowledge.is_required(cg_new.G.nodes[y], cg_new.G.nodes[x]) or background_knowledge.is_required(cg_new.G.nodes[y], cg_new.G.nodes[z]))):
            continue
        # Collider test: y is absent from every separating set of (x, z).
        # NOTE(review): this reads cg.sepset (the original) while the rest of
        # the function uses cg_new — identical content after deepcopy, but
        # confirm the inconsistency is intentional.
        if all(((y not in S) for S in cg.sepset[(x, z)])):
            if (priority == 0):
                # Remove both edges between x,y (either direction), then
                # re-add as x -> y; same for z,y.
                edge1 = cg_new.G.get_edge(cg_new.G.nodes[x], cg_new.G.nodes[y])
                if (edge1 is not None):
                    cg_new.G.remove_edge(edge1)
                edge2 = cg_new.G.get_edge(cg_new.G.nodes[y], cg_new.G.nodes[x])
                if (edge2 is not None):
                    cg_new.G.remove_edge(edge2)
                cg_new.G.add_edge(Edge(cg_new.G.nodes[x], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
                edge3 = cg_new.G.get_edge(cg_new.G.nodes[y], cg_new.G.nodes[z])
                if (edge3 is not None):
                    cg_new.G.remove_edge(edge3)
                edge4 = cg_new.G.get_edge(cg_new.G.nodes[z], cg_new.G.nodes[y])
                if (edge4 is not None):
                    cg_new.G.remove_edge(edge4)
                cg_new.G.add_edge(Edge(cg_new.G.nodes[z], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
            elif (priority == 1):
                # Orient x *-* y toward y; a pre-existing arrow into x makes
                # the result bi-directed (ARROW, ARROW).
                edge1 = cg_new.G.get_edge(cg_new.G.nodes[x], cg_new.G.nodes[y])
                if (edge1 is not None):
                    if ((cg_new.G.graph[(x, y)] == Endpoint.TAIL.value) and (cg_new.G.graph[(y, x)] == Endpoint.TAIL.value)):
                        cg_new.G.remove_edge(edge1)
                        cg_new.G.add_edge(Edge(cg_new.G.nodes[x], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
                    elif ((cg_new.G.graph[(x, y)] == Endpoint.ARROW.value) and (cg_new.G.graph[(y, x)] == Endpoint.TAIL.value)):
                        cg_new.G.remove_edge(edge1)
                        cg_new.G.add_edge(Edge(cg_new.G.nodes[x], cg_new.G.nodes[y], Endpoint.ARROW, Endpoint.ARROW))
                else:
                    cg_new.G.add_edge(Edge(cg_new.G.nodes[x], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
                edge2 = cg_new.G.get_edge(cg_new.G.nodes[z], cg_new.G.nodes[y])
                if (edge2 is not None):
                    if ((cg_new.G.graph[(z, y)] == Endpoint.TAIL.value) and (cg_new.G.graph[(y, z)] == Endpoint.TAIL.value)):
                        cg_new.G.remove_edge(edge2)
                        cg_new.G.add_edge(Edge(cg_new.G.nodes[z], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
                    elif ((cg_new.G.graph[(z, y)] == Endpoint.ARROW.value) and (cg_new.G.graph[(y, z)] == Endpoint.TAIL.value)):
                        cg_new.G.remove_edge(edge2)
                        cg_new.G.add_edge(Edge(cg_new.G.nodes[z], cg_new.G.nodes[y], Endpoint.ARROW, Endpoint.ARROW))
                else:
                    cg_new.G.add_edge(Edge(cg_new.G.nodes[z], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
            elif (priority == 2):
                # Orient only when no existing fully-directed edge out of y
                # would be overwritten.
                if ((not cg_new.is_fully_directed(y, x)) and (not cg_new.is_fully_directed(y, z))):
                    edge1 = cg_new.G.get_edge(cg_new.G.nodes[x], cg_new.G.nodes[y])
                    if (edge1 is not None):
                        cg_new.G.remove_edge(edge1)
                    cg_new.G.add_edge(Edge(cg_new.G.nodes[x], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
                    edge2 = cg_new.G.get_edge(cg_new.G.nodes[z], cg_new.G.nodes[y])
                    if (edge2 is not None):
                        cg_new.G.remove_edge(edge2)
                    cg_new.G.add_edge(Edge(cg_new.G.nodes[z], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
            else:
                # priority 3/4: decide later, after scoring all candidates.
                R0.append((x, y, z))
    if (priority in [0, 1, 2]):
        return cg_new
    else:
        # Score each deferred triple by the worst (max) CI-test value over the
        # relevant conditioning sets, then orient in sorted order.
        if (priority == 3):
            for (x, y, z) in R0:
                cond = cg_new.find_cond_sets_with_mid(x, z, y)
                UC_dict[(x, y, z)] = max([cg_new.ci_test(x, z, S) for S in cond])
            UC_dict = sort_dict_ascending(UC_dict)
        else:
            for (x, y, z) in R0:
                cond = cg_new.find_cond_sets_without_mid(x, z, y)
                UC_dict[(x, y, z)] = max([cg_new.ci_test(x, z, S) for S in cond])
            UC_dict = sort_dict_ascending(UC_dict, descending=True)
        for (x, y, z) in UC_dict.keys():
            # Re-check background knowledge before orienting.
            if ((background_knowledge is not None) and (background_knowledge.is_forbidden(cg_new.G.nodes[x], cg_new.G.nodes[y]) or background_knowledge.is_forbidden(cg_new.G.nodes[z], cg_new.G.nodes[y]) or background_knowledge.is_required(cg_new.G.nodes[y], cg_new.G.nodes[x]) or background_knowledge.is_required(cg_new.G.nodes[y], cg_new.G.nodes[z]))):
                continue
            if ((not cg_new.is_fully_directed(y, x)) and (not cg_new.is_fully_directed(y, z))):
                edge1 = cg_new.G.get_edge(cg_new.G.nodes[x], cg_new.G.nodes[y])
                if (edge1 is not None):
                    cg_new.G.remove_edge(edge1)
                cg_new.G.add_edge(Edge(cg_new.G.nodes[x], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
                edge2 = cg_new.G.get_edge(cg_new.G.nodes[z], cg_new.G.nodes[y])
                if (edge2 is not None):
                    cg_new.G.remove_edge(edge2)
                cg_new.G.add_edge(Edge(cg_new.G.nodes[z], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
        return cg_new
def test_chat_photo():
    """All chat-photo file ids (user, group chat, channel; small and big
    variants) must decode as CHAT_PHOTO / PHOTO unique type."""
    # (file_id, file_unique_id) pairs, in the same order as before:
    # user small, user big, chat small, chat big, channel small, channel big.
    photo_pairs = [
        ('AQADAgADrKcxGylBBQAJIH3qihAAAwIAAylBBQAF7bDHYwABnc983KcAAh4E', 'AQADIH3qihAAA9ynAAI'),
        ('AQADAgADrKcxGylBBQAJIH3qihAAAwMAAylBBQAF7bDHYwABnc983qcAAh4E', 'AQADIH3qihAAA96nAAI'),
        ('AQADAgATIH3qihAAAwIAA3t3-P______AAjhngEAAR4E', 'AQADIH3qihAAA-GeAQAB'),
        ('AQADAgATIH3qihAAAwMAA3t3-P______AAjjngEAAR4E', 'AQADIH3qihAAA-OeAQAB'),
        ('AQADAgATIH3qihAAAwIAA-fFwCoX____MvARg8nvpc3RpwACHgQ', 'AQADIH3qihAAA9GnAAI'),
        ('AQADAgATIH3qihAAAwMAA-fFwCoX____MvARg8nvpc3TpwACHgQ', 'AQADIH3qihAAA9OnAAI'),
    ]
    for file_id, unique_id in photo_pairs:
        check(file_id, FileType.CHAT_PHOTO)
        check_unique(unique_id, FileUniqueType.PHOTO)
def simulation(method, m=400, n=3000, rvec=[10, 50, 25], rho=0.1, nrep=50, seed=1234, burnin=200, win_size=200, track_cp_burnin=100, n_check_cp=20, alpha=0.01, proportion=0.5, n_positive=3, min_test_size=100, tolerance_num=0, factor=1):
    """Monte-Carlo comparison of online vs. batch robust-PCA trackers.

    Each replication builds a piecewise low-rank matrix L0 (a burn-in block
    followed by one block per rank in *rvec*), adds sparse corruption S0
    (Bernoulli(*rho*) support, Uniform(-1000, 1000) values), then runs and
    times either the online algorithms (stoc_rpca, omwrpca, omwrpca_cp) or
    the batch algorithm (mwrpca) on M0 = L0 + S0.

    NOTE: *rvec* is a never-mutated default list; *tolerance_num* is accepted
    for interface compatibility but unused here.

    Returns a dict with keys 'result' (per-rep evaluation dicts) and
    'run_times' (per-rep wall-clock seconds per algorithm).
    """
    result = ([np.nan] * nrep)
    run_times = ([np.nan] * nrep)
    random_state = np.random.get_state()
    for rep in range(nrep):
        # Seed only the first replication; later reps resume the saved state
        # so all reps draw from one continuous random stream.
        if (rep == 0):
            np.random.seed(seed=seed)
        else:
            np.random.set_state(random_state)
        result[rep] = {}
        run_times[rep] = {}
        # BUG FIX: was 'n / len(rvec)' — true division yields a float in
        # Python 3 and np.random.randn requires integer dimensions.
        n_piece = (n // len(rvec))
        U0 = []
        for r in rvec:
            U0.append(np.random.randn(m, r))
        V0_burnin = np.random.randn(burnin, rvec[0])
        V0 = []
        for r in rvec:
            V0.append(np.random.randn(n_piece, r))
        # Assemble L0: burn-in block first, then one block per rank in rvec.
        L0 = U0[0].dot(V0_burnin.transpose())
        for (U0_i, V0_i) in zip(U0, V0):
            L0 = np.hstack((L0, U0_i.dot(V0_i.transpose())))
        S0 = ((np.random.uniform(0, 1, size=(m, (n + burnin))) < rho).astype(int) * np.random.uniform((- 1000), 1000, size=(m, (n + burnin))))
        M0 = (L0 + S0)
        random_state = np.random.get_state()
        # evaluate() previously received the leftover loop variable 'r'
        # (== rvec[-1]); made explicit here, same value.
        last_rank = rvec[(- 1)]
        if (method == 'online'):
            start = timer()
            (Lhat, Shat, rank, Uhat) = stoc_rpca(M0, burnin, lambda1=(1.0 / np.sqrt(m)), lambda2=((1.0 / np.sqrt(m)) * (10 ** 2)))
            end = timer()
            result[rep]['stoc_rpca'] = evaluate(Lhat, Shat, L0, S0, last_rank, U0, burnin)
            run_times[rep]['stoc_rpca'] = (end - start)
            start = timer()
            (Lhat, Shat, rank) = omwrpca(M0, burnin, win_size, lambda1=(1.0 / np.sqrt(m)), lambda2=((1.0 / np.sqrt(m)) * (10 ** 2)))
            end = timer()
            result[rep]['omwrpca'] = evaluate(Lhat, Shat, L0, S0, last_rank, U0, burnin)
            run_times[rep]['omwrpca'] = (end - start)
            start = timer()
            # BUG FIX: pass the 'factor' parameter through instead of the
            # hard-coded 1 that silently ignored it.
            (Lhat, Shat, rank, cp, num_sparses) = omwrpca_cp(M0, burnin, win_size, track_cp_burnin, n_check_cp, alpha, proportion, n_positive, min_test_size, lambda1=(1.0 / np.sqrt(m)), lambda2=((1.0 / np.sqrt(m)) * (10 ** 2)), factor=factor)
            end = timer()
            result[rep]['omwrpca_cp'] = evaluate(Lhat, Shat, L0, S0, last_rank, U0, burnin)
            result[rep]['omwrpca_cp: rank'] = rank
            result[rep]['omwrpca_cp: cp'] = cp
            run_times[rep]['omwrpca_cp'] = (end - start)
        elif (method == 'batch'):
            start = timer()
            (Lhat, Shat, rank) = mwrpca(M0, burnin, win_size)
            end = timer()
            result[rep]['mwrpca'] = evaluate(Lhat, Shat, L0, S0, last_rank, U0, burnin)
            run_times[rep]['mwrpca'] = (end - start)
    sim_result = {}
    sim_result['result'] = result
    sim_result['run_times'] = run_times
    return sim_result
class Person(models.Model):
    """Demo model exercising the three tagulous field flavours."""
    name = models.CharField(max_length=255)
    # SingleTagField behaves like a CharField whose choices grow dynamically.
    title = tagulous.models.SingleTagField(initial='Mr, Mrs', help_text='This is a SingleTagField - effectively a CharField with dynamic choices', on_delete=models.CASCADE)
    # Tag model shared with Skill; configured not to split tags on spaces.
    skills = tagulous.models.TagField(Skill, help_text='This field does not split on spaces')
    # Auto-created tag model; tags are lower-cased and split on spaces/commas.
    hobbies = tagulous.models.TagField(initial='eating, coding, gaming', force_lowercase=True, blank=True, help_text='This field splits on spaces and commas')

    class Meta():
        verbose_name_plural = 'people'
def create_3cloths(cloth1_start, cloth1_end, cloth1_length, cloth2_start, cloth2_end, cloth2_length, cloth3_start, cloth3_end, cloth3_length, r_id, junction=1, n_lanes=1, lane_offset=3, road_marks=std_roadmark_broken()):
    """Create a Road whose geometry is three consecutive spirals (clothoids).

    Deprecated: use create_road instead
    (see examples/xodr/clothoid_generation.py).

    Parameters
    ----------
    clothN_start, clothN_end : curvature at the start/end of spiral N.
    clothN_length : length of spiral N.
    r_id : id of the created road.
    junction : junction id passed as the road type.
    n_lanes : number of lanes on each side of the center lane.
    lane_offset : lane width (coefficient 'a' of the lane polynomial).
    road_marks : road mark added to every lane; pass a falsy value to disable.
        NOTE: the default is evaluated once at import time, so all calls
        relying on the default share the same roadmark object.

    Returns
    -------
    Road
    """
    # BUG FIX: the deprecation message named the wrong function
    # ('create_cloth_arc_cloth') and misspelled 'examples'.
    warn('create_3cloths should not be used anymore, please use the create_road (see examples/xodr/clothoid_generation.py) function instead', DeprecationWarning, 2)
    pv = PlanView()
    spiral1 = Spiral(cloth1_start, cloth1_end, length=cloth1_length)
    spiral2 = Spiral(cloth2_start, cloth2_end, length=cloth2_length)
    spiral3 = Spiral(cloth3_start, cloth3_end, length=cloth3_length)
    pv.add_geometry(spiral1)
    pv.add_geometry(spiral2)
    pv.add_geometry(spiral3)
    center_lane = Lane()
    if road_marks:
        center_lane.add_roadmark(road_marks)
    lsec = LaneSection(0, center_lane)
    for i in range(1, (n_lanes + 1), 1):
        rl = Lane(a=lane_offset)
        ll = Lane(a=lane_offset)
        if road_marks:
            rl.add_roadmark(road_marks)
            ll.add_roadmark(road_marks)
        lsec.add_right_lane(rl)
        lsec.add_left_lane(ll)
    lanes = Lanes()
    lanes.add_lanesection(lsec)
    return Road(r_id, pv, lanes, road_type=junction)
def generate_triplet_from_seeds(seeds: List[List[str]], max_triplets_per_set=None) -> List[Tuple[(str, str, str)]]:
    """Build (anchor, positive, negative) triplets from seed clusters.

    For every cluster, each pair of phrases (randomly ordered) is combined
    with a random phrase drawn from the other clusters.  When a cluster has
    more than *max_triplets_per_set* pairs, the pairs are subsampled.
    Clusters whose vocabulary covers all phrases yield no triplets.
    """
    vocabulary = {phrase for cluster in seeds for phrase in cluster}
    triplets = []
    for idx, cluster in enumerate(seeds):
        negatives = list((vocabulary - set(cluster)))
        if not negatives:
            continue
        pair_count = (len(cluster) * (len(cluster) - 1)) / 2
        if (max_triplets_per_set is not None) and (pair_count > max_triplets_per_set):
            logger.debug('Subsampling')
            pairs = utils.Rnd.random_pairs(cluster, max_triplets_per_set)
        else:
            pairs = itertools.combinations(cluster, 2)
        for anchor_pos in pairs:
            # Randomise which element of the pair is the anchor.
            if random.random() > 0.5:
                anchor_pos = (anchor_pos[1], anchor_pos[0])
            triplets.append((*anchor_pos, random.choice(negatives)))
    return triplets
class ModelForModelSizeMeasure(nn.Module):
    """DARTS-style network built with fixed architecture weights, used to
    measure the size of a discovered model.

    Mirrors the search network (stem -> stack of normal/reduction cells ->
    global pooling -> linear classifier) but bakes the given alphas into each
    InnerCell instead of learning them.
    """

    def __init__(self, C, num_classes, layers, criterion, alphas_normal, alphas_reduce, steps=4, multiplier=4, stem_multiplier=3):
        super(ModelForModelSizeMeasure, self).__init__()
        self._C = C
        self._num_classes = num_classes
        self._layers = layers
        self._criterion = criterion
        self._steps = steps
        self._multiplier = multiplier
        # Stem: 3-channel input -> stem_multiplier*C feature maps.
        C_curr = (stem_multiplier * C)
        self.stem = nn.Sequential(nn.Conv2d(3, C_curr, 3, padding=1, bias=False), nn.BatchNorm2d(C_curr))
        (C_prev_prev, C_prev, C_curr) = (C_curr, C_curr, C)
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            # Reduction cells (doubled width) at 1/3 and 2/3 of the depth.
            if (i in [(layers // 3), ((2 * layers) // 3)]):
                C_curr *= 2
                reduction = True
                cell = InnerCell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, alphas_reduce)
            else:
                reduction = False
                cell = InnerCell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev, alphas_normal)
            reduction_prev = reduction
            self.cells += [cell]
            (C_prev_prev, C_prev) = (C_prev, (multiplier * C_curr))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

    def forward(self, input_data):
        """Return class logits for a batch of 3-channel images."""
        s0 = s1 = self.stem(input_data)
        # FIX (simplification): the original branched on cell.reduction but
        # executed identical code in both branches; collapsed to one path.
        for cell in self.cells:
            (s0, s1) = (s1, cell(s0, s1))
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), (- 1)))
        return logits
def test_nested_point_user_strategy():
    """Check that hypothesis-generated NestedPoint values respect the
    per-field range limits in limit_dict."""
    limit_dict = {'p1': {'x': range(224, 240)}, 'p2': pst.bitstructs(Point2D, {'x': range(0, 2), 'y': range(2, 4)})}
    print('')
    # NOTE(review): the two bare parenthesised lines below are remnants of
    # stripped hypothesis decorators on actual_test — most likely
    # '@hypothesis.given(bs=pst.bitstructs(NestedPoint, limit_dict))' and
    # '@hypothesis.settings(max_examples=16)'. As written they are not valid
    # Python; restore the decorators from the original source.
    (bs=pst.bitstructs(NestedPoint, limit_dict))
    (max_examples=16)
    def actual_test(bs):
        assert isinstance(bs, NestedPoint)
        assert (224 <= bs.p1.x <= 239)
        assert (0 <= bs.p2.x < 2)
        assert (2 <= bs.p2.y < 4)
        print(bs)
    actual_test()
def test_handle_inittarget():
    """A valid incoming locked transfer makes the target emit a
    SendSecretRequest (to the initiator) and a SendProcessed event."""
    block_number = 1
    prng = random.Random()
    channels = make_channel_set([channel_properties])
    partner_balance = channels[0].partner_state.contract_balance
    transfer_properties = LockedTransferSignedStateProperties(
        amount=partner_balance,
        expiration=channels[0].reveal_timeout + block_number + 1,
        canonical_identifier=channels[0].canonical_identifier,
        transferred_amount=0,
        locked_amount=partner_balance,
    )
    from_transfer = create(transfer_properties)
    init_state_change = ActionInitTarget(
        from_hop=channels.get_hop(0),
        transfer=from_transfer,
        balance_proof=from_transfer.balance_proof,
        sender=from_transfer.balance_proof.sender,
    )
    iteration = target.handle_inittarget(init_state_change, channels[0], prng, block_number)
    expected_request = {
        'payment_identifier': from_transfer.payment_identifier,
        'amount': from_transfer.lock.amount,
        'secrethash': from_transfer.lock.secrethash,
        'recipient': UNIT_TRANSFER_INITIATOR,
    }
    assert search_for_item(iteration.events, SendSecretRequest, expected_request)
    assert search_for_item(iteration.events, SendProcessed, {})
def add_control_inputs(op, cops):
    """Add the operations *cops* as control inputs of *op*.

    Args:
        op: a tf.Operation to which the control inputs are added.
        cops: an operation or a list of operations.
    Raises:
        TypeError: if op is not a tf.Operation.
        ValueError: if any op in cops is already a control input of op.
    """
    if (not isinstance(op, _tf_ops.Operation)):
        # BUG FIX: the original passed the format string and the type as two
        # separate TypeError arguments, so the message was never formatted.
        raise TypeError('Expected a tf.Operation, got: {}'.format(type(op)))
    cops = _util.make_list_of_op(cops, allow_graph=False)
    for cop in cops:
        if (cop in op.control_inputs):
            raise ValueError('{} is already a control_input of {}'.format(cop.name, op.name))
    # Private TF API: appends to the op's control inputs in-place.
    op._add_control_inputs(cops)
def object_issubclass(node: nodes.NodeNG, class_or_seq: list[InferenceResult], context: (InferenceContext | None)=None) -> (util.UninferableBase | bool):
    """Check whether the class *node* is a subclass of any entry in *class_or_seq*.

    :raises TypeError: if *node* is not a ClassDef node.
    """
    if isinstance(node, nodes.ClassDef):
        return _object_type_is_subclass(node, class_or_seq, context=context)
    raise TypeError(f'{node} needs to be a ClassDef node')
class User(ModelReprMixin, models.Model):
    """A Discord user mirrored into the site database."""
    # Discord snowflake ID, used directly as the primary key.
    id = models.BigIntegerField(primary_key=True, validators=(MinValueValidator(limit_value=0, message='User IDs cannot be negative.'),), verbose_name='ID', help_text='The ID of this user, taken from Discord.')
    name = models.CharField(max_length=32, help_text='The username, taken from Discord.')
    # Legacy 4-digit Discord discriminator (0-9999).
    discriminator = models.PositiveSmallIntegerField(validators=(MaxValueValidator(limit_value=9999, message='Discriminators may not exceed `9999`.'),), help_text='The discriminator of this user, taken from Discord.')
    # Role snowflake IDs; each is checked against the Role table.
    roles = ArrayField(models.BigIntegerField(validators=(MinValueValidator(limit_value=0, message='Role IDs cannot be negative.'), _validate_existing_role)), default=list, blank=True, help_text='IDs of roles the user has on the server')
    in_guild = models.BooleanField(default=True, help_text='Whether this user is in our server.', verbose_name='In Guild')

    def __str__(self):
        """Render as 'name#0007' (legacy Discord tag format)."""
        return f'{self.name}#{self.discriminator:04d}'

    # NOTE(review): top_role and username read like properties — a '@property'
    # decorator may have been stripped from the original source; confirm.
    def top_role(self) -> Role:
        """Return the user's highest role, falling back to 'Developers'
        when none of the stored role IDs match a Role row."""
        roles = Role.objects.filter(id__in=self.roles)
        if (not roles):
            return Role.objects.get(name='Developers')
        return max(roles)

    def username(self) -> str:
        """Full username string; same as str(self)."""
        return str(self)
class UserDashboardViewTest(TestCase):
    """Access control and content checks for the user_dashboard view."""

    # NOTE(review): Django calls setUpTestData as a classmethod — the
    # '@classmethod' decorator appears to have been stripped from the original
    # source; confirm and restore.
    def setUpTestData(cls):
        add_default_data()

    def login(self, name):
        """Log the test client in as *name* (password equals username in the
        fixtures) and return the matching PytitionUser."""
        self.client.login(username=name, password=name)
        self.pu = PytitionUser.objects.get(user__username=name)
        return self.pu

    def logout(self):
        self.client.logout()

    def tearDown(self):
        pass

    def test_NotLoggedIn(self):
        """Anonymous access redirects to the login page with a 'next' param."""
        self.logout()
        response = self.client.get(reverse('user_dashboard'), follow=True)
        self.assertRedirects(response, ((reverse('login') + '?next=') + reverse('user_dashboard')))
        self.assertTemplateUsed(response, 'registration/login.html')
        self.assertTemplateUsed(response, 'layouts/base.html')

    def test_UserOK1(self):
        """Logged-in user sees the dashboard with exactly their petitions."""
        john = self.login('john')
        num_petitions = john.petition_set.count()
        response = self.client.get(reverse('user_dashboard'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'petition/user_dashboard.html')
        self.assertTemplateUsed(response, 'petition/user_base.html')
        self.assertTemplateUsed(response, 'layouts/base.html')
        petitions = response.context['petitions'].all()
        self.assertEqual(len(petitions), num_petitions)
        self.assertEqual(response.context['user'], john)

    def test_UserOK2(self):
        """Same checks for a second fixture user."""
        julia = self.login('julia')
        num_petitions = julia.petition_set.count()
        response = self.client.get(reverse('user_dashboard'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'petition/user_dashboard.html')
        self.assertTemplateUsed(response, 'petition/user_base.html')
        self.assertTemplateUsed(response, 'layouts/base.html')
        petitions = response.context['petitions'].all()
        self.assertEqual(len(petitions), num_petitions)
        self.assertEqual(response.context['user'], julia)

    def test_UserOK3(self):
        """Same checks for a third fixture user.
        (The local name 'max' shadows the builtin within this method.)"""
        max = self.login('max')
        num_petitions = max.petition_set.count()
        response = self.client.get(reverse('user_dashboard'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'petition/user_dashboard.html')
        self.assertTemplateUsed(response, 'petition/user_base.html')
        self.assertTemplateUsed(response, 'layouts/base.html')
        petitions = response.context['petitions'].all()
        self.assertEqual(len(petitions), num_petitions)
        self.assertEqual(response.context['user'], max)

    def test_UserOK4(self):
        """Same checks for a fourth fixture user."""
        sarah = self.login('sarah')
        num_petitions = sarah.petition_set.count()
        response = self.client.get(reverse('user_dashboard'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'petition/user_dashboard.html')
        self.assertTemplateUsed(response, 'petition/user_base.html')
        self.assertTemplateUsed(response, 'layouts/base.html')
        petitions = response.context['petitions'].all()
        self.assertEqual(len(petitions), num_petitions)
        self.assertEqual(response.context['user'], sarah)
# NOTE(review): the bare string in parens below is the remnant of a stripped
# '@patch(...)' decorator supplying the mock_config_init parameter — restore
# '@patch('pypyr.config.config.init')' from the original source.
('pypyr.config.config.init')
def test_trace_log_level_none(mock_config_init):
    """main() returns 255 on a pipeline error and, at the default log level,
    does not print a traceback; config init still runs exactly once."""
    arg_list = ['blah', 'ctx string']
    with patch('pypyr.pipelinerunner.run') as mock_pipeline_run:
        with patch('traceback.print_exc') as mock_traceback:
            mock_pipeline_run.side_effect = AssertionError('Test Error Mock')
            val = pypyr.cli.main(arg_list)
    assert (val == 255)
    mock_traceback.assert_not_called()
    mock_config_init.assert_called_once()
def raise_error_and_retry(max_try, time_gap, func, *args, **kargs):
    """Call func(*args, **kargs), retrying up to *max_try* times.

    Sleeps *time_gap* seconds between attempts.  Exceptions are logged (with
    traceback) and swallowed.  Returns the first successful result, or None
    if every attempt failed.
    """
    import logging
    logging.debug('Debugging, Enter raise_error_and_retry')
    sth = None
    for i in range(max_try):
        try:
            sth = func(*args, **kargs)
        except Exception as e:
            logging.info(e)
            traceback.print_exception(*sys.exc_info())
            logging.info(('traceback.format_exc():\n%s' % traceback.format_exc()))
            # BUG FIX: the original slept (and logged 'retry....') even after
            # the final failed attempt; only wait when a retry will follow.
            if (i + 1) < max_try:
                logging.info('{}-th attempt to do func: {}, fail!!!, waiting {} seconds, retry....'.format(i, str(func), time_gap))
                time.sleep(time_gap)
        else:
            logging.info('{}-th attempt to do func: {}, success!!!'.format(i, str(func)))
            break
    return sth
class SmtLibSolver(object):
    """Abstract interface mirroring the SMT-LIB v2 command set.

    Each method corresponds to one SMT-LIB command; concrete solver
    back-ends override them.  Every method here raises NotImplementedError.
    """

    def set_logic(self, logic):
        """Handle (set-logic <logic>)."""
        raise NotImplementedError

    def declare_fun(self, symbol):
        """Handle (declare-fun ...) for *symbol*."""
        raise NotImplementedError

    def declare_const(self, symbol):
        """Handle (declare-const ...) for *symbol*."""
        raise NotImplementedError

    def define_fun(self, name, args, rtype, expr):
        """Handle (define-fun <name> (<args>) <rtype> <expr>)."""
        raise NotImplementedError

    def declare_sort(self, name, cardinality):
        """Handle (declare-sort <name> <cardinality>)."""
        raise NotImplementedError

    def define_sort(self, name, args, sort_expr):
        """Handle (define-sort <name> (<args>) <sort>)."""
        raise NotImplementedError

    def assert_(self, expr, named=None):
        """Handle (assert <expr>); *named* supports :named annotations.
        (Trailing underscore avoids the 'assert' keyword.)"""
        raise NotImplementedError

    def get_assertions(self):
        """Handle (get-assertions)."""
        raise NotImplementedError

    def check_sat(self):
        """Handle (check-sat)."""
        raise NotImplementedError

    def get_proof(self):
        """Handle (get-proof)."""
        raise NotImplementedError

    def get_unsat_core(self):
        """Handle (get-unsat-core)."""
        raise NotImplementedError

    def get_values(self, exprs):
        """Handle (get-value (<exprs>))."""
        raise NotImplementedError

    def get_assignment(self):
        """Handle (get-assignment)."""
        raise NotImplementedError

    def push(self, levels=1):
        """Handle (push <levels>): push assertion-stack scopes."""
        raise NotImplementedError

    def pop(self, levels=1):
        """Handle (pop <levels>): pop assertion-stack scopes."""
        raise NotImplementedError

    def get_option(self, name):
        """Handle (get-option <name>)."""
        raise NotImplementedError

    def set_option(self, name, value):
        """Handle (set-option <name> <value>)."""
        raise NotImplementedError

    def get_info(self, name):
        """Handle (get-info <name>)."""
        raise NotImplementedError

    def set_info(self, name, value):
        """Handle (set-info <name> <value>)."""
        raise NotImplementedError

    def exit(self):
        """Handle (exit): terminate the solver session."""
        raise NotImplementedError
def auto_load_resume(model, path, status):
    """Load model weights from a checkpoint for training resume or testing.

    status == 'train': *path* is a directory; load the highest-numbered
        'epochN.pth' file (or return (0, init_lr) if none exist) and return
        (epoch, learning_rate) from the checkpoint.
    status == 'test': *path* is a checkpoint file; load it on CPU and return
        its stored epoch.
    """

    def _strip_module_prefix(state_dict):
        # Checkpoints saved from nn.DataParallel prefix keys with 'module.';
        # strip it so the weights load into a bare model.
        new_state_dict = OrderedDict()
        for (k, v) in state_dict.items():
            name = k[7:] if ('module.' == k[:7]) else k
            new_state_dict[name] = v
        return new_state_dict

    if (status == 'train'):
        pth_files = os.listdir(path)
        nums_epoch = [int(name.replace('epoch', '').replace('.pth', '')) for name in pth_files if ('.pth' in name)]
        if (len(nums_epoch) == 0):
            # Nothing to resume from: start at epoch 0 with the initial lr.
            return (0, init_lr)
        max_epoch = max(nums_epoch)
        pth_path = os.path.join(path, (('epoch' + str(max_epoch)) + '.pth'))
        print('Load model from', pth_path)
        checkpoint = torch.load(pth_path)
        model.load_state_dict(_strip_module_prefix(checkpoint['model_state_dict']))
        epoch = checkpoint['epoch']
        lr = checkpoint['learning_rate']
        print(('Resume from %s' % pth_path))
        return (epoch, lr)
    elif (status == 'test'):
        print('Load model from', path)
        checkpoint = torch.load(path, map_location='cpu')
        model.load_state_dict(_strip_module_prefix(checkpoint['model_state_dict']))
        epoch = checkpoint['epoch']
        print(('Resume from %s' % path))
        return epoch
def main():
    """CLI entry point: parse arguments and pretty-print a kernprof .lprof file."""

    def positive_float(value):
        """argparse type callable accepting only strictly positive floats."""
        val = float(value)
        if (val <= 0):
            # BUG FIX: the original did 'raise ArgumentError' with no
            # arguments, which cannot even be instantiated; raise ValueError,
            # which argparse turns into a proper 'invalid positive_float
            # value' message.
            raise ValueError(f'{value} is not a positive number')
        return val

    parser = ArgumentParser()
    parser.add_argument('-V', '--version', action='version', version=__version__)
    parser.add_argument('-u', '--unit', default='1e-6', type=positive_float, help='Output unit (in seconds) in which the timing info is displayed (default: 1e-6)')
    parser.add_argument('-z', '--skip-zero', action='store_true', help='Hide functions which have not been called')
    parser.add_argument('-r', '--rich', action='store_true', help='Use rich formatting')
    parser.add_argument('-t', '--sort', action='store_true', help='Sort by ascending total time')
    parser.add_argument('-m', '--summarize', action='store_true', help='Print a summary of total function time')
    parser.add_argument('profile_output', help='*.lprof file created by kernprof')
    args = parser.parse_args()
    lstats = load_stats(args.profile_output)
    show_text(lstats.timings, lstats.unit, output_unit=args.unit, stripzeros=args.skip_zero, rich=args.rich, sort=args.sort, summarize=args.summarize)
class BotCreatePullRequestTest(TestCase):
    """Bot.create_pull_request: which repo/token is used and how a
    NoPermissionError from the provider is retried."""

    def test_plain(self):
        """Without a bot token, the PR is opened against the user repo."""
        bot = bot_factory(bot_token=None)
        bot._bot_repo = 'BOT REPO'
        bot._user_repo = 'USER REPO'
        bot.create_pull_request('title', 'body', 'new_branch')
        self.assertEqual(bot.provider.create_pull_request.called, True)
        self.assertEqual(bot.provider.create_pull_request.call_args_list[0][1], {'base_branch': 'base_branch', 'new_branch': 'new_branch', 'repo': 'USER REPO', 'body': 'body', 'title': 'title', 'pr_label': False, 'assignees': [], 'config': bot.config})

    def test_bot_no_errors(self):
        """With a bot token, the PR targets the bot repo and no permission
        lookup is needed when the first attempt succeeds."""
        bot = bot_factory(bot_token='foo')
        bot._bot_repo = 'BOT REPO'
        bot._user_repo = 'USER REPO'
        bot.create_pull_request('title', 'body', 'new_branch')
        self.assertEqual(bot.provider.create_pull_request.called, True)
        self.assertEqual(bot.provider.create_pull_request.call_args_list[0][1], {'base_branch': 'base_branch', 'new_branch': 'new_branch', 'repo': 'BOT REPO', 'body': 'body', 'title': 'title', 'pr_label': False, 'assignees': [], 'config': bot.config})
        self.assertEqual(bot.provider.get_pull_request_permissions.called, False)

    def test_bot_permission_error_resolved(self):
        """A NoPermissionError on the first attempt triggers a retry with the
        same arguments, which succeeds."""
        bot = bot_factory(bot_token='foo')
        bot.provider.create_pull_request.side_effect = [NoPermissionError, 'the foo']
        bot._bot_repo = 'BOT REPO'
        bot._user_repo = 'USER REPO'
        bot.create_pull_request('title', 'body', 'new_branch')
        self.assertEqual(bot.provider.create_pull_request.called, True)
        self.assertEqual(bot.provider.create_pull_request.call_args_list[0][1], {'base_branch': 'base_branch', 'new_branch': 'new_branch', 'repo': 'BOT REPO', 'body': 'body', 'title': 'title', 'pr_label': False, 'assignees': [], 'config': bot.config})
        self.assertEqual(bot.provider.create_pull_request.call_args_list[1][1], {'base_branch': 'base_branch', 'new_branch': 'new_branch', 'repo': 'BOT REPO', 'body': 'body', 'title': 'title', 'pr_label': False, 'assignees': [], 'config': bot.config})

    def test_bot_permission_error_not_resolved(self):
        """If the retry also raises NoPermissionError, it propagates after
        exactly two attempts with identical arguments."""
        bot = bot_factory(bot_token='foo')
        bot.provider.create_pull_request.side_effect = [NoPermissionError, NoPermissionError]
        bot._bot_repo = 'BOT REPO'
        bot._user_repo = 'USER REPO'
        with self.assertRaises(NoPermissionError):
            bot.create_pull_request('title', 'body', 'new_branch')
        self.assertEqual(bot.provider.create_pull_request.called, True)
        self.assertEqual(bot.provider.create_pull_request.call_args_list[0][1], {'base_branch': 'base_branch', 'new_branch': 'new_branch', 'repo': 'BOT REPO', 'body': 'body', 'title': 'title', 'pr_label': False, 'assignees': [], 'config': bot.config})
        self.assertEqual(bot.provider.create_pull_request.call_args_list[1][1], {'base_branch': 'base_branch', 'new_branch': 'new_branch', 'repo': 'BOT REPO', 'body': 'body', 'title': 'title', 'pr_label': False, 'assignees': [], 'config': bot.config})
class VariableNotConsistentError(VariableError):
    """Raised when a second source assigns a conflicting value to a variable."""

    def __init__(self, old_var: 'VariableValue', new_var: 'VariableValue') -> None:
        # Keep both sides of the conflict so the message (and callers) can
        # report who set what.
        self.old_var = old_var
        self.new_var = new_var

    def __str__(self) -> str:
        new, old = self.new_var, self.old_var
        return (
            f'create: {new.source} cannot set var.{new.name}={repr(new.value)} '
            f'because {old.source} set var.{old.name}={repr(old.value)}'
        )
def check_repetition(DB=bib_db):
    """Warn about bibtex entries whose normalized titles appear more than once.

    Titles are compared after stripping surrounding whitespace and removing
    literal braces, so ``{Some Title}`` and ``Some Title`` count as the same
    entry. Duplicates are printed to stdout; nothing is printed when the
    database is clean.

    Parameters
    ----------
    DB : bibtex database, defaults to the module-level ``bib_db``.
    """
    from collections import Counter

    def _normalize(raw_title):
        # Braces are LaTeX case-protection markup; drop them before comparing.
        return str(raw_title).strip().replace('{', '').replace('}', '')

    # Counter replaces the original manual dict counting (and its
    # `title in dict.keys()` membership anti-pattern).
    counts = Counter(_normalize(entry['title']) for entry in DB.entries)
    repeated = [title for (title, n) in counts.items() if n > 1]
    if repeated:
        print('Attention! Repetition detected in the bibtex file! Please check the following entries:')
        print('')
        for (i, title) in enumerate(repeated):
            print((i + 1), title)
class MultiResolutionSTFTLoss(torch.nn.Module):
    """Multi-resolution STFT loss: the average of STFTLoss over several resolutions."""

    def __init__(self, fft_sizes=[2048, 1024, 512], hop_sizes=[240, 120, 50], win_lengths=[1200, 600, 240], window='hann_window'):
        """Build one STFTLoss per (fft_size, hop_size, win_length) triple."""
        super(MultiResolutionSTFTLoss, self).__init__()
        assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
        self.stft_losses = torch.nn.ModuleList(
            STFTLoss(n_fft, hop, win_len, window)
            for (n_fft, hop, win_len) in zip(fft_sizes, hop_sizes, win_lengths)
        )

    def forward(self, x, y):
        """Return (spectral_convergence_loss, magnitude_loss), each averaged
        over all configured resolutions."""
        per_resolution = [loss_fn(x, y) for loss_fn in self.stft_losses]
        n_resolutions = len(self.stft_losses)
        sc_total = sum(sc for (sc, _) in per_resolution)
        mag_total = sum(mag for (_, mag) in per_resolution)
        return (sc_total / n_resolutions, mag_total / n_resolutions)
def module_in_path(modname: str, path: (str | Iterable[str])) -> bool:
    """Return True if the top-level package of `modname` lives under `path`.

    `path` may be a single directory string or an iterable of them. Returns
    False when the module cannot be located or has no backing file.
    """
    top_level = modname.split('.')[0]
    try:
        modfile = file_from_modpath([top_level])
    except ImportError:
        # The module cannot be located at all.
        return False
    if modfile is None:
        # Built-in / namespace module without a file on disk.
        return False
    modfile = _normalize_path(modfile)
    candidates = [path] if isinstance(path, str) else path
    return any(modfile.startswith(_cache_normalize_path(entry)) for entry in candidates)
# NOTE(review): the `.parametrize(...)` line below appears to have lost its
# `@pytest.mark` prefix during extraction — confirm against the original file.
.parametrize('parameters, expected', [pytest.param(['-p', 'C'], 1, id='Only C'), pytest.param(['-p', 'C', '-p', 'AB', '-p', 'N', '-p', 'O'], 5, id='CNO alpha beta'), pytest.param([], 7, id='All present'), pytest.param(['--no-targets'], 0, id='No targets')])
def test_combine_cli_all(run_cli, tmpdir, acetone, coumarin, openff, rdkit_workflow, parameters, expected):
    """End-to-end test of the `combine` CLI over two prepared molecule folders."""
    workflow = rdkit_workflow.copy(deep=True)
    workflow.non_bonded = get_protocol(protocol_name='5b')
    with tmpdir.as_cwd():
        # Run the openff workflow for both molecules, then lay out the
        # QUBEKit_<name> folder structure the combine command scans for.
        openff.run(coumarin)
        openff.run(acetone)
        os.mkdir('QUBEKit_acetone')
        os.mkdir('QUBEKit_coumarin')
        result = workflow._build_initial_results(molecule=acetone)
        result.to_file(os.path.join('QUBEKit_acetone', 'workflow_result.json'))
        result = workflow._build_initial_results(molecule=coumarin)
        result.to_file(os.path.join('QUBEKit_coumarin', 'workflow_result.json'))
        output = run_cli.invoke(combine, args=['combined.xml', *parameters])
        assert (output.exit_code == 0)
        assert ('2 molecules found, combining...' in output.output)
        # The number of ForceBalance entries in the combined XML depends on
        # which targets were requested on the command line.
        data = xmltodict.parse(open('combined.xml').read())
        if (expected > 0):
            assert (len(data['ForceField']['ForceBalance']) == expected)
        else:
            assert (data['ForceField']['ForceBalance'] is None)
class LifeCycleHook():
    """Bookkeeping object driving one component's render/effect life cycle.

    Tracks hook state slots (``use_state``), pending render requests, queued
    effects, and context providers for a single component instance.
    """

    __slots__ = ('__weakref__', '_context_providers', '_current_state_index', '_effect_funcs', '_effect_stops', '_effect_tasks', '_render_access', '_rendered_atleast_once', '_schedule_render_callback', '_scheduled_render', '_state', 'component')

    component: ComponentType

    def __init__(self, schedule_render: Callable[([], None)]) -> None:
        self._context_providers: dict[(Context[Any], ContextProviderType[Any])] = {}
        self._schedule_render_callback = schedule_render
        # True while a render has been requested but has not started yet.
        self._scheduled_render = False
        self._rendered_atleast_once = False
        # Cursor into `_state` replayed by use_state() during a render pass.
        self._current_state_index = 0
        self._state: tuple[(Any, ...)] = ()
        self._effect_funcs: list[EffectFunc] = []
        self._effect_tasks: list[Task[None]] = []
        self._effect_stops: list[Event] = []
        # Serializes render passes for this component.
        self._render_access = Semaphore(1)

    def schedule_render(self) -> None:
        """Request a re-render; duplicate requests collapse into one."""
        if self._scheduled_render:
            return None
        try:
            self._schedule_render_callback()
        except Exception:
            msg = f'Failed to schedule render via {self._schedule_render_callback}'
            logger.exception(msg)
        else:
            # Only mark as scheduled if the callback actually succeeded.
            self._scheduled_render = True

    def use_state(self, function: Callable[([], T)]) -> T:
        """Return the state for the next slot, initializing it on first render."""
        if (not self._rendered_atleast_once):
            # First render: create and remember the state slot.
            result = function()
            self._state += (result,)
        else:
            # Later renders: replay stored state in call order.
            result = self._state[self._current_state_index]
        self._current_state_index += 1
        return result

    def add_effect(self, effect_func: EffectFunc) -> None:
        """Queue an effect to be started after the layout has rendered."""
        self._effect_funcs.append(effect_func)

    def set_context_provider(self, provider: ContextProviderType[Any]) -> None:
        self._context_providers[provider.type] = provider

    def get_context_provider(self, context: Context[T]) -> (ContextProviderType[T] | None):
        return self._context_providers.get(context)

    async def affect_component_will_render(self, component: ComponentType) -> None:
        """Enter a render pass: take the render lock and make this hook current."""
        (await self._render_access.acquire())
        self._scheduled_render = False
        self.component = component
        self.set_current()

    async def affect_component_did_render(self) -> None:
        """Exit a render pass: reset per-render cursors and release the lock."""
        self.unset_current()
        self._rendered_atleast_once = True
        self._current_state_index = 0
        self._render_access.release()
        del self.component

    async def affect_layout_did_render(self) -> None:
        """Start all queued effects, sharing one stop Event for shutdown."""
        stop = Event()
        self._effect_stops.append(stop)
        self._effect_tasks.extend((create_task(e(stop)) for e in self._effect_funcs))
        self._effect_funcs.clear()

    async def affect_component_will_unmount(self) -> None:
        """Signal all effects to stop and wait for their tasks to finish."""
        for stop in self._effect_stops:
            stop.set()
        self._effect_stops.clear()
        try:
            (await gather(*self._effect_tasks))
        except Exception:
            logger.exception('Error in effect')
        finally:
            self._effect_tasks.clear()

    def set_current(self) -> None:
        """Push this hook onto the hook stack, inheriting the parent's contexts."""
        hook_stack = _HOOK_STATE.get()
        if hook_stack:
            parent = hook_stack[(- 1)]
            self._context_providers.update(parent._context_providers)
        hook_stack.append(self)

    def unset_current(self) -> None:
        """Pop this hook off the stack, verifying LIFO discipline."""
        if (_HOOK_STATE.get().pop() is not self):
            raise RuntimeError('Hook stack is in an invalid state')
class DbRouter():
    """Database router sending app.SecondItem to the 'second' database."""

    @staticmethod
    def _routes_to_second(model):
        # Only the 'seconditem' model of the 'app' application lives on 'second'.
        meta = model._meta
        return meta.app_label == 'app' and meta.model_name == 'seconditem'

    def db_for_read(self, model, **hints):
        """Route reads of SecondItem to 'second'; no opinion otherwise."""
        if self._routes_to_second(model):
            return 'second'
        return None

    def db_for_write(self, model, **hints):
        """Route writes of SecondItem to 'second'; no opinion otherwise."""
        if self._routes_to_second(model):
            return 'second'
        return None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        """Allow SecondItem migrations only on 'second'; None (no opinion) otherwise."""
        if app_label == 'app' and model_name == 'seconditem':
            return db == 'second'
def generate_ode_function(*args, **kwargs):
    """Build and return a numerical ODE right-hand-side function.

    All positional and keyword arguments are forwarded to the generator. The
    optional keyword argument ``generator`` selects the backend: one of the
    strings 'lambdify' (default), 'cython', 'theano', or a generator
    class/callable itself.

    Raises
    ------
    NotImplementedError
        If ``generator`` is a string that does not name a known backend.
    """
    generators = {'lambdify': LambdifyODEFunctionGenerator, 'cython': CythonODEFunctionGenerator, 'theano': TheanoODEFunctionGenerator}
    generator = kwargs.pop('generator', 'lambdify')
    if isinstance(generator, str):
        # Select the backend by name. Unknown names are reported explicitly.
        # (The original detected strings by catching the TypeError raised when
        # calling them, which also swallowed TypeErrors caused by bad *args*
        # passed to a real generator class and mislabeled them as "not a valid
        # generator".)
        try:
            Generator = generators[generator]
        except KeyError:
            msg = '{} is not a valid generator.'.format(generator)
            raise NotImplementedError(msg)
        return Generator(*args, **kwargs).generate()
    # ``generator`` is already a generator class (or compatible callable);
    # let any TypeError from bad arguments propagate unmodified.
    return generator(*args, **kwargs).generate()
def bind_subscription_to_org(subscription_id, org_id, user_id):
    """Create a subscription<->organization binding row.

    Returns the created row, or None when the data model rejects it (the
    error is logged, not raised). Raises OrgSubscriptionBindingAlreadyExists
    when the binding already exists (IntegrityError from the unique key).
    """
    try:
        return OrganizationRhSkus.create(subscription_id=subscription_id, org_id=org_id, user_id=user_id)
    except model.DataModelException as ex:
        # Best-effort: log and fall through to an implicit None return.
        logger.error('Problem binding subscription to org %s: %s', org_id, ex)
    except peewee.IntegrityError:
        # Duplicate binding: surface a domain-specific error to the caller.
        raise model.OrgSubscriptionBindingAlreadyExists()
# NOTE(review): the `.skipif(...)` line below appears to have lost its
# `@pytest.mark` prefix during extraction — confirm against the original file.
.skipif((os.getenv('GITHUB_ACTIONS', False) and (platform.system() in ['Windows', 'Darwin'])), reason='On Windows & MacOS precise timings are not accurate.')
class TestPTBJobstore():
    """Behavioral tests for the python-telegram-bot APScheduler job stores."""

    def test_default_jobstore_instance(self, jobstore):
        assert (type(jobstore) in (PTBMongoDBJobStore, PTBSQLAlchemyJobStore))

    def test_next_runtime(self, jq, jobstore):
        jq.run_repeating(dummy_job, 10, first=0.1)
        # The next run should land ~10s out; approx comparison tolerates jitter.
        assert (jobstore.get_next_run_time().second == pytest.approx((dtm.datetime.now().second + 10), 1))

    def test_lookup_job(self, jq, jobstore):
        initial_job = jq.run_once(dummy_job, 1)
        aps_job = jobstore.lookup_job(initial_job.id)
        # The stored APScheduler job must round-trip back to the PTB job.
        assert (aps_job == initial_job.job)
        assert (aps_job.name == initial_job.job.name)
        assert (aps_job.args[0] is jq)
        assert (aps_job.args[1].callback is initial_job.callback is dummy_job)

    def test_non_existent_job(self, jobstore):
        assert (jobstore.lookup_job('foo') is None)

    def test_get_all_jobs(self, jq, jobstore):
        j1 = jq.run_once(dummy_job, 1)
        j2 = jq.run_once(dummy_job, 2)
        j3 = jq.run_once(dummy_job, 3)
        jobs = jobstore.get_all_jobs()
        assert (jobs == [j1.job, j2.job, j3.job])

    def test_operations_on_job(self, jq, jobstore):
        # pause/resume/reschedule must preserve the job's identity.
        trigger = apscheduler.triggers.interval.IntervalTrigger(seconds=3)
        j1 = jq.run_once(dummy_job, 1)
        jq.scheduler.get_job(j1.job.id).pause()
        jq.scheduler.get_job(j1.job.id).resume()
        j_final = jq.scheduler.get_job(j1.job.id).reschedule(trigger)
        assert (j_final.id == j1.job.id)

    def test_remove_job(self, jq, jobstore):
        j1 = jq.run_once(dummy_job, 1)
        j2 = jq.run_once(dummy_job, 2)
        jobstore.remove_job(j1.id)
        assert (jobstore.get_all_jobs() == [j2.job])
        jobstore.remove_job(j2.id)
        assert (jobstore.get_all_jobs() == [])

    def test_sqlite_warning(self, caplog, app):
        # Using an SQLite URL should emit a "not supported" warning.
        with caplog.at_level(logging.WARNING):
            PTBSQLAlchemyJobStore(app, url='sqlite:///:memory:')
        assert ('Use of SQLite db is not supported' in caplog.text)
class Address(Base):
    """SQLAlchemy model for an event-result address with UI-exposed fields/events."""
    __tablename__ = 'eventresult_address'

    id = Column(Integer, primary_key=True)
    email_address = Column(UnicodeText)
    name = Column(UnicodeText)
    reviewed = Column(Boolean)

    # Form fields exposed to the UI layer; the lambdas defer Field
    # construction until the instance (`i`) is available.
    fields = ExposedNames()
    fields.name = (lambda i: Field(label='Name', required=True))
    fields.email_address = (lambda i: EmailField(label='Email', required=True))

    def save(self):
        """Persist this address, flush to obtain the primary key, and return it."""
        Session.add(self)
        Session.flush()
        return self.id

    def review(self):
        # Mark-only; committing the session is the caller's responsibility.
        self.reviewed = True

    # UI-exposed events bound to the instance methods above.
    events = ExposedNames()
    events.save = (lambda i: Event(label='Save', action=Action(i.save), address_id=IntegerField()))
    events.review = (lambda i: Event(label='Mark as reviewed', action=Action(i.review)))
def clipboard_manager(request, minimal_conf_noscreen, manager_nospawn):
    """Fixture: start qtile with a Clipboard widget in a top bar (X11 only)."""
    widget_kwargs = getattr(request, 'param', dict())
    clipboard_widget = libqtile.widget.Clipboard(**widget_kwargs)
    config = minimal_conf_noscreen
    config.screens = [Screen(top=Bar([clipboard_widget], 10))]
    manager_nospawn.start(config)
    if manager_nospawn.backend.name != 'x11':
        # Clipboard handling is only exercised on the X11 backend.
        pytest.skip('Test only available on X11.')
    yield manager_nospawn
def main(local_rank, args):
    """Evaluate an occupancy/semantic-segmentation checkpoint on the val set.

    Computes semantic mean IoU and binary occupancy IoU. Supports both
    single-GPU and distributed (NCCL) launches; ``local_rank`` is this
    process's GPU index.
    """
    torch.backends.cudnn.benchmark = True

    # Load the config and pull out dataset/loader settings.
    cfg = Config.fromfile(args.py_config)
    dataset_config = cfg.dataset_params
    ignore_label = dataset_config['ignore_label']
    version = dataset_config['version']
    train_dataloader_config = cfg.train_data_loader
    val_dataloader_config = cfg.val_data_loader
    grid_size = cfg.grid_size

    # Initialize the distributed process group, or fall back to one GPU.
    if (args.launcher == 'none'):
        distributed = False
        rank = 0
        cfg.gpu_ids = [0]
    else:
        distributed = True
        ip = os.environ.get('MASTER_ADDR', '127.0.0.1')
        port = os.environ.get('MASTER_PORT', '20506')
        hosts = int(os.environ.get('WORLD_SIZE', 1))
        rank = int(os.environ.get('RANK', 0))
        gpus = torch.cuda.device_count()
        print(f'tcp://{ip}:{port}')
        dist.init_process_group(backend='nccl', init_method=f'tcp://{ip}:{port}', world_size=(hosts * gpus), rank=((rank * gpus) + local_rank))
        world_size = dist.get_world_size()
        cfg.gpu_ids = range(world_size)
        torch.cuda.set_device(local_rank)
        if (dist.get_rank() != 0):
            # Silence stdout on non-master ranks.
            import builtins
            builtins.print = pass_print

    logger = MMLogger(name='eval_log', log_file=args.log_file, log_level='INFO')

    # Build the model and (optionally) wrap it in DistributedDataParallel.
    from builder import model_builder
    my_model = model_builder.build(cfg.model)
    n_parameters = sum((p.numel() for p in my_model.parameters() if p.requires_grad))
    logger.info(f'Number of params: {n_parameters}')
    logger.info(f'''Model:
{my_model}''')
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', True)
        ddp_model_module = torch.nn.parallel.DistributedDataParallel
        my_model = ddp_model_module(my_model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
    else:
        my_model = my_model.cuda()
    print('done ddp model')

    # Dataloaders and the two IoU metrics (semantic classes + binary occupancy).
    SemKITTI_label_name = get_nuScenes_label_name(dataset_config['label_mapping'])
    unique_label = np.asarray(cfg.unique_label)
    unique_label_str = [SemKITTI_label_name[x] for x in unique_label]
    from builder import data_builder
    (train_dataset_loader, val_dataset_loader) = data_builder.build_occ(dataset_config, train_dataloader_config, val_dataloader_config, grid_size=grid_size, version=version, dist=distributed)
    CalMeanIou_sem = MeanIoU(unique_label, ignore_label, unique_label_str, 'semantic')
    # Geometry IoU treats every non-empty, non-ignored voxel as class 1.
    CalMeanIou_geo = MeanIoU([1], ignore_label=255, label_str=['occupancy'], name='geometry')

    # Restore checkpoint weights (non-strict load tolerates renamed keys).
    assert osp.isfile(args.ckpt_path)
    print('ckpt path:', args.ckpt_path)
    map_location = 'cpu'
    ckpt = torch.load(args.ckpt_path, map_location=map_location)
    if ('state_dict' in ckpt):
        ckpt = ckpt['state_dict']
    print(my_model.load_state_dict(revise_ckpt(ckpt), strict=False))
    print(f'successfully loaded ckpt')

    print_freq = cfg.print_freq
    my_model.eval()
    CalMeanIou_sem.reset()
    CalMeanIou_geo.reset()

    with torch.no_grad():
        for (i_iter_val, data) in enumerate(val_dataset_loader):
            (voxel_position_coarse, points, val_vox_label, val_grid) = data
            points = points.cuda()
            val_grid = val_grid.to(torch.float32).cuda()
            val_grid_vox_coarse = voxel_position_coarse.to(torch.float32).cuda()
            voxel_label = val_vox_label.type(torch.LongTensor).cpu()
            predict_labels_vox = my_model(points=points, grid_ind=val_grid, grid_ind_vox=None, grid_ind_vox_coarse=val_grid_vox_coarse, voxel_label=voxel_label, return_loss=False)
            predict_labels_vox = torch.argmax(predict_labels_vox, dim=1).detach().cpu()
            # Semantic IoU on the raw per-class predictions.
            CalMeanIou_sem._after_step(predict_labels_vox.flatten(), voxel_label.flatten())
            # Binarize labels and predictions (occupied vs. free) for the
            # geometry IoU; 255 is the ignore label.
            occ_gt_mask = ((voxel_label != 0) & (voxel_label != 255))
            voxel_label[occ_gt_mask] = 1
            occ_pred_mask = (predict_labels_vox != 0)
            predict_labels_vox[occ_pred_mask] = 1
            CalMeanIou_geo._after_step(predict_labels_vox.flatten(), voxel_label.flatten())
            # NOTE(review): dist.get_rank() here assumes the process group was
            # initialized — may fail with launcher='none'; confirm.
            if (((i_iter_val % print_freq) == 0) and (dist.get_rank() == 0)):
                logger.info(('[EVAL] Iter %5d: Loss: None' % i_iter_val))

    val_miou_sem = CalMeanIou_sem._after_epoch()
    val_miou_geo = CalMeanIou_geo._after_epoch()
    logger.info(('val miou is %.3f' % val_miou_sem))
    logger.info(('val iou is %.3f' % val_miou_geo))
def _reg_to_soq(binst: Union[(BloqInstance, DanglingT)], reg: Register, available: Union[(Set[Soquet], _IgnoreAvailable)]=_IgnoreAvailable()) -> SoquetT:
    """Create the Soquet(s) for `reg` on `binst`, recording each in `available`.

    A shaped register yields an object ndarray of Soquets (one per index);
    a scalar register yields a single Soquet.
    """
    if not reg.shape:
        # Scalar register: a single soquet with no index.
        soq = Soquet(binst, reg)
        available.add(soq)
        return soq
    # Shaped register: one soquet per index, packed into an object ndarray.
    out = np.empty(reg.shape, dtype=object)
    for idx in reg.all_idxs():
        out[idx] = Soquet(binst, reg, idx=idx)
        available.add(out[idx])
    return out
def MI_Estimator(ft1, ft2, model):
    """Estimate mutual information between `ft1` and `ft2` from `model` scores.

    Trains through the JS f-GAN lower bound while reporting (numerically) the
    direct log-density-ratio estimate: the returned tensor's value equals the
    evaluation estimate, but its gradient is that of the training objective.
    """
    scores = model(ft1, ft2)
    diag = scores.diag()
    n = scores.size(0)
    # JS f-GAN lower bound: positive (diagonal) pairs vs. all negative pairs.
    pos_term = -F.softplus(-diag).mean()
    neg_term = (torch.sum(F.softplus(scores)) - torch.sum(F.softplus(diag))) / (n * (n - 1.0))
    train_val = pos_term - neg_term
    # Direct estimate of E[log density ratio] from the positive pairs.
    eval_val = diag.mean()
    with torch.no_grad():
        # Straight-through correction: value == eval_val, grad == d(train_val).
        eval_train = eval_val - train_val
    return train_val + eval_train
class Lines(object):
    """Cursor over a list of text lines.

    ``next_line`` advances the cursor and returns the stripped line;
    ``current_line`` re-reads the most recently consumed line.
    """

    def __init__(self, lines):
        self.lines = lines
        self.line_count = len(lines)
        # 0 = before the first line; call next_line() before current_line().
        self.current_line_number = 0

    def current_line(self):
        """Return the most recently consumed line, stripped of whitespace."""
        index = self.current_line_number - 1
        return self.lines[index].strip()

    def next_line(self):
        """Advance the cursor and return the new line; raise EOFError at the end."""
        self.current_line_number += 1
        if self.current_line_number > self.line_count:
            raise EOFError()
        return self.current_line()
class OutdatedHwFirmwareException(UserFacingException):
    """User-facing error raised for hardware devices with outdated firmware."""

    def text_ignore_old_fw_and_continue(self) -> str:
        """Return the warning prompt, prefixed by any device-specific detail."""
        warning = _('The firmware of your hardware device is too old. If possible, you should upgrade it. You can ignore this error and try to continue, however things are likely to break.')
        prompt = _('Ignore and continue?')
        suffix = warning + '\n\n' + prompt
        detail = str(self)
        if detail:
            return detail + '\n\n' + suffix
        return suffix
def main() -> None:
    """Benchmark mypy self-check in three modes: no cache, cold cache, warm cache."""
    trials = 3

    def run_full():
        # Non-incremental run (no cache involvement at all).
        execute(['python3', '-m', 'mypy', 'mypy'])

    def run_incremental():
        execute(['python3', '-m', 'mypy', '-i', 'mypy'])

    def clear_cache():
        delete_folder('.mypy_cache')

    def no_setup():
        return None

    print('Testing baseline')
    baseline = trial(trials, Command(no_setup, run_full))
    report('Baseline', baseline)

    print('Testing cold cache')
    # Each trial starts from an empty cache directory.
    cold_cache = trial(trials, Command(clear_cache, run_incremental))
    report('Cold cache', cold_cache)

    print('Testing warm cache')
    # Prime the cache once so every timed run starts warm.
    run_incremental()
    warm_cache = trial(trials, Command(no_setup, run_incremental))
    report('Warm cache', warm_cache)
def test_create_left_lane_merge_first_lane():
    """A 2->1 left-lane merge produces three sections with the expected
    geometry, road marks, and constant/varying width polynomials."""
    lanedef = xodr.LaneDef(10, 20, 2, 1, 1)
    lanes = xodr.create_lanes_merge_split(0, [lanedef], 30, xodr.std_roadmark_solid_solid(), 3, 3)

    sections = lanes.lanesections
    assert len(sections) == 3
    # Section start positions and lane counts per side.
    assert [section.s for section in sections] == [0, 10, 20]
    assert [len(section.rightlanes) for section in sections] == [0, 0, 0]
    assert [len(section.leftlanes) for section in sections] == [2, 2, 1]

    broken = xodr.RoadMarkType.broken
    solid = xodr.RoadMarkType.solid

    # Before the merge: broken mark between lanes, solid on the edge,
    # both lanes at constant width 3 (zero quadratic term).
    assert sections[0].leftlanes[0].roadmark[0].marking_type == broken
    assert sections[0].leftlanes[1].roadmark[0].marking_type == solid
    for lane in sections[0].leftlanes:
        assert lane.widths[0].a == 3
        assert lane.widths[0].c == 0

    # During the merge: the inner lane tapers (non-zero quadratic term),
    # the outer lane stays constant.
    assert sections[1].leftlanes[0].roadmark[0].marking_type == broken
    assert sections[1].leftlanes[0].widths[0].a == 3
    assert sections[1].leftlanes[0].widths[0].c != 0
    assert sections[1].leftlanes[1].roadmark[0].marking_type == solid
    assert sections[1].leftlanes[1].widths[0].a == 3
    assert sections[1].leftlanes[1].widths[0].c == 0

    # After the merge: a single solid-marked lane at constant width.
    assert sections[2].leftlanes[0].roadmark[0].marking_type == solid
    assert sections[2].leftlanes[0].widths[0].a == 3
    assert sections[2].leftlanes[0].widths[0].c == 0
def test_batch():
    """Exercise Batch creation, buffered mutations, and argument validation."""
    # A non-numeric timestamp is rejected up front.
    with assert_raises(TypeError):
        table.batch(timestamp='invalid')

    batch = table.batch()
    batch.put(b'row1', {b'cf1:col1': b'value1', b'cf1:col2': b'value2'})
    batch.put(b'row2', {b'cf1:col1': b'value1', b'cf1:col2': b'value2', b'cf1:col3': b'value3'})
    batch.delete(b'row1', [b'cf1:col4'])
    batch.delete(b'another-row')
    batch.send()

    # A batch pinned to an explicit timestamp.
    batch = table.batch(timestamp=1234567)
    batch.put(b'row1', {b'cf1:col5': b'value5'})
    batch.send()

    # batch_size must be positive.
    with assert_raises(ValueError):
        batch = table.batch(batch_size=0)

    # transaction mode and batch_size are mutually exclusive.
    with assert_raises(TypeError):
        batch = table.batch(transaction=True, batch_size=10)
# NOTE(review): the line below appears to be a mangled skip decorator (likely
# `@mark.skipif(skip_s4_test(), reason=...)` with its prefix lost during
# extraction) — confirm against the original file.
(skip_s4_test(), reason='Only works if S4 installed')
def test_arbitrary_pol_rcwa():
    """RCWA with an explicit [1, 0] polarization vector must reproduce the
    results of the equivalent 's' polarization shorthand."""
    import numpy as np
    from solcore import material, si
    from solcore.structure import Layer, Structure
    from solcore.absorption_calculator.rigorous_coupled_wave import calculate_rat_rcwa, calculate_absorption_profile_rcwa
    T = 300
    Air = material('Air')(T=T)
    TiO2 = material('TiO2', sopra=True)(T=T)
    GaAs = material('GaAs')(T=T)
    th = 50
    # One air layer containing an elliptical TiO2 nanoparticle inclusion.
    NP_layer = Layer(si(th, 'nm'), Air, geometry=[{'type': 'ellipse', 'mat': TiO2, 'center': (200, 200), 'halfwidths': [100, 70], 'angle': 40}])
    np_struct = Structure([NP_layer])
    wl = np.linspace(300, 1000, 10)
    step_size = 2
    # Run with the explicit polarization vector [1, 0] ...
    rat_np = calculate_rat_rcwa(np_struct, size=((400, 0), (0, 400)), orders=10, wavelength=wl, substrate=GaAs, incidence=Air, pol=[1, 0])
    A_output = rat_np['A_pol']
    result = calculate_absorption_profile_rcwa(np_struct, size=((400, 0), (0, 400)), orders=10, wavelength=wl, rat_output_A=A_output, parallel=True, steps_size=step_size, pol=[1, 0])
    # ... and again with the 's' string shorthand.
    rat_np_s = calculate_rat_rcwa(np_struct, size=((400, 0), (0, 400)), orders=10, wavelength=wl, substrate=GaAs, incidence=Air, pol='s')
    A_output_s = rat_np_s['A_pol']
    result_s = calculate_absorption_profile_rcwa(np_struct, size=((400, 0), (0, 400)), orders=10, wavelength=wl, rat_output_A=A_output_s, parallel=True, steps_size=step_size, pol='s')
    # Both formulations must agree on absorption totals and profiles.
    assert (A_output == approx(A_output_s))
    assert (result['absorption'] == approx(result_s['absorption'], rel=0.0001))
class Effect5079(BaseEffect):
    """Passive hull effect: boosts kinetic damage of loaded missile charges
    by the ship's shipBonusCF2 attribute (Caldari Frigate skill bonus)."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        # Only charges that require Missile Launcher Operation are affected.
        uses_missiles = lambda mod: mod.charge.requiresSkill('Missile Launcher Operation')
        fit.modules.filteredChargeBoost(uses_missiles, 'kineticDamage', ship.getModifiedItemAttr('shipBonusCF2'), skill='Caldari Frigate', **kwargs)
def cmp(rpin, rpout):
    """Compare two rpaths by type-appropriate content.

    Regular files compare byte content, symlinks compare targets, devices
    compare device numbers; directories, fifos, and sockets only compare
    type. Returns a truthy/falsy comparison result (None when a regular file
    is compared against a non-regular file).

    Raises
    ------
    RPathException
        If `rpin` has an unknown type, or closing either file reports an error.
    """
    _check_for_files(rpin, rpout)
    if rpin.isreg():
        if not rpout.isreg():
            return None
        fp1 = rpin.open('rb')
        fp2 = rpout.open('rb')
        result = _cmp_file_obj(fp1, fp2)
        # Close BOTH files before checking for errors. The original
        # short-circuit `fp1.close() or fp2.close()` leaked fp2 whenever
        # fp1.close() reported an error.
        err1 = fp1.close()
        err2 = fp2.close()
        if err1 or err2:
            raise RPathException('Error closing file')
        return result
    elif rpin.isdir():
        return rpout.isdir()
    elif rpin.issym():
        return rpout.issym() and (rpin.readlink() == rpout.readlink())
    elif rpin.isdev():
        return rpout.isdev() and (rpin.getdevnums() == rpout.getdevnums())
    elif rpin.isfifo():
        return rpout.isfifo()
    elif rpin.issock():
        return rpout.issock()
    else:
        raise RPathException('File {rp!r} has unknown type'.format(rp=rpin))
def load_operative_gin_configurations(checkpoint_dir):
    """Parse and finalize the operative gin config recorded in `checkpoint_dir`."""
    config_file = operative_config_path(checkpoint_dir)
    # The gin registry is locked by default; unlock just long enough to parse.
    with gin.unlock_config():
        gin.parse_config_file(config_file)
    # Freeze the configuration so later code cannot mutate it.
    gin.finalize()
    logging.info('Operative Gin configurations loaded from `checkpoint_dir`: %s', config_file)
def train():
    """Multi-task pre-training loop over all `pretrain_tasks`.

    Builds per-task datasets and loaders, constructs the model (optionally
    restored from a checkpoint), then interleaves task batches via MetaLoader,
    logging per-task losses and periodically validating and checkpointing
    until ``args.total_train_steps`` is reached. Returns 0.
    """
    # Per-task datasets, loaders, sampling ratios, and batches-per-"epoch".
    train_set = {}
    train_batch = {}
    valid_set = {}
    valid_loader = {}
    batch_per_epo = defaultdict(int)
    for task in pretrain_tasks:
        train_set[task] = get_dataset(task, images, 'train')
        valid_set[task] = get_dataset(task, images, 'val')
        r = args.r[task]  # mixing ratio for this task in the MetaLoader
        train_batch[task] = (get_dataloader(task, train_set[task], args.batch_size, is_train=True), r)
        valid_loader[task] = get_dataloader(task, valid_set[task], args.batch_size, is_train=False)
        batch_per_epo[task] = ((len(train_set[task]) // args.batch_size) + 1)
    train_loader = dataset.MetaLoader(train_batch)
    model = modules.Model(ff_dim=args.dim_ff, img_embs=args.img_embs, n_hidden=args.n_embs, n_head=args.n_head, n_block=args.n_block, img_enc_block=args.img_enc_block, vocab_size=vocab_size, dropout=args.dropout, max_len=args.max_len, tasks=pretrain_tasks, tmp=args.tmp, CLS=vocabs['<CLS>'])
    global_step = (- 1)
    count = defaultdict(int)  # per-task batch counter
    if (args.restore != ''):
        # Resume: model weights, global step, and (if saved) per-task counters.
        print('load parameters from {}'.format(args.restore))
        checkpoint = torch.load(args.restore)
        model.load_state_dict(checkpoint['model_state_dict'])
        global_step = checkpoint['global_step']
        if ('task_batch_num' in list(checkpoint.keys())):
            count = checkpoint['task_batch_num']
    model.cuda()
    model.train()
    optim = Optim.Adam(filter((lambda p: p.requires_grad), model.parameters()), lr=args.lr, weight_decay=args.weight_decay)
    best_score = defaultdict(int)
    report_loss = defaultdict(int)
    n_samples = defaultdict(int)
    use_time = defaultdict(float)
    for (step, (task, batch)) in enumerate(train_loader):
        global_step += 1
        start_time = time.time()
        model.train()
        model.zero_grad()
        (raw_loss, _) = model(batch, task, compute_loss=True)
        loss = raw_loss.mean()
        loss.backward()
        # Manual LR schedule: apply the warmup/decay LR for this step.
        lr_this_step = get_lr_sched(global_step, args)
        for param_group in optim.param_groups:
            param_group['lr'] = lr_this_step
        writer.add_scalar('lr', lr_this_step, global_step)
        optim.step()
        # Accumulate per-task statistics for logging.
        report_loss[task] += loss.item()
        n_samples[task] += batch['img1'].size(0)
        count[task] += 1
        use_time[task] += (time.time() - start_time)
        writer.add_scalar(((args.dataset + '_train_batch_loss_sum/') + task), (raw_loss.sum().item() / batch['img1'].size(0)), count[task])
        writer.add_scalar(((args.dataset + '_train_batch_loss_mean/') + task), loss.item(), count[task])
        if ((count[task] % batch_per_epo[task]) == 0):
            # One "epoch" of this task completed: log and reset its stats.
            print(('task: %s, epoch: %d, global_step: %d, report_loss: %.3f, time: %.2f' % (task, (count[task] // batch_per_epo[task]), global_step, (report_loss[task] / n_samples[task]), use_time[task])))
            train_logger.print_train_stats(task, (count[task] // batch_per_epo[task]), global_step, (report_loss[task] / n_samples[task]), use_time[task])
            writer.add_scalar(((args.dataset + '_train_epoch_loss/') + task), (report_loss[task] / n_samples[task]), (count[task] // batch_per_epo[task]))
            (report_loss[task], n_samples[task], use_time[task]) = (0, 0, 0.0)
        if ((global_step % args.valid_steps) == 0):
            stats = validate(valid_loader, model, global_step, count)
            val_logger.print_eval_stats(global_step, stats)
        if ((global_step > args.warmup_steps) and ((global_step % 1000) == 0)):
            save_model(os.path.join(checkpoint_path, 'checkpoint.pt'), model, global_step, count)
            # NOTE(review): nesting inferred from the rename of the
            # just-written checkpoint — confirm against the original file.
            if (global_step >= args.total_train_steps):
                # Freeze the final checkpoint under a step-stamped name and stop.
                os.rename(os.path.join(checkpoint_path, 'checkpoint.pt'), os.path.join(checkpoint_path, 'checkpoint_{}.pt'.format(global_step)))
                break
    return 0
class QFIBase(DerivativeBase):
    """Base class wrapping a Quantum Fisher Information computation method.

    ``qfi_method`` may be a CircuitQFI instance or one of the strings
    'lin_comb_full', 'overlap_block_diag', 'overlap_diag'; the imports are
    kept local so only the selected backend module is loaded.
    """

    def __init__(self, qfi_method: Union[(str, CircuitQFI)]='lin_comb_full'):
        # A ready-made CircuitQFI object is used as-is.
        if isinstance(qfi_method, CircuitQFI):
            self._qfi_method = qfi_method
            return
        if qfi_method == 'lin_comb_full':
            from .circuit_qfis import LinCombFull
            self._qfi_method = LinCombFull()
        elif qfi_method == 'overlap_block_diag':
            from .circuit_qfis import OverlapBlockDiag
            self._qfi_method = OverlapBlockDiag()
        elif qfi_method == 'overlap_diag':
            from .circuit_qfis import OverlapDiag
            self._qfi_method = OverlapDiag()
        else:
            raise ValueError("Unrecognized input provided for `qfi_method`. Please provide a CircuitQFI object or one of the pre-defined string arguments: {'lin_comb_full', 'overlap_diag', 'overlap_block_diag'}. ")

    def qfi_method(self) -> CircuitQFI:
        """Return the wrapped CircuitQFI object."""
        return self._qfi_method
class Zero(InitialState):
    """The all-zero initial state |0...0> on ``num_qubits`` qubits."""

    def __init__(self, num_qubits: int) -> None:
        super().__init__()
        validate_min('num_qubits', num_qubits, 1)
        self._num_qubits = num_qubits

    def _replacement():
        return 'Zero(num_qubits) is the same as a empty QuantumCircuit(num_qubits).'

    def construct_circuit(self, mode='circuit', register=None):
        """Return the zero state as a statevector ('vector') or as a circuit
        ('circuit'); raise AquaError for any other mode."""
        if (mode == 'vector'):
            # |0...0>: amplitude 1.0 on the first basis state, 0.0 elsewhere.
            dim = np.power(2, self._num_qubits)
            return np.array([1.0] + [0.0] * (dim - 1))
        if (mode == 'circuit'):
            if (register is None):
                register = QuantumRegister(self._num_qubits, name='q')
            # The zero state needs no gates: an empty circuit suffices.
            return QuantumCircuit(register)
        raise AquaError('Mode should be either "vector" or "circuit"')
def parse_args(argv):
    """Parse command-line options for the kickstart flattener.

    Returns the populated argparse namespace.
    """
    arg_parser = argparse.ArgumentParser()
    # Required input kickstart file.
    arg_parser.add_argument('-c', '--config', dest='kscfg', required=True, help=_('Path to kickstart config file'))
    # Syntax version used to interpret the config (defaults to DEVEL).
    arg_parser.add_argument('-v', '--version', dest='version', default=DEVEL, help=_('Kickstart version to use for interpreting config'))
    # Optional destination for the flattened output.
    arg_parser.add_argument('-o', '--output', dest='output', help=_('Write flattened config to OUTPUT'))
    return arg_parser.parse_args(argv)
class CookieJarInterfaceTests(unittest.TestCase):
    """Verifies CookieJar only uses the documented request/response interface.

    NOTE(review): several string literals below (the URLs returned by
    get_full_url/get_type and the second interact_netscape argument) appear
    truncated to a bare quote — likely mangled during extraction; restore
    them from the upstream sources before running.
    """

    def test_add_cookie_header(self):
        from mechanize import CookieJar

        # Mock request that records every interface method CookieJar touches.
        class MockRequest(object):
            def __init__(self):
                self.added_headers = []
                self.called = set()

            def log_called(self):
                self.called.add(caller())

            def get_full_url(self):
                self.log_called()
                return '

            def get_host(self):
                self.log_called()
                return 'example.com:443'

            def get_type(self):
                self.log_called()
                return '

            def has_header(self, header_name):
                self.log_called()
                return False

            def get_header(self, header_name, default=None):
                self.log_called()
                pass

            def header_items(self):
                self.log_called()
                pass

            def add_unredirected_header(self, key, val):
                self.log_called()
                self.added_headers.append((key, val))

            def is_unverifiable(self):
                self.log_called()
                return False

            def unverifiable(self):
                return self.is_unverifiable()

            def type(self):
                return self.get_type()

            def host(self):
                return self.get_host()

        jar = CookieJar()
        interact_netscape(jar, ' 'foo=bar; port=443; secure')
        request = MockRequest()
        jar.add_cookie_header(request)
        # Every accessor except the deliberately excluded ones must be used.
        expect_called = (attribute_names(MockRequest) - {'port', 'get_header', 'header_items', 'log_called', 'unverifiable', 'type', 'host'})
        self.assertEqual(request.called, expect_called)
        self.assertEqual(request.added_headers, [('Cookie', 'foo=bar')])

    def test_extract_cookies(self):
        from mechanize import CookieJar

        # Minimal response/message stubs carrying one Set-Cookie header.
        class StubMessage(object):
            def getheaders(self, name, default=None):
                return ['foo=bar; port=443']
            get_all = getheaders

        class StubResponse(object):
            def info(self):
                return StubMessage()

        class StubRequest(object):
            def __init__(self):
                self.added_headers = []
                self.called = set()

            def log_called(self):
                self.called.add(caller())

            def get_full_url(self):
                self.log_called()
                return '

            def get_host(self):
                self.log_called()
                return 'example.com:443'

            def is_unverifiable(self):
                self.log_called()
                return False

            def unverifiable(self):
                return self.is_unverifiable()

            def type(self):
                return self.get_type()

            def host(self):
                return self.get_host()

        jar = CookieJar()
        response = StubResponse()
        request = StubRequest()
        jar.extract_cookies(response, request)
        expect_called = (attribute_names(StubRequest) - set(['port', 'log_called', 'unverifiable', 'type', 'host']))
        self.assertEqual(request.called, expect_called)
        self.assertEqual([(cookie.name, cookie.value) for cookie in jar], [('foo', 'bar')])

    def test_unverifiable(self):
        from mechanize._clientcookie import request_is_unverifiable

        # Stub that records which attributes were accessed, raising
        # AttributeError for anything not provided.
        class StubRequest(object):
            def __init__(self, attrs):
                self._attrs = attrs
                self.accessed = set()

            def __getattr__(self, name):
                self.accessed.add(name)
                try:
                    return self._attrs[name]
                except KeyError:
                    raise AttributeError(name)

        # is_unverifiable() is preferred; the legacy `unverifiable` attribute
        # is only a fallback.
        request = StubRequest(dict(is_unverifiable=(lambda : False)))
        self.assertEqual(request_is_unverifiable(request), False)
        request = StubRequest(dict(is_unverifiable=(lambda : False), unverifiable=True))
        self.assertEqual(request_is_unverifiable(request), False)
        request = StubRequest(dict(unverifiable=False))
        self.assertEqual(request_is_unverifiable(request), False)
class TestEncodeDecode(TestNameCheckVisitorBase):
    """Checks type inference for str.encode / bytes.decode.

    NOTE(review): the bare `_passes()` lines look like mangled decorators
    (likely `@assert_passes()`), and the receiver-less `.encode(42)` /
    `.decode(42)` calls appear to have lost their string/bytes receivers
    during extraction — confirm against the original file.
    """
    _passes()
    def test(self):
        def capybara(s: str, b: bytes):
            # encode returns bytes; decode returns str.
            assert_is_value(s.encode('utf-8'), TypedValue(bytes))
            assert_is_value(b.decode('utf-8'), TypedValue(str))
    _passes()
    def test_encode_wrong_type(self):
        def capybara():
            # An int is not a valid encoding argument.
            .encode(42)
    _passes()
    def test_decode_wrong_type(self):
        def capybara():
            # An int is not a valid encoding argument.
            .decode(42)
# NOTE(review): the tuple below appears to be a mock patch decorator (likely
# `@patch('pyinaturalist.client.get_access_token', return_value='token')`)
# that lost its `@patch` prefix during extraction — confirm against the original.
('pyinaturalist.client.get_access_token', return_value='token')
def test_client_auth(get_access_token):
    """auth=True should fetch an access token exactly once and attach it to
    the request params; unauthenticated requests get no token."""
    client = iNatClient()
    final_params_1 = client.request(request_function, auth=True)
    final_params_2 = client.request(request_function)
    assert (final_params_1['access_token'] == 'token')
    assert ('access_token' not in final_params_2)
    get_access_token.assert_called_once()
class Retry():
    """Decorator: retry the wrapped callable until it succeeds or time runs out.

    The call is retried while exceptions listed in ``ignore_exceptions`` are
    raised, sleeping ``dt`` seconds between attempts with a 1.5x backoff, for
    at most ``tmax`` seconds. An AssertionError from the callable aborts the
    retry loop immediately. On failure, either ``False`` is returned
    (``return_on_fail=True``) or ``AssertionError(fail_msg)`` is raised.
    """

    def __init__(self, fail_msg='retry failed!', ignore_exceptions=(), dt=sleep_time, tmax=max_sleep, return_on_fail=False):
        self.fail_msg = fail_msg
        self.ignore_exceptions = ignore_exceptions
        self.dt = dt
        self.tmax = tmax
        self.return_on_fail = return_on_fail

    def __call__(self, fn):
        import functools

        # Preserve the wrapped function's metadata. The original body had a
        # bare no-op `(fn)` expression here, presumably a mangled `@wraps(fn)`.
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            tmax = (time.time() + self.tmax)
            dt = self.dt
            ignore_exceptions = self.ignore_exceptions
            while (time.time() <= tmax):
                try:
                    return fn(*args, **kwargs)
                except ignore_exceptions:
                    pass
                except AssertionError:
                    # Assertions are definitive failures; stop retrying.
                    break
                time.sleep(dt)
                dt *= 1.5
            if self.return_on_fail:
                return False
            else:
                raise AssertionError(self.fail_msg)
        return wrapper
# NOTE(review): the original first line was a receiver-less `.parametrize(...)` -- a
# stripped `@pytest.mark.parametrize` decorator; restored (this assumes pytest is
# imported at the top of the file, as the `lexer` fixture implies -- TODO confirm).
@pytest.mark.parametrize('text', ('SHOW', 'CREATE', 'ALTER', 'DROP', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'WHERE', 'GROUP', 'ORDER', 'BY', 'AS', 'DISTINCT', 'JOIN', 'WITH', 'RECURSIVE', 'PARTITION', 'NTILE', 'MASTER_PASSWORD', 'XA', 'REQUIRE_TABLE_PRIMARY_KEY_CHECK', 'STREAM'))
def test_keywords(lexer, text):
    """Each reserved word should lex as a single Keyword token."""
    assert (list(lexer.get_tokens(text))[0] == (Keyword, text))
class colors():
    """ANSI escape codes for terminal output; all attributes are empty strings when
    stdout is not a TTY so the codes degrade gracefully in pipes/logs.

    Fix: the TTY branch previously omitted HEADER (it was defined only in the
    non-TTY branch), so `colors.HEADER` raised AttributeError on a real terminal.
    """
    if sys.stdout.isatty():
        HEADER = '\x1b[95m'  # added: bright magenta, mirrors the non-TTY branch
        BLUE = '\x1b[94m'
        GREEN = '\x1b[92m'
        YELLOW = '\x1b[93m'
        RED = '\x1b[91m'
        ENDC = '\x1b[0m'     # reset
        BOLD = '\x1b[1m'
        UNDERLINE = '\x1b[4m'
    else:
        HEADER = ''
        BLUE = ''
        GREEN = ''
        YELLOW = ''
        RED = ''
        ENDC = ''
        BOLD = ''
        UNDERLINE = ''
# NOTE(review): the original first line was a receiver-less `.parametrize(...)` -- a
# stripped `@pytest.mark.parametrize` decorator; restored (`pytest.param` already
# appears in the argument list, so pytest is imported at module level).
@pytest.mark.parametrize('base,one,two', [pytest.param(Version.parse('3.0.0'), Version.parse('3.0.0-1'), Version.parse('3.0.0-2'), id='post'), pytest.param(Version.parse('3.0.0'), Version.parse('3.0.0+local.1'), Version.parse('3.0.0+local.2'), id='local')])
def test_allows_post_releases_explicit_with_min(base: Version, one: Version, two: Version) -> None:
    """An inclusive minimum excludes the base release and anything ordered below the min."""
    range = VersionRange(min=one, include_min=True)
    assert (not range.allows(base))
    assert range.allows(two)
    range = VersionRange(min=two, include_min=True)
    assert (not range.allows(base))
    assert (not range.allows(one))
class DictConfigSource(ConfigSource):
    """In-memory ConfigSource backed by a nested dict; dotted keys address nested levels."""

    def __init__(self) -> None:
        self._config: dict[str, Any] = {}

    def config(self) -> dict[str, Any]:
        return self._config

    def add_property(self, key: str, value: Any) -> None:
        """Set `value` at dotted `key`, creating intermediate dicts as needed."""
        parts = key.split('.')
        node = self._config
        last_idx = len(parts) - 1
        for idx, part in enumerate(parts):
            if idx == last_idx:
                node[part] = value
            else:
                if part not in node:
                    node[part] = {}
                node = node[part]

    def remove_property(self, key: str) -> None:
        """Delete dotted `key` if present; a missing path component is a no-op."""
        parts = key.split('.')
        node = self._config
        last_idx = len(parts) - 1
        for idx, part in enumerate(parts):
            if part not in node:
                return
            if idx == last_idx:
                del node[part]
                return
            node = node[part]
def create_sdf_abc(sdfcommand, marching_cube_command, LIB_command, num_sample, bandwidth, res, expand_rate, raw_dirs, iso_val, max_verts, ish5=True, normalize=True, g=0.0, reduce=4):
    """Generate SDF files for every mesh in the train/test splits.

    Runs LIB_command once, then for each split dispatches create_sdf_obj over
    all meshes in parallel (5 workers). The global object index continues
    across splits via `start`.
    """
    os.system(LIB_command)
    start = 0
    for split in ['train', 'test']:
        model_dir = os.path.join(raw_dirs['mesh_dir'], split, '2048')
        norm_mesh_dir = os.path.join(raw_dirs['norm_mesh_dir'], split)
        sdf_dir = os.path.join(raw_dirs['sdf_dir'], split)
        if not os.path.exists(sdf_dir):
            os.makedirs(sdf_dir)
        if not os.path.exists(norm_mesh_dir):
            os.makedirs(norm_mesh_dir)
        list_obj = [os.path.join(model_dir, f) for f in os.listdir(model_dir)]
        # Every argument except the mesh path and its running index is constant
        # per split, so enumerate directly instead of zipping sixteen repeated lists.
        with Parallel(n_jobs=5) as parallel:
            parallel(
                delayed(create_sdf_obj)(
                    sdfcommand, marching_cube_command, norm_mesh_dir, sdf_dir,
                    obj, res, iso_val, expand_rate, indx, ish5, normalize,
                    num_sample, bandwidth, max_verts, g, reduce)
                for indx, obj in enumerate(list_obj, start=start))
        start += len(list_obj)
    print('finish all')
def run(scenarios_list, config, wait_duration, failed_post_scenarios, kubeconfig_path, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
    """Execute the namespace-deletion chaos scenarios in `scenarios_list`.

    For each scenario config file: optionally run a pre-action script, then for
    each scenario entry repeatedly pick random namespaces matching the configured
    namespace/label selector, delete all objects in them, wait, and verify
    recovery (post-action checks when a script is supplied, otherwise deployment
    readiness in the affected namespaces). Publishes status to cerberus and
    records per-scenario telemetry.

    Returns a tuple of (failed scenario paths, scenario telemetry records).
    """
    scenario_telemetries: list[ScenarioTelemetry] = []
    failed_scenarios = []
    for scenario_config in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = scenario_config[0]
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, scenario_config[0])
        try:
            # Optional pre-action script is the second element of the scenario tuple.
            if (len(scenario_config) > 1):
                pre_action_output = post_actions.run(kubeconfig_path, scenario_config[1])
            else:
                pre_action_output = ''
            with open(scenario_config[0], 'r') as f:
                scenario_config_yaml = yaml.full_load(f)
            for scenario in scenario_config_yaml['scenarios']:
                scenario_namespace = get_yaml_item_value(scenario, 'namespace', '')
                scenario_label = get_yaml_item_value(scenario, 'label_selector', '')
                # `namespace` and `label_selector` are mutually exclusive selectors.
                if ((scenario_namespace is not None) and (scenario_namespace.strip() != '')):
                    if ((scenario_label is not None) and (scenario_label.strip() != '')):
                        logging.error('You can only have namespace or label set in your namespace scenario')
                        logging.error(("Current scenario config has namespace '%s' and label selector '%s'" % (scenario_namespace, scenario_label)))
                        logging.error("Please set either namespace to blank ('') or label_selector to blank ('') to continue")
                        raise RuntimeError()
                delete_count = get_yaml_item_value(scenario, 'delete_count', 1)
                run_count = get_yaml_item_value(scenario, 'runs', 1)
                run_sleep = get_yaml_item_value(scenario, 'sleep', 10)
                wait_time = get_yaml_item_value(scenario, 'wait_time', 30)
                logging.info((((((str(scenario_namespace) + str(scenario_label)) + str(delete_count)) + str(run_count)) + str(run_sleep)) + str(wait_time)))
                logging.info('done')
                start_time = int(time.time())
                for i in range(run_count):
                    killed_namespaces = {}
                    # Re-resolve matching namespaces at the start of every run.
                    namespaces = kubecli.check_namespaces([scenario_namespace], scenario_label)
                    for j in range(delete_count):
                        if (len(namespaces) == 0):
                            logging.error(("Couldn't delete %s namespaces, not enough namespaces matching %s with label %s" % (str(run_count), scenario_namespace, str(scenario_label))))
                            raise RuntimeError()
                        # Pick a random matching namespace and wipe its objects.
                        selected_namespace = namespaces[random.randint(0, (len(namespaces) - 1))]
                        logging.info(('Delete objects in selected namespace: ' + selected_namespace))
                        try:
                            objects = delete_objects(kubecli, selected_namespace)
                            killed_namespaces[selected_namespace] = objects
                            logging.info(('Deleted all objects in namespace %s was successful' % str(selected_namespace)))
                        except Exception as e:
                            logging.info(('Delete all objects in namespace %s was unsuccessful' % str(selected_namespace)))
                            logging.info(('Namespace action error: ' + str(e)))
                            raise RuntimeError()
                        # Don't hit the same namespace twice within one run.
                        namespaces.remove(selected_namespace)
                        logging.info(('Waiting %s seconds between namespace deletions' % str(run_sleep)))
                        time.sleep(run_sleep)
                    logging.info(('Waiting for the specified duration: %s' % wait_duration))
                    time.sleep(wait_duration)
                    if (len(scenario_config) > 1):
                        # Post-action script supplied: verify recovery against the pre-action output.
                        try:
                            failed_post_scenarios = post_actions.check_recovery(kubeconfig_path, scenario_config, failed_post_scenarios, pre_action_output)
                        except Exception as e:
                            logging.error(('Failed to run post action checks: %s' % e))
                            raise RuntimeError()
                    else:
                        # No script: wait for deployments in the killed namespaces to come back.
                        failed_post_scenarios = check_all_running_deployment(killed_namespaces, wait_time, kubecli)
                end_time = int(time.time())
                cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
        except (Exception, RuntimeError):
            # Any failure marks the whole scenario file as failed; details are logged.
            scenario_telemetry.exitStatus = 1
            failed_scenarios.append(scenario_config[0])
            log_exception(scenario_config[0])
        else:
            scenario_telemetry.exitStatus = 0
        scenario_telemetry.endTimeStamp = time.time()
        scenario_telemetries.append(scenario_telemetry)
    return (failed_scenarios, scenario_telemetries)
class DBusUser(object):
    """Resolve (and cache) the session environment of a user's DBus session on a display.

    Scans running processes for one owned by `user` whose DISPLAY matches and
    that carries a usable DBUS_SESSION_BUS_ADDRESS; the discovered environment
    is memoised in the class-level `found_users` cache. Raises KeyError when no
    matching session process exists.
    """
    # (user, display) -> environ dict, shared across all instances.
    found_users = {}

    def __init__(self, user, display):
        self.user = user
        self.display = display
        cache_key = (user, display)
        if cache_key not in self.found_users:
            for proc in process_dict_iter(('username', 'environ')):
                if proc['username'] != user:
                    continue
                environ = proc['environ']
                if environ.get('DISPLAY', None) != display:
                    continue
                if 'DBUS_SESSION_BUS_ADDRESS' not in environ:
                    continue
                if environ['DBUS_SESSION_BUS_ADDRESS'].startswith('disabled'):
                    continue
                self.found_users[cache_key] = environ
                break
        # KeyError here means no matching session process was found.
        self.environ = self.found_users[cache_key]

    def __str__(self):
        return "<DBusUser('{}', '{}')>".format(self.user, self.display)

    def command(self, cmd, stderr=None):
        """Run `cmd` as the user inside the cached session environment; return stdout text."""
        raw = subprocess.check_output(
            ('su', self.user, '-m', '-c', cmd),
            env=self.environ, stderr=stderr)
        return raw.decode('utf8')
def _process_mmcls_checkpoint(checkpoint):
state_dict = checkpoint['state_dict']
new_state_dict = OrderedDict()
for (k, v) in state_dict.items():
if k.startswith('backbone.'):
new_state_dict[k[9:]] = v
new_checkpoint = dict(state_dict=new_state_dict)
return new_checkpoint |
def tensorize_and_pad(batch, device, pad):
    """Collate a batch of (traj_data, feat_dict) pairs into padded tensors.

    Features whose key starts with 'lang' or 'frames' become model inputs;
    everything else becomes a ground-truth target. Each feature is converted
    to tensors on `device` and (where applicable) padded with `pad`.

    Returns (traj_data, input_dict, gt_dict).
    """
    device = torch.device(device)
    (input_dict, gt_dict, feat_dict) = (dict(), dict(), dict())
    (traj_data, feat_list) = list(zip(*batch))
    # Regroup: list of per-sample dicts -> dict of per-key lists.
    for key in feat_list[0].keys():
        feat_dict[key] = [el[key] for el in feat_list]
    # A batch must come from a single dataset.
    assert (len(set([t['dataset_name'] for t in traj_data])) == 1)
    input_keys = {'lang', 'frames'}
    for (k, v) in feat_dict.items():
        dict_assign = (input_dict if any([k.startswith(s) for s in input_keys]) else gt_dict)
        if k.startswith('lang'):
            # Missing language is encoded as a two-token pad sequence.
            seqs = [torch.tensor((vv if (vv is not None) else [pad, pad]), device=device).long() for vv in v]
            pad_seq = pad_sequence(seqs, batch_first=True, padding_value=pad)
            dict_assign[k] = pad_seq
            dict_assign[('lengths_' + k)] = torch.tensor(list(map(len, seqs)))
            length_max_key = (('length_' + k) + '_max')
            # For 'lang:<suffix>' keys, keep the suffix after '_max:'.
            if (':' in k):
                length_max_key = ((('length_' + k.split(':')[0]) + '_max:') + ':'.join(k.split(':')[1:]))
            dict_assign[length_max_key] = max(map(len, seqs))
        elif (k in {'object'}):
            # Object targets stay a ragged list (empty samples dropped); no padding.
            seqs = [torch.tensor(vv, device=device, dtype=torch.long) for vv in v if (len(vv) > 0)]
            dict_assign[k] = seqs
        elif (k in {'goal_progress', 'subgoals_completed'}):
            # Float-valued per-timestep targets, padded.
            seqs = [torch.tensor(vv, device=device, dtype=torch.float) for vv in v]
            pad_seq = pad_sequence(seqs, batch_first=True, padding_value=pad)
            dict_assign[k] = pad_seq
        elif (k in {'frames'}):
            # Visual features arrive as tensors; move/cast, pad, and record lengths.
            seqs = [vv.clone().detach().to(device).type(torch.float) for vv in v]
            pad_seq = pad_sequence(seqs, batch_first=True, padding_value=pad)
            dict_assign[k] = pad_seq
            dict_assign[('lengths_' + k)] = torch.tensor(list(map(len, seqs)))
            dict_assign[(('length_' + k) + '_max')] = max(map(len, seqs))
        else:
            # Default: integer sequences, padded.
            seqs = [torch.tensor(vv, device=device, dtype=torch.long) for vv in v]
            pad_seq = pad_sequence(seqs, batch_first=True, padding_value=pad)
            dict_assign[k] = pad_seq
    return (traj_data, input_dict, gt_dict)
class Discriminator(nn.Module):
    """PatchGAN-style discriminator: four conv blocks then a 1-channel map,
    globally average-pooled to a single score per image."""

    def __init__(self, input_nc):
        super(Discriminator, self).__init__()

        def conv_block(in_ch, out_ch, stride, norm=True):
            # Conv + (optional) InstanceNorm + LeakyReLU(0.2).
            layers = [nn.Conv2d(in_ch, out_ch, 4, stride=stride, padding=1)]
            if norm:
                layers.append(nn.InstanceNorm2d(out_ch))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            return layers

        self.model = nn.Sequential(
            *conv_block(input_nc, 64, 2, norm=False),  # first block has no norm
            *conv_block(64, 128, 2),
            *conv_block(128, 256, 2),
            *conv_block(256, 512, 1),
            nn.Conv2d(512, 1, 4, padding=1),
        )

    def forward(self, x):
        features = self.model(x)
        # Global average pool over the remaining spatial map -> one logit per sample.
        pooled = F.avg_pool2d(features, features.size()[2:])
        return pooled.view(pooled.size(0), -1)
def main() -> None:
    """Entry point: parse CLI options and run incremental-mypy regression testing over a commit range."""
    # RawDescriptionHelpFormatter preserves __doc__ formatting; widen the help column.
    help_factory: Any = (lambda prog: RawDescriptionHelpFormatter(prog=prog, max_help_position=32))
    parser = ArgumentParser(prog='incremental_checker', description=__doc__, formatter_class=help_factory)
    parser.add_argument('range_type', metavar='START_TYPE', choices=['last', 'commit'], help="must be one of 'last' or 'commit'")
    parser.add_argument('range_start', metavar='COMMIT_ID_OR_NUMBER', help='the commit id to start from, or the number of commits to move back (see above)')
    parser.add_argument('-r', '--repo_url', default=MYPY_REPO_URL, metavar='URL', help='the repo to clone and run tests on')
    parser.add_argument('-f', '--file-path', default=MYPY_TARGET_FILE, metavar='FILE', help='the name of the file or directory to typecheck')
    parser.add_argument('-x', '--exit-on-error', action='store_true', help='Exits as soon as an error occurs')
    parser.add_argument('--keep-temporary-files', action='store_true', help='Keep temporary files on exit')
    parser.add_argument('--cache-path', default=CACHE_PATH, metavar='DIR', help='sets a custom location to store cache data')
    parser.add_argument('--branch', default=None, metavar='NAME', help='check out and test a custom branch uses the default if not specified')
    parser.add_argument('--sample', type=int, help='use a random sample of size SAMPLE')
    parser.add_argument('--seed', type=str, help='random seed')
    parser.add_argument('--limit', type=int, help='maximum number of commits to use (default until end)')
    parser.add_argument('--mypy-script', type=str, help='alternate mypy script to run')
    parser.add_argument('--daemon', action='store_true', help='use mypy daemon instead of incremental (highly experimental)')
    # With no arguments, show usage instead of running with all defaults.
    if (len(sys.argv[1:]) == 0):
        parser.print_help()
        parser.exit()
    params = parser.parse_args(sys.argv[1:])
    # Lay out working paths relative to the mypy checkout containing this script.
    script_path = os.path.abspath(sys.argv[0])
    mypy_path = os.path.abspath(os.path.dirname(os.path.dirname(script_path)))
    temp_repo_path = os.path.abspath(os.path.join(mypy_path, 'tmp_repo'))
    if params.file_path:
        target_file_path = os.path.abspath(os.path.join(temp_repo_path, params.file_path))
    else:
        # None means "typecheck the whole repo".
        target_file_path = None
    incremental_cache_path = os.path.abspath(params.cache_path)
    mypy_cache_path = os.path.abspath(os.path.join(mypy_path, 'misc', '.mypy_cache'))
    print(f'Assuming mypy is located at {mypy_path}')
    print(f'Temp repo will be cloned at {temp_repo_path}')
    print(f'Testing file/dir located at {target_file_path}')
    print(f'Using cache data located at {incremental_cache_path}')
    print()
    test_repo(params.repo_url, temp_repo_path, target_file_path, mypy_path, incremental_cache_path, mypy_cache_path, params.range_type, params.range_start, params.branch, params)
def require_session_login(func):
    """Decorator: require a valid non-robot session cookie before calling `func`.

    On success the auth context is applied and a success metric is recorded;
    an invalid-but-present cookie records a failure metric; every failing case
    aborts the request with 401.
    """
    # NOTE(review): the original had a bare `(func)` expression here -- a stripped
    # `@wraps(func)` decorator; restored so the wrapper keeps func's metadata
    # (important for framework endpoint naming).
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        result = validate_session_cookie()
        if result.has_nonrobot_user:
            result.apply_to_context()
            authentication_count.labels(result.kind, True).inc()
            return func(*args, **kwargs)
        elif not result.missing:
            # Cookie was present but invalid: count the failed attempt.
            authentication_count.labels(result.kind, False).inc()
        abort(401, message='Method requires login and no valid login could be loaded.')
    return wrapper
def unary_iteration(l_iter: int, r_iter: int, flanking_ops: List[cirq.Operation], controls: Sequence[cirq.Qid], selection: Sequence[cirq.Qid], qubit_manager: cirq.QubitManager, break_early: Callable[([int, int], bool)]=(lambda l, r: False)) -> Iterator[Tuple[(cirq.OP_TREE, cirq.Qid, int)]]:
    """Yield op-trees implementing unary iteration over indices [l_iter, r_iter).

    Dispatches to the zero-/single-/multi-control helper based on how many
    control qubits are supplied. Ancilla qubits are allocated up-front from
    `qubit_manager` and freed after the delegated generator is exhausted.
    """
    # The selection register must be wide enough to index the whole range.
    assert ((2 ** len(selection)) >= (r_iter - l_iter))
    assert (len(selection) > 0)
    # One ancilla per control/selection qubit beyond the first (never negative).
    ancilla = qubit_manager.qalloc(max(0, ((len(controls) + len(selection)) - 1)))
    if (len(controls) == 0):
        (yield from _unary_iteration_zero_control(flanking_ops, selection, ancilla, l_iter, r_iter, break_early))
    elif (len(controls) == 1):
        (yield from _unary_iteration_single_control(flanking_ops, controls[0], selection, ancilla, l_iter, r_iter, break_early))
    else:
        (yield from _unary_iteration_multi_controls(flanking_ops, controls, selection, ancilla, l_iter, r_iter, break_early))
    # NOTE: runs only once the lazy iteration above has been fully consumed.
    qubit_manager.qfree(ancilla)
class FunctionMask(MaskBase):
    """Mask whose inclusion array is computed on demand by a user-supplied function.

    The function is called as ``function(data, wcs, view)`` and must return a
    boolean array whose shape matches ``data[view]``.
    """

    def __init__(self, function):
        self._function = function

    def _validate_wcs(self, new_data=None, new_wcs=None, **kwargs):
        # Nothing to validate: the function itself is responsible for the WCS.
        pass

    def _include(self, data=None, wcs=None, view=()):
        mask = self._function(data, wcs, view)
        expected = data[view].shape
        if mask.shape != expected:
            raise ValueError('Function did not return mask with correct shape - expected {0}, got {1}'.format(expected, mask.shape))
        return mask

    def __getitem__(self, slice):
        # Slicing cannot be propagated into an opaque function; return self unchanged.
        return self

    def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None):
        # The wrapped function is axis-agnostic, so a unit change is a plain copy.
        return FunctionMask(self._function)
def log_predictions(args: argparse.Namespace, eval_dataset: NERDataset, outputs: EvalPrediction, prefix: str='eval'):
    """Write per-word NER predictions to a TSV file and log them as a wandb table.

    Fixes: (1) the row-writing loop sat outside the `with open(...)` block, so
    every row write hit an already-closed file; the loop is now inside the
    context manager. (2) Rows were space-separated while the header is
    tab-separated; rows now use tabs so the output is a valid TSV.
    """
    if not args.do_train:
        wandb.init(reinit=False)
    labels = get_ner_labels('')
    label_map: Dict[int, str] = {i: label for (i, label) in enumerate(labels)}
    data = []
    out_file = os.path.join(args.output_dir, f'{prefix}_predictions.csv')
    with open(out_file, 'w', encoding='utf-8') as f:
        f.write('word\tlabel\tpred\n')
        for (ex_id, example) in enumerate(eval_dataset.examples):
            # -100 marks sub-word/special positions ignored by the loss; mask them out.
            ex_mask = np.not_equal(outputs.label_ids[ex_id], np.full_like(outputs.label_ids[ex_id], (- 100)))
            ex_labels = outputs.label_ids[ex_id][ex_mask].tolist()
            ex_preds = [np.argmax(pred) for pred in outputs.predictions[ex_id][ex_mask]]
            if len(example.words) != len(ex_labels):
                logger.warning(f"Fewer labels than words in example {ex_id}: {' '.join(example.words)}")
            for word in example.words:
                if (len(ex_preds) == 0) or (len(ex_labels) == 0):
                    # Ran out of labels/predictions (e.g. truncated example): skip remaining words.
                    logger.warning(f"Fewer labels than words in example {ex_id}: {' '.join(example.words)}")
                    continue
                pred = label_map[ex_preds.pop(0)]
                label = label_map[ex_labels.pop(0)]
                data.append([word, label, pred])
                f.write(f'{word}\t{label}\t{pred}\n')
            # Blank line separates examples.
            f.write('\n')
    logger.info(f'Saved predictions and labels to {out_file}')
    logger.info(f'Logging as table to wandb')
    preds_table = wandb.Table(columns=['word', 'label', 'pred'], data=data)
    wandb.log({f'{prefix}_outputs': preds_table})
class CodeMixin():
    """Mixin for email-verification-code models.

    Subclasses must provide persistence (`save`) and implement `get_code`.
    NOTE(review): the three `cls`-taking methods below lacked `@classmethod`
    decorators (almost certainly stripped -- without them `cls()` and
    `cls.generate_code()` cannot work); restored.
    """
    email = ''        # address the code was issued for
    code = ''         # the verification token itself
    verified = False  # flipped to True once verify() is called

    def verify(self):
        """Mark this code as used and persist the change."""
        self.verified = True
        self.save()

    @classmethod
    def generate_code(cls):
        """Return a fresh 32-character hex verification code."""
        return uuid.uuid4().hex

    @classmethod
    def make_code(cls, email):
        """Create, persist and return a new unverified code for `email`."""
        code = cls()
        code.email = email
        code.code = cls.generate_code()
        code.verified = False
        code.save()
        return code

    @classmethod
    def get_code(cls, code):
        """Look up a code instance; must be implemented by the concrete model."""
        raise NotImplementedError('Implement in subclass')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.