class Effect6807(BaseEffect):

    type = 'passive'

    @staticmethod
    def handler(fit, src, context, projectionRange, **kwargs):
        lvl = src.level
        fit.modules.filteredItemBoost(
            lambda mod: mod.item.requiresSkill('Invulnerability Core Operation'),
            'buffDuration', src.getModifiedItemAttr('durationBonus') * lvl, **kwargs)
        fit.modules.filteredItemBoost(
            lambda mod: mod.item.requiresSkill('Invulnerability Core Operation'),
            'duration', src.getModifiedItemAttr('durationBonus') * lvl, **kwargs)
class Match:
    def __init__(
        self,
        title: str | re.Pattern | None = None,
        wm_class: str | re.Pattern | None = None,
        role: str | re.Pattern | None = None,
        wm_type: str | re.Pattern | None = None,
        wm_instance_class: str | re.Pattern | None = None,
        net_wm_pid: int | None = None,
        func: Callable[[base.Window], bool] | None = None,
        wid: int | None = None,
    ) -> None:
        self._rules: dict[str, Any] = {}
        if title is not None:
            if isinstance(title, list):
                title = convert_deprecated_list(title, 'title')
            self._rules['title'] = title
        if wm_class is not None:
            if isinstance(wm_class, list):
                wm_class = convert_deprecated_list(wm_class, 'wm_class')
            self._rules['wm_class'] = wm_class
        if wm_instance_class is not None:
            if isinstance(wm_instance_class, list):
                wm_instance_class = convert_deprecated_list(wm_instance_class, 'wm_instance_class')
            self._rules['wm_instance_class'] = wm_instance_class
        if wid is not None:
            self._rules['wid'] = wid
        if net_wm_pid is not None:
            try:
                self._rules['net_wm_pid'] = int(net_wm_pid)
            except ValueError:
                error = 'Invalid rule for net_wm_pid: "%s" only int allowed' % str(net_wm_pid)
                raise utils.QtileError(error)
        if func is not None:
            self._rules['func'] = func
        if role is not None:
            if isinstance(role, list):
                role = convert_deprecated_list(role, 'role')
            self._rules['role'] = role
        if wm_type is not None:
            if isinstance(wm_type, list):
                wm_type = convert_deprecated_list(wm_type, 'wm_type')
            self._rules['wm_type'] = wm_type

    @staticmethod
    def _get_property_predicate(name: str, value: Any) -> Callable[..., bool]:
        if name == 'net_wm_pid' or name == 'wid':
            return lambda other: other == value
        elif name == 'wm_class':
            def predicate(other) -> bool:
                match = getattr(other, 'match', lambda v: v == other)
                return value and any(match(v) for v in value)
            return predicate
        else:
            def predicate(other) -> bool:
                match = getattr(other, 'match', lambda v: v == other)
                return match(value)
            return predicate

    def compare(self, client: base.Window) -> bool:
        value: Any
        for property_name, rule_value in self._rules.items():
            if property_name == 'title':
                value = client.name
            elif 'class' in property_name:
                wm_class = client.get_wm_class()
                if not wm_class:
                    return False
                if property_name == 'wm_instance_class':
                    value = wm_class[0]
                else:
                    value = wm_class
            elif property_name == 'role':
                value = client.get_wm_role()
            elif property_name == 'func':
                return rule_value(client)
            elif property_name == 'net_wm_pid':
                value = client.get_pid()
            elif property_name == 'wid':
                value = client.wid
            else:
                value = client.get_wm_type()
            if value is None:
                return False
            match = self._get_property_predicate(property_name, value)
            if not match(rule_value):
                return False
        if not self._rules:
            return False
        return True

    def map(self, callback: Callable[[base.Window], Any], clients: list[base.Window]) -> None:
        for c in clients:
            if self.compare(c):
                callback(c)

    def __repr__(self) -> str:
        return '<Match %s>' % self._rules
def fmcw_rx():
    angle = np.arange(-90, 91, 1)
    pattern = 20 * np.log10(np.cos(angle / 180 * np.pi) + 0.01) + 6
    rx_channel = {
        'location': (0, 0, 0),
        'azimuth_angle': angle,
        'azimuth_pattern': pattern,
        'elevation_angle': angle,
        'elevation_pattern': pattern,
    }
    return Receiver(
        fs=2000000.0,
        noise_figure=12,
        rf_gain=20,
        load_resistor=500,
        baseband_gain=30,
        channels=[rx_channel],
    )
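# Usage sketch for the receiver factory above -- illustrative only. The
# `Receiver` class comes from the original module's imports (radarsimpy-style);
# inspecting the antenna pattern needs nothing beyond numpy.
import numpy as np

angle = np.arange(-90, 91, 1)
pattern = 20 * np.log10(np.cos(angle / 180 * np.pi) + 0.01) + 6
print('peak gain %.2f dB at %d deg' % (pattern.max(), angle[pattern.argmax()]))  # ~6.09 dB at 0 deg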
class MultiReg(ScrimsButton):
    def __init__(self, ctx: Context, letter: str):
        super().__init__(emoji=ri(letter))
        self.ctx = ctx

    async def callback(self, interaction: Interaction):
        await interaction.response.defer()
        self.view.record.multiregister = not self.view.record.multiregister
        await self.ctx.success(
            f"Now users **{'can' if self.view.record.multiregister else 'can not'}** register more than once.", 3
        )
        await self.view.refresh_view()
        await self.view.record.confirm_all_scrims(self.ctx, multiregister=self.view.record.multiregister)
class TestSimpleModule:

    @pytest.fixture(autouse=True, scope='class')
    def built(self, builder):
        builder('pyexample', warningiserror=True, confoverrides={'exclude_patterns': ['manualapi.rst']})

    def test_integration(self, parse):
        self.check_integration(parse, '_build/html/autoapi/example/index.html')
        index_file = parse('_build/html/index.html')
        toctree = index_file.select('li > a')
        assert any(item.text == 'API Reference' for item in toctree)

    def check_integration(self, parse, example_path):
        example_file = parse(example_path)
        foo_sig = example_file.find(id='example.Foo')
        assert foo_sig
        assert foo_sig.find(class_='sig-name').text == 'Foo'
        assert foo_sig.find(class_='sig-param').text == 'attr'
        foo = foo_sig.parent
        assert foo.find(id='example.Foo.Meta')
        attr2 = foo.find(id='example.Foo.attr2')
        assert 'attr2' in attr2.text
        assert attr2.parent.find('dd').text.startswith('This is the docstring of an instance attribute.')
        method_okay = foo.find(id='example.Foo.method_okay')
        assert method_okay
        args = method_okay.find_all(class_='sig-param')
        assert len(args) == 2
        assert args[0].text == 'foo=None'
        assert args[1].text == 'bar=None'
        method_multiline = foo.find(id='example.Foo.method_multiline')
        assert method_multiline
        args = method_multiline.find_all(class_='sig-param')
        assert len(args) == 3
        assert args[0].text == 'foo=None'
        assert args[1].text == 'bar=None'
        assert args[2].text == 'baz=None'
        method_tricky = foo.find(id='example.Foo.method_tricky')
        assert method_tricky
        args = method_tricky.find_all(class_='sig-param')
        assert len(args) == 2
        assert args[0].text == 'foo=None'
        assert args[1].text == 'bar=dict(foo=1, bar=2)'
        init_args = foo.parent.find_next(class_='field-list')
        assert 'Set an attribute' in init_args.text
        assert len(foo_sig.find_all(class_='sig-param')) == 1
        property_simple = foo.find(id='example.Foo.property_simple')
        assert property_simple
        assert property_simple.parent.find('dd').text.strip() == 'This property should parse okay.'
        bar_method_okay = example_file.find(id='example.Bar.method_okay')
        assert bar_method_okay.parent.find('dd').text.strip() == 'This method should parse okay'
        assert not os.path.exists('_build/html/autoapi/method_multiline')
        two = example_file.find(id='example.Two')
        assert two.parent.find('dd').text.count('One __init__') == 1
        a_tuple = example_file.find(id='example.A_TUPLE')
        assert a_tuple.find(class_='property').text.endswith("('a', 'b')")
        a_list = example_file.find(id='example.A_LIST')
        assert a_list.find(class_='property').text.endswith("['c', 'd']")
        assert 'dinglebop' not in example_file.text
        index_file = parse('_build/html/index.html')
        toctree = index_file.select('li > a')
        assert any(item.text == 'Foo' for item in toctree)
        assert any(item.text == 'Foo.Meta' for item in toctree)

    def test_napoleon_integration_not_loaded(self, parse):
        example_file = parse('_build/html/autoapi/example/index.html')
        method_google = example_file.find(id='example.Foo.method_google_docs')
        assert 'Args' in method_google.parent.find('dd').text
        assert 'Returns' in method_google.parent.find('dd').text

    def test_show_inheritance(self, parse):
        example_file = parse('_build/html/autoapi/example/index.html')
        foo = example_file.find(id='example.Foo')
        foo_docstring = foo.parent.find('dd').contents[0]
        assert foo_docstring.text.startswith('Bases:')

    def test_long_signature(self, parse):
        example_file = parse('_build/html/autoapi/example/index.html')
        summary_row = example_file.find_all(class_='autosummary')[1].find_all('tr')[-1]
        assert summary_row
        cells = summary_row.find_all('td')
        assert cells[0].text.replace('\xa0', ' ') == 'fn_with_long_sig(this, *[, function, has, quite])'
        assert cells[1].text.strip() == 'A function with a long signature.'
class MaxPoolingAggregator(Layer):
    def __init__(self, input_dim, output_dim, model_size='small', neigh_input_dim=None,
                 dropout=0.0, bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
        super(MaxPoolingAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = act
        self.concat = concat
        if neigh_input_dim is None:
            neigh_input_dim = input_dim
        if name is not None:
            name = '/' + name
        else:
            name = ''
        if model_size == 'small':
            hidden_dim = self.hidden_dim = 512
        elif model_size == 'big':
            hidden_dim = self.hidden_dim = 1024
        self.mlp_layers = []
        self.mlp_layers.append(Dense(input_dim=neigh_input_dim, output_dim=hidden_dim, act=tf.nn.relu,
                                     dropout=dropout, sparse_inputs=False, logging=self.logging))
        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['neigh_weights'] = glorot([hidden_dim, output_dim], name='neigh_weights')
            self.vars['self_weights'] = glorot([input_dim, output_dim], name='self_weights')
            if self.bias:
                # Use the local `output_dim`: self.output_dim is only assigned below.
                self.vars['bias'] = zeros([output_dim], name='bias')
        if self.logging:
            self._log_vars()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.neigh_input_dim = neigh_input_dim
        print('max pooling dimension {} and {}, {}'.format(self.input_dim, self.output_dim, self.concat))

    def _call(self, inputs):
        self_vecs, neigh_vecs = inputs
        neigh_h = neigh_vecs
        dims = tf.shape(neigh_h)
        batch_size = dims[0]
        num_neighbors = dims[1]
        # Flatten neighbours, push them through the MLP, then max-pool per node.
        h_reshaped = tf.reshape(neigh_h, (batch_size * num_neighbors, self.neigh_input_dim))
        for l in self.mlp_layers:
            h_reshaped = l(h_reshaped)
        neigh_h = tf.reshape(h_reshaped, (batch_size, num_neighbors, self.hidden_dim))
        neigh_h = tf.reduce_max(neigh_h, axis=1)
        from_neighs = tf.matmul(neigh_h, self.vars['neigh_weights'])
        from_self = tf.matmul(self_vecs, self.vars['self_weights'])
        if not self.concat:
            output = tf.add_n([from_self, from_neighs])
        else:
            output = tf.concat([from_self, from_neighs], axis=1)
        if self.bias:
            output += self.vars['bias']
        return self.act(output)
def L3(mu, C, r, m, n):
    total_sum = 0
    vals = []
    for i in range(m):
        numer = sum(C[i][j] * mu[j] for j in range(n))
        denom = sum(C[h][j] * mu[j] for j in range(n) for h in range(m))
        total_sum += r[i] * numpy.log(numer / denom)
        vals.append(numer / denom)
    return -total_sum, vals
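# Tiny numeric check of L3 with hypothetical inputs (assumes `numpy` is
# imported in the module, as the function body requires). With a uniform C,
# every per-row ratio is 1/m, so the result reduces to -sum(r) * log(1/m).
mu = [0.5, 0.5]
C = [[1.0, 1.0], [1.0, 1.0]]  # m=2 rows, n=2 columns
r = [3.0, 1.0]
neg_ll, vals = L3(mu, C, r, m=2, n=2)
print(neg_ll, vals)  # vals == [0.5, 0.5]; neg_ll == -4 * log(0.5) ~= 2.77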
def is_staging_test(test_case):
    if not _run_staging:
        return unittest.skip('test is staging test')(test_case)
    else:
        try:
            import pytest
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_staging_test()(test_case)
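# Usage sketch -- hypothetical test class. `_run_staging` is assumed to be a
# module-level flag (e.g. parsed from an environment variable upstream).
import unittest

@is_staging_test
class StagingSmokeTest(unittest.TestCase):
    def test_ping(self):
        self.assertTrue(True)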
@_if_nothing_inferred
def instance_class_infer_binary_op(
    self: nodes.ClassDef,
    opnode: nodes.AugAssign | nodes.BinOp,
    operator: str,
    other: InferenceResult,
    context: InferenceContext,
    method: SuccessfulInferenceResult,
) -> Generator[InferenceResult, None, None]:
    return method.infer_call_result(self, context)
class BNInception(nn.Module):
    def __init__(self, channels, init_block_channels_list, mid1_channels_list, mid2_channels_list,
                 bias=True, use_bn=True, in_channels=3, in_size=(224, 224), num_classes=1000):
        super(BNInception, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module('init_block', StemBlock(
            in_channels=in_channels,
            out_channels=init_block_channels_list[1],
            mid_channels=init_block_channels_list[0],
            bias=bias,
            use_bn=use_bn))
        in_channels = init_block_channels_list[-1]
        for i, channels_per_stage in enumerate(channels):
            mid1_channels_list_i = mid1_channels_list[i]
            mid2_channels_list_i = mid2_channels_list[i]
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                if (j == 0) and (i != 0):
                    stage.add_module('unit{}'.format(j + 1), ReductionBlock(
                        in_channels=in_channels,
                        mid1_channels_list=mid1_channels_list_i[j],
                        mid2_channels_list=mid2_channels_list_i[j],
                        bias=bias,
                        use_bn=use_bn))
                else:
                    avg_pool = (i != len(channels) - 1) or (j != len(channels_per_stage) - 1)
                    stage.add_module('unit{}'.format(j + 1), InceptionBlock(
                        in_channels=in_channels,
                        mid1_channels_list=mid1_channels_list_i[j],
                        mid2_channels_list=mid2_channels_list_i[j],
                        avg_pool=avg_pool,
                        bias=bias,
                        use_bn=use_bn))
                in_channels = out_channels
            self.features.add_module('stage{}'.format(i + 1), stage)
        self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=7, stride=1))
        self.output = nn.Linear(in_features=in_channels, out_features=num_classes)
        self._init_params()

    def _init_params(self):
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
class IterDataPipeQueueProtocolClient(ProtocolClient):
    def request_reset_epoch(self, seed_generator, iter_reset_fn):
        if not self.can_take_request():
            raise Exception('Can not reset while we are still waiting response for previous request')
        request = communication.messages.ResetEpochRequest(seed_generator, iter_reset_fn)
        self.request_queue.put(request)
        self.request_sent(request)

    def request_next(self):
        if not self.can_take_request():
            raise Exception('Can not request next item while we are still waiting response for previous request')
        request = communication.messages.GetNextRequest()
        self.request_queue.put(request)
        self.request_sent(request)

    def get_response_reset_epoch(self, block=False):
        try:
            response = self.response_queue.get(block=block)
        except EmptyException:
            raise EmptyQueue('queue is empty')
        self.request_served(response)
        if not isinstance(response, communication.messages.ResetEpochResponse):
            raise Exception('Invalid response received')

    def get_response_limit(self, block=False):
        try:
            response = self.response_queue.get(block=block)
        except EmptyException:
            raise EmptyQueue('queue is empty')
        self.request_served(response)
        if not isinstance(response, communication.messages.LimitResponse):
            raise Exception('Invalid response received when expecting `LimitResponse`')

    def get_response_pause(self, block=False):
        try:
            response = self.response_queue.get(block=block)
        except EmptyException:
            raise EmptyQueue('queue is empty')
        self.request_served(response)
        if not isinstance(response, communication.messages.PauseResponse):
            raise Exception('Invalid response received when expecting `PauseResponse`')

    def get_response_resume(self, block=False):
        try:
            response = self.response_queue.get(block=block)
        except EmptyException:
            raise EmptyQueue('queue is empty')
        self.request_served(response)
        if not isinstance(response, communication.messages.ResumeResponse):
            raise Exception('Invalid response received when expecting `ResumeResponse`')

    def get_response_next(self, block=False, timeout=None):
        if not self.waiting_for_response():
            raise Exception('Can not expect any response without submitted request')
        try:
            response = self.response_queue.get(block=block, timeout=timeout)
        except EmptyException:
            raise EmptyQueue('queue is empty')
        self.request_served(response)
        return response
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs)
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
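# Usage sketch, assuming the standard `from_pretrained` entry point that
# ProcessorMixin subclasses expose; the checkpoint name is illustrative.
from PIL import Image

processor = BridgeTowerProcessor.from_pretrained('BridgeTower/bridgetower-base')
image = Image.new('RGB', (256, 256))
inputs = processor(images=image, text='a photo of a cat', return_tensors='pt')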
@with_fixtures(WebFixture)
def test_populating(web_fixture):
    item_specs = [Bookmark('/', '/href1', 'description1'), Bookmark('/', '/go_to_href', 'description2')]
    menu = Nav(web_fixture.view).with_bookmarks(item_specs)
    tester = WidgetTester(menu)
    [item1, item2] = menu.menu_items
    assert item1.a.href.path == '/href1'
    assert item1.a.children[0].value == 'description1'
    assert item2.a.href.path == '/go_to_href'
    assert item2.a.children[0].value == 'description2'
    a_list = [A.from_bookmark(web_fixture.view, i) for i in item_specs]
    menu = Nav(web_fixture.view).with_a_list(a_list)
    [item1, item2] = menu.menu_items
    assert item1.a is a_list[0]
    assert item2.a is a_list[1]
def _override_input_dist_forwards(pipelined_modules: List[ShardedModule]) -> None:
    for module in pipelined_modules:
        for child_fqn, child_module in module.named_modules():
            if hasattr(child_module, '_has_uninitialized_input_dist'):
                assert not child_module._has_uninitialized_input_dist, f'{child_fqn} has uninitialized input dist'
            if not hasattr(child_module, '_input_dists'):
                continue
            for input_dist in child_module._input_dists:
                if hasattr(input_dist, '_dist'):
                    assert isinstance(input_dist._dist, KJTAllToAll)
                    input_dist._dist.forward = KJTAllToAllForward(
                        pg=input_dist._dist._pg,
                        splits=input_dist._dist._splits,
                        stagger=input_dist._dist._stagger,
                    )
class SigmaPoint(object):
    def __init__(self, sensor, down=False):
        self.sensor = sensor
        self.down = down
        self.count = 1
        self.time = time.monotonic()

    def add_measurement(self, sensor, down):
        self.count += 1
        fac = max(1 / self.count, 0.01)
        self.sensor = avg(fac, self.sensor, sensor)
        if down:
            self.down = avg(fac, self.down, down)
        self.time = time.monotonic()
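# `avg` above is not shown in this snippet; a minimal stand-in consistent with
# how it is called (blend factor, old value, new value) would be:
def avg(fac, old, new):
    return old * (1 - fac) + new * fac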
class AverageMeter(object):
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = None

    def update(self, val):
        if self.val is None:
            self.val = val
        else:
            self.val = self.val * 0.9 + val * 0.1

    def __call__(self):
        return self.val
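# Usage sketch: the meter keeps a fixed 0.9/0.1 exponential moving average,
# seeded with the first value it sees.
meter = AverageMeter()
for loss in (1.0, 0.5, 0.25):
    meter.update(loss)
print(meter())  # 0.9 * (0.9 * 1.0 + 0.1 * 0.5) + 0.1 * 0.25 == 0.88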
def Myrm(dirstring):
    root_rp = rpath.RPath(Globals.local_connection, dirstring)
    for rp in selection.Select(root_rp).get_select_iter():
        if rp.isdir():
            rp.chmod(0o700)  # 448 decimal: make directories traversable and writable
        elif rp.isreg():
            rp.chmod(0o600)  # 384 decimal: make regular files writable
    path = root_rp.path
    if os.path.isdir(path):
        shutil.rmtree(path)
    elif os.path.isfile(path):
        os.remove(path)
def handle_long_project_repeating_form_request(**kwargs) -> Any:
    headers = kwargs['headers']
    data = kwargs['data']
    resp = None
    if 'data' in data:
        repeat_forms = json.loads(data['data'][0])
        resp = len(repeat_forms)
    else:
        resp = [{'form_name': 'testform', 'custom_form_label': ''}]
    return (201, headers, json.dumps(resp))
def write_and_fudge_mtime(content: str, target_path: str) -> None:
    new_time = None
    if os.path.isfile(target_path):
        # Push the mtime one second past the old file's so change detection fires.
        new_time = os.stat(target_path).st_mtime + 1
    dirname = os.path.dirname(target_path)
    os.makedirs(dirname, exist_ok=True)
    with open(target_path, 'w', encoding='utf-8') as target:
        target.write(content)
    if new_time:
        os.utime(target_path, times=(new_time, new_time))
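# Usage sketch: rewriting an existing file pushes its mtime one second past the
# old one, so mtime-based caches reliably see a change (paths are illustrative).
import os

write_and_fudge_mtime('x = 1\n', 'tmp/mod.py')
before = os.stat('tmp/mod.py').st_mtime
write_and_fudge_mtime('x = 2\n', 'tmp/mod.py')
assert os.stat('tmp/mod.py').st_mtime == before + 1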
class Var:
    def __init__(self, label, type, requires_grad=False, constant=None):
        if type == float:
            type = float32
        elif type == int:
            type = int32
        self.label = label
        self.type = type
        self.requires_grad = requires_grad
        self.constant = constant

    def __str__(self):
        return self.label

    def ctype(self):
        if isinstance(self.type, array):
            return f'array_t<{str(self.type.dtype.__name__)}>'
        if isinstance(self.type, Struct):
            return self.type.cls.__name__
        else:
            return str(self.type.__name__)
@pytest.mark.parametrize('meth', [pytest.param('signal', marks=have_sigalrm), 'thread'])
@pytest.mark.parametrize('scope', ['function', 'class', 'module', 'session'])
def test_fix_finalizer(meth, scope, testdir):
    testdir.makepyfile(
        """
        import time, pytest

        class TestFoo:

            @pytest.fixture(scope='{scope}')
            def fix(self, request):
                print('fix setup')
                def fin():
                    print('fix finaliser')
                    time.sleep(2)
                request.addfinalizer(fin)

            def test_foo(self, fix):
                pass
        """.format(scope=scope)
    )
    result = testdir.runpytest('--timeout=1', '-s', f'--timeout-method={meth}')
    assert result.ret > 0
    assert 'Timeout' in (result.stdout.str() + result.stderr.str())
class QuoSocket(socketio.AsyncClient):
    bot: Quotient

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    async def emit(self, event, data=None, namespace=None, callback=None):
        return await super().emit('response__' + event, data=data, namespace=namespace, callback=callback)

    async def request(self, event, data=None, namespace=None, callback=None):
        return await super().emit(event, data=data, namespace=namespace, callback=callback)

    @staticmethod
    def int_parse(data):
        if not isinstance(data, dict):
            return data
        for x, y in data.items():
            if isinstance(y, str) and y.isdigit():
                data[x] = int(y)
        return data
@pytest.fixture
def cmdutils_stub(monkeypatch, stubs):
    return monkeypatch.setattr(objects, 'commands', {
        'quit': stubs.FakeCommand(name='quit', desc='quit qutebrowser'),
        'open': stubs.FakeCommand(name='open', desc='open a url'),
        'prompt-yes': stubs.FakeCommand(name='prompt-yes', deprecated=True),
        'scroll': stubs.FakeCommand(name='scroll', desc='Scroll the current tab in the given direction.', modes=()),
        'tab-close': stubs.FakeCommand(name='tab-close', desc='Close the current tab.'),
    })
def test_vectorize_test():
    a = np.random.random((5, 5))
    b = np.random.random((4, 4))
    c = np.random.random((3, 3))
    at = Tensor(tensor=a, name='a')
    bt = Tensor(tensor=b, name='b')
    ct = Tensor(tensor=c, name='c')
    mt = MultiTensor([at, bt, ct])
    vec = np.vstack((at.vectorize(), bt.vectorize()))
    vec = np.vstack((vec, ct.vectorize()))
    assert np.allclose(vec, mt.vectorize_tensors())
    # Repeat with mixed-rank tensors.
    a = np.random.random((5, 5, 5, 5))
    b = np.random.random((4, 4, 4))
    c = np.random.random((3, 3))
    at = Tensor(tensor=a, name='a')
    bt = Tensor(tensor=b, name='b')
    ct = Tensor(tensor=c, name='c')
    mt = MultiTensor([at, bt, ct])
    vec = np.vstack((at.vectorize(), bt.vectorize()))
    vec = np.vstack((vec, ct.vectorize()))
    assert np.allclose(vec, mt.vectorize_tensors())
class scan(object):
    def __init__(self, job, timeout=None):
        for field in self.get_data_fields():
            setattr(self, field, '')
        setattr(self, 'success', False)
        self.job = job[0]
        if len(job) > 1:
            self.target = job[1]
        self.scan_type = _whats_your_name()
        self.multiple_responses = False

    def recall_data(self, old_data_dict):
        for key in old_data_dict.keys():
            setattr(self, key, old_data_dict[key])

    def execute_scan(self, target, verbose):
        pass

    def return_data(self):
        data_dict = {}
        for field in self.get_raw_fields():
            data_dict[field] = getattr(self, field, '')
        try:
            data_dict['sort_field'] = util.ip.get_int_from_ip(self.target)
        except Exception:
            data_dict['sort_field'] = self.target
        return data_dict

    def get_data_fields(self):
        return []

    def get_raw_fields(self):
        return []

    def gettunnel(self, target, protocol, target_port):
        if not protocol.startswith('-'):
            protocol = '-' + protocol
        redir_cmd = None
        success = False
        max_attempts = 5
        for i in range(0, max_attempts):
            redir_cmd = ops.networking.redirect.generate_tunnel_cmd(
                arg_list=[protocol, '-target', target, target_port, '-lplisten'], random=True)
            redir_output = ops.networking.redirect.start_tunnel(dsz_cmd=redir_cmd)
            if (redir_output is not False) and (type(redir_output) is int):
                return redir_cmd
        return False

    def find_newest_touch(self, touch_name, touch_ext, touch_type='touches'):
        list_files = glob.glob(os.path.join(ops.DSZDISKSDIR, touch_type, '%s-*.%s' % (touch_name, touch_ext)))
        newest_file = '%s-0.0.0.%s' % (touch_name, touch_ext)
        for this_file in list_files:
            this_split = os.path.basename(this_file).split('-')[1].split('.')[:-1]
            newest_split = os.path.basename(newest_file).split('-')[1].split('.')[:-1]
            for i in range(0, len(this_split)):
                if int(this_split[i]) > int(newest_split[i]):
                    newest_file = this_file
                    break
        return newest_file

    def search_project_data(self, macs=None, hostname=None):
        candidate = None
        macslist = []
        if macs is not None:
            for mac in macs:
                macslist.append(mac.lower())
        if hostname is not None:
            hostname = hostname.lower()
        try:
            candidate = ops.project.matchTarget(macs=macslist, hostname=hostname)
        except ops.project.MultipleTargetIDException:
            return 'Multiple possible'
        if candidate is None:
            return ''
        if (type(candidate) == type([])) and (len(candidate) > 1):
            return 'Multiple possible'
        if (type(candidate) == type([])) and (len(candidate) == 0):
            return ''
        return candidate[-1]['target'].crypto_guid

    def min_time(self):
        return 5

    def min_range(self):
        return 5
class Attribute(MPTTModel):
    uri = models.URLField(
        max_length=640, blank=True,
        verbose_name=_('URI'),
        help_text=_('The Uniform Resource Identifier of this attribute (auto-generated).'))
    uri_prefix = models.URLField(
        max_length=256,
        verbose_name=_('URI Prefix'),
        help_text=_('The prefix for the URI of this attribute.'))
    key = models.SlugField(
        max_length=128, blank=True,
        verbose_name=_('Key'),
        help_text=_('The internal identifier of this attribute.'))
    path = models.CharField(
        max_length=512, db_index=True,
        verbose_name=_('Path'),
        help_text=_('The path part of the URI of this attribute (auto-generated).'))
    comment = models.TextField(
        blank=True,
        verbose_name=_('Comment'),
        help_text=_('Additional information about this attribute.'))
    locked = models.BooleanField(
        default=False,
        verbose_name=_('Locked'),
        help_text=_('Designates whether this attribute (and its descendants) can be changed.'))
    editors = models.ManyToManyField(
        Site, related_name='attributes_as_editor', blank=True,
        verbose_name=_('Editors'),
        help_text=_('The sites that can edit this attribute (in a multi site setup).'))
    parent = TreeForeignKey(
        'self', null=True, blank=True, on_delete=models.CASCADE,
        related_name='children', db_index=True,
        verbose_name=_('Parent attribute'),
        help_text=_('Parent attribute in the domain model.'))

    class Meta:
        ordering = ('uri',)
        verbose_name = _('Attribute')
        verbose_name_plural = _('Attributes')

    def __str__(self):
        return self.path

    def save(self, *args, **kwargs):
        self.path = self.build_path(self.key, self.parent)
        self.uri = self.build_uri(self.uri_prefix, self.path)
        super().save(*args, **kwargs)
        for child in self.children.all():
            child.save()

    def is_locked(self):
        return self.get_ancestors(include_self=True).filter(locked=True).exists()

    @classmethod
    def build_path(cls, key, parent):
        assert key
        path = key
        while parent:
            path = parent.key + '/' + path
            parent = parent.parent
        return path

    @classmethod
    def build_uri(cls, uri_prefix, path):
        assert path
        return join_url(uri_prefix or settings.DEFAULT_URI_PREFIX, '/domain/', path)
def test_self_reference_infer_does_not_trigger_recursion_error() -> None:
    code = """
    def func(elems):
        return elems

    class BaseModel(object):

        def __init__(self, *args, **kwargs):
            self._reference = func(*self._reference.split('.'))
    BaseModel()._reference
    """
    node = extract_node(code)
    inferred = next(node.infer())
    assert inferred is util.Uninferable
class Worker:
    def __init__(self, target: typing.Callable, timeout: int = 1) -> None:
        self.target = target
        self.timeout = timeout
        self.conn_sender, self.conn_receiver = multiprocessing.Pipe()
        self.worker = multiprocessing.Process(target=self.run_worker, args=(target, self.conn_receiver))
        self.worker.start()
        self.busy = False

    def run_worker(self, function, conn_receiver):
        while True:
            try:
                if conn_receiver.poll(self.timeout):
                    data = conn_receiver.recv()
                    if data is not None:
                        if data == 'stop':
                            break
                        else:
                            result = function(data)
                            conn_receiver.send(result)
                    data = None
            except Exception:
                pass
        conn_receiver.send('stop')

    def send(self, data):
        if self.busy:
            return None
        self.busy = True
        self.data = data
        self.conn_sender.send(data)
        return self

    def get(self):
        if self.conn_sender.poll(self.timeout):
            results = self.conn_sender.recv()
            self.busy = False
            return results
        else:
            self.busy = True
            self.conn_sender.send(self.data)
            return self

    def __exit__(self):
        while True:
            if self.busy:
                continue
            self.conn_sender.send('stop')
            stop = self.conn_sender.recv()
            if stop == 'stop':
                self.worker.join()
                self.worker.terminate()
                break
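# Usage sketch -- hypothetical target function; assumes a fork-capable platform
# so the bound method survives the Process() handoff. send() returns None while
# the worker is busy, and get() may need polling until the result arrives.
def square(x):
    return x * x

w = Worker(square)
if w.send(3) is not None:
    print(w.get())  # 9 once the round trip completes, else the Worker itself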
class NeuralTSDiag:
    def __init__(self, input_dim, lamdba=1, nu=1, style='ucb', init_x=None, init_y=None, diagonalize=True):
        self.diagonalize = diagonalize
        torch.manual_seed(0)
        torch.cuda.manual_seed(0)
        self.func = extend(Network(input_dim).to(**tkwargs))
        self.init_state_dict = deepcopy(self.func.state_dict())
        if init_x is not None:
            self.context_list = init_x.to(dtype=torch.float32)
        else:
            self.context_list = None
        if init_y is not None:
            self.reward = init_y.to(dtype=torch.float32)
        else:
            self.reward = None
        self.len = 0
        self.lamdba = lamdba
        self.total_param = sum(p.numel() for p in self.func.parameters() if p.requires_grad)
        # Keep either the diagonal or the full design matrix U, depending on `diagonalize`.
        if self.diagonalize:
            self.U = lamdba * torch.ones((self.total_param,))
        else:
            self.U = lamdba * torch.diag(torch.ones((self.total_param,)))
        self.nu = nu
        self.style = style
        self.loss_func = nn.MSELoss()
        self.mean = None
        self.std = None

    def select(self, context, batch_size=300):
        if self.mean is not None:
            context_ = (context - self.mean) / self.std
        else:
            context_ = context
        context_size = context_.shape[0]
        n_batchs = (context_size // batch_size) + int((context_size % batch_size) != 0)
        g_list = []
        mu = []
        for i in range(n_batchs):
            if i == n_batchs - 1:
                context_batch = context_[i * batch_size:]
            else:
                context_batch = context_[i * batch_size:(i + 1) * batch_size]
            mu_ = self.func(context_batch)
            sum_mu = torch.sum(mu_)
            with backpack(BatchGrad()):
                sum_mu.backward()
            g_list_ = torch.cat([p.grad_batch.flatten(start_dim=1).detach() for p in self.func.parameters()], dim=1)
            g_list.append(g_list_.cpu())
            mu.append(mu_.cpu())
        g_list = torch.vstack(g_list)
        mu = torch.vstack(mu)
        if self.diagonalize:
            sigma = torch.sqrt(torch.sum(self.lamdba * self.nu * g_list * g_list / self.U, dim=1))
        else:
            tmp = torch.matmul(g_list, torch.inverse(self.U))
            sigma = torch.sqrt(self.nu * self.lamdba * torch.matmul(tmp, torch.transpose(g_list, 0, 1)))
            sigma = torch.diagonal(sigma, 0)
        if self.style == 'ts':
            sample_r = torch.normal(mu.view(-1), sigma.view(-1))
        elif self.style == 'ucb':
            sample_r = mu.view(-1) + sigma.view(-1)
        arm = torch.argmax(sample_r)
        if self.diagonalize:
            self.U += g_list[arm] * g_list[arm]
        else:
            self.U += torch.outer(g_list[arm], g_list[arm])
        return arm, g_list[arm].norm().item()

    def train(self, context, reward, local_training_iter=30):
        if self.init_state_dict is not None:
            self.func.load_state_dict(deepcopy(self.init_state_dict))
        if context is not None:
            if self.context_list is None:
                self.context_list = torch.from_numpy(context.reshape(1, -1)).to(**tkwargs)
                self.reward = torch.tensor([reward]).to(**tkwargs)
            else:
                self.context_list = torch.cat((self.context_list, context.reshape(1, -1).to(**tkwargs)))
                self.reward = torch.cat((self.reward, torch.tensor([reward]).reshape(1, -1).to(**tkwargs)))
        self.len = self.context_list.shape[0]
        optimizer = torch.optim.Adam(self.func.parameters(), lr=0.001, weight_decay=self.lamdba / self.len)
        self.std = self.context_list.std(dim=0) + 1e-30
        self.mean = self.context_list.mean(dim=0)
        standardized_context = (self.context_list - self.mean) / self.std
        standardized_reward = self.reward.reshape(-1)
        for _ in range(local_training_iter):
            self.func.zero_grad()
            optimizer.zero_grad()
            pred = self.func(standardized_context).view(-1)
            loss = self.loss_func(pred, standardized_reward)
            loss.backward()
            optimizer.step()
        print('Training Loss : ', loss.item())
        return self.func.state_dict()
def parse_options():
    parser = argparse.ArgumentParser(
        description='Install SMT Solvers.\n\nThis script installs the solvers specified on the command line '
                    'or in the environment variable PYSMT_SOLVER if not already installed on the system.')
    parser.add_argument('--version', action='version',
                        version='%(prog)s {version}'.format(version=pysmt_version))
    for i in INSTALLERS:
        name = i.InstallerClass.SOLVER
        parser.add_argument('--%s' % name, dest=name, action='store_true', default=False,
                            help='Install %s' % name)
    parser.add_argument('--all', dest='all_solvers', action='store_true', default=False,
                        help='Install all the solvers')
    parser.add_argument('--force', dest='force_redo', action='store_true', default=False,
                        help='Forcedly rebuild the solvers even if already found')
    parser.add_argument('--check', dest='check', action='store_true', default=False,
                        help='Checks the installation of the solvers')
    parser.add_argument('--env', dest='env', action='store_true', default=False,
                        help='Prints a bash export command to extend the PYTHONPATH')
    parser.add_argument('--powershell', dest='powershell', action='store_true', default=False,
                        help='In combination with --env under windows, prints the commands in powershell format')
    parser.add_argument('--confirm-agreement', dest='skip_intro', action='store_true', default=False,
                        help='Confirm that you agree with the licenses of the solvers and skip the interactive question')
    install_path_default = os.path.join('~', '.smt_solvers')
    parser.add_argument('--install-path', dest='install_path', type=str, default=install_path_default,
                        help='The folder to use for the installation (defaults to: {!r})'.format(install_path_default))
    py_bindings = solver_install_site(plat_specific=True)
    parser.add_argument('--bindings-path', dest='bindings_path', type=str, default=py_bindings,
                        help='The folder to use for the bindings (defaults to the relevant site-packages directory: {!r})'.format(py_bindings))
    options = parser.parse_args()
    return options
def _check_multi_threading_and_problem_type(problem_type, **kwargs):
    if not isinstance(problem_type, SocpType.COLLOCATION):
        if 'n_thread' in kwargs:
            if kwargs['n_thread'] != 1:
                raise ValueError(
                    'Multi-threading is not possible yet while solving a trapezoidal stochastic ocp. '
                    'n_thread is set to 1 by default.')
class BLCBatchNorm(nn.BatchNorm1d):
    def forward(self, x):
        if x.dim() == 2:
            return super().forward(x)
        if x.dim() == 3:
            x = rearrange(x, 'B L C -> B C L')
            x = super().forward(x)
            x = rearrange(x, 'B C L -> B L C')
            return x
        raise ValueError('Only 2d or 3d tensors are supported')
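# Usage sketch: the wrapper lets BatchNorm1d accept channel-last (B, L, C)
# sequences by transposing to (B, C, L) and back.
import torch

bn = BLCBatchNorm(64)
x = torch.randn(8, 100, 64)  # batch, length, channels
assert bn(x).shape == (8, 100, 64)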
class RealmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None,
                 unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]',
                 mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize,
                         never_split=never_split, unk_token=unk_token, sep_token=sep_token,
                         pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
                         tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a "
                'Google pretrained model use `tokenizer = RealmTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split,
                                                  tokenize_chinese_chars=tokenize_chinese_chars,
                                                  strip_accents=strip_accents)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)

    @property
    def do_lower_case(self):
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string

    def batch_encode_candidates(self, text, **kwargs):
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('text_pair', None)
        return_tensors = kwargs.pop('return_tensors', None)
        output_data = {'input_ids': [], 'attention_mask': [], 'token_type_ids': []}
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get('input_ids')
            encoded_attention_mask = encoded_candidates.get('attention_mask')
            encoded_token_type_ids = encoded_candidates.get('token_type_ids')
            if encoded_input_ids is not None:
                output_data['input_ids'].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data['attention_mask'].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data['token_type_ids'].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int],
                                         token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None,
                                already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1,
                                                   already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int],
                                             token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. '
                        'Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)
def test_softplus():
    def softplus(x):
        return np.log(np.ones_like(x) + np.exp(x))

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.softplus(x)])
    test_values = get_standard_values()
    result = f([test_values])[0]
    expected = softplus(test_values)
    assert_allclose(result, expected, rtol=1e-05)
def test_window_transform():
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        assert src.window_transform(((0, None), (0, None))) == src.transform
        assert src.window_transform(((None, None), (None, None))) == src.transform
        assert src.window_transform(((1, None), (1, None))).c == src.bounds.left + src.res[0]
        assert src.window_transform(((1, None), (1, None))).f == src.bounds.top - src.res[1]
        assert src.window_transform(((-1, None), (-1, None))).c == src.bounds.left - src.res[0]
        assert src.window_transform(((-1, None), (-1, None))).f == src.bounds.top + src.res[1]
def bmshj2018_factorized(quality, metric='mse', pretrained=False, progress=True, **kwargs):
    if metric not in ('mse', 'ms-ssim'):
        raise ValueError(f'Invalid metric "{metric}"')
    if quality < 1 or quality > 8:
        raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
    return _load_model('bmshj2018-factorized', metric, quality, pretrained, progress, **kwargs)
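# Usage sketch, assuming the compressai-style `_load_model` registry this
# factory delegates to; quality=1 is the lowest-rate configuration.
net = bmshj2018_factorized(quality=1, metric='mse', pretrained=True).eval()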
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def get_cached_mosaiq_connection_in_dict(
    hostname: str, port: int = 1433, database: str = 'MOSAIQ', alias=None
) -> Dict[Literal['connection'], _connect.Connection]:
    return {
        'connection': get_uncached_mosaiq_connection(
            hostname=hostname, port=port, database=database, alias=alias)
    }
class SyncStateControl(ResponseControl):
    controlType = '1.3.6.1.4.1.4203.1.9.1.2'
    opnames = ('present', 'add', 'modify', 'delete')

    def decodeControlValue(self, encodedControlValue):
        d = decoder.decode(encodedControlValue, asn1Spec=SyncStateValue())
        state = d[0].getComponentByName('state')
        uuid = UUID(bytes=bytes(d[0].getComponentByName('entryUUID')))
        cookie = d[0].getComponentByName('cookie')
        if (cookie is not None) and cookie.hasValue():
            self.cookie = str(cookie)
        else:
            self.cookie = None
        self.state = self.__class__.opnames[int(state)]
        self.entryUUID = str(uuid)
class BitStage(nn.Module):
    def __init__(self, config, in_channels, out_channels, stride, dilation, depth,
                 bottle_ratio=0.25, layer_dropout=None):
        super().__init__()
        first_dilation = 1 if dilation in (1, 2) else 2
        if config.layer_type == 'bottleneck':
            layer_cls = BitBottleneckLayer
        else:
            layer_cls = BitPreActivationBottleneckLayer
        prev_chs = in_channels
        self.layers = nn.Sequential()
        for layer_idx in range(depth):
            stride, drop_path_rate, is_first_layer = self._get_updated_hyperparameters(
                layer_idx, stride, layer_dropout)
            self.layers.add_module(
                str(layer_idx),
                layer_cls(config, prev_chs, out_channels, stride=stride, dilation=dilation,
                          bottle_ratio=bottle_ratio, first_dilation=first_dilation,
                          drop_path_rate=drop_path_rate, is_first_layer=is_first_layer))
            prev_chs = out_channels
            first_dilation = dilation

    def _get_updated_hyperparameters(self, layer_idx, stride, layer_dropout):
        if layer_dropout:
            drop_path_rate = layer_dropout[layer_idx]
        else:
            drop_path_rate = 0.0
        if layer_idx != 0:
            stride = 1
        is_first_layer = layer_idx == 0
        return stride, drop_path_rate, is_first_layer

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for _, layer in enumerate(self.layers):
            hidden_state = layer(hidden_state)
        return hidden_state
def break_long_words(data):
    if verbose:
        print('#' * 10, 'Step - Break long words:')
    temp_vocab = list(set([c for line in data for c in line.split()]))
    temp_vocab = [k for k in temp_vocab if _check_replace(k)]
    temp_vocab = [k for k in temp_vocab if len(k) > 20]
    temp_dict = {}
    for word in temp_vocab:
        if '_' in word:
            temp_dict[word] = re.sub('_', ' ', word)
        elif '/' in word:
            temp_dict[word] = re.sub('/', ' / ', word)
        elif len(' '.join(word.split('-')).split()) > 2:
            temp_dict[word] = re.sub('-', ' ', word)
    data = list(map(lambda x: ' '.join([_make_dict_cleaning(i, temp_dict) for i in x.split()]), data))
    if verbose:
        _print_dict(temp_dict)
    return data
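# Usage sketch -- relies on the module-level `verbose` flag and the
# `_check_replace`/`_make_dict_cleaning` helpers referenced above.
lines = ['see a_very_long_snake_case_identifier_here indeed']
print(break_long_words(lines))  # underscores in >20-char tokens become spaces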
@admin.register(Sponsorship)
class SponsorshipAdmin(ImportExportActionModelAdmin, admin.ModelAdmin):
    change_form_template = 'sponsors/admin/sponsorship_change_form.html'
    form = SponsorshipReviewAdminForm
    inlines = [SponsorBenefitInline, AssetsInline]
    search_fields = ['sponsor__name']
    list_display = ['sponsor', 'status', 'package', 'year', 'applied_on', 'approved_on', 'start_date', 'end_date']
    list_filter = [SponsorshipStatusListFilter, 'package', 'year', TargetableEmailBenefitsFilter]
    actions = ['send_notifications']
    resource_class = SponsorshipResource
    fieldsets = [
        ('Sponsorship Data', {
            'fields': ('for_modified_package', 'sponsor_link', 'status', 'package', 'sponsorship_fee',
                       'year', 'get_estimated_cost', 'start_date', 'end_date', 'get_contract',
                       'level_name', 'renewal', 'overlapped_by'),
        }),
        ('Sponsor Detailed Information', {
            'fields': ('get_sponsor_name', 'get_sponsor_description', 'get_sponsor_landing_page_url',
                       'get_sponsor_web_logo', 'get_sponsor_print_logo', 'get_sponsor_primary_phone',
                       'get_sponsor_mailing_address', 'get_sponsor_contacts'),
        }),
        ('User Customizations', {
            'fields': ('get_custom_benefits_added_by_user', 'get_custom_benefits_removed_by_user'),
            'classes': ['collapse'],
        }),
        ('Events dates', {
            'fields': ('applied_on', 'approved_on', 'rejected_on', 'finalized_on'),
            'classes': ['collapse'],
        }),
    ]

    def get_fieldsets(self, request, obj=None):
        fieldsets = []
        for title, cfg in super().get_fieldsets(request, obj):
            if (title == 'User Customizations') and obj:
                if obj.user_customizations['added_by_user'] or obj.user_customizations['removed_by_user']:
                    cfg['classes'] = []
            fieldsets.append((title, cfg))
        return fieldsets

    def get_queryset(self, *args, **kwargs):
        qs = super().get_queryset(*args, **kwargs)
        return qs.select_related('sponsor', 'package', 'submited_by')

    def send_notifications(self, request, queryset):
        return views_admin.send_sponsorship_notifications_action(self, request, queryset)
    send_notifications.short_description = 'Send notifications to selected'

    def get_readonly_fields(self, request, obj):
        readonly_fields = [
            'for_modified_package', 'sponsor_link', 'status', 'applied_on', 'rejected_on', 'approved_on',
            'finalized_on', 'level_name', 'get_estimated_cost', 'get_sponsor_name', 'get_sponsor_description',
            'get_sponsor_landing_page_url', 'get_sponsor_web_logo', 'get_sponsor_print_logo',
            'get_sponsor_primary_phone', 'get_sponsor_mailing_address', 'get_sponsor_contacts', 'get_contract',
            'get_added_by_user', 'get_custom_benefits_added_by_user', 'get_custom_benefits_removed_by_user',
        ]
        if obj and not obj.open_for_editing:
            extra = ['start_date', 'end_date', 'package', 'level_name', 'sponsorship_fee']
            readonly_fields.extend(extra)
            if obj.year:
                readonly_fields.append('year')
        return readonly_fields

    def sponsor_link(self, obj):
        url = reverse('admin:sponsors_sponsor_change', args=[obj.sponsor.id])
        return mark_safe(f'<a href={url}>{obj.sponsor.name}</a>')
    sponsor_link.short_description = 'Sponsor'

    def get_estimated_cost(self, obj):
        cost = None
        html = "This sponsorship has not customizations so there's no estimated cost"
        if obj.for_modified_package:
            msg = ("This sponsorship has customizations and this cost is a sum of all benefit's "
                   'internal values from when this sponsorship was created')
            cost = intcomma(obj.estimated_cost)
            html = f'{cost} USD <br/><b>Important: </b> {msg}'
        return mark_safe(html)
    get_estimated_cost.short_description = 'Estimated cost'

    def get_contract(self, obj):
        if not obj.contract:
            return '---'
        url = reverse('admin:sponsors_contract_change', args=[obj.contract.pk])
        html = f"<a href='{url}' target='_blank'>{obj.contract}</a>"
        return mark_safe(html)
    get_contract.short_description = 'Contract'

    def get_urls(self):
        urls = super().get_urls()
        base_name = get_url_base_name(self.model)
        my_urls = [
            path('<int:pk>/reject', self.admin_site.admin_view(self.reject_sponsorship_view),
                 name=f'{base_name}_reject'),
            path('<int:pk>/approve-existing', self.admin_site.admin_view(self.approve_signed_sponsorship_view),
                 name=f'{base_name}_approve_existing_contract'),
            path('<int:pk>/approve', self.admin_site.admin_view(self.approve_sponsorship_view),
                 name=f'{base_name}_approve'),
            path('<int:pk>/enable-edit', self.admin_site.admin_view(self.rollback_to_editing_view),
                 name=f'{base_name}_rollback_to_edit'),
            path('<int:pk>/list-assets', self.admin_site.admin_view(self.list_uploaded_assets_view),
                 name=f'{base_name}_list_uploaded_assets'),
            path('<int:pk>/unlock', self.admin_site.admin_view(self.unlock_view), name=f'{base_name}_unlock'),
            path('<int:pk>/lock', self.admin_site.admin_view(self.lock_view), name=f'{base_name}_lock'),
        ]
        return my_urls + urls

    def get_sponsor_name(self, obj):
        return obj.sponsor.name
    get_sponsor_name.short_description = 'Name'

    def get_sponsor_description(self, obj):
        return obj.sponsor.description
    get_sponsor_description.short_description = 'Description'

    def get_sponsor_landing_page_url(self, obj):
        return obj.sponsor.landing_page_url
    get_sponsor_landing_page_url.short_description = 'Landing Page URL'

    def get_sponsor_web_logo(self, obj):
        html = ("{% load thumbnail %}{% thumbnail sponsor.web_logo '150x150' format='PNG' quality=100 as im %}"
                "<img src='{{ im.url}}'/>{% endthumbnail %}")
        template = Template(html)
        context = Context({'sponsor': obj.sponsor})
        html = template.render(context)
        return mark_safe(html)
    get_sponsor_web_logo.short_description = 'Web Logo'

    def get_sponsor_print_logo(self, obj):
        img = obj.sponsor.print_logo
        html = ''
        if img:
            html = ("{% load thumbnail %}{% thumbnail img '150x150' format='PNG' quality=100 as im %}"
                    "<img src='{{ im.url}}'/>{% endthumbnail %}")
            template = Template(html)
            context = Context({'img': img})
            html = template.render(context)
        return mark_safe(html) if html else '---'
    get_sponsor_print_logo.short_description = 'Print Logo'

    def get_sponsor_primary_phone(self, obj):
        return obj.sponsor.primary_phone
    get_sponsor_primary_phone.short_description = 'Primary Phone'

    def get_sponsor_mailing_address(self, obj):
        sponsor = obj.sponsor
        city_row = f'{sponsor.city} - {sponsor.get_country_display()} ({sponsor.country})'
        if sponsor.state:
            city_row = f'{sponsor.city} - {sponsor.state} - {sponsor.get_country_display()} ({sponsor.country})'
        mail_row = sponsor.mailing_address_line_1
        if sponsor.mailing_address_line_2:
            mail_row += f' - {sponsor.mailing_address_line_2}'
        html = f'<p>{city_row}</p>'
        html += f'<p>{mail_row}</p>'
        html += f'<p>{sponsor.postal_code}</p>'
        return mark_safe(html)
    get_sponsor_mailing_address.short_description = 'Mailing/Billing Address'

    def get_sponsor_contacts(self, obj):
        html = ''
        contacts = obj.sponsor.contacts.all()
        primary = [c for c in contacts if c.primary]
        not_primary = [c for c in contacts if not c.primary]
        if primary:
            html = '<b>Primary contacts</b><ul>'
            html += ''.join([f'<li>{c.name}: {c.email} / {c.phone}</li>' for c in primary])
            html += '</ul>'
        if not_primary:
            html += '<b>Other contacts</b><ul>'
            html += ''.join([f'<li>{c.name}: {c.email} / {c.phone}</li>' for c in not_primary])
            html += '</ul>'
        return mark_safe(html)
    get_sponsor_contacts.short_description = 'Contacts'

    def get_custom_benefits_added_by_user(self, obj):
        benefits = obj.user_customizations['added_by_user']
        if not benefits:
            return '---'
        html = ''.join([f'<p>{b}</p>' for b in benefits])
        return mark_safe(html)
    get_custom_benefits_added_by_user.short_description = 'Added by User'

    def get_custom_benefits_removed_by_user(self, obj):
        benefits = obj.user_customizations['removed_by_user']
        if not benefits:
            return '---'
        html = ''.join([f'<p>{b}</p>' for b in benefits])
        return mark_safe(html)
    get_custom_benefits_removed_by_user.short_description = 'Removed by User'

    def rollback_to_editing_view(self, request, pk):
        return views_admin.rollback_to_editing_view(self, request, pk)

    def reject_sponsorship_view(self, request, pk):
        return views_admin.reject_sponsorship_view(self, request, pk)

    def approve_sponsorship_view(self, request, pk):
        return views_admin.approve_sponsorship_view(self, request, pk)

    def approve_signed_sponsorship_view(self, request, pk):
        return views_admin.approve_signed_sponsorship_view(self, request, pk)

    def list_uploaded_assets_view(self, request, pk):
        return views_admin.list_uploaded_assets(self, request, pk)

    def unlock_view(self, request, pk):
        return views_admin.unlock_view(self, request, pk)

    def lock_view(self, request, pk):
        return views_admin.lock_view(self, request, pk)
def test_dielectric_constant_model(mocker):
    mocker.patch('builtins.input', return_value='Y')
    cauchy = Cauchy()
    model = DielectricConstantModel(e_inf=0, oscillators=[cauchy])
    assert model.dielectric_constants(1000) == 0
    model.add_oscillator('drude', An=2, Brn=1)
    assert model.dielectric_constants(1240) == -1 + 1j
    model.add_oscillator('lorentz', An=3, En=2, Brn=3)
    assert model.dielectric_constants(1240) == 1 + 3j
    model.remove_oscillator(2)
    assert model.dielectric_constants(1240) == 2 + 2j
def compute_complexity(model: nn.Module, compute_fn: Callable, input_shape: Tuple[int],
                       input_key: Optional[Union[str, List[str]]] = None, patch_attr: str = None,
                       compute_unique: bool = False) -> int:
    assert isinstance(model, nn.Module)
    if (not isinstance(input_shape, abc.Sequence)) and (not isinstance(input_shape, dict)):
        return None
    else:
        input = get_model_dummy_input(model, input_shape, input_key)
    complexity_computer = ComplexityComputer(compute_fn, compute_unique)
    # Patch every submodule's forward so each call reports into the computer.
    modify_forward(model, complexity_computer, patch_attr=patch_attr)
    try:
        with eval_model(model), torch.no_grad():
            model.forward(input)
    finally:
        restore_forward(model, patch_attr=patch_attr)
    return complexity_computer.count
class Effect553(BaseEffect):

    type = 'passive'

    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost(
            lambda mod: mod.item.requiresSkill('Large Hybrid Turret'),
            'trackingSpeed', ship.getModifiedItemAttr('shipBonusGB'),
            skill='Gallente Battleship', **kwargs)
@nox.session(python=USE_PYTHON_VERSIONS)
@nox.parametrize('command_a', install_commands)
@nox.parametrize('command_b', install_commands)
def session_pkgutil(session, command_a, command_b):
    session.install('--upgrade', 'setuptools', 'pip')
    install_packages(session, 'pkgutil/pkg_a', 'pkgutil/pkg_b', command_a, command_b)
    session.run('python', 'verify_packages.py')
class Adafactor(Optimizer): def __init__(self, params, lr=None, eps=(1e-30, 0.001), clip_threshold=1.0, decay_rate=(- 0.8), beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False): require_version('torch>=1.5.0') if ((lr is not None) and relative_step): raise ValueError('Cannot combine manual `lr` and `relative_step=True` options') if (warmup_init and (not relative_step)): raise ValueError('`warmup_init=True` requires `relative_step=True`') defaults = {'lr': lr, 'eps': eps, 'clip_threshold': clip_threshold, 'decay_rate': decay_rate, 'beta1': beta1, 'weight_decay': weight_decay, 'scale_parameter': scale_parameter, 'relative_step': relative_step, 'warmup_init': warmup_init} super().__init__(params, defaults) def _get_lr(param_group, param_state): rel_step_sz = param_group['lr'] if param_group['relative_step']: min_step = ((1e-06 * param_state['step']) if param_group['warmup_init'] else 0.01) rel_step_sz = min(min_step, (1.0 / math.sqrt(param_state['step']))) param_scale = 1.0 if param_group['scale_parameter']: param_scale = max(param_group['eps'][1], param_state['RMS']) return (param_scale * rel_step_sz) def _get_options(param_group, param_shape): factored = (len(param_shape) >= 2) use_first_moment = (param_group['beta1'] is not None) return (factored, use_first_moment) def _rms(tensor): return (tensor.norm(2) / (tensor.numel() ** 0.5)) def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col): r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=(- 1), keepdim=True)).rsqrt_().unsqueeze((- 1)) c_factor = exp_avg_sq_col.unsqueeze((- 2)).rsqrt() return torch.mul(r_factor, c_factor) def step(self, closure=None): loss = None if (closure is not None): loss = closure() for group in self.param_groups: for p in group['params']: if (p.grad is None): continue grad = p.grad.data if (grad.dtype in {torch.float16, torch.bfloat16}): grad = grad.float() if grad.is_sparse: raise RuntimeError('Adafactor does not support sparse gradients.') state = self.state[p] grad_shape = grad.shape (factored, use_first_moment) = self._get_options(group, grad_shape) if (len(state) == 0): state['step'] = 0 if use_first_moment: state['exp_avg'] = torch.zeros_like(grad) if factored: state['exp_avg_sq_row'] = torch.zeros(grad_shape[:(- 1)]).to(grad) state['exp_avg_sq_col'] = torch.zeros((grad_shape[:(- 2)] + grad_shape[(- 1):])).to(grad) else: state['exp_avg_sq'] = torch.zeros_like(grad) state['RMS'] = 0 else: if use_first_moment: state['exp_avg'] = state['exp_avg'].to(grad) if factored: state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad) state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad) else: state['exp_avg_sq'] = state['exp_avg_sq'].to(grad) p_data_fp32 = p.data if (p.data.dtype in {torch.float16, torch.bfloat16}): p_data_fp32 = p_data_fp32.float() state['step'] += 1 state['RMS'] = self._rms(p_data_fp32) lr = self._get_lr(group, state) beta2t = (1.0 - math.pow(state['step'], group['decay_rate'])) update = ((grad ** 2) + group['eps'][0]) if factored: exp_avg_sq_row = state['exp_avg_sq_row'] exp_avg_sq_col = state['exp_avg_sq_col'] exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=(- 1)), alpha=(1.0 - beta2t)) exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=(- 2)), alpha=(1.0 - beta2t)) update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) update.mul_(grad) else: exp_avg_sq = state['exp_avg_sq'] exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t)) update = exp_avg_sq.rsqrt().mul_(grad) update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0)) 
update.mul_(lr) if use_first_moment: exp_avg = state['exp_avg'] exp_avg.mul_(group['beta1']).add_(update, alpha=(1 - group['beta1'])) update = exp_avg if (group['weight_decay'] != 0): p_data_fp32.add_(p_data_fp32, alpha=((- group['weight_decay']) * lr)) p_data_fp32.add_((- update)) if (p.data.dtype in {torch.float16, torch.bfloat16}): p.data.copy_(p_data_fp32) return loss
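A minimal usage sketch for the Adafactor class above, assuming torch and math are already imported where the class is defined; the linear model and squared-error loss are illustrative placeholders.

import torch
import torch.nn as nn

model = nn.Linear(16, 4)
# relative_step=True derives the step size from the update count, so lr stays None.
optimizer = Adafactor(model.parameters(), lr=None, scale_parameter=True, relative_step=True, warmup_init=True)

for _ in range(3):
    loss = model(torch.randn(8, 16)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()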
class NestedAsyncState(NestedState, AsyncState): async def scoped_enter(self, event_data, scope=None): self._scope = (scope or []) (await self.enter(event_data)) self._scope = [] async def scoped_exit(self, event_data, scope=None): self._scope = (scope or []) (await self.exit(event_data)) self._scope = []
def main(): parser = argparse.ArgumentParser(description='Tooling to ease downloading of components from TaskCluster.') parser.add_argument('--target', required=False, help='Where to put the native client binary files') parser.add_argument('--arch', required=False, help='Which architecture to download binaries for. "arm" for ARM 7 (32-bit), "arm64" for ARM64, "gpu" for CUDA enabled x86_64 binaries, "cpu" for CPU-only x86_64 binaries, "osx" for CPU-only x86_64 OSX binaries. Optional ("cpu" by default)') parser.add_argument('--artifact', required=False, default='native_client.tar.xz', help='Name of the artifact to download. Defaults to "native_client.tar.xz"') parser.add_argument('--source', required=False, default=None, help='Name of the TaskCluster scheme to use.') parser.add_argument('--branch', required=False, help='Branch name to use. Defaulting to current content of VERSION file.') parser.add_argument('--decoder', action='store_true', help='Get the decoder package instead of the native client.') args = parser.parse_args() if ((not args.target) and (not args.decoder)): print('Pass either --target or --decoder.') sys.exit(1) is_arm = ('arm' in platform.machine()) is_mac = ('darwin' in sys.platform) is_64bit = (sys.maxsize > ((2 ** 31) - 1)) is_ucs2 = (sys.maxunicode < 1114111) if (not args.arch): if is_arm: args.arch = ('arm64' if is_64bit else 'arm') elif is_mac: args.arch = 'osx' else: args.arch = 'cpu' if (not args.branch): version_string = read('../VERSION').strip() ds_version = parse_version(version_string) args.branch = 'v{}'.format(version_string) else: ds_version = parse_version(args.branch) if (args.source is not None): if (args.source in DEFAULT_SCHEMES): global TASKCLUSTER_SCHEME TASKCLUSTER_SCHEME = DEFAULT_SCHEMES[args.source] else: print(('No such scheme: %s' % args.source)) sys.exit(1) maybe_download_tc(target_dir=args.target, tc_url=get_tc_url(args.arch, args.artifact, args.branch)) if (args.artifact == 'convert_graphdef_memmapped_format'): convert_graph_file = os.path.join(args.target, args.artifact) final_stat = os.stat(convert_graph_file) os.chmod(convert_graph_file, (final_stat.st_mode | stat.S_IEXEC)) if ('.tar.' in args.artifact): subprocess.check_call(['tar', 'xvf', os.path.join(args.target, args.artifact), '-C', args.target])
class TestTransformerPitch(unittest.TestCase): def test_default(self): tfm = new_transformer() tfm.pitch(0.0) actual_args = tfm.effects expected_args = ['pitch', '0.000000'] self.assertEqual(expected_args, actual_args) actual_log = tfm.effects_log expected_log = ['pitch'] self.assertEqual(expected_log, actual_log) actual_res = tfm.build(INPUT_FILE, OUTPUT_FILE) expected_res = True self.assertEqual(expected_res, actual_res) tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, tfm) def test_n_semitones_valid(self): tfm = new_transformer() tfm.pitch((- 3.0)) actual_args = tfm.effects expected_args = ['pitch', '-300.000000'] self.assertEqual(expected_args, actual_args) actual_res = tfm.build(INPUT_FILE, OUTPUT_FILE) expected_res = True self.assertEqual(expected_res, actual_res) tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, tfm) def test_n_semitones_warning(self): tfm = new_transformer() tfm.pitch(13.0) actual_args = tfm.effects expected_args = ['pitch', '1300.000000'] self.assertEqual(expected_args, actual_args) actual_res = tfm.build(INPUT_FILE, OUTPUT_FILE) expected_res = True self.assertEqual(expected_res, actual_res) tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, tfm) def test_n_semitones_invalid(self): tfm = new_transformer() with self.assertRaises(ValueError): tfm.pitch('a') def test_quick_valid(self): tfm = new_transformer() tfm.pitch(1.0, quick=True) actual_args = tfm.effects expected_args = ['pitch', '-q', '100.000000'] self.assertEqual(expected_args, actual_args) actual_res = tfm.build(INPUT_FILE, OUTPUT_FILE) expected_res = True self.assertEqual(expected_res, actual_res) tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, tfm) def test_quick_invalid(self): tfm = new_transformer() with self.assertRaises(ValueError): tfm.pitch(1.0, quick=1)
class FileAudioDataset(RawAudioDataset): def __init__(self, manifest_path, sample_rate, max_sample_size=None, min_sample_size=None, shuffle=True, min_length=0): super().__init__(sample_rate=sample_rate, max_sample_size=max_sample_size, min_sample_size=min_sample_size, shuffle=shuffle, min_length=min_length) self.fnames = [] with open(manifest_path, 'r') as f: self.root_dir = f.readline().strip() for line in f: items = line.strip().split('\t') assert (len(items) == 2), line self.fnames.append(items[0]) self.sizes.append(int(items[1])) def __getitem__(self, index): import soundfile as sf fname = os.path.join(self.root_dir, self.fnames[index]) (wav, curr_sample_rate) = sf.read(fname) feats = torch.from_numpy(wav).float() feats = self.postprocess(feats, curr_sample_rate) return {'id': index, 'source': feats}
class LogMatchStart(LogMatchEvent): def from_dict(self): super().from_dict() self.blue_zone_custom_options = objects.BlueZoneCustomOptions(self._data.get('blueZoneCustomOptions')) self.camera_view_behaviour = self._data.get('cameraViewBehaviour') self.is_custom_game = self._data.get('isCustomGame') self.is_event_mode = self._data.get('isEventMode') self.map_name = self._data.get('mapName') self.team_size = self._data.get('teamSize') self.weather_id = self._data.get('weatherId')
class MLP(nn.Module): def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = ([hidden_dim] * (num_layers - 1)) self.layers = nn.ModuleList((nn.Linear(n, k) for (n, k) in zip(([input_dim] + h), (h + [output_dim])))) def forward(self, x): for (i, layer) in enumerate(self.layers): x = (F.relu(layer(x)) if (i < (self.num_layers - 1)) else layer(x)) return x
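A quick shape check for the MLP above (torch, nn, and F are assumed imported in the defining module):

import torch

mlp = MLP(input_dim=256, hidden_dim=128, output_dim=4, num_layers=3)
x = torch.randn(2, 256)
# ReLU is applied to every layer except the last one.
assert mlp(x).shape == (2, 4)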
class MainWindow(QWidget): STYLESHEET = "\n HintLabel {\n background-color: {{ conf.colors.hints.bg }};\n color: {{ conf.colors.hints.fg }};\n font: {{ conf.fonts.hints }};\n border: {{ conf.hints.border }};\n border-radius: {{ conf.hints.radius }}px;\n padding-top: {{ conf.hints.padding['top'] }}px;\n padding-left: {{ conf.hints.padding['left'] }}px;\n padding-right: {{ conf.hints.padding['right'] }}px;\n padding-bottom: {{ conf.hints.padding['bottom'] }}px;\n }\n\n QToolTip {\n {% if conf.fonts.tooltip %}\n font: {{ conf.fonts.tooltip }};\n {% endif %}\n {% if conf.colors.tooltip.bg %}\n background-color: {{ conf.colors.tooltip.bg }};\n {% endif %}\n {% if conf.colors.tooltip.fg %}\n color: {{ conf.colors.tooltip.fg }};\n {% endif %}\n }\n\n QMenu {\n {% if conf.fonts.contextmenu %}\n font: {{ conf.fonts.contextmenu }};\n {% endif %}\n {% if conf.colors.contextmenu.menu.bg %}\n background-color: {{ conf.colors.contextmenu.menu.bg }};\n {% endif %}\n {% if conf.colors.contextmenu.menu.fg %}\n color: {{ conf.colors.contextmenu.menu.fg }};\n {% endif %}\n }\n\n QMenu::item:selected {\n {% if conf.colors.contextmenu.selected.bg %}\n background-color: {{ conf.colors.contextmenu.selected.bg }};\n {% endif %}\n {% if conf.colors.contextmenu.selected.fg %}\n color: {{ conf.colors.contextmenu.selected.fg }};\n {% endif %}\n }\n\n QMenu::item:disabled {\n {% if conf.colors.contextmenu.disabled.bg %}\n background-color: {{ conf.colors.contextmenu.disabled.bg }};\n {% endif %}\n {% if conf.colors.contextmenu.disabled.fg %}\n color: {{ conf.colors.contextmenu.disabled.fg }};\n {% endif %}\n }\n " def __init__(self, *, private: bool, geometry: Optional[QByteArray]=None, parent: Optional[QWidget]=None) -> None: super().__init__(parent) from qutebrowser.mainwindow import tabbedbrowser from qutebrowser.mainwindow.statusbar import bar self.setAttribute(Qt.WidgetAttribute.WA_DeleteOnClose) if config.val.window.transparent: self.setAttribute(Qt.WidgetAttribute.WA_TranslucentBackground) self.palette().setColor(QPalette.ColorRole.Window, Qt.GlobalColor.transparent) self._overlays: MutableSequence[_OverlayInfoType] = [] self.win_id = next(win_id_gen) self.registry = objreg.ObjectRegistry() objreg.window_registry[self.win_id] = self objreg.register('main-window', self, scope='window', window=self.win_id) tab_registry = objreg.ObjectRegistry() objreg.register('tab-registry', tab_registry, scope='window', window=self.win_id) self.setWindowTitle('qutebrowser') self._vbox = QVBoxLayout(self) self._vbox.setContentsMargins(0, 0, 0, 0) self._vbox.setSpacing(0) self._init_downloadmanager() self._downloadview = downloadview.DownloadView(model=self._download_model) self.is_private = (config.val.content.private_browsing or private) self.tabbed_browser: tabbedbrowser.TabbedBrowser = tabbedbrowser.TabbedBrowser(win_id=self.win_id, private=self.is_private, parent=self) objreg.register('tabbed-browser', self.tabbed_browser, scope='window', window=self.win_id) self._init_command_dispatcher() self.status = bar.StatusBar(win_id=self.win_id, private=self.is_private, parent=self) self._add_widgets() self._downloadview.show() self._init_completion() log.init.debug('Initializing modes...') modeman.init(win_id=self.win_id, parent=self) self._commandrunner = runners.CommandRunner(self.win_id, partial_match=True, find_similar=True) self._keyhint = keyhintwidget.KeyHintView(self.win_id, self) self._add_overlay(self._keyhint, self._keyhint.update_geometry) self._prompt_container = prompt.PromptContainer(self.win_id, self) 
self._add_overlay(self._prompt_container, self._prompt_container.update_geometry, centered=True, padding=10) objreg.register('prompt-container', self._prompt_container, scope='window', window=self.win_id, command_only=True) self._prompt_container.hide() self._messageview = messageview.MessageView(parent=self) self._add_overlay(self._messageview, self._messageview.update_geometry) self._init_geometry(geometry) self._connect_signals() QTimer.singleShot(0, self._connect_overlay_signals) config.instance.changed.connect(self._on_config_changed) objects.qapp.new_window.emit(self) self._set_decoration(config.val.window.hide_decoration) self.state_before_fullscreen = self.windowState() self.should_raise: bool = False stylesheet.set_register(self) def _init_geometry(self, geometry): if (geometry is not None): self._load_geometry(geometry) elif (self.win_id == 0): self._load_state_geometry() else: self._set_default_geometry() log.init.debug('Initial main window geometry: {}'.format(self.geometry())) def _add_overlay(self, widget, signal, *, centered=False, padding=0): self._overlays.append((widget, signal, centered, padding)) def _update_overlay_geometries(self): for (w, _signal, centered, padding) in self._overlays: self._update_overlay_geometry(w, centered, padding) def _update_overlay_geometry(self, widget, centered, padding): if (not widget.isVisible()): return if (widget.sizePolicy().horizontalPolicy() == QSizePolicy.Policy.Expanding): width = (self.width() - (2 * padding)) if widget.hasHeightForWidth(): height = widget.heightForWidth(width) else: height = widget.sizeHint().height() left = padding else: size_hint = widget.sizeHint() width = min(size_hint.width(), (self.width() - (2 * padding))) height = size_hint.height() left = (((self.width() - width) // 2) if centered else 0) height_padding = 20 status_position = config.val.statusbar.position if (status_position == 'bottom'): if self.status.isVisible(): status_height = self.status.height() bottom = self.status.geometry().top() else: status_height = 0 bottom = self.height() top = ((self.height() - status_height) - height) top = qtutils.check_overflow(top, 'int', fatal=False) topleft = QPoint(left, max(height_padding, top)) bottomright = QPoint((left + width), bottom) elif (status_position == 'top'): if self.status.isVisible(): status_height = self.status.height() top = self.status.geometry().bottom() else: status_height = 0 top = 0 topleft = QPoint(left, top) bottom = (status_height + height) bottom = qtutils.check_overflow(bottom, 'int', fatal=False) bottomright = QPoint((left + width), min((self.height() - height_padding), bottom)) else: raise ValueError('Invalid position {}!'.format(status_position)) rect = QRect(topleft, bottomright) log.misc.debug('new geometry for {!r}: {}'.format(widget, rect)) if rect.isValid(): widget.setGeometry(rect) def _init_downloadmanager(self): log.init.debug('Initializing downloads...') qtnetwork_download_manager = objreg.get('qtnetwork-download-manager') try: webengine_download_manager = objreg.get('webengine-download-manager') except KeyError: webengine_download_manager = None self._download_model = downloads.DownloadModel(qtnetwork_download_manager, webengine_download_manager) objreg.register('download-model', self._download_model, scope='window', window=self.win_id, command_only=True) def _init_completion(self): self._completion = completionwidget.CompletionView(cmd=self.status.cmd, win_id=self.win_id, parent=self) completer_obj = completer.Completer(cmd=self.status.cmd, win_id=self.win_id, 
parent=self._completion) self._completion.selection_changed.connect(completer_obj.on_selection_changed) objreg.register('completion', self._completion, scope='window', window=self.win_id, command_only=True) self._add_overlay(self._completion, self._completion.update_geometry) def _init_command_dispatcher(self): from qutebrowser.browser import commands self._command_dispatcher = commands.CommandDispatcher(self.win_id, self.tabbed_browser) objreg.register('command-dispatcher', self._command_dispatcher, command_only=True, scope='window', window=self.win_id) widget = self.tabbed_browser.widget widget.destroyed.connect(functools.partial(objreg.delete, 'command-dispatcher', scope='window', window=self.win_id)) def __repr__(self): return utils.get_repr(self) @pyqtSlot(str) def _on_config_changed(self, option): if (option == 'statusbar.padding'): self._update_overlay_geometries() elif (option == 'downloads.position'): self._add_widgets() elif (option == 'statusbar.position'): self._add_widgets() self._update_overlay_geometries() elif (option == 'window.hide_decoration'): self._set_decoration(config.val.window.hide_decoration) def _add_widgets(self): self._vbox.removeWidget(self.tabbed_browser.widget) self._vbox.removeWidget(self._downloadview) self._vbox.removeWidget(self.status) widgets: List[QWidget] = [self.tabbed_browser.widget] downloads_position = config.val.downloads.position if (downloads_position == 'top'): widgets.insert(0, self._downloadview) elif (downloads_position == 'bottom'): widgets.append(self._downloadview) else: raise ValueError('Invalid position {}!'.format(downloads_position)) status_position = config.val.statusbar.position if (status_position == 'top'): widgets.insert(0, self.status) elif (status_position == 'bottom'): widgets.append(self.status) else: raise ValueError('Invalid position {}!'.format(status_position)) for widget in widgets: self._vbox.addWidget(widget) def _load_state_geometry(self): try: data = configfiles.state['geometry']['mainwindow'] geom = base64.b64decode(data, validate=True) except KeyError: self._set_default_geometry() except binascii.Error: log.init.exception('Error while reading geometry') self._set_default_geometry() else: self._load_geometry(geom) def _save_geometry(self): data = self.saveGeometry().data() geom = base64.b64encode(data).decode('ASCII') configfiles.state['geometry']['mainwindow'] = geom def _load_geometry(self, geom): log.init.debug('Loading mainwindow from {!r}'.format(geom)) ok = self.restoreGeometry(geom) if (not ok): log.init.warning('Error while loading geometry.') self._set_default_geometry() def _connect_overlay_signals(self): for (widget, signal, centered, padding) in self._overlays: signal.connect(functools.partial(self._update_overlay_geometry, widget, centered, padding)) self._update_overlay_geometry(widget, centered, padding) def _set_default_geometry(self): self.setGeometry(QRect(50, 50, 800, 600)) def _connect_signals(self): mode_manager = modeman.instance(self.win_id) self._prompt_container.release_focus.connect(self.tabbed_browser.on_release_focus) self.tabbed_browser.close_window.connect(self.close) mode_manager.entered.connect(hints.on_mode_entered) mode_manager.hintmanager.set_text.connect(self.status.set_text) mode_manager.entered.connect(self.status.on_mode_entered) mode_manager.left.connect(self.status.on_mode_left) mode_manager.left.connect(self.status.cmd.on_mode_left) mode_manager.left.connect(message.global_bridge.mode_left) mode_manager.keystring_updated.connect(self.status.keystring.on_keystring_updated)
self.status.cmd.got_cmd[str].connect(self._commandrunner.run_safely) self.status.cmd.got_cmd[(str, int)].connect(self._commandrunner.run_safely) self.status.cmd.returnPressed.connect(self.tabbed_browser.on_cmd_return_pressed) self.status.cmd.got_search.connect(self._command_dispatcher.search) mode_manager.keystring_updated.connect(self._keyhint.update_keyhint) message.global_bridge.show_message.connect(self._messageview.show_message) message.global_bridge.flush() message.global_bridge.clear_messages.connect(self._messageview.clear_messages) self.tabbed_browser.current_tab_changed.connect(self.status.on_tab_changed) self.tabbed_browser.cur_progress.connect(self.status.prog.on_load_progress) self.tabbed_browser.cur_load_started.connect(self.status.prog.on_load_started) self.tabbed_browser.cur_scroll_perc_changed.connect(self.status.percentage.set_perc) self.tabbed_browser.widget.tab_index_changed.connect(self.status.tabindex.on_tab_index_changed) self.tabbed_browser.cur_url_changed.connect(self.status.url.set_url) self.tabbed_browser.cur_url_changed.connect(functools.partial(self.status.backforward.on_tab_cur_url_changed, tabs=self.tabbed_browser)) self.tabbed_browser.cur_link_hovered.connect(self.status.url.set_hover_url) self.tabbed_browser.cur_load_status_changed.connect(self.status.url.on_load_status_changed) self.tabbed_browser.cur_search_match_changed.connect(self.status.search_match.set_match) self.tabbed_browser.cur_caret_selection_toggled.connect(self.status.on_caret_selection_toggled) self.tabbed_browser.cur_fullscreen_requested.connect(self._on_fullscreen_requested) self.tabbed_browser.cur_fullscreen_requested.connect(self.status.maybe_hide) self.tabbed_browser.cur_fullscreen_requested.connect(self._downloadview.on_fullscreen_requested) mode_manager.entered.connect(self.tabbed_browser.on_mode_entered) mode_manager.left.connect(self.tabbed_browser.on_mode_left) self.status.cmd.clear_completion_selection.connect(self._completion.on_clear_completion_selection) self.status.cmd.hide_completion.connect(self._completion.hide) self.status.cmd.hide_cmd.connect(self.tabbed_browser.on_release_focus) def _set_decoration(self, hidden): if machinery.IS_QT5: window_flags = cast(Qt.WindowFlags, Qt.WindowType.Window) else: window_flags = Qt.WindowType.Window refresh_window = self.isVisible() if hidden: modifiers = (Qt.WindowType.CustomizeWindowHint | Qt.WindowType.NoDropShadowWindowHint) window_flags |= modifiers self.setWindowFlags(window_flags) if (utils.is_mac and hidden and (not qtutils.version_check('6.3', compiled=False))): from ctypes import c_void_p from objc import objc_object from AppKit import NSWindowStyleMaskResizable win = objc_object(c_void_p=c_void_p(int(self.winId()))).window() win.setStyleMask_((win.styleMask() | NSWindowStyleMaskResizable)) if refresh_window: self.show() @pyqtSlot(bool) def _on_fullscreen_requested(self, on): if (not config.val.content.fullscreen.window): if on: self.state_before_fullscreen = self.windowState() self.setWindowState((Qt.WindowState.WindowFullScreen | self.state_before_fullscreen)) elif self.isFullScreen(): self.setWindowState(self.state_before_fullscreen) log.misc.debug('on: {}, state before fullscreen: {}'.format(on, debug.qflags_key(Qt, self.state_before_fullscreen))) @cmdutils.register(instance='main-window', scope='window') @pyqtSlot() def close(self): super().close() def resizeEvent(self, e): super().resizeEvent(e) self._update_overlay_geometries() self._downloadview.updateGeometry() self.tabbed_browser.widget.tab_bar().refresh() def showEvent(self, e): super().showEvent(e)
objreg.register('last-visible-main-window', self, update=True) def _confirm_quit(self): tab_count = self.tabbed_browser.widget.count() window_count = len(objreg.window_registry) download_count = self._download_model.running_downloads() quit_texts = [] if (('multiple-tabs' in config.val.confirm_quit) and (tab_count > 1)): quit_texts.append('{} tabs are open.'.format(tab_count)) if (('downloads' in config.val.confirm_quit) and (download_count > 0) and (window_count <= 1)): quit_texts.append('{} {} running.'.format(download_count, ('download is' if (download_count == 1) else 'downloads are'))) if (quit_texts or ('always' in config.val.confirm_quit)): msg = jinja.environment.from_string('\n <ul>\n {% for text in quit_texts %}\n <li>{{text}}</li>\n {% endfor %}\n </ul>\n '.strip()).render(quit_texts=quit_texts) confirmed = message.ask('Really quit?', msg, mode=usertypes.PromptMode.yesno, default=True) if (not confirmed): log.destroy.debug('Cancelling closing of window {}'.format(self.win_id)) return False return True def maybe_raise(self) -> None: if self.should_raise: raise_window(self) self.should_raise = False def closeEvent(self, e): if crashsignal.crash_handler.is_crashing: e.accept() return if (not self._confirm_quit()): e.ignore() return e.accept() for key in ['last-visible-main-window', 'last-focused-main-window']: try: win = objreg.get(key) if (self is win): objreg.delete(key) except KeyError: pass sessions.session_manager.save_last_window_session() self._save_geometry() if (self.is_private and (len(objreg.window_registry) > 1) and (len([window for window in objreg.window_registry.values() if window.is_private]) == 1)): log.destroy.debug('Wiping private data before closing last private window') websettings.clear_private_data() log.destroy.debug('Closing window {}'.format(self.win_id)) self.tabbed_browser.shutdown()
@dataclass class DataTrainingArguments(): data_dir: Optional[str] = field(default=None, metadata={'help': 'Directory to a Universal Dependencies data folder.'}) max_seq_length: Optional[int] = field(default=196, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'}) overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
class LogicalOrExpressionNode(ExpressionNode): def __init__(self, left, right): self.left = left self.right = right def evaluate(self, context): return (self.left.evaluate(context) or self.right.evaluate(context)) def __str__(self): return ('(%s || %s)' % (self.left, self.right))
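A hedged sketch of how the node composes; LiteralNode is a hypothetical leaf standing in for whatever terminal ExpressionNode the surrounding grammar defines.

class LiteralNode:
    def __init__(self, value):
        self.value = value

    def evaluate(self, context):
        return self.value

    def __str__(self):
        return str(self.value)

expr = LogicalOrExpressionNode(LiteralNode(False), LiteralNode(True))
assert expr.evaluate({}) is True
print(expr)  # (False || True)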
def test_solver_can_resolve_sdist_dependencies(solver: Solver, repo: Repository, package: ProjectPackage, fixture_dir: FixtureDirGetter) -> None: pendulum = get_package('pendulum', '2.0.3') repo.add_package(pendulum) path = (fixture_dir('distributions') / 'demo-0.1.0.tar.gz').as_posix() package.add_dependency(Factory.create_dependency('demo', {'path': path})) transaction = solver.solve() demo = Package('demo', '0.1.0', source_type='file', source_url=path) ops = check_solver_result(transaction, [{'job': 'install', 'package': pendulum}, {'job': 'install', 'package': demo}]) op = ops[1] assert (op.package.name == 'demo') assert (op.package.version.text == '0.1.0') assert (op.package.source_type == 'file') assert (op.package.source_url == path)
@v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['GET']) @disallow_for_account_recovery_mode @parse_repository_name() @process_registry_jwt_auth(scopes=['pull']) @require_repo_read(allow_for_superuser=True) @anon_protect @inject_registry_model() def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref, registry_model): try: repository_ref = registry_model.lookup_repository(namespace_name, repo_name, raise_on_error=True, manifest_ref=manifest_ref) except RepositoryDoesNotExist as e: image_pulls.labels('v2', 'manifest', 404).inc() raise NameUnknown('repository not found') try: manifest = registry_model.lookup_manifest_by_digest(repository_ref, manifest_ref, raise_on_error=True, allow_hidden=True) except ManifestDoesNotExist as e: image_pulls.labels('v2', 'manifest', 404).inc() raise ManifestUnknown(str(e)) track_and_log('pull_repo', repository_ref, manifest_digest=manifest_ref) image_pulls.labels('v2', 'manifest', 200).inc() return Response(manifest.internal_manifest_bytes.as_unicode(), status=200, headers={'Content-Type': manifest.media_type, 'Docker-Content-Digest': manifest.digest})
def compute_sublist_prob(sub_list): if (len(sub_list) == 0): sys.exit('compute_sentence_probs_arpa.py: Ngram substring not found in arpa language model, please check.') sub_string = ' '.join(sub_list) if (sub_string in ngram_dict): return (- float(ngram_dict[sub_string][0][1:])) else: backoff_substring = ' '.join(sub_list[:(- 1)]) backoff_weight = (0.0 if ((backoff_substring not in ngram_dict) or (len(ngram_dict[backoff_substring]) < 2)) else (- float(ngram_dict[backoff_substring][1][1:]))) return (compute_sublist_prob(sub_list[1:]) + backoff_weight)
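A hedged illustration of the ngram_dict layout this function assumes: each n-gram maps to a list whose first entry is the ARPA log10 probability and whose optional second entry is the backoff weight, both stored as strings whose leading character is stripped before negating.

ngram_dict = {
    'hello world': ['-0.30103'],
    'hello': ['-0.60206', '-0.17609'],
    'world': ['-0.47712'],
}
# 'hello world' is present, so its log10 probability is returned directly.
print(compute_sublist_prob(['hello', 'world']))  # -0.30103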
def send_request(path: str, method: str, body: str=None, token: str=None) -> dict: current_time = str(int(time.time())) nonce = ''.join(random.choices((string.ascii_lowercase + string.digits), k=32)) raw = ((((path + current_time) + nonce) + method) + api_key) raw = raw.lower() h = hmac.new(api_secret.encode(), digestmod=hashlib.sha256) h.update(raw.encode()) signature = h.hexdigest() headers = static_headers.copy() headers['time'] = current_time headers['nonce'] = nonce headers['signature'] = signature if (body is not None): headers['Content-Type'] = 'application/json; charset=UTF-8' if (token is not None): headers['authorization'] = token connection = client.HTTPSConnection(pica_api_host) connection.request(method, ('/' + path), body, headers) response = connection.getresponse().read().decode('utf-8') json_object = json.loads(response) if (json_object['code'] != 200): if (SEND_KEY != ''): sendNotify.send(title=u'', msg=' ') print(json_object['message']) raise RuntimeError(json_object['message']) return json_object
def get_clients_at_depth(fgraph: FunctionGraph, node: Apply, depth: int) -> Generator[(Apply, None, None)]: for var in node.outputs: if (depth > 0): for (out_node, _) in fgraph.clients[var]: if (out_node == 'output'): continue (yield from get_clients_at_depth(fgraph, cast(Apply, out_node), (depth - 1))) else: assert (var.owner is not None) (yield var.owner)
@pytest.mark.unit() @pytest.mark.parametrize(('expr', 'column', 'message'), [('(', 2, 'expected not OR left parenthesis OR identifier; got end of input'), (' (', 3, 'expected not OR left parenthesis OR identifier; got end of input'), (')', 1, 'expected not OR left parenthesis OR identifier; got right parenthesis'), (') ', 1, 'expected not OR left parenthesis OR identifier; got right parenthesis'), ('not', 4, 'expected not OR left parenthesis OR identifier; got end of input'), ('not not', 8, 'expected not OR left parenthesis OR identifier; got end of input'), ('(not)', 5, 'expected not OR left parenthesis OR identifier; got right parenthesis'), ('and', 1, 'expected not OR left parenthesis OR identifier; got and'), ('ident and', 10, 'expected not OR left parenthesis OR identifier; got end of input'), ('ident and or', 11, 'expected not OR left parenthesis OR identifier; got or'), ('ident ident', 7, 'expected end of input; got identifier')]) def test_syntax_errors(expr: str, column: int, message: str) -> None: with pytest.raises(ParseError) as excinfo: evaluate(expr, (lambda ident: True)) assert (excinfo.value.column == column) assert (excinfo.value.message == message)
def format_subnet(subnet): try: subnet_obj = ipaddress.ip_network(subnet) except ValueError: raise BadParam(('invalid subnet: %s' % subnet), msg_ch=(': %s' % subnet)) start_ip = subnet_obj.network_address end_ip = subnet_obj.broadcast_address is_ipv6 = (subnet_obj.version == 6) return (str(subnet_obj), is_ipv6, float(int(start_ip)), float(int(end_ip)))
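Example of the tuple format_subnet returns: the normalized network string, an IPv6 flag, and the first/last addresses as floats (handy for range comparisons in a database).

import ipaddress  # already required by format_subnet above

print(format_subnet('192.168.1.0/24'))
# ('192.168.1.0/24', False, 3232235776.0, 3232236031.0)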
class VGG16_DM(nn.Module): def __init__(self, load_weights=True): super(VGG16_DM, self).__init__() self.layer5 = self.VGG_make_layers([64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']) self.reg_layer = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(128, 1, 1)) if load_weights: mod = models.vgg16(pretrained=False) pretrain_path = './models/Pretrain_Model/vgg16-397923af.pth' mod.load_state_dict(torch.load(pretrain_path)) print(('loaded pretrain model: ' + pretrain_path)) self._initialize_weights() self.layer5.load_state_dict(mod.features[0:31].state_dict()) def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight, std=0.01) if (m.bias is not None): nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def forward(self, x): x = self.layer5(x) x = self.reg_layer(x) x = F.upsample_bilinear(x, scale_factor=4) return x def VGG_make_layers(self, cfg, in_channels=3, batch_norm=False, dilation=1): d_rate = dilation layers = [] for v in cfg: if (v == 'M'): layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=d_rate, dilation=d_rate) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers)
@v2_bp.route('/<repopath:repository>/blobs/uploads/', methods=['POST']) @disallow_for_account_recovery_mode @parse_repository_name() @process_registry_jwt_auth(scopes=['pull', 'push']) @require_repo_write(allow_for_superuser=True, disallow_for_restricted_users=True) @anon_protect @check_readonly def start_blob_upload(namespace_name, repo_name): repository_ref = registry_model.lookup_repository(namespace_name, repo_name) if (repository_ref is None): raise NameUnknown('repository not found') if app.config.get('FEATURE_QUOTA_MANAGEMENT', False): quota = namespacequota.verify_namespace_quota(repository_ref) if (quota['severity_level'] == 'Reject'): namespacequota.notify_organization_admins(repository_ref, 'quota_error', {'severity': 'Reject'}) raise QuotaExceeded mount_blob_digest = request.args.get('mount', None) if (mount_blob_digest is not None): response = _try_to_mount_blob(repository_ref, mount_blob_digest) if (response is not None): return response blob_uploader = create_blob_upload(repository_ref, storage, _upload_settings()) if (blob_uploader is None): logger.debug('Could not create a blob upload for `%s/%s`', namespace_name, repo_name) raise InvalidRequest(message='Unable to start blob upload for unknown repository') digest = request.args.get('digest', None) if (digest is None): return Response(status=202, headers={'Docker-Upload-UUID': blob_uploader.blob_upload_id, 'Range': _render_range(0), 'Location': (get_app_url() + url_for('v2.upload_chunk', repository=('%s/%s' % (namespace_name, repo_name)), upload_uuid=blob_uploader.blob_upload_id))}) with complete_when_uploaded(blob_uploader): _upload_chunk(blob_uploader, digest) return Response(status=201, headers={'Docker-Content-Digest': digest, 'Location': (get_app_url() + url_for('v2.download_blob', repository=('%s/%s' % (namespace_name, repo_name)), digest=digest))})
@mypyc_attr(allow_interpreted_subclasses=True) class FileSystemCache(): def __init__(self) -> None: self.package_root: list[str] = [] self.flush() def set_package_root(self, package_root: list[str]) -> None: self.package_root = package_root def flush(self) -> None: self.stat_cache: dict[(str, os.stat_result)] = {} self.stat_error_cache: dict[(str, OSError)] = {} self.listdir_cache: dict[(str, list[str])] = {} self.listdir_error_cache: dict[(str, OSError)] = {} self.isfile_case_cache: dict[(str, bool)] = {} self.exists_case_cache: dict[(str, bool)] = {} self.read_cache: dict[(str, bytes)] = {} self.read_error_cache: dict[(str, Exception)] = {} self.hash_cache: dict[(str, str)] = {} self.fake_package_cache: set[str] = set() def stat(self, path: str) -> os.stat_result: if (path in self.stat_cache): return self.stat_cache[path] if (path in self.stat_error_cache): raise copy_os_error(self.stat_error_cache[path]) try: st = os.stat(path) except OSError as err: if self.init_under_package_root(path): try: return self._fake_init(path) except OSError: pass self.stat_error_cache[path] = copy_os_error(err) raise err self.stat_cache[path] = st return st def init_under_package_root(self, path: str) -> bool: if (not self.package_root): return False (dirname, basename) = os.path.split(path) if (basename != '__init__.py'): return False if (not os.path.basename(dirname).isidentifier()): return False try: st = self.stat(dirname) except OSError: return False else: if (not stat.S_ISDIR(st.st_mode)): return False ok = False (drive, path) = os.path.splitdrive(path) if os.path.isabs(path): path = os.path.relpath(path) path = os.path.normpath(path) for root in self.package_root: if path.startswith(root): if (path == (root + basename)): ok = False break else: ok = True return ok def _fake_init(self, path: str) -> os.stat_result: (dirname, basename) = os.path.split(path) assert (basename == '__init__.py'), path assert (not os.path.exists(path)), path dirname = os.path.normpath(dirname) st = self.stat(dirname) seq: list[float] = list(st) seq[stat.ST_MODE] = (stat.S_IFREG | 292) seq[stat.ST_INO] = 1 seq[stat.ST_NLINK] = 1 seq[stat.ST_SIZE] = 0 st = os.stat_result(seq) self.stat_cache[path] = st self.fake_package_cache.add(dirname) return st def listdir(self, path: str) -> list[str]: path = os.path.normpath(path) if (path in self.listdir_cache): res = self.listdir_cache[path] if ((path in self.fake_package_cache) and ('__init__.py' not in res)): res.append('__init__.py') return res if (path in self.listdir_error_cache): raise copy_os_error(self.listdir_error_cache[path]) try: results = os.listdir(path) except OSError as err: self.listdir_error_cache[path] = copy_os_error(err) raise err self.listdir_cache[path] = results if ((path in self.fake_package_cache) and ('__init__.py' not in results)): results.append('__init__.py') return results def isfile(self, path: str) -> bool: try: st = self.stat(path) except OSError: return False return stat.S_ISREG(st.st_mode) def isfile_case(self, path: str, prefix: str) -> bool: if (not self.isfile(path)): return False if (path in self.isfile_case_cache): return self.isfile_case_cache[path] (head, tail) = os.path.split(path) if (not tail): self.isfile_case_cache[path] = False return False try: names = self.listdir(head) res = (tail in names) except OSError: res = False if res: res = self.exists_case(head, prefix) self.isfile_case_cache[path] = res return res def exists_case(self, path: str, prefix: str) -> bool: if (path in self.exists_case_cache): return self.exists_case_cache[path] (head,
tail) = os.path.split(path) if ((not head.startswith(prefix)) or (not tail)): self.exists_case_cache[path] = True return True try: names = self.listdir(head) res = (tail in names) except OSError: res = False if res: res = self.exists_case(head, prefix) self.exists_case_cache[path] = res return res def isdir(self, path: str) -> bool: try: st = self.stat(path) except OSError: return False return stat.S_ISDIR(st.st_mode) def exists(self, path: str) -> bool: try: self.stat(path) except FileNotFoundError: return False return True def read(self, path: str) -> bytes: if (path in self.read_cache): return self.read_cache[path] if (path in self.read_error_cache): raise self.read_error_cache[path] self.stat(path) (dirname, basename) = os.path.split(path) dirname = os.path.normpath(dirname) if ((basename == '__init__.py') and (dirname in self.fake_package_cache)): data = b'' else: try: with open(path, 'rb') as f: data = f.read() except OSError as err: self.read_error_cache[path] = err raise self.read_cache[path] = data self.hash_cache[path] = hash_digest(data) return data def hash_digest(self, path: str) -> str: if (path not in self.hash_cache): self.read(path) return self.hash_cache[path] def samefile(self, f1: str, f2: str) -> bool: s1 = self.stat(f1) s2 = self.stat(f2) return os.path.samestat(s1, s2)
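A small exercise of the cache above, assuming the module-level helpers it calls (copy_os_error, hash_digest) are available; repeated reads are served from memory until flush() clears every cache.

fscache = FileSystemCache()
data = fscache.read(__file__)            # fills read_cache and hash_cache
assert fscache.read(__file__) is data    # second read returns the cached bytes object
digest = fscache.hash_digest(__file__)   # served from hash_cache, no re-read
fscache.flush()                          # drops all cached stat/listdir/read results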
@dataclass class DataCollatorForWav2Vec2Pretraining(): model: Wav2Vec2ForPreTraining feature_extractor: Wav2Vec2FeatureExtractor padding: Union[(bool, str)] = 'longest' pad_to_multiple_of: Optional[int] = None mask_time_prob: Optional[float] = 0.65 mask_time_length: Optional[int] = 10 def __call__(self, features: List[Dict[(str, Union[(List[int], torch.Tensor)])]]) -> Dict[(str, torch.Tensor)]: batch = self.feature_extractor.pad(features, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt') device = batch['input_values'].device batch_size = batch['input_values'].shape[0] mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[(- 1)]) mask_indices_seq_length = int(mask_indices_seq_length) if (batch.get('attention_mask') is not None): batch['sub_attention_mask'] = self.model._get_feature_vector_attention_mask(mask_indices_seq_length, batch['attention_mask']) features_shape = (batch_size, mask_indices_seq_length) mask_time_indices = _compute_mask_indices(features_shape, self.mask_time_prob, self.mask_time_length, attention_mask=batch.get('sub_attention_mask')) sampled_negative_indices = _sample_negative_indices(features_shape, self.model.config.num_negatives, mask_time_indices=mask_time_indices) batch['mask_time_indices'] = torch.tensor(mask_time_indices, dtype=torch.long, device=device) batch['sampled_negative_indices'] = torch.tensor(sampled_negative_indices, dtype=torch.long, device=device) return batch
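A hedged wiring sketch for the collator above, mirroring the Hugging Face wav2vec2 pretraining example it follows; the facebook/wav2vec2-base checkpoint name is an assumption.

from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining

model = Wav2Vec2ForPreTraining.from_pretrained('facebook/wav2vec2-base')
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('facebook/wav2vec2-base')
# The collator pads raw waveforms and samples the masked time steps and
# negative indices that the contrastive pretraining objective needs.
collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)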
@pytest.mark.parametrize('username,password', users) def test_create_update(db, client, username, password, json_data): client.login(username=username, password=password) url = reverse(urlnames['list']) response = client.post(url, json_data, content_type='application/json') assert (response.status_code == status_map['create'][username]), response.json() if (response.status_code == 200): for element in response.json(): assert (element.get('created') is False) assert ((element.get('updated') is False) if (username in ['reviewer', 'user']) else True)
def MNIST(train=True, batch_size=None, augm_flag=True): if (batch_size is None): if train: batch_size = train_batch_size else: batch_size = test_batch_size transform_base = [transforms.ToTensor()] transform_train = transforms.Compose(([transforms.RandomCrop(28, padding=2)] + transform_base)) transform_test = transforms.Compose(transform_base) transform_train = transforms.RandomChoice([transform_train, transform_test]) transform = (transform_train if (augm_flag and train) else transform_test) dataset = datasets.MNIST(path, train=train, transform=transform) loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=train, num_workers=4) return loader
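Usage sketch; path, train_batch_size, and test_batch_size are module-level globals the loader above assumes.

train_loader = MNIST(train=True, augm_flag=True)
images, targets = next(iter(train_loader))
print(images.shape)  # torch.Size([train_batch_size, 1, 28, 28])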
class DBusProperty(): IFACE = 'org.freedesktop.DBus.Properties' ISPEC = '\n<method name="Get">\n <arg type="s" name="interface_name" direction="in"/>\n <arg type="s" name="property_name" direction="in"/>\n <arg type="v" name="value" direction="out"/>\n</method>\n<method name="GetAll">\n <arg type="s" name="interface_name" direction="in"/>\n <arg type="a{sv}" name="properties" direction="out"/>\n</method>\n<method name="Set">\n <arg type="s" name="interface_name" direction="in"/>\n <arg type="s" name="property_name" direction="in"/>\n <arg type="v" name="value" direction="in"/>\n</method>\n<signal name="PropertiesChanged">\n <arg type="s" name="interface_name"/>\n <arg type="a{sv}" name="changed_properties"/>\n <arg type="as" name="invalidated_properties"/>\n</signal>' def __init__(self): self.__props = {} self.__impl = {} self.set_introspection(DBusProperty.IFACE, DBusProperty.ISPEC) def set_properties(self, interface, ispec, bl=None, wl=None): ispec = filter_property_spec(ispec, wl=wl, bl=bl) self.__props[interface] = list_spec_properties(ispec) self.__impl.setdefault(interface, []) self.set_introspection(interface, ispec) def get_properties(self, interface): result = [(interface, p) for p in self.__props[interface].keys()] for sub in self.__impl[interface]: result.extend(self.get_properties(sub)) return result def get_value(self, interface, prop, path='/'): interface = self.get_interface(interface, prop) if self.SUPPORTS_MULTIPLE_OBJECT_PATHS: value = self.get_property(interface, prop, path) else: value = self.get_property(interface, prop) prop_sig = self.__props[interface][prop]['type'] return apply_signature(value, prop_sig) def get_interface(self, interface, prop): if (prop in self.__props[interface]): return interface for sub in self.__impl[interface]: if self.get_interface(sub, prop): return sub def implement_interface(self, iface, sub_iface): self.__props.setdefault(iface, {}) self.__props.setdefault(sub_iface, {}) self.__impl.setdefault(iface, []).append(sub_iface) def emit_properties_changed(self, interface, properties, path='/'): combos = {} for prop in properties: iface = self.get_interface(interface, prop) if (iface is None): raise ValueError(('Property %s not registered' % prop)) combos.setdefault(iface, []).append(prop) for (iface, props) in combos.items(): values = {} inval = [] for prop in props: emit = self.__props[iface][prop]['emit'] if (emit == 'false'): raise ValueError(("Can't emit changed signal for %s" % prop)) elif (emit == 'true'): values[prop] = self.get_value(iface, prop, path) elif (emit == 'invalidates'): inval.append(prop) if self.SUPPORTS_MULTIPLE_OBJECT_PATHS: self.PropertiesChanged(iface, values, inval, rel=path) else: self.PropertiesChanged(iface, values, inval) @dbus.service.method(dbus_interface=IFACE, in_signature='ss', out_signature='v', rel_path_keyword='path') def Get(self, interface, prop, path): return self.get_value(interface, prop, path) @dbus.service.method(dbus_interface=IFACE, in_signature='ssv', out_signature='', rel_path_keyword='path') def Set(self, interface, prop, value, path): interface = self.get_interface(interface, prop) if self.SUPPORTS_MULTIPLE_OBJECT_PATHS: self.set_property(interface, prop, value, path) else: self.set_property(interface, prop, value) @dbus.service.method(dbus_interface=IFACE, in_signature='s', out_signature='a{sv}', rel_path_keyword='path') def GetAll(self, interface, path): values = {} for (iface, prop) in self.get_properties(interface): values[prop] = self.get_value(iface, prop, path) return values @dbus.service.signal(IFACE, signature='sa{sv}as', rel_path_keyword='rel') def PropertiesChanged(self, interface, changed, invalidated, rel=''): pass
class ImageNetTrainer(): def __init__(self, tfrecord_dir: str, training_inputs: List[str], data_inputs: List[str], validation_inputs: List[str], image_size: int=224, batch_size: int=128, num_epochs: int=1, format_bgr: bool=False, model_type: str='resnet'): if (not data_inputs): raise ValueError('data_inputs list cannot be empty for imagenet') self._data_inputs = data_inputs if (not validation_inputs): raise ValueError('validation_inputs list cannot be empty for imagenet') self._validation_inputs = validation_inputs if (not training_inputs): raise ValueError('training_inputs list cannot be empty for imagenet') self._training_inputs = training_inputs self._train_data_loaders = ImageNetDataLoader(tfrecord_dir=tfrecord_dir, image_size=image_size, batch_size=batch_size, num_epochs=num_epochs, format_bgr=format_bgr, is_training=True, model_type=model_type) self._tfrecord_dir = tfrecord_dir self._image_size = image_size self._batch_size = batch_size self._num_epochs = num_epochs self._format_bgr = format_bgr self._model_type = model_type @staticmethod def remove_disconnected_ops(ops_list): connected_ops = [] for op in ops_list: stack = [op] seen = {op.name} while stack: n_op = stack.pop() for z in n_op.outputs: if (z.op.name not in seen): stack.append(z.op) seen.add(z.op.name) for z in n_op.inputs: if (z.op.name not in seen): stack.append(z.op) seen.add(z.op.name) if ('input_1' in seen): connected_ops.append(op) return connected_ops def _evaluate_(self, session: tf.Session) -> float: image_net_eval = ImageNetEvaluator(tfrecord_dir=self._tfrecord_dir, training_inputs=self._training_inputs, data_inputs=self._data_inputs, validation_inputs=self._validation_inputs, image_size=self._image_size, batch_size=self._batch_size, format_bgr=self._format_bgr, model_type=self._model_type) return image_net_eval.evaluate(session, iterations=None) def train(self, session: tf.Session, update_ops_name: List[str]=None, iterations: int=None, learning_rate: float=0.001, decay_rate: float=0.1, decay_steps: int=None, debug_steps: int=1000): if (iterations is None): iterations = (image_net_config.dataset['test_images_len'] // self._batch_size) input_label_tensors = [session.graph.get_tensor_by_name(input_label) for input_label in (tuple(self._data_inputs) + tuple(self._validation_inputs))] train_tensors = [session.graph.get_tensor_by_name(training_input) for training_input in self._training_inputs] train_tensors_dict = dict.fromkeys(train_tensors, True) if (not update_ops_name): update_ops_name = [] graph_all_ops_name = [op.name for op in session.graph.get_operations()] update_ops_name = set(update_ops_name).intersection(graph_all_ops_name) update_ops = [session.graph.get_operation_by_name(op_name) for op_name in update_ops_name] with session.graph.as_default(): loss_op = tf.get_collection(tf.GraphKeys.LOSSES)[0] global_step_op = tf.train.get_global_step() if (global_step_op is None): global_step_op = tf.train.create_global_step() if decay_steps: learning_rate_op = tf.train.exponential_decay(learning_rate, global_step=global_step_op, decay_steps=(decay_steps * iterations), decay_rate=decay_rate, staircase=True, name='exponential_decay_learning_rate') else: learning_rate_op = learning_rate optimizer_op = tf.train.MomentumOptimizer(learning_rate=learning_rate_op, momentum=0.9) update_ops = set(update_ops).union(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) connected_update_ops = self.remove_disconnected_ops(update_ops) with tf.control_dependencies(connected_update_ops): train_op = optimizer_op.minimize(loss_op,
global_step=global_step_op) graph_eval.initialize_uninitialized_vars(session) logger.info('Training graph for %d iterations with batch_size %d for %d Epochs', iterations, self._batch_size, self._num_epochs) for current_epoch in range(1, (self._num_epochs + 1)): avg_loss = 0.0 curr_iter = 0 with progressbar.ProgressBar(max_value=iterations) as progress_bar: for input_label in self._train_data_loaders: input_label_tensors_dict = dict(zip(input_label_tensors, input_label)) feed_dict = {**input_label_tensors_dict, **train_tensors_dict} with session.graph.as_default(): (batch_loss_val, _) = session.run([loss_op, train_op], feed_dict=feed_dict) avg_loss += batch_loss_val curr_iter += 1 progress_bar.update(curr_iter) if (((curr_iter - 1) % debug_steps) == 0): eval_accuracy = self._evaluate_(session) logger.info('Epoch #%d/%d: iteration #%d/%d: Global Avg Loss=%f, Eval Accuracy=%f', current_epoch, self._num_epochs, curr_iter, iterations, (avg_loss / curr_iter), eval_accuracy) if (curr_iter >= iterations): break eval_accuracy = self._evaluate_(session) logger.info('At the end of Epoch #%d/%d: Global Avg Loss=%f, Eval Accuracy=%f', current_epoch, self._num_epochs, (avg_loss / curr_iter), eval_accuracy)
class LeadingOrderDifferential(BaseLeadingOrderSurfaceForm): def __init__(self, param, domain, options=None): super().__init__(param, domain, options) def set_rhs(self, variables): domain = self.domain sum_a_j = variables[f'Sum of x-averaged {domain} electrode volumetric interfacial current densities [A.m-3]'] sum_a_j_av = variables[f'X-averaged {domain} electrode total volumetric interfacial current density [A.m-3]'] delta_phi = variables[f'X-averaged {domain} electrode surface potential difference [V]'] T = variables[f'X-averaged {domain} electrode temperature [K]'] C_dl = self.domain_param.C_dl(T) self.rhs[delta_phi] = ((1 / C_dl) * (sum_a_j_av - sum_a_j))
@dataclasses.dataclass(frozen=True) class ExportedPickupDetails(): index: PickupIndex name: str description: str collection_text: list[str] conditional_resources: list[ConditionalResources] conversion: list[ResourceConversion] model: PickupModel original_model: PickupModel other_player: bool original_pickup: PickupEntry
class PyrockoRingfaultDelegate(SourceDelegate): __represents__ = 'PyrockoRingfaultSource' display_backend = 'pyrocko' display_name = 'Ringfault' parameters = ['store_dir', 'easting', 'northing', 'depth', 'diameter', 'strike', 'dip', 'magnitude', 'npointsources'] ro_parameters = [] class RingfaultDialog(PyrockoSourceDialog): def __init__(self, *args, **kwargs): PyrockoSourceDialog.__init__(self, *args, ui_file='pyrocko_ringfault.ui', **kwargs) class RingfaultROI(PointSourceROI): def __init__(self, *args, **kwargs): PointSourceROI.__init__(self, *args, **kwargs) self.addScaleRotateHandle([0.5, 1.0], [0.5, 0.5]) self.updateROIPosition() @QtCore.pyqtSlot() def setSourceParametersFromROI(self): angle = self.angle() strike = (float((- angle)) % 360) (vec_x, vec_y) = self._vectorToCenter(strike) parameters = {'easting': float((self.pos().x() + vec_x)), 'northing': float((self.pos().y() + vec_y)), 'diameter': self.size().y(), 'strike': strike} self.newSourceParameters.emit(parameters) @QtCore.pyqtSlot() def updateROIPosition(self): source = self.source self.setAngle((- source.strike), finish=False) self.setSize(source.diameter, finish=False) (vec_x, vec_y) = self._vectorToCenter(source.strike) self.setPos(QtCore.QPointF((source.easting - vec_x), (source.northing - vec_y)), finish=False) def _vectorToCenter(self, angle): rangle = (angle * d2r) sdx = (self.size().x() / 2) sdy = (self.size().y() / 2) return (((sdx * np.sin(rangle)) + (sdy * np.cos(rangle))), ((sdx * np.cos(rangle)) - (sdy * np.sin(rangle)))) EditDialog = RingfaultDialog ROIWidget = RingfaultROI def __init__(self, model, source, index): QtCore.QObject.__init__(self) self.source = source self.model = model self.index = index self.rois = [] self.editing_dialog = None if (model.selection_model is not None): self.setSelectionModel() self.model.selectionModelChanged.connect(self.setSelectionModel) @staticmethod def getRepresentedSource(sandbox): return PyrockoRingfaultSource(diameter=10000.0, store_dir=(getConfig().default_gf_dir or '')) def formatListItem(self): item = '\n<span style="font-weight: bold; font-style: oblique">\n {idx}. {delegate.display_name}\n <span style="color: #616161;">\n ({delegate.display_backend})\n </span>\n</span>\n<table style="color: #616161; font-size: small;">\n<tr>\n <td>Depth:</td><td>{source.depth:.2f} m</td>\n</tr><tr>\n <td>Diameter:</td><td>{source.diameter:.2f} m</td>\n</tr><tr>\n <td>Strike:</td><td>{source.strike:.2f}&deg;</td>\n</tr><tr>\n <td>Dip:</td><td>{source.dip:.2f}&deg;</td>\n</tr><tr style="font-weight: bold;">\n <td>M<sub>W</sub>:</td><td>{source.magnitude:.2f}</td>\n</tr>\n</table>\n' return item.format(idx=(self.index.row() + 1), delegate=self, source=self.source)
def test_multiple_services(): import threading as mt import queue q = queue.Queue() def _test_ms(i): cfg = config() try: helper_multiple_services(i) q.put(True) except rs.NotImplemented as ni: assert cfg.notimpl_warn_only, ('%s ' % ni) if cfg.notimpl_warn_only: print(('%s ' % ni)) q.put(True) except rs.SagaException as se: q.put(False) assert False, ('Unexpected exception: %s' % se) except: q.put(False) assert False, 'Unexpected exception' threads = list() for i in range(0, NUM_SERVICES): t = mt.Thread(target=_test_ms, args=[i]) t.start() threads.append(t) for t in threads: t.join() for t in threads: ret = q.get_nowait() assert (ret is True), 'test failed'
def construct_prompt_token(params, tokenizer: GPT2TokenizerFast, train_datasets: list, only_train_last: bool=False, max_len: int=1000): if only_train_last: print('Only set the last in-context example for training.') newline = tokenizer.encode('\n', add_special_tokens=False) newlines = tokenizer.encode('\n\n', add_special_tokens=False) all_prompt_data = list() param_unified = {'prompt_prefix': 'Answer the question.\n\n', 'q_prefix': '', 'a_prefix': 'Answer: '} (qa_data_num, cls_data_num) = (0, 0) for task_dict in tqdm(train_datasets): task_name = task_dict['task_name'] task_type = task_dict['task_type'] task_data = task_dict['task_data'] shuffle(task_data) param = (params[task_name] if (task_name in params.keys()) else param_unified) prompt_prefix = (tokenizer.encode(param['prompt_prefix'], add_special_tokens=False) if ('prompt_prefix' in param.keys()) else []) a_prefix = (tokenizer.encode(param['a_prefix'], add_special_tokens=False) if ('a_prefix' in param.keys()) else []) cur_prompt = [] cur_label_mask = [] cur_prompt += prompt_prefix cur_label_mask.append(([0] * len(prompt_prefix))) for example in task_data: text = example['text'].replace('[SEP]', '\t') target = example['target'] if ('label_word' in param.keys()): label_word = param['label_word'] target = label_word[target] text = (param['q_prefix'] + text) text_token_ids = tokenizer.encode(text, add_special_tokens=False) if (len(text_token_ids) > max_len): text_token_ids = text_token_ids[:(max_len - 100)] target_token_ids = tokenizer.encode(target, add_special_tokens=False) cur_sent_prompt = (((text_token_ids + newline) + a_prefix) + target_token_ids) cur_sent_label_mask = ([0] * ((len(text_token_ids) + len(newline)) + len(a_prefix))) cur_sent_label_mask += ([1] * len(target_token_ids)) if (len(cur_sent_prompt) > max_len): cur_sent_prompt = cur_sent_prompt[:(max_len - 20)] cur_sent_label_mask = cur_sent_label_mask[:(max_len - 20)] cur_sent_prompt += newlines cur_sent_label_mask += ([0] * len(newlines)) assert (len(cur_sent_prompt) == len(cur_sent_label_mask)) if ((len(cur_prompt) + len(cur_sent_prompt)) > max_len): final_label_mask = list() if (only_train_last and (len(cur_label_mask) > 1)): for lm in cur_label_mask[:(- 1)]: final_label_mask.extend(([0] * len(lm))) elif ((not only_train_last) and (len(cur_label_mask) > 1)): for lm in cur_label_mask[:(- 1)]: final_label_mask.extend(lm) final_label_mask.extend(cur_label_mask[(- 1)]) all_prompt_data.append({'token_ids': cur_prompt, 'label_masks': final_label_mask}) if (task_type == 'qa'): qa_data_num += 1 else: cls_data_num += 1 cur_prompt = [] cur_label_mask = [] cur_prompt += prompt_prefix cur_label_mask.append(([0] * len(prompt_prefix))) cur_prompt.extend(cur_sent_prompt) cur_label_mask.append(cur_sent_label_mask) print('cls data num: {}'.format(cls_data_num)) print('qa data num: {}'.format(qa_data_num)) return all_prompt_data
def XGBoost(filename, x_predict, model_name, xgb_outputname, set_now, game_name, change_side): data = pd.read_csv(filename) data = data[needed] data.dropna(inplace=True) data.reset_index(drop=True, inplace=True) data = data[(data.type != '')] data = data[(data.type != '')] data = data[(data.type != '')] data = data[(data.type != '')] label = [1, 2] model = joblib.load(model_name) prediction = model.predict(x_predict) result = pd.DataFrame([]) result['Real'] = list(data['hit_height']) result['Predict'] = prediction result.to_csv(xgb_outputname, index=None) cnt = 0 for i in range(len(result['Real'])): if (result['Real'][i] == result['Predict'][i]): cnt += 1 print(('XGBoost Total correct: ' + str(cnt))) print(('XGBoost Total number: ' + str(len(prediction)))) print(('XGBoost Accuracy: ' + str(accuracy_score(data['hit_height'], prediction)))) print(('XGBoost Overall precision: ' + str(precision_score(data['hit_height'], prediction, labels=label, average='micro')))) print(('XGBoost Overall recall: ' + str(recall_score(data['hit_height'], prediction, labels=label, average='micro'))))
def train(train_loader, model, criterion, optimizer, epoch, cfg, logger, writer): model.eval() batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() num_iter = len(train_loader) end = time.time() time1 = time.time() for (idx, (images, labels)) in enumerate(train_loader): images = images.cuda(non_blocking=True) labels = labels.cuda(non_blocking=True) bsz = labels.shape[0] logits = model(images) loss = criterion(logits, labels) (acc1, acc5) = accuracy(logits, labels, topk=(1, 5)) losses.update(loss.item(), bsz) top1.update(acc1.item(), bsz) optimizer.zero_grad() loss.backward() optimizer.step() batch_time.update((time.time() - end)) end = time.time() if ((((idx + 1) % cfg.log_interval) == 0) and (logger is not None)): lr = optimizer.param_groups[0]['lr'] logger.info(f'Epoch [{epoch}][{(idx + 1)}/{num_iter}] - batch_time: {batch_time.avg:.3f}, lr: {lr:.5f}, loss: {losses.avg:.3f}, : {top1.avg:.3f}') time2 = time.time() epoch_time = format_time((time2 - time1)) if (logger is not None): logger.info(f'Epoch [{epoch}] - epoch_time: {epoch_time}, train_loss: {losses.avg:.3f}, train_: {top1.avg:.3f}') if (writer is not None): lr = optimizer.param_groups[0]['lr'] writer.add_scalar('Linear/lr', lr, epoch) writer.add_scalar('Linear/train/loss', losses.avg, epoch) writer.add_scalar('Linear/train/acc', top1.avg, epoch) return (losses.avg, top1.avg)
class PutObjectAction(BaseAction):
    valid_actions = {'PutObject', 'OpenObject', 'CloseObject'}

    def get_reward(self, state, prev_state, expert_plan, goal_idx, low_idx=None):
        if low_idx is None:
            subgoal = expert_plan[goal_idx]['planner_action']
        else:
            subgoal = expert_plan[goal_idx]['planner_action']['parameter'][low_idx]
        reward, done = self.rewards['neutral'], False
        target_object_id = subgoal['objectId']
        if 'receptacleObjectId' not in subgoal:
            # Malformed subgoal: log it and fall back to the neutral reward
            # instead of raising a KeyError on the lookup below.
            print(subgoal)
            return reward, done
        recep_object = get_object(subgoal['receptacleObjectId'], state.metadata)
        if recep_object is not None:
            is_target_in_recep = target_object_id in recep_object['receptacleObjectIds']
            reward, done = (self.rewards['positive'], True) if is_target_in_recep else (self.rewards['negative'], False)
        return reward, done
def return_canoncailised_smiles_str(molecule, remove_am=True, allHsExplicit=False, kekuleSmiles=True) -> str:
    """Return the canonical (kekulized) SMILES of `molecule`, optionally stripping
    atom-map numbers first. Works on an editable copy, leaving the input intact."""
    mol_copy = Chem.RWMol(molecule)
    if remove_am:
        for atom in mol_copy.GetAtoms():
            atom.ClearProp('molAtomMapNumber')
    smiles = Chem.MolToSmiles(mol_copy, allHsExplicit=allHsExplicit, kekuleSmiles=kekuleSmiles, canonical=True)
    return smiles
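# Minimal usage sketch (RDKit's standard API; the atom-mapped SMILES is made up):
from rdkit import Chem

mol = Chem.MolFromSmiles('[CH3:1][OH:2]')
print(return_canoncailised_smiles_str(mol))  # -> 'CO', atom maps stripped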
def usage():
    print('Usage:', os.path.basename(sys.argv[0]), '[options] file')
    print('Options:')
    print('        -d, --dcalls           Apply clause D calls')
    print('        -e, --enum=<string>    How many solutions to compute')
    print('                               Available values: [1 .. all] (default: 1)')
    print('        -h, --help             Show this message')
    print('        -s, --solver           SAT solver to use')
    print('                               Available values: g3, g4, lgl, mcb, mcm, mpl, m22, mc, mgh (default: m22)')
    print('        -v, --verbose          Be verbose')
class TED_eval(Dataset):
    """TED test split: one sample per video folder, returned as the folder name
    plus the list of its PNG frames (sorted, converted to RGB, then transformed)."""

    def __init__(self, transform=None):
        self.ds_path = './datasets/ted/test/'
        self.videos = os.listdir(self.ds_path)
        self.transform = transform

    def __getitem__(self, idx):
        vid_name = self.videos[idx]
        video_path = os.path.join(self.ds_path, vid_name)
        frames_paths = sorted(glob.glob(video_path + '/*.png'))
        vid_target = [self.transform(Image.open(p).convert('RGB')) for p in frames_paths]
        return vid_name, vid_target

    def __len__(self):
        return len(self.videos)
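# Hypothetical usage sketch: iterate the evaluation set one clip at a time
# (batch_size=1 because clips have varying frame counts). torchvision's ToTensor
# as `transform` is an assumption; the ./datasets/ted/test/ layout comes from the class.
from torch.utils.data import DataLoader
from torchvision import transforms

dataset = TED_eval(transform=transforms.ToTensor())
loader = DataLoader(dataset, batch_size=1, shuffle=False)
for vid_name, vid_target in loader:
    print(vid_name[0], len(vid_target))  # clip folder name and its frame count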
@pytest.mark.django_db  # reconstructed: the marker was truncated to "_db" in the source
def test_query_events_image(rf, graphql_client, conference_factory, event_factory):
    now = timezone.now()
    request = rf.get('/')
    conference = conference_factory(start=now, end=now + timezone.timedelta(days=3))
    event = event_factory(conference=conference, latitude=1, longitude=1)
    resp = graphql_client.query(
        'query($code: String!) {\n conference(code: $code) {\n events {\n image\n }\n }\n }',
        variables={'code': conference.code},
    )
    assert not resp.get('errors')
    assert len(resp['data']['conference']['events']) == 1
    events = resp['data']['conference']['events']
    assert events[0]['image'] == get_image_url_from_request(request, event.image)
class CnfWrapper():

    def __init__(self, grammar):
        super(CnfWrapper, self).__init__()
        self.grammar = grammar
        self.rules = grammar.rules
        # Index CNF rules by their RHS for constant-time lookup during parsing.
        self.terminal_rules = defaultdict(list)
        self.nonterminal_rules = defaultdict(list)
        for r in self.rules:
            assert isinstance(r.lhs, NT), r
            if len(r.rhs) not in [1, 2]:
                raise ParseError('CYK requires a grammar in CNF: every rule RHS must have length 1 or 2 (empty rules are not supported)')
            if len(r.rhs) == 1 and isinstance(r.rhs[0], T):
                self.terminal_rules[r.rhs[0]].append(r)
            elif len(r.rhs) == 2 and all(isinstance(x, NT) for x in r.rhs):
                self.nonterminal_rules[tuple(r.rhs)].append(r)
            else:
                assert False, r

    def __eq__(self, other):
        return self.grammar == other.grammar

    def __repr__(self):
        return repr(self.grammar)
def test_load_initial_conftest_last_ordering(_config_for_test): pm = _config_for_test.pluginmanager class My(): def pytest_load_initial_conftests(self): pass m = My() pm.register(m) hc = pm.hook.pytest_load_initial_conftests hookimpls = [(hookimpl.function.__module__, ('wrapper' if (hookimpl.wrapper or hookimpl.hookwrapper) else 'nonwrapper')) for hookimpl in hc.get_hookimpls()] assert (hookimpls == [('_pytest.config', 'nonwrapper'), (m.__module__, 'nonwrapper'), ('_pytest.legacypath', 'nonwrapper'), ('_pytest.python_path', 'nonwrapper'), ('_pytest.capture', 'wrapper'), ('_pytest.warnings', 'wrapper')])
class UfsFileSystem(MountFileSystem):
    type = 'ufs'
    aliases = ['4.2bsd', 'ufs2', 'ufs 2']
    _mount_opts = 'ufstype=ufs2'

    @classmethod  # reconstructed: `detect` takes cls and calls super().detect()
    def detect(cls, source, description):
        res = super().detect(source, description)
        # A bare "BSD" hint without an explicit 4.2BSD/UFS marker more likely
        # denotes a BSD volume system than a UFS filesystem, so shift the scores.
        if ('BSD' in description) and ('4.2BSD' not in description) and ('UFS' not in description):
            res.update({cls: -20, VolumeSystemFileSystem: 20})
        return res
def optimize(instance, max_time=10000, time_limit=100, threads=1):
    model = CpoModel('BlockingJobShop')
    # One interval per task. The size may stretch beyond task.length: a finished
    # task keeps occupying its machine (blocking) until its successor can start.
    interval_vars = dict()
    for task in instance.tasks:
        interval_vars[task] = interval_var(start=(0, max_time), end=(0, max_time), size=(task.length, max_time), name='interval' + str(task.name))
    # A job's next task starts exactly when this one releases the machine.
    for task in instance.tasks:
        if task.next_task:
            model.add(start_of(interval_vars[task.next_task]) == end_of(interval_vars[task]))
    # Tasks sharing a machine may not overlap.
    for machine in instance.machines:
        machine_sequence = sequence_var([interval_vars[task] for task in machine.tasks])
        model.add(no_overlap(machine_sequence))
    # Minimize the makespan: the latest task end time.
    obj_var = integer_var(0, max_time, 'makespan')
    for task in instance.tasks:
        model.add(obj_var >= end_of(interval_vars[task]))
    model.minimize(obj_var)
    sol = model.solve(TimeLimit=time_limit, Workers=threads)
    solution = Solution(instance)
    for job in instance.jobs:
        for task in job.tasks:
            print(task.name)
            start = sol.get_value(interval_vars[task])[0]  # solution tuple is (start, end, size)
            end = sol.get_value(interval_vars[task])[1]
            print('Start: %f' % start)
            print('End: %f' % end)
            solution.add(task, start, end)
    return solution
class CbamModule(nn.Module): def __init__(self, channels, rd_ratio=(1.0 / 16), rd_channels=None, rd_divisor=1, spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(CbamModule, self).__init__() self.channel = ChannelAttn(channels, rd_ratio=rd_ratio, rd_channels=rd_channels, rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer) def forward(self, x): x = self.channel(x) x = self.spatial(x) return x
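# Quick shape check (assumes the ChannelAttn/SpatialAttn modules referenced above
# preserve spatial dimensions, as in timm's CBAM): the refined output matches the input.
import torch

cbam = CbamModule(channels=64)
x = torch.randn(2, 64, 32, 32)
print(cbam(x).shape)  # torch.Size([2, 64, 32, 32])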
def bootstrap_stderr(f, xs, iters):
    import multiprocessing as mp
    pool = mp.Pool(mp.cpu_count())
    res = []
    # Parallelize over chunks of bootstrap resamples to amortize pool overhead.
    chunk_size = min(1000, iters)
    from tqdm import tqdm
    print('bootstrapping for stddev:', f.__name__)
    for bootstrap in tqdm(pool.imap(_bootstrap_internal(f, chunk_size), [(i, xs) for i in range(iters // chunk_size)]), total=iters // chunk_size):
        # Each worker returns `chunk_size` statistics computed on resampled xs.
        res.extend(bootstrap)
    pool.close()
    return sample_stddev(res)
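# Hedged usage sketch: standard error of the mean via bootstrap resampling.
# Assumes the module-level helpers implied above exist: `_bootstrap_internal(f, n)`
# returning a picklable callable that resamples xs and applies f n times, and
# `sample_stddev(values)`. The scores below are synthetic.
import random

def mean(xs):
    return sum(xs) / len(xs)

if __name__ == '__main__':  # guard needed because bootstrap_stderr spawns a Pool
    scores = [random.gauss(0.7, 0.1) for _ in range(500)]
    print('stderr of the mean:', bootstrap_stderr(mean, scores, iters=10000))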
class InMemoryLoggerTest(unittest.TestCase): def test_in_memory_log(self) -> None: logger = InMemoryLogger() logger.log(name='metric1', data=123.0, step=0) logger.log(name='metric1', data=456.0, step=1) logger.log(name='metric1', data=789.0, step=2) with captured_output() as (out, err): logger.flush() out = cast(StringIO, out) err = cast(StringIO, err) self.assertTrue(out.getvalue().startswith('OrderedDict([')) self.assertEqual(err.getvalue(), '') logger.log_dict(payload={'metric2': 1.0, 'metric3': 2.0}, step=3) buf = logger.log_buffer self.assertEqual(len(buf), 4) self.assertEqual(buf[0]['metric1'], 123.0) self.assertEqual(buf[0]['step'], 0) self.assertEqual(buf[1]['metric1'], 456.0) self.assertEqual(buf[1]['step'], 1) self.assertEqual(buf[2]['metric1'], 789.0) self.assertEqual(buf[2]['step'], 2) self.assertEqual(buf[3]['metric2'], 1.0) self.assertEqual(buf[3]['metric3'], 2.0) self.assertEqual(buf[3]['step'], 3) with captured_output() as (out, err): logger.flush() out = cast(StringIO, out) err = cast(StringIO, err) self.assertTrue(out.getvalue().startswith('OrderedDict([')) self.assertEqual(err.getvalue(), '') logger.close() self.assertEqual(logger.log_buffer, OrderedDict([]))
def get_cls_loss(pred, label, select):
    # No positions selected for this branch: contribute nothing to the loss.
    if not bool(select.numel()):
        return 0
    try:
        pred = torch.index_select(pred, 0, select)
        label = torch.index_select(label, 0, select)
        out = F.nll_loss(pred, label)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        print('error:\n', pred, label, pred.size(), label.size())
        out = 0
    return out
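# Toy check (hypothetical shapes): `pred` holds log-probabilities and `select`
# picks which rows contribute to the loss; an empty selection short-circuits to 0.
import torch
import torch.nn.functional as F

pred = F.log_softmax(torch.randn(6, 3), dim=1)
label = torch.tensor([0, 1, 2, 0, 1, 2])
pos = torch.tensor([0, 2, 4])
print(get_cls_loss(pred, label, pos))  # mean NLL over the selected rows
print(get_cls_loss(pred, label, torch.tensor([], dtype=torch.long)))  # 0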
def test_compatible_with_numpy_configuration(tmp_path): files = ['dir1/__init__.py', 'dir2/__init__.py', 'file.py'] _populate_project_dir(tmp_path, files, {}) dist = Distribution({}) dist.configuration = object() dist.set_defaults() assert (dist.py_modules is None) assert (dist.packages is None)
@pytest.mark.memory  # reconstructed: the marker was truncated to ".memory" in the source
def test_being_calc_next_time():
    _takes_time.clear_cache()
    _being_calc_next_time(0.13, 0.02)
    sleep(1.1)
    res_queue = queue.Queue()
    thread1 = threading.Thread(target=_calls_being_calc_next_time, kwargs={'res_queue': res_queue}, daemon=True)
    thread2 = threading.Thread(target=_calls_being_calc_next_time, kwargs={'res_queue': res_queue}, daemon=True)
    thread1.start()
    sleep(0.5)
    thread2.start()
    thread1.join(timeout=3)
    thread2.join(timeout=3)
    # Both concurrent callers should observe the same cached value.
    assert res_queue.qsize() == 2
    res1 = res_queue.get()
    res2 = res_queue.get()
    assert res1 == res2
@with_fixtures(SqlAlchemyFixture)  # reconstructed: the decorator name was truncated to "_fixtures" in the source
def demo_setup(sql_alchemy_fixture):
    sql_alchemy_fixture.commit = True
    # The email_address values appear elided in the source; left empty as-is.
    Address(email_address='', name='Friend1').save()
    Address(email_address='', name='Friend2').save()
    Address(email_address='', name='Friend3').save()
    Address(email_address='', name='Friend4').save()
def test_multi_hook(): r2p = r2pipe.open('test/tests/multibranch', flags=['-2']) r2p.cmd('s sym.check; aei; aeim; aer rdi=22021') esilsolver = ESILSolver(r2p, debug=False, trace=False) state = esilsolver.init_state() state.set_symbolic_register('rdi') rdi = state.registers['rdi'] state.solver.add((rdi >= 0)) def success(state): sat = state.solver.check() m = state.solver.model() print(('ARG1: %d' % m.eval(rdi).as_long())) return True esilsolver.register_hook(1697, success) esilsolver.run(target=1697, avoid=[1704])
class KernelNet(nn.Module):

    def __init__(self, in_channels, init_n_kernels, out_channels, depth, n_nodes, channel_change):
        super().__init__()
        c0 = c1 = n_nodes * init_n_kernels
        c_node = init_n_kernels
        # Two stems feed the first cell: a 1x1 conv and a stride-2 3x3 conv.
        self.stem0 = ConvOps(in_channels, c0, kernel_size=1, ops_order='weight_norm')
        self.stem1 = ConvOps(in_channels, c1, kernel_size=3, stride=2, ops_order='weight_norm')
        assert depth >= 2, 'depth must be >= 2'
        self.down_cells = nn.ModuleList()
        self.up_cells = nn.ModuleList()
        down_channels = [c0, c1]
        # Encoder: `depth` downward cells, optionally doubling node channels per level.
        for i in range(depth):
            c_node = 2 * c_node if channel_change else c_node
            down_cell = Cell(n_nodes, c0, c1, c_node)
            self.down_cells += [down_cell]
            c0, c1 = c1, down_cell.out_channels
            down_channels.append(c1)
        down_channels.pop()
        # Decoder: depth+1 upward cells consuming the encoder skips in reverse order.
        for i in range(depth + 1):
            c0 = down_channels.pop()
            up_cell = Cell(n_nodes, c0, c1, c_node, downward=False)
            self.up_cells += [up_cell]
            c1 = up_cell.out_channels
            c_node = c_node // 2 if channel_change else c_node
        self.last_conv = nn.Sequential(ConvOps(c1, out_channels, kernel_size=1, dropout_rate=0.1, ops_order='weight'), nn.Sigmoid())

    def forward(self, x, alpha1_down, alpha1_up, alpha2_down, alpha2_up):
        s0, s1 = self.stem0(x), self.stem1(x)
        down_outputs = [s0, s1]
        for i, cell in enumerate(self.down_cells):
            s0, s1 = s1, cell(s0, s1, alpha1_down, alpha2_down)
            down_outputs.append(s1)
        if FLAG_DEBUG:
            print('x.shape = ', x.shape)
            for i in down_outputs:
                print(i.shape)
        down_outputs.pop()
        for i, cell in enumerate(self.up_cells):
            s0 = down_outputs.pop()  # skip connection from the matching encoder level
            s1 = cell(s0, s1, alpha1_up, alpha2_up)
            if FLAG_DEBUG:
                print(s1.shape)
        return self.last_conv(s1)
@dataclass  # reconstructed: field() defaults require a dataclass, as in the HF reference scripts
class ModelArguments():
    model_name_or_path: Optional[str] = field(default=None, metadata={'help': "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."})
    model_type: Optional[str] = field(default=None, metadata={'help': ('If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES))})
    config_overrides: Optional[str] = field(default=None, metadata={'help': 'Override some existing default config settings when a model is trained from scratch. Example: n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizers (backed by the tokenizers library) or not.'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'})

    def __post_init__(self):
        if (self.config_overrides is not None) and ((self.config_name is not None) or (self.model_name_or_path is not None)):
            raise ValueError("--config_overrides can't be used in combination with --config_name or --model_name_or_path")
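# Typical HuggingFace pattern for consuming this dataclass (HfArgumentParser is
# real transformers API; the CLI value below is made up):
from transformers import HfArgumentParser

parser = HfArgumentParser(ModelArguments)
(model_args,) = parser.parse_args_into_dataclasses(args=['--model_name_or_path', 'gpt2'])
print(model_args.model_name_or_path)  # gpt2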
def collect_raw_osm_stats(rulename='download_osm_data', metric_crs='EPSG:3857'): snakemake = _mock_snakemake('download_osm_data') options_raw = dict(snakemake.output) options_raw.pop('generators_csv') df_raw_osm_stats = collect_osm_stats(rulename, only_basic=True, metric_crs=metric_crs, **options_raw) add_computational_stats(df_raw_osm_stats, snakemake) return df_raw_osm_stats
class RoIAlignRotated(nn.Module): def __init__(self, out_size, spatial_scale, sample_num=0, aligned=True, clockwise=False): super(RoIAlignRotated, self).__init__() self.out_size = out_size self.spatial_scale = float(spatial_scale) self.sample_num = int(sample_num) self.aligned = aligned self.clockwise = clockwise def forward(self, features, rois): return RoIAlignRotatedFunction.apply(features, rois, self.out_size, self.spatial_scale, self.sample_num, self.aligned, self.clockwise)
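# Hedged usage sketch (needs the compiled RoIAlignRotatedFunction CUDA op and a GPU;
# coordinates are made up). Each roi row follows the mmcv rotated-roi convention:
# (batch_idx, cx, cy, w, h, angle_in_radians).
import torch

roi_align_rot = RoIAlignRotated(out_size=7, spatial_scale=1.0 / 16, sample_num=2)
feats = torch.randn(1, 256, 50, 50).cuda()
rois = torch.tensor([[0.0, 400.0, 400.0, 100.0, 60.0, 0.3]]).cuda()
print(roi_align_rot(feats, rois).shape)  # torch.Size([1, 256, 7, 7])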
def show_results(experiment):
    results = experiment['results']
    labels = experiment['labels']
    for dataset in results:
        plt.figure(figsize=(16.0, 10.0))
        plt.suptitle(dataset, fontsize=20, fontweight='bold')
        # Pick a subplot grid that fits the number of sub-experiments (6 or 12).
        grids = {6: (2, 3), 12: (3, 4)}[len(results[dataset])]
        for idx, (n, n_value) in enumerate(results[dataset].items()):
            plt.subplot(grids[0], grids[1], idx + 1)
            # `explode` and `colors` assume exactly two slices per pie.
            plt.pie(n_value, explode=(0.05, 0), labels=labels, colors=['coral', 'skyblue'], autopct='%.1f%%', shadow=False, startangle=90, textprops={'fontsize': 12, 'color': 'k'})
            plt.title(n, fontsize=14, fontweight='bold')
        plt.show()
def inference(args):
    audio_path = args.audio_path
    output_midi_path = args.output_midi_path
    device = 'cuda' if (args.cuda and torch.cuda.is_available()) else 'cpu'
    # Load the recording as mono at the model's expected sample rate.
    audio, _ = load_audio(audio_path, sr=sample_rate, mono=True)
    # checkpoint_path=None makes PianoTranscription fall back to its default checkpoint.
    transcriptor = PianoTranscription(device=device, checkpoint_path=None)
    transcribe_time = time.time()
    transcribed_dict = transcriptor.transcribe(audio, output_midi_path)
    print('Transcribe time: {:.3f} s'.format(time.time() - transcribe_time))
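# Hypothetical CLI wrapper for inference(); the argument names mirror the attributes
# the function reads (audio_path, output_midi_path, cuda) -- nothing else is assumed:
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Transcribe a piano recording to MIDI.')
    parser.add_argument('--audio_path', type=str, required=True)
    parser.add_argument('--output_midi_path', type=str, required=True)
    parser.add_argument('--cuda', action='store_true')
    inference(parser.parse_args())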