code
stringlengths
281
23.7M
# Django test model: a boolean flag plus a many-to-many and a nullable
# one-to-one link to AsyncRelationalChild (related_name mirrors the field name).
class AsyncRelationalParent(models.Model): done = models.BooleanField(default=True) many_to_many = models.ManyToManyField(AsyncRelationalChild, related_name='many_to_many') one_to_one = models.OneToOneField(AsyncRelationalChild, related_name='one_to_one', on_delete=models.SET_NULL, null=True)
# PixelCNN prior over PQ codebook indices: one PixelCNN per quantizer level
# predicts that level's codes from the hard-quantized latents. forward()
# returns (sum of per-level cross-entropy losses, mean prediction accuracy).
# NOTE(review): test() mutates `code` in place (correct predictions become 0
# after the -1/+1 shift) and returns the mutated codes — confirm intended.
class WholePQPixelCNN(nn.Module): def __init__(self, m, k, channel, withGroup, withAtt, target, alias, ema): super().__init__() self._levels = len(k) self._compressor = PQCompressorBig(m, k, channel, withGroup, withAtt, False, alias, ema) self._cLoss = nn.CrossEntropyLoss() self._pixelCNN = nn.ModuleList((PixelCNN(m, ki, channel) for ki in k)) def test(self, image): (restored, allCodes, allHards) = self._compressor.test(image) for (i, (pixelCNN, hard, code)) in enumerate(zip(self._pixelCNN, allHards, allCodes)): logits = pixelCNN(hard) correct = (logits.argmax(1) == code) code[correct] = (- 1) code += 1 return (restored, allCodes) def forward(self, image, temp, **_): with torch.no_grad(): (allZs, allHards, allCodes, allResiduals) = self._compressor.getLatents(image) predictLoss = list() ratios = list() for (i, (pixelCNN, hard, code)) in enumerate(zip(self._pixelCNN, allHards, allCodes)): (n, m, c, h, w) = hard.shape logits = pixelCNN(hard.reshape(n, (m * c), h, w)) dLoss = self._cLoss(logits, code) predictLoss.append(dLoss) ratios.append((logits.argmax(1) == code).float().mean()) return (sum(predictLoss), (sum(ratios) / len(ratios)))
# Blender bmesh helper: derive left/right axes from the first selected face's
# normal, then collect unselected faces lying between the selection's start and
# end centers whose rounded normals match one of the expected directions and
# whose own extent fits within the selection span. Deselects the originally
# selected faces and returns (filtered faces + originally selected faces).
# NOTE(review): assumes at least one face is selected (faces[0]) — confirm
# callers guarantee this.
def get_faces_in_selection_bounds(bm): faces = [f for f in bm.faces if f.select] normal = faces[0].normal.copy() (L, R) = (normal.cross(VEC_UP), normal.cross(VEC_DOWN)) faces = sort_faces(faces, R) (start, finish) = (faces[0].calc_center_median(), faces[(- 1)].calc_center_median()) faces_left = filter((lambda f: (L.dot(f.calc_center_median()) < L.dot(start))), bm.faces) faces_mid = filter((lambda f: (R.dot(f.calc_center_median()) < R.dot(finish))), faces_left) valid_normals = [normal.to_tuple(2), L.to_tuple(2), R.to_tuple(2), VEC_UP.to_tuple(2), VEC_DOWN.to_tuple(2)] faces_correct_normal = filter((lambda f: (f.normal.to_tuple(2) in valid_normals)), faces_mid) def calc_face_bounds_dist(f): vts = sort_verts(f.verts, R) return (vts[0].co - vts[(- 1)].co).length bounds_distance = (start - finish).length faces_within_distance = filter((lambda f: (calc_face_bounds_dist(f) < bounds_distance)), faces_correct_normal) select(faces, False) return (list(faces_within_distance) + faces)
# Builds pseudo-captions from Visual Genome annotations: "attribute object"
# strings from attributes.json plus "subject predicate object" strings from
# relationships.json, normalized (lowercase, control chars stripped, synsets
# truncated at the first '.') and de-duplicated via np.unique.
# NOTE(review): `combination` is missing `self`/@staticmethod and appears
# unused — confirm against callers before relying on it.
class VisualGenomeCaptions(): def __init__(self, ann_dir): super().__init__() escapes = ''.join([chr(char) for char in range(0, 32)]) self.translator = str.maketrans('', '', escapes) self.caps = self.parse_annotations(Path(ann_dir)) def combination(l1, l2): return [' '.join(x) for x in itertools.product(l1, l2)] def process_word(self, s): return s.lower().strip().translate(self.translator) def process_synset(self, s): return s.lower().strip().translate(self.translator).split('.')[0] def parse_annotations(self, ann_dir): print('loading object attributes...') objs = {} with open((ann_dir / 'attributes.json'), 'r') as f: attributes = json.load(f) for x in tqdm(attributes, dynamic_ncols=True): for a in x['attributes']: _names = set((self.process_synset(y) for y in a.get('synsets', list()))) _attrs = set((self.process_word(y) for y in a.get('attributes', list()))) for n in _names: try: objs[n] |= _attrs except KeyError: objs[n] = _attrs del attributes print('loading object relationships...') rels = set() with open((ann_dir / 'relationships.json'), 'r') as f: relationships = json.load(f) for x in tqdm(relationships, dynamic_ncols=True): for r in x['relationships']: _pred = self.process_word(r['predicate']) _subj = set((self.process_synset(y) for y in r['subject']['synsets'])) _obj = set((self.process_synset(y) for y in r['object']['synsets'])) for s in _subj: for o in _obj: rels.add(f'{s}<sep>{_pred}<sep>{o}') del relationships print('parsing object attributes...') caps_obj = [] for o in tqdm(objs.keys()): for a in objs[o]: if (a != ''): caps_obj.append(f'{a} {o}') print('parsing object relationships...') caps_rel = [] for r in tqdm(rels): (s, p, o) = r.split('<sep>') caps_rel.append(f'{s} {p} {o}') caps = np.unique((caps_obj + caps_rel)).tolist() return caps
# torch.fx tracer that inlines calls to the functions listed in FNS_TO_INLINE:
# instead of recording a single call_function node, it re-invokes the target
# on proxies appended to the current graph, so the target's body is traced
# through and its constituent ops land in the graph directly.
class InliningTracer(torch.fx.Tracer): FNS_TO_INLINE = [add_lowp] def create_node(self, kind, target, args, kwargs, name=None, type_expr=None): if ((kind == 'call_function') and (target in self.FNS_TO_INLINE)): tracer = torch.fx.proxy.GraphAppendingTracer(self.graph) proxy_args = torch.fx.node.map_arg(args, (lambda x: torch.fx.Proxy(x, tracer))) proxy_kwargs = torch.fx.node.map_arg(kwargs, (lambda x: torch.fx.Proxy(x, tracer))) return target(*proxy_args, **proxy_kwargs).node else: return super().create_node(kind, target, args, kwargs, name, type_expr)
def get_mypyc_attrs(stmt: (ClassDef | Decorator)) -> dict[(str, Any)]:
    """Collect all mypyc attributes declared via decorators on *stmt*.

    Bare positional string arguments become boolean flags (mapped to True);
    keyword arguments are converted with get_mypyc_attr_literal.
    """
    result: dict[(str, Any)] = {}
    for decorator in stmt.decorators:
        call = get_mypyc_attr_call(decorator)
        if not call:
            continue
        for key, value_expr in zip(call.arg_names, call.args):
            if key is not None:
                result[key] = get_mypyc_attr_literal(value_expr)
            elif isinstance(value_expr, StrExpr):
                # Positional bare string, e.g. @mypyc_attr("allow_interpreted_subclasses")
                result[value_expr.value] = True
    return result
class ParsedItem(dict):
    """Dict view of a single JSON-schema property, flattened to the fields we render.

    Key insertion order matches the legacy layout: name, title, type,
    description, level, required, then the extension/validation keys.
    """

    def __init__(self, json_object, name, required, level):
        super().__init__()
        self['name'] = name
        # Schema fields copied with their legacy defaults ('type' defaults to None).
        for key, default in (('title', ''), ('type', None), ('description', '')):
            self[key] = json_object.get(key, default)
        self['level'] = level
        self['required'] = required
        for key in ('x-reference', 'x-example', 'pattern', 'enum'):
            self[key] = json_object.get(key, '')
# PyMeasure example procedure: emits `iterations` random numbers separated by
# `delay` seconds, reporting progress and stopping early on should_stop().
# get_estimates() predicts duration, line count, and finish timestamps
# (the [:-7] slice trims microseconds from the datetime string).
class TestProcedure(Procedure): iterations = IntegerParameter('Loop Iterations', default=100) delay = FloatParameter('Delay Time', units='s', default=0.2) seed = Parameter('Random Seed', default='12345') DATA_COLUMNS = ['Iteration', 'Random Number'] def startup(self): log.info('Setting up random number generator') random.seed(self.seed) def execute(self): log.info('Starting to generate numbers') for i in range(self.iterations): data = {'Iteration': i, 'Random Number': random.random()} log.debug(('Produced numbers: %s' % data)) self.emit('results', data) self.emit('progress', ((100 * i) / self.iterations)) sleep(self.delay) if self.should_stop(): log.warning('Catch stop command in procedure') break def get_estimates(self, sequence_length=None, sequence=None): duration = (self.iterations * self.delay) estimates = list() estimates.append(('Duration', ('%d s' % int(duration)))) estimates.append(('Number of lines', ('%d' % int(self.iterations)))) estimates.append(('Sequence length', str(sequence_length))) estimates.append(('Measurement finished at', str((datetime.now() + timedelta(seconds=duration)))[:(- 7)])) estimates.append(('Sequence finished at', str((datetime.now() + timedelta(seconds=(duration * sequence_length))))[:(- 7)])) return estimates def shutdown(self): log.info('Finished')
# Bloq for the T-part of first-quantized state preparation with projection:
# reports Toffoli/controlled-momentum-state costs via build_call_graph.
# NOTE(review): the stray `_property` before `signature` looks like a garbled
# `@property`/`@cached_property` decorator from text extraction — confirm
# against the upstream source.
class PrepareTFirstQuantizationWithProj(Bloq): num_bits_p: int num_bits_n: int eta: int num_bits_rot_aa: int = 8 adjoint: bool = False _property def signature(self) -> Signature: return Signature.build(w=2, w_mean=2, r=self.num_bits_n, s=self.num_bits_n) def build_call_graph(self, ssa: 'SympySymbolAllocator') -> Set['BloqCountT']: uni_prep_w = (Toffoli(), 13) ctrl_mom = (PreparePowerTwoStateWithProj(bitsize_n=self.num_bits_n, bitsize_p=self.num_bits_p, adjoint=self.adjoint), 2) if self.adjoint: k_k_proj = (Toffoli(), 0) else: k_k_proj = (Toffoli(), 16) ctrl_swap = (Toffoli(), 2) return {uni_prep_w, ctrl_mom, k_k_proj, ctrl_swap}
# Django migration: redefines the choice set for ScheduleItem.highlight_color
# (depends on 0005_scheduleitem_highlight_color).
class Migration(migrations.Migration): dependencies = [('schedule', '0005_scheduleitem_highlight_color')] operations = [migrations.AlterField(model_name='scheduleitem', name='highlight_color', field=models.CharField(blank=True, choices=[('blue', 'blue'), ('yellow', 'yellow'), ('orange', 'orange'), ('cinderella', 'cinderella'), ('violet', 'violet'), ('green', 'green')], max_length=15, verbose_name='highlight color'))]
# Parses raw KTAP output into a TestResult: TAP header, test plan, then each
# expected test suite in order; any trailing unexpected suite is reported as
# an error. Bubbles suite statuses up into the overall result.
# NOTE(review): the "expected N test suites, but got" message prints (i - 2);
# since suite `i` just failed to parse, the parsed count looks like (i - 1) —
# confirm against the upstream kunit parser before changing.
def parse_test_result(lines: LineStream) -> TestResult: consume_non_diagnostic(lines) if ((not lines) or (not parse_tap_header(lines))): return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines) expected_test_suite_num = parse_test_plan(lines) if (expected_test_suite_num == 0): return TestResult(TestStatus.NO_TESTS, [], lines) elif (expected_test_suite_num is None): return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines) test_suites = [] for i in range(1, (expected_test_suite_num + 1)): test_suite = parse_test_suite(lines, i) if test_suite: test_suites.append(test_suite) else: print_with_timestamp(((((red('[ERROR] ') + ' expected ') + str(expected_test_suite_num)) + ' test suites, but got ') + str((i - 2)))) break test_suite = parse_test_suite(lines, (- 1)) if test_suite: print_with_timestamp(((red('[ERROR] ') + 'got unexpected test suite: ') + test_suite.name)) if test_suites: return TestResult(bubble_up_suite_errors(test_suites), test_suites, lines) else: return TestResult(TestStatus.NO_TESTS, [], lines)
# ctypes COM interface declaration for WIC's IWICBitmapEncoder.
# Method order must match the native vtable exactly — do not reorder entries.
class IWICBitmapEncoder(com.pIUnknown): _methods_ = [('Initialize', com.STDMETHOD(IWICStream, WICBitmapEncoderCacheOption)), ('GetContainerFormat', com.STDMETHOD()), ('GetEncoderInfo', com.STDMETHOD()), ('SetColorContexts', com.STDMETHOD()), ('SetPalette', com.STDMETHOD()), ('SetThumbnail', com.STDMETHOD()), ('SetPreview', com.STDMETHOD()), ('CreateNewFrame', com.STDMETHOD(POINTER(IWICBitmapFrameEncode), POINTER(IPropertyBag2))), ('Commit', com.STDMETHOD()), ('GetMetadataQueryWriter', com.STDMETHOD())]
# Symbolic length of a slice with scalar start/stop/step: a nested pytensor
# switch covering the positive-step and negative-step cases (mirroring
# Python's range-length formula), defaulting to 0 for empty ranges.
def range_len(slc): from pytensor.tensor import and_, gt, lt, switch (start, stop, step) = tuple((as_index_constant(a) for a in [slc.start, slc.stop, slc.step])) return switch(and_(gt(step, 0), lt(start, stop)), (1 + (((stop - 1) - start) // step)), switch(and_(lt(step, 0), gt(start, stop)), (1 + (((start - 1) - stop) // (- step))), ps.ScalarConstant(ps.int64, 0)))
# Standard ResNet bottleneck block (1x1 reduce -> 3x3 -> 1x1 expand, x4
# expansion) with an optional downsample on the residual path. The norm layer
# is FrozenBatchNorm2d or BatchNorm2d depending on config.MODEL.FIXNORM.
class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() norm_func = (ll.FrozenBatchNorm2d if config.MODEL.FIXNORM else ll.BatchNorm2d) self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = norm_func(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = norm_func(planes) self.conv3 = nn.Conv2d(planes, (planes * 4), kernel_size=1, bias=False) self.bn3 = norm_func((planes * 4)) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.out_channels = (planes * 4) def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if (self.downsample is not None): residual = self.downsample(x) out += residual out = self.relu(out) return out
# Trio's in-memory receive channel. receive_nowait() first wakes one blocked
# sender (moving its value into the buffer), then pops buffered data, raising
# EndOfChannel when all senders are closed or WouldBlock when empty. receive()
# parks the current task until a sender reschedules it. close()/aclose() wake
# all parked receivers with ClosedResourceError and, once the last receive
# clone closes, all parked senders with BrokenResourceError.
# NOTE(review): the leading `(eq=False, repr=False)` is a garbled
# `@attr.s(eq=False, repr=False)` decorator and each `_ki_protection` a garbled
# `@enable_ki_protection` — extraction artifacts; confirm against trio source.
# The statement is split mid-`for` across the next line by the same extraction.
(eq=False, repr=False) class MemoryReceiveChannel(ReceiveChannel[ReceiveType], metaclass=NoPublicConstructor): _state: MemoryChannelState[ReceiveType] = attr.ib() _closed: bool = attr.ib(default=False) _tasks: set[trio._core._run.Task] = attr.ib(factory=set) def __attrs_post_init__(self) -> None: self._state.open_receive_channels += 1 def statistics(self) -> MemoryChannelStats: return self._state.statistics() def __repr__(self) -> str: return '<receive channel at {:#x}, using buffer at {:#x}>'.format(id(self), id(self._state)) _ki_protection def receive_nowait(self) -> ReceiveType: if self._closed: raise trio.ClosedResourceError if self._state.send_tasks: (task, value) = self._state.send_tasks.popitem(last=False) task.custom_sleep_data._tasks.remove(task) trio.lowlevel.reschedule(task) self._state.data.append(value) if self._state.data: return self._state.data.popleft() if (not self._state.open_send_channels): raise trio.EndOfChannel raise trio.WouldBlock _ki_protection async def receive(self) -> ReceiveType: (await trio.lowlevel.checkpoint_if_cancelled()) try: value = self.receive_nowait() except trio.WouldBlock: pass else: (await trio.lowlevel.cancel_shielded_checkpoint()) return value task = trio.lowlevel.current_task() self._tasks.add(task) self._state.receive_tasks[task] = None task.custom_sleep_data = self def abort_fn(_: RaiseCancelT) -> Abort: self._tasks.remove(task) del self._state.receive_tasks[task] return trio.lowlevel.Abort.SUCCEEDED return (await trio.lowlevel.wait_task_rescheduled(abort_fn)) _ki_protection def clone(self) -> MemoryReceiveChannel[ReceiveType]: if self._closed: raise trio.ClosedResourceError return MemoryReceiveChannel._create(self._state) def __enter__(self) -> Self: return self def __exit__(self, exc_type: (type[BaseException] | None), exc_value: (BaseException | None), traceback: (TracebackType | None)) -> None: self.close() _ki_protection def close(self) -> None: if self._closed: return self._closed = True for task in self._tasks: 
trio.lowlevel.reschedule(task, Error(trio.ClosedResourceError())) del self._state.receive_tasks[task] self._tasks.clear() self._state.open_receive_channels -= 1 if (self._state.open_receive_channels == 0): assert (not self._state.receive_tasks) for task in self._state.send_tasks: task.custom_sleep_data._tasks.remove(task) trio.lowlevel.reschedule(task, Error(trio.BrokenResourceError())) self._state.send_tasks.clear() self._state.data.clear() _ki_protection async def aclose(self) -> None: self.close() (await trio.lowlevel.checkpoint())
def call_tox(toxenv: str, *args: str, python: pathlib.Path=pathlib.Path(sys.executable), debug: bool=False) -> None:
    """Run ``tox -vv -e <toxenv>`` in a subprocess and raise on failure.

    The child environment advertises *python* via ``PYTHON`` and prepends
    nothing — its parent directory is appended to ``PATH``.  When *debug* is
    set, ``PYINSTALLER_DEBUG=1`` is exported as well.
    Raises subprocess.CalledProcessError if tox exits non-zero.
    """
    env = dict(os.environ)
    env['PYTHON'] = str(python)
    env['PATH'] = os.environ['PATH'] + os.pathsep + str(python.parent)
    if debug:
        env['PYINSTALLER_DEBUG'] = '1'
    command = [sys.executable, '-m', 'tox', '-vv', '-e', toxenv, *args]
    subprocess.run(command, env=env, check=True)
# CLI handler: assembles the split secret key, decrypts and pretty-prints the
# encrypted client fields matched by the configured selectors, optionally
# followed by matching audit-trail entries (newest first); the combined secret
# key is always deleted in the `finally` block, even on error.
def access_handler(args): combine_secret_key() selectors = get_selectors() try: db = get_db() spec = {'$or': [{s.enc_mongo: {'$exists': True}} for s in selectors]} printed_header = (not args.audit_trail) for (keys, dct, tuples) in decrypt_iterator(db.clients.find(spec), ('_id', 'hostname'), full_documents=args.full, selectors=selectors): if dct: if (not printed_header): print('Clients:\n') printed_header = True pprint.pprint({**keys, **dct}) log.info('Displayed encrypted data in document {} (host {})', keys['_id'], keys['hostname']) if args.audit_trail: if printed_header: print('') spec = {'key': {'$in': [s.enc_mongo for s in selectors]}} printed_header = False for (keys, dct, tuples) in decrypt_iterator(db.audit_trail.find(spec, sort=(('_id', DESCENDING),)), selectors=audit_trail_selectors, full_documents=True): if dct: if (not printed_header): print('Audit trail:\n') printed_header = True dct['key'] = next((s.plain_mongo for s in selectors if (s.enc_mongo == dct['key']))) pprint.pprint(dct) log.info('Displayed encrypted audit trail in document {} (host {})', dct['_id'], dct['hostname']) finally: delete_secret_key()
# Spectral-axis channel-width conversion: transforms a CDELT between
# frequency / length / velocity conventions by differentiating the coordinate
# transform at reference value `crval`, with `rest` as the rest frequency or
# wavelength. `linear` selects the linear (radio/optical-style) formula over
# the relativistic one. Air wavelengths must be converted to vacuum upstream.
def cdelt_derivative(crval, cdelt, intype, outtype, linear=False, rest=None): if (intype == outtype): return cdelt elif (set((outtype, intype)) == set(('length', 'frequency'))): return (((- constants.c) / (crval ** 2)) * cdelt).to(PHYS_UNIT_DICT[outtype]) elif ((outtype in ('frequency', 'length')) and ('speed' in intype)): if linear: numer = (cdelt * rest.to(PHYS_UNIT_DICT[outtype], u.spectral())) denom = constants.c else: numer = ((cdelt * constants.c) * rest.to(PHYS_UNIT_DICT[outtype], u.spectral())) denom = ((constants.c + crval) * (((constants.c ** 2) - (crval ** 2)) ** 0.5)) if (outtype == 'frequency'): return ((- numer) / denom).to(PHYS_UNIT_DICT[outtype], u.spectral()) else: return (numer / denom).to(PHYS_UNIT_DICT[outtype], u.spectral()) elif (('speed' in outtype) and (intype in ('frequency', 'length'))): if linear: numer = (cdelt * constants.c) denom = rest.to(PHYS_UNIT_DICT[intype], u.spectral()) else: numer = ((((4 * constants.c) * crval) * (rest.to(crval.unit, u.spectral()) ** 2)) * cdelt) denom = (((crval ** 2) + (rest.to(crval.unit, u.spectral()) ** 2)) ** 2) if (intype == 'frequency'): return ((- numer) / denom).to(PHYS_UNIT_DICT[outtype], u.spectral()) else: return (numer / denom).to(PHYS_UNIT_DICT[outtype], u.spectral()) elif (intype == 'air wavelength'): raise TypeError('Air wavelength should be converted to vacuum earlier.') elif (outtype == 'air wavelength'): raise TypeError('Conversion to air wavelength not supported.') else: raise ValueError('Invalid in/out frames')
# Silences TensorFlow/Keras log noise, disables eager execution, then runs
# one evaluation pass and wraps it (with the epoch) in a ValidationResult.
def compute_validation_result(args, best_metric, epoch): tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL) logging.getLogger().setLevel(logging.INFO) filter_keras_warnings() tf.compat.v1.disable_eager_execution() evaluation = create_evaluation(args, best_metric) return ValidationResult(evaluation, epoch)
# Unit tests for pywebcopy.parsers.ElementBase: CSRF-attribute stripping,
# URL replacement in an attribute (which also drops integrity attributes),
# inline <style> text, and a full @font-face src list at a known offset.
class TestElementBase(unittest.TestCase): def test_remove_csrf_checks(self): token = 'token' e = pywebcopy.parsers.ElementBase('link') e.set('href', '#') e.set('crossorigin', token) self.assertEqual(e.attrib.get('crossorigin'), token) e.remove_csrf_checks() self.assertEqual(e.attrib.get('crossorigin'), None) def test_replace_url_in_attrib(self): e = pywebcopy.parsers.ElementBase('link') url = '#' e.set('href', url) e.set('crossorigin', 'hash') e.replace_url(url, 'new', 'href', 0) self.assertEqual(e.attrib.get('href'), 'new') self.assertEqual(e.attrib.get('crossorigin'), None) def test_replace_url_in_text(self): e = pywebcopy.parsers.ElementBase('style') url = '"#"' e.text = ('html {background: url(%s);}' % url) e.replace_url(url, '"new"', None, 22) self.assertEqual(e.text, 'html {background: url("new");}') def test_replace_url_in_style_tag(self): e = pywebcopy.parsers.ElementBase('style') e.text = "\n -face {\n font-family:'fontawesome';\n src:url('../lib/fonts/fontawesome.eot?#iefix') format('embedded-opentype'),\n url('../lib/fonts/fontawesome.woff?') format('woff'),\n url('../lib/fonts/fontawesome.ttf?') format('truetype'),\n url('../lib/fonts/fontawesome.svg?#fontawesome') format('svg');\n font-style:normal;\n }\n " e.replace_url('../lib/fonts/fontawesome.svg?#fontawesome', '#', None, 305) self.assertEqual(e.text, "\n -face {\n font-family:'fontawesome';\n src:url('../lib/fonts/fontawesome.eot?#iefix') format('embedded-opentype'),\n url('../lib/fonts/fontawesome.woff?') format('woff'),\n url('../lib/fonts/fontawesome.ttf?') format('truetype'),\n url('#') format('svg');\n font-style:normal;\n }\n ")
# AIMET acceptance tests: build HuggingFace BERT / DistilBERT TF1-session
# graphs, wrap them in QuantizationSimModel, compute encodings via a tokenizer
# driven forward pass, then verify that exactly the expected QcQuantize ops
# exist with the expected enabled/disabled state (embeddings, LayerNorm
# beta/gamma, GeLU output, Q/K/V dense output/weight/bias) and that no
# unexpected transformer ops were quantized.
# NOTE(review): the leading `.tf2` is a garbled `@pytest.mark.tf2`-style
# decorator from text extraction, and several statements below are split
# mid-expression across the following lines by the same extraction.
.tf2 class TransformerQuantizationAcceptanceTests(unittest.TestCase): def test_hf_bert_with_tokenizer(self): tf.compat.v1.reset_default_graph() tokenizer = BertTokenizer.from_pretrained('./data/huggingface/bert-base-uncased') configuration = BertConfig(num_hidden_layers=1) model = TFBertModel(configuration) input_ids = tf.keras.Input([512], dtype=tf.int32, name='input_ids') encoded = transformers.BatchEncoding({'input_ids': input_ids}) outputs = model(encoded) sess = tf.compat.v1.Session() initialize_uninitialized_vars(sess) sim = QuantizationSimModel(sess, [input_ids.op.name], [outputs['pooler_output'].op.name], use_cuda=False) def dummy_forward_pass(sess, args): model_output = sess.graph.get_tensor_by_name('tf_bert_model/bert/pooler/dense/Tanh_quantized:0') encoded_value = tokenizer('Hello World', return_tensors='np', padding='max_length') model_inputs = {sess.graph.get_tensor_by_name('input_ids:0'): encoded_value['input_ids']} sess.run(model_output, model_inputs) sim.compute_encodings(dummy_forward_pass, None) quant_ops = {op.name: op for op in sim.session.graph.get_operations() if (op.type == 'QcQuantize')} quant_ops_to_check = [] embedding_path = 'tf_bert_model/bert/embeddings' embedding_add_1_quant = ('{}/add/add_1_quantized'.format(embedding_path), True) embedding_add_quant = ('{}/add/add_quantized'.format(embedding_path), True) embedding_token_quant = ('{}/Gather_2_quantized'.format(embedding_path), True) embedding_position_quant = ('{}/Identity_1_quantized'.format(embedding_path), True) embedding_word_quant = ('{}/Gather_quantized'.format(embedding_path), True) quant_ops_to_check += [embedding_add_1_quant, embedding_add_quant, embedding_word_quant, embedding_token_quant, embedding_position_quant] layernorm_paths = ['tf_bert_model/bert/embeddings', 'tf_bert_model/bert/encoder/layer_._0/attention/output', 'tf_bert_model/bert/encoder/layer_._0/output'] for layernorm_path in layernorm_paths: output_quant_op = 
('{}/LayerNorm/batchnorm/add_1_quantized'.format(layernorm_path), True) beta_quant_op = ('{}/LayerNorm/batchnorm/ReadVariableOp_quantized'.format(layernorm_path), False) gamma_quant_op = ('{}/LayerNorm/batchnorm/mul/ReadVariableOp_quantized'.format(layernorm_path), True) quant_ops_to_check += [output_quant_op, beta_quant_op, gamma_quant_op] gelu_path = 'tf_bert_model/bert/encoder/layer_._0/intermediate' output_quant_op = ('{}/Gelu/mul_1_quantized'.format(gelu_path), True) quant_ops_to_check += [output_quant_op] self_attention_path = 'tf_bert_model/bert/encoder/layer_._0/attention/self' for dense_type in ['query', 'key', 'value']: output_quant_op = ('{}/{}/BiasAdd_quantized'.format(self_attention_path, dense_type), True) parameter_quant_op = ('{}/{}/Tensordot/ReadVariableOp_quantized'.format(self_attention_path, dense_type), True) bias_quant_op = ('{}/{}/BiasAdd/ReadVariableOp_quantized'.format(self_attention_path, dense_type), False) quant_ops_to_check += [output_quant_op, parameter_quant_op, bias_quant_op] for (quant_op_name, enabled) in quant_ops_to_check: quant_op = quant_ops.pop(quant_op_name) self.assertTrue(quant_op) self.assertTrue(check_encoding(quant_op, sim, enabled)) for quant_op_name in quant_ops.keys(): self.assertTrue(all(((x not in quant_op_name) for x in ['LayerNorm', 'Gelu', 'query', 'key', 'value']))) del sim sess.close() def test_hf_distilbert_with_tokenizer(self): tf.compat.v1.reset_default_graph() tokenizer = DistilBertTokenizer.from_pretrained('./data/huggingface/distilbert-base-uncased') configuration = DistilBertConfig(n_layers=1) model = TFDistilBertModel(configuration) input_ids = tf.keras.Input([512], dtype=tf.int32, name='input_ids') encoded = transformers.BatchEncoding({'input_ids': input_ids}) outputs = model(encoded) sess = tf.compat.v1.Session() initialize_uninitialized_vars(sess) sim = QuantizationSimModel(sess, [input_ids.op.name], [outputs['last_hidden_state'].op.name], use_cuda=False) def dummy_forward_pass(sess, args): 
model_output = sess.graph.get_tensor_by_name((outputs['last_hidden_state'].op.name + '_quantized:0')) encoded_value = tokenizer('Hello World', return_tensors='np', padding='max_length') model_inputs = {sess.graph.get_tensor_by_name((input_ids.op.name + ':0')): encoded_value['input_ids']} sess.run(model_output, model_inputs) sim.compute_encodings(dummy_forward_pass, None) quant_ops = {op.name: op for op in sim.session.graph.get_operations() if (op.type == 'QcQuantize')} quant_ops_to_check = [] layernorm_paths = ['tf_distil_bert_model/distilbert/embeddings/LayerNorm', 'tf_distil_bert_model/distilbert/transformer/layer_._0/sa_layer_norm', 'tf_distil_bert_model/distilbert/transformer/layer_._0/output_layer_norm'] for layernorm_path in layernorm_paths: output_quant_op = ('{}/batchnorm/add_1_quantized'.format(layernorm_path), True) beta_quant_op = ('{}/batchnorm/ReadVariableOp_quantized'.format(layernorm_path), False) gamma_quant_op = ('{}/batchnorm/mul/ReadVariableOp_quantized'.format(layernorm_path), True) quant_ops_to_check += [output_quant_op, beta_quant_op, gamma_quant_op] gelu_path = 'tf_distil_bert_model/distilbert/transformer/layer_._0/ffn/Gelu' output_quant_op = ('{}/mul_1_quantized'.format(gelu_path), True) quant_ops_to_check += [output_quant_op] self_attention_path = 'tf_distil_bert_model/distilbert/transformer/layer_._0/attention' for dense_type in ['q_lin', 'k_lin', 'v_lin']: output_quant_op = ('{}/{}/BiasAdd_quantized'.format(self_attention_path, dense_type), True) parameter_quant_op = ('{}/{}/Tensordot/ReadVariableOp_quantized'.format(self_attention_path, dense_type), True) bias_quant_op = ('{}/{}/BiasAdd/ReadVariableOp_quantized'.format(self_attention_path, dense_type), False) quant_ops_to_check += [output_quant_op, parameter_quant_op, bias_quant_op] for (quant_op_name, enabled) in quant_ops_to_check: quant_op = quant_ops.pop(quant_op_name) self.assertTrue(quant_op) self.assertTrue(check_encoding(quant_op, sim, enabled)) for quant_op_name in 
quant_ops.keys(): self.assertTrue(all(((x not in quant_op_name) for x in ['layer_norm', 'Gelu', 'q_lin', 'k_lin', 'v_lin']))) del sim sess.close()
class ViTFeatureExtractionTester(unittest.TestCase):
    """Container for the configuration knobs shared by ViT feature-extraction tests."""

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=18, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        # Stash every constructor argument on the instance under its own name.
        config = dict(parent=parent, batch_size=batch_size, num_channels=num_channels, image_size=image_size, min_resolution=min_resolution, max_resolution=max_resolution, do_resize=do_resize, size=size, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std)
        for attr_name, attr_value in config.items():
            setattr(self, attr_name, attr_value)

    def prepare_feat_extract_dict(self):
        """Return the kwargs a ViT feature extractor would be constructed from."""
        return dict(image_mean=self.image_mean, image_std=self.image_std, do_normalize=self.do_normalize, do_resize=self.do_resize, size=self.size)
# Evennia unconnected-state help command: sends the static pre-login help
# screen (create/connect/look/help/quit instructions) to the caller.
# The help text below is runtime output and must not be edited casually.
class CmdUnconnectedHelp(MuxCommand): key = 'help' aliases = ['h', '?'] locks = 'cmd:all()' def func(self): string = '\nYou are not yet logged into the game. Commands available at this point:\n |wcreate, connect, look, help, quit|n\n\nTo login to the system, you need to do one of the following:\n\n|w1)|n If you have no previous account, you need to use the \'create\'\n command like this:\n\n |wcreate "Anna the Barbarian" c67jHL8p|n\n\n It\'s always a good idea (not only here, but everywhere on the net)\n to not use a regular word for your password. Make it longer than\n 3 characters (ideally 6 or more) and mix numbers and capitalization\n into it.\n\n|w2)|n If you have an account already, either because you just created\n one in |w1)|n above or you are returning, use the \'connect\' command:\n\n |wconnect c67jHL8p|n\n\n This should log you in. Run |whelp|n again once you\'re logged in\n to get more aid. Hope you enjoy your stay!\n\nYou can use the |wlook|n command if you want to see the connect screen again.\n' self.caller.msg(string)
# Bandersnatch release-filter plugin that drops pre-release versions
# (rc / alpha / beta / dev suffixes) — either for every package, or only for
# those listed under config section filter_prerelease.packages.
# NOTE(review): `patterns` and `package_names` are mutable class attributes
# shared across instances; initialize_plugin only populates them when empty.
class PreReleaseFilter(FilterReleasePlugin): name = 'prerelease_release' PRERELEASE_PATTERNS = ('.+rc\\d+$', '.+a(lpha)?\\d+$', '.+b(eta)?\\d+$', '.+dev\\d+$') patterns: list[Pattern] = [] package_names: list[str] = [] def initialize_plugin(self) -> None: if (not self.patterns): self.patterns = [re.compile(pattern_string) for pattern_string in self.PRERELEASE_PATTERNS] logger.info(f'Initialized prerelease plugin with {self.patterns}') if (not self.package_names): try: lines = self.configuration['filter_prerelease']['packages'] self.package_names = [canonicalize_name(package_line.strip()) for package_line in lines.split('\n') if package_line.strip()] except KeyError: pass logger.info((f'Initialized prerelease plugin {self.name}, filtering ' + f"{(self.package_names if self.package_names else 'all packages')}")) def filter(self, metadata: dict) -> bool: name = metadata['info']['name'] version = metadata['version'] if (self.package_names and (name not in self.package_names)): return True return (not any((pattern.match(version) for pattern in self.patterns)))
# Locates the reana.yaml variant for an example/engine/backend combination:
# explicit override table first, then the conventional
# "reana[-engine][-backend].yaml" name. Uses `grep -q` to verify the file
# actually declares the requested workflow engine; returns '' on mismatch.
def get_example_reana_yaml_file_path(example, workflow_engine, compute_backend): reana_yaml_filename = EXAMPLE_NON_STANDARD_REANA_YAML_FILENAME.get(example, {}).get(workflow_engine, {}).get(compute_backend, {}) if (not reana_yaml_filename): reana_yaml_filename = 'reana{workflow_engine}{compute_backend}.yaml'.format(workflow_engine=('' if (workflow_engine == 'serial') else '-{}'.format(workflow_engine)), compute_backend=('' if (compute_backend == 'kubernetes') else '-{}'.format(compute_backend))) reana_yaml_filename_path = ((get_srcdir(example) + os.sep) + reana_yaml_filename) try: subprocess.check_output(['grep', '-q', 'type: {}'.format(workflow_engine), reana_yaml_filename_path], stderr=subprocess.DEVNULL) return reana_yaml_filename_path except subprocess.CalledProcessError: return ''
# pyperf harness entry point: reads benchmark name, entrypoint reference, and
# JSON-encoded parameters from PYBENCH_* environment variables, builds the
# benchmark plan (outside the timed region), collects garbage, then hands the
# plan's callable to pyperf.Runner.
def main(): bench_name = os.environ['PYBENCH_NAME'] func = load_by_object_ref(os.environ['PYBENCH_ENTRYPOINT']) params = json.loads(os.environ['PYBENCH_PARAMS']) benchmark_plan = func(*params) gc.collect() runner = pyperf.Runner() runner.bench_func(bench_name, benchmark_plan.func, *benchmark_plan.args)
class Datasets(Dataset):
    """Image-folder dataset yielding RGB tensors.

    In training mode images are randomly cropped to the configured
    (height, width) before tensor conversion; in test mode they are only
    converted to tensors.  ``config.train_data_dir`` / ``config.test_data_dir``
    are iterated as collections of directory paths — assumed list-like;
    TODO confirm against callers.
    """

    def __init__(self, config, train=False):
        if train:
            self.data_dir = config.train_data_dir
            (_, self.im_height, self.im_width) = config.image_dims
            self.transform = transforms.Compose([
                transforms.RandomCrop((self.im_height, self.im_width)),
                transforms.ToTensor(),
            ])
        else:
            self.data_dir = config.test_data_dir
            self.transform = transforms.Compose([transforms.ToTensor()])
        # Collect all jpg/png files from every configured directory, sorted for
        # a deterministic index -> file mapping.  (Local renamed from `dir`,
        # which shadowed the builtin.)
        self.imgs = []
        for directory in self.data_dir:
            for pattern in ('*.jpg', '*.png'):
                self.imgs += glob(os.path.join(directory, pattern))
        self.imgs.sort()

    def __getitem__(self, item):
        """Load the image at index *item* and return it as a transformed tensor."""
        path = self.imgs[item]
        image = Image.open(path).convert('RGB')
        return self.transform(image)

    def __len__(self):
        """Number of image files discovered at construction time."""
        return len(self.imgs)
def freeze_except_bn(model):
    """Freeze every part of *model* except its BatchNorm2d layers.

    Non-BN modules get all their parameters' grads disabled and are put in
    eval mode; BN layers keep trainable affine parameters and stay in train
    mode so running statistics continue to update.  Because ``modules()``
    visits parents before children, a BN layer is re-enabled after its
    containing module's blanket freeze.
    """
    for submodule in model.modules():
        if not isinstance(submodule, torch.nn.BatchNorm2d):
            # Blanket freeze: covers this module's own and nested parameters.
            for parameter in submodule.parameters():
                parameter.requires_grad_(False)
            submodule.eval()
            continue
        if hasattr(submodule, 'weight'):
            submodule.weight.requires_grad_(True)
        if hasattr(submodule, 'bias'):
            submodule.bias.requires_grad_(True)
        submodule.train()
def colorize_strings(l):
    """Yield *l* split around single-quoted substrings.

    Each completed 'quoted' span is re-emitted CYA-colored with its content
    passed through subst_path; an unmatched opening quote is yielded back
    verbatim with the remainder of the line.
    """
    before, quote, rest = l.partition("'")
    if not quote:
        # No quote at all: pass the text through untouched.
        yield l
        return
    yield before
    inner, closing, tail = rest.partition("'")
    if not closing:
        # Dangling open quote: restore it and stop.
        yield "'" + rest
        return
    yield CYA + "'" + subst_path(inner) + "'" + RST
    yield from colorize_strings(tail)
# visualqc CLI entry for the Freesurfer QC module: runs shared pre-flight
# checks, builds the workflow from user options, and runs it with interactive
# matplotlib; raises ValueError when the options yield no valid vis_type.
def cli_run(): print('\nFreesurfer QC module') from visualqc.utils import run_common_utils_before_starting run_common_utils_before_starting() wf = make_workflow_from_user_options() if (wf.vis_type is not None): import matplotlib matplotlib.interactive(True) wf.run() else: raise ValueError('Invalid state for visualQC!\n\t Ensure proper combination of arguments is used.') return
# Checks ResultABC.__repr__ formatting through a minimal concrete subclass:
# alternatives as the header row, values labelled by _skcriteria_result_series,
# and the method name in the trailing bracket.
def test_ResultBase_repr(): class TestResult(ResultABC): _skcriteria_result_series = 'foo' def _validate_result(self, values): pass method = 'test_method' alternatives = ['a', 'b', 'c'] rank = [1, 2, 3] extra = {'alfa': 1} result = TestResult(method=method, alternatives=alternatives, values=rank, extra=extra) expected = 'Alternatives a b c\nfoo 1 2 3\n[Method: test_method]' assert (repr(result) == expected)
def detect_compute_compatibility(CUDA_HOME, so_file):
    """Best-effort report of the CUDA compute capabilities baked into *so_file*.

    Runs ``cuobjdump --list-elf`` from *CUDA_HOME* and extracts the ``sm_XY``
    architecture tags, returning them as a sorted comma-separated string such
    as ``"7.0, 7.5"``.  Returns *so_file* annotated with a note when cuobjdump
    is missing, or *so_file* alone when anything else goes wrong.
    """
    try:
        cuobjdump = os.path.join(CUDA_HOME, 'bin', 'cuobjdump')
        if not os.path.isfile(cuobjdump):
            return so_file + '; cannot find cuobjdump'
        # Argument list instead of a shell-interpolated string: robust against
        # spaces/quotes in paths and avoids shell injection.
        output = subprocess.check_output([cuobjdump, '--list-elf', so_file])
        archs = set()
        for line in output.decode('utf-8').strip().split('\n'):
            digits = re.findall('\\.sm_([0-9]*)\\.', line)[0]
            # Insert dots between digits: "75" -> "7.5".
            # NOTE(review): a three-digit tag (e.g. sm_100) renders as "1.0.0",
            # same as the original behavior — confirm desired format.
            archs.add('.'.join(digits))
        return ', '.join(sorted(archs))
    except Exception:
        # Deliberately best-effort diagnostics: any failure degrades to the
        # bare filename rather than raising.
        return so_file
# AST visitor that collects every `yield from` expression in a function body,
# recording alongside each one whether it was visited while inside an
# assignment statement (i.e. its value is consumed).
class YieldFromCollector(FuncCollectorBase): def __init__(self) -> None: super().__init__() self.in_assignment = False self.yield_from_expressions: list[tuple[(YieldFromExpr, bool)]] = [] def visit_assignment_stmt(self, stmt: AssignmentStmt) -> None: self.in_assignment = True super().visit_assignment_stmt(stmt) self.in_assignment = False def visit_yield_from_expr(self, expr: YieldFromExpr) -> None: self.yield_from_expressions.append((expr, self.in_assignment))
def make_md(lst, method, split='train', image_size=126, **kwargs):
    """Build a meta-dataset input pipeline over the record dirs in *lst*.

    Args:
        lst: dataset directory names under BASE_PATH.
        method: 'episodic' for a variable-ways/shots episode pipeline,
            'batch' for a plain batched pipeline (needs kwargs['batch_size']).
        split: 'train' | 'val' | 'test'.
        image_size: side length images are resized to.

    Returns:
        The episodic or batched multisource pipeline.

    Raises:
        ValueError: for an unknown *split* or *method*.
    """
    if split == 'train':
        SPLIT = learning_spec.Split.TRAIN
    elif split == 'val':
        SPLIT = learning_spec.Split.VALID
    elif split == 'test':
        SPLIT = learning_spec.Split.TEST
    else:
        # Fix: an unknown split used to leave SPLIT unbound and crash later
        # with a confusing NameError.
        raise ValueError('Unknown split: {!r}'.format(split))
    all_dataset_specs = []
    for dataset_name in lst:
        dataset_records_path = os.path.join(BASE_PATH, dataset_name)
        all_dataset_specs.append(dataset_spec_lib.load_dataset_spec(dataset_records_path))
    if method == 'episodic':
        use_bilevel_ontology_list = [False] * len(lst)
        use_dag_ontology_list = [False] * len(lst)
        for i, s in enumerate(lst):
            # ImageNet samples ways through its DAG ontology; Omniglot uses
            # its alphabet/character bi-level ontology.
            if s == 'ilsvrc_2012':
                use_dag_ontology_list[i] = True
            if s == 'omniglot':
                use_bilevel_ontology_list[i] = True
        variable_ways_shots = config.EpisodeDescriptionConfig(
            num_query=None, num_support=None, num_ways=None)
        return pipeline.make_multisource_episode_pipeline(
            dataset_spec_list=all_dataset_specs,
            use_dag_ontology_list=use_dag_ontology_list,
            use_bilevel_ontology_list=use_bilevel_ontology_list,
            episode_descr_config=variable_ways_shots,
            split=SPLIT,
            image_size=image_size,
        )
    if method == 'batch':
        return pipeline.make_multisource_batch_pipeline(
            dataset_spec_list=all_dataset_specs,
            batch_size=kwargs['batch_size'],
            split=SPLIT,
            image_size=image_size,
            add_dataset_offset=False,
        )
    # Fix: an unknown method used to fall through and silently return None.
    raise ValueError('Unknown method: {!r}'.format(method))
# NOTE(review): the leading '.skipif'/'.parametrize' calls look like stripped
# '@pytest.mark' decorator prefixes -- confirm against the upstream source.
.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
.parametrize('loss_class', [BCConvexGIoULoss, ConvexGIoULoss, KLDRepPointsLoss])
def test_convex_regression_losses(loss_class):
    """Each convex-IoU loss returns a Tensor for plain, weighted,
    reduction-overridden and avg_factor invocations (CUDA only)."""
    pred = torch.rand((10, 18)).cuda()
    target = torch.rand((10, 8)).cuda()
    weight = torch.rand((10,)).cuda()
    loss = loss_class()(pred, target)
    assert isinstance(loss, torch.Tensor)
    loss = loss_class()(pred, target, weight)
    assert isinstance(loss, torch.Tensor)
    loss = loss_class()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    loss = loss_class()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
# NOTE(review): the leading '.parametrize' call looks like a stripped
# '@pytest.mark' decorator prefix -- confirm against the upstream source.
.parametrize('filename,feedback_to_output', [('bol_eol.txt', False), ('characterclass.txt', False), ('dotstar.txt', False), ('extension_notation.txt', False), ('from_cmdloop.txt', True), ('multiline_no_regex.txt', False), ('multiline_regex.txt', False), ('no_output.txt', False), ('no_output_last.txt', False), ('regex_set.txt', False), ('singleslash.txt', False), ('slashes_escaped.txt', False), ('slashslash.txt', False), ('spaces.txt', False), ('word_boundaries.txt', False)])
def test_transcript(request, capsys, filename, feedback_to_output):
    """Replay a recorded transcript via the '-t' flag and check the
    unittest-style summary ('Ran 1 test in ... OK') lands on stderr and the
    app exits with code 0."""
    test_dir = os.path.dirname(request.module.__file__)
    transcript_file = os.path.join(test_dir, 'transcripts', filename)
    testargs = ['prog', '-t', transcript_file]
    with mock.patch.object(sys, 'argv', testargs):
        app = CmdLineApp()
        app.feedback_to_output = feedback_to_output
        sys_exit_code = app.cmdloop()
        assert (sys_exit_code == 0)
    expected_start = '.\n\nRan 1 test in'
    expected_end = 's\n\nOK\n'
    # the transcript runner reports on stderr, not stdout
    (_, err) = capsys.readouterr()
    assert err.startswith(expected_start)
    assert err.endswith(expected_end)
def default_setup(cfg, args):
    """Common startup: create the output directory, configure logging, dump
    environment/config info, seed RNGs, and set cudnn benchmark mode
    (training runs only)."""
    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        mkdir(output_dir)
    rank = comm.get_rank()
    logger = setup_logger(output_dir, rank, file_name='log_{}.txt'.format(cfg.START_TIME))
    logger.info('Using {} GPUs'.format(args.num_gpus))
    logger.info('Collecting environment info')
    logger.info('\n' + collect_env_info())
    logger.info(args)
    logger.info('Loaded configuration file {}'.format(args.config_file))
    with open(args.config_file, 'r') as cf:
        logger.info('\n' + cf.read())
    logger.info('Running with config:\n{}'.format(cfg))
    # negative SEED means "random"; otherwise offset by rank so workers differ
    seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)
    eval_only = getattr(args, 'eval_only', False)
    if not eval_only:
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
def transform_func_def(builder: IRBuilder, fdef: FuncDef) -> None:
    """Compile *fdef* to IR, bind the resulting function object (if one was
    produced) to its target, and register the generated function."""
    sig = builder.mapper.fdef_to_sig(fdef)
    func_ir, func_reg = gen_func_item(builder, fdef, fdef.name, sig)
    if func_reg:
        target = get_func_target(builder, fdef)
        builder.assign(target, func_reg, fdef.line)
    maybe_insert_into_registry_dict(builder, fdef)
    builder.add_function(func_ir, fdef.line)
def make_optimizer(model: nn.Module) -> Optimizer:
    """Instantiate the optimizer named by ``configs.optimizer`` for *model*.

    Raises NotImplementedError for unknown optimizer names.
    """
    opt_cfg = configs.optimizer
    if opt_cfg.name == 'sgd':
        return torch.optim.SGD(
            model.parameters(),
            lr=opt_cfg.lr,
            momentum=opt_cfg.momentum,
            weight_decay=opt_cfg.weight_decay,
            nesterov=opt_cfg.nesterov,
        )
    if opt_cfg.name == 'adam':
        return torch.optim.Adam(
            model.parameters(),
            lr=opt_cfg.lr,
            weight_decay=opt_cfg.weight_decay,
        )
    if opt_cfg.name == 'adamw':
        return torch.optim.AdamW(
            model.parameters(),
            lr=opt_cfg.lr,
            weight_decay=opt_cfg.weight_decay,
        )
    raise NotImplementedError(opt_cfg.name)
class XWBOExchangeCalendar(TradingCalendar):
    """Trading calendar for the Vienna Stock Exchange (XWBO).

    Open 09:01-17:30, Europe/Vienna time.
    """
    name = 'XWBO'
    tz = timezone('Europe/Vienna')
    # (start_date, time) pairs; None means "from the beginning"
    open_times = ((None, time(9, 1)),)
    close_times = ((None, time(17, 30)),)

    # NOTE(review): in trading-calendars this hook is normally a @property --
    # the decorator may have been stripped; confirm against upstream source.
    def regular_holidays(self):
        """Recurring Austrian market holidays."""
        return HolidayCalendar([NewYearsDay, Epiphany, GoodFriday, EasterMonday, AscensionDay, WhitMonday, CorpusChristi, LabourDay, AssumptionDay, NationalHoliday, AllSaintsDay, ImmaculateConception, ChristmasEve, Christmas, SaintStephensDay, NewYearsEveThrough2015, NewYearsEve2016Onwards])
# NOTE(review): '(scope=...)' looks like a stripped '@pytest.fixture'
# decorator prefix -- confirm against the upstream source.
(scope='module')
def venue():
    """Module-scoped Venue fixture built from the TestVenueBase constants."""
    return Venue(TestVenueBase.location, TestVenueBase.title, TestVenueBase.address, foursquare_id=TestVenueBase.foursquare_id, foursquare_type=TestVenueBase.foursquare_type, google_place_id=TestVenueBase.google_place_id, google_place_type=TestVenueBase.google_place_type)
def sub_new():
    """Handle the ``new`` subcommand: create a session, optionally seeded
    from a playlist file given as the next CLI argument.

    Returns a shell-style exit code (0 on success, 1 on missing argument).
    """
    if len(sys.argv) < 3:
        print('*** Error, missing argument.\n')
        print(subcommands_help['new'])
        return 1
    session = sys.argv[2]
    playlist_file = sys.argv[3] if len(sys.argv) > 3 else None
    fs.new_session(session, playlist_file)
    return 0
def extract_smis(library, smiles_col=0, title_line=True) -> List:
    """Read the SMILES column out of a CSV library file.

    Args:
        library: path to the CSV file; '.gz' files are opened transparently.
        smiles_col: index of the column containing SMILES strings.
        title_line: skip the first (header) row when True.

    Returns:
        List of SMILES strings; rows too short to have *smiles_col* are skipped.
    """
    if Path(library).suffix == '.gz':
        open_ = partial(gzip.open, mode='rt')
    else:
        open_ = open
    with open_(library) as fid:
        reader = csv.reader(fid)
        if title_line:
            next(reader)
        smis = []
        for row in tqdm(reader, desc='Getting smis', leave=False):
            try:
                smis.append(row[smiles_col])
            except IndexError:
                # Fix: a short row raises IndexError, not ValueError -- the old
                # handler never matched, so malformed rows crashed the read.
                continue
    return smis
class ToyDiscriminator(nn.Module):
    """Minimal conv discriminator for tests.

    Two conv + BatchNorm + LeakyReLU stages followed by a linear head with a
    sigmoid output; the linear layer assumes 4-channel 6x6 feature maps,
    i.e. 3x6x6 inputs.
    """

    def __init__(self):
        super().__init__()
        self.conv0 = nn.Conv2d(3, 4, 3, 1, 1, bias=True)
        self.bn0 = nn.BatchNorm2d(4, affine=True)
        self.conv1 = nn.Conv2d(4, 4, 3, 1, 1, bias=True)
        self.bn1 = nn.BatchNorm2d(4, affine=True)
        self.linear = nn.Linear(4 * 6 * 6, 1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        h = self.lrelu(self.bn0(self.conv0(x)))
        h = self.lrelu(self.bn1(self.conv1(h)))
        h = h.view(h.size(0), -1)
        return torch.sigmoid(self.linear(h))
def process_npy():
    """Convert the imagesTr/labelsTr (and *Va) NIfTI volumes into .npy files.

    Each volume is resized to 125% of the configured patch size per axis
    (trilinear for images, nearest for labels) and written under
    <config.save_dir>/npy as <id>_image.npy / <id>_label.npy.
    """
    if (not os.path.exists(os.path.join(config.save_dir, 'npy'))):
        os.makedirs(os.path.join(config.save_dir, 'npy'))
    for tag in ['Tr', 'Va']:
        img_ids = []
        for path in tqdm(glob.glob(os.path.join(config.base_dir, f'images{tag}', '*.nii.gz'))):
            print(path)
            # NOTE(review): '/'-split assumes POSIX paths, and the '.'-split
            # keeps only the part before the FIRST dot of the file name --
            # confirm IDs contain no dots.
            img_id = path.split('/')[(- 1)].split('.')[0]
            print(img_id)
            img_ids.append(img_id)
            image_path = os.path.join(config.base_dir, f'images{tag}', f'{img_id}.nii.gz')
            label_path = os.path.join(config.base_dir, f'labels{tag}', f'{img_id}.nii.gz')
            # target shape = patch_size + patch_size // 4 on each axis (~125%)
            resize_shape = ((config.patch_size[0] + (config.patch_size[0] // 4)), (config.patch_size[1] + (config.patch_size[1] // 4)), (config.patch_size[2] + (config.patch_size[2] // 4)))
            image = read_nifti(image_path)
            label = read_nifti(label_path)
            image = image.astype(np.float32)
            label = label.astype(np.int8)
            # add batch and channel dims required by F.interpolate
            image = torch.FloatTensor(image).unsqueeze(0).unsqueeze(0)
            label = torch.FloatTensor(label).unsqueeze(0).unsqueeze(0)
            image = F.interpolate(image, size=resize_shape, mode='trilinear', align_corners=False)
            # nearest-neighbor keeps label values integral
            label = F.interpolate(label, size=resize_shape, mode='nearest')
            image = image.squeeze().numpy()
            label = label.squeeze().numpy()
            np.save(os.path.join(config.save_dir, 'npy', f'{img_id}_image.npy'), image)
            np.save(os.path.join(config.save_dir, 'npy', f'{img_id}_label.npy'), label)
class Proxy(BaseType):
    """Config type for a proxy setting: 'system', 'none', or a proxy URL."""

    def __init__(self, *, none_ok: bool=False, completions: _Completions=None) -> None:
        super().__init__(none_ok=none_ok, completions=completions)
        # other values (URLs) are allowed besides the two named ones
        self.valid_values = ValidValues(('system', 'Use the system wide proxy.'), ('none', "Don't use any proxy"), others_permitted=True)

    def to_py(self, value: _StrUnset) -> Union[(_UnsetNone, QNetworkProxy, _SystemProxy, pac.PACFetcher)]:
        """Convert the config string to a proxy object, validating the URL."""
        self._basic_py_validation(value, str)
        if isinstance(value, usertypes.Unset):
            return value
        elif (not value):
            return None
        try:
            if (value == 'system'):
                return SYSTEM_PROXY
            if (value == 'none'):
                url = QUrl('direct://')
            else:
                assert (self.valid_values is not None)
                assert (value not in self.valid_values), value
                url = QUrl(value)
            return urlutils.proxy_from_url(url)
        except (urlutils.InvalidUrlError, urlutils.InvalidProxyTypeError) as e:
            raise configexc.ValidationError(value, e)

    def complete(self) -> _Completions:
        """Return completion entries: explicit completions if configured,
        else the valid values plus example proxy URLs."""
        if (self._completions is not None):
            return self._completions
        assert (self.valid_values is not None)
        out = []
        for val in self.valid_values:
            out.append((val, self.valid_values.descriptions[val]))
        # NOTE(review): the entries below look garbled -- example 'http://...'
        # URL prefixes appear stripped by preprocessing; restore from upstream.
        out.append((' 'HTTP proxy URL'))
        out.append(('socks://', 'SOCKS proxy URL'))
        out.append(('socks://localhost:9050/', 'Tor via SOCKS'))
        out.append((' 'Local HTTP proxy'))
        out.append(('pac+ 'Proxy autoconfiguration file URL'))
        return out
class CertificateSigningRequestBuilder():
    """Builder for X.509 certificate signing requests.

    Immutable builder style: every setter returns a NEW builder instance
    with the extra data; the original is never mutated (which is why the
    shared mutable defaults below are safe -- they are only read).
    """

    def __init__(self, subject_name: (Name | None)=None, extensions: list[Extension[ExtensionType]]=[], attributes: list[tuple[(ObjectIdentifier, bytes, (int | None))]]=[]):
        self._subject_name = subject_name
        self._extensions = extensions
        self._attributes = attributes

    def subject_name(self, name: Name) -> CertificateSigningRequestBuilder:
        """Set the CSR subject name; may only be set once."""
        if (not isinstance(name, Name)):
            raise TypeError('Expecting x509.Name object.')
        if (self._subject_name is not None):
            raise ValueError('The subject name may only be set once.')
        return CertificateSigningRequestBuilder(name, self._extensions, self._attributes)

    def add_extension(self, extval: ExtensionType, critical: bool) -> CertificateSigningRequestBuilder:
        """Append an extension, rejecting a duplicate OID."""
        if (not isinstance(extval, ExtensionType)):
            raise TypeError('extension must be an ExtensionType')
        extension = Extension(extval.oid, critical, extval)
        _reject_duplicate_extension(extension, self._extensions)
        return CertificateSigningRequestBuilder(self._subject_name, [*self._extensions, extension], self._attributes)

    def add_attribute(self, oid: ObjectIdentifier, value: bytes, *, _tag: (_ASN1Type | None)=None) -> CertificateSigningRequestBuilder:
        """Append a CSR attribute, optionally with an explicit ASN.1 tag."""
        if (not isinstance(oid, ObjectIdentifier)):
            raise TypeError('oid must be an ObjectIdentifier')
        if (not isinstance(value, bytes)):
            raise TypeError('value must be bytes')
        if ((_tag is not None) and (not isinstance(_tag, _ASN1Type))):
            raise TypeError('tag must be _ASN1Type')
        _reject_duplicate_attribute(oid, self._attributes)
        if (_tag is not None):
            tag = _tag.value
        else:
            tag = None
        return CertificateSigningRequestBuilder(self._subject_name, self._extensions, [*self._attributes, (oid, value, tag)])

    def sign(self, private_key: CertificateIssuerPrivateKeyTypes, algorithm: (_AllowedHashTypes | None), backend: typing.Any=None, *, rsa_padding: ((padding.PSS | padding.PKCS1v15) | None)=None) -> CertificateSigningRequest:
        """Sign the CSR with *private_key*; rsa_padding is RSA-only."""
        if (self._subject_name is None):
            raise ValueError('A CertificateSigningRequest must have a subject')
        if (rsa_padding is not None):
            if (not isinstance(rsa_padding, (padding.PSS, padding.PKCS1v15))):
                raise TypeError('Padding must be PSS or PKCS1v15')
            if (not isinstance(private_key, rsa.RSAPrivateKey)):
                raise TypeError('Padding is only supported for RSA keys')
        return rust_x509.create_x509_csr(self, private_key, algorithm, rsa_padding)
class LogHandler(Handler):
    """logging.Handler that re-emits formatted records through a Qt signal,
    so log output can be consumed safely from the GUI thread."""

    class Emitter(QtCore.QObject):
        # carries one formatted log message per emission
        record = QtCore.Signal(object)

    def __init__(self):
        super().__init__()
        self.emitter = self.Emitter()

    def connect(self, *args, **kwargs):
        """Forward to the underlying signal's connect()."""
        return self.emitter.record.connect(*args, **kwargs)

    def emit(self, record):
        # format() applies this handler's configured formatter first
        self.emitter.record.emit(self.format(record))
def test_no_ub_terms_default(methanol):
    """A fresh system exports no AmoebaUreyBradleyForce element; adding a
    parameter for every angle makes the element appear."""
    assert methanol.UreyBradleyForce.n_parameters == 0
    root = methanol._build_forcefield().getroot()
    assert root.find('AmoebaUreyBradleyForce') is None
    for angle in methanol.angles:
        methanol.UreyBradleyForce.create_parameter(angle, k=1, d=2)
    root = methanol._build_forcefield().getroot()
    force = root.find('AmoebaUreyBradleyForce')
    assert force.tag == 'AmoebaUreyBradleyForce'
class ResidualAttentionNet_56(nn.Module):
    """Residual Attention Network (depth 56) face-embedding backbone.

    Three residual/attention stages followed by three residual blocks and a
    BN + dropout + linear output head; the head assumes 7x7 final feature
    maps and produces *feature_dim*-dimensional embeddings.
    """

    def __init__(self, feature_dim=512, drop_ratio=0.4):
        super(ResidualAttentionNet_56, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )
        self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # stage 1: 64 -> 256 channels
        self.residual_block1 = ResidualBlock(64, 256)
        self.attention_module1 = AttentionModule_stage1(256, 256)
        # stage 2: downsample, 256 -> 512
        self.residual_block2 = ResidualBlock(256, 512, 2)
        self.attention_module2 = AttentionModule_stage2(512, 512)
        # stage 3: downsample again
        self.residual_block3 = ResidualBlock(512, 512, 2)
        self.attention_module3 = AttentionModule_stage3(512, 512)
        self.residual_block4 = ResidualBlock(512, 512, 2)
        self.residual_block5 = ResidualBlock(512, 512)
        self.residual_block6 = ResidualBlock(512, 512)
        self.output_layer = nn.Sequential(
            nn.BatchNorm2d(512),
            nn.Dropout(drop_ratio),
            Flatten(),
            nn.Linear(512 * 7 * 7, feature_dim),
            nn.BatchNorm1d(feature_dim),
        )

    def forward(self, x):
        feat = self.mpool1(self.conv1(x))
        feat = self.attention_module1(self.residual_block1(feat))
        feat = self.attention_module2(self.residual_block2(feat))
        feat = self.attention_module3(self.residual_block3(feat))
        feat = self.residual_block4(feat)
        feat = self.residual_block5(feat)
        feat = self.residual_block6(feat)
        return self.output_layer(feat)
class Dereferer(SimpleDecrypter):
    """Universal link dereferer plugin: extracts the real target link from a
    redirect/dereferer URL."""

    __name__ = 'Dereferer'
    __type__ = 'decrypter'
    __version__ = '0.27'
    __status__ = 'testing'
    # NOTE(review): the pattern literal appears truncated by preprocessing
    # (URL regex stripped) -- restore from the upstream source.
    __pattern__ = '
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('folder_per_package', 'Default;Yes;No', 'Create folder for each package', 'Default'), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
    __description__ = 'Universal link dereferer'
    __license__ = 'GPLv3'
    __authors__ = [('Walter Purcaro', '')]

    PLUGIN_DOMAIN = None
    PLUGIN_NAME = None
    DIRECT_LINK = False

    def _log(self, level, plugintype, pluginname, args, kwargs):
        # prefix every log message with the derived plugin name
        args = ((self.PLUGIN_NAME,) + args)
        return super()._log(level, plugintype, pluginname, args, kwargs)

    def init(self):
        """Derive the domain-specific pattern and a CamelCase plugin name
        from the current pyfile URL."""
        self.__pattern__ = self.pyload.plugin_manager.decrypter_plugins[self.classname]['pattern']
        self.PLUGIN_DOMAIN = re.match(self.__pattern__, self.pyfile.url).group('DOMAIN').lower()
        self.PLUGIN_NAME = ''.join((part.capitalize() for part in re.split('\\.|\\d+|-', self.PLUGIN_DOMAIN) if (part != '.')))

    def get_links(self):
        """The captured LINK group is the single decrypted link."""
        return [self.info['pattern']['LINK']]
def readFile(handle, firstBytesOnly=False):
    """Drain a Win32 handle (typically a named pipe) via ReadFile.

    Reads 5 MiB chunks, accumulating partial messages on ERROR_MORE_DATA,
    until the peer closes the pipe (ERROR_BROKEN_PIPE). Returns the collected
    bytes, or None on any other ReadFile error. With *firstBytesOnly* set,
    returns after the first read instead of draining the handle.
    """
    logging.debug('Getting data on given handle (firstBytesOnly == {0})'.format(firstBytesOnly))
    BUFSIZE = 5242880  # 5 MiB read buffer
    data = b''
    buf = create_string_buffer(BUFSIZE)
    bytesRead = c_uint()
    while True:
        retVal = ReadFile(handle, byref(buf), sizeof(buf), byref(bytesRead), None)
        if retVal:
            data += buf.raw[:bytesRead.value]
        elif (GetLastError() == ERROR_MORE_DATA):
            # message larger than the buffer: keep the partial chunk and loop
            data += buf.raw[:bytesRead.value]
        elif (GetLastError() == ERROR_BROKEN_PIPE):
            logging.debug('ERROR_BROKEN_PIPE, communication stopped')
            break
        else:
            logging.error('Error reading from pipe: {0}'.format(getLastErrorMessage()))
            return None
        logging.debug('Data received from handle for the moment: {0}'.format(repr(data)))
        if (firstBytesOnly == True):
            logging.debug('firstBytesOnly is enabled, stop getting data')
            break
    logging.debug('Data received from handle: {0}'.format(repr(data)))
    return data
class TestRandomAccessIntVectorVectorReader(_TestRandomAccessReaders, unittest.TestCase, IntVectorVectorExampleMixin):
    """Random-access reader tests for the int-vector-vector example data."""

    def checkRead(self, reader):
        # known keys map to their list-of-int-vector payloads...
        self.assertEqual([[1]], reader['one'])
        self.assertEqual([], reader['three'])
        self.assertEqual([[1, 2], [3, 4]], reader['two'])
        # ...and a missing key raises KeyError
        with self.assertRaises(KeyError):
            reader['four']
# NOTE(review): the leading '.parametrize' calls look like stripped
# '@pytest.mark' decorator prefixes -- confirm against the upstream source.
.parametrize('file', ['CEUTrio.20.21.gatk3.4.g.vcf.bgz', 'CEUTrio.20.21.gatk3.4.g.vcf.bgz.tbi'])
.parametrize('is_path', [True, False])
def test_read_csi__invalid_csi(shared_datadir, file, is_path):
    """read_csi must reject inputs (bgzipped VCF, tabix index) that are not
    in CSI format."""
    with pytest.raises(ValueError, match='File not in CSI format.'):
        read_csi(path_for_test(shared_datadir, file, is_path))
# NOTE(review): the leading '.parametrize' call looks like a stripped
# '@pytest.mark' decorator prefix -- confirm against the upstream source.
.parametrize('key', FUNCTION_METHODS)
def test_given_function_set_then_autorange_enabled(resetted_dmm6500, key):
    """After selecting a measurement function, auto_range() enables autorange
    only for functions that support it."""
    # keys ending in 'ac' / '4W' need the matching selector keyword argument
    if (key[(- 2):] == 'ac'):
        getattr(resetted_dmm6500, FUNCTION_METHODS[key])(ac=True)
    elif (key[(- 2):] == '4W'):
        getattr(resetted_dmm6500, FUNCTION_METHODS[key])(wires=4)
    else:
        getattr(resetted_dmm6500, FUNCTION_METHODS[key])()
    assert (len(resetted_dmm6500.check_errors()) == 0)
    # autorange must start out disabled after a reset
    assert (resetted_dmm6500.auto_range_status() is False)
    resetted_dmm6500.auto_range()
    if (key in FUNCTIONS_HAVE_AUTORANGE):
        assert (resetted_dmm6500.auto_range_status() is True)
    else:
        assert (resetted_dmm6500.auto_range_status() is False)
def test_feature_no_src_layout(hatch, helpers, config_file, temp_dir):
    """`hatch new` with src-layout disabled puts the package directory at the
    project root (no src/ parent) and prints the expected file tree."""
    config_file.model.template.plugins['default']['src-layout'] = False
    config_file.save()
    project_name = 'My.App'
    with temp_dir.as_cwd():
        result = hatch('new', project_name)
    # the project dir name is the normalized form of the project name
    path = (temp_dir / 'my-app')
    expected_files = helpers.get_template_files('new.feature_no_src_layout', project_name)
    helpers.assert_files(path, expected_files)
    assert (result.exit_code == 0), result.output
    assert (remove_trailing_spaces(result.output) == helpers.dedent('\n my-app\n my_app\n __about__.py\n __init__.py\n tests\n __init__.py\n LICENSE.txt\n README.md\n pyproject.toml\n '))
class CustomColorizationTrain(CustomBase):
    """Training split for colorization.

    Image paths are listed one per line in *test_images_list_file*; crops
    are deterministic (random_crop disabled).
    """

    def __init__(self, size, test_images_list_file):
        super().__init__()
        with open(test_images_list_file, 'r') as handle:
            paths = handle.read().splitlines()
        self.data = ColorizationImagePaths(paths=paths, size=size, random_crop=False)
# NOTE(review): looks like a stripped '@add_metaclass(ABCMeta)' decorator --
# confirm against the upstream source.
_metaclass(ABCMeta)
class RepositoryDataInterface(object):
    """Abstract interface for repository data-access operations."""

    def get_repo(self, namespace_name, repository_name, user, include_tags=True, max_tags=500):
        """Return the repository, optionally with up to *max_tags* tags."""
    def repo_exists(self, namespace_name, repository_name):
        """Return whether the repository exists."""
    def create_repo(self, namespace, name, creating_user, description, visibility='private', repo_kind='image'):
        """Create a repository owned by *creating_user*."""
    def get_repo_list(self, starred, user, repo_kind, namespace, username, public, page_token, last_modified, popularity):
        """Return a (paginated) list of repositories matching the filters."""
    def set_repository_visibility(self, namespace_name, repository_name, visibility):
        """Change the repository's visibility."""
    def set_trust(self, namespace_name, repository_name, trust):
        """Enable or disable content trust for the repository."""
    def set_description(self, namespace_name, repository_name, description):
        """Update the repository description."""
    def mark_repository_for_deletion(self, namespace_name, repository_name, repository_gc_queue):
        """Queue the repository for garbage-collected deletion."""
    def check_repository_usage(self, user_name, plan_found):
        """Check the user's repository usage against their plan."""
    def set_repository_state(self, namespace_name, repository_name, state):
        """Set the repository state (e.g. normal / read-only / mirror)."""
    def add_quota_view(self, repos):
        """Attach quota information to the given repository views."""
class Color(BaseOption):
    """Option holding a color code.

    A valid color code renders no visible text once wrapped in |...|n and
    ANSI-stripped; deserialization rejects anything that leaves text behind.
    """

    def validate(self, value, **kwargs):
        return validatorfuncs.color(value, option_key=self.key, **kwargs)

    def display(self, **kwargs):
        # show the raw code next to a sample rendered in that color
        return f'{self.value} - |{self.value}this|n'

    def deserialize(self, save_data):
        if (not save_data) or strip_ansi(f'|{save_data}|n'):
            raise ValueError(f"{self.key} expected Color Code, got '{save_data}'")
        return save_data
def get_model(p):
    """Instantiate the segmentation model for the configured train dataset.

    Only PASCAL VOC is supported: either an FCN head (model_kwargs.use_fcn)
    or DeepLab v3 on top of the configured backbone; the class count is
    padded by one when a background class is present.
    """
    if 'VOCSegmentation' not in p['train_db_name']:
        raise ValueError('No model for train dataset {}'.format(p['train_db_name']))
    n_classes = p['num_classes'] + int(p['has_bg'])
    if p['model_kwargs'].get('use_fcn'):
        print('Using FCN for PASCAL')
        from models.fcn_model import Model
        return Model(get_backbone(p), n_classes)
    from models.deeplabv3_model import DeepLabV3
    print('Using DeepLab v3 for PASCAL')
    return DeepLabV3(get_backbone(p), n_classes)
class TestPEP673(TestNameCheckVisitorBase):
    """Checker tests for PEP 673 ``typing_extensions.Self`` support."""

    # NOTE(review): the bare '_passes()' calls below look like stripped
    # decorators (e.g. '@assert_passes()') -- confirm against upstream source.
    _passes()
    def test_instance_attribute(self):
        from typing_extensions import Self

        class X():
            parent: Self

            # NOTE(review): likely had a @property decorator stripped
            def prop(self) -> Self:
                raise NotImplementedError

        class Y(X):
            pass

        def capybara(x: X, y: Y):
            # Self resolves to the runtime class of the instance
            assert_is_value(x.parent, TypedValue(X))
            assert_is_value(y.parent, TypedValue(Y))
            assert_is_value(x.prop, TypedValue(X))
            assert_is_value(y.prop, TypedValue(Y))

    _passes()
    def test_method(self):
        from typing_extensions import Self

        class X():
            def ret(self) -> Self:
                return self

            # NOTE(review): likely had a @classmethod decorator stripped
            def from_config(cls) -> Self:
                return cls()

        class Y(X):
            pass

        def capybara(x: X, y: Y):
            assert_is_value(x.ret(), TypedValue(X))
            assert_is_value(y.ret(), TypedValue(Y))
            assert_is_value(X.from_config(), TypedValue(X))
            assert_is_value(Y.from_config(), TypedValue(Y))

    _passes()
    def test_parameter_type(self):
        from typing import Callable
        from typing_extensions import Self

        class Shape():
            def difference(self, other: Self) -> float:
                raise NotImplementedError

            def apply(self, f: Callable[([Self], None)]) -> None:
                raise NotImplementedError

        class Circle(Shape):
            pass

        def difference():
            s = Shape()
            s.difference(s)
            s.difference(1.0)
            s.difference(Circle())
            c = Circle()
            c.difference(c)
            c.difference(s)
            c.difference('x')

        def takes_shape(s: Shape) -> None:
            pass

        def takes_circle(c: Circle) -> None:
            pass

        def takes_int(i: int) -> None:
            pass

        def apply():
            s = Shape()
            c = Circle()
            s.apply(takes_shape)
            s.apply(takes_circle)
            s.apply(takes_int)
            c.apply(takes_shape)
            c.apply(takes_circle)
            c.apply(takes_int)

    _passes()
    def test_linked_list(self):
        from dataclasses import dataclass
        from typing import Generic, Optional, TypeVar
        from typing_extensions import Self
        T = TypeVar('T')

        # NOTE(review): likely had a @dataclass decorator stripped
        class LinkedList(Generic[T]):
            value: T
            next: Optional[Self] = None

        class OrdinalLinkedList(LinkedList[int]):
            pass

        def capybara(o: OrdinalLinkedList):
            # Self in the parent narrows to the subclass at the use site
            assert_is_value(o.next, (KnownValue(None) | TypedValue(OrdinalLinkedList)))

    _passes()
    def test_generic(self):
        from typing import Generic, TypeVar
        from typing_extensions import Self
        T = TypeVar('T')

        class Container(Generic[T]):
            value: T

            def set_value(self, value: T) -> Self:
                return self

        def capybara(c: Container[int]):
            assert_is_value(c.value, TypedValue(int))
            # Self keeps the generic parameterization
            assert_is_value(c.set_value(3), GenericValue(Container, [TypedValue(int)]))

    _passes()
    def test_classvar(self):
        from typing import ClassVar, List
        from typing_extensions import Self

        class Registry():
            children: ClassVar[List[Self]]

        def capybara():
            assert_is_value(Registry.children, GenericValue(list, [TypedValue(Registry)]))

    _passes()
    def test_stub(self):
        def capybara():
            from typing_extensions import assert_type
            from _pyanalyze_tests.self import X, Y
            x = X()
            y = Y()
            assert_type(x, X)
            assert_type(y, Y)

            def want_x(x: X):
                pass

            def want_y(y: Y):
                pass

            want_x(x.ret())
            want_y(y.ret())
            want_x(X.from_config())
            want_y(Y.from_config())

    _passes()
    def test_typeshed_self(self):
        def capybara():
            from typing_extensions import assert_type
            from _pyanalyze_tests.tsself import X
            x = X()
            assert_type(x, X)
def test_simple_while_no_else() -> None:
    """CFG of a bare while loop: a test block, a body block and an empty
    after-loop block, with edges test->body, body->test and test->exit."""
    src = '\n while n > 10:\n print(n)\n '
    cfg = build_cfg(src)
    assert _extract_blocks(cfg) == [['n > 10'], ['print(n)'], []]
    expected_edges = [
        [['n > 10'], ['print(n)']],
        [['print(n)'], ['n > 10']],
        [['n > 10'], []],
    ]
    assert _extract_edges(cfg) == expected_edges
class TestCloneReplace():
    """Behavior of clone_replace across the replace / rebuild_strict /
    copy_inputs_over parameter combinations."""

    def test_cloning_no_replace_strict_copy_inputs(self):
        # copy_inputs_over=True keeps the original input variables in the clone
        x = vector('x')
        y = vector('y')
        z = shared(0.25)
        f1 = ((z * ((x + y) ** 2)) + 5)
        f2 = clone_replace(f1, replace=None, rebuild_strict=True, copy_inputs_over=True)
        f2_inp = graph_inputs([f2])
        assert (z in f2_inp)
        assert (x in f2_inp)
        assert (y in f2_inp)

    def test_cloning_no_replace_strict_not_copy_inputs(self):
        # copy_inputs_over=False replaces every input with a fresh clone
        x = vector('x')
        y = vector('y')
        z = shared(0.25)
        f1 = ((z * ((x + y) ** 2)) + 5)
        f2 = clone_replace(f1, replace=None, rebuild_strict=True, copy_inputs_over=False)
        f2_inp = graph_inputs([f2])
        assert (z not in f2_inp)
        assert (x not in f2_inp)
        assert (y not in f2_inp)

    def test_cloning_replace_strict_copy_inputs(self):
        # replacing y with y2 (same dtype) works under rebuild_strict
        x = vector('x')
        y = vector('y')
        y2 = vector('y2')
        z = shared(0.25)
        f1 = ((z * ((x + y) ** 2)) + 5)
        f2 = clone_replace(f1, replace={y: y2}, rebuild_strict=True, copy_inputs_over=True)
        f2_inp = graph_inputs([f2])
        assert (z in f2_inp)
        assert (x in f2_inp)
        assert (y2 in f2_inp)

    def test_cloning_replace_not_strict_copy_inputs(self):
        # dtype-changing replacement (fvector -> dvector) needs rebuild_strict=False
        x = vector('x')
        y = fvector('y')
        y2 = dvector('y2')
        z = shared(0.25)
        f1 = ((z * ((x + y) ** 2)) + 5)
        f2 = clone_replace(f1, replace={y: y2}, rebuild_strict=False, copy_inputs_over=True)
        f2_inp = graph_inputs([f2])
        assert (z in f2_inp)
        assert (x in f2_inp)
        assert (y2 in f2_inp)

    def test_cloning_replace_strict_not_copy_inputs(self):
        # replace given as a list of pairs; inputs are cloned, not shared
        x = vector('x')
        y = vector('y')
        y2 = vector('y2')
        z = shared(0.25)
        f1 = ((z * ((x + y) ** 2)) + 5)
        f2 = clone_replace(f1, replace=[(y, y2)], rebuild_strict=True, copy_inputs_over=False)
        f2_inp = graph_inputs([f2])
        assert (z not in f2_inp)
        assert (x not in f2_inp)
        assert (y2 not in f2_inp)

    def test_cloning_replace_not_strict_not_copy_inputs(self):
        x = vector('x')
        y = fvector('y')
        y2 = dvector('y2')
        z = shared(0.25)
        f1 = ((z * ((x + y) ** 2)) + 5)
        f2 = clone_replace(f1, replace=[(y, y2)], rebuild_strict=False, copy_inputs_over=False)
        f2_inp = graph_inputs([f2])
        assert (z not in f2_inp)
        assert (x not in f2_inp)
        assert (y2 not in f2_inp)

    def test_clone(self):
        # cloning with x -> x + d must not be affected by whether y's graph
        # mentions y itself (0 * y keeps the value unchanged)
        def test(x, y, mention_y):
            if mention_y:
                d = (0.1 + (0 * y))
            else:
                d = 0.1
            out = clone_replace(y, replace={x: (x + d)})
            return function([], out)()
        x = shared(np.asarray(0.0, dtype=config.floatX))
        utt.assert_allclose(test(x, pt.sum(((x + 1) ** 2)), mention_y=False), 1.)
        utt.assert_allclose(test(x, pt.sum(((x + 1) ** 2)), mention_y=True), 1.)
class CocoaDisplay(Display):
    """Display backend backed by Quartz/CoreGraphics."""

    def get_screens(self):
        """Return a CocoaScreen for every active display Quartz reports."""
        MAX_DISPLAYS = 256
        display_ids = (CGDirectDisplayID * MAX_DISPLAYS)()
        count = c_uint32()
        quartz.CGGetActiveDisplayList(MAX_DISPLAYS, display_ids, byref(count))
        screens = []
        # only the first count.value array entries are valid
        for display_id in list(display_ids)[:count.value]:
            screens.append(CocoaScreen(self, display_id))
        return screens
class ModFile(AudioFile):
    """AudioFile implementation for tracker modules (MOD/XM/IT) read via
    libmodplug. Only the artist tag is user-editable; nothing is written
    back to the file."""

    format = 'MOD/XM/IT'

    def __init__(self, filename):
        with translate_errors():
            # Fix: close the file handle deterministically instead of relying
            # on garbage collection of the anonymous file object.
            with open(filename, 'rb') as fileobj:
                data = fileobj.read()
        f = _modplug.ModPlug_Load(data, len(data))
        if not f:
            raise OSError('%r not a valid MOD file' % filename)
        # libmodplug reports milliseconds
        self['~#length'] = _modplug.ModPlug_GetLength(f) // 1000
        title = _modplug.ModPlug_GetName(f) or os.path.basename(filename)
        try:
            self['title'] = title.decode('utf-8')
        except UnicodeError:
            # fall back to latin-1 for legacy module files
            self['title'] = title.decode('iso-8859-1')
        _modplug.ModPlug_Unload(f)
        self.sanitize(filename)

    def write(self):
        """Tags are never persisted for module files."""
        pass

    def reload(self, *args):
        # artist is user-supplied only; carry it across reloads
        artist = self.get('artist')
        super().reload(*args)
        if artist is not None:
            self.setdefault('artist', artist)

    def can_change(self, k=None):
        """Only the artist tag is editable."""
        if k is None:
            return ['artist']
        return k == 'artist'
class GroupBoardListManager(CRUDMixin, RESTManager):
    """Manager for the lists of a group issue board
    (``/groups/:group_id/boards/:board_id/lists``)."""

    _path = '/groups/{group_id}/boards/{board_id}/lists'
    _obj_cls = GroupBoardList
    _from_parent_attrs = {'group_id': 'group_id', 'board_id': 'id'}
    # exactly one of label_id / assignee_id / milestone_id may be supplied
    _create_attrs = RequiredOptional(exclusive=('label_id', 'assignee_id', 'milestone_id'))
    _update_attrs = RequiredOptional(required=('position',))

    def get(self, id: Union[(str, int)], lazy: bool=False, **kwargs: Any) -> GroupBoardList:
        """Retrieve a single board list; the cast only narrows the static type."""
        return cast(GroupBoardList, super().get(id=id, lazy=lazy, **kwargs))
def mat2euler(M, cy_thresh=None):
    """Decompose a 3x3 rotation matrix into z-y-x Euler angles.

    Args:
        M: 3x3 rotation matrix (anything np.asarray accepts).
        cy_thresh: threshold on cos(y) below which the matrix is treated as
            gimbal-locked; defaults to 4*eps of M's dtype.

    Returns:
        (z, y, x) angles in radians.
    """
    M = np.asarray(M)
    if cy_thresh is None:
        try:
            cy_thresh = 4 * np.finfo(M.dtype).eps
        except ValueError:
            # non-float dtype: fall back to the module-level constant
            cy_thresh = _FLOAT_EPS_4
    r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat
    # cos(y) recovered from the bottom-right 2-vector
    cy = math.sqrt(r33 * r33 + r23 * r23)
    if cy > cy_thresh:
        angles = (
            math.atan2(-r12, r11),
            math.atan2(r13, cy),
            math.atan2(-r23, r33),
        )
    else:
        # gimbal lock: x is unrecoverable, fold it into z
        angles = (math.atan2(r21, r22), math.atan2(r13, cy), 0.0)
    return angles
class TokenClassificationArgumentHandler(ArgumentHandler):
    """Normalizes pipeline inputs (and the optional offset_mapping kwarg)
    for token classification."""

    def __call__(self, inputs: Union[(str, List[str])], **kwargs):
        if isinstance(inputs, (list, tuple)) and len(inputs) > 0:
            inputs = list(inputs)
            batch_size = len(inputs)
        elif isinstance(inputs, str):
            inputs, batch_size = [inputs], 1
        elif isinstance(inputs, types.GeneratorType) or (Dataset is not None and isinstance(inputs, Dataset)):
            # streaming inputs pass through untouched, with no offset mapping
            return inputs, None
        else:
            raise ValueError('At least one input is required.')
        offset_mapping = kwargs.get('offset_mapping')
        if not offset_mapping:
            return inputs, offset_mapping
        # a single example's mapping (list of tuples) is wrapped into a batch
        if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple):
            offset_mapping = [offset_mapping]
        if len(offset_mapping) != batch_size:
            raise ValueError('offset_mapping should have the same batch size as the input')
        return inputs, offset_mapping
class PeptidesFunctionalDataset(InMemoryDataset):
    """PyG in-memory dataset of peptides with multi-label functional classes,
    built from SMILES strings via *smiles2graph*."""

    def __init__(self, root='datasets', smiles2graph=smiles2graph, transform=None, pre_transform=None):
        self.original_root = root
        self.smiles2graph = smiles2graph
        self.folder = osp.join(root, 'peptides-functional')
        # NOTE(review): the URL string literals below appear truncated by
        # preprocessing -- restore from the upstream source.
        self.url = '
        self.version = '701eb743e899f4d793f0e13c8fa5a1b4'
        self.url_stratified_split = '
        self.md5sum_stratified_split = '5a0114bdadc80b94fc7ae974f13ef061'
        # a marker file named after the version MD5 signals an up-to-date copy
        release_tag = osp.join(self.folder, self.version)
        if (osp.isdir(self.folder) and (not osp.exists(release_tag))):
            print(f'{self.__class__.__name__} has been updated.')
            if (input('Will you update the dataset now? (y/N)\n').lower() == 'y'):
                shutil.rmtree(self.folder)
        super().__init__(self.folder, transform, pre_transform)
        (self.data, self.slices) = torch.load(self.processed_paths[0])

    # NOTE(review): likely a @property in the upstream source
    def raw_file_names(self):
        return 'peptide_multi_class_dataset.csv.gz'

    # NOTE(review): likely a @property in the upstream source
    def processed_file_names(self):
        return 'geometric_data_processed.pt'

    def _md5sum(self, path):
        """MD5 hex digest of the file at *path* (read in one buffer)."""
        hash_md5 = hashlib.md5()
        with open(path, 'rb') as f:
            buffer = f.read()
            hash_md5.update(buffer)
        return hash_md5.hexdigest()

    def download(self):
        """Download the raw CSV and the stratified split file, verifying MD5s."""
        if decide_download(self.url):
            path = download_url(self.url, self.raw_dir)
            hash = self._md5sum(path)
            if (hash != self.version):
                raise ValueError('Unexpected MD5 hash of the downloaded file')
            # write the version marker checked in __init__
            open(osp.join(self.root, hash), 'w').close()
            path_split1 = download_url(self.url_stratified_split, self.root)
            assert (self._md5sum(path_split1) == self.md5sum_stratified_split)
        else:
            print('Stop download.')
            exit((- 1))

    def process(self):
        """Convert each SMILES row into a PyG Data object and collate to disk."""
        data_df = pd.read_csv(osp.join(self.raw_dir, 'peptide_multi_class_dataset.csv.gz'))
        smiles_list = data_df['smiles']
        print('Converting SMILES strings into graphs...')
        data_list = []
        for i in tqdm(range(len(smiles_list))):
            data = Data()
            smiles = smiles_list[i]
            graph = self.smiles2graph(smiles)
            assert (len(graph['edge_feat']) == graph['edge_index'].shape[1])
            assert (len(graph['node_feat']) == graph['num_nodes'])
            data.__num_nodes__ = int(graph['num_nodes'])
            data.edge_index = torch.from_numpy(graph['edge_index']).to(torch.int64)
            data.edge_attr = torch.from_numpy(graph['edge_feat']).to(torch.int64)
            data.x = torch.from_numpy(graph['node_feat']).to(torch.int64)
            # NOTE(review): the labels column holds stringified lists parsed
            # with eval() -- trusted input is assumed here.
            data.y = torch.Tensor([eval(data_df['labels'].iloc[i])])
            data_list.append(data)
        if (self.pre_transform is not None):
            data_list = [self.pre_transform(data) for data in data_list]
        (data, slices) = self.collate(data_list)
        print('Saving...')
        torch.save((data, slices), self.processed_paths[0])

    def get_idx_split(self):
        """Load the pickled stratified train/val/test index split as tensors."""
        split_file = osp.join(self.root, 'splits_random_stratified_peptide.pickle')
        with open(split_file, 'rb') as f:
            splits = pickle.load(f)
        split_dict = replace_numpy_with_torchtensor(splits)
        return split_dict
def vote(request, question_id):
    """Record a vote for the POSTed choice of *question_id*.

    Redisplays the detail page with an error when no (valid) choice was
    selected; otherwise increments the vote counter and redirects to the
    results page (Post/Redirect/Get).
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        context = {'question': question, 'error_message': "You didn't select a choice."}
        return render(request, 'polls/detail.html', context)
    choice.votes += 1
    choice.save()
    return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
def _date_range_in_single_index(dt1, dt2): assert (isinstance(dt1, date) and isinstance(dt2, date)) dt = (dt2 - dt1) if ((not isinstance(dt1, datetime)) and (not isinstance(dt2, datetime))): return (dt == timedelta(days=1)) if ((dt < timedelta(days=1)) and (dt >= timedelta(days=0))): return (dt2.day == dt1.day) if (dt == timedelta(days=1)): return ((dt1.hour == 0) and (dt1.minute == 0) and (dt1.second == 0) and (dt1.microsecond == 0)) return False
class TestDataloaderAsyncGPUWrapper(unittest.TestCase):
    """Checks that DataloaderAsyncGPUWrapper delivers every sample on the GPU."""

    # NOTE(review): the original had a bare tuple
    # ``(torch.cuda.is_available(), 'This test needs a gpu to run')`` here --
    # almost certainly a stripped @unittest.skipUnless decorator; restored.
    @unittest.skipUnless(torch.cuda.is_available(), 'This test needs a gpu to run')
    def test_dataset_async(self):
        NUM_SAMPLES = 1024
        dataset = ZeroImageDataset(crop_size=224, num_channels=3, num_classes=1000, num_samples=NUM_SAMPLES)
        base_dataloader = DataLoader(dataset=dataset, pin_memory=True, num_workers=20)
        dataloader = DataloaderAsyncGPUWrapper(base_dataloader)
        i = 0
        for sample in dataloader:
            # Inputs are all-zero images, so nonzero() must be empty.
            self.assertTrue(sample['input'].nonzero(as_tuple=False).numel() == 0)
            # Every tensor in the sample must already live on the GPU.
            for k in sample.keys():
                self.assertTrue(sample[k].device.type == 'cuda')
            i += 1
        self.assertEqual(i, NUM_SAMPLES)
class Trainer(Base):
    # Distributed training driver: builds the data pipeline, the model
    # (DDP or DataParallel), the optimizer with per-group learning rates,
    # and handles checkpoint save/resume plus stepwise LR decay.

    def __init__(self, cfg):
        self.cfg = cfg
        super(Trainer, self).__init__(cfg.log_dir, log_name='train_logs.txt')

    def get_optimizer(self, model):
        # Backbone parameters get their own (typically smaller) LR; all other
        # model parameters use cfg.lr; the AutomaticWeightedLoss parameters
        # are excluded from weight decay.
        base_params = list(map(id, model.module.backbone.parameters()))
        other_params = filter((lambda p: (id(p) not in base_params)), model.module.parameters())
        optimizer = torch.optim.AdamW([{'params': model.module.backbone.parameters(), 'lr': self.cfg.lr_backbone}, {'params': other_params}, {'params': self.awl.parameters(), 'weight_decay': 0}], lr=self.cfg.lr, weight_decay=self.cfg.weight_decay)
        return optimizer

    def save_model(self, state, epoch):
        # Write a per-epoch snapshot; only rank 0 logs the path.
        file_path = osp.join(self.cfg.model_dir, 'snapshot_{}.pth.tar'.format(str(epoch)))
        torch.save(state, file_path)
        if (dist.get_rank() == 0):
            self.logger.info('Write snapshot into {}'.format(file_path))

    def load_model(self, model, optimizer):
        # Resume from the snapshot with the highest epoch number in model_dir.
        model_file_list = glob.glob(osp.join(self.cfg.model_dir, '*.pth.tar'))
        cur_epoch = max([int(file_name[(file_name.find('snapshot_') + 9):file_name.find('.pth.tar')]) for file_name in model_file_list])
        ckpt_path = osp.join(self.cfg.model_dir, (('snapshot_' + str(cur_epoch)) + '.pth.tar'))
        ckpt = torch.load(ckpt_path, map_location='cpu')
        start_epoch = (ckpt['epoch'] + 1)
        # strict=False tolerates architecture changes between runs.
        info = model.load_state_dict(ckpt['network'], strict=False)
        if (cur_epoch != 0):
            self.awl.load_state_dict(ckpt['awl'])
        if (dist.get_rank() == 0):
            self.logger.info('Load checkpoint from {}'.format(ckpt_path))
        return (start_epoch, model.cuda(), optimizer)

    def set_lr(self, epoch):
        # Step decay: divide cfg.lr by lr_dec_factor once per passed milestone
        # in cfg.lr_dec_epoch.
        for e in self.cfg.lr_dec_epoch:
            if (epoch < e):
                break
        if (epoch < self.cfg.lr_dec_epoch[(- 1)]):
            idx = self.cfg.lr_dec_epoch.index(e)
            for g in self.optimizer.param_groups:
                g['lr'] = (self.cfg.lr / (self.cfg.lr_dec_factor ** idx))
        else:
            for g in self.optimizer.param_groups:
                g['lr'] = (self.cfg.lr / (self.cfg.lr_dec_factor ** len(self.cfg.lr_dec_epoch)))

    def get_lr(self):
        # Returns the LR of the last parameter group (all groups share the
        # decayed cfg.lr set in set_lr).
        for g in self.optimizer.param_groups:
            cur_lr = g['lr']
        return cur_lr

    def _make_batch_generator(self):
        if (dist.get_rank() == 0):
            self.logger.info('Creating dataset...')
        # NOTE(review): eval() turns config strings into dataset classes;
        # assumes trainset names come from trusted local config.
        trainset3d_loader = []
        for i in range(len(self.cfg.trainset_3d)):
            trainset3d_loader.append(eval(self.cfg.trainset_3d[i])(transforms.ToTensor(), 'train'))
        trainset2d_loader = []
        for i in range(len(self.cfg.trainset_2d)):
            trainset2d_loader.append(eval(self.cfg.trainset_2d[i])(transforms.ToTensor(), 'train'))
        # Mix 3D and 2D datasets when both exist; vertex/joint counts are
        # taken from the first available dataset.
        if ((len(trainset3d_loader) > 0) and (len(trainset2d_loader) > 0)):
            self.vertex_num = trainset3d_loader[0].vertex_num
            self.joint_num = trainset3d_loader[0].joint_num
            trainset3d_loader = MultipleDatasets(trainset3d_loader, make_same_len=False)
            trainset2d_loader = MultipleDatasets(trainset2d_loader, make_same_len=False)
            trainset_loader = MultipleDatasets([trainset3d_loader, trainset2d_loader], make_same_len=True)
        elif (len(trainset3d_loader) > 0):
            self.vertex_num = trainset3d_loader[0].vertex_num
            self.joint_num = trainset3d_loader[0].joint_num
            trainset_loader = MultipleDatasets(trainset3d_loader, make_same_len=False)
        elif (len(trainset2d_loader) > 0):
            self.vertex_num = trainset2d_loader[0].vertex_num
            self.joint_num = trainset2d_loader[0].joint_num
            trainset_loader = MultipleDatasets(trainset2d_loader, make_same_len=False)
        else:
            assert 0, 'Both 3D training set and 2D training set have zero length.'
        self.itr_per_epoch = math.ceil(((len(trainset_loader) / self.cfg.num_gpus) / self.cfg.train_batch_size))
        if self.cfg.distributed:
            self.sampler = DistributedSampler(trainset_loader)
        else:
            self.sampler = None
        # shuffle only when no sampler: DistributedSampler shuffles itself.
        self.batch_generator = DataLoader(dataset=trainset_loader, batch_size=self.cfg.train_batch_size, shuffle=(self.sampler is None), num_workers=self.cfg.num_thread, pin_memory=True, sampler=self.sampler)

    def _make_model(self):
        if (dist.get_rank() == 0):
            self.logger.info('Creating graph and optimizer...')
        if (not hasattr(self, 'joint_num')):
            self.joint_num = 30  # fallback when no dataset defined joint_num
        model = get_model(self.joint_num, 'train', self.cfg)
        awl = AutomaticWeightedLoss(6)
        if self.cfg.distributed:
            # SyncBN + DDP; device ids are only pinned for local (single-node)
            # launches.
            model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model.cuda())
            if self.cfg.is_local:
                model = DDP(model, device_ids=[dist.get_rank()], output_device=dist.get_rank(), find_unused_parameters=True)
                awl = DDP(awl.cuda(), device_ids=[dist.get_rank()], output_device=dist.get_rank(), find_unused_parameters=True)
            else:
                model = DDP(model, find_unused_parameters=True)
                awl = DDP(awl.cuda(), find_unused_parameters=True)
        else:
            model = DataParallel(model).cuda()
        self.awl = awl
        optimizer = self.get_optimizer(model)
        if self.cfg.continue_train:
            (start_epoch, model, optimizer) = self.load_model(model, optimizer)
        else:
            start_epoch = 0
        model.train()
        self.start_epoch = start_epoch
        self.model = model
        self.optimizer = optimizer

    def _init_ddp(self):
        # Bind this process to its GPU and join the NCCL process group.
        torch.cuda.set_device(f'cuda:{self.cfg.local_rank}')
        dist.init_process_group(backend='nccl')
        assert dist.is_initialized(), 'Distributed backend not initialized.'
def get_global(key: _GLOBAL_KEY) -> Mapping[(str, Any)]:
    """Return the mapping stored under ``key`` in the lazily-loaded global data.

    The pickled payload is read from ``global.dat`` next to this module on
    first use and cached in the module-level ``_global_data``.
    """
    global _global_data
    if _global_data is None:
        data_dir = os.path.join(os.path.dirname(__file__))
        data_path = os.path.join(data_dir, 'global.dat')
        if not os.path.isfile(data_path):
            _raise_no_data_error()
        with open(data_path, 'rb') as fh:
            _global_data = pickle.load(fh)
        assert _global_data is not None
    return _global_data.get(key, {})
class InspectCommand(BaseGraphCommand):
    """Command wrapper around ``bonobo.inspect`` (graph visualisation)."""

    handler = staticmethod(bonobo.inspect)

    def add_arguments(self, parser):
        super(InspectCommand, self).add_arguments(parser)
        # --graph / -g selects the 'graph' output format.
        parser.add_argument('--graph', '-g', dest='format', action='store_const', const='graph')

    def parse_options(self, **options):
        # A format is mandatory; fail loudly when none was selected.
        fmt = options.get('format')
        if fmt:
            return options
        raise RuntimeError('You must provide a format (try --graph).')
class L1Loss(nn.Module):
    """Wraps the L1 criterion and also reports end-point error (EPE)."""

    def __init__(self, args):
        super(L1Loss, self).__init__()
        self.args = args
        self.loss = L1()
        # Ordering matches the values returned by forward().
        self.loss_labels = ['L1', 'EPE']

    def forward(self, output, target):
        return [self.loss(output, target), EPE(output, target)]
class CUDACallback(Callback):
    """Lightning callback reporting per-epoch wall time and peak GPU memory."""

    def on_train_epoch_start(self, trainer, pl_module):
        # Reset the peak-memory counter and start the epoch timer after a sync.
        torch.cuda.reset_peak_memory_stats(trainer.root_gpu)
        torch.cuda.synchronize(trainer.root_gpu)
        self.start_time = time.time()

    def on_train_epoch_end(self, trainer, pl_module):
        torch.cuda.synchronize(trainer.root_gpu)
        epoch_time = time.time() - self.start_time
        # Bytes -> MiB.
        max_memory = torch.cuda.max_memory_allocated(trainer.root_gpu) / (2 ** 20)
        try:
            # Average across ranks; plugins without reduce() raise AttributeError
            # and we silently skip reporting.
            max_memory = trainer.training_type_plugin.reduce(max_memory)
            epoch_time = trainer.training_type_plugin.reduce(epoch_time)
            rank_zero_info(f'Average Epoch time: {epoch_time:.2f} seconds')
            rank_zero_info(f'Average Peak memory {max_memory:.2f}MiB')
        except AttributeError:
            pass
def noisy_dense(inputs, units, bias_shape, c_names, w_i, b_i=None, activation=tf.nn.relu, noisy_distribution='factorised'):
    # NoisyNet dense layer (Fortunato et al.): adds learnable parametric
    # Gaussian noise to weights (and biases) for exploration. TF1-style API.

    def f(e_list):
        # Noise scaling f(x) = sign(x) * sqrt(|x|) used by the factorised variant.
        return tf.multiply(tf.sign(e_list), tf.pow(tf.abs(e_list), 0.5))
    if (not isinstance(inputs, ops.Tensor)):
        inputs = ops.convert_to_tensor(inputs, dtype='float')
    if (len(inputs.shape) > 2):
        inputs = tf.contrib.layers.flatten(inputs)
    flatten_shape = inputs.shape[1]
    weights = tf.get_variable('weights', shape=[flatten_shape, units], initializer=w_i)
    w_sigma = tf.get_variable('w_sigma', [flatten_shape, units], initializer=w_i, collections=c_names)
    if (noisy_distribution == 'independent'):
        # Independent Gaussian noise per weight.
        weights += tf.multiply(tf.random_normal(shape=w_sigma.shape), w_sigma)
    elif (noisy_distribution == 'factorised'):
        # Factorised noise: outer product of a row and a column noise vector.
        noise_1 = f(tf.random_normal(tf.TensorShape([flatten_shape, 1]), dtype=tf.float32))
        noise_2 = f(tf.random_normal(tf.TensorShape([1, units]), dtype=tf.float32))
        weights += tf.multiply((noise_1 * noise_2), w_sigma)
    dense = tf.matmul(inputs, weights)
    if (bias_shape is not None):
        assert (bias_shape[0] == units)
        biases = tf.get_variable('biases', shape=bias_shape, initializer=b_i)
        b_noise = tf.get_variable('b_noise', [1, units], initializer=b_i, collections=c_names)
        if (noisy_distribution == 'independent'):
            biases += tf.multiply(tf.random_normal(shape=b_noise.shape), b_noise)
        elif (noisy_distribution == 'factorised'):
            # Reuses noise_2 from the weight branch (same distribution mode
            # is taken in both branches within one call).
            biases += tf.multiply(noise_2, b_noise)
        return (activation((dense + biases)) if (activation is not None) else (dense + biases))
    return (activation(dense) if (activation is not None) else dense)
class RuncodeWizardPage1(BasePyzoWizardPage):
    # Wizard page explaining the different ways to run code in Pyzo.
    _title = translate('wizard', 'Running code')
    _image_filename = 'pyzo_run1.png'
    # NOTE(review): "run the the (complete)" below contains a doubled word,
    # but these strings are translation keys -- fixing the source string
    # would orphan existing translations, so it is left untouched here.
    _descriptions = [translate('wizard', "Pyzo supports several ways to run source code in the editor. (see the 'Run' menu)."), translate('wizard', '*Run selection:* if there is no selected text, the current line\n is executed; if the selection is on a single line, the selection\n is evaluated; if the selection spans multiple lines, Pyzo will\n run the the (complete) selected lines.'), translate('wizard', "*Run cell:* a cell is everything between two lines starting with '##'."), translate('wizard', '*Run file:* run all the code in the current file.'), translate('wizard', "*Run project main file:* run the code in the current project's main file.")]
def _try_get_string(dev, index, langid=None, default_str_i0='', default_access_error='Error Accessing String'): if (index == 0): string = default_str_i0 else: try: if (langid is None): string = util.get_string(dev, index) else: string = util.get_string(dev, index, langid) except: string = default_access_error return string
def get_egress_cmd(execution, test_interface, mod, vallst, duration=30):
    """Build the shell command applying/listing/removing tc-netem rules.

    ``execution == 'parallel'`` applies every parameter in ``vallst`` in one
    netem qdisc; otherwise only ``mod`` is applied. The returned command
    sets the qdiscs, waits ``duration`` seconds, then tears them down.
    """
    param_map = {'latency': 'delay', 'loss': 'loss', 'bandwidth': 'rate'}
    tc_set = ''
    tc_unset = ''
    tc_ls = ''
    for iface in test_interface:
        tc_set = '{0} tc qdisc add dev {1} root netem'.format(tc_set, iface)
        tc_unset = '{0} tc qdisc del dev {1} root ;'.format(tc_unset, iface)
        tc_ls = '{0} tc qdisc ls dev {1} ;'.format(tc_ls, iface)
    if execution == 'parallel':
        # All requested impairments go on the same qdisc.
        for param in vallst.keys():
            tc_set += ' {0} {1} '.format(param_map[param], vallst[param])
        tc_set += ';'
    else:
        tc_set += ' {0} {1} ;'.format(param_map[mod], vallst[mod])
    return '{0} {1} sleep {2};{3} sleep 20;{4}'.format(tc_set, tc_ls, duration, tc_unset, tc_ls)
class TestEntityCrawler():
    """Smoke tests for the wiki entity crawler."""

    # NOTE(review): the original URL string literals were truncated in this
    # copy of the file, leaving the method bodies syntactically broken.
    # Placeholders restore validity -- fill in the real wiki URLs (TODO).
    WIKI_ENTITY_URL = ''  # TODO: original single-entity wiki URL
    WIKI_CATEGORY_URL = ''  # TODO: original category listing URL

    def test_crawl_wiki_entity(self):
        url = self.WIKI_ENTITY_URL
        res = entity.crawl_wiki_entity(url, label=Label['PERSON'])
        assert isinstance(res, Entity)

    def test_crawl_wiki_entity_urls(self):
        category_url = self.WIKI_CATEGORY_URL
        urls = [url for url in entity.crawl_wiki_entity_urls(category_url)]
        assert isinstance(urls, list)
def get_f1(model: BiRecurrentConvCRF4NestedNER, mode: str, file_path: str=None) -> float:
    # Evaluate nested-NER precision/recall/F1 on the dev or test batches
    # (module-level globals) and optionally dump gold vs. predicted
    # entities per sentence to ``file_path``.
    with torch.no_grad():
        model.eval()
        (pred_all, pred, recall_all, recall) = (0, 0, 0, 0)
        # Crossing-entity counters are tracked but currently never
        # incremented (kept at 0 below).
        gold_cross_num = 0
        pred_cross_num = 0
        if (mode == 'dev'):
            batch_zip = zip(dev_token_batches, dev_label_batches, dev_mask_batches)
        elif (mode == 'test'):
            batch_zip = zip(test_token_batches, test_label_batches, test_mask_batches)
        else:
            raise ValueError
        f = None
        if (file_path is not None):
            f = open(file_path, 'w')
        for (token_batch, label_batch, mask_batch) in batch_zip:
            mask_batch_var = torch.ByteTensor(np.array(mask_batch, dtype=np.uint8))
            if config.if_gpu:
                mask_batch_var = mask_batch_var.cuda()
            pred_sequence_entities = model.predict(token_batch, mask_batch_var)
            pred_entities = unpack_prediction(model, pred_sequence_entities)
            # p_a/r_a are totals, p/r are correct counts for this batch.
            (p_a, p, r_a, r) = evaluate(label_batch, pred_entities)
            gold_cross_num += 0
            pred_cross_num += 0
            pred_all += p_a
            pred += p
            recall_all += r_a
            recall += r
            if (file_path is not None):
                # One block per sentence: tokens, gold entities, predictions,
                # entities formatted as "start,end LABEL" joined by '|'.
                for (token, mask, label, preds) in zip(token_batch, mask_batch, label_batch, pred_entities):
                    f.write((' '.join(token) + '\n'))
                    labels = []
                    for l in sorted(label, key=(lambda x: (x[0], x[1], x[2]))):
                        labels.append('{},{} {}'.format(l[0], l[1], label_dict.get_instance(l[2])))
                    f.write(('|'.join(labels) + '\n'))
                    labels = []
                    # NOTE: this loop variable ``p`` shadows the precision
                    # counter, which has already been accumulated above.
                    for p in sorted(preds, key=(lambda x: (x[0], x[1], x[2]))):
                        labels.append('{},{} {}'.format(p[0], p[1], label_dict.get_instance(p[2])))
                    f.write(('|'.join(labels) + '\n'))
                    f.write('\n')
        if (file_path is not None):
            f.close()
        # Precision/recall default to 1.0 when there was nothing to count;
        # F1 is the harmonic mean, 0.0 when either side is 0.
        pred = ((pred / pred_all) if (pred_all > 0) else 1.0)
        recall = ((recall / recall_all) if (recall_all > 0) else 1.0)
        f1 = ((2 / ((1.0 / pred) + (1.0 / recall))) if ((pred > 0.0) and (recall > 0.0)) else 0.0)
        logger.info('{} precision: {:.2f}%, recall: {:.2f}%, F1: {:.2f}%'.format(mode, (pred * 100.0), (recall * 100.0), (f1 * 100.0)))
        return f1
class VanLargeKernelAttention(nn.Module):
    """Decomposed large-kernel convolution from VAN.

    A large receptive field is approximated by a 5x5 depth-wise conv, a
    7x7 depth-wise conv with dilation 3, and a 1x1 point-wise conv; the
    spatial size is preserved throughout.
    """

    def __init__(self, hidden_size: int):
        super().__init__()
        self.depth_wise = nn.Conv2d(hidden_size, hidden_size, kernel_size=5, padding=2, groups=hidden_size)
        self.depth_wise_dilated = nn.Conv2d(hidden_size, hidden_size, kernel_size=7, dilation=3, padding=9, groups=hidden_size)
        self.point_wise = nn.Conv2d(hidden_size, hidden_size, kernel_size=1)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        # Apply the three convolutions in sequence.
        for conv in (self.depth_wise, self.depth_wise_dilated, self.point_wise):
            hidden_state = conv(hidden_state)
        return hidden_state
class PluginConfigMixin():
    """Mixin giving plugins namespaced access to the application config.

    Option keys are prefixed with ``CONFIG_SECTION`` (or the lower-cased
    ``PLUGIN_ID`` when no explicit section is set).

    NOTE(review): these methods take ``cls`` but had no decorators in this
    copy -- the @classmethod decorators appear to have been stripped;
    restored below.
    """

    CONFIG_SECTION = ''  # optional explicit prefix; falls back to PLUGIN_ID

    @classmethod
    def _config_key(cls, name):
        return cls._get_config_option(name)

    @classmethod
    def _get_config_option(cls, option):
        prefix = cls.CONFIG_SECTION
        if not prefix:
            prefix = cls.PLUGIN_ID.lower().replace(' ', '_')
        return f'{prefix}_{option}'

    @classmethod
    def config_get(cls, name, default=''):
        """Return the raw string config value for ``name``."""
        return config.get(PM.CONFIG_SECTION, cls._config_key(name), default)

    @classmethod
    def config_set(cls, name, value):
        """Persist ``value`` under ``name``; log (not raise) on config errors."""
        try:
            config.set(PM.CONFIG_SECTION, cls._config_key(name), value)
        except config.Error:
            print_d(f"Couldn't set config item '{name}' to {value!r}")

    @classmethod
    def config_get_bool(cls, name, default=False):
        return config.getboolean(PM.CONFIG_SECTION, cls._config_key(name), default)

    @classmethod
    def config_get_stringlist(cls, name, default=False):
        return config.getstringlist(PM.CONFIG_SECTION, cls._config_key(name), default)

    def config_entry_changed(self, entry, key):
        """Gtk entry 'changed' handler: persist the new text under ``key``."""
        if entry.get_property('sensitive'):
            self.config_set(key, entry.get_text())

    @classmethod
    def ConfigCheckButton(cls, label, name, default=False):
        """Return a ConfigCheckButton bound to this plugin's ``name`` option,
        seeding the config with ``default`` when the option is unset."""
        option = cls._config_key(name)
        try:
            config.getboolean(PM.CONFIG_SECTION, option)
        except config.Error:
            cls.config_set(name, default)
        return ConfigCheckButton(label, PM.CONFIG_SECTION, option, populate=True)
def download_manifest_entry(manifest_entry: ManifestEntry, token_holder: Optional[Dict[(str, Any)]]=None, table_type: TableType=TableType.PYARROW, column_names: Optional[List[str]]=None, include_columns: Optional[List[str]]=None, file_reader_kwargs_provider: Optional[ReadKwargsProvider]=None, content_type: Optional[ContentType]=None, content_encoding: Optional[ContentEncoding]=None) -> LocalTable:
    # Download one manifest entry from S3 and deserialize it into a local
    # table, retrying transient failures with randomized exponential backoff.
    conf = Config(retries={'max_attempts': BOTO_MAX_RETRIES, 'mode': 'adaptive'})
    # Use the session credentials from token_holder when provided; otherwise
    # fall back to the ambient AWS credential chain.
    s3_client_kwargs = ({'aws_access_key_id': token_holder['accessKeyId'], 'aws_secret_access_key': token_holder['secretAccessKey'], 'aws_session_token': token_holder['sessionToken'], 'config': conf} if token_holder else {'config': conf})
    # Content type/encoding default to the manifest metadata when not given.
    if (not content_type):
        content_type = manifest_entry.meta.content_type
        assert content_type, f'Unknown content type for manifest entry: {manifest_entry}'
    content_type = ContentType(content_type)
    if (not content_encoding):
        content_encoding = manifest_entry.meta.content_encoding
        assert content_encoding, f'Unknown content encoding for manifest entry: {manifest_entry}'
    content_encoding = ContentEncoding(content_encoding)
    # Older manifests store the location under ``url`` instead of ``uri``.
    s3_url = manifest_entry.uri
    if (s3_url is None):
        s3_url = manifest_entry.url
    # Pick up partial-download parameters if present in the metadata.
    partial_file_download_params = None
    if (manifest_entry.meta and manifest_entry.meta.content_type_parameters):
        for type_params in manifest_entry.meta.content_type_parameters:
            if isinstance(type_params, PartialFileDownloadParams):
                partial_file_download_params = type_params
                break
    # Retry for up to 30 minutes, but never retry errors flagged as
    # non-retryable.
    retrying = Retrying(wait=wait_random_exponential(multiplier=1, max=60), stop=stop_after_delay((30 * 60)), retry=retry_if_not_exception_type(NonRetryableError))
    table = retrying(read_file, s3_url, content_type, content_encoding, table_type, column_names, include_columns, file_reader_kwargs_provider, partial_file_download_params, **s3_client_kwargs)
    return table
class DistributedGPUTest(unittest.TestCase):
    """Multi-process NCCL tests for gather/scatter collective helpers.

    NOTE(review): the decorator '@' markers appear stripped in this copy
    (bare ``_if_not_gpu`` / ``_if_not_distributed`` lines); restored as
    decorators, plus the static/classmethod decorators their signatures
    imply.
    """

    @_if_not_gpu
    @_if_not_distributed
    def test_gather_uneven_multidim_nccl(self) -> None:
        spawn_multi_process(2, 'nccl', self._test_ddp_gather_uneven_tensors_multidim_nccl)

    @staticmethod
    def _test_ddp_gather_uneven_tensors_multidim_nccl() -> None:
        # Runs inside each spawned worker: every rank contributes a
        # differently-shaped all-ones tensor.
        rank = dist.get_rank()
        world_size = dist.get_world_size()
        tensor = torch.ones((rank + 1), (4 - rank), device=get_device_from_env())
        result = all_gather_tensors(tensor)
        assert (len(result) == world_size)
        for idx in range(world_size):
            val = result[idx]
            assert (val.shape == ((idx + 1), (4 - idx)))
            assert (val == 1).all()

    @_if_not_gpu
    @_if_not_distributed
    def test_pg_wrapper_scatter_object_list_nccl(self) -> None:
        spawn_multi_process(2, 'nccl', self._test_pg_wrapper_scatter_object_list)

    @classmethod
    def _test_pg_wrapper_scatter_object_list(cls) -> None:
        # Runs inside each spawned worker; rank 0 scatters [1, 2].
        init_from_env()
        pg_wrapper = PGWrapper(dist.group.WORLD)
        output_list = ([None] * 2)
        pg_wrapper.scatter_object_list(output_list=output_list, input_list=([1, 2] if (get_local_rank() == 0) else ([None] * 2)), src=0)
        tc = unittest.TestCase()
        tc.assertEqual(output_list[0], (get_local_rank() + 1))
_type_check  # NOTE(review): likely a stripped '@_type_check'-style decorator

def decrypt(secret, hash, data):
    # Decrypt Telegram Passport ``data`` with AES-256-CBC and verify its
    # integrity. ``hash`` shadows the builtin; kept for interface stability.
    if (not CRYPTO_INSTALLED):
        raise RuntimeError('To use Telegram Passports, PTB must be installed via `pip install "python-telegram-bot[passport]"`.')
    # Key/IV derivation: SHA512(secret + hash); first 32 bytes are the AES
    # key, the next 16 the CBC initialization vector.
    digest = Hash(SHA512(), backend=default_backend())
    digest.update((secret + hash))
    secret_hash_hash = digest.finalize()
    (key, init_vector) = (secret_hash_hash[:32], secret_hash_hash[32:(32 + 16)])
    cipher = Cipher(AES(key), CBC(init_vector), backend=default_backend())
    decryptor = cipher.decryptor()
    data = (decryptor.update(data) + decryptor.finalize())
    # Integrity check: SHA256 of the padded plaintext must equal ``hash``.
    digest = Hash(SHA256(), backend=default_backend())
    digest.update(data)
    data_hash = digest.finalize()
    if (data_hash != hash):
        raise PassportDecryptionError(f'Hashes are not equal! {data_hash} != {hash}')
    # The first byte encodes the padding length; strip the padding prefix.
    return data[data[0]:]
class XBKKExchangeCalendar(TradingCalendar):
    # Stock Exchange of Thailand (XBKK) trading calendar.
    # NOTE(review): regular_holidays/adhoc_holidays are plain methods here;
    # upstream trading-calendars declares them as @property -- the decorators
    # may have been stripped from this copy. Confirm before use.
    name = 'XBKK'
    tz = timezone('Asia/Bangkok')
    # Opens at 10:01 local time, closes at 16:30.
    open_times = ((None, time(10, 1)),)
    close_times = ((None, time(16, 30)),)

    def regular_holidays(self):
        # Recurring rule-based holidays.
        return HolidayCalendar([NewYearsDay, ChakriMemorialDay, SongkranFestival1, SongkranFestival2, SongkranFestival3, LabourDay, CoronationDay2016AndBefore, CoronationDay2019AndAfter, HMQueensBirthday, HMKingsBirthday, HMQueenMothersBirthday, HalfYearHoliday, ThePassingOfKingBhumibol, ChulalongkornDay, KingBhumibolsBirthday, ThailandConstitutionDay, NewYearsEve])

    def adhoc_holidays(self):
        # Lunar-calendar observances, bridge days, and one-off closures
        # (precomputed date lists).
        return list(chain(makha_bucha, vesak, asanha_bucha, new_years_bridge_days, asanha_bucha_bridge_days, queens_birthday_bridge_days, coronation_bridge_days, vesak_bridge_days, misc_adhoc))
def listify_value(arg, split=None):
    """Flatten ``arg`` into a list of stripped strings.

    Nested lists/tuples are flattened recursively, ``None`` entries are
    dropped, and every remaining value is stringified then split on
    ``split`` (whitespace when ``None``).
    """
    if not isinstance(arg, (list, tuple)):
        arg = [arg]
    out = []
    for item in arg:
        if item is None:
            continue
        if isinstance(item, (list, tuple)):
            # Recurse into nested sequences.
            out.extend(listify_value(item, split=split))
        else:
            out.extend(piece.strip() for piece in str(item).split(split))
    assert all(isinstance(piece, str) for piece in out)
    return out
def test_unused_tcp_port_factory_selects_unused_port(pytester: Pytester):
    # Meta-test: writes an inline pytest module (via pytester) asserting that
    # three ports handed out by unused_tcp_port_factory are all bindable and
    # that rebinding each while held raises IOError.
    # NOTE(review): the generated source below starts with ' .asyncio' --
    # this looks like a stripped '@pytest.mark.asyncio' marker lost during
    # extraction; confirm against the upstream test before relying on it.
    pytester.makepyfile(dedent(' .asyncio\n async def test_unused_port_factory_fixture(unused_tcp_port_factory):\n async def closer(_, writer):\n writer.close()\n\n port1, port2, port3 = (\n unused_tcp_port_factory(),\n unused_tcp_port_factory(),\n unused_tcp_port_factory(),\n )\n\n server1 = await asyncio.start_server(\n closer, host="localhost", port=port1\n )\n server2 = await asyncio.start_server(\n closer, host="localhost", port=port2\n )\n server3 = await asyncio.start_server(\n closer, host="localhost", port=port3\n )\n\n for port in port1, port2, port3:\n with pytest.raises(IOError):\n await asyncio.start_server(closer, host="localhost", port=port)\n\n server1.close()\n await server1.wait_closed()\n server2.close()\n await server2.wait_closed()\n server3.close()\n await server3.wait_closed()\n '))
def read_nonterminals(filename):
    """Read and validate the nonterminal-symbol list from ``filename``.

    Each non-empty line must start with '#nonterm:' and duplicates are
    rejected. Returns the list of stripped lines.

    Raises:
        RuntimeError: on an empty file, a malformed symbol, or duplicates.
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename, 'r', encoding='latin-1') as fh:
        ans = [line.strip(' \t\r\n') for line in fh]
    if len(ans) == 0:
        raise RuntimeError('The file {0} contains no nonterminals symbols.'.format(filename))
    for nonterm in ans:
        if nonterm[:9] != '#nonterm:':
            raise RuntimeError("In file '{0}', expected nonterminal symbols to start with '#nonterm:', found '{1}'".format(filename, nonterm))
    if len(set(ans)) != len(ans):
        raise RuntimeError('Duplicate nonterminal symbols are present in file {0}'.format(filename))
    return ans
def base_js():
    """Load the gzipped base.js fixtures and return their decoded contents."""
    mocks_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks')
    contents = []
    for name in ['base.js-2022-02-04.gz', 'base.js-2022-04-15.gz']:
        # Each fixture is a gzip-compressed UTF-8 JavaScript file.
        with gzip.open(os.path.join(mocks_dir, name), 'rb') as fh:
            contents.append(fh.read().decode('utf-8'))
    return contents
class ComponentPascalLexer(RegexLexer):
    # Pygments lexer for the Component Pascal language.
    name = 'Component Pascal'
    aliases = ['componentpascal', 'cp']
    filenames = ['*.cp', '*.cps']
    mimetypes = ['text/x-component-pascal']
    # NOTE(review): the next line is garbled -- the url string literal was
    # truncated and swallowed the version_added assignment; restore both.
    url = ' version_added = '2.1'
    flags = (re.MULTILINE | re.DOTALL)
    # Token states: top-level 'root' delegates to the specialized groups.
    tokens = {'root': [include('whitespace'), include('comments'), include('punctuation'), include('numliterals'), include('strings'), include('operators'), include('builtins'), include('identifiers')], 'whitespace': [('\\n+', Text), ('\\s+', Text)], 'comments': [('\\(\\*([^$].*?)\\*\\)', Comment.Multiline)], 'punctuation': [('[()\\[\\]{},.:;|]', Punctuation)], 'numliterals': [('[0-9A-F]+X\\b', Number.Hex), ('[0-9A-F]+[HL]\\b', Number.Hex), ('[0-9]+\\.[0-9]+E[+-][0-9]+', Number.Float), ('[0-9]+\\.[0-9]+', Number.Float), ('[0-9]+', Number.Integer)], 'strings': [("'[^\\n']*'", String), ('"[^\\n"]*"', String)], 'operators': [('[+-]', Operator), ('[*/]', Operator), ('[=#<>]', Operator), ('\\^', Operator), ('&', Operator), ('~', Operator), (':=', Operator), ('\\.\\.', Operator), ('\\$', Operator)], 'identifiers': [('([a-zA-Z_$][\\w$]*)', Name)], 'builtins': [(words(('ANYPTR', 'ANYREC', 'BOOLEAN', 'BYTE', 'CHAR', 'INTEGER', 'LONGINT', 'REAL', 'SET', 'SHORTCHAR', 'SHORTINT', 'SHORTREAL'), suffix='\\b'), Keyword.Type), (words(('ABS', 'ABSTRACT', 'ARRAY', 'ASH', 'ASSERT', 'BEGIN', 'BITS', 'BY', 'CAP', 'CASE', 'CHR', 'CLOSE', 'CONST', 'DEC', 'DIV', 'DO', 'ELSE', 'ELSIF', 'EMPTY', 'END', 'ENTIER', 'EXCL', 'EXIT', 'EXTENSIBLE', 'FOR', 'HALT', 'IF', 'IMPORT', 'IN', 'INC', 'INCL', 'IS', 'LEN', 'LIMITED', 'LONG', 'LOOP', 'MAX', 'MIN', 'MOD', 'MODULE', 'NEW', 'ODD', 'OF', 'OR', 'ORD', 'OUT', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN', 'SHORT', 'SHORTCHAR', 'SHORTINT', 'SIZE', 'THEN', 'TYPE', 'TO', 'UNTIL', 'VAR', 'WHILE', 'WITH'), suffix='\\b'), Keyword.Reserved), ('(TRUE|FALSE|NIL|INF)\\b', Keyword.Constant)]}

    def analyse_text(text):
        # Heuristic auto-detection score: each keyword hit adds 0.01.
        # NOTE(review): 'END' is tested twice -- possibly intentional extra
        # weight, possibly a copy/paste slip; behavior preserved as-is.
        result = 0
        if ('BEGIN' in text):
            result += 0.01
        if ('END' in text):
            result += 0.01
        if ('PROCEDURE' in text):
            result += 0.01
        if ('END' in text):
            result += 0.01
        return result
class BaseEvent():
    """Base class for client event processors."""

    def __init__(self, client):
        self.client = client
        self.logger = logging.getLogger(__name__)

    def to_string(self, data):
        """Render ``data`` as a string; subclasses must override."""
        raise NotImplementedError

    def capture(self, **kwargs):
        """Hook for subclasses to extract event data; default is nothing."""
        return {}

    def transform(self, value):
        # Delegate value transformation to the owning client.
        return self.client.transform(value)
class VariationalGPModel(gpytorch.models.ApproximateGP):
    """Sparse variational GP (unwhitened strategy) with optional streaming updates.

    When ``streaming`` is enabled, each training call registers an added loss
    term that ties the new variational posterior to the previous one
    (streaming sparse GP style), and ``update_variational_parameters``
    performs the closed-form online update for a new data batch.
    """

    def __init__(self, inducing_points, mean_module=None, covar_module=None, streaming=False, likelihood=None, feat_extractor=None, beta=1.0, learn_inducing_locations=True):
        data_dim = (-2) if (inducing_points.dim() > 1) else (-1)
        variational_distribution = gpytorch.variational.CholeskyVariationalDistribution(inducing_points.size(data_dim))
        variational_strategy = gpytorch.variational.UnwhitenedVariationalStrategy(self, inducing_points, variational_distribution, learn_inducing_locations=learn_inducing_locations)
        super().__init__(variational_strategy)
        # Default mean/kernel when none supplied.
        if mean_module is None:
            self.mean_module = gpytorch.means.ConstantMean()
        else:
            self.mean_module = mean_module
        if covar_module is None:
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
        else:
            self.covar_module = covar_module
        self.streaming = streaming
        if self.streaming:
            self.register_added_loss_term('streaming_loss_term')
        # Previous-posterior state; initialized unconditionally so the
        # attributes always exist (filled by register_streaming_loss).
        self.old_variational_dist = None
        self.old_prior_dist = None
        self.old_inducing_points = None
        self.beta = beta
        self.likelihood = likelihood
        self.feat_extractor = feat_extractor
        self.train_checkpoint = deepcopy(self.state_dict())
        self.eval_checkpoint = deepcopy(self.state_dict())

    def __call__(self, inputs):
        # Drop a stale cached Cholesky factor when the batch size changed.
        if hasattr(self.variational_strategy, '_memoize_cache'):
            if (('cholesky_factor' in self.variational_strategy._memoize_cache) and (self.variational_strategy._memoize_cache['cholesky_factor'].shape[0] != inputs.shape[0])):
                self.variational_strategy._memoize_cache.pop('cholesky_factor')
        if self.feat_extractor:
            inputs = self.feat_extractor(inputs)
        output_dist = super().__call__(inputs)
        if (self.streaming and self.training):
            self.add_streaming_loss(inputs.shape[0], self.beta)
        return output_dist

    def add_streaming_loss(self, n, beta):
        # Evaluate the current posterior at the *old* inducing points in eval
        # mode, then register the streaming correction term scaled by beta/n.
        self.eval()
        current_var_dist = self(self.old_inducing_points)
        self.train()
        new_added_loss_term = StreamingAddedLossTerm(current_var_dist, self.old_variational_dist, self.old_prior_dist, (beta / n))
        self.update_added_loss_term('streaming_loss_term', new_added_loss_term)

    def register_streaming_loss(self):
        """Snapshot the current variational/prior distributions and inducing
        points as the 'old' state the streaming loss is measured against."""
        current_var_dist = self.variational_strategy.variational_distribution
        variational_covar = deepcopy(current_var_dist.covariance_matrix.detach())
        # Jitter for numerical stability of the stored covariance.
        variational_covar = variational_covar + (1e-05 * torch.eye(variational_covar.shape[0], dtype=variational_covar.dtype, device=variational_covar.device))
        self.old_variational_dist = gpytorch.distributions.MultivariateNormal(deepcopy(current_var_dist.mean.detach()).contiguous(), variational_covar.contiguous())
        prior_dist = self.variational_strategy.prior_distribution
        self.old_prior_dist = gpytorch.distributions.MultivariateNormal(deepcopy(prior_dist.mean.detach()).contiguous(), deepcopy(prior_dist.covariance_matrix.detach()).contiguous())
        self.old_inducing_points = self.variational_strategy.inducing_points.clone().detach()

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)

    def param_groups(self, base_lr, learn_features=True):
        """Optimizer parameter groups: raw (constrained) params at base_lr,
        everything else at base_lr/10; feature extractor frozen on demand."""
        groups = []
        for (name, param) in self.named_parameters():
            if ('raw' in name):
                groups.append({'params': param, 'lr': base_lr})
            else:
                lr = (base_lr / 10)
                if (('feat_extractor' in name) and (learn_features is False)):
                    lr = 0.0
                groups.append({'params': param, 'lr': lr})
        return groups

    def reset_checkpoints(self):
        self.train_checkpoint = deepcopy(self.state_dict())
        self.eval_checkpoint = deepcopy(self.state_dict())

    def set_streaming(self, streaming_state: bool):
        """Toggle streaming mode and (re)initialize the old-posterior state."""
        self.streaming = streaming_state
        self.register_added_loss_term('streaming_loss_term')
        self.old_variational_dist = None
        self.old_prior_dist = None
        if (streaming_state is True):
            self.old_inducing_points = self.variational_strategy.inducing_points.clone().detach()
        else:
            self.old_inducing_points = None

    def update_variational_parameters(self, new_x, new_y, new_inducing_points=None):
        """Closed-form online update of the variational distribution for a new
        batch (new_x, new_y), optionally moving to new inducing points."""
        if (new_inducing_points is None):
            new_inducing_points = self.variational_strategy.inducing_points.detach().clone()
        self.set_streaming(True)
        with torch.no_grad():
            self.register_streaming_loss()
            if (len(new_y.shape) == 1):
                new_y = new_y.view(-1, 1)
            S_a = self.variational_strategy.variational_distribution.lazy_covariance_matrix
            K_aa_old = self.variational_strategy.prior_distribution.lazy_covariance_matrix
            m_a = self.variational_strategy.variational_distribution.mean
            D_a_inv = (S_a.evaluate().inverse() - K_aa_old.evaluate().inverse())
            # Pseudo-observations implied by the old posterior.
            pseudo_points = torch.solve(S_a.inv_matmul(m_a).unsqueeze(-1), D_a_inv)[0]
            hat_y = torch.cat((new_y.view(-1, 1), pseudo_points))
            noise_diag = (self.likelihood.noise * torch.eye(new_y.size(-2)).to(new_y.device))
            zero_part = torch.zeros(new_y.size(-2), pseudo_points.size(0)).to(new_y.device)
            # Block-diagonal effective noise over [new data; pseudo points].
            tophalf = torch.cat((noise_diag, zero_part), -1)
            bottomhalf = torch.cat((zero_part.t(), D_a_inv.inverse()), -1)
            sigma_hat_y = torch.cat((tophalf, bottomhalf))
            stacked_data = torch.cat((new_x, self.variational_strategy.inducing_points))
            K_fb = self.covar_module(stacked_data, new_inducing_points)
            K_bb = self.covar_module(new_inducing_points)
            # NOTE(review): the matmul operators below had their '@' dropped
            # in this copy of the file; restored. torch.solve is deprecated --
            # consider torch.linalg.solve on newer torch.
            pred_cov = ((K_fb @ K_bb.inv_matmul(K_fb.evaluate().t())) + sigma_hat_y)
            new_mean = (K_fb.t() @ torch.solve(hat_y, pred_cov)[0].squeeze(-1).detach().contiguous())
            new_cov = (K_bb - (K_fb.t() @ torch.solve(K_fb.evaluate(), pred_cov)[0]))
            new_variational_chol = psd_safe_cholesky(new_cov.evaluate(), jitter=cholesky_jitter.value()).detach().contiguous()
            # Overwrite the variational parameters in place.
            self.variational_strategy._variational_distribution.variational_mean.data.mul_(0.0).add_(new_mean)
            self.variational_strategy._variational_distribution.chol_variational_covar.data.mul_(0.0).add_(new_variational_chol)
            self.variational_strategy.inducing_points.data.mul_(0.0).add_(new_inducing_points.detach())
class ForwardTafel(BaseKinetics):
    """Forward-branch Tafel kinetics (anodic exponential term only)."""

    def __init__(self, param, domain, reaction, options, phase='primary'):
        super().__init__(param, domain, reaction, options, phase)

    def _get_kinetics(self, j0, ne, eta_r, T, u):
        # i = u * j0 * exp(ne * alpha * F * eta_r / (R * T))
        alpha = self.phase_param.alpha_bv
        exponent = (ne * alpha) * ((self.param.F * eta_r) / (self.param.R * T))
        return (u * j0) * pybamm.exp(exponent)
def extract_warnings_from_single_artifact(artifact_path, targets):
    # Extract warning blocks mentioning any of ``targets`` from a CI
    # artifact. ``artifact_path`` is a directory containing warnings.txt
    # when the module-level ``from_gh`` flag is set, otherwise a zip archive
    # with a warnings.txt member. Returns the set of matching blocks.
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        # Warning blocks are runs of indented lines; a non-indented line
        # terminates the current block, which is kept when it contains
        # ': <target>: ' for any target category.
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode('UTF-8')
            if ('warnings summary (final)' in line):
                continue
            elif (not line.startswith(' ')):
                if (len(buffer) > 0):
                    warning = '\n'.join(buffer)
                    if any(((f': {x}: ' in warning) for x in targets)):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)
    if from_gh:
        # Artifact already extracted into a directory (GitHub API path).
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if (not os.path.isdir(file_path)):
                if (filename != 'warnings.txt'):
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if (not os.path.isdir(filename)):
                        if (filename != 'warnings.txt'):
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            # Best-effort: a corrupt/non-zip artifact is skipped, not fatal.
            logger.warning(f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.')
    return selected_warnings
class AdaptiveAvgMaxPool2d(nn.Module):
    """Global pooling that sums adaptive average and max pooling results."""

    def __init__(self):
        super(AdaptiveAvgMaxPool2d, self).__init__()
        self.gap = FastGlobalAvgPool2d()
        self.gmp = nn.AdaptiveMaxPool2d(1)

    def forward(self, x):
        # Combine the global-average and global-max features elementwise.
        return self.gap(x) + self.gmp(x)
def _weights_init(m): if isinstance(m, nn.Conv2d): torch.nn.init.xavier_uniform_(m.weight) if (m.bias is not None): torch.nn.init.zeros_(m.bias) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): n = m.weight.size(1) m.weight.data.normal_(0, 0.01) m.bias.data.zero_()