code
stringlengths
281
23.7M
class SawyerPegUnplugSideV1Policy(Policy):
    """Scripted expert policy for the Metaworld peg-unplug-side task:
    hover over the peg, descend onto it, grab, and pull it out along +x."""

    @staticmethod
    def _parse_obs(obs):
        """Split the flat observation vector into named components."""
        # NOTE(review): the source shows a stray "_fully_parsed" token before
        # this method - presumably a stripped @assert_fully_parsed decorator;
        # confirm against upstream Metaworld.
        return {
            'hand_pos': obs[:3],
            'peg_pos': obs[3:6],
            'unused_info': obs[6:],
        }

    def get_action(self, obs):
        """Map an observation to a 4-dim action (xyz delta + gripper effort)."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        # FIX: _parse_obs/_desired_pos/_grab_effort take the (parsed) obs as
        # their first argument but were plain methods called via self.*, which
        # would have passed self as that argument; they are now @staticmethod.
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0)
        action['grab_effort'] = self._grab_effort(o_d)
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        """Waypoint logic: align above the peg -> descend -> pull sideways."""
        pos_curr = o_d['hand_pos']
        pos_peg = o_d['peg_pos'] + np.array([0.005, 0.0, 0.015])
        if np.linalg.norm(pos_curr[:2] - pos_peg[:2]) > 0.04:
            # Not yet horizontally aligned: hover 0.3 above the peg.
            return pos_peg + np.array([0.0, 0.0, 0.3])
        elif abs(pos_curr[2] - pos_peg[2]) > 0.02:
            # Aligned but too high: descend onto the peg.
            return pos_peg
        else:
            # Gripping: pull the peg out along +x.
            return pos_peg + np.array([0.1, 0.0, 0.0])

    @staticmethod
    def _grab_effort(o_d):
        """Open the gripper while approaching; close once aligned with the peg."""
        pos_curr = o_d['hand_pos']
        pos_peg = o_d['peg_pos']
        if (np.linalg.norm(pos_curr[:2] - pos_peg[:2]) > 0.04) or (abs(pos_curr[2] - pos_peg[2]) > 0.15):
            return -1.0
        else:
            return 0.7
# FIX(review): only ".parametrize(" survived in the source - the leading
# "@pytest.mark." of the decorator was lost; restored below. Confirm upstream.
@pytest.mark.parametrize('\n    unlogged_pulls_ok, kind_name, namespace_name, repository, repository_name,\n    timestamp,\n    index_response, expected_request, throws\n    ', [
    pytest.param(False, 'non-existing', None, None, None, None, None, None, True,
                 id='Invalid Kind'),
    pytest.param(False, 'pull_repo', 'user1', Mock(id=1), 'repo1', None, None, None, True,
                 id='Invalid Parameters'),
    pytest.param(False, 'pull_repo', 'user1', Mock(id=1), None, None, FAILURE_400, None, True,
                 id='Throw on pull log failure'),
    pytest.param(True, 'pull_repo', 'user1', Mock(id=1), None, parse('2017-03-08T03:30'),
                 FAILURE_400, INDEX_REQUEST_2017_03_08, False,
                 id='Ok on pull log failure'),
    pytest.param(False, 'pull_repo', 'user1', Mock(id=1), None, parse('2017-03-08T03:30'),
                 INDEX_RESPONSE_2017_03_08, INDEX_REQUEST_2017_03_08, False,
                 id='Log with namespace name and repository'),
    pytest.param(False, 'push_repo', 'user1', None, 'repo1', parse('2019-01-01T03:30'),
                 INDEX_RESPONSE_2019_01_01, INDEX_REQUEST_2019_01_01, False,
                 id='Log with namespace name and repository name'),
])
def test_log_action(unlogged_pulls_ok, kind_name, namespace_name, repository, repository_name,
                    timestamp, index_response, expected_request, throws,
                    app_config, logs_model, mock_elasticsearch, mock_db_model, mock_random_id):
    """log_action must index a document matching expected_request, or raise when
    the Elasticsearch write fails and unlogged pulls are not tolerated."""
    mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
    mock_elasticsearch.index = Mock(return_value=index_response)
    app_config['ALLOW_PULLS_WITHOUT_STRICT_LOGGING'] = unlogged_pulls_ok
    configure(app_config)
    performer = Mock(id=1)
    ip = '192.168.1.1'
    metadata = {'key': 'value', 'time': parse('2018-03-08T03:30'), '': ''}
    if throws:
        with pytest.raises(Exception):
            logs_model.log_action(kind_name, namespace_name, performer, ip, metadata,
                                  repository, repository_name, timestamp)
    else:
        logs_model.log_action(kind_name, namespace_name, performer, ip, metadata,
                              repository, repository_name, timestamp)
        mock_elasticsearch.index.assert_called_with(expected_request[0], expected_request[1])
def update_changelog() -> None:
    """Refresh (or insert) the vendored-schemas entry in CHANGELOG.rst."""
    print('changes were made, updating changelog')
    with open('CHANGELOG.rst', encoding='utf-8') as fp:
        content = fp.read()
    new_slug = VENDOR_SLUG + f''' - Update vendored schemas ({TODAY}) '''
    # Replace an existing update line when present; otherwise splice the new
    # entry in right after the vendor slug marker.
    if EXISTING_CHANGELINE_PATTERN.search(content):
        content = EXISTING_CHANGELINE_PATTERN.sub(new_slug, content)
    else:
        content = content.replace(VENDOR_SLUG, new_slug)
    with open('CHANGELOG.rst', 'w', encoding='utf-8') as fp:
        fp.write(content)
def evaluate(args, model, features, tag='dev'):
    """Run inference over *features* and report micro-F1.

    Returns a tuple ``(max_f1, {"<tag>_f1": max_f1 * 100})``.
    """
    dataloader = DataLoader(features, batch_size=args.test_batch_size,
                            collate_fn=collate_fn, drop_last=False)
    keys, preds = [], []
    # FIX: switch to eval mode once before the loop, not on every batch;
    # also drop the unused enumerate index.
    model.eval()
    for batch in dataloader:
        inputs = {
            'input_ids': batch[0].to(args.device),
            'attention_mask': batch[1].to(args.device),
            'ss': batch[3].to(args.device),   # subject positions (per collate_fn)
            'os': batch[4].to(args.device),   # object positions (per collate_fn)
        }
        keys += batch[2].tolist()  # gold labels travel in slot 2
        with torch.no_grad():
            logit = model(**inputs)[0]
            pred = torch.argmax(logit, dim=-1)
        preds += pred.tolist()
    keys = np.array(keys, dtype=np.int64)
    preds = np.array(preds, dtype=np.int64)
    _, _, max_f1 = get_f1(keys, preds)
    output = {tag + '_f1': max_f1 * 100}
    print(output)
    return (max_f1, output)
def setup(app: Sphinx) -> None:
    """Ensure vdom-json-schema.json next to the package matches VDOM_JSON_SCHEMA.

    Rewrites the file only when it is missing or its content is stale, so doc
    builds do not touch the file needlessly.
    """
    schema_file = Path(__file__).parent.parent / 'vdom-json-schema.json'
    expected = json.dumps(VDOM_JSON_SCHEMA, indent=2, sort_keys=True)
    if not schema_file.exists() or schema_file.read_text() != expected:
        schema_file.write_text(expected)
class _AnnotationContext(Context):
    """Context used while evaluating annotations found in typeshed stubs.

    Delegates name resolution to the owning TypeshedFinder and routes error
    reporting into its log rather than raising.
    """

    finder: 'TypeshedFinder'
    module: str

    def show_error(self, message: str, error_code: ErrorCode = ErrorCode.invalid_annotation,
                   node: Optional[ast.AST] = None) -> None:
        # Errors in stubs are logged, never surfaced as user diagnostics.
        self.finder.log(message, ())

    def get_name(self, node: ast.Name) -> Value:
        """Resolve a bare name relative to this context's module."""
        return self.finder.resolve_name(self.module, node.id)

    def get_attribute(self, root_value: Value, node: ast.Attribute) -> Value:
        """Resolve attribute access, special-casing module objects."""
        if isinstance(root_value, KnownValue):
            if isinstance(root_value.val, ModuleType):
                return self.finder.resolve_name(root_value.val.__name__, node.attr)
        elif isinstance(root_value, SyntheticModuleValue):
            return self.finder.resolve_name('.'.join(root_value.module_path), node.attr)
        # Anything else falls back to the generic Context behaviour.
        return super().get_attribute(root_value, node)
# FIX(review): the source shows a bare "()" where this decorator belongs -
# restored as @pytest.fixture(); confirm against the original file.
@pytest.fixture()
def plugin_distro(plugin_package: Package, tmp_path: Path) -> metadata.Distribution:
    """Build an importlib.metadata Distribution backed by *plugin_package*."""

    class MockDistribution(metadata.Distribution):
        def read_text(self, filename: str) -> (str | None):
            # Only METADATA is synthesised; every other file "does not exist".
            if filename == 'METADATA':
                return '\n'.join([
                    f'Name: {plugin_package.name}',
                    f'Version: {plugin_package.version}',
                    *[f'Requires-Dist: {dep.to_pep_508()}' for dep in plugin_package.requires],
                ])
            return None

        def locate_file(self, path: (str | PathLike[str])) -> Path:
            return tmp_path / path

    return MockDistribution()
def test_exception_specifiers():
    """Exercise the bound methods/functions with Fibonacci-flavoured expectations."""
    c = m.C()
    method_cases = [
        (c.m1, 2, 1), (c.m2, 3, 1), (c.m3, 5, 2), (c.m4, 7, 3),
        (c.m5, 10, 5), (c.m6, 14, 8), (c.m7, 20, 13), (c.m8, 29, 21),
    ]
    for fn, arg, expected in method_cases:
        assert fn(arg) == expected
    function_cases = [(m.f1, 33, 34), (m.f2, 53, 55), (m.f3, 86, 89), (m.f4, 140, 144)]
    for fn, arg, expected in function_cases:
        assert fn(arg) == expected
class SetPingRole(TourneyButton):
    """Button that lets staff pick the role mentioned when registration opens."""

    def __init__(self, ctx: Context, letter: str):
        super().__init__(emoji=ri(letter))
        self.ctx = ctx

    async def callback(self, interaction: discord.Interaction):
        """Prompt for a role mention, store it on the record, refresh the view."""
        await interaction.response.defer()
        prompt = await self.ctx.simple('Mention the role you want to ping with registration open message.')
        role = await inputs.role_input(self.ctx, delete_after=True)
        await self.ctx.safe_delete(prompt)
        self.view.record.ping_role_id = role.id
        await self.view.refresh_view()
class QuantizedCommCodec(Generic[QuantizationContext]):
    """Interface for codecs that (de)quantize tensors around communication
    collectives, with an optional per-call context object threaded between
    encode and decode."""

    def encode(self, input_tensor: torch.Tensor, ctx: Optional[QuantizationContext]=None) -> torch.Tensor:
        """Quantize *input_tensor* for transmission."""
        ...

    def decode(self, input_grad: torch.Tensor, ctx: Optional[QuantizationContext]=None) -> torch.Tensor:
        """Reconstruct a tensor from its quantized representation."""
        ...

    def quantized_dtype(self) -> torch.dtype:
        """Dtype of the tensors produced by encode()."""
        ...

    def calc_quantized_size(self, input_len: int, ctx: Optional[QuantizationContext]=None) -> int:
        """Element count of the quantized form of an *input_len*-element tensor."""
        ...

    def create_context(self) -> Optional[QuantizationContext]:
        """Create the per-call context shared by encode()/decode(), if any."""
        ...
class SquirrelCommand(object):
    """Base class for CLI subcommands.

    Subclasses override setup() to add arguments and run() to do the work;
    fail() aborts with a user-facing error.
    """

    def fail(self, message):
        """Abort the command with a tool error carrying *message*."""
        raise error.ToolError(message)

    def make_subparser(self, subparsers):
        # Each subcommand registers under its own class name.
        return subparsers.add_parser(self.__class__.__name__, help='Undocumented.')

    def setup(self, parser):
        """Hook: add command arguments. Default is a no-op."""
        pass

    def run(self, parser, args):
        """Hook: execute the command. Default is a no-op."""
        pass
class Order():
    """A random permutation of p nodes with per-node parent sets, cached
    local scores (negated), and a running edge count - the working state of
    an order-based structure search."""

    def __init__(self, p, score):
        self.order = list(range(p))
        self.parents = {}
        self.local_scores = {}
        self.edges = 0
        random.shuffle(self.order)
        # Every node starts with no parents and its empty-parent-set score.
        for position in range(p):
            node = self.order[position]
            self.parents[node] = []
            self.local_scores[node] = -score.score(node, [])

    def get(self, i):
        """Node at position *i*."""
        return self.order[i]

    def set(self, i, y):
        """Place node *y* at position *i*."""
        self.order[i] = y

    def index(self, y):
        """Position of node *y* in the order."""
        return self.order.index(y)

    def insert(self, i, y):
        self.order.insert(i, y)

    def pop(self, i=-1):
        return self.order.pop(i)

    def get_parents(self, y):
        return self.parents[y]

    def set_parents(self, y, y_parents):
        self.parents[y] = y_parents

    def get_local_score(self, y):
        return self.local_scores[y]

    def set_local_score(self, y, local_score):
        self.local_scores[y] = local_score

    def get_edges(self):
        return self.edges

    def set_edges(self, edges):
        self.edges = edges

    def bump_edges(self, bump):
        """Adjust the edge count by *bump* (may be negative)."""
        self.edges += bump

    def len(self):
        """Number of nodes in the order."""
        return len(self.order)
def _tf_sample_num_chunks(frame_rate, n_video_frames, chunks_per_minute):
    """Stochastically round the expected chunk count for a clip.

    The expected number of chunks (duration-in-minutes * chunks_per_minute)
    is usually fractional; the fractional part is realised as one extra chunk
    with matching probability, keeping the count unbiased in expectation.
    """
    seconds = tf.cast(n_video_frames, tf.float32) / frame_rate
    expected_chunks = seconds * (chunks_per_minute / SECONDS_IN_A_MINUTE)
    base = tf.math.floor(expected_chunks)
    # Draw in [0, 1): below the fractional remainder -> one extra chunk.
    extra = tf.less(tf.random.uniform(shape=[]), expected_chunks - base)
    return tf.cast(base, dtype=tf.int32) + tf.cast(extra, dtype=tf.int32)
class W_Number(W_Object):
    """Abstract base for all wrapped numeric values."""

    _attrs_ = []
    errorname = 'number'

    def __init__(self):
        # Numbers must be created through concrete subclasses only.
        raise NotImplementedError('abstract base class')

    def immutable(self):
        """Numbers are immutable values."""
        return True

    def eqv(self, other):
        # For numbers, eqv? coincides with equal?.
        return self.equal(other)

    def hash_eqv(self):
        return self.hash_equal(info=None)
class MediaMigrationView(RedirectView):
    """Permanently redirect legacy media URLs to their S3 location."""

    prefix = None          # optional path segment prepended to the captured url
    permanent = True       # issue a permanent (301) redirect
    query_string = False   # drop any query string from the redirect target

    def get_redirect_url(self, *args, **kwargs):
        """Assemble <endpoint>/<bucket>/[prefix/]<captured-url>."""
        image_path = kwargs['url']
        if self.prefix:
            image_path = '/'.join([self.prefix, image_path])
        return '/'.join([settings.AWS_S3_ENDPOINT_URL,
                         settings.AWS_STORAGE_BUCKET_NAME,
                         image_path])
class Effect4057(BaseEffect):
    """Projected passive effect: multiplies the EM damage of rocket-using
    charges on the fit by the source's smallWeaponDamageMultiplier attribute
    (post-multiplication stacking group)."""

    runTime = 'early'
    type = ('projected', 'passive')

    # NOTE: effect handlers in this framework are invoked as plain functions
    # on the class attribute - the missing `self` parameter is intentional.
    def handler(fit, beacon, context, projectionRange, **kwargs):
        fit.modules.filteredChargeMultiply((lambda mod: mod.charge.requiresSkill('Rockets')), 'emDamage', beacon.getModifiedItemAttr('smallWeaponDamageMultiplier'), stackingPenalties=True, penaltyGroup='postMul', **kwargs)
class AVStreamInfo(Structure):
    """ctypes mirror of FFmpeg's internal per-stream probing state.

    Field order and types must match the linked FFmpeg build exactly - do not
    reorder. NOTE(review): this internal struct varies across FFmpeg versions;
    confirm the layout against the headers of the bundled library.
    """

    _fields_ = [
        ('last_dts', c_int64),
        ('duration_gcd', c_int64),
        ('duration_count', c_int),
        ('rfps_duration_sum', c_int64),
        # (c_double * 2) * 399; 399 = 30*12 + 30 + 3 + 6 - presumably FFmpeg's
        # MAX_STD_TIMEBASES table size - TODO confirm.
        ('duration_error', POINTER(((c_double * 2) * ((((30 * 12) + 30) + 3) + 6)))),
        ('codec_info_duration', c_int64),
        ('codec_info_duration_fields', c_int64),
        ('frame_delay_evidence', c_int),
        ('found_decoder', c_int),
        ('last_duration', c_int64),
        ('fps_first_dts', c_int64),
        ('fps_first_dts_idx', c_int),
        ('fps_last_dts', c_int64),
        ('fps_last_dts_idx', c_int),
    ]
class CosineAnnealingLRWithWarmup(object):
    """Cosine-annealing LR schedule with an optional linear warmup phase.

    For epochs <= ``warmup`` each param group's LR is increased linearly from
    its initial value towards ``warmup_lr``; afterwards it is cosine-annealed
    from the peak reached during warmup down to ``min_lr`` over ``T_max``
    epochs. FIX: removed the unused ``old_lr`` local in ``_reduce_lr``.
    """

    def __init__(self, optimizer, T_max, last_epoch=-1, verbose=False,
                 min_lr=0, warmup_lr=None, warmup=0):
        self.optimizer = optimizer
        self.T_max = T_max
        self.last_epoch = last_epoch
        self.verbose = verbose
        self.warmup_lr = warmup_lr
        self.warmup = warmup
        if isinstance(min_lr, (list, tuple)):
            if len(min_lr) != len(optimizer.param_groups):
                raise ValueError('expected {} min_lrs, got {}'.format(
                    len(optimizer.param_groups), len(min_lr)))
            self.min_lrs = list(min_lr)
        else:
            self.min_lrs = [min_lr] * len(optimizer.param_groups)
        # Peak LRs start at the floors and are raised as warmup progresses.
        self.max_lrs = [lr for lr in self.min_lrs]
        self._prepare_for_warmup()

    def step(self):
        """Advance one epoch and update every param group's LR."""
        epoch = self.last_epoch + 1
        self.last_epoch = epoch
        if epoch <= self.warmup:
            self._increase_lr(epoch)
        else:
            self._reduce_lr(epoch)

    def _reduce_lr(self, epoch):
        # Cosine decay from the recorded per-group peak down to its floor.
        for i, param_group in enumerate(self.optimizer.param_groups):
            progress = float(epoch - self.warmup) / float(max(1, self.T_max - self.warmup))
            factor = max(0.0, 0.5 * (1.0 + math.cos(math.pi * progress)))
            new_lr = max(self.max_lrs[i] * factor, self.min_lrs[i])
            param_group['lr'] = new_lr
            if self.verbose:
                print('Epoch {:5d}: reducing learning rate of group {} to {:.4e}.'.format(epoch, i, new_lr))

    def _increase_lr(self, epoch):
        # Linear warmup; also track the peak LR reached per group.
        for i, param_group in enumerate(self.optimizer.param_groups):
            new_lr = float(param_group['lr']) + self.warmup_lr_steps[i]
            param_group['lr'] = new_lr
            self.max_lrs[i] = max(self.max_lrs[i], new_lr)
            if self.verbose:
                print('Epoch {:5d}: increasing learning rate of group {} to {:.4e}.'.format(epoch, i, new_lr))

    def _prepare_for_warmup(self):
        """Normalise warmup_lr into per-group targets and per-epoch steps."""
        if self.warmup_lr is not None:
            if isinstance(self.warmup_lr, (list, tuple)):
                if len(self.warmup_lr) != len(self.optimizer.param_groups):
                    raise ValueError('expected {} warmup_lrs, got {}'.format(
                        len(self.optimizer.param_groups), len(self.warmup_lr)))
                self.warmup_lrs = list(self.warmup_lr)
            else:
                self.warmup_lrs = [self.warmup_lr] * len(self.optimizer.param_groups)
        else:
            self.warmup_lrs = None
        if self.warmup > self.last_epoch:
            # NOTE(review): warmup > 0 with warmup_lr=None raises TypeError
            # here (warmup_lrs is None), matching the original behaviour.
            curr_lrs = [group['lr'] for group in self.optimizer.param_groups]
            self.warmup_lr_steps = [
                max(0, (self.warmup_lrs[i] - curr_lrs[i]) / float(self.warmup))
                for i in range(len(curr_lrs))
            ]
        else:
            self.warmup_lr_steps = None

    def state_dict(self):
        """Serializable state: everything except the optimizer itself."""
        return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}

    def load_state_dict(self, state_dict):
        """Restore state and recompute the warmup step table."""
        self.__dict__.update(state_dict)
        self._prepare_for_warmup()
def _prototype_parent_select(caller, new_parent):
    """Validate and apply a new prototype_parent choice from the OLC menu.

    Returns the result of _set_property on success, otherwise None after
    reporting the validation error to *caller*.
    """
    ret = None
    prototype_parent = protlib.search_prototype(new_parent)
    try:
        # Flattening with validation surfaces inheritance problems up front.
        if not prototype_parent:
            raise RuntimeError('Not found.')
        spawner.flatten_prototype(prototype_parent[0], validate=True)
    except RuntimeError as err:
        caller.msg('Selected prototype-parent {} caused Error(s):\n|r{}|n'.format(new_parent, err))
    else:
        ret = _set_property(caller, new_parent, prop='prototype_parent',
                            processor=str, next_node='node_prototype_parent')
        _get_flat_menu_prototype(caller, refresh=True)
        caller.msg('Selected prototype parent |c{}|n.'.format(new_parent))
    return ret
class MessageMock(QObject):
    """Test double that records messages and questions shown via the
    global message bridge, re-emitting and logging each one."""

    got_message = pyqtSignal(message.MessageInfo)
    got_question = pyqtSignal(usertypes.Question)

    def __init__(self, parent=None):
        super().__init__(parent)
        self.messages = []
        self.questions = []
        self._logger = logging.getLogger('messagemock')

    # FIX(review): the source shows bare "(message.MessageInfo)" /
    # "(usertypes.Question)" expressions before these methods - almost
    # certainly stripped @pyqtSlot decorators; restored. Confirm upstream.
    @pyqtSlot(message.MessageInfo)
    def _record_message(self, info):
        """Record a shown message and log it at the matching level."""
        self.got_message.emit(info)
        log_levels = {usertypes.MessageLevel.error: logging.ERROR,
                      usertypes.MessageLevel.info: logging.INFO,
                      usertypes.MessageLevel.warning: logging.WARNING}
        log_level = log_levels[info.level]
        self._logger.log(log_level, info.text)
        self.messages.append(info)

    @pyqtSlot(usertypes.Question)
    def _record_question(self, question):
        """Record an asked question."""
        self.got_question.emit(question)
        self._logger.debug(question)
        self.questions.append(question)

    def getmsg(self, level=None):
        """Return the single recorded message, optionally asserting its level."""
        assert len(self.messages) == 1
        msg = self.messages[0]
        if level is not None:
            assert msg.level == level
        return msg

    def get_question(self):
        """Return the single recorded question."""
        assert len(self.questions) == 1
        return self.questions[0]

    def connect(self):
        """Hook this mock into the global message bridge."""
        message.global_bridge.show_message.connect(self._record_message)
        message.global_bridge.ask_question.connect(self._record_question)
        message.global_bridge._connected = True

    def disconnect(self):
        """Detach this mock from the global message bridge."""
        message.global_bridge.show_message.disconnect(self._record_message)
        message.global_bridge.ask_question.disconnect(self._record_question)
class LetterItem(DemoItem):
    """A single ticker letter drawn as white text on a green disc."""

    def __init__(self, letter, parent=None):
        super(LetterItem, self).__init__(parent)
        self.letter = letter
        # One shared image per (file, letter) pair.
        self.useSharedImage(__file__ + letter)

    def createImage(self, transform):
        """Rasterize the letter at the resolution implied by *transform*."""
        scaledRect = transform.mapRect(QRect(0, 0, 25, 25))
        image = QImage(scaledRect.width(), scaledRect.height(),
                       QImage.Format_ARGB32_Premultiplied)
        image.fill(0)
        painter = QPainter(image)
        painter.scale(transform.m11(), transform.m22())
        painter.setRenderHints(QPainter.TextAntialiasing | QPainter.Antialiasing |
                               QPainter.SmoothPixmapTransform)
        painter.setPen(Qt.NoPen)
        if Colors.useEightBitPalette:
            # Flat colour for palettes without alpha blending.
            painter.setBrush(QColor(102, 175, 54))
            painter.drawEllipse(0, 0, 25, 25)
            painter.setFont(Colors.tickerFont())
            painter.setPen(QColor(255, 255, 255))
            painter.drawText(10, 15, self.letter)
        else:
            # Vertical gradient fading towards the bottom of the disc.
            brush = QLinearGradient(0, 0, 0, 25)
            brush.setSpread(QLinearGradient.PadSpread)
            brush.setColorAt(0.0, QColor(102, 175, 54, 200))
            brush.setColorAt(1.0, QColor(102, 175, 54, 60))
            painter.setBrush(brush)
            painter.drawEllipse(0, 0, 25, 25)
            painter.setFont(Colors.tickerFont())
            painter.setPen(QColor(255, 255, 255, 255))
            painter.drawText(10, 15, self.letter)
        return image
def save_ckpt(state, is_best, filename='ckpt.pth.tar', prefix=''):
    """Persist a training checkpoint; mirror it to model_best on improvement."""
    ckpt_path = prefix + filename
    torch.save(state, ckpt_path)
    if is_best:
        best_path = prefix + 'model_best.pth.tar'
        shutil.copyfile(ckpt_path, best_path)
        logging.info('Updating the best model checkpoint: {}'.format(best_path))
def test_path_completion_user_path_expansion(cmd2_app):
    """Completions after '~/' must equal completions after the expanded home dir."""
    cmd = 'dir' if sys.platform.startswith('win') else 'ls'

    def completions_for(text):
        # Build a shell line and collect path completions with the typed
        # prefix stripped, so both spellings are directly comparable.
        line = 'shell {} {}'.format(cmd, text)
        endidx = len(line)
        begidx = endidx - len(text)
        return [match.replace(text, '', 1)
                for match in cmd2_app.path_complete(text, line, begidx, endidx)]

    completions_tilde_slash = completions_for('~{}'.format(os.path.sep))
    completions_home = completions_for(os.path.expanduser('~') + os.path.sep)
    assert completions_tilde_slash == completions_home
class IdentityWeightCreator(WeightCreatorInterface):
    """Weight creator that gives every frame the same per-class weights and
    passes chunk weights through unchanged."""

    def __init__(self, class_weights: np.ndarray) -> None:
        self._class_weights = class_weights

    def video_weight_inputs(self, video_labels: np.ndarray, video_targets: np.ndarray) -> np.ndarray:
        # One identical row of class weights per frame.
        n_frames = video_labels.shape[0]
        return np.tile(self._class_weights, (n_frames, 1))

    def tf_chunk_weights(self, chunk_weight_inputs):
        """Identity: weights pass through unchanged."""
        return chunk_weight_inputs

    def chunk_weights(self, chunk_weight_inputs):
        """Identity: weights pass through unchanged."""
        return chunk_weight_inputs
class XglcdFont(object):
    """Bitmap font loaded from an X-GLCD C-array text file, rendering
    letters into RGB565 pixel buffers for small displays."""

    # Maps a single set bit (1<<k) to its byte offset (2*k) in the RGB565
    # output buffer for the landscape fast path.
    BIT_POS = {1: 0, 2: 2, 4: 4, 8: 6, 16: 8, 32: 10, 64: 12, 128: 14, 256: 16}

    def __init__(self, path, width, height, start_letter=32, letter_count=96):
        self.width = width
        self.height = max(height, 8)  # storage granularity is 8-pixel rows
        self.start_letter = start_letter
        self.letter_count = letter_count
        # Per letter: 1 width byte + width columns * ceil(height/8) bytes.
        self.bytes_per_letter = (((floor(((self.height - 1) / 8)) + 1) * self.width) + 1)
        self.__load_xglcd_font(path)

    def __load_xglcd_font(self, path):
        """Parse the hex byte rows of an X-GLCD font file into self.letters."""
        bytes_per_letter = self.bytes_per_letter
        self.letters = bytearray((bytes_per_letter * self.letter_count))
        mv = memoryview(self.letters)
        offset = 0
        with open(path, 'r') as f:
            for line in f:
                line = line.strip()
                # Only lines that start with hex data carry glyph bytes.
                if ((len(line) == 0) or (line[0:2] != '0x')):
                    continue
                # Strip trailing // comment and a trailing comma, if present.
                comment = line.find('//')
                if (comment != (- 1)):
                    line = line[0:comment].strip()
                if line.endswith(','):
                    line = line[0:(len(line) - 1)]
                mv[offset:(offset + bytes_per_letter)] = bytearray((int(b, 16) for b in line.split(',')))
                offset += bytes_per_letter

    def lit_bits(self, n):
        """Yield BIT_POS byte offsets for every set bit of *n* (LSB first)."""
        while n:
            b = (n & ((~ n) + 1))  # isolate lowest set bit
            (yield self.BIT_POS[b])
            n ^= b

    def get_letter(self, letter, color, background=0, landscape=False):
        """Render *letter* into an RGB565 buffer.

        Returns (buffer, width, height); an empty buffer if the character is
        outside this font's range.
        """
        letter_ord = (ord(letter) - self.start_letter)
        if (letter_ord >= self.letter_count):
            print(('Font does not contain character: ' + letter))
            return (b'', 0, 0)
        bytes_per_letter = self.bytes_per_letter
        offset = (letter_ord * bytes_per_letter)
        mv = memoryview(self.letters[offset:(offset + bytes_per_letter)])
        letter_width = mv[0]  # first byte of each glyph is its pixel width
        letter_height = self.height
        letter_size = (letter_height * letter_width)
        # 2 bytes per pixel (RGB565, big-endian).
        if background:
            buf = bytearray((background.to_bytes(2, 'big') * letter_size))
        else:
            buf = bytearray((letter_size * 2))
        (msb, lsb) = color.to_bytes(2, 'big')
        if landscape:
            # Landscape: fill columns from the end of the buffer backwards.
            pos = ((letter_size * 2) - (letter_height * 2))
            lh = letter_height
            for b in mv[1:]:
                for bit in self.lit_bits(b):
                    buf[(bit + pos)] = msb
                    buf[((bit + pos) + 1)] = lsb
                if (lh > 8):
                    # More 8-pixel segments remain in this column.
                    pos += 16
                    lh -= 8
                else:
                    # Column done: rewind to the start of the previous column.
                    pos -= ((letter_height * 4) - (lh * 2))
                    lh = letter_height
            else:
                pass
        else:
            # Portrait: interleave pixels row-major across columns.
            col = 0
            # NOTE: deliberately shadows the outer bytes_per_letter - here it
            # is the number of 8-pixel segments per column.
            bytes_per_letter = ceil((letter_height / 8))
            letter_byte = 0
            for b in mv[1:]:
                segment_size = ((letter_byte * letter_width) * 16)
                for bit in self.lit_bits(b):
                    pos = (((bit * letter_width) + (col * 2)) + segment_size)
                    buf[pos] = msb
                    pos = ((((bit * letter_width) + (col * 2)) + 1) + segment_size)
                    buf[pos] = lsb
                letter_byte += 1
                if ((letter_byte + 1) > bytes_per_letter):
                    col += 1
                    letter_byte = 0
        return (buf, letter_width, letter_height)

    def measure_text(self, text, spacing=1):
        """Pixel width of *text*: sum of glyph widths plus *spacing* each."""
        length = 0
        for letter in text:
            letter_ord = (ord(letter) - self.start_letter)
            # First byte of each glyph record is its width.
            offset = (letter_ord * self.bytes_per_letter)
            length += (self.letters[offset] + spacing)
        return length
def sstore(computation: BaseComputation) -> None:
    """SSTORE opcode: write *value* into *slot*, charging gas according to the
    zero/non-zero transition and refunding when a slot is cleared."""
    slot, value = computation.stack_pop_ints(2)
    current_value = computation.state.get_storage(
        address=computation.msg.storage_address, slot=slot)

    is_currently_empty = not bool(current_value)
    is_going_to_be_empty = not bool(value)

    # Refund only when a previously non-zero slot is being cleared.
    if (not is_currently_empty) and is_going_to_be_empty:
        gas_refund = constants.REFUND_SCLEAR
    else:
        gas_refund = 0

    # Setting an empty slot to a non-zero value costs GAS_SSET; every other
    # transition (including no-op writes) costs GAS_SRESET.
    if is_currently_empty and not is_going_to_be_empty:
        gas_cost = constants.GAS_SSET
    else:
        gas_cost = constants.GAS_SRESET

    computation.consume_gas(
        gas_cost,
        reason=f'SSTORE: {encode_hex(computation.msg.storage_address)}[{slot}] -> {value} ({current_value})',
    )
    if gas_refund:
        computation.refund_gas(gas_refund)
    computation.state.set_storage(
        address=computation.msg.storage_address, slot=slot, value=value)
class SpanProtoFewNERDProcessor(FewShotNERProcessor):
    """Data processor for span-based few-shot NER on Few-NERD episodes.

    Reads N-way-K-shot episode JSONL files, converts word/label sequences into
    character-level labeled spans, tokenizes support/query sentences (keeping
    offset mappings), and computes span- and class-level P/R/F1 from the
    per-rank prediction dumps written during evaluation.

    FIX: ``get_examples('test')`` previously assigned the created examples to
    ``self.test_file``, clobbering the test-file *path*; it now caches them on
    ``self.test_examples`` like the train/dev branches.
    """

    def __init__(self, data_args, training_args, model_args, tokenizer=None,
                 post_tokenizer=False, keep_raw_data=True):
        super().__init__(data_args, training_args, model_args, tokenizer,
                         post_tokenizer=post_tokenizer, keep_raw_data=keep_raw_data)
        # user_defined is a space-separated "key=value" list, e.g. "N=5 K=1 Q=1 mode=...".
        param = {p.split('=')[0]: p.split('=')[1] for p in data_args.user_defined.split(' ')}
        (N, Q, K, mode) = (param['N'], param['Q'], param['K'], param['mode'])
        self.train_file = os.path.join(data_args.data_dir, 'train_{}_{}.jsonl'.format(N, K))
        self.dev_file = os.path.join(data_args.data_dir, 'dev_{}_{}.jsonl'.format(N, K))
        self.test_file = os.path.join(data_args.data_dir, 'test_{}_{}.jsonl'.format(N, K))
        self.max_len = data_args.max_seq_length
        self.doc_stride = data_args.doc_stride
        self.sentence1_key = None
        self.mode = mode
        self.num_class = int(N)
        self.num_example = int(K)
        self.output_dir = './outputs/{}-{}-{}'.format(self.mode, self.num_class, self.num_example)

    def get_data_collator(self):
        """Build the span-proto data collator (pads to a multiple of 8 under fp16)."""
        pad_to_multiple_of_8 = self.training_args.fp16 and (not self.data_args.pad_to_max_length)
        return DataCollatorForSpanProto(self.tokenizer, num_class=self.num_class,
                                        num_example=self.num_example, mode=self.mode,
                                        pad_to_multiple_of=(8 if pad_to_multiple_of_8 else None),
                                        pad_to_max_length=self.data_args.pad_to_max_length)

    def __load_data_from_file__(self, filepath):
        """Read a JSONL episode file into a list of dicts."""
        with open(filepath) as f:
            lines = f.readlines()
        for i in range(len(lines)):
            lines[i] = json.loads(lines[i].strip())
        return lines

    def get_examples(self, set_type):
        """Load, convert, and cache the examples for one split."""
        if set_type == 'train':
            examples = self._create_examples(self.__load_data_from_file__(self.train_file), set_type)
            self.train_examples = examples
        elif set_type == 'dev':
            examples = self._create_examples(self.__load_data_from_file__(self.dev_file), set_type)
            self.dev_examples = examples
        elif set_type == 'test':
            examples = self._create_examples(self.__load_data_from_file__(self.test_file), set_type)
            # FIX: was `self.test_file = examples`, destroying the file path.
            self.test_examples = examples
        else:
            examples = None
        return examples

    def get_sentence_with_span(self, data, label2id):
        """Join word tokens into sentences and collect character-level spans.

        Returns (input_texts, labeled_spans, labeled_types) where each span is
        a [start, end] character range and types are label2id indices.
        """
        word_list = data['word']
        label_list = data['label']
        input_texts = list()
        labeled_spans = list()
        labeled_types = list()
        for words, labels in zip(word_list, label_list):
            start, end = -1, -1
            current_label = ''
            text = ''
            spans = list()
            span_types = list()
            for ei, word in enumerate(words):
                label = labels[ei]
                if label == 'O':
                    text += word + ' '
                    # Close out any entity span that was in progress.
                    if start != -1:
                        spans.append([start, end])
                        span_types.append(label2id[current_label])
                        start, end = -1, -1
                        current_label = ''
                else:
                    # A label change mid-entity closes the previous span.
                    if label != current_label and start != -1:
                        spans.append([start, end])
                        span_types.append(label2id[current_label])
                        start, end = -1, -1
                        current_label = ''
                    if start == -1:
                        start = len(text)
                    text += word + ' '
                    end = len(text)
                    current_label = label
            # Entity running to the end of the sentence.
            if start != -1:
                spans.append([start, end])
                span_types.append(label2id[current_label])
            input_texts.append(text.strip())
            labeled_spans.append(spans)
            labeled_types.append(span_types)
        return (input_texts, labeled_spans, labeled_types)

    def _create_examples(self, lines, set_type):
        """Turn raw episode dicts into flat example dicts with char-level spans."""
        examples = []
        for id_, line in enumerate(lines):
            target_classes = line['types']
            label2id = {v: ei for ei, v in enumerate(target_classes)}
            support = line['support']
            query = line['query']
            (support_input_texts, support_labeled_spans, support_labeled_types) = \
                self.get_sentence_with_span(support, label2id)
            (query_input_texts, query_labeled_spans, query_labeled_types) = \
                self.get_sentence_with_span(query, label2id)
            examples.append({
                'id': id_,
                'support_input_texts': support_input_texts,
                'support_labeled_spans': support_labeled_spans,
                'support_labeled_types': support_labeled_types,
                'support_sentence_num': len(support_input_texts),
                'query_input_texts': query_input_texts,
                'query_labeled_spans': query_labeled_spans,
                'query_labeled_types': query_labeled_types,
                'query_sentence_num': len(query_input_texts),
                'stage': set_type,
            })
        return examples

    def set_config(self, config):
        """Attach span-pointer hyper-parameters to the model config."""
        config.ent_type_size = 1
        config.inner_dim = 64
        config.RoPE = True
        return config

    def build_preprocess_function(self):
        """Return a datasets.map-style function tokenizing support/query text
        (offset mappings are kept for span alignment later)."""
        support_key, query_key = self.support_key, self.query_key
        tokenizer = self.tokenizer
        max_seq_length = self.data_args.max_seq_length

        def func(examples):
            features = {'id': examples['id'], 'support_input': list(), 'query_input': list()}
            support_inputs, query_inputs = examples[support_key], examples[query_key]
            for ei, support_input in enumerate(support_inputs):
                support_input = (support_input,)
                query_input = (query_inputs[ei],)
                support_result = tokenizer(*support_input, padding=False,
                                           max_length=max_seq_length,
                                           truncation='longest_first',
                                           add_special_tokens=True,
                                           return_offsets_mapping=True)
                query_result = tokenizer(*query_input, padding=False,
                                         max_length=max_seq_length,
                                         truncation='longest_first',
                                         add_special_tokens=True,
                                         return_offsets_mapping=True)
                features['support_input'].append(support_result)
                features['query_input'].append(query_result)
            features['support_labeled_spans'] = examples['support_labeled_spans']
            features['support_labeled_types'] = examples['support_labeled_types']
            features['support_sentence_num'] = examples['support_sentence_num']
            features['query_labeled_spans'] = examples['query_labeled_spans']
            features['query_labeled_types'] = examples['query_labeled_types']
            features['query_sentence_num'] = examples['query_sentence_num']
            return features
        return func

    def fush_multi_answer(self, has_answer, new_answer):
        """Merge *new_answer* into *has_answer*, summing probabilities and
        extending position lists for duplicate answers. (The misspelled name
        'fush' is kept for backward compatibility with existing callers.)"""
        for ans, value in new_answer.items():
            if ans not in has_answer.keys():
                has_answer[ans] = value
            else:
                has_answer[ans]['prob'] += value['prob']
                has_answer[ans]['pos'].extend(value['pos'])
        return has_answer

    def get_predict_result(self, logits, examples, stage='dev'):
        """Merge per-rank prediction dumps and align gold spans from character
        offsets to token indices via each sentence's offset mapping."""
        world_size = dist.get_world_size()
        results = dict()
        for i in range(world_size):
            path = os.path.join(self.output_dir, 'predict',
                                '{}_predictions_{}.npy'.format(stage, i))
            assert os.path.exists(path), 'unknown path: {}'.format(path)
            res = np.load(path, allow_pickle=True)[()]
            for episode_i, value in res.items():
                results[episode_i] = value
        predictions = dict()
        for example in examples:
            query_labeled_spans = example['query_labeled_spans']
            query_labeled_types = example['query_labeled_types']
            query_offset_mapping = example['query_input']['offset_mapping']
            id_ = example['id']
            new_labeled_spans = list()
            for ei in range(len(query_labeled_spans)):
                labeled_span = query_labeled_spans[ei]
                offset = query_offset_mapping[ei]
                new_labeled_span = list()
                # Map every character position to the token index covering it.
                position_map = {}
                for i, (m, n) in enumerate(offset):
                    # Skip zero-width special tokens (except position 0).
                    if i != 0 and m == 0 and n == 0:
                        continue
                    for k in range(m, n + 1):
                        position_map[k] = i
                for span in labeled_span:
                    start, end = span
                    end -= 1  # spans are char-exclusive; map the last char
                    if start in position_map and end in position_map:
                        new_labeled_span.append([position_map[start], position_map[end]])
                new_labeled_spans.append(new_labeled_span)
            predictions[id_] = {
                'labeled_spans': new_labeled_spans,
                'labeled_types': query_labeled_types,
                'predicted_spans': results[id_]['spans'],
                'predicted_types': results[id_]['types'],
            }
        return predictions

    def compute_metrics(self, eval_predictions, stage='dev'):
        """Compute span-level and class-level precision/recall/F1 over all episodes."""
        all_metrics = {'span_precision': 0.0, 'span_recall': 0.0, 'eval_span_f1': 0,
                       'class_precision': 0.0, 'class_recall': 0.0, 'eval_class_f1': 0}
        examples = self.raw_datasets['validation'] if stage == 'dev' else self.raw_datasets['test']
        predictions = self.get_predict_result(eval_predictions[0], examples, stage)
        pred_span_cnt = 0
        label_span_cnt = 0
        correct_span_cnt = 0
        pred_class_cnt = 0
        label_class_cnt = 0
        correct_class_cnt = 0
        for episode_id, predicts in predictions.items():
            query_labeled_spans = predicts['labeled_spans']
            query_labeled_types = predicts['labeled_types']
            # FIX: the original reused the names pred_span/pred_type both for
            # the per-episode lists and the per-sentence loop variables.
            pred_spans = predicts['predicted_spans']
            pred_types = predicts['predicted_types']
            for label_span, label_type, pred_span, pred_type in zip(
                    query_labeled_spans, query_labeled_types, pred_spans, pred_types):
                label_span_dict = {0: list()}
                pred_span_dict = {0: list()}
                label_class_dict = dict()
                pred_class_dict = dict()
                for span, type in zip(label_span, label_type):
                    label_span_dict[0].append((span[0], span[1]))
                    if type not in label_class_dict.keys():
                        label_class_dict[type] = list()
                    label_class_dict[type].append((span[0], span[1]))
                for span, type in zip(pred_span, pred_type):
                    # Span-level metric counts every predicted span, including
                    # null ones; class-level skips the "no entity" class and
                    # the [0, 0] null span.
                    pred_span_dict[0].append((span[0], span[1]))
                    if type == self.num_class or span == [0, 0]:
                        continue
                    if type not in pred_class_dict.keys():
                        pred_class_dict[type] = list()
                    pred_class_dict[type].append((span[0], span[1]))
                (tmp_pred_span_cnt, tmp_label_span_cnt, correct_span) = \
                    self.metrics_by_entity(label_span_dict, pred_span_dict)
                (tmp_pred_class_cnt, tmp_label_class_cnt, correct_class) = \
                    self.metrics_by_entity(label_class_dict, pred_class_dict)
                pred_span_cnt += tmp_pred_span_cnt
                label_span_cnt += tmp_label_span_cnt
                correct_span_cnt += correct_span
                pred_class_cnt += tmp_pred_class_cnt
                label_class_cnt += tmp_label_class_cnt
                correct_class_cnt += correct_class
        span_precision = correct_span_cnt / pred_span_cnt
        span_recall = correct_span_cnt / label_span_cnt
        try:
            span_f1 = (2 * span_precision * span_recall) / (span_precision + span_recall)
        except ZeroDivisionError:
            span_f1 = 0.0
        class_precision = correct_class_cnt / pred_class_cnt
        class_recall = correct_class_cnt / label_class_cnt
        try:
            class_f1 = (2 * class_precision * class_recall) / (class_precision + class_recall)
        except ZeroDivisionError:
            class_f1 = 0.0
        (all_metrics['span_precision'], all_metrics['span_recall'],
         all_metrics['eval_span_f1']) = (span_precision, span_recall, span_f1)
        (all_metrics['class_precision'], all_metrics['class_recall'],
         all_metrics['eval_class_f1']) = (class_precision, class_recall, class_f1)
        print('all_metrics=', all_metrics)
        return all_metrics

    def metrics_by_entity(self, label_class_span, pred_class_span):
        """Return (#predicted, #labeled, #correct) entity counts, matching by class."""
        def get_cnt(class_span):
            # Total entities across all classes.
            cnt = 0
            for label in class_span:
                cnt += len(class_span[label])
            return cnt

        def get_intersect_by_entity(pred_class_span, label_class_span):
            # Entities predicted with the correct class AND the correct span.
            cnt = 0
            for label in label_class_span:
                cnt += len(list(set(label_class_span[label]).intersection(
                    set(pred_class_span.get(label, [])))))
            return cnt

        pred_cnt = get_cnt(pred_class_span)
        label_cnt = get_cnt(label_class_span)
        correct_cnt = get_intersect_by_entity(pred_class_span, label_class_span)
        return (pred_cnt, label_cnt, correct_cnt)

    def save_result(self, logits, label_ids):
        """Entry point used after test prediction; delegates to compute_metrics."""
        self.compute_metrics((logits,), stage='test')
def test_create_bulk_import(gl, resp_create_bulk_import):
    """Creating a bulk import from a configuration + entities payload returns
    a BulkImport whose status reflects the mocked response."""
    payload = {
        'configuration': {'url': gl.url, 'access_token': 'test-token'},
        'entities': [
            {
                'source_full_path': 'source',
                'source_type': 'group_entity',
                'destination_slug': 'destination',
                'destination_namespace': 'destination',
            }
        ],
    }
    migration = gl.bulk_imports.create(payload)
    assert isinstance(migration, BulkImport)
    assert migration.status == 'finished'
class TFAutoModelForSequenceClassification(object):
    """Factory mapping configs / checkpoint names to concrete TF sequence-classification models.

    This class is never instantiated directly; use :meth:`from_config` or
    :meth:`from_pretrained`.
    """

    def __init__(self):
        raise EnvironmentError('TFAutoModelForSequenceClassification is designed to be instantiated using the `TFAutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or `TFAutoModelForSequenceClassification.from_config(config)` methods.')

    # Fixed: these factory methods take `cls` but were not declared as
    # classmethods, so calling them on the class passed the config/path as
    # `cls` and raised a TypeError instead of dispatching.
    @classmethod
    def from_config(cls, config):
        """Instantiate the model class matching *config* (no weights are loaded)."""
        if isinstance(config, DistilBertConfig):
            return TFDistilBertForSequenceClassification(config)
        elif isinstance(config, RobertaConfig):
            return TFRobertaForSequenceClassification(config)
        elif isinstance(config, BertConfig):
            return TFBertForSequenceClassification(config)
        elif isinstance(config, XLNetConfig):
            return TFXLNetForSequenceClassification(config)
        elif isinstance(config, XLMConfig):
            return TFXLMForSequenceClassification(config)
        raise ValueError('Unrecognized configuration class {}'.format(config))

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Instantiate and load weights by substring-matching the checkpoint name.

        The checks are order-sensitive: more specific names ('distilbert',
        'roberta') must be tested before 'bert'.
        """
        if ('distilbert' in pretrained_model_name_or_path):
            return TFDistilBertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('albert' in pretrained_model_name_or_path):
            return TFAlbertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('roberta' in pretrained_model_name_or_path):
            return TFRobertaForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('bert' in pretrained_model_name_or_path):
            return TFBertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('xlnet' in pretrained_model_name_or_path):
            return TFXLNetForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('xlm' in pretrained_model_name_or_path):
            return TFXLMForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        raise ValueError("Unrecognized model identifier in {}. Should contains one of 'distilbert', 'bert', 'xlnet', 'xlm', 'roberta'".format(pretrained_model_name_or_path))
def mobilenetv2(clip_X, mode):
    # MobileNetV2 feature extractor (TF1 layers API).
    #
    # Args:
    #   clip_X: input image batch, NHWC float tensor.
    #   mode: boolean flag/tensor passed as `training` to every batch-norm layer.
    # Returns:
    #   Globally average-pooled feature vector of width parameters.d_model.
    #
    # NOTE(review): this block was recovered from a whitespace-mangled source;
    # the indentation below is a reconstruction. In particular the placement of
    # `input_channel = output_channel` (once per stage, after the block loop)
    # is a judgment call — verify against the original.
    width_scale = 1
    input_channel = 32
    # Inverted-residual stage spec: (expansion t, channels c, repeats n, stride s).
    arguments = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
    with tf.variable_scope('Conv2d3x3', reuse=None):
        input_channel = int((input_channel * width_scale))
        conv_3x3 = tf.layers.conv2d(clip_X, input_channel, (3, 3), strides=(2, 2), padding='same', activation=None, use_bias=False, name='conv')
        conv_bn_3x3 = tf.layers.batch_normalization(conv_3x3, training=mode, name='conv_bn_3x3')
        activation = tf.nn.relu(conv_bn_3x3, name='relu')
    m = 0
    with tf.variable_scope('Bottlenecks', reuse=None):
        for (t, c, n, s) in arguments:
            m += 1
            with tf.variable_scope(('Blocks' + str(m)), reuse=None):
                output_channel = int((c * width_scale))
                for i in range(n):
                    with tf.variable_scope(('block' + str(i)), reuse=None):
                        filters = (t * input_channel)
                        if (i == 0):
                            # First block of the stage uses the stage stride s.
                            pw = tf.layers.conv2d(activation, filters, (1, 1), strides=(1, 1), padding='valid', activation=None, use_bias=False, name='conv_non-linear')
                            pw_bn = tf.layers.batch_normalization(pw, training=mode, name='pw_bn')
                            pw_relu = tf.nn.relu(pw_bn, name='pw_relu')
                            dw = tf.contrib.layers.separable_conv2d(pw_relu, num_outputs=None, kernel_size=[3, 3], depth_multiplier=1, stride=[s, s], padding='SAME', activation_fn=None, biases_initializer=None)
                            dw_bn = tf.layers.batch_normalization(dw, training=mode, name='dw_bn')
                            dw_relu = tf.nn.relu(dw_bn, name='dw_relu')
                            plw = tf.layers.conv2d(dw_relu, output_channel, (1, 1), strides=(1, 1), padding='valid', activation=None, use_bias=False, name='conv_linear')
                            pwl_bn = tf.layers.batch_normalization(plw, training=mode, name='pwl_bn')
                            # Residual add only when spatial size and channel count match.
                            if ((s == 1) and (filters == output_channel)):
                                activation = (pwl_bn + activation)
                            else:
                                activation = pwl_bn
                        else:
                            # Remaining blocks of the stage always use stride 1.
                            pw = tf.layers.conv2d(activation, filters, (1, 1), strides=(1, 1), padding='valid', activation=None, use_bias=False, name='conv_non-linear')
                            pw_bn = tf.layers.batch_normalization(pw, training=mode, name='pw_bn')
                            pw_relu = tf.nn.relu(pw_bn, name='pw_relu')
                            dw = tf.contrib.layers.separable_conv2d(pw_relu, num_outputs=None, kernel_size=[3, 3], depth_multiplier=1, stride=[1, 1], padding='SAME', activation_fn=None, biases_initializer=None)
                            dw_bn = tf.layers.batch_normalization(dw, training=mode, name='dw_bn')
                            dw_relu = tf.nn.relu(dw_bn, name='dw_relu')
                            plw = tf.layers.conv2d(dw_relu, output_channel, (1, 1), strides=(1, 1), padding='valid', activation=None, use_bias=False, name='conv_linear')
                            pwl_bn = tf.layers.batch_normalization(plw, training=mode, name='pwl_bn')
                            if ((s == 1) and (filters == output_channel)):
                                activation = (pwl_bn + activation)
                            else:
                                activation = pwl_bn
                input_channel = output_channel
    with tf.variable_scope('Conv2d1x1', reuse=None):
        conv_1x1 = tf.layers.conv2d(activation, parameters.d_model, (1, 1), strides=(1, 1), padding='valid', activation=None, use_bias=False, name='conv')
        conv_bn_1x1 = tf.layers.batch_normalization(conv_1x1, training=mode, name='conv_bn_1x1')
        activation = tf.nn.relu(conv_bn_1x1, name='relu')
    shape = activation.get_shape().as_list()
    # NOTE(review): with NHWC tensors shape[1] is height and shape[2] is width;
    # the variable names below look swapped, though global pooling is
    # unaffected for square feature maps — confirm intent.
    width = shape[1]
    height = shape[2]
    output = tf.layers.average_pooling2d(activation, pool_size=(height, width), strides=(1, 1), padding='valid', name='glo_avg_epool2d')
    output = tf.squeeze(output, name='squeeze')
    return output
class DrawInstanceSegmentation(LazyTransport):
    # Visualizes instance segmentation: overlays per-instance masks, class
    # names and confidences onto the RGB image and republishes the rendering.

    # Class-id -> human-readable name lookup used for caption text.
    _class_names = class_names

    def __init__(self):
        super().__init__()
        # Output topic for the rendered visualization image.
        self._pub = self.advertise('~output', Image, queue_size=1)

    def subscribe(self):
        # Synchronize the RGB image, instance-label image and class array.
        sub_rgb = message_filters.Subscriber('~input/rgb', Image)
        sub_ins = message_filters.Subscriber('~input/label_ins', Image)
        sub_lbl = message_filters.Subscriber('~input/class', ObjectClassArray)
        self._subscribers = [sub_rgb, sub_ins, sub_lbl]
        # NOTE(review): param name lacks the usual '~' private prefix — confirm
        # this is meant to be a global rather than a node-private parameter.
        if rospy.get_param('approximate_sync', False):
            sync = message_filters.ApproximateTimeSynchronizer(self._subscribers, queue_size=100, slop=0.1)
        else:
            sync = message_filters.TimeSynchronizer(self._subscribers, queue_size=100)
        sync.registerCallback(self._callback)

    def unsubscribe(self):
        for sub in self._subscribers:
            sub.unregister()

    def _callback(self, rgb_msg, ins_msg, cls_msg):
        bridge = cv_bridge.CvBridge()
        rgb = bridge.imgmsg_to_cv2(rgb_msg, desired_encoding='rgb8')
        ins = bridge.imgmsg_to_cv2(ins_msg)
        instance_ids = []
        class_ids = []
        class_confs = []
        masks = []
        captions = []
        for cls in cls_msg.classes:
            instance_ids.append(cls.instance_id)
            class_ids.append(cls.class_id)
            class_confs.append(cls.confidence)
            # Boolean mask of pixels belonging to this instance.
            masks.append((ins == cls.instance_id))
            class_name = self._class_names[cls.class_id]
            captions.append(f'{cls.class_id}: {class_name}: {cls.confidence:.1%}')
        masks = np.array(masks)
        if masks.size:
            viz = imgviz.instances.instances2rgb(image=rgb, masks=masks, labels=class_ids, captions=captions, font_size=15)
        else:
            # No detections: republish the raw image unchanged.
            viz = rgb
        viz_msg = bridge.cv2_to_imgmsg(viz, encoding='rgb8')
        # Preserve the input stamp/frame so downstream consumers stay in sync.
        viz_msg.header = rgb_msg.header
        self._pub.publish(viz_msg)
def get_tasks(task_names):
    """Resolve a comma-separated task-name string into a list of task names.

    Args:
        task_names: Comma-separated names; the special name 'all' selects
            every task in TASKS.

    Returns:
        A fresh list of task names (never the TASKS global itself).

    Raises:
        ValueError: If a requested name is not present in TASKS.
    """
    requested = task_names.split(',')
    if ('all' in requested):
        # Fixed: return a copy so callers cannot mutate the module-level
        # task registry through the returned list.
        return list(TASKS)
    tasks = []
    for task_name in requested:
        if (task_name not in TASKS):
            raise ValueError(f'Task {task_name} not found!')
        tasks.append(task_name)
    return tasks
def format_value(val):
    """Render *val* as a text field.

    ``None`` becomes '.', strings pass through, bytes are decoded as UTF-8,
    iterables are rendered recursively and comma-joined, and anything else
    falls back to ``str()``.
    """
    if val is None:
        return '.'
    if isinstance(val, str):
        return val
    if isinstance(val, bytes):
        return val.decode('utf-8')
    try:
        return ','.join(format_value(item) for item in val)
    except TypeError:
        # Not iterable (e.g. int/float): plain string conversion.
        return str(val)
def _build_circuit(qubit_pairs: List[List[cirq.Qid]], use_tsym: bool, depth: int) -> cirq.Circuit:
    """Build a 1-D benchmark circuit over the first qubit of each pair.

    Uses scrambling interaction blocks by default, or tsym blocks when
    *use_tsym* is set, then applies an S+H basis change, compiles the circuit
    and appends a terminal measurement per qubit (keys 'q0'..'q{n-1}').
    """
    # Interaction generator: default scrambling blocks, or tsym blocks on request.
    inter_gen = circuit_blocks.scrambling_block
    if use_tsym:
        inter_gen = circuit_blocks.tsym_block
    # One pair of random parameters per (layer, qubit-pair) slot.
    random_source = np.random.uniform(0, 4, size=((depth * len(qubit_pairs)), 2))
    ret_circuit = circuit_blocks.block_1d_circuit([qubits[0] for qubits in qubit_pairs], depth, inter_gen, random_source)
    # Basis change before measurement.
    ret_circuit += [cirq.S(qubits[0]) for qubits in qubit_pairs]
    ret_circuit += [cirq.H(qubits[0]) for qubits in qubit_pairs]
    # Compile: merge single-qubit gates, drop empty moments, flatten for the device.
    ret_circuit = cirq.merge_single_qubit_gates_to_phxz(ret_circuit)
    ret_circuit = cirq.drop_empty_moments(ret_circuit)
    ret_circuit = run_config.flatten_circuit(ret_circuit)
    for (i, qubit) in enumerate([qubits[0] for qubits in qubit_pairs]):
        ret_circuit += cirq.measure(qubit, key=f'q{i}')
    # Align all terminal measurements into the final moment.
    ret_circuit = cirq.synchronize_terminal_measurements(ret_circuit)
    logging.debug(f'Generated a new circuit w/ tsym={use_tsym} and depth {len(ret_circuit)}')
    return ret_circuit
class Effect2305(BaseEffect):
    # Recon-ship hull bonus: boosts the drain amount of all fitted Energy
    # Neutralizer modules by the ship's 'eliteBonusReconShip2' attribute,
    # scaled per level of the 'Recon Ships' skill.
    #
    # `handler` intentionally has no `self`: effect handlers in this framework
    # are invoked as plain functions from the class.
    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Energy Neutralizer')), 'energyNeutralizerAmount', ship.getModifiedItemAttr('eliteBonusReconShip2'), skill='Recon Ships', **kwargs)
class DragWidget(QFrame):
    # Drop area pre-populated with draggable icons; supports dragging pixmaps
    # out of and into the widget via a custom MIME type.

    def __init__(self, parent=None):
        super(DragWidget, self).__init__(parent)
        self.setMinimumSize(200, 200)
        self.setFrameStyle((QFrame.Sunken | QFrame.StyledPanel))
        self.setAcceptDrops(True)
        # Seed the area with three draggable icons.
        boatIcon = QLabel(self)
        boatIcon.setPixmap(QPixmap(':/images/boat.png'))
        boatIcon.move(20, 20)
        boatIcon.show()
        boatIcon.setAttribute(Qt.WA_DeleteOnClose)
        carIcon = QLabel(self)
        carIcon.setPixmap(QPixmap(':/images/car.png'))
        carIcon.move(120, 20)
        carIcon.show()
        carIcon.setAttribute(Qt.WA_DeleteOnClose)
        houseIcon = QLabel(self)
        houseIcon.setPixmap(QPixmap(':/images/house.png'))
        houseIcon.move(20, 120)
        houseIcon.show()
        houseIcon.setAttribute(Qt.WA_DeleteOnClose)

    def dragEnterEvent(self, event):
        # Accept only our custom MIME type; drags that originate from this
        # widget become moves, drags from elsewhere keep the proposed action.
        if event.mimeData().hasFormat('application/x-dnditemdata'):
            if (event.source() == self):
                event.setDropAction(Qt.MoveAction)
                event.accept()
            else:
                event.acceptProposedAction()
        else:
            event.ignore()

    # The same accept/ignore policy applies while the drag moves over the widget.
    dragMoveEvent = dragEnterEvent

    def dropEvent(self, event):
        if event.mimeData().hasFormat('application/x-dnditemdata'):
            itemData = event.mimeData().data('application/x-dnditemdata')
            dataStream = QDataStream(itemData, QIODevice.ReadOnly)
            pixmap = QPixmap()
            offset = QPoint()
            # Stream order must mirror mousePressEvent: pixmap first, then offset.
            ((dataStream >> pixmap) >> offset)
            newIcon = QLabel(self)
            newIcon.setPixmap(pixmap)
            # Place the icon so the grab point stays under the cursor.
            newIcon.move((event.pos() - offset))
            newIcon.show()
            newIcon.setAttribute(Qt.WA_DeleteOnClose)
            if (event.source() == self):
                event.setDropAction(Qt.MoveAction)
                event.accept()
            else:
                event.acceptProposedAction()
        else:
            event.ignore()

    def mousePressEvent(self, event):
        child = self.childAt(event.pos())
        if (not child):
            return
        pixmap = QPixmap(child.pixmap())
        # Serialize the pixmap and the grab offset into the custom MIME payload.
        itemData = QByteArray()
        dataStream = QDataStream(itemData, QIODevice.WriteOnly)
        ((dataStream << pixmap) << QPoint((event.pos() - child.pos())))
        mimeData = QMimeData()
        mimeData.setData('application/x-dnditemdata', itemData)
        drag = QDrag(self)
        drag.setMimeData(mimeData)
        drag.setPixmap(pixmap)
        drag.setHotSpot((event.pos() - child.pos()))
        # Gray out the source icon while the drag is in flight.
        tempPixmap = QPixmap(pixmap)
        painter = QPainter()
        painter.begin(tempPixmap)
        painter.fillRect(pixmap.rect(), QColor(127, 127, 127, 127))
        painter.end()
        child.setPixmap(tempPixmap)
        if (drag.exec_((Qt.CopyAction | Qt.MoveAction), Qt.CopyAction) == Qt.MoveAction):
            # Moved elsewhere: remove the original icon.
            child.close()
        else:
            # Copied (or cancelled): restore the original appearance.
            child.show()
            child.setPixmap(pixmap)
def test_parse_command_with_args(parser):
    """Parsing 'command with args' separates the command from its argument string."""
    raw = 'command with args'
    stmt = parser.parse(raw)
    assert stmt.command == 'command'
    # A Statement compares equal to its argument string.
    assert stmt == 'with args'
    assert stmt.args == stmt
    assert stmt.argv == ['command', 'with', 'args']
    assert stmt.arg_list == stmt.argv[1:]
# NOTE(review): the '@pytest.mark' prefix of this decorator appears to have
# been lost when the file was mangled; restore it when confirming upstream.
.xfail("not hasattr(os, 'dup')")
def test_fdopen_kept_alive_issue124(pytester: Pytester) -> None:
    # Regression test for capture issue #124: a file object wrapping fd 1 that
    # is kept alive across tests must remain closable without error.
    pytester.makepyfile("\n import os, sys\n k = []\n def test_open_file_and_keep_alive(capfd):\n stdout = os.fdopen(1, 'w', buffering=1, encoding='utf-8')\n k.append(stdout)\n\n def test_close_kept_alive_file():\n stdout = k.pop()\n stdout.close()\n ")
    result = pytester.runpytest()
    result.stdout.fnmatch_lines(['*2 passed*'])
class PreActResNet(Backbone):
    """Pre-activation ResNet backbone with a CIFAR-style 3x3 stem.

    Four stages double the channel count (64 -> 512 * block.expansion) while
    halving the spatial resolution from stage 2 onward; features are globally
    pooled and flattened.
    """

    def __init__(self, block, num_blocks):
        super().__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self._out_features = 512 * block.expansion

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack *num_blocks* blocks; only the first applies *stride*."""
        stage = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, block_stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        out = self.conv1(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        return out.view(out.size(0), -1)
class Log(object):
    """File logger writing INFO-level records to logs/<name>__<Beijing time>.log."""

    def __init__(self, hparams):
        # Derive the log filename from Beijing time (UTC+8).
        now_utc = datetime.utcnow().replace(tzinfo=timezone.utc)
        now_bj = now_utc.astimezone(timezone(timedelta(hours=8)))
        logging_filename = (((('logs/' + hparams.log) + '__') + now_bj.strftime('%Y-%m-%d_%H_%M_%S')) + '.log')
        file_handler = logging.FileHandler(logging_filename)
        file_handler.setLevel(logging.INFO)
        # Raw messages only — no timestamps or level prefixes.
        file_handler.setFormatter(logging.Formatter('%(message)s'))
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        self.logger.addHandler(file_handler)
class _GPT2BPETokenizer(AbstractTokenizer):
    """GPT-2 byte-pair-encoding tokenizer wrapped in the AbstractTokenizer interface."""

    def __init__(self, vocab_file, merge_file):
        name = 'GPT2 BPE'
        super().__init__(name)
        self.tokenizer = GPT2Tokenizer(vocab_file, merge_file, errors='replace', special_tokens=[], max_len=None)
        # Cache the end-of-document token id for fast access.
        self.eod_id = self.tokenizer.encoder['<|endoftext|>']

    # NOTE(review): in comparable tokenizer interfaces these accessors are
    # usually @property methods; here they are plain methods — confirm that
    # call sites invoke them with parentheses.
    def vocab_size(self):
        return len(self.tokenizer.encoder)

    def vocab(self):
        # token string -> id mapping
        return self.tokenizer.encoder

    def inv_vocab(self):
        # id -> token string mapping
        return self.tokenizer.decoder

    def tokenize(self, text):
        return self.tokenizer.encode(text)

    def detokenize(self, token_ids):
        return self.tokenizer.decode(token_ids)

    def eod(self):
        return self.eod_id
def convert(obj, rule, func, args=(), kwargs=None, fallback=None):
    """Recursively apply *func* to every element of *obj* matched by *rule*.

    Mappings are rebuilt with converted keys and values, other iterables are
    rebuilt with converted items, and unmatched scalars pass through. If any
    step raises and *fallback* is callable, it is invoked as
    ``fallback(obj, rule, func, args, kwargs, exc)`` and its result returned;
    otherwise the exception propagates.
    """
    kwargs = {} if kwargs is None else kwargs
    recurse = (rule, func, args, kwargs, fallback)
    try:
        if rule(obj):
            return func(obj, *args, **kwargs)
        if is_mapping(obj):
            return {convert(k, *recurse): convert(v, *recurse) for k, v in obj.items()}
        if is_iterable(obj):
            # Preserve the container type (list, tuple, set, ...).
            return type(obj)(convert(item, *recurse) for item in obj)
        return obj
    except Exception as exc:
        if callable(fallback):
            return fallback(obj, rule, func, args, kwargs, exc)
        raise
class KickstartCompleter_Test(TestCase):
    # Exercises the interactive shell's tab-completion: option completion
    # after a full command name and command-name cycling for a partial prefix.

    def runTest(self):
        kshandler = makeVersion(DEVEL)
        self.assertIsNotNone(kshandler)
        ksc = ksshell.KickstartCompleter(kshandler, {})
        self.assertTrue((len(ksc.commands) > 0))
        self.assertIn('part', ksc.commands)
        # Completing after 'part ' should offer that command's options.
        ksc._init_matches('part ', 5, 5)
        self.assertEqual(ksc.complete('', 1), '--fstype')
        # Completing the prefix 'auth' should cycle through matching commands.
        ksc._init_matches('auth', 0, 5)
        self.assertIn(ksc.complete('', 1), ['auth', 'authconfig', 'authselect'])
        self.assertIn(ksc.complete('', 2), ['auth', 'authconfig', 'authselect'])
        # Successive completion states must yield different candidates.
        self.assertNotEqual(ksc.complete('', 1), ksc.complete('', 2))
# NOTE(review): the '@pytest.mark' prefix of this decorator appears to have
# been lost when the file was mangled; restore it when confirming upstream.
.skipif((sys.version_info < (3, 7)), reason='pre-commit requires Python 3.7+')
def test_new_project_does_not_fail_pre_commit(cwd, pre_commit, putup):
    # End-to-end check: a freshly generated project passes all pre-commit hooks.
    name = 'my_project'
    run((f'{putup} --pre-commit --cirrus --gitlab -p my_package --namespace com.blue_yonder ' + name))
    with cwd.join(name).as_cwd():
        try:
            run(f'{pre_commit} install')
            run(f'{pre_commit} run --all')
        except CalledProcessError as ex:
            # The Windows MAX_PATH limitation is an environment problem, not a bug.
            if ((os.name == 'nt') and ('filename or extension is too long' in ((ex.stdout or '') + (ex.stderr or '')))):
                pytest.skip('Sometimes Windows have problems with nested files')
            else:
                raise
class Tree(nn.Module):
    # DLA aggregation tree: recursively combines two subtrees through a Root
    # node, with optional downsampling and 1x1 channel projection.

    def __init__(self, levels, block, in_channels, out_channels, stride=1, level_root=False, root_dim=0, root_kernel_size=1, dilation=1, root_residual=False):
        super(Tree, self).__init__()
        if (root_dim == 0):
            # Default: the root aggregates the two subtree outputs.
            root_dim = (2 * out_channels)
        if level_root:
            # Level roots additionally aggregate the (downsampled) input.
            root_dim += in_channels
        if (levels == 1):
            # Leaf level: two plain blocks feeding a Root aggregation node.
            self.tree1 = block(in_channels, out_channels, stride, dilation=dilation)
            self.tree2 = block(out_channels, out_channels, 1, dilation=dilation)
        else:
            self.tree1 = Tree((levels - 1), block, in_channels, out_channels, stride, root_dim=0, root_kernel_size=root_kernel_size, dilation=dilation, root_residual=root_residual)
            self.tree2 = Tree((levels - 1), block, out_channels, out_channels, root_dim=(root_dim + out_channels), root_kernel_size=root_kernel_size, dilation=dilation, root_residual=root_residual)
        if (levels == 1):
            self.root = Root(root_dim, out_channels, root_kernel_size, root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        self.downsample = None
        self.project = None
        self.levels = levels
        if (stride > 1):
            self.downsample = nn.MaxPool2d(stride, stride=stride)
        if (in_channels != out_channels):
            # 1x1 projection to match channel counts for the residual path.
            self.project = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM))

    def forward(self, x, residual=None, children=None):
        # NOTE(review): the incoming `residual` argument is immediately
        # overwritten below, so callers' residual values are ignored — this
        # mirrors common DLA implementations but is worth confirming.
        children = ([] if (children is None) else children)
        bottom = (self.downsample(x) if self.downsample else x)
        residual = (self.project(bottom) if self.project else bottom)
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if (self.levels == 1):
            x2 = self.tree2(x1)
            x = self.root(x2, x1, *children)
        else:
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
def setUpModule():
    """Build the shared QM water molecule and MM point-charge environment for the tests."""
    global mol, mm_coords, mm_charges, mm_radii
    # QM region: a single water molecule with verbose output discarded.
    mol = gto.M(verbose=5, output='/dev/null', atom='O -1.464 0.099 0.300\n H -1.956 0.624 -0.340\n H -1.797 -0.799 0.206', basis='631G')
    # MM region: a second water as point charges (coords, charges, radii).
    mm_coords = [(1.369, 0.146, (- 0.395)), (1.894, 0.486, 0.335), (0.451, 0.165, (- 0.083))]
    mm_charges = [(- 1.04), 0.52, 0.52]
    mm_radii = [0.63, 0.32, 0.32]
# NOTE(review): the '@pytest.mark' prefix of this decorator appears to have
# been lost when the file was mangled; restore it when confirming upstream.
.parametrize('ansi_sequence', [ansi.Fg.MAGENTA, ansi.Bg.LIGHT_GRAY, ansi.EightBitBg.CHARTREUSE_2A, ansi.EightBitBg.MEDIUM_PURPLE, ansi.RgbFg(0, 5, 22), ansi.RgbBg(100, 150, 222), ansi.TextStyle.OVERLINE_ENABLE])
def test_sequence_str_building(ansi_sequence):
    # Concatenating sequence objects must equal concatenating their str() forms.
    assert ((ansi_sequence + ansi_sequence) == (str(ansi_sequence) + str(ansi_sequence)))
class PolyTranslator(TypeTranslator):
    """Translate a type so that requested type variables become polymorphic.

    Free variables from *poly_tvars* found in callable argument positions are
    bound onto the enclosing callable's ``variables``; a *poly_tvar* that
    appears where it cannot be bound raises ``PolyTranslationError``.
    """

    def __init__(self, poly_tvars: Iterable[TypeVarLikeType], bound_tvars: frozenset[TypeVarLikeType]=frozenset(), seen_aliases: frozenset[TypeInfo]=frozenset()) -> None:
        self.poly_tvars = set(poly_tvars)
        # Variables already bound by an enclosing callable while descending.
        self.bound_tvars = bound_tvars
        # Protocol TypeInfos already expanded, used to detect recursion.
        self.seen_aliases = seen_aliases

    def collect_vars(self, t: (CallableType | Parameters)) -> list[TypeVarLikeType]:
        # Gather still-unbound poly vars appearing in the argument types.
        found_vars = []
        for arg in t.arg_types:
            for tv in get_all_type_vars(arg):
                if isinstance(tv, ParamSpecType):
                    # Compare ParamSpecs in their bare, prefix-free form.
                    normalized: TypeVarLikeType = tv.copy_modified(flavor=ParamSpecFlavor.BARE, prefix=Parameters([], [], []))
                else:
                    normalized = tv
                if ((normalized in self.poly_tvars) and (normalized not in self.bound_tvars)):
                    found_vars.append(normalized)
        return remove_dups(found_vars)

    def visit_callable_type(self, t: CallableType) -> Type:
        # Bind the variables found here only for the duration of the descent.
        found_vars = self.collect_vars(t)
        self.bound_tvars |= set(found_vars)
        result = super().visit_callable_type(t)
        self.bound_tvars -= set(found_vars)
        assert (isinstance(result, ProperType) and isinstance(result, CallableType))
        result.variables = (list(result.variables) + found_vars)
        return result

    def visit_type_var(self, t: TypeVarType) -> Type:
        # A requested poly var outside any binding position cannot be handled.
        if ((t in self.poly_tvars) and (t not in self.bound_tvars)):
            raise PolyTranslationError()
        return super().visit_type_var(t)

    def visit_param_spec(self, t: ParamSpecType) -> Type:
        if ((t in self.poly_tvars) and (t not in self.bound_tvars)):
            raise PolyTranslationError()
        return super().visit_param_spec(t)

    def visit_type_var_tuple(self, t: TypeVarTupleType) -> Type:
        if ((t in self.poly_tvars) and (t not in self.bound_tvars)):
            raise PolyTranslationError()
        return super().visit_type_var_tuple(t)

    def visit_type_alias_type(self, t: TypeAliasType) -> Type:
        if (not t.args):
            return t.copy_modified()
        if (not t.is_recursive):
            return get_proper_type(t).accept(self)
        # Recursive generic aliases cannot be made polymorphic here.
        raise PolyTranslationError()

    def visit_instance(self, t: Instance) -> Type:
        if t.type.has_param_spec_type:
            # Locate the ParamSpec argument and bind variables found inside it.
            param_spec_index = next((i for (i, tv) in enumerate(t.type.defn.type_vars) if isinstance(tv, ParamSpecType)))
            p = get_proper_type(t.args[param_spec_index])
            if isinstance(p, Parameters):
                found_vars = self.collect_vars(p)
                self.bound_tvars |= set(found_vars)
                new_args = [a.accept(self) for a in t.args]
                self.bound_tvars -= set(found_vars)
                repl = new_args[param_spec_index]
                assert (isinstance(repl, ProperType) and isinstance(repl, Parameters))
                repl.variables = (list(repl.variables) + list(found_vars))
                return t.copy_modified(args=new_args)
        if (t.args and t.type.is_protocol and (t.type.protocol_members == ['__call__'])):
            # Callback protocol: translate through its __call__ member, guarding
            # against recursive protocol expansion.
            if (t.type in self.seen_aliases):
                raise PolyTranslationError()
            call = find_member('__call__', t, t, is_operator=True)
            assert (call is not None)
            return call.accept(PolyTranslator(self.poly_tvars, self.bound_tvars, (self.seen_aliases | {t.type})))
        return super().visit_instance(t)
class Task(abc.ABC):
    """Abstract base class for an evaluation task.

    Subclasses wire up dataset loading, document access, few-shot context
    construction, request building and result aggregation.
    """

    # HF datasets path and config name identifying this task's dataset.
    DATASET_PATH: str = None
    DATASET_NAME: str = None

    def __init__(self, data_dir=None, cache_dir=None, download_mode=None):
        self.download(data_dir, cache_dir, download_mode)
        # Lazily-materialized caches for few-shot example sampling.
        self._training_docs = None
        self._fewshot_docs = None

    def download(self, data_dir=None, cache_dir=None, download_mode=None):
        # Fetch (or reuse a cached copy of) the task dataset.
        self.dataset = datasets.load_dataset(path=self.DATASET_PATH, name=self.DATASET_NAME, data_dir=data_dir, cache_dir=cache_dir, download_mode=download_mode)

    def should_decontaminate(self):
        # Override to True if the task supports decontamination queries.
        return False

    def has_training_docs(self):
        pass

    def has_validation_docs(self):
        pass

    def has_test_docs(self):
        pass

    def training_docs(self):
        return []

    def validation_docs(self):
        return []

    def test_docs(self):
        return []

    def _process_doc(self, doc):
        # Hook for per-document preprocessing; identity by default.
        return doc

    def fewshot_examples(self, k, rnd):
        # Sample k few-shot examples from the (cached) training docs.
        if (self._training_docs is None):
            self._training_docs = list(self.training_docs())
        return rnd.sample(self._training_docs, k)

    def doc_to_decontamination_query(self, doc):
        print('Override doc_to_decontamination_query with document specific decontamination query.')
        assert False

    def doc_to_text(self, doc):
        pass

    def doc_to_target(self, doc):
        pass

    def construct_requests(self, doc, ctx):
        pass

    def process_results(self, doc, results):
        pass

    def aggregation(self):
        pass

    def higher_is_better(self):
        pass

    def fewshot_description(self):
        import warnings
        warnings.warn('`fewshot_description` will be removed in futures versions. Pass any custom descriptions to the `evaluate` function instead.', DeprecationWarning)
        return ''

    # NOTE(review): '_deprecated' below looks like the tail of a truncated
    # decorator (e.g. '@utils.deprecated'); restore it against the original.
    _deprecated
    def fewshot_context(self, doc, num_fewshot, provide_description=None, rnd=None, description=None):
        # Build the prompt: optional description, num_fewshot labeled examples,
        # then the unlabeled query document.
        assert (rnd is not None), 'A `random.Random` generator argument must be provided to `rnd`'
        assert (not provide_description), 'The `provide_description` arg will be removed in future versions. To prepend a custom description to the context, supply the corresponding string via the `description` arg.'
        if (provide_description is not None):
            print('WARNING: provide_description is deprecated and will be removed in a future version in favor of description_dict')
        description = ((description + '\n\n') if description else '')
        if (num_fewshot == 0):
            labeled_examples = ''
        else:
            if self.has_training_docs():
                fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
            else:
                if (self._fewshot_docs is None):
                    self._fewshot_docs = list((self.validation_docs() if self.has_validation_docs() else self.test_docs()))
                # Over-sample by one so the query doc itself can be filtered out.
                fewshotex = rnd.sample(self._fewshot_docs, (num_fewshot + 1))
                fewshotex = [x for x in fewshotex if (x != doc)][:num_fewshot]
            labeled_examples = ('\n\n'.join([(self.doc_to_text(doc) + self.doc_to_target(doc)) for doc in fewshotex]) + '\n\n')
        example = self.doc_to_text(doc)
        return ((description + labeled_examples) + example)
# NOTE(review): '_module()' looks like the tail of a truncated registry
# decorator (e.g. '@DETECTORS.register_module()'); restore it against upstream.
_module()
class ONNXRuntimeRecognizer(EncodeDecodeRecognizer):
    # Text recognizer that runs an exported ONNX model through onnxruntime,
    # reusing the parent class's label converter for decoding predictions.

    def __init__(self, onnx_file: str, cfg: Any, device_id: int, show_score: bool=False):
        # Strip the registry 'type' key before delegating construction.
        if ('type' in cfg.model):
            cfg.model.pop('type')
        EncodeDecodeRecognizer.__init__(self, **cfg.model)
        import onnxruntime as ort
        # Register mmcv's custom ops with onnxruntime when available.
        ort_custom_op_path = ''
        try:
            from mmcv.ops import get_onnxruntime_op_path
            ort_custom_op_path = get_onnxruntime_op_path()
        except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom op from mmcv, you may have to build mmcv with ONNXRuntime from source.')
        session_options = ort.SessionOptions()
        if osp.exists(ort_custom_op_path):
            session_options.register_custom_ops_library(ort_custom_op_path)
        sess = ort.InferenceSession(onnx_file, session_options)
        # Prefer CUDA execution when onnxruntime reports GPU support.
        providers = ['CPUExecutionProvider']
        options = [{}]
        is_cuda_available = (ort.get_device() == 'GPU')
        if is_cuda_available:
            providers.insert(0, 'CUDAExecutionProvider')
            options.insert(0, {'device_id': device_id})
        sess.set_providers(providers, options)
        self.sess = sess
        self.device_id = device_id
        self.io_binding = sess.io_binding()
        self.output_names = [_.name for _ in sess.get_outputs()]
        for name in self.output_names:
            self.io_binding.bind_output(name)
        self.cfg = cfg

    def forward_train(self, img, img_metas, **kwargs):
        # Inference-only wrapper: training is unsupported.
        raise NotImplementedError('This method is not implemented.')

    def aug_test(self, imgs, img_metas, **kwargs):
        # Normalize batched/listed inputs to one tensor + metas, then delegate.
        if isinstance(imgs, list):
            for (idx, each_img) in enumerate(imgs):
                if (each_img.dim() == 3):
                    imgs[idx] = each_img.unsqueeze(0)
            imgs = imgs[0]
            img_metas = img_metas[0]
        elif ((len(img_metas) == 1) and isinstance(img_metas[0], list)):
            img_metas = img_metas[0]
        return self.simple_test(imgs, img_metas=img_metas)

    def extract_feat(self, imgs):
        raise NotImplementedError('This method is not implemented.')

    def simple_test(self, img: torch.Tensor, img_metas: Iterable, rescale: bool=False):
        # Run the ONNX session, then decode indices/scores into text results.
        onnx_pred = inference_with_session(self.sess, self.io_binding, 'input', self.output_names, img)
        onnx_pred = torch.from_numpy(onnx_pred[0])
        (label_indexes, label_scores) = self.label_convertor.tensor2idx(onnx_pred, img_metas)
        label_strings = self.label_convertor.idx2str(label_indexes)
        results = []
        for (string, score) in zip(label_strings, label_scores):
            results.append(dict(text=string, score=score))
        return results
def _realign_dfs():
    """Align every cached frame in shared._DFS onto one common index.

    The index of the longest frame is taken as the reference; each frame is
    rebuilt on it (falling back to concatenation with an empty frame when the
    direct rebuild fails) and de-duplicated, keeping the last row for any
    repeated index label.
    """
    # Pick the index of the longest cached frame as the reference index.
    idx = None
    longest = 0
    for frame in shared._DFS.values():
        if len(frame) > longest:
            longest = len(frame)
            idx = frame.index
    for key in shared._DFS:
        try:
            realigned = _pd.DataFrame(index=idx, data=shared._DFS[key]).drop_duplicates()
        except Exception:
            # Fallback: pad with an empty frame on the reference index.
            realigned = _pd.concat([utils.empty_df(idx), shared._DFS[key].dropna()], axis=0, sort=True)
        # Keep only the last occurrence of any duplicated index label.
        shared._DFS[key] = realigned.loc[~realigned.index.duplicated(keep='last')]
class TestRotateProperties(EndianTest):
    # Round-trip pack/unpack test for the core RotateProperties request.
    #
    # NOTE(review): the literal values inside req_args_0 appear to have been
    # stripped when this file was mangled (the 'properties' list elements and
    # the 'window' value are empty) — restore them from the original test;
    # as written the dict literal is not valid Python.

    def setUp(self):
        self.req_args_0 = {'delta': (- 11867), 'properties': [, , , , , , , , , , , ], 'window': }
        self.req_bin_0 = b'r\x00\x00\x0f\x10*\xed!\x00\x0c\xd1\xa5\x01\xd0\x9d\x12Z\xa1Y\x87D_\x89\xe8\x104\xde\xd6#\x1d\xa2=\x05\xd4u\\|\xb6\xb2E\x06\xfb\xb5cF\xd5wr%e\xbb\xc6YE\xf9\x12x\xd8\xed\xb2'

    def testPackRequest0(self):
        # Packing the request arguments must reproduce the reference bytes.
        bin = request.RotateProperties._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)

    def testUnpackRequest0(self):
        # Parsing the reference bytes must reproduce the request arguments.
        (args, remain) = request.RotateProperties._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
def init(disp, info):
    """Register every RECORD extension method on *disp* (a Display)."""
    methods = (
        ('record_get_version', get_version),
        ('record_create_context', create_context),
        ('record_register_clients', register_clients),
        ('record_unregister_clients', unregister_clients),
        ('record_get_context', get_context),
        ('record_enable_context', enable_context),
        ('record_disable_context', disable_context),
        ('record_free_context', free_context),
    )
    # All RECORD calls hang off the 'display' object, mirroring Xlib's API.
    for name, func in methods:
        disp.extension_add_method('display', name, func)
class TestInfo(object):
    """Smoke test for mmcv's compiler/CUDA version helpers."""

    def test_info(self):
        if not torch.cuda.is_available():
            # The helpers below need a CUDA build of mmcv; nothing to check on CPU.
            return
        from mmcv.ops import get_compiler_version, get_compiling_cuda_version
        compiler_version = get_compiler_version()
        cuda_version = get_compiling_cuda_version()
        assert compiler_version is not None
        assert cuda_version is not None
class GraphStructuralEncoder(nn.Module):
    """A single post-norm Transformer encoder layer used as a graph structural encoder.

    Multi-head self-attention followed by a position-wise feed-forward block,
    each wrapped in residual-add + LayerNorm.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):
        super(GraphStructuralEncoder, self).__init__()
        # Submodule creation order is kept stable so seeded initialization
        # reproduces the same parameters.
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = F.relu

    def forward(self, src):
        # Self-attention sub-layer with residual connection and LayerNorm.
        attn_out = self.self_attn(src, src, src)[0]
        src = self.norm1(src + self.dropout1(attn_out))
        # Feed-forward sub-layer with residual connection and LayerNorm.
        ff_out = self.linear2(self.dropout(self.activation(self.linear1(src))))
        return self.norm2(src + self.dropout2(ff_out))
def apply_migrations(data: dict, migrations: Migrations, *, copy_before_migrating: bool=False, version_name: str='version') -> dict:
    """Upgrade *data* one schema step at a time until it reaches the latest version.

    If *copy_before_migrating* is set, the input dict is deep-copied before the
    first migration runs (and only then). Raises UnsupportedVersion when the
    stored version is newer than supported, or when a required migration step
    has been dropped.
    """
    schema_version = data.get('schema_version', 1)
    target = get_version(migrations)
    # Data written by a newer Randovania than this one cannot be migrated.
    if schema_version > target:
        raise UnsupportedVersion(f'Found {version_name} {schema_version}, but only up to {target} is supported. This file was created using a newer Randovania version.')
    while schema_version < target:
        if copy_before_migrating:
            # Copy at most once, right before the first mutation.
            data = copy.deepcopy(data)
            copy_before_migrating = False
        migration = migrations[schema_version - 1]
        if migration is None:
            raise UnsupportedVersion(f"Requested a migration from {version_name} {schema_version}, but it's no longer supported. You can try using an older Randovania version.")
        data = migration(data)
        schema_version += 1
    data['schema_version'] = schema_version
    return data
class TestFunctoolsPartial():
    """Inference tests for functools.partial support in astroid."""

    # Fixed: this method was missing its `self` parameter, so pytest could not
    # invoke it on the class instance.
    def test_infer_partial(self) -> None:
        """A call through partial() infers to a PartialFunction with metadata intact."""
        ast_node = astroid.extract_node("\n from functools import partial\n def test(a, b):\n '''Docstring'''\n return a + b\n partial(test, 1)(3) #\n ")
        assert isinstance(ast_node.func, nodes.Call)
        inferred = ast_node.func.inferred()
        assert (len(inferred) == 1)
        partial = inferred[0]
        assert isinstance(partial, objects.PartialFunction)
        assert isinstance(partial.as_string(), str)
        assert isinstance(partial.doc_node, nodes.Const)
        assert (partial.doc_node.value == 'Docstring')
        assert (partial.lineno == 3)
        assert (partial.col_offset == 0)

    def test_invalid_functools_partial_calls(self) -> None:
        """Invalid/uninferable partial() calls still infer to a partial-like object."""
        ast_nodes = astroid.extract_node('\n from functools import partial\n from unknown import Unknown\n\n def test(a, b, c):\n return a + b + c\n\n partial() #\n partial(test) #\n partial(func=test) #\n partial(some_func, a=1) #\n partial(Unknown, a=1) #\n partial(2, a=1) #\n partial(test, unknown=1) #\n ')
        for node in ast_nodes:
            inferred = next(node.infer())
            assert isinstance(inferred, (astroid.FunctionDef, astroid.Instance))
            assert (inferred.qname() in {'functools.partial', 'functools.partial.newfunc'})

    def test_inferred_partial_function_calls(self) -> None:
        """Calls through partial objects fold to the expected constant values."""
        ast_nodes = astroid.extract_node('\n from functools import partial\n def test(a, b):\n return a + b\n partial(test, 1)(3) #\n partial(test, b=4)(3) #\n partial(test, b=4)(a=3) #\n def other_test(a, b, *, c=1):\n return (a + b) * c\n\n partial(other_test, 1, 2)() #\n partial(other_test, 1, 2)(c=4) #\n partial(other_test, c=4)(1, 3) #\n partial(other_test, 4, c=4)(4) #\n partial(other_test, 4, c=4)(b=5) #\n test(1, 2) #\n partial(other_test, 1, 2)(c=3) #\n partial(test, b=4)(a=3) #\n ')
        expected_values = [4, 7, 7, 3, 12, 16, 32, 36, 3, 9, 7]
        for (node, expected_value) in zip(ast_nodes, expected_values):
            inferred = next(node.infer())
            assert isinstance(inferred, astroid.Const)
            assert (inferred.value == expected_value)

    def test_partial_assignment(self) -> None:
        """A partial bound inside a function is parented by that function's scope."""
        ast_nodes = astroid.extract_node('\n from functools import partial\n def test(a, b): #\n return a + b\n test2 = partial(test, 1)\n test2 #\n def test3_scope(a):\n test3 = partial(test, a)\n test3 #\n ')
        (func1, func2, func3) = ast_nodes
        assert (func1.parent.scope() == func2.parent.scope())
        assert (func1.parent.scope() != func3.parent.scope())
        partial_func3 = next(func3.infer())
        scope = partial_func3.parent.scope()
        assert (scope.name == 'test3_scope'), 'parented by closure'

    def test_partial_does_not_affect_scope(self) -> None:
        """Creating a partial must not leak names into the enclosing scopes."""
        ast_nodes = astroid.extract_node('\n from functools import partial\n def test(a, b):\n return a + b\n def scope():\n test2 = partial(test, 1)\n test2 #\n ')
        test2 = next(ast_nodes.infer())
        mod_scope = test2.root()
        scope = test2.parent.scope()
        assert (set(mod_scope) == {'test', 'scope', 'partial'})
        assert (set(scope) == {'test2'})

    def test_multiple_partial_args(self) -> None:
        """Chained partials accumulate positional args and keyword overrides."""
        ast_node = astroid.extract_node('\n from functools import partial\n def test(a, b, c, d, e=5):\n return a + b + c + d + e\n test1 = partial(test, 1)\n test2 = partial(test1, 2)\n test3 = partial(test2, 3)\n test3(4, e=6) #\n ')
        expected_args = [1, 2, 3, 4]
        expected_keywords = {'e': 6}
        call_site = astroid.arguments.CallSite.from_call(ast_node)
        called_func = next(ast_node.func.infer())
        called_args = (called_func.filled_args + call_site.positional_arguments)
        called_keywords = {**called_func.filled_keywords, **call_site.keyword_arguments}
        assert (len(called_args) == len(expected_args))
        assert ([arg.value for arg in called_args] == expected_args)
        assert (len(called_keywords) == len(expected_keywords))
        for (keyword, value) in expected_keywords.items():
            assert (keyword in called_keywords)
            assert (called_keywords[keyword].value == value)
# NOTE(review): "_processor('copy')" looks like the tail of a truncated
# registry decorator; restore its '@...' prefix against the original source.
_processor('copy')
class CopyProcessor(BaseProcessor):
    """Pads or truncates an item's 'blob' array to a fixed length along axis 0."""

    def __init__(self, config, *args, **kwargs):
        # Target length along the first axis of the blob.
        self.max_length = config.max_length

    def __call__(self, item):
        blob = item['blob']
        # Zero-filled buffer of the target shape; copy in as much as fits.
        padded = np.zeros((self.max_length,) + blob.shape[1:], blob.dtype)
        keep = min(len(blob), self.max_length)
        padded[:keep] = blob[:keep]
        return {'blob': torch.from_numpy(padded)}
def get_learning_rate_decay(learning_rate, global_step, params):
    """Return the learning rate after applying the configured decay schedule.

    Supported schedules (params.learning_rate_decay):
      * 'linear_warmup_rsqrt_decay' / 'noam' — Noam schedule: linear warmup
        for params.warmup_steps steps, then inverse-sqrt decay, scaled by
        hidden_size ** -0.5.
      * 'piecewise_constant' — step-wise rates from boundaries/values.
      * 'none' — no decay.

    Raises:
        ValueError: for an unknown schedule name.
    """
    schedule = params.learning_rate_decay
    if schedule in ('linear_warmup_rsqrt_decay', 'noam'):
        step = tf.to_float(global_step)
        warmup_steps = tf.to_float(params.warmup_steps)
        multiplier = params.hidden_size ** (-0.5)
        warmup_term = (step + 1) * (warmup_steps ** (-1.5))
        decay_term = (step + 1) ** (-0.5)
        decay = multiplier * tf.minimum(warmup_term, decay_term)
        return learning_rate * decay
    if schedule == 'piecewise_constant':
        return tf.train.piecewise_constant(
            tf.to_int32(global_step),
            params.learning_rate_boundaries,
            params.learning_rate_values)
    if schedule == 'none':
        return learning_rate
    raise ValueError('Unknown learning_rate_decay')
def test_query_paths_with_second_try(query_paths_args, valid_response_json):
    """Retryable IOU rejections trigger exactly one extra PFS request."""
    for retryable_error in (PFSError.BAD_IOU, PFSError.MISSING_IOU, PFSError.USE_THIS_IOU):
        # Both attempts rejected -> two requests, then the rejection surfaces.
        response = [dict(error_code=retryable_error.value)] * 2
        assert_failed_pfs_request(
            query_paths_args,
            response,
            expected_requests=2,
            exception_type=ServiceRequestIOURejected,
        )

        # First attempt rejected (400), second succeeds (200) -> overall success.
        response[1] = valid_response_json
        assert_failed_pfs_request(
            query_paths_args, response, [400, 200], expected_success=True
        )
def read_MW_dataset(mw_json_fn):
    """Load a MultiWOZ-style JSON dump and index its turns.

    Turns whose domains are not all in the supported set are dropped. For each
    kept turn a one-exchange history string and a unique name are built.

    Args:
        mw_json_fn: path to the JSON file (a list of turn dicts).

    Returns:
        (dial_dict, examples) where dial_dict maps "<ID>_turn_<turn_id>" to
        [running index, history string] and examples groups enriched turn
        copies by dialogue ID.
    """
    DOMAINS = ['hotel', 'restaurant', 'attraction', 'taxi', 'train']
    allowed_domains = set(DOMAINS)

    with open(mw_json_fn, 'r') as handle:
        raw_turns = json.load(handle)

    dial_dict = {}
    examples = defaultdict(list)

    kept_turns = (t for t in raw_turns if set(t['domains']) <= allowed_domains)
    for idx, turn in enumerate(kept_turns):
        sys_utt = turn['dialog']['sys'][-1]
        usr_utt = turn['dialog']['usr'][-1]
        # 'none' is the dataset's placeholder for an absent utterance.
        sys_utt = '' if sys_utt == 'none' else sys_utt
        usr_utt = '' if usr_utt == 'none' else usr_utt

        history = f'[system] {sys_utt} [user] {usr_utt}'
        name = f"{turn['ID']}_turn_{turn['turn_id']}"
        assert (not (name in dial_dict))
        dial_dict[name] = [idx, history]

        example = copy.deepcopy(turn)
        example['id'] = idx
        example['name'] = name
        example['dialogue_ID'] = turn['ID']
        example['history'] = history
        examples[example['dialogue_ID']].append(example)

    return (dial_dict, examples)
def save_preds(exp, probability, clean):
    """Append one (probability, clean) record to the per-experiment history
    pickle stored at ./stats/cifar100/stats<exp>.pcl.

    If the pickle exists, its (probs_history, clean_history) lists are loaded
    and extended; otherwise fresh lists are started.

    Bug fixed: the original used bare ``pickle.load(open(...))`` /
    ``pickle.dump(..., open(...))``, leaking file handles; both are now done
    under context managers so the handles are closed deterministically.
    """
    name = './stats/cifar100/stats{}.pcl'
    nm = name.format(exp)
    if os.path.exists(nm):
        with open(nm, 'rb') as fh:
            (probs_history, clean_history) = pickle.load(fh)
    else:
        (probs_history, clean_history) = ([], [])
    probs_history.append(probability)
    clean_history.append(clean)
    with open(nm, 'wb') as fh:
        pickle.dump((probs_history, clean_history), fh)
class Tracker():
    """Record the leaf modules executed during one forward pass of `module`.

    NOTE(review): the bare annotations plus `field(default_factory=list)`
    strongly suggest this was declared as a @dataclass and the decorator was
    lost in formatting — confirm against the original source.
    """
    module: nn.Module  # the network whose execution is traced
    traced: List[nn.Module] = field(default_factory=list)  # leaves hit during __call__
    handles: list = field(default_factory=list)  # hook handles, removed after the pass

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # A module counts as a leaf when it has no submodules; Conv2d and
        # BatchNorm2d are listed explicitly as well.
        has_not_submodules = ((len(list(m.modules())) == 1) or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d))
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        # Hook every submodule, run the forward pass, then detach all hooks so
        # later forwards are not traced again.
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        list(map((lambda x: x.remove()), self.handles))
        return self

    def parametrized(self):
        # Keep only the traced modules that own at least one state entry
        # (parameters/buffers).
        return list(filter((lambda x: (len(list(x.state_dict().keys())) > 0)), self.traced))
def test_on_menu_action_dependencies(default_main_window, monkeypatch):
    """The Dependencies menu action creates a window hosting a DependenciesWidget."""
    show_spy = MagicMock()
    monkeypatch.setattr(QtWidgets.QWidget, 'show', show_spy)

    default_main_window._on_menu_action_dependencies()

    window = default_main_window.dependencies_window
    assert window is not None
    assert window.windowTitle() == 'Dependencies'
    assert isinstance(window.centralWidget(), DependenciesWidget)
    show_spy.assert_called_once_with()
class Retriever(abc.ABC):
    """Abstract base class for document retrievers.

    Holds the document key field, the field(s) searched over, the default
    number of results `k`, and the batch size. Also implements the operator
    algebra used to compose models into pipelines: `+` (pipeline), `|`
    (union), `&` (intersection), `*` (vote).
    """

    def __init__(self, key: str, on: typing.Union[(str, list)], k: typing.Optional[int], batch_size: int) -> None:
        super().__init__()
        self.key = key  # field that uniquely identifies a document
        self.on = (on if isinstance(on, list) else [on])  # normalise to a list of fields
        self.documents = None  # populated elsewhere; len(self) is 0 until then
        self.k = k  # default number of documents to retrieve
        self.batch_size = batch_size

    def __repr__(self) -> str:
        repr = f'{self.__class__.__name__} retriever'
        repr += f'''
 key : {self.key}'''
        repr += f'''
 on : {', '.join(self.on)}'''
        repr += f'''
 documents: {len(self)}'''
        return repr

    def __call__(self, q: typing.Union[(typing.List[str], str)], k: typing.Optional[int], batch_size: typing.Optional[int], **kwargs) -> typing.Union[(typing.List[typing.List[typing.Dict[(str, str)]]], typing.List[typing.Dict[(str, str)]])]:
        # Base implementation retrieves nothing; subclasses override.
        return []

    def __len__(self):
        return (len(self.documents) if (self.documents is not None) else 0)

    def __add__(self, other) -> Pipeline:
        """Chain this retriever with another model (or a documents mapping)."""
        if isinstance(other, Pipeline):
            return Pipeline(self, other.models)
        elif isinstance(other, list):
            # A raw list of documents becomes a key -> document mapping stage.
            return Pipeline([self, {document[self.key]: document for document in other}])
        return Pipeline([self, other])

    def __or__(self, other) -> Union:
        if isinstance(other, Union):
            return Union(([self] + other.models))
        return Union([self, other])

    def __and__(self, other) -> Intersection:
        if isinstance(other, Intersection):
            return Intersection(([self] + other.models))
        return Intersection([self, other])

    def __mul__(self, other) -> Vote:
        if isinstance(other, Vote):
            return Vote(([self] + other.models))
        return Vote([self, other])
# NOTE(review): the call below lost its decorator name in formatting — it is
# presumably a '@dxeapi(params={...})'-style prototype decorator; confirm.
(params={'This': POINTER, 'Width': ULONGLONG, 'Register': INT, 'CpuIndex': ULONGLONG, 'Buffer': POINTER})
def hook_SmmWriteSaveState(ql: Qiling, address: int, params):
    """EFI_SMM_CPU_PROTOCOL.WriteSaveState hook: copy `Width` bytes from the
    guest buffer into the emulated SMM save-state area for `Register`.

    Returns EFI_INVALID_PARAMETER for any CPU other than 0 (only one CPU is
    emulated), EFI_SUCCESS otherwise.
    """
    Width = params['Width']  # number of bytes to write
    Register = params['Register']  # save-state register identifier
    CpuIndex = params['CpuIndex']
    Buffer = params['Buffer']  # guest pointer to the source bytes
    if (CpuIndex > 0):
        return EFI_INVALID_PARAMETER
    data = ql.mem.read(Buffer, Width)
    ql.os.smm.ssa.write(Register, bytes(data))
    return EFI_SUCCESS
def plot_results(allresults, *, xy_fn=default_xy_fn, split_fn=default_split_fn, group_fn=default_split_fn, average_group=False, shaded_std=True, shaded_err=True, figsize=None, legend_outside=False, resample=0, smooth_step=1.0):
    """Plot a collection of experiment results.

    Results are partitioned into vertically stacked subplots by `split_fn`
    and into colored curves by `group_fn`; curves within a group can be
    averaged, with optional std / standard-error shading.

    Args:
        allresults: iterable of result objects understood by the *_fn callbacks.
        xy_fn: result -> (x, y); x may be None to use np.arange(len(y)).
        split_fn: result -> subplot key (one subplot row per distinct key).
        group_fn: result -> curve-group key (one color/legend entry per key).
        average_group: plot the mean of each group instead of every run.
        shaded_std / shaded_err: shade +-1 std / standard error around means.
        figsize: figure size; defaults to (6, 6 * number_of_rows).
        legend_outside: place legends outside the axes.
        resample: number of points for symmetric-EMA resampling (0 disables;
            forced to 512 when average_group is set and resample is 0).
        smooth_step: EMA decay expressed in units of the resampled x grid.

    Returns:
        (figure, 2-D array of axes).
    """
    if (split_fn is None):
        split_fn = (lambda _: '')
    if (group_fn is None):
        group_fn = (lambda _: '')
    sk2r = defaultdict(list)  # split key -> list of results
    for result in allresults:
        splitkey = split_fn(result)
        sk2r[splitkey].append(result)
    assert (len(sk2r) > 0)
    assert isinstance(resample, int), "0: don't resample. <integer>: that many samples"
    nrows = len(sk2r)
    ncols = 1
    figsize = (figsize or (6, (6 * nrows)))
    (f, axarr) = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize)
    groups = list(set((group_fn(result) for result in allresults)))
    default_samples = 512
    if average_group:
        # Averaging requires a common x grid, so resampling is forced on.
        resample = (resample or default_samples)
    for (isplit, sk) in enumerate(sorted(sk2r.keys())):
        g2l = {}  # group -> line handle (for the legend)
        g2c = defaultdict(int)  # group -> run count
        sresults = sk2r[sk]
        gresults = defaultdict(list)  # group -> list of (x, y) curves
        ax = axarr[isplit][0]
        for result in sresults:
            group = group_fn(result)
            g2c[group] += 1
            (x, y) = xy_fn(result)
            if (x is None):
                x = np.arange(len(y))
            (x, y) = map(np.asarray, (x, y))
            if average_group:
                gresults[group].append((x, y))
            else:
                if resample:
                    (x, y, counts) = symmetric_ema(x, y, x[0], x[(- 1)], resample, decay_steps=smooth_step)
                (l,) = ax.plot(x, y, color=COLORS[(groups.index(group) % len(COLORS))])
                g2l[group] = l
        if average_group:
            for group in sorted(groups):
                xys = gresults[group]
                if (not any(xys)):
                    continue
                color = COLORS[(groups.index(group) % len(COLORS))]
                origxs = [xy[0] for xy in xys]
                minxlen = min(map(len, origxs))

                def allequal(qs):
                    # True when every array equals the first one elementwise.
                    return all(((q == qs[0]).all() for q in qs[1:]))
                if resample:
                    # Resample every run onto a shared grid spanning the overlap.
                    low = max((x[0] for x in origxs))
                    high = min((x[(- 1)] for x in origxs))
                    usex = np.linspace(low, high, resample)
                    ys = []
                    for (x, y) in xys:
                        ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
                else:
                    # Without resampling all runs must share the same x values.
                    assert allequal([x[:minxlen] for x in origxs]), 'If you want to average unevenly \nsampled data, set resample=<number of samples you want>'
                    usex = origxs[0]
                    ys = [xy[1][:minxlen] for xy in xys]
                ymean = np.mean(ys, axis=0)
                ystd = np.std(ys, axis=0)
                ystderr = (ystd / np.sqrt(len(ys)))
                (l,) = axarr[isplit][0].plot(usex, ymean, color=color)
                g2l[group] = l
                if shaded_err:
                    ax.fill_between(usex, (ymean - ystderr), (ymean + ystderr), color=color, alpha=0.4)
                if shaded_std:
                    ax.fill_between(usex, (ymean - ystd), (ymean + ystd), color=color, alpha=0.2)
        plt.tight_layout()
        if any(g2l.keys()):
            # With averaging, show run counts next to group names.
            ax.legend(g2l.values(), ([('%s (%i)' % (g, g2c[g])) for g in g2l] if average_group else g2l.keys()), loc=(2 if legend_outside else None), bbox_to_anchor=((1, 1) if legend_outside else None))
        ax.set_title(sk)
    return (f, axarr)
def python_param_net_file():
    """Write a temporary prototxt describing a net with two python layers
    (multiply by 10, then by 2) and return the file's path.

    The file is created with delete=False, so it survives the context manager
    and the caller is responsible for removing it.
    """
    prototxt_lines = [
        "name: 'pythonnet' force_backward: true",
        " input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }",
        " layer { type: 'Python' name: 'mul10' bottom: 'data' top: 'mul10'",
        " python_param { module: 'test_python_layer_with_param_str'",
        " layer: 'SimpleParamLayer' param_str: '10' } }",
        " layer { type: 'Python' name: 'mul2' bottom: 'mul10' top: 'mul2'",
        " python_param { module: 'test_python_layer_with_param_str'",
        " layer: 'SimpleParamLayer' param_str: '2' } }",
    ]
    with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
        f.write('\n'.join(prototxt_lines))
    return f.name
# NOTE(review): the marker below lost its '@pytest.mark' prefix in formatting —
# it should read '@pytest.mark.supported(...)'; confirm against the original.
.supported(only_if=(lambda backend: backend.cipher_supported(algorithms.SM4((b'\x00' * 16)), modes.ECB())), skip_message='Does not support SM4 ECB')
class TestSM4ModeECB():
    """Encryption tests for SM4 in ECB mode against the
    draft-ribose-cfrg-sm4-10 vectors, skipped when the backend lacks SM4/ECB."""
    test_ecb = generate_encrypt_test(load_nist_vectors, os.path.join('ciphers', 'SM4'), ['draft-ribose-cfrg-sm4-10-ecb.txt'], (lambda key, **kwargs: algorithms.SM4(binascii.unhexlify(key))), (lambda **kwargs: modes.ECB()))
def _adjust_widths_groups_compatibilty(stage_widths, bottleneck_ratios, group_widths):
    """Make per-stage widths and group widths mutually compatible.

    The bottleneck width of each stage is clamped-to/quantized-by its group
    width so grouped convolutions divide evenly, then mapped back to a stage
    width.

    Returns:
        (adjusted stage widths, clamped group widths).
    """
    bottleneck_widths = [int(w * b) for w, b in zip(stage_widths, bottleneck_ratios)]
    # A group can never be wider than the bottleneck it divides.
    clamped_groups = [min(g, w) for g, w in zip(group_widths, bottleneck_widths)]
    quantized_widths = [_quantize_float(w, g) for w, g in zip(bottleneck_widths, clamped_groups)]
    adjusted_stage_widths = [int(w / b) for w, b in zip(quantized_widths, bottleneck_ratios)]
    return (adjusted_stage_widths, clamped_groups)
def fix_missing_data(contour_data):
    """Repair a missing coordinate in a flat [x0, y0, z0, x1, y1, z1, ...] list.

    DICOM contour data occasionally arrives with one value replaced by the
    empty string. A missing x or y is filled by averaging the same coordinate
    of the neighbouring points (wrapping around the closed contour); a missing
    z takes the minimum z of the remaining points, since a contour normally
    lies in one slice. More than one missing value is only logged — repairing
    that case is not implemented.

    Bugs fixed: the original guard read ``contour_data.any() == ''``, which
    compares a bool against a string and is always False, so the repair path
    never executed. The neighbouring values are now also converted to float
    before averaging (the raw array holds strings, and adding two strings
    then multiplying by 0.5 would raise TypeError).

    Args:
        contour_data: sequence of coordinate strings/numbers, length 3 * n.

    Returns:
        np.ndarray with the missing value filled in when possible.
    """
    contour_data = np.array(contour_data)
    if (contour_data == '').any():
        logger.debug('Missing values detected.')
        missing_values = np.where(contour_data == '')[0]
        if missing_values.shape[0] > 1:
            logger.debug("More than one value missing, fixing this isn't implemented yet...")
        else:
            logger.debug('Only one value missing.')
            missing_index = missing_values[0]
            missing_axis = missing_index % 3
            if missing_axis == 0:
                logger.debug('Missing value in x axis: interpolating.')
                if missing_index > (len(contour_data) - 3):
                    # Last point: wrap to the first point of the contour.
                    lower_value = contour_data[missing_index - 3]
                    upper_value = contour_data[0]
                elif missing_index == 0:
                    # First point: wrap to the last point of the contour.
                    lower_value = contour_data[-3]
                    upper_value = contour_data[3]
                else:
                    lower_value = contour_data[missing_index - 3]
                    upper_value = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (float(lower_value) + float(upper_value))
            elif missing_axis == 1:
                logger.debug('Missing value in y axis: interpolating.')
                if missing_index > (len(contour_data) - 2):
                    lower_value = contour_data[missing_index - 3]
                    upper_value = contour_data[1]
                elif missing_index == 0:
                    lower_value = contour_data[-2]
                    upper_value = contour_data[4]
                else:
                    lower_value = contour_data[missing_index - 3]
                    upper_value = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (float(lower_value) + float(upper_value))
            else:
                logger.debug('Missing value in z axis: taking slice value')
                temp = contour_data[2::3].tolist()
                temp.remove('')
                contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))
    return contour_data
# NOTE(review): the tuple-like line below lost its '@expose'-style decorator
# name in formatting — confirm against the original source.
('current-continuation-marks', [default(values.W_ContinuationPromptTag, values.w_default_continuation_prompt_tag)], simple=False)
def current_cont_marks(prompt_tag, env, cont):
    """Racket `current-continuation-marks`: capture the marks of the current
    continuation up to `prompt_tag` and return them as a W_ContinuationMarkSet."""
    # Local import avoids a circular dependency with the interpreter module.
    from pycket.interpreter import return_value
    return return_value(values.W_ContinuationMarkSet(cont, prompt_tag), env, cont)
def _test():
    """Smoke-test every ShuffleNet variant: check the expected parameter count,
    run one forward and backward pass, and verify the (1, 1000) logits shape."""
    import torch
    pretrained = False
    models = [shufflenet_g1_w1, shufflenet_g2_w1, shufflenet_g3_w1, shufflenet_g4_w1, shufflenet_g8_w1, shufflenet_g1_w3d4, shufflenet_g3_w3d4, shufflenet_g1_wd2, shufflenet_g3_wd2, shufflenet_g1_wd4, shufflenet_g3_wd4]
    for model in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        # Expected parameter counts per variant — guards against accidental
        # architecture changes.
        assert ((model != shufflenet_g1_w1) or (weight_count == 1531936))
        assert ((model != shufflenet_g2_w1) or (weight_count == 1733848))
        assert ((model != shufflenet_g3_w1) or (weight_count == 1865728))
        assert ((model != shufflenet_g4_w1) or (weight_count == 1968344))
        assert ((model != shufflenet_g8_w1) or (weight_count == 2434768))
        assert ((model != shufflenet_g1_w3d4) or (weight_count == 975214))
        assert ((model != shufflenet_g3_w3d4) or (weight_count == 1238266))
        assert ((model != shufflenet_g1_wd2) or (weight_count == 534484))
        assert ((model != shufflenet_g3_wd2) or (weight_count == 718324))
        assert ((model != shufflenet_g1_wd4) or (weight_count == 209746))
        assert ((model != shufflenet_g3_wd4) or (weight_count == 305902))
        # One dummy ImageNet-sized batch through forward and backward.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
# NOTE(review): the marker below lost its '@pytest.mark' prefix in formatting —
# it should read '@pytest.mark.parametrize(...)'; confirm against the original.
.parametrize('dialect', ['tsql'])
def test_tsql_assignment_operator(dialect: str):
    """T-SQL's `alias = expression` SELECT syntax must yield column lineage
    from the source column to the assigned alias."""
    sql = "INSERT INTO foo\nSELECT FirstColumnHeading = 'xyz',\n       SecondColumnHeading = ProductID\nFROM Production.Product"
    assert_column_lineage_equal(sql, [(ColumnQualifierTuple('ProductID', 'Production.Product'), ColumnQualifierTuple('SecondColumnHeading', 'foo'))], dialect=dialect, test_sqlparse=False)
def test_cmd_dict_input_with_args():
    """A dict-style cmd step formats its run string from context values and
    does not write cmdOut when save is not requested."""
    script = get_cmd('tests/testfiles/cmds/args.sh', 'tests\\testfiles\\cmds\\args.bat')
    context = Context({
        'a': 'one',
        'b': 'two two',
        'c': 'three',
        'd': script,
        'cmd': {'run': '{d} {a} "{b}" {c}'},
    })

    pypyr.steps.cmd.run_step(context)

    assert 'cmdOut' not in context
class InputOutputOracleLevelDB(InputOutputOracle):
    """Input/output-behaviour oracle backed by a LevelDB database.

    Regular entries map a hash of an output vector to an expression string;
    the reserved META/VARS/INPUTS/SIZE keys store grammar metadata and the
    entry count.
    """

    def __init__(self, grammar: TritonGrammar, inputs: List[Input], f_name: str=''):
        super(InputOutputOracleLevelDB, self).__init__(grammar, inputs, f_name)
        self.db = None  # plyvel handle, set by create()/load()

    # NOTE(review): create() and load() take no self/cls — they were most
    # likely decorated with @staticmethod in the original source; confirm.
    def create(filename: Union[(str, Path)], grammar: TritonGrammar, inputs: List[Input], constants: List[int]=[]) -> 'InputOutputOracleLevelDB':
        """Create a fresh database on disk, store grammar metadata, and return
        an oracle bound to it.

        NOTE(review): `constants=[]` is a mutable default argument — harmless
        only as long as it is never mutated; consider defaulting to None.
        """
        db = plyvel.DB(str(filename), create_if_missing=True)
        metas = dict(hash_mode='MD5', operators=[x.name for x in grammar.ops], constants=constants)
        db.put(META_KEY, json.dumps(metas).encode())
        db.put(VARS_KEY, json.dumps(grammar.vars_dict).encode())
        db.put(INPUTS_KEY, json.dumps(inputs).encode())
        lkp = InputOutputOracleLevelDB(grammar=grammar, inputs=inputs, f_name=filename)
        lkp.db = db
        return lkp

    def load(file: Union[(Path, str)]) -> 'InputOutputOracleLevelDB':
        """Open an existing database and rebuild the grammar from its metadata."""
        db = plyvel.DB(str(file))
        metas = json.loads(db.get(META_KEY))
        ops = [BvOp[x] for x in metas['operators']]
        vrs = list(json.loads(db.get(VARS_KEY)).items())
        inps = json.loads(db.get(INPUTS_KEY))
        grammar = TritonGrammar(vars=vrs, ops=ops)
        lkp = InputOutputOracleLevelDB(grammar=grammar, inputs=inps, f_name=file)
        lkp.db = db
        return lkp

    def add_entry(self, hash: Hash, value: str) -> None:
        """Insert one hash -> expression entry and bump the stored size counter."""
        self.db.put(hash, value.encode())
        self.db.put(SIZE_KEY, str((int(self.db.get(SIZE_KEY)) + 1)).encode())

    def add_entries(self, entries: List[Tuple[(Hash, str)]], chunk_size: int=10000, update_count: bool=True) -> None:
        """Batch-insert entries in chunks of `chunk_size` using synced write
        batches, then update the size counter once at the end.

        NOTE(review): despite the annotation, each entry looks like
        (output values, expression string) — the key is computed here via
        self.hash(outputs); confirm the intended element type.
        """
        count = len(entries)
        for step in range(0, count, chunk_size):
            with self.db.write_batch(sync=True) as wb:
                for (outs, s) in entries[step:(step + chunk_size)]:
                    h = self.hash(list(outs))
                    wb.put(h, s.encode())
        if update_count:
            cur_count = self.db.get(SIZE_KEY)
            new_count = (count if (cur_count is None) else (int(cur_count) + count))
            self.db.put(SIZE_KEY, str(new_count).encode())

    def __iter__(self) -> Iterator[Tuple[(Hash, str)]]:
        """Yield (hash, expression) pairs, skipping the reserved metadata keys."""
        for (key, value) in self.db:
            if (key not in [META_KEY, VARS_KEY, INPUTS_KEY, SIZE_KEY]):
                (yield (key, value.decode()))

    def is_writable(self) -> bool:
        # LevelDB-backed oracles always accept new entries.
        return True

    def size(self) -> int:
        """Number of stored entries, as tracked under SIZE_KEY."""
        return int(self.db.get(SIZE_KEY))

    def _get_item(self, h: Hash) -> Optional[str]:
        """Return the expression stored under hash `h`, or None if absent."""
        entry = self.db.get(h)
        return (entry.decode() if entry else None)
class test_element3(unittest.TestCase):
    """Unit tests for the element3 protocol-message serialisation helpers."""

    def test_cat_messages(self):
        """cat_messages: plain bytes become 'd' messages, tuples become 'D'
        (DataRow) messages, and serialisation errors propagate."""
        self.assertEqual(e3.cat_messages([]), b'')
        self.assertEqual(e3.cat_messages([b'foo']), b'd\x00\x00\x00\x07foo')
        self.assertEqual(e3.cat_messages([b'foo', b'foo']), (2 * b'd\x00\x00\x00\x07foo'))
        self.assertEqual(e3.cat_messages([b'foo', e3.SynchronizeMessage, b'foo']), ((b'd\x00\x00\x00\x07foo' + e3.SynchronizeMessage.bytes()) + b'd\x00\x00\x00\x07foo'))
        # Long sequences exercise any fast path for bulk concatenation.
        self.assertEqual(e3.cat_messages((1000 * [b'foo', e3.SynchronizeMessage, b'foo'])), (1000 * ((b'd\x00\x00\x00\x07foo' + e3.SynchronizeMessage.bytes()) + b'd\x00\x00\x00\x07foo')))
        self.assertEqual(e3.cat_messages((1000 * [e3.SynchronizeMessage, b'foo', b'foo'])), (1000 * (e3.SynchronizeMessage.bytes() + (2 * b'd\x00\x00\x00\x07foo'))))
        # Tuples serialise as DataRow messages: 'D' + length + field count,
        # NULL fields encoded as \xff\xff\xff\xff.
        pack_head = struct.Struct('!lH').pack
        self.assertEqual(e3.cat_messages([(b'foo',)]), ((b'D' + pack_head(((7 + 4) + 2), 1)) + b'\x00\x00\x00\x03foo'))
        self.assertEqual(e3.cat_messages([(b'foo', None)]), ((b'D' + pack_head((((7 + 4) + 4) + 2), 2)) + b'\x00\x00\x00\x03foo\xff\xff\xff\xff'))
        self.assertEqual(e3.cat_messages([(b'foo', None, b'bar')]), ((b'D' + pack_head(((((7 + 7) + 4) + 4) + 2), 3)) + b'\x00\x00\x00\x03foo\xff\xff\xff\xff\x00\x00\x00\x03bar'))
        # More than 65535 fields cannot be encoded in the 16-bit field count.
        self.assertRaises((OverflowError, struct.error), e3.cat_messages, [((None,) * 65536)])

        class ThisEx(Exception):
            pass

        class ThatEx(Exception):
            pass

        # Exceptions raised by a message's serialize() must propagate.
        class Bad(e3.Message):

            def serialize(self):
                raise ThisEx('foo')
        self.assertRaises(ThisEx, e3.cat_messages, [Bad()])

        # A message without a `type` attribute fails with AttributeError.
        class NoType(e3.Message):

            def serialize(self):
                return b''
        self.assertRaises(AttributeError, e3.cat_messages, [NoType()])

        # A non-bytes `type` fails when packed into the wire format.
        class BadType(e3.Message):
            type = 123

            def serialize(self):
                return b''
        self.assertRaises((TypeError, struct.error), e3.cat_messages, [BadType()])

    def testSerializeParseConsistency(self):
        """parse(serialize(msg)) round-trips for every sample message."""
        for msg in message_samples:
            smsg = msg.serialize()
            self.assertEqual(msg, msg.parse(smsg))

    def testEmptyMessages(self):
        """EmptyMessage instances behave as singletons of their class."""
        for x in e3.__dict__.values():
            if isinstance(x, e3.EmptyMessage):
                xtype = type(x)
                self.assertTrue((x is xtype()))

    def testUnknownNoticeFields(self):
        """Unknown field codes in Notice/Error payloads are preserved and
        retrievable alongside the known ones."""
        N = e3.Notice.parse(b'\x00\x00Z\x00Xklsvdnvldsvkndvlsn\x00Pfoobar\x00Mmessage\x00')
        E = e3.Error.parse(b'Z\x00Xklsvdnvldsvkndvlsn\x00Pfoobar\x00Mmessage\x00\x00')
        self.assertEqual(N[b'M'], b'message')
        self.assertEqual(E[b'M'], b'message')
        self.assertEqual(N[b'P'], b'foobar')
        self.assertEqual(E[b'P'], b'foobar')
        self.assertEqual(len(N), 4)
        self.assertEqual(len(E), 4)

    def testCompleteExtracts(self):
        """Complete messages expose the command tag and the trailing row count,
        tolerating extra whitespace and embedded numbers."""
        x = e3.Complete(b'FOO BAR 1321')
        self.assertEqual(x.extract_command(), b'FOO BAR')
        self.assertEqual(x.extract_count(), 1321)
        x = e3.Complete(b' CREATE \tTABLE 13210 ')
        self.assertEqual(x.extract_command(), b'CREATE \tTABLE')
        self.assertEqual(x.extract_count(), 13210)
        x = e3.Complete(b' CREATE \tTABLE \t713210 ')
        self.assertEqual(x.extract_command(), b'CREATE \tTABLE')
        self.assertEqual(x.extract_count(), 713210)
        x = e3.Complete(b' CREATE \tTABLE 0 \t13210 ')
        self.assertEqual(x.extract_command(), b'CREATE \tTABLE')
        self.assertEqual(x.extract_count(), 13210)
        x = e3.Complete(b' 0 \t13210 ')
        self.assertEqual(x.extract_command(), None)
        self.assertEqual(x.extract_count(), 13210)
class ResLayer(nn.Sequential):
    """Stack `num_blocks` residual blocks into one backbone stage.

    A downsample shortcut is built when the stride or channel count changes;
    with `avg_down` the striding is done by an AvgPool so the shortcut's 1x1
    conv keeps stride 1. `multi_grid` overrides per-block dilations (as used
    by DeepLab-style heads); otherwise `contract_dilation` halves the first
    block's dilation.
    """

    def __init__(self, block, inplanes, planes, num_blocks, stride=1, dilation=1, avg_down=False, conv_cfg=None, norm_cfg=dict(type='BN'), multi_grid=None, contract_dilation=False, **kwargs):
        self.block = block
        downsample = None
        # Shortcut path is needed whenever the residual branch changes the
        # spatial resolution or the channel count.
        if ((stride != 1) or (inplanes != (planes * block.expansion))):
            downsample = []
            conv_stride = stride
            if avg_down:
                # Move the striding into an AvgPool; the 1x1 conv stays stride-1.
                conv_stride = 1
                downsample.append(nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False))
            downsample.extend([build_conv_layer(conv_cfg, inplanes, (planes * block.expansion), kernel_size=1, stride=conv_stride, bias=False), build_norm_layer(norm_cfg, (planes * block.expansion))[1]])
            downsample = nn.Sequential(*downsample)
        layers = []
        # Pick the first block's dilation: explicit multi_grid wins, otherwise
        # optionally contract a dilation > 1 for the first block.
        if (multi_grid is None):
            if ((dilation > 1) and contract_dilation):
                first_dilation = (dilation // 2)
            else:
                first_dilation = dilation
        else:
            first_dilation = multi_grid[0]
        layers.append(block(inplanes=inplanes, planes=planes, stride=stride, dilation=first_dilation, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs))
        inplanes = (planes * block.expansion)
        # Remaining blocks keep stride 1 and use the stage (or multi_grid) dilation.
        for i in range(1, num_blocks):
            layers.append(block(inplanes=inplanes, planes=planes, stride=1, dilation=(dilation if (multi_grid is None) else multi_grid[i]), conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs))
        super(ResLayer, self).__init__(*layers)
def parse(quteproc):
    """Parse the directory-browser page currently loaded in quteproc.

    Returns:
        Parsed(path, parent, folders, files): the browsed path (taken from the
        page title), the parent directory (None when absent), and Item lists
        for the folder and file entries.
    """
    html = quteproc.get_content(plain=False)
    soup = bs4.BeautifulSoup(html, 'html.parser')
    with testutils.ignore_bs4_warning():
        print(soup.prettify())
    title_prefix = 'Browse directory: '
    # The browsed path is embedded in the page title after the prefix.
    path = pathlib.Path(soup.title.string[len(title_prefix):])
    container = soup('div', id='dirbrowserContainer')[0]
    parent_elem = container('ul', class_='parent')
    if (not parent_elem):
        # No parent entry (e.g. filesystem root).
        parent = None
    else:
        parent = pathlib.Path(QUrl(parent_elem[0].li.a['href']).toLocalFile())
    folders = []
    files = []
    # Both lists share the same <ul class=...><li><a href=...> structure.
    for (css_class, list_) in [('folders', folders), ('files', files)]:
        for li in container('ul', class_=css_class)[0]('li'):
            item_path = pathlib.Path(QUrl(li.a['href']).toLocalFile())
            list_.append(Item(path=item_path, link=li.a['href'], text=str(li.a.string)))
    return Parsed(path=path, parent=parent, folders=folders, files=files)
def forwarding(args, bkd_dr: DataReader, model, gids, criterion):
    """Run the model over the graphs selected by `gids` and return the mean loss.

    Each batch's mean loss is re-weighted by its batch size so the returned
    value is the average over samples, not over batches. Requires CUDA; the
    model is moved to the GPU if it is not there already.
    """
    assert torch.cuda.is_available(), 'no GPU available'
    cuda = torch.device('cuda')
    gdata = GraphData(bkd_dr, gids)
    loader = DataLoader(gdata, batch_size=args.batch_size, shuffle=False, collate_fn=collate_batch)
    if (not next(model.parameters()).is_cuda):
        model.to(cuda)
    model.eval()
    (all_loss, n_samples) = (0.0, 0.0)
    for (batch_idx, data) in enumerate(loader):
        # Move every collated tensor of the batch to the GPU.
        for i in range(len(data)):
            data[i] = data[i].to(cuda)
        output = model(data)
        if (len(output.shape) == 1):
            # Restore a batch dimension for single-sample batches.
            output = output.unsqueeze(0)
        # data[4] holds the targets — TODO confirm against collate_batch.
        loss = criterion(output, data[4])
        # Undo the batch-mean so the final division averages over samples.
        all_loss = torch.add(torch.mul(loss, len(output)), all_loss)
        n_samples += len(output)
    all_loss = torch.div(all_loss, n_samples)
    return all_loss
def min_informative_str(obj, indent_level=0, _prev_obs=None, _tag_generator=None):
    """Return a compact, tagged, recursive description of a graph object.

    Objects that were already printed are rendered as an '<tag>'
    back-reference instead of being expanded again. `_prev_obs` and
    `_tag_generator` are internal recursion state.
    """
    if _prev_obs is None:
        _prev_obs = {}
    indent = ' ' * indent_level

    # Back-reference: this object was already printed under some tag.
    seen_tag = _prev_obs.get(id(obj))
    if seen_tag is not None:
        return indent + '<' + seen_tag + '>'

    if _tag_generator is None:
        _tag_generator = _TagGenerator()
    cur_tag = _tag_generator.get_tag()
    _prev_obs[id(obj)] = cur_tag

    if hasattr(obj, '__array__'):
        name = '<ndarray>'
    elif getattr(obj, 'name', None) is not None:
        name = obj.name
    elif getattr(obj, 'owner', None) is not None:
        # Apply node: show the op followed by its recursively-described inputs.
        parts = [str(obj.owner.op)]
        for child in obj.owner.inputs:
            parts.append(min_informative_str(child, indent_level=(indent_level + 1), _prev_obs=_prev_obs, _tag_generator=_tag_generator))
        name = '\n'.join(parts)
    else:
        name = str(obj)

    return indent + cur_tag + '. ' + name
def setup_logging(args):
    """Configure per-process logging under <save_dir>/log and init trackers.

    Only the accelerate main process logs at INFO and initialises the
    experiment trackers; all other ranks are silenced to ERROR.

    Returns:
        (logger, run_name): run_name is the tracker's run name on the main
        process and '' everywhere else.
    """
    project_name = args.model_ckpt.split('/')[(- 1)]
    logger = logging.getLogger(__name__)
    log_dir = (Path(args.save_dir) / 'log/')
    log_dir.mkdir(exist_ok=True)
    # One debug file per process so ranks do not clobber each other's logs.
    filename = f'debug_{accelerator.process_index}.log'
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, handlers=[logging.FileHandler((log_dir / filename)), logging.StreamHandler()])
    if accelerator.is_main_process:
        accelerator.init_trackers(project_name, vars(args))
        # e.g. the wandb-assigned run name — TODO confirm tracker backend.
        run_name = accelerator.trackers[0].run.name
        logger.setLevel(logging.INFO)
        datasets.utils.logging.set_verbosity_info()
        transformers.utils.logging.set_verbosity_info()
    else:
        run_name = ''
        logger.setLevel(logging.ERROR)
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    return (logger, run_name)
class _Transition(nn.Sequential):
    """DenseNet-style transition: BN -> ReLU -> 1x1 conv, optionally followed
    by 2x2 average pooling when stride == 2."""

    def __init__(self, num_input_features, num_output_features, stride=2):
        super(_Transition, self).__init__()
        stages = [
            ('norm', bn(num_input_features)),
            ('relu', nn.ReLU(inplace=True)),
            ('conv', nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)),
        ]
        if stride == 2:
            # Spatial downsampling is done by pooling, not by the conv.
            stages.append(('pool', nn.AvgPool2d(kernel_size=2, stride=stride)))
        for stage_name, stage_module in stages:
            self.add_module(stage_name, stage_module)
def test_AccessorABC_invalid_kind():
    """Kinds that are underscored or not callable must raise ValueError."""

    class BrokenAccessor(AccessorABC):
        _default_kind = 'zaraza'

        def __init__(self):
            self.dont_work = None

        def _zaraza(self):
            pass

    accessor = BrokenAccessor()

    # Underscored names are rejected even though the method exists.
    with pytest.raises(ValueError):
        accessor('_zaraza')

    # Plain non-callable attributes are rejected as well.
    with pytest.raises(ValueError):
        accessor('dont_work')
def unpack(path: str, dest: str='.') -> None:
    """Unpack a wheel archive into <dest>/<name>-<version>.

    Each member's unix permission bits (stored in the upper 16 bits of the
    zip entry's external attributes) are restored after extraction.
    """
    with WheelFile(path) as wheel:
        name_version = wheel.parsed_filename.group('namever')
        destination = Path(dest) / name_version
        print(f'Unpacking to: {destination}...', end='', flush=True)
        for member in wheel.filelist:
            wheel.extract(member, destination)
            # Upper 16 bits hold the unix mode; keep the rwx bits only.
            permissions = (member.external_attr >> 16) & 0o777
            (destination / member.filename).chmod(permissions)
    print('OK')
def test_nested_credentials(monkeypatch):
    """Credentials from an active rasterio Env take precedence over AWS
    credentials present in the process environment."""
    # NOTE(review): the bare name below lost its decorator '@' prefix in
    # formatting — fake_opener is presumably wrapped by _env_credentialled;
    # confirm against the original source.
    _env_credentialled
    def fake_opener(path):
        return getenv()
    with rasterio.Env(session=AWSSession(aws_access_key_id='foo', aws_secret_access_key='bar')):
        assert (getenv()['AWS_ACCESS_KEY_ID'] == 'foo')
        assert (getenv()['AWS_SECRET_ACCESS_KEY'] == 'bar')
        # Conflicting credentials in the OS environment must not leak through.
        monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'lol')
        monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'wut')
        gdalenv = fake_opener('s3://foo/bar')
        assert (gdalenv['AWS_ACCESS_KEY_ID'] == 'foo')
        assert (gdalenv['AWS_SECRET_ACCESS_KEY'] == 'bar')
def _decode_string_to_dict(encoded_value: str, param_type: Type[Dict[(Any, Any)]]) -> Dict[(Any, Any)]:
    """Decode an encoded mapping string into a dict whose keys and values are
    coerced to the key/value types declared by `param_type`."""
    (key_type, value_type) = typing_inspect.get_args(param_type)
    return {key_type(key): value_type(value) for key, value in to_dict(encoded_value).items()}
def readme_simple():
    """README example: train an XGBoost binary classifier on the
    breast-cancer dataset across two Ray actors and save the model."""
    from sklearn.datasets import load_breast_cancer
    from xgboost_ray import RayDMatrix, RayParams, train
    (train_x, train_y) = load_breast_cancer(return_X_y=True)
    train_set = RayDMatrix(train_x, train_y)
    evals_result = {}
    # Distributed training: two actors, one CPU each; metrics are collected
    # into evals_result under the 'train' key.
    bst = train({'objective': 'binary:logistic', 'eval_metric': ['logloss', 'error']}, train_set, evals_result=evals_result, evals=[(train_set, 'train')], verbose_eval=False, ray_params=RayParams(num_actors=2, cpus_per_actor=1))
    bst.save_model('model.xgb')
    print('Final training error: {:.4f}'.format(evals_result['train']['error'][(- 1)]))
class NgrokStart(Command):
    """`ngrok start` workspace command.

    Exposes an ssh port via ngrok for one of: the local machine, a vagrant
    machine, a docker container, or a remote host — or starts a set of named
    tunnels from the ngrok config.
    """
    keyword = 'start'

    def assemble(self):
        super().assemble()
        # Exactly one target location must be chosen.
        location_group = self.parser.add_mutually_exclusive_group(required=True)
        location_group.add_argument('-l', '--local', action='store_true', dest='local', help='exposes the local machine ssh port')
        location_group.add_argument('-V', '--vagrant', nargs='?', const='default', default=False, help='exposes the given vagrant machine')
        location_group.add_argument('-D', '--docker', nargs='?', const='reahl', default=False, help='exposes the given docker container')
        location_group.add_argument('-s', '--ssh', default=None, help='exposes the given remote machine')
        location_group.add_argument('-n', '--named-tunnel', nargs='*', default=None, help='starts the given named tunnels')
        self.parser.add_argument('-p', '--port', default='22', help='the port to expose')
        self.parser.add_argument('-r', '--region', default='eu', help='ngrok region to connect to')
        self.parser.add_argument('-P', '--path', default='~/bin', help='where to find ngrok')

    def execute(self, args):
        super().execute(args)
        # Make sure the ngrok binary location (args.path) is on PATH for the
        # subprocess.
        env_with_path = dict(os.environ)
        env_with_path['PATH'] += ('%s%s' % (os.pathsep, os.path.expanduser(args.path)))
        if args.named_tunnel:
            return Executable('ngrok', verbose=True).check_call((['start', ('--region=%s' % args.region)] + args.named_tunnel), env=env_with_path)
        else:
            # Resolve the hostname/port to tunnel depending on the target.
            if args.vagrant:
                vagrant_ssh_config = VagrantMachine(args.vagrant).get_ssh_config()
                hostname = vagrant_ssh_config['HostName']
                port = vagrant_ssh_config['Port']
            elif args.docker:
                docker_container = DockerContainer(args.docker)
                hostname = docker_container.ip_address
                port = '22'
            elif args.ssh:
                hostname = args.ssh
                port = args.port
            else:
                # --local: no hostname, ngrok targets the local machine.
                hostname = None
                port = args.port
            hostname_port = ':'.join([i for i in [hostname, port] if i])
            return Executable('ngrok', verbose=True).check_call(['tcp', ('--region=%s' % args.region), hostname_port], env=env_with_path)
def get_main_ubo_table(flavors: list[FlavorMeta]):
    """Build the markdown table of uBlock Origin one-click subscribe links.

    One column per flavor, one row per search-engine filter; each cell is a
    badge image wrapped in a link to the uBO subscribe URL for that flavor's
    filter file.
    """
    # Header row with flavor names, then the markdown alignment row.
    ret = md_tr('', *(f.table_name for f in flavors))
    ret += md_tr('---', *(':---:' for f in flavors))
    for filter_meta in search_engines:
        ret += md_tr(filter_meta.name, *(md_link(get_badge('uBO - add this filter', 'uBlock Origin', 'uBO', 'add this filter', filter_meta.color), get_ubo_subscribe_url(filter_meta.dist_path, f.filename, f'uBlock-Origin-dev-filter - {filter_meta.name} - {f.name}')) for f in flavors))
    return ret
class PrecisionAtRecallDetectionEvaluator(ObjectDetectionEvaluator):
    """Object-detection evaluator computing mean AP restricted to the recall
    window [recall_lower_bound, recall_upper_bound]."""

    def __init__(self, categories, matching_iou_threshold=0.5, recall_lower_bound=0.0, recall_upper_bound=1.0):
        # Fixed configuration: no CorLoc, unweighted mean AP, and a metric
        # prefix identifying the recall-windowed variant.
        super(PrecisionAtRecallDetectionEvaluator, self).__init__(categories, matching_iou_threshold=matching_iou_threshold, recall_lower_bound=recall_lower_bound, recall_upper_bound=recall_upper_bound, evaluate_corlocs=False, metric_prefix='PrecisionAtRecallBoxes', use_weighted_mean_ap=False)
def test_prints_skip_message_for_uploaded_package(upload_settings, stub_repository, capsys, caplog):
    """With skip_existing set, an already-uploaded package is skipped and a
    log message is emitted instead of a release URL."""
    upload_settings.skip_existing = True
    # Pretend the repository already holds every package.
    stub_repository.package_is_uploaded = (lambda package: True)

    assert upload.upload(upload_settings, [helpers.WHEEL_FIXTURE]) is None

    captured = capsys.readouterr()
    assert RELEASE_URL not in captured.out
    assert caplog.messages == [
        'Skipping twine-1.5.0-py2.py3-none-any.whl because it appears to already exist'
    ]
def define_D(opt):
    """Build the discriminator described by opt['network_D'].

    Weights are initialised with Kaiming init; the network is wrapped in
    nn.DataParallel when GPU ids are configured.

    Raises:
        NotImplementedError: for an unknown which_model_D value.
    """
    gpu_ids = opt['gpu_ids']
    opt_net = opt['network_D']
    which_model = opt_net['which_model_D']

    def _vgg_kwargs():
        # Shared constructor arguments for the VGG-style discriminators;
        # built lazily so other branches never touch these config keys.
        return dict(in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'])

    if which_model == 'discriminator_vgg_128':
        netD = arch.Discriminator_VGG_128(**_vgg_kwargs())
    elif which_model == 'dis_acd':
        netD = sft_arch.ACD_VGG_BN_96()
    elif which_model == 'discriminator_vgg_96':
        netD = arch.Discriminator_VGG_96(**_vgg_kwargs())
    elif which_model == 'discriminator_vgg_192':
        netD = arch.Discriminator_VGG_192(**_vgg_kwargs())
    elif which_model == 'discriminator_vgg_128_SN':
        netD = arch.Discriminator_VGG_128_SN()
    else:
        raise NotImplementedError('Discriminator model [{:s}] not recognized'.format(which_model))

    init_weights(netD, init_type='kaiming', scale=1)
    if gpu_ids:
        netD = nn.DataParallel(netD)
    return netD
def test_entity_relation():
    """End-to-end constrained-learning check: after training with the
    OrgBasedIn_Org_Loc constraint, predicted relations must agree with the
    predicted entity labels of the participating tokens."""
    (tokens, entities, relations) = get_data()
    for solver in get_solvers(num_samples=200):
        cons = constraint(OrgBasedIn_Org_Loc, solver)
        (ner, re) = train(cons)
        # Hard-decode relation (11 classes) and entity (5 classes) predictions.
        re = torch.argmax(torch.softmax(re(tokens).view((- 1), 11), dim=(- 1)), dim=(- 1))
        ner = torch.argmax(torch.softmax(ner(tokens).view((- 1), 5), dim=(- 1)), dim=(- 1))
        # Relation labels 3 and 8 must pair with entity labels 2 and 1
        # respectively — TODO confirm label-id meanings against get_data().
        assert ((ner[(re == 3)] == 2).all() and (ner[(re == 8)] == 1).all())
def create_test_image(x, y, field_centre, field_side_lengths, field_penumbra, field_rotation, bb_centre, bb_diameter, bb_max_attenuation):
    """Render a synthetic field-with-ball-bearing image on the grid defined by
    the coordinate vectors x and y."""
    field = create_field_with_bb_func(field_centre, field_side_lengths, field_penumbra, field_rotation, bb_centre, bb_diameter, bb_max_attenuation)
    grid_x, grid_y = np.meshgrid(x, y)
    return field(grid_x, grid_y)
class SuspendUser(IntermediateActionView):
    """Admin intermediate action: suspend the selected authors for a duration
    chosen on the confirmation page, notify each of them, and log the action."""
    permission_required = ('dictionary.suspend_user', 'dictionary.change_author')
    model = Author
    page_title = _('Suspend authors')
    template_name = 'admin/actions/suspend_user.html'
    max_input = 100  # upper bound on the number of selected authors

    def post(self, request):
        response = redirect(self.get_changelist_url())
        # Duration in hours = multiplier * quantity; both must be digits.
        factors = (request.POST.get('multiplier', ''), request.POST.get('quantity', ''))
        if (not all(map(str.isdigit, factors))):
            notifications.error(request, gettext('The selected duration was invalid.'))
            return response
        try:
            ban_hours = math.prod(map(int, factors))
            suspended_until = (timezone.now() + timedelta(hours=ban_hours))
        except OverflowError:
            # Absurdly large durations overflow timedelta arithmetic.
            notifications.error(self.request, gettext('The selected duration was invalid.'))
            return response
        user_list_raw = list(self.get_object_list())
        action_information = request.POST.get('information', gettext('No information was given.'))
        message_for_user = (gettext('your account has been suspended. administration message: %(message)s\n\nin your profile page, you can see the remaining time until your account gets reactivated.') % {'message': action_information})
        message_for_log = f'Suspended until {suspended_until}, information: {action_information}'
        log_list = []
        generic_superuser = get_generic_superuser()
        # Mark each author suspended, prepare an admin-log entry, and send the
        # notification message from the generic superuser account.
        for user in user_list_raw:
            user.suspended_until = suspended_until
            log_list.append(logentry_instance(message_for_log, request.user, Author, user))
            Message.objects.compose(generic_superuser, user, message_for_user)
        # Persist all suspensions and log entries in bulk.
        Author.objects.bulk_update(user_list_raw, ['suspended_until'])
        logentry_bulk_create(log_list)
        count = len(user_list_raw)
        notifications.success(request, (ngettext('%(count)d author was suspended.', '%(count)d authors were suspended.', count) % {'count': count}))
        return response
class ConvBnReLU3D(nn.Module):
    """3-D convolution (bias-free) followed by a normalization/activation layer.

    The norm/activation layer is constructed from ``norm_act`` (default
    ``InPlaceABN``), which fuses batch-norm and the nonlinearity.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1, norm_act=InPlaceABN):
        super(ConvBnReLU3D, self).__init__()
        # bias=False: the following normalization layer makes a conv bias redundant.
        self.conv = nn.Conv3d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=pad,
            bias=False,
        )
        self.bn = norm_act(out_channels)

    def forward(self, x):
        """Convolve ``x`` then apply the norm/activation layer."""
        features = self.conv(x)
        return self.bn(features)
class Solution(object):
    """Decide whether a string can be a palindrome after deleting at most one char."""

    def validPalindrome(self, s):
        """Return True if ``s`` is a palindrome or becomes one by removing one character."""
        return self.validPalindromeHelper(s, 0, (len(s) - 1), 1)

    def validPalindromeHelper(self, s, left, right, budget):
        """Two-pointer palindrome check over s[left..right] with ``budget`` deletions left."""
        length = len(s)
        # Move both pointers inward while the end characters keep matching.
        while left < length and right >= 0 and left <= right and s[left] == s[right]:
            left, right = left + 1, right - 1
        # Pointers met or crossed without a mismatch: this span is a palindrome.
        if left >= length or right < 0 or left >= right:
            return True
        if budget == 0:
            return False
        # Spend one deletion on the left mismatch first; fall back to the right.
        if self.validPalindromeHelper(s, left + 1, right, budget - 1):
            return True
        return self.validPalindromeHelper(s, left, right - 1, budget - 1)
def make_report(parsed):
    """Build and persist an HTML report for the finished task ``parsed['task']``.

    Reads the analyzer and sniffer TinyDB log files produced for the task,
    renders screenshots, network data and extracted artifacts into HTML
    tables, and stores the raw analyzer JSON, the rendered HTML report and
    the plain-text task logs via ``add_item_fs``.
    """
    table = ''
    full_table = ''
    analyzer_db = None
    sniffer_db = None
    # Per-task log file locations come from the parsed configuration.
    analyzer_path = '{}{}{}'.format(parsed['locations']['box_output'], parsed['task'], parsed['locations']['analyzer_logs'])
    sniffer_path = '{}{}{}'.format(parsed['locations']['box_output'], parsed['task'], parsed['locations']['sniffer_logs'])
    analyzer_db = TinyDB(analyzer_path)
    sniffer_db = TinyDB(sniffer_path)
    # Archive the raw analyzer JSON alongside the rendered report.
    with open(analyzer_path) as file:
        temp_id = add_item_fs(defaultdb['dbname'], defaultdb['reportscoll'], file.read(), parsed['task'], None, parsed['task'], 'application/json', datetime.now())
    # Every section below is best-effort: ignore_excpetion() (sic — project
    # helper name) suppresses failures so one missing artifact does not
    # abort the whole report.
    with ignore_excpetion():
        # Normal (viewport) screenshot, stored hex-encoded in TinyDB;
        # re-encoded to base64 for inline embedding in the HTML.
        screenshot_table = analyzer_db.table('screenshot_table')
        item = screenshot_table.search((lambda x: (x if ('normal_image' in x) else 0)))
        if item:
            bimage = b64encode(unhexlify(item[0]['normal_image'].encode('utf-8')))
            img_base64 = 'data:image/jpeg;base64, {}'.format(bimage.decode('utf-8', errors='ignore'))
            table += make_image_table_base64(ENV_JINJA2, img_base64, 'Screenshot')
            log_string('Parsed normal screenshot', task=parsed['task'])
    with ignore_excpetion():
        # Full-page screenshot.
        screenshot_table = analyzer_db.table('screenshot_table')
        item = screenshot_table.search((lambda x: (x if ('full_image' in x) else 0)))
        if item:
            bimage = b64encode(unhexlify(item[0]['full_image'].encode('utf-8')))
            img_base64 = 'data:image/jpeg;base64, {}'.format(bimage.decode('utf-8', errors='ignore'))
            table += make_image_table_base64(ENV_JINJA2, img_base64, 'Full Screenshot')
            log_string('Parsed full screenshot', task=parsed['task'])
    with ignore_excpetion():
        # Rendered network graph image.
        network_table = analyzer_db.table('network_table')
        item = network_table.search((lambda x: (x if ('circular_layout' in x) else 0)))
        if item:
            bimage = b64encode(unhexlify(item[0]['circular_layout'].encode('utf-8')))
            img_base64 = 'data:image/jpeg;base64, {}'.format(bimage.decode('utf-8', errors='ignore'))
            table += make_image_table_base64(ENV_JINJA2, img_base64, 'Network Graph')
            log_string('Parsed Network Graph', task=parsed['task'])
    with ignore_excpetion():
        words_table = analyzer_db.table('extracted_table')
        item = words_table.search((lambda x: (x if ('dns_records' in x) else 0)))
        if item:
            table += make_json_table_no_loop(ENV_JINJA2, item[0]['dns_records'], 'DNS Records')
    with ignore_excpetion():
        words_table = analyzer_db.table('extracted_table')
        item = words_table.search((lambda x: (x if ('Request_Headers' in x) else 0)))
        if item:
            table += make_json_table_no_loop(ENV_JINJA2, item[0]['Request_Headers'], 'Request Headers')
    with ignore_excpetion():
        words_table = analyzer_db.table('extracted_table')
        item = words_table.search((lambda x: (x if ('Response_Headers' in x) else 0)))
        if item:
            table += make_json_table_no_loop(ENV_JINJA2, item[0]['Response_Headers'], 'Response Headers')
    with ignore_excpetion():
        words_table = analyzer_db.table('extracted_table')
        item = words_table.search((lambda x: (x if ('Certificate' in x) else 0)))
        if item:
            table += make_json_table_no_loop(ENV_JINJA2, item[0]['Certificate'], 'Certificate')
    with ignore_excpetion():
        # OCR words. NOTE(review): the whole search result ``item`` is
        # rendered here, unlike other sections that use item[0][...] —
        # confirm this asymmetry is intentional.
        words_table = analyzer_db.table('words_table')
        item = words_table.search((lambda x: (x if ('all_words' in x) else 0)))
        if item:
            table += make_json_table_no_loop(ENV_JINJA2, item, 'OCR Words')
    with ignore_excpetion():
        extracted_table = analyzer_db.table('extracted_table')
        item = extracted_table.search((lambda x: (x if ('extracted_links' in x) else 0)))
        if item:
            table += make_json_table_no_loop(ENV_JINJA2, item[0]['extracted_links'], 'Extracted links')
    with ignore_excpetion():
        extracted_table = analyzer_db.table('extracted_table')
        item = extracted_table.search((lambda x: (x if ('extracted_scripts' in x) else 0)))
        if item:
            table += make_json_table_no_loop(ENV_JINJA2, item[0]['extracted_scripts'], 'Extracted scripts')
    with ignore_excpetion():
        analyzer_table = analyzer_db.table('analyzer_table')
        if (len(analyzer_table.all()) > 0):
            table += make_json_table(ENV_JINJA2, analyzer_table.all(), 'Browser')
    with ignore_excpetion():
        sniffer_table = sniffer_db.table('sniffer_table')
        if (len(sniffer_table.all()) > 0):
            table += make_json_table_no_loop(ENV_JINJA2, sniffer_table.all(), 'Sniffer')
    # Task logs lead the final document, followed by all collected sections.
    all_logs = find_item(defaultdb['dbname'], defaultdb['taskdblogscoll'], {'task': parsed['task']})
    if all_logs:
        full_table = make_text_table(ENV_JINJA2, all_logs['logs'], 'Logs')
        log_string('Adding logs', task=parsed['task'])
    full_table += table
    if (len(full_table) == 0):
        full_table = 'Error'
    # Render the report through the on-disk Jinja template and persist it.
    with open('template.html') as file:
        rendered = Template(file.read()).render(title=parsed['task'], content=full_table)
    temp_id = add_item_fs(defaultdb['dbname'], defaultdb['reportscoll'], rendered, parsed['task'], None, parsed['task'], 'text/html', datetime.now())
    # NOTE(review): if find_item returned a falsy ``all_logs``, the next line
    # raises on all_logs['logs'] despite the guarded block above — confirm
    # logs are guaranteed to exist for every task.
    temp_id = add_item_fs(defaultdb['dbname'], defaultdb['taskfileslogscoll'], '\n'.join(all_logs['logs']), 'log', None, parsed['task'], 'text/plain', datetime.now())
class TestExponential(BaseTestDistributionRandom):
    """Random-draw checks for pm.Exponential against NumPy's exponential."""

    # Configuration-only class: the base class discovers these attributes
    # and executes the checks listed in ``checks_to_run``.
    pymc_dist = pm.Exponential
    # Distribution parameterized by rate ``lam``.
    pymc_dist_params = {'lam': 10.0}
    # The underlying RV op is expected to receive the mean: mu = 1 / lam.
    expected_rv_op_params = {'mu': (1.0 / pymc_dist_params['lam'])}
    # NumPy's reference generator takes a scale, which is also 1 / lam.
    reference_dist_params = {'scale': (1.0 / pymc_dist_params['lam'])}
    reference_dist = seeded_numpy_distribution_builder('exponential')
    checks_to_run = ['check_pymc_params_match_rv_op', 'check_pymc_draws_match_reference']