class Account(rlp.Serializable, AccountAPI): fields = [('nonce', big_endian_int), ('balance', big_endian_int), ('storage_root', trie_root), ('code_hash', hash32)] def __init__(self, nonce: int=0, balance: int=0, storage_root: bytes=BLANK_ROOT_HASH, code_hash: bytes=EMPTY_SHA3, **kwargs: Any) -> None: super().__init__(nonce, balance, storage_root, code_hash, **kwargs) def __repr__(self) -> str: return f'Account(nonce={self.nonce}, balance={self.balance}, storage_root=0x{self.storage_root.hex()}, code_hash=0x{self.code_hash.hex()})'
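A minimal round-trip sketch for the Account record above; this is an illustration only, and assumes the `rlp` package plus the surrounding codebase's `BLANK_ROOT_HASH`/`EMPTY_SHA3` constants are in scope:
import rlp

acct = Account(nonce=1, balance=10**18)          # storage_root/code_hash keep their empty defaults
encoded = rlp.encode(acct)                       # rlp.Serializable encodes via the declared fields
decoded = rlp.decode(encoded, sedes=Account)
assert decoded.nonce == 1 and decoded.balance == 10**18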
class Player(GstPlay.Play): __gtype_name__ = 'SoundPlayer' def __init__(self, sound): super().__init__() self.sound = sound self.saved_volume = 0.0 self.set_volume(0) self.set_uri(self.sound.uri) self.name = self.sound.name self.prerolled = False self.pipeline = self.get_pipeline() bus = self.pipeline.get_bus() bus.add_signal_watch() bus.connect('message', self._on_bus_message) self.volume_hdlr = MainPlayer.get().connect('notify::volume', self._on_main_volume_changed) self.playing_hdlr = MainPlayer.get().connect('notify::playing', self._on_playing_changed) self.connect('notify::volume', self._on_volume_changed) def set_virtual_volume(self, volume: float): self.saved_volume = volume volume = (self.saved_volume * MainPlayer.get().volume) self.set_volume(volume) def remove(self): self.stop() MainPlayer.get().disconnect(self.volume_hdlr) MainPlayer.get().disconnect(self.playing_hdlr) def _on_playing_changed(self, _player, _volume): if (not self.__vol_zero()): if MainPlayer.get().playing: self.play() else: self.pause() def _on_volume_changed(self, _player, _volume): volume = (self.saved_volume * MainPlayer.get().volume) if ((volume > 0) and (self.get_volume() == 0.0)): self.set_volume(volume) return if self.__vol_zero(): self.pause() elif MainPlayer.get().playing: self.play() def _on_main_volume_changed(self, _player, _volume): if (not self.__vol_zero(self.sound.saved_volume)): self.set_virtual_volume(self.saved_volume) def _on_bus_message(self, _bus, message: Gst.Message): if message: if (message.type is Gst.MessageType.SEGMENT_DONE): self.pipeline.seek_simple(Gst.Format.TIME, Gst.SeekFlags.SEGMENT, 0) if (message.type is Gst.MessageType.ASYNC_DONE): if (not self.prerolled): self.pipeline.seek_simple(Gst.Format.TIME, (Gst.SeekFlags.FLUSH | Gst.SeekFlags.SEGMENT), 0) self.prerolled = True return True def __vol_zero(self, volume: (float | None)=None): volume = (volume if volume else self.get_volume()) return (True if (volume == 0) else False)
def find_test_files2(dir0, suffix0, dir1, suffix1): D = collections.defaultdict(list) for (root, dirnames, filenames) in os.walk(dir0): for filename in filenames: if ('.' in filename): (basename, suffix) = filename.rsplit('.', 1) if (suffix == suffix0): D[basename].append(os.path.join(root, filename)) for (root, dirnames, filenames) in os.walk(dir1): for filename in filenames: if ('.' in filename): (basename, suffix) = filename.rsplit('.', 1) if (suffix == suffix1): D[basename].append(os.path.join(root, filename)) for basename in D: if (len(D[basename]) > 1): (yield D[basename])
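A hypothetical invocation of find_test_files2, pairing `.in` inputs with `.out` expectations that share a basename (the directory names here are illustrative only):
import collections, os  # required by find_test_files2 itself

for group in find_test_files2('tests/inputs', 'in', 'tests/expected', 'out'):
    print(group)  # e.g. ['tests/inputs/case1.in', 'tests/expected/case1.out']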
class TextImageDataset(torch.utils.data.Dataset): def __init__(self, dir_path, prompt_filepath, transform=None): self.dir_path = dir_path path = pathlib.Path(dir_path) self.files = sorted([file for ext in IMAGE_EXTENSIONS for file in path.glob('*.{}'.format(ext))], key=(lambda x: int(os.path.basename(str(x)).split('.')[0]))) df = pd.read_csv(prompt_filepath) self.text_description = df['caption'] self.transform = transform self.tokenizer = open_clip.get_tokenizer('ViT-g-14') def __len__(self): return len(self.files) def __getitem__(self, idx): img_path = self.files[idx] image = Image.open(img_path) if (self.transform is not None): image = self.transform(image).squeeze().float() text = self.tokenizer(self.text_description[idx]).squeeze() return (image, text)
class EffiInitBlock(nn.Module): def __init__(self, in_channels, out_channels, bn_eps, activation, tf_mode): super(EffiInitBlock, self).__init__() self.tf_mode = tf_mode self.conv = conv3x3_block(in_channels=in_channels, out_channels=out_channels, stride=2, padding=(0 if tf_mode else 1), bn_eps=bn_eps, activation=activation) def forward(self, x): if self.tf_mode: x = F.pad(x, pad=calc_tf_padding(x, kernel_size=3, stride=2)) x = self.conv(x) return x
def test_use_scm_version_callable(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.delenv('SETUPTOOLS_SCM_DEBUG') p = ((tmp_path / 'sub') / 'package') p.mkdir(parents=True) p.joinpath('setup.py').write_text('from setuptools import setup\ndef vcfg():\n from setuptools_scm.version import guess_next_dev_version\n def vs(v):\n return guess_next_dev_version(v)\n return {"version_scheme": vs}\nsetup(use_scm_version=vcfg)\n', encoding='utf-8') p.joinpath('PKG-INFO').write_text('Version: 1.0', encoding='utf-8') res = run([sys.executable, 'setup.py', '--version'], p) assert (res.stdout == '1.0')
def assert_plugin_installation(subprocess_run, dependencies: list[str], *, verbosity=0): command = [sys.executable, '-u', '-m', 'pip', 'install', '--disable-pip-version-check', '--no-python-version-warning'] add_verbosity_flag(command, verbosity, adjustment=(- 1)) command.extend(dependencies) subprocess_run.assert_called_once_with(command, shell=False)
class _AbstractSignalBlocker(): def __init__(self, timeout=5000, raising=True): self._loop = qt_api.QtCore.QEventLoop() self.timeout = timeout self.signal_triggered = False self.raising = raising self._signals = None self._timeout_message = '' if ((timeout is None) or (timeout == 0)): self._timer = None else: self._timer = qt_api.QtCore.QTimer(self._loop) self._timer.setSingleShot(True) self._timer.setInterval(timeout) def wait(self): __tracebackhide__ = True if self.signal_triggered: return if ((self.timeout is None) and (not self._signals)): raise ValueError('No signals or timeout specified.') if (self._timer is not None): self._timer.timeout.connect(self._quit_loop_by_timeout) self._timer.start() if (self.timeout != 0): qt_api.exec(self._loop) if ((not self.signal_triggered) and self.raising): raise TimeoutError(self._timeout_message) def _quit_loop_by_timeout(self): try: self._cleanup() finally: self._loop.quit() def _cleanup(self): self._timeout_message = self._get_timeout_error_message() if (self._timer is not None): _silent_disconnect(self._timer.timeout, self._quit_loop_by_timeout) self._timer.stop() self._timer = None def _get_timeout_error_message(self): raise NotImplementedError def _extract_pyqt_signal_name(self, potential_pyqt_signal): signal_name = potential_pyqt_signal.signal if (not isinstance(signal_name, str)): raise TypeError("Invalid 'signal' attribute in {}. Expected str but got {}".format(signal_name, type(signal_name))) signal_name = signal_name.lstrip('2') return signal_name def _extract_signal_from_signal_tuple(self, potential_signal_tuple): if isinstance(potential_signal_tuple, tuple): if (len(potential_signal_tuple) != 2): raise ValueError("Signal tuple must have length of 2 (first element is the signal, the second element is the signal's name).") signal_tuple = potential_signal_tuple signal_name = signal_tuple[1] if (not isinstance(signal_name, str)): raise TypeError('Invalid type for provided signal name, expected str but got {}'.format(type(signal_name))) if (not signal_name): raise ValueError('The provided signal name may not be empty') return signal_name return '' def determine_signal_name(self, potential_signal_tuple): signal_name = self._extract_signal_from_signal_tuple(potential_signal_tuple) if (not signal_name): try: signal_name = self._extract_pyqt_signal_name(potential_signal_tuple) except AttributeError: signal_name = '' return signal_name def get_callback_name(self, callback): try: name = callback.__name__ except AttributeError: try: name = callback.func.__name__ except AttributeError: name = '' return name @staticmethod def get_signal_from_potential_signal_tuple(signal_tuple): if isinstance(signal_tuple, tuple): return signal_tuple[0] return signal_tuple def __enter__(self): return self def __exit__(self, type, value, traceback): __tracebackhide__ = True if (value is None): self.wait()
class BoxList(list): def __init__(self, iterable=None, box_class=Box, **box_options): self.box_class = box_class self.box_options = box_options self.box_org_ref = (id(iterable) if iterable else 0) if iterable: for x in iterable: self.append(x) if box_options.get('frozen_box'): def frozen(*args, **kwargs): raise BoxError('BoxList is frozen') for method in ['append', 'extend', 'insert', 'pop', 'remove', 'reverse', 'sort']: self.__setattr__(method, frozen) def __delitem__(self, key): if self.box_options.get('frozen_box'): raise BoxError('BoxList is frozen') super(BoxList, self).__delitem__(key) def __setitem__(self, key, value): if self.box_options.get('frozen_box'): raise BoxError('BoxList is frozen') super(BoxList, self).__setitem__(key, value) def append(self, p_object): if isinstance(p_object, dict): try: p_object = self.box_class(p_object, **self.box_options) except AttributeError as err: if ('box_class' in self.__dict__): raise err elif isinstance(p_object, list): try: p_object = (self if (id(p_object) == self.box_org_ref) else BoxList(p_object)) except AttributeError as err: if ('box_org_ref' in self.__dict__): raise err super(BoxList, self).append(p_object) def extend(self, iterable): for item in iterable: self.append(item) def insert(self, index, p_object): if isinstance(p_object, dict): p_object = self.box_class(p_object, **self.box_options) elif isinstance(p_object, list): p_object = (self if (id(p_object) == self.box_org_ref) else BoxList(p_object)) super(BoxList, self).insert(index, p_object) def __repr__(self): return '<BoxList: {0}>'.format(self.to_list()) def __str__(self): return str(self.to_list()) def __copy__(self): return BoxList((x for x in self), self.box_class, **self.box_options) def __deepcopy__(self, memodict=None): out = self.__class__() memodict = (memodict or {}) memodict[id(self)] = out for k in self: out.append(copy.deepcopy(k)) return out def __hash__(self): if self.box_options.get('frozen_box'): hashing = 98765 hashing ^= hash(tuple(self)) return hashing raise TypeError("unhashable type: 'BoxList'") def to_list(self): new_list = [] for x in self: if (x is self): new_list.append(new_list) elif isinstance(x, Box): new_list.append(x.to_dict()) elif isinstance(x, BoxList): new_list.append(x.to_list()) else: new_list.append(x) return new_list def to_json(self, filename=None, encoding='utf-8', errors='strict', multiline=False, **json_kwargs): if (filename and multiline): lines = [_to_json(item, filename=False, encoding=encoding, errors=errors, **json_kwargs) for item in self] with open(filename, 'w', encoding=encoding, errors=errors) as f: f.write(('\n'.join(lines).decode('utf-8') if (sys.version_info < (3, 0)) else '\n'.join(lines))) else: return _to_json(self.to_list(), filename=filename, encoding=encoding, errors=errors, **json_kwargs) @classmethod def from_json(cls, json_string=None, filename=None, encoding='utf-8', errors='strict', multiline=False, **kwargs): bx_args = {} for arg in kwargs.copy(): if (arg in BOX_PARAMETERS): bx_args[arg] = kwargs.pop(arg) data = _from_json(json_string, filename=filename, encoding=encoding, errors=errors, multiline=multiline, **kwargs) if (not isinstance(data, list)): raise BoxError('json data not returned as a list, but rather a {0}'.format(type(data).__name__)) return cls(data, **bx_args) if yaml_support: def to_yaml(self, filename=None, default_flow_style=False, encoding='utf-8', errors='strict', **yaml_kwargs): return _to_yaml(self.to_list(), filename=filename, default_flow_style=default_flow_style, encoding=encoding, 
errors=errors, **yaml_kwargs) @classmethod def from_yaml(cls, yaml_string=None, filename=None, encoding='utf-8', errors='strict', loader=yaml.SafeLoader, **kwargs): bx_args = {} for arg in kwargs.copy(): if (arg in BOX_PARAMETERS): bx_args[arg] = kwargs.pop(arg) data = _from_yaml(yaml_string=yaml_string, filename=filename, encoding=encoding, errors=errors, Loader=loader, **kwargs) if (not isinstance(data, list)): raise BoxError('yaml data not returned as a list, but rather a {0}'.format(type(data).__name__)) return cls(data, **bx_args) def box_it_up(self): for v in self: if (hasattr(v, 'box_it_up') and (v is not self)): v.box_it_up()
def train(args): hvd.init() if args.cuda: torch.cuda.set_device(hvd.local_rank()) kwargs = ({'num_workers': args.num_workers, 'pin_memory': True} if args.cuda else {}) train_dataset = VCDBPairDataset(annotation_path=args.annotation_path, feature_path=args.feature_path, padding_size=args.padding_size, random_sampling=args.random_sampling, neg_num=args.neg_num) train_sampler = DistributedSampler(train_dataset, num_replicas=hvd.size(), rank=hvd.rank(), shuffle=True) train_loader = DataLoader(train_dataset, batch_size=args.batch_sz, sampler=train_sampler, drop_last=True, **kwargs) model = TCA(feature_size=args.pca_components, nlayers=args.num_layers, dropout=0.2) model = MoCo(model, dim=args.output_dim, K=args.moco_k, m=args.moco_m, T=args.moco_t) lr_scaler = (hvd.size() if (not args.use_adasum) else 1) if args.cuda: model.cuda() if (args.use_adasum and hvd.nccl_built()): lr_scaler = hvd.local_size() criterion = utils.CircleLoss(m=0.25, gamma=256).cuda() if False: optimizer = torch.optim.SGD(model.parameters(), lr=(args.learning_rate * lr_scaler), momentum=args.momentum, weight_decay=args.weight_decay) else: optimizer = torch.optim.Adam(model.parameters(), lr=(args.learning_rate * lr_scaler), weight_decay=args.weight_decay) hvd.broadcast_parameters(model.state_dict(), root_rank=0) hvd.broadcast_optimizer_state(optimizer, root_rank=0) compression = (hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none) optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters(), compression=compression, op=(hvd.Adasum if args.use_adasum else hvd.Average)) scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs) start = datetime.now() model.train() for epoch in range(1, (args.epochs + 1)): train_sampler.set_epoch(epoch) for (batch_idx, (a, p, n, len_a, len_p, len_n)) in enumerate(train_loader): if args.cuda: (a, p, n) = (a.cuda(), p.cuda(), n.cuda()) (len_a, len_p, len_n) = (len_a.cuda(), len_p.cuda(), len_n.cuda()) (output, target) = model(a, p, n, len_a, len_p, len_n) loss = criterion(output, target) optimizer.zero_grad() loss.backward() optimizer.step() if ((((batch_idx + 1) % args.print_freq) == 0) and (hvd.rank() == 0)): print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, ((batch_idx + 1) * len(a)), len(train_sampler), (((100.0 * (batch_idx + 1)) * len(a)) / len(train_sampler)), loss.item())) scheduler.step() if ((hvd.rank() == 0) and ((epoch % 10) == 0)): print(('Epoch complete in: ' + str((datetime.now() - start)))) print('Saving model...') torch.save(model.encoder_q.state_dict(), 'models/model.pth')
def evaluate(args, accelerator, dataloader, eval_set, model, checkpoint, has_labels=True, write_to_file=True): num_examples = args.num_examples[eval_set] eval_metric = None completed_steps = 0 eval_loss = 0.0 all_predictions = None all_references = None all_probabilities = None if has_labels: eval_metric = load_metric(args.eval_metric) eval_results = {} model.eval() for (_, batch) in enumerate(dataloader): with torch.no_grad(): outputs = model(**batch) eval_loss += outputs.loss.item() logits = outputs.logits predictions = (logits.argmax(dim=(- 1)) if (not args.is_regression) else logits.squeeze()) predictions = accelerator.gather(predictions) if (all_predictions is None): all_predictions = predictions.detach().cpu().numpy() else: all_predictions = np.append(all_predictions, predictions.detach().cpu().numpy(), axis=0) if (not args.is_regression): probabilities = logits.softmax(dim=(- 1)).max(dim=(- 1)).values probabilities = accelerator.gather(probabilities) if (all_probabilities is None): all_probabilities = probabilities.detach().cpu().numpy() else: all_probabilities = np.append(all_probabilities, probabilities.detach().cpu().numpy(), axis=0) if has_labels: references = batch['labels'] references = accelerator.gather(references) if (all_references is None): all_references = references.detach().cpu().numpy() else: all_references = np.append(all_references, references.detach().cpu().numpy(), axis=0) eval_metric.add_batch(predictions=predictions, references=references) completed_steps += 1 if has_labels: eval_results.update(eval_metric.compute()) eval_results['completed_steps'] = completed_steps eval_results['avg_eval_loss'] = (eval_loss / completed_steps) if write_to_file: accelerator.wait_for_everyone() if accelerator.is_main_process: results_file = os.path.join(args.output_dir, f'{eval_set}_results_{checkpoint}.json') with open(results_file, 'w') as f: json.dump(eval_results, f, indent=4, sort_keys=True) if write_to_file: accelerator.wait_for_everyone() if accelerator.is_main_process: output_file = os.path.join(args.output_dir, f'{eval_set}_output_{checkpoint}.csv') if (not args.is_regression): assert (len(all_predictions) == len(all_probabilities)) df = pd.DataFrame(list(zip(all_predictions, all_probabilities)), columns=['prediction', 'probability']) else: df = pd.DataFrame(all_predictions, columns=['prediction']) df = df.head(num_examples) df.to_csv(output_file, header=True, index=False) return eval_results
def main(args): cfg = setup_cfg(args) if (cfg.SEED >= 0): print('Setting fixed seed: {}'.format(cfg.SEED)) set_random_seed(cfg.SEED) setup_logger(cfg.OUTPUT_DIR) if (torch.cuda.is_available() and cfg.USE_CUDA): torch.backends.cudnn.benchmark = True print_args(args, cfg) print('Collecting env info ...') print('** System info **\n{}\n'.format(collect_env_info())) trainer = build_trainer(cfg) if args.eval_only: trainer.load_model(args.model_dir, epoch=args.load_epoch) trainer.test() return if (not args.no_train): trainer.train()
class LAVertex(): def __init__(self, point, edge_left, edge_right, direction_vectors=None): self.point = point self.edge_left = edge_left self.edge_right = edge_right self.prev = None self.next = None self.lav = None self._valid = True creator_vectors = ((edge_left.v.normalized() * (- 1)), edge_right.v.normalized()) if (direction_vectors is None): direction_vectors = creator_vectors self._is_reflex = (cross(*direction_vectors) < 0) self._bisector = Ray2(self.point, (op.add(*creator_vectors) * ((- 1) if self.is_reflex else 1))) @property def bisector(self): return self._bisector @property def is_reflex(self): return self._is_reflex @property def original_edges(self): return self.lav._slav._original_edges def next_event(self): events = [] if self.is_reflex: for edge in self.original_edges: if ((edge.edge == self.edge_left) or (edge.edge == self.edge_right)): continue leftdot = abs(self.edge_left.v.normalized().dot(edge.edge.v.normalized())) rightdot = abs(self.edge_right.v.normalized().dot(edge.edge.v.normalized())) selfedge = (self.edge_left if (leftdot < rightdot) else self.edge_right) i = Line2(selfedge).intersect(Line2(edge.edge)) if ((i is not None) and (not approximately_equals(i, self.point))): linvec = (self.point - i).normalized() edvec = edge.edge.v.normalized() if (linvec.dot(edvec) < 0): edvec = (- edvec) bisecvec = (edvec + linvec) if (abs(bisecvec) == 0): continue bisector = Line2(i, bisecvec) b = bisector.intersect(self.bisector) if (b is None): continue xleft = (cross(edge.bisector_left.v.normalized(), (b - edge.bisector_left.p).normalized()) > 0) xright = (cross(edge.bisector_right.v.normalized(), (b - edge.bisector_right.p).normalized()) < 0) xedge = (cross(edge.edge.v.normalized(), (b - edge.edge.p).normalized()) < 0) if (not (xleft and xright and xedge)): continue events.append(SplitEvent(Line2(edge.edge).distance(b), b, 0, self, edge.edge)) i_prev = self.bisector.intersect(self.prev.bisector) i_next = self.bisector.intersect(self.next.bisector) if (i_prev is not None): events.append(EdgeEvent(Line2(self.edge_left).distance(i_prev), i_prev, 1, self.prev, self)) if (i_next is not None): events.append(EdgeEvent(Line2(self.edge_right).distance(i_next), i_next, 1, self, self.next)) if (not events): return None ev = min(events, key=(lambda event: self.point.distance(event.intersection_point))) return ev def invalidate(self): if (self.lav is not None): self.lav.invalidate(self) else: self._valid = False @property def is_valid(self): return self._valid def __str__(self): return 'Vertex ({:.2f};{:.2f})'.format(self.point.x, self.point.y) def __lt__(self, other): if isinstance(other, LAVertex): return (self.point.x < other.point.x) def __repr__(self): return 'Vertex ({}) ({:.2f};{:.2f}), bisector {}, edges {} {}'.format(('reflex' if self.is_reflex else 'convex'), self.point.x, self.point.y, self.bisector, self.edge_left, self.edge_right)
@click.command() @click.option('--full-report/--short-report', default=False, cls=MutuallyExclusiveOption, mutually_exclusive=['output'], with_values={'output': ['json', 'bare']}, help='Full reports include a security advisory (if available). Default: --short-report') @click.option('--output', '-o', type=click.Choice(['screen', 'text', 'json', 'bare'], case_sensitive=False), default='screen', callback=active_color_if_needed) @click.option('file', '--file', '-f', type=click.File(), required=True, help='Read input from an insecure report file. Default: empty') @click.pass_context def review(ctx, full_report, output, file): LOG.info('Running check command') report = {} try: report = read_vulnerabilities(file) except SafetyError as e: LOG.exception('Expected SafetyError happened: %s', e) output_exception(e, exit_code_output=True) except Exception as e: LOG.exception('Unexpected Exception happened: %s', e) exception = (e if isinstance(e, SafetyException) else SafetyException(info=e)) output_exception(exception, exit_code_output=True) params = {'file': file} (vulns, remediations, packages) = safety.review(report, params=params) announcements = safety.get_announcements(key=None, proxy=None, telemetry=ctx.parent.telemetry) output_report = SafetyFormatter(output=output).render_vulnerabilities(announcements, vulns, remediations, full_report, packages) found_vulns = list(filter((lambda v: (not v.ignored)), vulns)) click.secho(output_report, nl=should_add_nl(output, found_vulns), file=sys.stdout) sys.exit(EXIT_CODE_OK)
class SEConvOp(BaseOp): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, transposed=False, dropout_rate=0, ops_order='weight_norm'): super().__init__(in_channels, out_channels, dropout_rate, ops_order=(ops_order if (stride > 1) else 'weight')) self.stride = stride padding = max(0, ceil(((((dilation * (kernel_size - 1)) - stride) + 1) / 2))) self.avg_pool = nn.AdaptiveAvgPool3d(1) self.fc = nn.Sequential(nn.Linear(in_channels, 1), nn.ReLU(), nn.Linear(1, out_channels), nn.Sigmoid()) if (stride > 1): if transposed: self.conv = nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, output_padding=(0 if (stride == 1) else 1)) else: self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding) def weight_call(self, x): (batch_size, channels) = x.size()[:2] y = self.avg_pool(x).view(batch_size, channels) y = self.fc(y).view(batch_size, channels, 1, 1, 1) res = (self.conv((x * y)) if (self.stride >= 2) else (x * y)) return res
class ThemedWidget(): size: int theme: str primary_color: str secondary_color: str def __init__(self, theme: str, color_palette: Tuple[(str, str)], options: Dict=None) -> None: if options: self.setup_options(options) else: self.size = 5 self.apply_theme(theme) self.apply_color_palette(color_palette) def setup_options(self, options: Dict) -> None: if ('size' in options): self.setup_size(options['size']) def setup_size(self, size: int) -> None: self.size = size def apply_theme(self, theme: str) -> None: self.theme = theme def apply_color_palette(self, color_palette: Tuple[(str, str)]) -> None: (self.primary_color, self.secondary_color) = color_palette
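A small usage sketch: without an `options` dict the size falls back to the hard-coded default of 5, while an explicit `size` option overrides it:
w = ThemedWidget('dark', ('#222222', '#eeeeee'))
assert w.size == 5
assert (w.primary_color, w.secondary_color) == ('#222222', '#eeeeee')
w2 = ThemedWidget('light', ('#ffffff', '#000000'), options={'size': 12})
assert w2.size == 12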
def np_calculate_dist(anchor, positive): d1 = np.sum((anchor * anchor), axis=1).reshape(1, 1) d2 = np.sum((positive * positive), axis=1).reshape((- 1), 1) eps = 1e-12 a = d1.repeat(int(positive.shape[0])).reshape(1, (- 1)) b = d2.T c = (2.0 * np.dot(anchor, positive.T)) return np.sqrt((np.abs(((a + b) - c)) + eps))
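np_calculate_dist computes Euclidean distances from a single anchor row to every row of `positive` via the ||a||^2 + ||b||^2 - 2*a.b expansion; a quick cross-check against scipy (assuming scipy is installed):
import numpy as np
from scipy.spatial.distance import cdist

anchor = np.random.rand(1, 16)
positive = np.random.rand(8, 16)
assert np.allclose(np_calculate_dist(anchor, positive), cdist(anchor, positive), atol=1e-5)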
class CombinedController(DictController): def _initialize_action_space(self): super()._initialize_action_space() (self.action_space, self.action_mapping) = flatten_action_spaces(self.action_space.spaces) def set_action(self, action: np.ndarray): action_dim = self.action_space.shape[0] assert (action.shape == (action_dim,)), (action.shape, action_dim) for (uid, controller) in self.controllers.items(): (start, end) = self.action_mapping[uid] controller.set_action(action[start:end]) def to_action_dict(self, action: np.ndarray): action_dim = self.action_space.shape[0] assert (action.shape == (action_dim,)), (action.shape, action_dim) action_dict = {} for (uid, controller) in self.controllers.items(): (start, end) = self.action_mapping[uid] action_dict[uid] = action[start:end] return action_dict def from_action_dict(self, action_dict: dict): return np.hstack([action_dict[uid] for uid in self.controllers])
class ExceptionSaver(): def __enter__(self): return self def __exit__(self, type, exc, tb): if (not exc): return self._saved = UnpickleableException.dump(type, exc) self._tb = tb return True def resume(self): if ('_saved' not in vars(self)): return (type, exc) = map(pickle.loads, self._saved) raise exc.with_traceback(self._tb)
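A usage sketch of the save-now, resume-later pattern; it assumes the companion `UnpickleableException` helper from the same module, as in setuptools' sandbox code:
saver = ExceptionSaver()
with saver:
    raise ValueError('boom')      # swallowed: __exit__ pickles the exception instead
try:
    saver.resume()                # re-raises the saved exception with its original traceback
except ValueError as e:
    print('resumed:', e)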
class TransformFactory(object): current_transforms = {'default': Transform, 'simple_qa': QATransform, 'multi_choice_qa': MultiChoiceQATransform, 'db': DBTransform, 'table': TableTransform} @staticmethod def get_transform(transform: str) -> Type[Transform]: if (transform in TransformFactory.current_transforms): return TransformFactory.current_transforms[transform] else: try: from pydoc import locate transform_class = locate('{}.{}'.format(HUB_SOURCE, transform)) return transform_class except Exception: return globals()[transform]
class ScrimsSlotmSelector(discord.ui.Select): def __init__(self, records: List[ScrimsSlotManager]): _o = [] for record in records: _o.append(discord.SelectOption(label=getattr(record.main_channel, 'name', 'channel-not-found'), value=record.id, description=truncate_string(f"Scrims: {', '.join((str(_) for _ in record.scrim_ids))}", 100), emoji=emote.TextChannel)) super().__init__(placeholder='Select a slot-manager channel ...', options=_o) async def callback(self, interaction: discord.Interaction): (await interaction.response.defer()) self.view.custom_id = self.values[0] self.view.stop()
def _scan_qrcode_using_zbar(*, parent: Optional[QWidget], config: 'SimpleConfig', callback: Callable[([bool, str, Optional[str]], None)]) -> None: from electrum import qrscanner data = None try: data = qrscanner.scan_barcode(config.get_video_device()) except UserFacingException as e: success = False error = str(e) except BaseException as e: _logger.exception('camera error') success = False error = repr(e) else: success = True error = '' callback(success, error, data)
class OperatingModes(): def __init__(self, default_mode): self.default_mode = default_mode self.named_modes = ['current', 'voltage', 'power', 'differential power', 'explicit power', 'resistance', 'differential resistance', 'explicit resistance', 'CCCV'] def __contains__(self, value): named_mode = (value in self.named_modes) function = isinstance(value, types.FunctionType) return (named_mode or function) def __getitem__(self, value): return self.default_mode
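Behavior sketch: membership accepts both the named modes and arbitrary functions, while indexing always returns the default mode regardless of the key:
import types  # the class relies on types.FunctionType

modes = OperatingModes('current')
assert 'voltage' in modes
def ramp(t): return 0.1 * t
assert ramp in modes                    # plain functions count as modes too
assert modes['anything'] == 'current'   # __getitem__ ignores its argument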
class GroupDeployTokenManager(RetrieveMixin, CreateMixin, DeleteMixin, RESTManager): _path = '/groups/{group_id}/deploy_tokens' _from_parent_attrs = {'group_id': 'id'} _obj_cls = GroupDeployToken _create_attrs = RequiredOptional(required=('name', 'scopes'), optional=('expires_at', 'username')) _list_filters = ('scopes',) _types = {'scopes': types.ArrayAttribute} def get(self, id: Union[(str, int)], lazy: bool=False, **kwargs: Any) -> GroupDeployToken: return cast(GroupDeployToken, super().get(id=id, lazy=lazy, **kwargs))
@contextmanager def generate_guarded(mod: str, target: str, ignore_errors: bool=True, verbose: bool=False) -> Iterator[None]: if verbose: print(f'Processing {mod}') try: (yield) except Exception as e: if (not ignore_errors): raise e else: print('Stub generation failed for', mod, file=sys.stderr) else: if verbose: print(f'Created {target}')
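A usage sketch, assuming the @contextmanager reading of the generator above (the `render_stub` call is purely hypothetical):
with generate_guarded('pkg.mod', 'out/pkg/mod.pyi', ignore_errors=True, verbose=True):
    render_stub()  # any exception raised here is logged to stderr rather than propagated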
@cmd2.with_default_category('Fruits') class CommandSetA(cmd2.CommandSet): def do_apple(self, statement: cmd2.Statement): self._cmd.poutput('Apple!') def do_banana(self, statement: cmd2.Statement): self._cmd.poutput('Banana!!') cranberry_parser = cmd2.Cmd2ArgumentParser() cranberry_parser.add_argument('arg1', choices=['lemonade', 'juice', 'sauce']) @cmd2.with_argparser(cranberry_parser, with_unknown_args=True) def do_cranberry(self, ns: argparse.Namespace, unknown: List[str]): self._cmd.poutput('Cranberry {}!!'.format(ns.arg1)) if (unknown and len(unknown)): self._cmd.poutput(('Unknown: ' + ', '.join((['{}'] * len(unknown))).format(*unknown))) self._cmd.last_result = {'arg1': ns.arg1, 'unknown': unknown} def help_cranberry(self): self._cmd.stdout.write('This command does diddly squat...\n') @cmd2.with_argument_list @cmd2.with_category('Also Alone') def do_durian(self, args: List[str]): self._cmd.poutput('{} Arguments: '.format(len(args))) self._cmd.poutput(', '.join((['{}'] * len(args))).format(*args)) def complete_durian(self, text: str, line: str, begidx: int, endidx: int) -> List[str]: return self._cmd.basic_complete(text, line, begidx, endidx, ['stinks', 'smells', 'disgusting']) elderberry_parser = cmd2.Cmd2ArgumentParser() elderberry_parser.add_argument('arg1') @cmd2.with_category('Alone') @cmd2.with_argparser(elderberry_parser) def do_elderberry(self, ns: argparse.Namespace): self._cmd.poutput('Elderberry {}!!'.format(ns.arg1))
class WebhookPathFinder(BasePathFinder): def _get_paths_iter(self, name: str) -> Iterator[Path]: webhooks = (self.spec / 'webhooks') if (not webhooks.exists()): raise PathsNotFound(webhooks.as_uri()) for (webhook_name, path) in list(webhooks.items()): if (name == webhook_name): path_result = TemplateResult(webhook_name, {}) (yield Path(path, path_result)) def _get_servers_iter(self, name: str, operations_iter: Iterator[PathOperation]) -> Iterator[PathOperationServer]: for (path, operation, path_result) in operations_iter: (yield PathOperationServer(path, operation, None, path_result, {}))
@pytest.mark.parametrize('mean, sigma, size', [(np.array(100, dtype=config.floatX), np.array(0.01, dtype=config.floatX), None), (np.array(100, dtype=config.floatX), np.array(0.01, dtype=config.floatX), []), (np.full((1, 2), 100, dtype=config.floatX), np.array(0.01, dtype=config.floatX), None)]) def test_normal_samples(mean, sigma, size): compare_sample_values(normal, mean, sigma, size=size)
@admin.register(SponsorEmailNotificationTemplate) class SponsorEmailNotificationTemplateAdmin(BaseEmailTemplateAdmin): def get_form(self, request, obj=None, **kwargs): help_texts = {'content': SPONSOR_TEMPLATE_HELP_TEXT} kwargs.update({'help_texts': help_texts}) return super().get_form(request, obj, **kwargs)
def test_win_defaults(windows, no_xdg_envs): pp = platform.get_platform_paths('pypyr', 'config.yaml') assert (pp == platform.PlatformPaths(config_user=Path(HOME, '.config/pypyr/config.yaml'), config_common=[Path('C:/ProgramData1/pypyr/config.yaml')], data_dir_user=Path(HOME, '.local/share/pypyr'), data_dir_common=[Path('C:/ProgramData1/pypyr/')]))
@torch.no_grad() def validate_mrc(model, val_loader): LOGGER.info('start running MRC validation...') val_loss = 0 n_feat = 0 st = time.time() tot_score = 0 for (i, batch) in enumerate(val_loader): (view_logits, view_targets, _, _) = model(batch, task='mrc', compute_loss=False) view_logprobs = F.log_softmax(view_logits, dim=(- 1)) loss = F.kl_div(view_logprobs, view_targets, reduction='sum') tot_score += compute_accuracy_for_soft_targets(view_logits, view_targets) val_loss += loss.item() n_feat += batch['vp_view_mrc_masks'].sum().item() val_loss = sum(all_gather(val_loss)) tot_score = sum(all_gather(tot_score)) n_feat = sum(all_gather(n_feat)) tot_time = (time.time() - st) val_loss /= n_feat val_acc = (tot_score / n_feat) val_log = {'loss': val_loss, 'acc': val_acc, 'feat_per_s': (n_feat / tot_time)} LOGGER.info(f'validation finished in {int(tot_time)} seconds, score: {(val_acc * 100):.2f}') return val_log
def retrieve_nfrms_from_gulp(gulp_dir): id2nfrms = dict() gulp = GulpDirectory(gulp_dir) pbar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar()], maxval=gulp.num_chunks).start() i = 0 for chunk in gulp: for (frames, meta) in chunk: id2nfrms[meta['id']] = len(frames) pbar.update((i + 1)) i += 1 return id2nfrms
@pytest.mark.parametrize('version, expected', [('1.2a1', '1.2a2'), ('2!1.2a1', '2!1.2a2'), ('1.2dev0', '1.2a0'), ('1.2a1.dev0', '1.2a1'), ('1.2a1.post1.dev0', '1.2a2')]) def test_next_prerelease(version: str, expected: str) -> None: v = PEP440Version.parse(version) assert (v.next_prerelease().text == expected)
def test_abi_vision_decoder(): model = ABIVisionDecoder(in_channels=128, num_channels=16, max_seq_len=10, use_result=None) x = torch.randn(2, 128, 8, 32) result = model(x, None) assert (result['feature'].shape == torch.Size([2, 10, 128])) assert (result['logits'].shape == torch.Size([2, 10, 90])) assert (result['attn_scores'].shape == torch.Size([2, 10, 8, 32]))
def main(): import argparse parser = argparse.ArgumentParser(description='Serial port enumeration') parser.add_argument('regexp', nargs='?', help='only show ports that match this regex') parser.add_argument('-v', '--verbose', action='store_true', help='show more messages') parser.add_argument('-q', '--quiet', action='store_true', help='suppress all messages') parser.add_argument('-n', type=int, help='only output the N-th entry') parser.add_argument('-s', '--include-links', action='store_true', help='include entries that are symlinks to real devices') args = parser.parse_args() hits = 0 if args.regexp: if (not args.quiet): sys.stderr.write('Filtered list with regexp: {!r}\n'.format(args.regexp)) iterator = sorted(grep(args.regexp, include_links=args.include_links)) else: iterator = sorted(comports(include_links=args.include_links)) for (n, (port, desc, hwid)) in enumerate(iterator, 1): if ((args.n is None) or (args.n == n)): sys.stdout.write('{:20}\n'.format(port)) if args.verbose: sys.stdout.write(' desc: {}\n'.format(desc)) sys.stdout.write(' hwid: {}\n'.format(hwid)) hits += 1 if (not args.quiet): if hits: sys.stderr.write('{} ports found\n'.format(hits)) else: sys.stderr.write('no ports found\n')
@disable_existing_mirrors @mock.patch('util.repomirror.skopeomirror.SkopeoMirror.run_skopeo') def test_successful_mirror_verbose_logs(run_skopeo_mock, initialized_db, app, monkeypatch): (mirror, repo) = create_mirror_repo_robot(['latest', '7.1']) skopeo_calls = [{'args': ['/usr/bin/skopeo', '--debug', 'list-tags', '--tls-verify=True', 'docker://registry.example.com/namespace/repository'], 'results': SkopeoResults(True, [], '{"Tags": ["latest"]}', '')}, {'args': ['/usr/bin/skopeo', '--debug', 'copy', '--all', '--remove-signatures', '--src-tls-verify=True', '--dest-tls-verify=True', '--dest-creds', ('%s:%s' % (mirror.internal_robot.username, retrieve_robot_token(mirror.internal_robot))), 'docker://registry.example.com/namespace/repository:latest', 'docker://localhost:5000/mirror/repo:latest'], 'results': SkopeoResults(True, [], 'Success', '')}] def skopeo_test(args, proxy): try: skopeo_call = skopeo_calls.pop(0) assert (args == skopeo_call['args']) assert (proxy == {}) return skopeo_call['results'] except Exception as e: skopeo_calls.append(skopeo_call) raise e run_skopeo_mock.side_effect = skopeo_test monkeypatch.setenv('DEBUGLOG', 'true') worker = RepoMirrorWorker() worker._process_mirrors() assert ([] == skopeo_calls)
def get_file_paths(): test_files = [] for (_, _, files) in os.walk(_EXAMPLES_PATH, topdown=True): for filename in files: if ((filename not in IGNORED_TESTS) and filename.endswith('.py')): test_files.append((_EXAMPLES_PATH + filename)) return test_files
def main(): (path, output) = getParams() mrc_header = io_file.read_mrc_header(path) voxel_spacing_in_nm = ((mrc_header['MRC']['xlen'] / mrc_header['MRC']['nx']) / 10) print(('voxel_spacing_in_nm: %s' % voxel_spacing_in_nm)) sigma1 = 2 try: sigma1 = max(int((7 / voxel_spacing_in_nm)), sigma1) except Exception as e: pass print(('sigma1=%d' % sigma1)) partition_op = {'nonoverlap_width': (sigma1 * 20), 'overlap_width': (sigma1 * 10), 'save_vg': False} result = picking(path, s1=sigma1, s2=(sigma1 * 1.1), t=3, find_maxima=False, partition_op=partition_op, multiprocessing_process_num=100) print(('%d particles detected, containing redundant peaks' % len(result))) result = do_filter(pp=result, peak_dist_min=sigma1, op=None) print(('peak number reduced to %d' % len(result))) pprint(result[:5]) json_data = [] for i in range(len(result)): loc_np = result[i]['x'] loc = [] for j in range(len(loc_np)): loc.append(loc_np[j].tolist()) json_data.append({'peak': {'loc': loc}}) with open(output, 'w') as f: json.dump(json_data, f)
class ConnectionManager(object): def __init__(self, sock): config = H2Configuration(client_side=False) self.sock = sock self.conn = H2Connection(config=config) def run_forever(self): self.conn.initiate_connection() self.sock.sendall(self.conn.data_to_send()) while True: data = self.sock.recv(65535) if (not data): break events = self.conn.receive_data(data) for event in events: if isinstance(event, RequestReceived): self.request_received(event.headers, event.stream_id) elif isinstance(event, DataReceived): self.conn.reset_stream(event.stream_id) self.sock.sendall(self.conn.data_to_send()) def request_received(self, headers, stream_id): headers = collections.OrderedDict(headers) data = json.dumps({'headers': headers}, indent=4).encode('utf-8') response_headers = ((':status', '200'), ('content-type', 'application/json'), ('content-length', str(len(data))), ('server', 'eventlet-h2')) self.conn.send_headers(stream_id, response_headers) self.conn.send_data(stream_id, data, end_stream=True)
def make_sockaddr_in6(): class in6_addr(ctypes.BigEndianStructure): _fields_ = (('Byte', (ctypes.c_uint8 * 16)),) class sockaddr_in6(ctypes.BigEndianStructure): _fields_ = (('sin6_family', ctypes.c_int16), ('sin6_port', ctypes.c_uint16), ('sin6_flowinfo', ctypes.c_uint32), ('sin6_addr', in6_addr), ('sin6_scope_id', ctypes.c_uint32)) return sockaddr_in6
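A sketch that fills the returned struct for [::1]:8080; since the fields are declared big-endian, the assigned port is stored in network byte order directly:
import ctypes
import socket

sockaddr_in6 = make_sockaddr_in6()
sa = sockaddr_in6(sin6_family=socket.AF_INET6, sin6_port=8080)
ctypes.memmove(sa.sin6_addr.Byte, socket.inet_pton(socket.AF_INET6, '::1'), 16)
assert bytes(sa.sin6_addr.Byte)[-1] == 1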
class PythonEnvironment(BaseEnvironment): def __init__(self, num_agents=2, max_episode_length=200, env_task_set=None, observation_types=None, agent_goals=None, output_folder=None, seed=123): self.seed = seed random.seed(seed) np.random.seed(seed) self.steps = 0 self.env_id = None self.max_ids = {} self.python_graph = None self.env_task_set = env_task_set self.num_agents = num_agents self.max_episode_length = max_episode_length self.output_folder = output_folder if (observation_types is not None): self.observation_types = observation_types else: self.observation_types = ['mcts' for _ in range(num_agents)] if (agent_goals is not None): self.agent_goals = agent_goals else: self.agent_goals = ['full' for _ in range(num_agents)] (self.task_goal, self.goal_spec) = ({0: {}, 1: {}}, {0: {}, 1: {}}) self.changed_graph = False self.rooms = None self.id2node = None self.offset_cameras = None self.env = vh_env.VhGraphEnv(n_chars=self.num_agents) def reward(self): reward = 0.0 done = True (satisfied, unsatisfied) = utils.check_progress(self.get_graph(), self.goal_spec[0]) for (key, value) in satisfied.items(): (preds_needed, mandatory, reward_per_pred) = self.goal_spec[0][key] value_pred = min(len(value), preds_needed) reward += (value_pred * reward_per_pred) if (mandatory and (unsatisfied[key] > 0)): done = False return (reward, done, {}) def step(self, action_dict): new_action_dict = {char_id: action for (char_id, action) in action_dict.items() if (action is not None)} self.env.step(new_action_dict) self.changed_graph = True (reward, done, info) = self.reward() obs = self.get_observations() self.steps += 1 info['finished'] = done info['graph'] = self.get_graph() if (self.steps == self.max_episode_length): done = True return (obs, reward, done, info) def python_graph_reset(self, graph): new_graph = utils.inside_not_trans(graph) self.python_graph = new_graph self.env.reset(new_graph, self.task_goal) self.env.to_pomdp() def get_goal(self, task_spec, agent_goal): if (agent_goal == 'full'): task_spec_new = {goal_name: [cnt_val, True, 0] for (goal_name, cnt_val) in task_spec.items()} return task_spec_new elif (agent_goal == 'grab'): candidates = [x.split('_')[1] for (x, y) in task_spec.items() if ((y > 0) and (x.split('_')[0] in ['on', 'inside']))] object_grab = random.choice(candidates) return {((('holds_' + object_grab) + '_') + '1'): [1, True, 10], ((('close_' + object_grab) + '_') + '1'): [1, False, 0.1]} elif (agent_goal == 'put'): pred = random.choice([x for (x, y) in task_spec.items() if ((y > 0) and (x.split('_')[0] in ['on', 'inside']))]) object_grab = pred.split('_')[1] return {pred: [1, True, 60], ((('holds_' + object_grab) + '_') + '1'): [1, False, 2], ((('close_' + object_grab) + '_') + '1'): [1, False, 0.05]} else: raise NotImplementedError def reset(self, environment_graph=None, task_id=None): if (task_id is None): env_task = random.choice(self.env_task_set) else: env_task = self.env_task_set[task_id] self.task_id = env_task['task_id'] self.init_graph = env_task['init_graph'] self.init_rooms = env_task['init_rooms'] self.task_goal = env_task['task_goal'] self.task_name = env_task['task_name'] self.env_id = env_task['env_id'] self.goal_spec = {agent_id: self.get_goal(self.task_goal[agent_id], self.agent_goals[agent_id]) for agent_id in range(self.num_agents)} print('Goal: ', self.goal_spec) if (environment_graph is None): environment_graph = env_task['init_graph'] if (self.init_rooms[0] not in ['kitchen', 'bedroom', 'livingroom', 'bathroom']): rooms = random.sample(['kitchen', 'bedroom', 
'livingroom', 'bathroom'], 2) else: rooms = list(self.init_rooms) environment_graph = copy.deepcopy(environment_graph) for i in range(self.num_agents): new_char_node = {'id': (i + 1), 'class_name': 'character', 'states': [], 'category': 'Characters', 'properties': []} room_name = rooms[i] room_id = [node['id'] for node in environment_graph['nodes'] if (node['class_name'] == room_name)][0] environment_graph['nodes'].append(new_char_node) environment_graph['edges'].append({'from_id': (i + 1), 'relation_type': 'INSIDE', 'to_id': room_id}) self.python_graph_reset(environment_graph) self.rooms = [(node['class_name'], node['id']) for node in environment_graph['nodes'] if (node['category'] == 'Rooms')] self.id2node = {node['id']: node for node in environment_graph['nodes']} obs = self.get_observations() self.steps = 0 return obs def get_graph(self): out_graph = self.env.state return out_graph def get_observations(self): dict_observations = {} for agent_id in range(self.num_agents): obs_type = self.observation_types[agent_id] dict_observations[agent_id] = self.get_observation(agent_id, obs_type) return dict_observations def get_action_space(self): dict_action_space = {} for agent_id in range(self.num_agents): if (self.observation_types[agent_id] not in ['mcts', 'full']): raise NotImplementedError else: obs_type = 'mcts' visible_graph = self.get_observation(agent_id, obs_type) dict_action_space[agent_id] = [node['id'] for node in visible_graph['nodes']] return dict_action_space def get_observation(self, agent_id, obs_type, info={}): if (obs_type == 'mcts'): return self.env.get_observations(char_index=agent_id) elif (obs_type == 'full'): return self.get_graph() else: pdb.set_trace() raise NotImplementedError
def test_nested_while_with_break() -> None: src = '\n while n > 10:\n while n > 20:\n break\n print(n - 1)\n break\n print(n)\n ' cfg = build_cfg(src) expected_blocks = [['n > 10'], ['n > 20'], ['break'], ['print(n - 1)', 'break'], ['print(n)'], []] assert (expected_blocks == _extract_blocks(cfg)) expected_edges = [[['n > 10'], ['n > 20']], [['n > 20'], ['break']], [['break'], ['print(n - 1)', 'break']], [['print(n - 1)', 'break'], ['print(n)']], [['print(n)'], []], [['n > 20'], ['print(n - 1)', 'break']], [['n > 10'], ['print(n)']]] assert (expected_edges == _extract_edges(cfg))
class EventLoop(asyncio.SelectorEventLoop): def __init__(self): self._greenlet = None selector = _Selector(self) super(EventLoop, self).__init__(selector=selector) if _GEVENT10: def time(self): return gevent.core.time() def call_soon(self, callback, *args, context=None): handle = super(EventLoop, self).call_soon(callback, *args) if ((self._selector is not None) and self._selector._event): self._write_to_self() return handle def call_at(self, when, callback, *args, context=None): handle = super(EventLoop, self).call_at(when, callback, *args) if ((self._selector is not None) and self._selector._event): self._write_to_self() return handle def run_forever(self): self._greenlet = gevent.getcurrent() try: super(EventLoop, self).run_forever() finally: self._greenlet = None
def _make_handshake_rejection(status_code: int, body: Optional[bytes]=None) -> List[h11.Event]: client = h11.Connection(h11.CLIENT) server = WSConnection(SERVER) nonce = generate_nonce() server.receive_data(client.send(h11.Request(method='GET', target='/', headers=[(b'Host', b'localhost'), (b'Connection', b'Keep-Alive, Upgrade'), (b'Upgrade', b'websocket'), (b'Sec-WebSocket-Version', b'13'), (b'Sec-WebSocket-Key', nonce)]))) if (body is not None): client.receive_data(server.send(RejectConnection(headers=[(b'content-length', (b'%d' % len(body)))], status_code=status_code, has_body=True))) client.receive_data(server.send(RejectData(data=body))) else: client.receive_data(server.send(RejectConnection(status_code=status_code))) events = [] while True: event = client.next_event() events.append(cast(h11.Event, event)) if isinstance(event, h11.EndOfMessage): return events
@click.command() @click.option('--prompt', '-p', 'prompt', type=str, required=True, help='Input prompt.') @click.option('--backend', '-b', 'backends', type=str, multiple=True, default=['openai|text-davinci-003'], help='LLM APIs to use as backends. Use "backend|model_name" notation. For example: "openai|text-davinci-003".') @click.option('--max-length', '-l', 'max_lengths', type=int, multiple=True, default=[100], help='Maximum number of tokens to generate for each prompt.') _temperatures _num_samples _options _path _single_file def texts(prompt: str, num_samples: int, max_lengths: List[int], temperatures: List[int], backends: List[str], options: List[Tuple[(str, str)]], path: str, single_file: bool) -> None: dataset_writer = DatasetWriter(path, single_file) generator_config = TextsGeneratorConfig(prompt=prompt, backends=backends, num_samples=num_samples, max_lengths=max_lengths, temperatures=temperatures, options=options) texts_generator = TextsGenerator(generator_config) for text_object in texts_generator: dataset_writer.save_intermediate_result(text_object)
def test_select_device(): assert (_select_device('cpu') == torch.device('cpu')) assert (_select_device('gpu') == torch.device('cuda')) assert (_select_device('cuda:0') == torch.device('cuda', index=0)) if torch.cuda.is_available(): assert (_select_device('auto') == torch.device('cuda')) else: assert (_select_device('auto') == torch.device('cpu'))
def build_optim_other(args, model, checkpoint): if (checkpoint is not None): optim = checkpoint['optims'][1] saved_optimizer_state_dict = optim.optimizer.state_dict() optim.optimizer.load_state_dict(saved_optimizer_state_dict) if (args.visible_gpus != '-1'): for state in optim.optimizer.state.values(): for (k, v) in state.items(): if torch.is_tensor(v): state[k] = v.cuda() if ((optim.method == 'adam') and (len(optim.optimizer.state) < 1)): raise RuntimeError(('Error: loaded Adam optimizer from existing model' + ' but optimizer state is empty')) else: optim = Optimizer(args.optim, args.lr_other, args.max_grad_norm, beta1=args.beta1, beta2=args.beta2, decay_method='noam', warmup_steps=args.warmup_steps_other) if (args.encoder == 'bert'): params = [(n, p) for (n, p) in list(model.named_parameters()) if ((not n.startswith('encoder.model')) and (not n.startswith('topic_model')))] else: params = [(n, p) for (n, p) in list(model.named_parameters()) if (not n.startswith('topic_model'))] optim.set_parameters(params) return optim
@click.group() @click.option('-e', '--extension', multiple=True, help='File or module path to a zipline extension to load.') @click.option('--strict-extensions/--non-strict-extensions', is_flag=True, help='If --strict-extensions is passed then zipline will not run if it cannot load all of the specified extensions. If this is not passed or --non-strict-extensions is passed then the failure will be logged but execution will continue.') @click.option('--default-extension/--no-default-extension', is_flag=True, default=True, help="Don't load the default zipline extension.py file in $ZIPLINE_HOME.") @click.option('-x', multiple=True, help='Any custom command line arguments to define, in key=value form.') @click.pass_context def main(ctx, extension, strict_extensions, default_extension, x): logbook.StderrHandler().push_application() create_args(x, zipline.extension_args) load_extensions(default_extension, extension, strict_extensions, os.environ)
class Chat(): def __init__(self, msg: object) -> None: if isinstance(msg, str): self.msg = {'text': msg} else: self.msg = msg @classmethod def from_string(cls, text: str) -> Chat: return cls({'text': text}) def to_string(self, mode: str) -> str: def parse(msg): if isinstance(msg, str): if (mode == 'plain'): return re.sub('§.', '', msg) if (mode == 'normal'): return msg if (mode == 'color'): colored = '' for (i, c) in enumerate(msg): if (c == '§'): colored += TERMINAL_CODES[msg[(i + 1)]] continue return colored elif isinstance(msg, list): return ''.join([parse(e) for e in msg]) elif isinstance(msg, dict): text = '' if (mode != 'plain'): for (name, code) in FORMAT_BY_NAME.items(): if msg.get(name): text += ('§' + code) if ('text' in msg): text += parse(msg['text']) if ('extra' in msg): text += parse(msg['extra']) return text elif (msg is None): return '' else: return str(self.msg) return parse(self.msg)
class TestHooks(): @pytest.fixture(autouse=True) def create_test_file(self, pytester: pytest.Pytester) -> None: pytester.makepyfile('\n import os\n def test_a(): pass\n def test_b(): pass\n def test_c(): pass\n ') def test_runtest_logreport(self, pytester: pytest.Pytester) -> None: pytester.makeconftest('\n def pytest_runtest_logreport(report):\n if hasattr(report, \'node\'):\n if report.when == "call":\n workerid = report.node.workerinput[\'workerid\']\n testrunuid = report.node.workerinput[\'testrunuid\']\n if workerid != report.worker_id:\n print("HOOK: Worker id mismatch: %s %s"\n % (workerid, report.worker_id))\n elif testrunuid != report.testrun_uid:\n print("HOOK: Testrun uid mismatch: %s %s"\n % (testrunuid, report.testrun_uid))\n else:\n print("HOOK: %s %s %s"\n % (report.nodeid, report.worker_id, report.testrun_uid))\n ') res = pytester.runpytest('-n1', '-s') res.stdout.fnmatch_lines(['*HOOK: test_runtest_logreport.py::test_a gw0 *', '*HOOK: test_runtest_logreport.py::test_b gw0 *', '*HOOK: test_runtest_logreport.py::test_c gw0 *', '*3 passed*']) def test_node_collection_finished(self, pytester: pytest.Pytester) -> None: pytester.makeconftest('\n def pytest_xdist_node_collection_finished(node, ids):\n workerid = node.workerinput[\'workerid\']\n stripped_ids = [x.split(\'::\')[1] for x in ids]\n print("HOOK: %s %s" % (workerid, \', \'.join(stripped_ids)))\n ') res = pytester.runpytest('-n2', '-s') res.stdout.fnmatch_lines_random(['*HOOK: gw0 test_a, test_b, test_c', '*HOOK: gw1 test_a, test_b, test_c']) res.stdout.fnmatch_lines(['*3 passed*'])
class Service(BaseImageObject): mode = 'service' def __init__(self, docker_client): super().__init__(docker_client) self.monitored = self.monitor_filter() def monitor_filter(self): services = self.client.services.list(filters={'label': 'com.ouroboros.enable'}) monitored_services = [] for service in services: ouro_label = service.attrs['Spec']['Labels'].get('com.ouroboros.enable') if ((not self.config.label_enable) or (ouro_label.lower() in ['true', 'yes'])): monitored_services.append(service) self.data_manager.monitored_containers[self.socket] = len(monitored_services) self.data_manager.set(self.socket) return monitored_services def pull(self, tag): return self._pull(tag) def update(self): updated_service_tuples = [] self.monitored = self.monitor_filter() if (not self.monitored): self.logger.info('No services monitored') for service in self.monitored: image_string = service.attrs['Spec']['TaskTemplate']['ContainerSpec']['Image'] if ('@' in image_string): tag = image_string.split('@')[0] sha256 = remove_sha_prefix(image_string.split('@')[1]) else: self.logger.error('No image SHA for %s. Skipping', image_string) continue try: latest_image = self.pull(tag) except ConnectionError: continue latest_image_sha256 = get_digest(latest_image) self.logger.debug('Latest sha256 for %s is %s', tag, latest_image_sha256) if (sha256 != latest_image_sha256): if self.config.dry_run: self.logger.info('dry run : %s would be updated', service.name) continue updated_service_tuples.append((service, sha256[(- 10):], latest_image)) if (('ouroboros' in service.name) and self.config.self_update): self.data_manager.total_updated[self.socket] += 1 self.data_manager.add(label=service.name, socket=self.socket) self.data_manager.add(label='all', socket=self.socket) self.notification_manager.send(container_tuples=updated_service_tuples, socket=self.socket, kind='update', mode='service') self.logger.info('%s will be updated', service.name) service.update(image=f'{tag}:{latest_image_sha256}') self.data_manager.total_updated[self.socket] += 1 self.data_manager.add(label=service.name, socket=self.socket) self.data_manager.add(label='all', socket=self.socket) if updated_service_tuples: self.notification_manager.send(container_tuples=updated_service_tuples, socket=self.socket, kind='update', mode='service')
class NInfinity(object): def __lt__(self, other): if isinstance(other, NInfinity): return False return True def __gt__(self, other): return False def __eq__(self, other): return isinstance(other, NInfinity) def __neg__(self): return infinity def __pos__(self): return self
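Behavior sketch: the sentinel sorts below every other value and all instances compare equal (`__neg__` presumes a companion `infinity` sentinel defined in the same module):
ninf = NInfinity()
assert ninf < -10**100
assert not (ninf > 0)
assert ninf == NInfinity()
assert sorted([3, ninf, -7])[0] is ninf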
def attach_player_object_to_vehicle(player_id: int, object_id: int, vehicle_id: int, offset_x: float, offset_y: float, offset_z: float, rotation_x: float, rotation_y: float, rotation_z: float) -> bool: return AttachPlayerObjectToVehicle(player_id, object_id, vehicle_id, offset_x, offset_y, offset_z, rotation_x, rotation_y, rotation_z)
def get_filtered_command_list(cpaddrs=[], isrunning=None, goodwords=[], badwords=[]): base = 'commandmetadata' i = 0 retval = list() while True: i += 1 good = True try: dsz.cmd.data.Get('commandmetadata::id', dsz.TYPE_INT, i)[0] except: break try: if (isrunning is not None): if (dsz.cmd.data.ObjectGet(base, 'isrunning', dsz.TYPE_BOOL, i)[0] != isrunning): good = False continue try: dest = dsz.cmd.data.ObjectGet(base, 'destination', dsz.TYPE_STRING, i)[0] if ((cpaddrs != []) and (dest not in cpaddrs)): continue except: dest = '' continue fullcommand = dsz.cmd.data.ObjectGet('commandmetadata', 'fullcommand', dsz.TYPE_STRING, i)[0] for word in goodwords: if (fullcommand.find(word) < 0): good = False break for bad in badwords: if (fullcommand.find(bad) > (- 1)): good = False break if (not good): continue retval.append(i) except: break return retval
class BoundSymbol(object): def __init__(self, symbol, database): if (symbol.type == 'proc'): proc = database.proc(symbol) self.method = proc.__call__ self.object = proc else: ps = database.prepare(symbol) m = symbol.method if (m is None): self.method = ps.__call__ else: self.method = getattr(ps, m) self.object = ps def __call__(self, *args, **kw): return self.method(*args, **kw)
def check_tasks_unique(tasks, env_names): env_to_rand_vecs = {} for env_name in env_names: env_to_rand_vecs[env_name] = np.array([pickle.loads(task.data)['rand_vec'] for task in tasks if (task.env_name == env_name)]) unique_task_rand_vecs = np.unique(np.array(env_to_rand_vecs[env_name]), axis=0) assert (unique_task_rand_vecs.shape[0] == metaworld._N_GOALS) return env_to_rand_vecs
class _ModelZooUrls(object): S3_PREFIX = 'https://dl.fbaipublicfiles.com/detectron2/' CONFIG_PATH_TO_URL_SUFFIX = {'COCO-Detection/faster_rcnn_R_50_C4_1x': '/model_final_721ade.pkl', 'COCO-Detection/faster_rcnn_R_50_DC5_1x': '/model_final_51d356.pkl', 'COCO-Detection/faster_rcnn_R_50_FPN_1x': '/model_final_b275ba.pkl', 'COCO-Detection/faster_rcnn_R_50_C4_3x': '/model_final_f97cb7.pkl', 'COCO-Detection/faster_rcnn_R_50_DC5_3x': '/model_final_68d202.pkl', 'COCO-Detection/faster_rcnn_R_50_FPN_3x': '/model_final_280758.pkl', 'COCO-Detection/faster_rcnn_R_101_C4_3x': '/model_final_298dad.pkl', 'COCO-Detection/faster_rcnn_R_101_DC5_3x': '/model_final_3e0943.pkl', 'COCO-Detection/faster_rcnn_R_101_FPN_3x': '/model_final_f6e8b1.pkl', 'COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x': '/model_final_68b088.pkl', 'COCO-Detection/retinanet_R_50_FPN_1x': '/model_final_bfca0b.pkl', 'COCO-Detection/retinanet_R_50_FPN_3x': '/model_final_5bd44e.pkl', 'COCO-Detection/retinanet_R_101_FPN_3x': '/model_final_971ab9.pkl', 'COCO-Detection/rpn_R_50_C4_1x': '/model_final_450694.pkl', 'COCO-Detection/rpn_R_50_FPN_1x': '/model_final_02ce48.pkl', 'COCO-Detection/fast_rcnn_R_50_FPN_1x': '/model_final_e5f7ce.pkl', 'COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x': '/model_final_9243eb.pkl', 'COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x': '/model_final_4f86c3.pkl', 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x': '/model_final_a54504.pkl', 'COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x': '/model_final_4ce675.pkl', 'COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x': '/model_final_84107b.pkl', 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x': '/model_final_f10217.pkl', 'COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x': '/model_final_a2914c.pkl', 'COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x': '/model_final_0464b7.pkl', 'COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x': '/model_final_a3ec72.pkl', 'COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x': '/model_final_2d9806.pkl', 'new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ': '/model_final_bb69de.pkl', 'new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ': '/model_final_89a8d3.pkl', 'new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ': '/model_final_14d201.pkl', 'new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ': '/model_final_4f7b58.pkl', 'new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ': '/model_final_0bb7ae.pkl', 'new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ': '/model_final_f96b26.pkl', 'new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ': '/model_final_b7fbab.pkl', 'new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ': '/model_final_5d87c1.pkl', 'new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ': '/model_final_f1362d.pkl', 'new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ': '/model_final_6ba57e.pkl', 'new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ': '/model_final_27b9c1.pkl', 'new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ': '/model_final_ef3a80.pkl', 'COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x': '/model_final_04e291.pkl', 'COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x': '/model_final_a6e10b.pkl', 'COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x': '/model_final_997cc7.pkl', 'COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x': '/model_final_5ad38f.pkl', 'COCO-PanopticSegmentation/panoptic_fpn_R_50_1x': '/model_final_dbfeb4.pkl', 'COCO-PanopticSegmentation/panoptic_fpn_R_50_3x': '/model_final_c10459.pkl', 'COCO-PanopticSegmentation/panoptic_fpn_R_101_3x': '/model_final_cafdb1.pkl', 'LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x': '/model_final_571f7c.pkl', 'LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x': 
'/model_final_824ab5.pkl', 'LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x': '/model_final_5e3439.pkl', 'Cityscapes/mask_rcnn_R_50_FPN': '/model_final_af9cf5.pkl', 'PascalVOC-Detection/faster_rcnn_R_50_C4': '/model_final_b1acc2.pkl', 'Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5': '/model_final_65c703.pkl', 'Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5': '/model_final_821d0b.pkl', 'Misc/cascade_mask_rcnn_R_50_FPN_1x': '/model_final_e9d89b.pkl', 'Misc/cascade_mask_rcnn_R_50_FPN_3x': '/model_final_480dd8.pkl', 'Misc/mask_rcnn_R_50_FPN_3x_syncbn': '/model_final_3b3c51.pkl', 'Misc/mask_rcnn_R_50_FPN_3x_gn': '/model_final_dc5d9e.pkl', 'Misc/scratch_mask_rcnn_R_50_FPN_3x_gn': '/model_final_01ca85.pkl', 'Misc/scratch_mask_rcnn_R_50_FPN_9x_gn': '/model_final_da7b4c.pkl', 'Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn': '/model_final_5ce33e.pkl', 'Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x': '/model_final_be35db.pkl', 'Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv': '/model_0039999_e76410.pkl', 'Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x': '/model_final_7ab50c.pkl', 'Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x': '/model_final_62ca52.pkl', 'Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x': '/model_final_cce136.pkl'} def query(config_path: str) -> Optional[str]: name = config_path.replace('.yaml', '').replace('.py', '') if (name in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX): suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[name] return (((_ModelZooUrls.S3_PREFIX + name) + '/') + suffix) return None
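# Usage sketch for _ModelZooUrls.query (illustrative): a known config path
# resolves to a checkpoint URL, an unknown one returns None.
url = _ModelZooUrls.query('COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml')
print(url)  # S3_PREFIX + 'COCO-Detection/faster_rcnn_R_50_FPN_1x' + '/' + suffix
print(_ModelZooUrls.query('no/such_config.yaml'))  # None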
def _check_if_params_are_ray_dmatrix(X, sample_weight, base_margin, eval_set,
                                     sample_weight_eval_set, base_margin_eval_set, eval_qid=None):
    train_dmatrix = None
    evals = ()
    eval_set = eval_set or ()
    if isinstance(X, RayDMatrix):
        params_to_warn_about = ['y']
        if sample_weight is not None:
            params_to_warn_about.append('sample_weight')
        if base_margin is not None:
            params_to_warn_about.append('base_margin')
        warnings.warn(f"X is a RayDMatrix, {', '.join(params_to_warn_about)} will be ignored!")
        train_dmatrix = X
        if eval_set:
            if any(not isinstance(eval_data, RayDMatrix) or not isinstance(eval_name, str)
                   for eval_data, eval_name in eval_set):
                raise ValueError('If X is a RayDMatrix, all elements of `eval_set` must be (RayDMatrix, str) tuples.')
            params_to_warn_about = []
            if sample_weight_eval_set is not None:
                params_to_warn_about.append('sample_weight_eval_set')
            if base_margin_eval_set is not None:
                params_to_warn_about.append('base_margin_eval_set')
            if eval_qid is not None:
                params_to_warn_about.append('eval_qid')
            if params_to_warn_about:
                warnings.warn(f"`eval_set` is composed of RayDMatrix tuples, {', '.join(params_to_warn_about)} will be ignored!")
        evals = eval_set or ()
    elif any(isinstance(eval_x, RayDMatrix) or isinstance(eval_y, RayDMatrix)
             for eval_x, eval_y in eval_set):
        raise ValueError('If X is not a RayDMatrix, all `eval_set` elements must be (array_like, array_like) tuples.')
    return (train_dmatrix, evals)
class IntermediateLayerGetter(nn.ModuleDict):
    _version = 2
    __annotations__ = {'return_layers': Dict[str, str]}

    def __init__(self, model, return_layers):
        if not set(return_layers).issubset([name for name, _ in model.named_children()]):
            raise ValueError('return_layers are not present in model')
        orig_return_layers = return_layers
        return_layers = {str(k): str(v) for k, v in return_layers.items()}
        layers = OrderedDict()
        for name, module in model.named_children():
            layers[name] = module
            if name in return_layers:
                del return_layers[name]
            if not return_layers:
                break
        super(IntermediateLayerGetter, self).__init__(layers)
        self.return_layers = orig_return_layers

    def forward(self, x):
        out = OrderedDict()
        for name, module in self.items():
            x = module(x)
            if name in self.return_layers:
                out_name = self.return_layers[name]
                out[out_name] = x
        return out
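# Usage sketch for IntermediateLayerGetter (assumes torchvision is available;
# the backbone and layer names below are illustrative, not from the source).
import torch
from torchvision.models import resnet18

backbone = resnet18()
# Expose the outputs of layer3 and layer4 under the names 'feat3' and 'feat4';
# modules after the last requested layer are dropped in __init__.
getter = IntermediateLayerGetter(backbone, return_layers={'layer3': 'feat3', 'layer4': 'feat4'})
feats = getter(torch.randn(1, 3, 224, 224))
print({k: v.shape for k, v in feats.items()})
# {'feat3': torch.Size([1, 256, 14, 14]), 'feat4': torch.Size([1, 512, 7, 7])}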
class MobileNetV2(nn.Module):
    cfg = [(1, 16, 1, 1), (6, 24, 2, 1), (6, 32, 3, 2), (6, 64, 4, 2), (6, 96, 3, 1), (6, 160, 3, 2), (6, 320, 1, 1)]

    def __init__(self, num_classes=10):
        super(MobileNetV2, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(1280)
        self.linear = nn.Linear(1280, num_classes)

    def _make_layers(self, in_planes):
        layers = []
        for expansion, out_planes, num_blocks, stride in self.cfg:
            strides = [stride] + [1] * (num_blocks - 1)
            for stride in strides:
                layers.append(Block(in_planes, out_planes, expansion, stride))
                in_planes = out_planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
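# Minimal smoke test for the MobileNetV2 above (assumes the inverted-residual
# `Block` module it depends on is defined elsewhere). Input is CIFAR-sized,
# matching the stride-1 stem and the final 4x4 average pool.
import torch

net = MobileNetV2(num_classes=10)
logits = net(torch.randn(2, 3, 32, 32))
print(logits.shape)  # torch.Size([2, 10])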
def geth_generate_poa_genesis(genesis_path: str, genesis_description: GenesisDescription, seal_account: Address) -> None:
    alloc = {
        to_normalized_address(account.address): {'balance': str(account.balance)}
        for account in genesis_description.prefunded_accounts
    }
    seal_address_normalized = remove_0x_prefix(encode_hex(seal_account))
    extra_data = geth_clique_extradata(genesis_description.random_marker, seal_address_normalized)
    genesis = GENESIS_STUB.copy()
    genesis['alloc'].update(alloc)
    genesis['config']['ChainID'] = genesis_description.chain_id
    genesis['config']['clique'] = {'period': 1, 'epoch': 30000}
    genesis['extraData'] = extra_data
    with open(genesis_path, 'w') as handler:
        json.dump(genesis, handler)
class StyleEdit(QtWidgets.QWidget):
    styleChanged = QtCore.Signal(str, str)

    def __init__(self, defaultStyle, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.styleKey = defaultStyle.key
        self.layout = layout = QtWidgets.QHBoxLayout()
        self.setters = {}
        fmtParts = defaultStyle.defaultFormat._parts
        if 'fore' in fmtParts:
            self.__add_clrLineEdit('fore', 'Foreground')
        if 'back' in fmtParts:
            self.__add_clrLineEdit('back', 'Background')
        if 'bold' in fmtParts:
            self.__add_checkBox('bold', 'Bold')
        if 'italic' in fmtParts:
            self.__add_checkBox('italic', 'Italic')
        if 'underline' in fmtParts:
            self.__add_comboBox('underline', 'Underline', 'No', 'Dotted', 'Wave', 'Full', 'Yes')
        if 'linestyle' in fmtParts:
            self.__add_comboBox('linestyle', 'Linestyle', 'Dashed', 'Dotted', 'Full')
        self.setLayout(layout)
        self.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)

    def __add_clrLineEdit(self, key, name):
        clrEdit = ColorLineEdit(name)
        clrEdit.textChanged.connect(lambda txt, key=key: self.__update(key, txt))
        self.setters[key] = clrEdit.setText
        self.layout.addWidget(TitledWidget(name, clrEdit), 0)

    def __add_checkBox(self, key, name):
        checkBox = QtWidgets.QCheckBox()
        self.setters[key] = lambda val, check=checkBox: check.setCheckState(
            [QtCore.Qt.CheckState.Unchecked, QtCore.Qt.CheckState.Checked][val == 'yes'])
        checkBox.stateChanged.connect(lambda state, key=key: self.__update(key, 'yes' if state else 'no'))
        self.layout.addWidget(TitledWidget(name, checkBox))

    def __add_comboBox(self, key, name, *items):
        combo = QtWidgets.QComboBox()
        combo.addItems(items)
        combo.currentTextChanged.connect(lambda txt, key=key: self.__update(key, txt))
        self.setters[key] = lambda txt, cmb=combo: cmb.setCurrentText(txt.capitalize())
        self.layout.addWidget(TitledWidget(name, combo))

    def __update(self, key, value):
        self.styleChanged.emit(self.styleKey, key + ':' + value)

    def setStyle(self, text):
        style = StyleFormat(text)
        for key, setter in self.setters.items():
            setter(style[key])

    def setFocus(self):
        self.layout.itemAt(0).widget().setFocus()
def load_pretrained_model(model, model_path):
    checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
    state_dict_ = checkpoint
    state_dict = {}
    # strip the 'module.' prefix added by DataParallel (but keep 'module_list' keys)
    for k in state_dict_:
        if k.startswith('module') and not k.startswith('module_list'):
            state_dict[k[7:]] = state_dict_[k]
        else:
            state_dict[k] = state_dict_[k]
    model_state_dict = model.state_dict()
    msg = ('If you see this, your model does not fully load the pre-trained weight. '
           'Please make sure you have correctly specified --arch xxx '
           'or set the correct --num_classes for your own dataset.')
    for k in state_dict:
        if k in model_state_dict:
            if state_dict[k].shape != model_state_dict[k].shape:
                print('Skip loading parameter {}, required shape {}, loaded shape {}. {}'.format(
                    k, model_state_dict[k].shape, state_dict[k].shape, msg))
                state_dict[k] = model_state_dict[k]
        else:
            print('Drop parameter {}. {}'.format(k, msg))
    for k in model_state_dict:
        if k not in state_dict:
            print('No param {}. {}'.format(k, msg))
            state_dict[k] = model_state_dict[k]
    model.load_state_dict(state_dict, strict=False)
    return model
@with_fixtures(FieldFixture)  # assumed decorator; its name was truncated in the source
def test_date_validation(fixture):
    field = DateField()
    obj = fixture.model_object
    field.bind('date_value', obj)
    with expected(DateConstraint):
        field.set_user_input('sdfdf')
    with expected(NoException):
        field.set_user_input('13 Dec')
    limit_date = datetime.date(2012, 11, 13)
    before_limit = '12 Nov 2012'
    after_limit = '14 Nov 2012'
    field = DateField(max_value=limit_date)
    with expected(MaxValueConstraint):
        field.set_user_input(after_limit)
    field = DateField(min_value=limit_date)
    with expected(MinValueConstraint):
        field.set_user_input(before_limit)
class MixChannelParent(CommonBaseTesting):
    channels = CommonBase.MultiChannelCreator(GenericBase, ('A', 'B', 'C'))
    ch_D = CommonBase.ChannelCreator(GenericBase, 'D')
    output_Z = CommonBase.ChannelCreator(GenericBase, 'Z')
    analog = CommonBase.MultiChannelCreator(GenericBase, list(range(0, 10)), prefix='an_', test=True)
@pytest.mark.parametrize('executor_config,expected_quay_builder_unit_contents', [
    ({'CONTAINER_RUNTIME': 'docker'}, '[Unit]\nWants=docker.service network-online.target\nAfter=docker.service network-online.target\nRequires=docker.service\n\n[Service]\nType=oneshot\nTimeoutStartSec=10800\nTimeoutStopSec=2000\n\nExecStartPre=/usr/bin/docker login -u quay_username -p quay_password quay.io\nExecStart=/usr/bin/docker run --user 0 --rm --net=host --privileged --env-file /root/overrides.list -v /var/run/docker.sock:/var/run/docker.sock -v /etc/pki/ca-trust-source/anchors:/certs --name quay-builder quay.io/example/quay-builder:worker_tag\nExecStopPost=/bin/sh -xc "/bin/sleep 120; /usr/bin/systemctl --no-block poweroff"\n\n[Install]\nWantedBy=multi-user.target'),
    ({'CONTAINER_RUNTIME': 'podman'}, '[Unit]\nWants=podman.service network-online.target\nAfter=podman.service network-online.target\nRequires=podman.service\n\n[Service]\nType=oneshot\nTimeoutStartSec=10800\nTimeoutStopSec=2000\n\nExecStartPre=/usr/bin/podman login -u quay_username -p quay_password quay.io\nExecStart=/usr/bin/podman run --user 0 --rm --privileged --env-file /root/overrides.list -v /var/run/podman/podman.sock:/var/run/podman/podman.sock -v /etc/pki/ca-trust-source/anchors:/certs -e CONTAINER_RUNTIME=podman -e DOCKER_HOST=unix:/var/run/podman/podman.sock --name quay-builder quay.io/example/quay-builder:worker_tag\nExecStopPost=/bin/sh -xc "/bin/sleep 120; /usr/bin/systemctl --no-block poweroff"\n\n[Install]\nWantedBy=multi-user.target'),
    ({'CONTAINER_RUNTIME': 'podman', 'MAX_LIFETIME_S': 7200, 'DEBUG': True}, '[Unit]\nWants=podman.service network-online.target\nAfter=podman.service network-online.target\nRequires=podman.service\n\n[Service]\nType=oneshot\nTimeoutStartSec=7200\nTimeoutStopSec=2000\n\nExecStartPre=/usr/bin/podman login -u quay_username -p quay_password quay.io\nExecStart=/usr/bin/podman run --user 0 --rm --privileged --env-file /root/overrides.list -v /var/run/podman/podman.sock:/var/run/podman/podman.sock -v /etc/pki/ca-trust-source/anchors:/certs -e CONTAINER_RUNTIME=podman -e DOCKER_HOST=unix:/var/run/podman/podman.sock --name quay-builder quay.io/example/quay-builder:worker_tag\n\n[Install]\nWantedBy=multi-user.target'),
])
def test_builder_cloud_config(executor_config, expected_quay_builder_unit_contents):
    executor_config = {
        'CA_CERT': b'ca_cert',
        'QUAY_PASSWORD': 'quay_password',
        'QUAY_USERNAME': 'quay_username',
        'WORKER_IMAGE': 'quay.io/example/quay-builder',
        'WORKER_TAG': 'worker_tag',
    } | executor_config
    executor = BuilderExecutor(executor_config, 'registry_hostname', 'manager_hostname')
    generated_cloud_config_json = executor.generate_cloud_config('token', 'build_uuid', 'manager_hostname')
    generated_cloud_config = json.loads(generated_cloud_config_json)
    quay_builder_unit = generated_cloud_config['systemd']['units'][0]
    assert quay_builder_unit['name'] == 'quay-builder.service'
    assert quay_builder_unit['enabled']
    assert quay_builder_unit['contents'] == expected_quay_builder_unit_contents
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = None

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<unk>',
                 bos_token='<s>', eos_token='</s>', pad_token='<pad>', add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
                         bos_token=bos_token, eos_token=eos_token, pad_token=pad_token,
                         add_prefix_space=add_prefix_space, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: 'Conversation') -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length:]
        return input_ids
def _sampled_video_weight_inputs(non_zeros, radius: float, shape):
    num_frames, num_classes = shape
    weights = np.zeros((num_frames, num_classes))
    frame_indexes, class_indexes = non_zeros
    for frame_index, class_index in zip(frame_indexes, class_indexes):
        weight_range = create_frame_range(frame_index, radius, num_frames)
        weights[weight_range, class_index] = 1.0
    return weights
class SparseIndexParams(BaseModel, extra='forbid'):
    full_scan_threshold: Optional[int] = Field(
        default=None,
        description='We prefer a full scan search up to (excluding) this number of vectors. Note: this is number of vectors, not KiloBytes.',
    )
    on_disk: Optional[bool] = Field(
        default=None,
        description='Store index on disk. If set to false, the index will be stored in RAM. Default: false',
    )
def ratio_threshold(depth1, depth2, threshold):
    assert threshold > 0.0
    assert np.all((np.isfinite(depth1) & np.isfinite(depth2)) & (depth1 >= 0) & (depth2 >= 0))
    log_diff = np.log(depth1) - np.log(depth2)
    num_pixels = float(log_diff.size)
    if num_pixels == 0:
        return np.nan
    else:
        return float(np.sum(np.absolute(log_diff) < np.log(threshold))) / num_pixels
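# Quick numeric check of ratio_threshold (assumes numpy as np, as in the
# function itself): with a uniform 10% depth error, every pixel passes the
# standard delta < 1.25 threshold but none passes delta < 1.05.
import numpy as np

gt = np.full((4, 4), 2.0)
pred = gt * 1.1  # 10% over-estimate everywhere
print(ratio_threshold(pred, gt, 1.25))  # 1.0
print(ratio_threshold(pred, gt, 1.05))  # 0.0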
def _moverc(src, dst, overwrite):
    exists = os.path.exists
    move = shutil.move
    removedirs = os.removedirs
    for src_dir, dirnames, filenames in os.walk(src):
        dst_dir = src_dir.replace(src, dst, 1)
        if exists(dst_dir):
            _shdorc(move, filenames, src_dir, dst_dir, overwrite)
        else:
            _shdo(move, src_dir, dst_dir, overwrite, dirnames)
        try:
            removedirs(src_dir)
        except Exception:
            pass
def prepare_lima_data_and_index(r_tokenizer, r_model):
    lima_dataset = load_dataset('GAIR/lima')
    idx2alignment_data = dict()
    alignment_prompt_list = [e['conversations'][0] for e in lima_dataset['train']]
    alignment_data_prompt_embeddings = compute_embeddings(r_tokenizer, r_model, alignment_prompt_list)
    for i, e in enumerate(lima_dataset['train']):
        context = ''
        prompt = e['conversations'][0]
        response = e['conversations'][1]
        idx2alignment_data[i] = {'context': context.strip(), 'prompt': prompt.strip(), 'response': response.strip()}
    return (idx2alignment_data, alignment_data_prompt_embeddings)
class CmdWield(Command):
    key = 'wield'
    help_category = 'combat'

    def func(self):
        if is_in_combat(self.caller):
            if not is_turn(self.caller):
                self.caller.msg('You can only do that on your turn.')
                return
        if not self.args:
            self.caller.msg('Usage: wield <obj>')
            return
        weapon = self.caller.search(self.args, candidates=self.caller.contents)
        if not weapon:
            return
        if not weapon.is_typeclass('evennia.contrib.turnbattle.tb_equip.TBEWeapon'):
            self.caller.msg("That's not a weapon!")
            return
        if not self.caller.db.wielded_weapon:
            self.caller.db.wielded_weapon = weapon
            self.caller.location.msg_contents('%s wields %s.' % (self.caller, weapon))
        else:
            old_weapon = self.caller.db.wielded_weapon
            self.caller.db.wielded_weapon = weapon
            self.caller.location.msg_contents('%s lowers %s and wields %s.' % (self.caller, old_weapon, weapon))
        if is_in_combat(self.caller):
            spend_action(self.caller, 1, action_name='wield')
class RequiredImgAssetConfigurationTests(TestCase):
    def setUp(self):
        self.sponsor_benefit = baker.make(SponsorBenefit, sponsorship__sponsor__name='Foo')
        self.config = baker.make(RequiredImgAssetConfiguration, related_to=AssetsRelatedTo.SPONSOR.value, internal_name='config_name')

    def test_get_benefit_feature_respecting_configuration(self):
        benefit_feature = self.config.get_benefit_feature(sponsor_benefit=self.sponsor_benefit)
        self.assertIsInstance(benefit_feature, RequiredImgAsset)
        self.assertEqual(benefit_feature.max_width, self.config.max_width)
        self.assertEqual(benefit_feature.min_width, self.config.min_width)
        self.assertEqual(benefit_feature.max_height, self.config.max_height)
        self.assertEqual(benefit_feature.min_height, self.config.min_height)

    def test_create_benefit_feature_and_sponsor_generic_img_assets(self):
        sponsor = self.sponsor_benefit.sponsorship.sponsor
        feature = self.config.create_benefit_feature(self.sponsor_benefit)
        asset = ImgAsset.objects.get()
        self.assertIsInstance(feature, RequiredImgAsset)
        self.assertTrue(feature.pk)
        self.assertEqual(self.config.internal_name, asset.internal_name)
        self.assertEqual(sponsor, asset.content_object)
        self.assertFalse(asset.image.name)

    def test_clone_configuration_for_new_sponsorship_benefit_without_due_date(self):
        sp_benefit = baker.make(SponsorshipBenefit, year=2023)
        new_cfg, created = self.config.clone(sp_benefit)
        self.assertTrue(created)
        self.assertEqual(2, RequiredImgAssetConfiguration.objects.count())
        self.assertEqual(new_cfg.internal_name, f'{self.config.internal_name}_2023')
        self.assertEqual(new_cfg.max_width, self.config.max_width)
        self.assertEqual(new_cfg.min_width, self.config.min_width)
        self.assertEqual(new_cfg.max_height, self.config.max_height)
        self.assertEqual(new_cfg.min_height, self.config.min_height)
        # assumed intent: the source compared new_cfg.due_date to itself
        self.assertEqual(new_cfg.due_date, self.config.due_date)
        self.assertEqual(sp_benefit, new_cfg.benefit)
        repeated, created = self.config.clone(sp_benefit)
        self.assertFalse(created)
        self.assertEqual(new_cfg.pk, repeated.pk)
@pytest.mark.parametrize('version, index', [
    ('1.2.3', -1),
    ('1.2.3', -2),
    ('1.2.3', slice(-2, 2)),
    ('1.2.3', slice(2, -2)),
    ('1.2.3', slice(-2, -2)),
])
def test_version_info_should_throw_index_error_when_negative_index(version, index):
    version_info = Version.parse(version)
    with pytest.raises(IndexError, match='Version index cannot be negative'):
        version_info[index]
class Closer(object):
    def __init__(self, atexit_register=True):
        self.lock = threading.Lock()
        self.next_id = -1
        self.closeables = weakref.WeakValueDictionary()
        if atexit_register:
            atexit.register(self.close)

    def generate_next_id(self):
        with self.lock:
            self.next_id += 1
            return self.next_id

    def register(self, closeable):
        assert hasattr(closeable, 'close'), 'No close method for {}'.format(closeable)
        next_id = self.generate_next_id()
        self.closeables[next_id] = closeable
        return next_id

    def unregister(self, id):
        assert id is not None
        if id in self.closeables:
            del self.closeables[id]

    def close(self):
        closeables = list(self.closeables.values())
        for closeable in closeables:
            closeable.close()
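# Usage sketch for Closer (the resource class below is illustrative): the
# registry holds only weak references, so keep a strong reference to the
# registered object yourself; close() is also installed as an atexit hook
# unless atexit_register=False.
class _TempResource:
    def close(self):
        print('resource closed')

closer = Closer(atexit_register=False)
res = _TempResource()
rid = closer.register(res)   # weak reference only
closer.close()               # -> 'resource closed'
closer.unregister(rid)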
class AverageMeter(object):
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.average = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.average = self.sum / float(self.count)
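# Typical AverageMeter usage (values below are illustrative): each batch
# contributes `n` samples, so `average` is the per-sample weighted mean.
meter = AverageMeter()
for loss, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:
    meter.update(loss, n=batch_size)
print(meter.average)  # (0.9*32 + 0.7*32 + 0.5*16) / 80 = 0.74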
def create_bar_from_face(bm, face, trans, trans_space, depth, vertical=False):
    dup = duplicate_face_translate_scale(bm, face, trans, trans_space).get('geom')
    edges = [filter_horizontal_edges, filter_vertical_edges][vertical](filter_geom(dup, BMEdge))
    extrude_edges_to_depth(bm, edges, depth)
def word2vec(post, word_id_map, W):
    word_embedding = []
    mask = []
    for sentence in post:
        sen_embedding = []
        seq_len = len(sentence) - 1
        mask_seq = np.zeros(args.sequence_len, dtype=np.float32)
        mask_seq[:len(sentence)] = 1.0
        for i, word in enumerate(sentence):
            sen_embedding.append(word_id_map[word])
        while len(sen_embedding) < args.sequence_len:
            sen_embedding.append(0)
        word_embedding.append(copy.deepcopy(sen_embedding))
        mask.append(copy.deepcopy(mask_seq))
    return (word_embedding, mask)
class Signature(object):
    @staticmethod
    def from_der(der):
        d = get_bytes(der)
        if len(d) < 8:
            raise ValueError('DER signature string is too short.')
        if len(d) > 72:
            raise ValueError('DER signature string is too long.')
        if d[0] != 0x30:
            raise ValueError('DER signature does not start with 0x30.')
        if d[1] != len(d[2:]):
            raise ValueError('DER signature length incorrect.')
        total_length = d[1]
        if d[2] != 0x02:
            raise ValueError('DER signature no 1st int marker.')
        if d[3] <= 0 or d[3] > total_length - 7:
            raise ValueError('DER signature incorrect R length.')
        rlen = d[3]
        s_magic_index = 4 + rlen
        rb = d[4:s_magic_index]
        if rb[0] & 0x80 != 0:
            raise ValueError('DER signature R is negative.')
        if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80:
            raise ValueError('DER signature R is excessively padded.')
        r = int.from_bytes(rb, 'big')
        if d[s_magic_index] != 0x02:
            raise ValueError('DER signature no 2nd int marker.')
        slen_index = s_magic_index + 1
        slen = d[slen_index]
        if slen <= 0 or slen > len(d) - (slen_index + 1):
            raise ValueError('DER signature incorrect S length.')
        sb = d[slen_index + 1:]
        if sb[0] & 0x80 != 0:
            raise ValueError('DER signature S is negative.')
        if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80:
            raise ValueError('DER signature S is excessively padded.')
        s = int.from_bytes(sb, 'big')
        if r < 1 or r >= bitcoin_curve.n:
            raise ValueError('DER signature R is not between 1 and N - 1.')
        if s < 1 or s >= bitcoin_curve.n:
            raise ValueError('DER signature S is not between 1 and N - 1.')
        return Signature(r, s)

    @staticmethod
    def from_base64(b64str):
        return Signature.from_bytes(base64.b64decode(b64str))

    @staticmethod
    def from_bytes(b):
        if len(b) != 64:
            raise ValueError('from_bytes: Signature length != 64.')
        r = int.from_bytes(b[0:32], 'big')
        s = int.from_bytes(b[32:64], 'big')
        return Signature(r, s)

    @staticmethod
    def from_hex(h):
        return Signature.from_bytes(bytes.fromhex(h))

    def __init__(self, r, s, recovery_id=None):
        self.r = r
        self.s = s
        self.recovery_id = recovery_id

    def x(self):
        return self.r

    def y(self):
        return self.s

    def _canonicalize(self):
        rv = []
        for x in [self.r, self.s]:
            bl = math.ceil(x.bit_length() / 8)
            if bl == 0:
                bl += 1
            x_bytes = x.to_bytes(bl, 'big')
            # prepend a zero byte so the integer is not interpreted as negative
            if x_bytes[0] & 0x80:
                x_bytes = bytes([0]) + x_bytes
            rv.append(x_bytes)
        return rv

    def to_der(self):
        r, s = self._canonicalize()
        total_length = 6 + len(r) + len(s)
        der = bytes([0x30, total_length - 2, 0x02, len(r)]) + r + bytes([0x02, len(s)]) + s
        return der

    def to_hex(self):
        return bytes_to_str(bytes(self))

    def to_base64(self):
        return base64.b64encode(bytes(self))

    def __bytes__(self):
        nbytes = math.ceil(bitcoin_curve.nlen / 8)
        return self.r.to_bytes(nbytes, 'big') + self.s.to_bytes(nbytes, 'big')
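# DER round-trip sketch for Signature (relies on the module-level get_bytes
# and bitcoin_curve this class uses; the r/s values are illustrative 32-byte
# integers, well below the curve order).
r = int.from_bytes(b'\x01' * 32, 'big')
s = int.from_bytes(b'\x02' * 32, 'big')
sig = Signature(r, s)
restored = Signature.from_der(sig.to_der())
assert (restored.r, restored.s) == (r, s)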
class Dictionary(PymiereBaseObject):
    def __init__(self, pymiere_id=None):
        super(Dictionary, self).__init__(pymiere_id)

    def getGroups(self):
        return Array(**self._eval_on_this_object('getGroups()'))

    def getClasses(self):
        return Array(**self._eval_on_this_object('getClasses()'))

    def getClass(self):
        return _format_object_to_py(self._eval_on_this_object('getClass()'))

    def toXML(self, prefix):
        self._check_type(prefix, str, 'arg "prefix" of function "Dictionary.toXML"')
        return _format_object_to_py(self._eval_on_this_object('toXML({})'.format(_format_object_to_es(prefix))))
class QnliProcessor(DataProcessor):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        return InputExample(tensor_dict['idx'].numpy(),
                            tensor_dict['question'].numpy().decode('utf-8'),
                            tensor_dict['sentence'].numpy().decode('utf-8'),
                            str(tensor_dict['label'].numpy()))

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')

    def get_test_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')

    def get_labels(self):
        return ['entailment', 'not_entailment']

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = f'{set_type}-{line[0]}'
            text_a = line[1]
            text_b = line[2]
            label = None if set_type == 'test' else line[-1]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class ExtraKeyTLV(TLV):
    typ = 8
    __slots__ = ['appid', 'appdata']

    def __init__(self, appid, appdata):
        super(ExtraKeyTLV, self).__init__()
        self.appid = appid
        self.appdata = appdata
        if appdata is None:
            self.appdata = b''

    def getPayload(self):
        return self.appid + self.appdata

    @classmethod
    def parsePayload(cls, data):
        return cls(data[:4], data[4:])
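# Round-trip sketch for ExtraKeyTLV (assumes the TLV base class from this
# module): the payload is a fixed 4-byte app id followed by opaque app data,
# so parsePayload(getPayload()) is lossless.
tlv = ExtraKeyTLV(b'APP1', b'\x00\x01\x02')
parsed = ExtraKeyTLV.parsePayload(tlv.getPayload())
assert (parsed.appid, parsed.appdata) == (b'APP1', b'\x00\x01\x02')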
def parse_rst(text: str) -> docutils.nodes.document:
    import docutils.frontend
    import docutils.nodes
    import docutils.parsers.rst
    import docutils.utils
    parser = docutils.parsers.rst.Parser()
    components = (docutils.parsers.rst.Parser,)
    settings = docutils.frontend.OptionParser(components=components).get_default_values()
    document = docutils.utils.new_document('<rst-doc>', settings=settings)
    parser.parse(text, document)
    return document
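# Example: parse a small reStructuredText snippet and inspect the tree.
doc = parse_rst('Title\n=====\n\nSome *emphasized* text.')
print(doc.astext())    # plain-text rendering of the parsed document
print(doc[0].tagname)  # 'section' (the underlined title opens a section node)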
def split_images_by_identify(src_dir, dst_dir):
    plant_identifier = plantid.PlantIdentifier()
    filenames = glob.glob(os.path.join(src_dir, '*'))
    start_time = time.time()
    for k, filename in enumerate(filenames):
        image = khandy.imread_cv(filename)
        outputs = plant_identifier.identify(image, topk=1)
        if outputs['status'] == 0:
            chinese_name = outputs['results'][0]['chinese_name']
            latin_name = outputs['results'][0]['latin_name']
            confidence = outputs['results'][0]['probability']
            if latin_name == '':
                taxon_name = chinese_name
            else:
                taxon_name = '{} {}'.format(chinese_name, latin_name)
            if confidence > 0.1:
                dst_subdir = os.path.join(dst_dir, taxon_name)
                os.makedirs(dst_subdir, exist_ok=True)
                dst_filename = os.path.join(dst_subdir, '{:.3f}_{}'.format(confidence, os.path.basename(filename)))
                shutil.move(filename, dst_filename)
        print('[{}/{}] Time: {:.3f}s {}'.format(k + 1, len(filenames), time.time() - start_time, filename))
        start_time = time.time()
class QMysqlServer():
    def __init__(self, **kwargs):
        self.auto_disabled = None
        self.process = None
        self.uuid = 'honeypotslogger' + '_' + __class__.__name__ + '_' + str(uuid4())[:8]
        self.config = kwargs.get('config', '')
        if self.config:
            self.logs = setup_logger(__class__.__name__, self.uuid, self.config)
            set_local_vars(self, self.config)
        else:
            self.logs = setup_logger(__class__.__name__, self.uuid, None)
        self.ip = kwargs.get('ip', None) or (hasattr(self, 'ip') and self.ip) or '0.0.0.0'
        self.port = (kwargs.get('port', None) and int(kwargs.get('port', None))) or (hasattr(self, 'port') and self.port) or 3306
        self.username = kwargs.get('username', None) or (hasattr(self, 'username') and self.username) or 'test'
        self.password = kwargs.get('password', None) or (hasattr(self, 'password') and self.password) or 'test'
        self.options = kwargs.get('options', '') or (hasattr(self, 'options') and self.options) or getenv('HONEYPOTS_OPTIONS', '') or ''
        self.words = [self.password.encode()]
        disable_logger(1, tlog)

    def load_words(self):
        with open(self.file_name, 'r', encoding='utf-8') as file:
            self.words = file.read().splitlines()

    def greeting(self):
        base = ['\n', '5.7.00' + '\x00', '6\x00\x00\x00', '' + '\x00', 'y', '!', '\x02\x00', '\x0f\x81', '\x15', '\x00' * 10, '' + '\x00', 'mysql_native_password' + '\x00']
        payload_len = list(pack('<I', len(''.join(base))))
        string_ = chr(payload_len[0]) + chr(payload_len[1]) + chr(payload_len[2]) + '\x00' + ''.join(base)
        string_ = bytes([ord(c) for c in string_])
        return string_

    def too_many(self):
        base = ['y', '\x10\x04', '#08004', 'Too many connections']
        payload_len = list(pack('<I', len(''.join(base))))
        string_ = chr(payload_len[0]) + chr(payload_len[1]) + chr(payload_len[2]) + '\x02' + ''.join(base)
        string_ = bytes([ord(c) for c in string_])
        return string_

    def access_denied(self):
        base = ['y', '\x15\x04', '#28000', 'Access denied..']
        payload_len = list(pack('<I', len(''.join(base))))
        string_ = chr(payload_len[0]) + chr(payload_len[1]) + chr(payload_len[2]) + '\x02' + ''.join(base)
        string_ = bytes([ord(c) for c in string_])
        return string_

    def parse_data(self, data):
        username, password = '', ''
        with suppress(Exception):
            username_len = data[36:].find(b'\x00')
            username = data[36:].split(b'\x00')[0]
            password_len = data[36 + username_len + 1]
            password = data[36 + username_len + 2:36 + username_len + 2 + password_len]
            rest_ = data[36 + username_len + 2 + password_len:]
            if len(password) == 20:
                return (username, password, True)
        return (username, password, False)

    def decode(self, hash):
        with suppress(Exception):
            for word in self.words:
                temp = word
                word = word.strip(b'\n')
                hash1 = sha1(word).digest()
                hash2 = sha1(hash1).digest()
                encrypted = [a ^ b for a, b in zip(hash1, sha1(b'' + hash2).digest())]
                if encrypted == list([i for i in hash]):
                    return temp
        return None

    def mysql_server_main(self):
        _q_s = self

        class CustomMysqlProtocol(Protocol):
            _state = None

            def check_bytes(self, string):
                with suppress(Exception):
                    if isinstance(string, bytes):
                        return string.decode('utf-8', 'ignore')
                    else:
                        return str(string)
                return string

            def connectionMade(self):
                self._state = 1
                self.transport.write(_q_s.greeting())
                _q_s.logs.info({'server': 'mysql_server', 'action': 'connection', 'src_ip': self.transport.getPeer().host, 'src_port': self.transport.getPeer().port, 'dest_ip': _q_s.ip, 'dest_port': _q_s.port})

            def dataReceived(self, data):
                try:
                    if self._state == 1:
                        ret_access_denied = False
                        username, password, good = _q_s.parse_data(data)
                        username = self.check_bytes(username)
                        status = 'failed'
                        if good:
                            if password:
                                password_decoded = _q_s.decode(password)
                                if password_decoded is not None and username == _q_s.username:
                                    password = self.check_bytes(password_decoded)
                                    status = 'success'
                                else:
                                    password = password.hex()
                                    ret_access_denied = True
                        else:
                            ret_access_denied = True
                            password = ':'.join(hex(c)[2:] for c in data)
                        _q_s.logs.info({'server': 'mysql_server', 'action': 'login', 'status': status, 'src_ip': self.transport.getPeer().host, 'src_port': self.transport.getPeer().port, 'username': username, 'password': password, 'dest_ip': _q_s.ip, 'dest_port': _q_s.port})
                        if ret_access_denied:
                            self.transport.write(_q_s.access_denied())
                        else:
                            self.transport.write(_q_s.too_many())
                    else:
                        self.transport.loseConnection()
                except BaseException:
                    self.transport.write(_q_s.too_many())
                    self.transport.loseConnection()

            def connectionLost(self, reason):
                self._state = None

        factory = Factory()
        factory.protocol = CustomMysqlProtocol
        reactor.listenTCP(port=self.port, factory=factory, interface=self.ip)
        reactor.run()

    def run_server(self, process=False, auto=False):
        status = 'error'
        run = False
        if process:
            if auto and not self.auto_disabled:
                port = get_free_port()
                if port > 0:
                    self.port = port
                    run = True
            elif self.close_port() and self.kill_server():
                run = True
            if run:
                self.process = Popen(['python3', path.realpath(__file__), '--custom', '--ip', str(self.ip), '--port', str(self.port), '--username', str(self.username), '--password', str(self.password), '--options', str(self.options), '--config', str(self.config), '--uuid', str(self.uuid)])
                if self.process.poll() is None and check_if_server_is_running(self.uuid):
                    status = 'success'
            self.logs.info({'server': 'mysql_server', 'action': 'process', 'status': status, 'src_ip': self.ip, 'src_port': self.port, 'username': self.username, 'password': self.password, 'dest_ip': self.ip, 'dest_port': self.port})
            if status == 'success':
                return True
            else:
                self.kill_server()
                return False
        else:
            self.mysql_server_main()

    def close_port(self):
        ret = close_port_wrapper('mysql_server', self.ip, self.port, self.logs)
        return ret

    def kill_server(self):
        ret = kill_server_wrapper('mysql_server', self.uuid, self.process)
        return ret

    def test_server(self, ip=None, port=None, username=None, password=None):
        with suppress(Exception):
            from mysql.connector import connect as mysqlconnect
            _ip = ip or self.ip
            _port = port or self.port
            _username = username or self.username
            _password = password or self.password
            cnx = mysqlconnect(user=_username, password=_password, host=_ip, port=_port, database='test', connect_timeout=1000)
def test_transform_direction__string(scalar_and_array):
    forward_transformer = Transformer.from_crs(4326, 3857)
    inverse_transformer = Transformer.from_crs(3857, 4326)
    assert_array_equal(
        inverse_transformer.transform(scalar_and_array(-33), scalar_and_array(24), direction='INVERSE'),
        forward_transformer.transform(scalar_and_array(-33), scalar_and_array(24), direction='FORWARD'),
    )
    ident_transformer = Transformer.from_crs(4326, 3857)
    assert_array_equal(
        ident_transformer.transform(scalar_and_array(-33), scalar_and_array(24), direction='IDENT'),
        (scalar_and_array(-33), scalar_and_array(24)),
    )
class TestDriverPsi4(QiskitNatureTestCase, TestDriver):
    @unittest.skipIf(not _optionals.HAS_PSI4, 'psi4 not available.')  # assumed decorator; its name was truncated in the source
    def setUp(self):
        super().setUp()
        driver = Psi4Driver([
            'molecule h2 {',
            ' 0 1',
            ' H 0.0 0.0 0.0',
            ' H 0.0 0.0 0.735',
            ' no_com',
            ' no_reorient',
            '}',
            '',
            'set {',
            ' basis sto-3g',
            ' scf_type pk',
            '}',
        ])
        self.driver_result = driver.run()
def test_explicit_timer_with_initial_text_true(capsys: pytest.CaptureFixture[str]) -> None:
    t = Timer(text=TIME_MESSAGE, initial_text=True)
    t.start()
    waste_time()
    t.stop()
    stdout, stderr = capsys.readouterr()
    assert RE_TIME_MESSAGE_INITIAL_TEXT_TRUE.match(stdout)
    assert stdout.count('\n') == 2
    assert stderr == ''
class ReadFileRecordResponse(ModbusResponse):
    function_code = 20
    _rtu_byte_count_pos = 2

    def __init__(self, records=None, **kwargs):
        ModbusResponse.__init__(self, **kwargs)
        self.records = records or []

    def encode(self):
        total = sum(record.response_length + 1 for record in self.records)
        packet = struct.pack('B', total)
        for record in self.records:
            packet += struct.pack('>BB', 6, record.record_length)
            packet += record.record_data
        return packet

    def decode(self, data):
        count, self.records = 1, []
        byte_count = int(data[0])
        while count < byte_count:
            response_length, reference_type = struct.unpack('>BB', data[count:count + 2])
            count += response_length + 1
            record = FileRecord(response_length=response_length,
                                record_data=data[count - response_length + 1:count])
            if reference_type == 6:
                self.records.append(record)
def cardiac_data():
    data = {}
    for i in range(5):
        case_id = str(i + 1).zfill(3)
        ct_arr = np.ones((60, 128, 128)) * -1000
        mask_arr = np.zeros((60, 128, 128))
        submask_arr = np.zeros((60, 128, 128))
        ct_arr = insert_sphere(ct_arr, sp_radius=25, sp_centre=(30 + i, 64 + i, 64))
        mask_arr = insert_sphere(mask_arr, sp_radius=25, sp_centre=(30 + i, 64 + i, 64))
        submask_arr = insert_sphere(submask_arr, sp_radius=5, sp_centre=(30 + i, 60 + i, 60))
        ct = sitk.GetImageFromArray(ct_arr)
        ct.SetSpacing((0.9 + i * 0.01, 0.9 + i * 0.01, 2.5 + i * 0.01))
        ct.SetOrigin((320, -52, 60))
        mask = sitk.GetImageFromArray(mask_arr)
        mask.CopyInformation(ct)
        mask = sitk.Cast(mask, sitk.sitkUInt8)
        submask = sitk.GetImageFromArray(submask_arr)
        submask.CopyInformation(ct)
        submask = sitk.Cast(submask, sitk.sitkUInt8)
        data[case_id] = {'CT': ct, 'WHOLEHEART': mask, 'SUBSTRUCTURE': submask}
    return data
def maybe_resume_checkpoint(args, model, optimizer, scheduler, reporter, train_dl):
    if args.resume is not None and Path(args.resume).is_file():
        checkpoint = args.resume
        logging.info(f'Resume from the provided checkpoint {args.resume}')
    else:
        ckpts = list(Path(args.exp_dir).glob('ep*.checkpoint'))
        if len(ckpts) == 0:
            logging.info('Training from a randomly initialized model')
            return
        else:
            ckpts.sort(key=lambda x: os.stat(str(x)).st_ctime)
            checkpoint = str(ckpts[-1])
            logging.info(f'Automatically resume from the latest checkpoint {checkpoint}')
    state_dict = torch.load(checkpoint, map_location='cpu')
    FSDP.set_state_dict_type(model, StateDictType.FULL_STATE_DICT, FullStateDictConfig(rank0_only=False))
    model.load_state_dict(state_dict['model'])
    optimizer.load_state_dict(FSDP.optim_state_dict_to_load(state_dict['optimizer'], model, optimizer))
    scheduler.load_state_dict(state_dict['scheduler'])
    reporter.load_state_dict(state_dict['reporter'])
    train_dl.sampler.set_epoch(reporter.get_epoch() + 1)
    train_dl.sampler.refresh()
    del state_dict
class TestFDDBBCD(TestFDDB):
    def eval(self):
        bcd = build_whole_network.DetectionNetworkBCD(cfgs=self.cfgs, is_training=False)
        all_boxes_r = self.eval_with_plac(img_dir=self.args.img_dir, det_net=bcd, image_ext=self.args.image_ext)
        imgs = os.listdir(self.args.img_dir)
        real_test_imgname_list = [i.split(self.args.image_ext)[0] for i in imgs]
        print(10 * '**')
        print('rotation eval:')
        evaler = EVAL(self.cfgs)
        evaler.voc_evaluate_detections(all_boxes=all_boxes_r, test_imgid_list=real_test_imgname_list,
                                       test_annotation_path=self.args.test_annotation_path)
class SecondPipeline(FeatureSetPipeline):
    def __init__(self):
        super(SecondPipeline, self).__init__(
            source=Source(
                readers=[TableReader(id='t', database='db', table='table')],
                query='select * from t',
            ),
            feature_set=FeatureSet(
                name='second',
                entity='entity',
                description='description',
                features=[
                    Feature(name='feature1', description='test', dtype=DataType.STRING),
                    Feature(name='feature2', description='another test', dtype=DataType.FLOAT),
                ],
                keys=[KeyFeature(name='id', description='identifier', dtype=DataType.BIGINT)],
                timestamp=TimestampFeature(),
            ),
            sink=Sink(writers=[HistoricalFeatureStoreWriter(), OnlineFeatureStoreWriter()]),
        )
class LogicExpressionASTVisitor(ast.NodeVisitor):
    def __init__(self, globals=dict()):
        self.arg_pos = {}
        self.iddefs = {}
        self.globals = globals
        super().__init__()

    def generic_visit(self, node):
        print(ast.dump(node))
        raise NotImplementedError

    def get_arg_pos(self, node):
        arg_pos = {}
        for i, arg in enumerate(node.args.args):
            arg_pos[arg.arg] = i
        return arg_pos

    def visit_FunctionDef(self, node):
        self.arg_pos = self.get_arg_pos(node)
        for b in node.body:
            body_tree = self.visit(b)
        return FunDef(self.arg_pos, self.iddefs, body_tree.as_bool())

    def visit_Lambda(self, node):
        self.arg_pos = self.get_arg_pos(node)
        body_tree = self.visit(node.body)
        return FunDef(self.arg_pos, self.iddefs, body_tree.as_bool())

    def visit_Return(self, node):
        return self.visit(node.value).as_bool()

    def visit_UnaryOp(self, node):
        supported = {ast.Not: lambda opr: Not(opr.as_bool())}
        op_func = supported[type(node.op)]
        opr = self.visit(node.operand)
        return op_func(opr)

    def visit_Name(self, node):
        if node.id in self.iddefs:
            iddef = self.iddefs[node.id]
            return IdentifierRef(iddef)
        elif node.id in self.arg_pos:
            arg_name = node.id
            arg_pos = self.arg_pos[node.id]
            return Arg(arg_name, arg_pos)
        else:
            source = astor.to_source(node).strip()
            value = eval(source, self.globals)
            print('eval:', source, '->', value)
            return Const(value)

    def visit_Index(self, node):
        return self.visit(node.value)

    def visit_Slice(self, node):
        return Slice(node.lower, node.step, node.upper)

    def visit_ExtSlice(self, node):
        return ExtSlice([self.visit(dim) for dim in node.dims])

    def visit_Subscript(self, node):
        arg = self.visit(node.value)
        select = self.visit(node.slice)
        if isinstance(arg, Const):
            assert isinstance(select, Const)
            return Const(arg.value[select.value])
        if isinstance(select, Const):
            return VarList(arg, [select.value])
        elif isinstance(select, List):
            assert all([isinstance(e, Const) for e in select.elts])
            return VarList(arg, [e.value for e in select.elts])
        elif isinstance(select, ExtSlice):
            assert all([isinstance(e, (Const, Slice)) for e in select.slices])
            return VarList(arg, select)
        else:
            return VarCond(arg, select)

    def visit_Assign(self, node):
        assert len(node.targets) == 1
        id = node.targets[0].id
        definition = self.visit(node.value)
        iddef = IdentifierDef(id, definition)
        assert id not in self.iddefs
        self.iddefs[id] = iddef
        return iddef

    def visit_List(self, node):
        elts = [self.visit(elt) for elt in node.elts]
        return List(elts)

    def visit_Call(self, node):
        if isinstance(node.func, ast.Name):
            fname = node.func.id
            if fname == 'all':
                return Forall(self.visit(node.args[0]))
            if fname == 'any':
                return Exists(self.visit(node.args[0]))
        elif isinstance(node.func, ast.Attribute):
            fname = node.func.attr
            args = []
            caller = self.visit(node.func.value)
            if not (isinstance(caller, Const) and caller.value == torch):
                args.append(caller)
            args.extend(map(self.visit, node.args))
            if fname == 'logical_not':
                return Not(*args)
            if fname == 'logical_and':
                return And(*args)
            if fname == 'logical_or':
                return Or(*args)
            if fname == 'all':
                return ForallAlong(self.visit(node.func.value), self.visit(node.args[0]) if len(node.args) >= 1 else None)
            if fname == 'exists':
                return ExistsAlong(self.visit(node.func.value), self.visit(node.args[0]) if len(node.args) >= 1 else None)
        raise NotImplementedError(node)

    def visit_NameConstant(self, node):
        return Const(node.value)

    def visit_Num(self, node):
        return Const(node.n)

    def visit_Str(self, node):
        return Const(node.s)

    def visit_Constant(self, node):
        return Const(node.value)

    def visit_BoolOp(self, node):
        supported = {ast.And: lambda left, right: And(left.as_bool(), right.as_bool()),
                     ast.Or: lambda left, right: Or(left.as_bool(), right.as_bool())}
        op_func = supported[type(node.op)]
        trees = map(self.visit, node.values)
        return reduce(op_func, trees)

    def visit_Compare(self, node):
        supported = {ast.Eq: lambda left, right: IsEq(left, right),
                     ast.NotEq: lambda left, right: Not(IsEq(left, right)),
                     ast.LtE: lambda left, right: Implication(left.as_bool(), right.as_bool())}
        assert len(node.ops)
        op_func = supported[type(node.ops[0])]
        ltree = self.visit(node.left)
        assert len(node.comparators)
        rtree = self.visit(node.comparators[0])
        return op_func(ltree, rtree)
class TestBindCollector(CollectorTestCase):
    def setUp(self):
        config = get_collector_config('BindCollector', {'interval': 10})
        self.collector = BindCollector(config, None)

    def test_import(self):
        self.assertTrue(BindCollector)

    @patch.object(Collector, 'publish')  # assumed decorator; its name was truncated in the source
    def test_should_work_with_real_data(self, publish_mock):
        patch_urlopen = patch('urllib2.urlopen', Mock(return_value=self.getFixture('bind.xml')))
        patch_urlopen.start()
        self.collector.collect()
        patch_urlopen.stop()
        # Every expected metric is 0.0 in the fixture; build the dict from the
        # metric-name lists to keep the expectation readable.
        resstat = ['Queryv4', 'Queryv6', 'Responsev4', 'Responsev6', 'NXDOMAIN', 'SERVFAIL', 'FORMERR',
                   'OtherError', 'EDNS0Fail', 'Mismatch', 'Truncated', 'Lame', 'Retry', 'QueryAbort',
                   'QuerySockFail', 'QueryTimeout', 'GlueFetchv4', 'GlueFetchv6', 'GlueFetchv4Fail',
                   'GlueFetchv6Fail', 'ValAttempt', 'ValOk', 'ValNegOk', 'ValFail', 'QryRTT10',
                   'QryRTT100', 'QryRTT500', 'QryRTT800', 'QryRTT1600', 'QryRTT1600+']
        nsstat = ['Requestv4', 'Requestv6', 'ReqEdns0', 'ReqBadEDNSVer', 'ReqTSIG', 'ReqSIG0', 'ReqBadSIG',
                  'ReqTCP', 'AuthQryRej', 'RecQryRej', 'XfrRej', 'UpdateRej', 'Response', 'TruncatedResp',
                  'RespEDNS0', 'RespTSIG', 'RespSIG0', 'QrySuccess', 'QryAuthAns', 'QryNoauthAns',
                  'QryReferral', 'QryNxrrset', 'QrySERVFAIL', 'QryFORMERR', 'QryNXDOMAIN', 'QryRecursion',
                  'QryDuplicate', 'QryDropped', 'QryFailure', 'XfrReqDone', 'UpdateReqFwd', 'UpdateRespFwd',
                  'UpdateFwdFail', 'UpdateDone', 'UpdateFail', 'UpdateBadPrereq']
        zonestat = ['NotifyOutv4', 'NotifyOutv6', 'NotifyInv4', 'NotifyInv6', 'NotifyRej', 'SOAOutv4',
                    'SOAOutv6', 'AXFRReqv4', 'AXFRReqv6', 'IXFRReqv4', 'IXFRReqv6', 'XfrSuccess', 'XfrFail']
        sockstat = ['UDP4Open', 'UDP6Open', 'TCP4Open', 'TCP6Open', 'UnixOpen', 'UDP4OpenFail',
                    'UDP6OpenFail', 'TCP4OpenFail', 'TCP6OpenFail', 'UnixOpenFail', 'UDP4Close',
                    'UDP6Close', 'TCP4Close', 'TCP6Close', 'UnixClose', 'FDWatchClose', 'UDP4BindFail',
                    'UDP6BindFail', 'TCP4BindFail', 'TCP6BindFail', 'UnixBindFail', 'FdwatchBindFail',
                    'UDP4ConnFail', 'UDP6ConnFail', 'TCP4ConnFail', 'TCP6ConnFail', 'UnixConnFail',
                    'FDwatchConnFail', 'UDP4Conn', 'UDP6Conn', 'TCP4Conn', 'TCP6Conn', 'UnixConn',
                    'FDwatchConn', 'TCP4AcceptFail', 'TCP6AcceptFail', 'UnixAcceptFail', 'TCP4Accept',
                    'TCP6Accept', 'UnixAccept', 'UDP4SendErr', 'UDP6SendErr', 'TCP4SendErr', 'TCP6SendErr',
                    'UnixSendErr', 'FDwatchSendErr', 'UDP4RecvErr', 'UDP6RecvErr', 'TCP4RecvErr',
                    'TCP6RecvErr', 'UnixRecvErr', 'FDwatchRecvErr']
        metrics = {'view._default.resstat.{}'.format(s): 0.0 for s in resstat}
        metrics.update({'requests.QUERY': 0.0, 'queries.A': 0.0})
        metrics.update({'nsstat.{}'.format(s): 0.0 for s in nsstat})
        metrics.update({'zonestat.{}'.format(s): 0.0 for s in zonestat})
        metrics.update({'sockstat.{}'.format(s): 0.0 for s in sockstat})
        self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
@with_fixtures(WebFixture)  # assumed decorator; its name was truncated in the source
def test_bookmarks(web_fixture):
    fixture = web_fixture
    user_interface = UserInterface(None, '/a_ui', {}, False, 'test_ui')
    view = UrlBoundView(user_interface, '/aview', 'A View title')
    bookmark = view.as_bookmark()
    assert bookmark.href.path == '/a_ui/aview'
    assert bookmark.description == 'A View title'
    assert bookmark.base_path == '/a_ui'
    assert bookmark.relative_path == '/aview'
    a = A.from_bookmark(fixture.view, bookmark)
    assert str(a.href) == str(bookmark.href)
class PackagesSectionCamelCase_TestCase(TestCase):
    def setUp(self):
        super(PackagesSectionCamelCase_TestCase, self).setUp()
        ks_content = '%packages --instLangs=en\n%end\n'
        self._ks_path = mktempfile(ks_content)

    def runTest(self):
        retval, _out = ksvalidator.main([self._ks_path, '-v', 'F9'])
        self.assertEqual(retval, 0)
        retval, _out = ksvalidator.main([self._ks_path, '-v', 'F32'])
        self.assertNotEqual(retval, 0)

    def tearDown(self):
        super(PackagesSectionCamelCase_TestCase, self).tearDown()
        os.unlink(self._ks_path)
@torch.no_grad()
def main():
    args = parse_args()
    accelerator = Accelerator(mixed_precision=args.mixed_precision)
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
    accelerator.wait_for_everyone()
    if args.seed is not None:
        set_seed(args.seed)
    weight_dtype = torch.float32
    if accelerator.mixed_precision == 'fp16':
        weight_dtype = torch.float16
    elif accelerator.mixed_precision == 'bf16':
        weight_dtype = torch.bfloat16
    pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, torch_dtype=weight_dtype)
    pipe.set_progress_bar_config(disable=True)
    pipe.safety_checker = None
    pipe.requires_safety_checker = False
    model = StoryModel.from_pretrained(args)
    ckpt_name = 'pytorch_model.bin'
    model.load_state_dict(torch.load(Path(args.finetuned_model_path) / ckpt_name, map_location='cpu'))
    model = model.to(device=accelerator.device, dtype=weight_dtype)
    pipe.unet = model.unet
    if args.enable_xformers_memory_efficient_attention:
        pipe.unet.enable_xformers_memory_efficient_attention()
    pipe.text_encoder = model.text_encoder
    pipe.image_encoder = model.image_encoder
    pipe.postfuse_module = model.postfuse_module
    pipe.inference = types.MethodType(stable_diffusion_call_with_references_delayed_conditioning, pipe)
    del model
    pipe = pipe.to(accelerator.device)
    tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer', revision=args.revision)
    object_transforms = get_object_transforms(args)
    unique_token = '<|image|>'
    import json
    with open(f'{args.dataset_name}/split.json', 'r') as f:
        split = json.load(f)
    data = json.load(open(f'{args.dataset_name}/cleaned_annotations.json', 'r'))
    with open(f'{args.dataset_name}/following_cache3.pkl', 'rb') as f:
        following_cache = pickle.load(f)
    demo_dataset = EvalDataset(tokenizer=tokenizer, object_transforms=object_transforms, device=accelerator.device,
                               max_num_objects=args.max_num_objects, root=args.dataset_name, ref_image=args.ref_image)
    for image_id in tqdm(split['test']):
        gen_images = []
        image_id_first = image_id
        following_ids = [image_id]
        following_ids.extend(following_cache[image_id])
        for idx, image_id in enumerate(following_ids):
            segments = data[image_id]['segments']
            segments = sorted(segments, key=lambda x: x['end'])
            try:
                tokens = data[image_id]['tokens']
            except:
                tokens = data[image_id]['captions'][random.randrange(len(data[image_id]['captions']))].split(' ')
            chars = ['fred', 'wilma', 'barney', 'betty', 'pebbles', 'mr slate', 'dino']
            if args.ref_image == 'same':
                char_names = []
                inserted_tokens = []
                for token in tokens:
                    inserted_tokens.append(token)
                    if token.lower() in chars:
                        inserted_tokens.append(unique_token)
                        char_names.append({'word': token.lower()})
            elif args.ref_image == 'ori':
                char_names = []
                inserted_tokens = tokens.copy()
                for segment in reversed(segments):
                    if segment['word'] in chars:
                        char_names.append({'word': segment['word'].lower()})
                    else:
                        end = segment['end']
                        inserted_tokens.insert(int(end), unique_token)
                        char_names.append(segment)
                inserted_tokens_new = []
                for idx, token in enumerate(inserted_tokens):
                    inserted_tokens_new.append(token)
                    if token.lower() in chars:
                        if inserted_tokens[idx + 1] != unique_token:
                            inserted_tokens_new.append(unique_token)
                            char_names.append({'word': token.lower()})
                inserted_tokens = inserted_tokens_new
            else:
                char_names = []
                inserted_tokens = tokens
            prompt = ' '.join(inserted_tokens)
            prompt_text_only = prompt.replace(unique_token, '')
            os.makedirs(args.output_dir, exist_ok=True)
            batch = demo_dataset.get_data(prompt, char_names, image_id)
            input_ids = batch['input_ids'].to(accelerator.device)
            image_token_mask = batch['image_token_mask'].to(accelerator.device)
            all_object_pixel_values = batch['object_pixel_values'].unsqueeze(0).to(accelerator.device)
            num_objects = batch['num_objects'].unsqueeze(0).to(accelerator.device)
            all_object_pixel_values = all_object_pixel_values.to(dtype=weight_dtype, device=accelerator.device)
            object_pixel_values = all_object_pixel_values
            if pipe.image_encoder is not None:
                object_embeds = pipe.image_encoder(object_pixel_values)
            else:
                object_embeds = None
            with torch.no_grad():
                encoder_hidden_states = pipe.text_encoder(input_ids)[0]
                encoder_hidden_states_text_only = pipe._encode_prompt(prompt_text_only, accelerator.device,
                                                                      args.num_images_per_prompt,
                                                                      do_classifier_free_guidance=False)
                encoder_hidden_states = pipe.postfuse_module(encoder_hidden_states, object_embeds,
                                                             image_token_mask, num_objects)
            cross_attention_kwargs = {}
            images = pipe.inference(prompt_embeds=encoder_hidden_states, num_inference_steps=args.inference_steps,
                                    height=args.generate_height, width=args.generate_width,
                                    guidance_scale=args.guidance_scale,
                                    num_images_per_prompt=args.num_images_per_prompt,
                                    cross_attention_kwargs=cross_attention_kwargs,
                                    prompt_embeds_text_only=encoder_hidden_states_text_only,
                                    start_merge_step=args.start_merge_step).images
            for instance_id in range(args.num_images_per_prompt):
                save_dir = f'{args.output_dir}/{image_id_first}'
                os.makedirs(save_dir, exist_ok=True)
                images[instance_id].save(os.path.join(save_dir, f'{image_id}.png'))
            gen_images.append(images[0])
def meteor_chained_flow_module(xyz, xyz_flowed, time, points, npoint, radius, nsample, mlp, mlp2,
                               group_all, is_training, bn_decay, scope, module_type='ind', fps=True,
                               bn=True, pooling='max', knn=False, use_xyz=True, use_nchw=False):
    data_format = 'NCHW' if use_nchw else 'NHWC'
    sample_idx = None
    batch_size = xyz.get_shape()[0].value
    ndataset = xyz.get_shape()[1].value
    nframes = xyz_flowed.get_shape()[2].value
    with tf.variable_scope(scope) as sc:
        if fps:
            sample_idx = farthest_point_sample(npoint, xyz)
        else:
            sample_idx = tf.tile(tf.expand_dims(tf.range(npoint, dtype=tf.int32), 0), [batch_size, 1])
        new_xyz = gather_point(xyz, sample_idx)
        new_points = gather_point(points, sample_idx)
        xyz_flowed_reshaped = tf.reshape(xyz_flowed, [batch_size, ndataset, -1])
        new_xyz_flowed = gather_point(xyz_flowed_reshaped, sample_idx)
        new_xyz_flowed = tf.reshape(new_xyz_flowed, [batch_size, -1, nframes, 3])
        new_time = gather_point(time, sample_idx)
        time_ = tf.reshape(time, [batch_size, 1, -1])
        new_time_ = tf.abs(new_time - time_)
        radius_ = tf.gather(radius, tf.cast(new_time_, tf.int32))
        time_squeeze = tf.squeeze(time)
        idx, pts_cnt = query_ball_point_var_rad_var_seed(radius_, nsample, xyz, time_squeeze, new_xyz_flowed)
        grouped_xyz = group_point(xyz, idx)
        grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])
        if points is not None:
            grouped_points = group_point(points, idx)
            grouped_time = group_point(time, idx)
            if use_xyz:
                if module_type == 'ind':
                    new_points = tf.concat([grouped_xyz, grouped_time, grouped_points], axis=-1)
                else:
                    new_points_expand = tf.tile(tf.expand_dims(new_points, 2), [1, 1, nsample, 1])
                    new_points = tf.concat([grouped_xyz, grouped_time, grouped_points, new_points_expand], axis=-1)
            else:
                new_points = grouped_points
        else:
            new_points = grouped_xyz
        for i, num_out_channel in enumerate(mlp):
            new_points = tf_util.conv2d(new_points, num_out_channel, [1, 1], padding='VALID', stride=[1, 1],
                                        bn=bn, is_training=is_training, scope='conv%d' % i,
                                        bn_decay=bn_decay, data_format=data_format)
        new_points = tf.reduce_max(new_points, axis=[2], name='maxpool')
        return (new_xyz, None, new_time, new_points, idx)