code
stringlengths
281
23.7M
class ExampleModel(nn.Module):
    """Small toy network whose forward is an identity pass-through.

    Presumably a fixture for parameter/op-counting utilities (the submodules
    are registered but never used in forward) — TODO confirm against callers.
    """

    def __init__(self):
        super().__init__()
        # Scalar learnable parameter; registered but unused in forward.
        self.param1 = nn.Parameter(torch.ones(1))
        self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False)
        self.conv2 = nn.Conv2d(4, 2, kernel_size=1)
        self.bn = nn.BatchNorm2d(2)
        self.sub = SubModel()  # NOTE(review): SubModel defined elsewhere in the project
        if OPS_AVAILABLE:
            # Deformable convolution is only added when mmcv ops import cleanly.
            from mmcv.ops import DeformConv2dPack
            self.dcn = DeformConv2dPack(3, 4, kernel_size=3, deformable_groups=1)

    def forward(self, x):
        # Identity: input is returned unchanged.
        return x
def removeByPossibleDsep(graph: Graph, independence_test_method: CIT, alpha: float, sep_sets: Dict[(Tuple[(int, int)], Set[int])]):
    """Prune graph edges using Possible-D-Sep conditioning sets (FCI-style step).

    For each edge a–b, subsets (size >= 2) of PossibleDsep(a, b) — and, if the
    edge survives, of PossibleDsep(b, a) — are tested as conditioning sets.
    If a conditional-independence test exceeds alpha, the edge is removed and
    the separating set is recorded.

    Args:
        graph: the graph, modified in place.
        independence_test_method: CI test callable (X, Y, condSet) -> p-value.
        alpha: significance level; p > alpha is treated as independence.
        sep_sets: out-param mapping (node-index, node-index) -> separating set
            of node indices.
    """
    def _contains_all(set_a: Set[Node], set_b: Set[Node]):
        # True iff set_b is a subset of set_a.
        for node_b in set_b:
            if (not set_a.__contains__(node_b)):
                return False
        return True
    edges = graph.get_graph_edges()
    for edge in edges:
        node_a = edge.get_node1()
        node_b = edge.get_node2()
        # First pass: condition on subsets of PossibleDsep(node_a, node_b).
        # -1 presumably means "no depth limit" — TODO confirm getPossibleDsep's contract.
        possibleDsep = getPossibleDsep(node_a, node_b, graph, (- 1))
        gen = DepthChoiceGenerator(len(possibleDsep), len(possibleDsep))
        choice = gen.next()
        while (choice is not None):
            origin_choice = choice
            choice = gen.next()
            # Only conditioning sets with at least two nodes are considered.
            if (len(origin_choice) < 2):
                continue
            sepset = tuple([possibleDsep[index] for index in origin_choice])
            # Skip candidates fully contained in either endpoint's adjacency
            # (those were already handled by adjacency-based removal, presumably).
            if _contains_all(set(graph.get_adjacent_nodes(node_a)), set(sepset)):
                continue
            if _contains_all(set(graph.get_adjacent_nodes(node_b)), set(sepset)):
                continue
            (X, Y) = (graph.get_node_map()[node_a], graph.get_node_map()[node_b])
            condSet_index = tuple([graph.get_node_map()[possibleDsep[index]] for index in origin_choice])
            p_value = independence_test_method(X, Y, condSet_index)
            independent = (p_value > alpha)
            if independent:
                graph.remove_edge(edge)
                sep_sets[(X, Y)] = set(condSet_index)
                break
        # Second pass, only if the edge survived: same search with the
        # endpoints' roles swapped.
        # NOTE(review): this duplicates the loop above verbatim — a candidate
        # for extraction into a helper.
        if graph.contains_edge(edge):
            possibleDsep = getPossibleDsep(node_b, node_a, graph, (- 1))
            gen = DepthChoiceGenerator(len(possibleDsep), len(possibleDsep))
            choice = gen.next()
            while (choice is not None):
                origin_choice = choice
                choice = gen.next()
                if (len(origin_choice) < 2):
                    continue
                sepset = tuple([possibleDsep[index] for index in origin_choice])
                if _contains_all(set(graph.get_adjacent_nodes(node_a)), set(sepset)):
                    continue
                if _contains_all(set(graph.get_adjacent_nodes(node_b)), set(sepset)):
                    continue
                (X, Y) = (graph.get_node_map()[node_a], graph.get_node_map()[node_b])
                condSet_index = tuple([graph.get_node_map()[possibleDsep[index]] for index in origin_choice])
                p_value = independence_test_method(X, Y, condSet_index)
                independent = (p_value > alpha)
                if independent:
                    graph.remove_edge(edge)
                    sep_sets[(X, Y)] = set(condSet_index)
                    break
class Migration(migrations.Migration):
    """Add a `type` classification field to ProposalComment.

    0 = 'Unclassified' (default), 1 = 'Second phase voting'.
    """

    dependencies = [('proposals', '0016_auto__0240')]

    operations = [migrations.AddField(model_name='proposalcomment', name='type', field=models.PositiveSmallIntegerField(default=0, choices=[(0, 'Unclassified'), (1, 'Second phase voting')]), preserve_default=True)]
def test_with_relative_markers(item_names_for):
    """Items with both relative (`before=`) and absolute index order markers
    are duplicated per-marker and sorted accordingly.

    NOTE(review): the fixture source below had lost its `@pytest.mark` prefix
    (the marker lines read bare `.order(...)`, which is not valid Python and
    could never produce the marker-derived item names asserted here);
    restored to `@pytest.mark.order(...)`.
    """
    test_content = """
        import pytest

        def test_1():
            pass

        @pytest.mark.order(before="test_1")
        @pytest.mark.order(2)
        def test_2():
            pass

        @pytest.mark.order(1)
        @pytest.mark.order(before="test_1")
        def test_3():
            pass

        @pytest.mark.order(-1)
        @pytest.mark.order(-3)
        def test_4():
            pass

        @pytest.mark.order(-2)
        @pytest.mark.order(-4)
        def test_5():
            pass
        """
    assert (item_names_for(test_content) == ['test_3[index=1]', 'test_2[index=2]', 'test_3[before=test_1]', 'test_2[before=test_1]', 'test_1', 'test_5[index=-4]', 'test_4[index=-3]', 'test_5[index=-2]', 'test_4[index=-1]'])
class EditInlineText():
    async def edit_inline_text(self: 'pyrogram.Client', inline_message_id: str, text: str, parse_mode: Optional['enums.ParseMode']=None, disable_web_page_preview: bool=None, reply_markup: 'types.InlineKeyboardMarkup'=None) -> bool:
        """Edit the text of an inline-bot message.

        Args:
            inline_message_id: packed identifier of the inline message.
            text: new message text.
            parse_mode: optional text parse mode passed to the parser.
            disable_web_page_preview: suppress the link preview when truthy.
            reply_markup: optional inline keyboard to attach.

        Returns:
            The raw API invoke result (bool per the annotation).
        """
        unpacked = utils.unpack_inline_message_id(inline_message_id)
        dc_id = unpacked.dc_id
        # The edit must go through a session bound to the DC that owns the
        # inline message, not necessarily the client's main session.
        session = (await get_session(self, dc_id))
        return (await session.invoke(raw.functions.messages.EditInlineBotMessage(id=unpacked, no_webpage=(disable_web_page_preview or None), reply_markup=((await reply_markup.write(self)) if reply_markup else None), **(await self.parser.parse(text, parse_mode))), sleep_threshold=self.sleep_threshold))
def _parse_item(source, info):
    """Parse one pattern item: an element optionally followed by a quantifier.

    Returns the element unchanged when there is no quantifier, the element is
    empty, or the quantifier is exactly {1,1}; otherwise wraps it in the
    appropriate repeat node.
    """
    element = _parse_element(source, info)
    counts = _parse_quantifier(source, info)
    if (counts is not None):
        (min_count, max_count) = (counts.min_count, counts.max_count)
        # A {1,1} quantifier (or an empty element) is a no-op.
        if (element.is_empty() or (min_count == max_count == 1)):
            return element
        if source.match(u'?'):
            # Trailing '?': lazy (non-greedy) repeat.
            return LazyRepeat(element, min_count, max_count)
        elif source.match(u'+'):
            # Trailing '+': possessive repeat.
            if counts.limited_quantifier:
                # Bounded quantifier (e.g. {n,m}+): modelled as an extra
                # greedy repeat rather than an atomic group — TODO confirm
                # this matches the intended possessive semantics.
                return GreedyRepeat(GreedyRepeat(element, min_count, max_count), 1, MAXREPEAT)
            else:
                return make_atomic(info, GreedyRepeat(element, min_count, max_count))
        else:
            # Plain greedy repeat.
            return GreedyRepeat(element, min_count, max_count)
    return element
def _get_ade20k_pairs(folder, mode='train'): img_paths = [] mask_paths = [] if (mode == 'train'): img_folder = os.path.join(folder, 'images/training') mask_folder = os.path.join(folder, 'annotations/training') else: img_folder = os.path.join(folder, 'images/validation') mask_folder = os.path.join(folder, 'annotations/validation') for filename in os.listdir(img_folder): (basename, _) = os.path.splitext(filename) if filename.endswith('.jpg'): imgpath = os.path.join(img_folder, filename) maskname = (basename + '.png') maskpath = os.path.join(mask_folder, maskname) if os.path.isfile(maskpath): img_paths.append(imgpath) mask_paths.append(maskpath) else: logging.info('cannot find the mask:', maskpath) return (img_paths, mask_paths)
def main():
    """Driver for retrieval training/evaluation: builds tokenizer, model and
    dataloaders, optionally freezes lower CLIP layers, then trains and/or
    evaluates depending on args.do_train / args.do_eval.
    """
    global logger
    args = get_args()
    args = set_seed_logger(args)
    (device, n_gpu) = init_device(args, args.local_rank)
    tokenizer = ClipTokenizer()
    assert (args.task_type == 'retrieval')
    model = init_model(args, device, n_gpu, args.local_rank)
    # Freeze the lowest `freeze_layer_num` CLIP transformer blocks; the
    # projection/LN heads listed below always stay trainable.
    assert ((args.freeze_layer_num <= 12) and (args.freeze_layer_num >= (- 1)))
    if (hasattr(model, 'clip') and (args.freeze_layer_num > (- 1))):
        for (name, param) in model.clip.named_parameters():
            if ((name.find('ln_final.') == 0) or (name.find('text_projection') == 0) or (name.find('logit_scale') == 0) or (name.find('visual.ln_post.') == 0) or (name.find('visual.proj') == 0)):
                continue
            elif ((name.find('visual.transformer.resblocks.') == 0) or (name.find('transformer.resblocks.') == 0)):
                layer_num = int(name.split('.resblocks.')[1].split('.')[0])
                if (layer_num >= args.freeze_layer_num):
                    continue
            # NOTE(review): str.find returns -1 (truthy) when 'conv2.' is
            # absent, so for linear_patch == '3d' this skips freezing for
            # almost every parameter — looks like it was meant to be
            # `name.find('conv2.') == 0`; confirm against upstream.
            if ((args.linear_patch == '3d') and name.find('conv2.')):
                continue
            else:
                param.requires_grad = False
    assert (args.datatype in DATALOADER_DICT)
    assert ((DATALOADER_DICT[args.datatype]['test'] is not None) or (DATALOADER_DICT[args.datatype]['val'] is not None))
    # Fall back between test/val loaders so both are always populated.
    (test_dataloader, test_length) = (None, 0)
    if (DATALOADER_DICT[args.datatype]['test'] is not None):
        (test_dataloader, test_length) = DATALOADER_DICT[args.datatype]['test'](args, tokenizer)
    if (DATALOADER_DICT[args.datatype]['val'] is not None):
        (val_dataloader, val_length) = DATALOADER_DICT[args.datatype]['val'](args, tokenizer, subset='val')
    else:
        (val_dataloader, val_length) = (test_dataloader, test_length)
    if (test_dataloader is None):
        (test_dataloader, test_length) = (val_dataloader, val_length)
    # Only rank 0 logs (distributed setting).
    if (args.local_rank == 0):
        logger.info('***** Running test *****')
        logger.info(' Num examples = %d', test_length)
        logger.info(' Batch size = %d', args.batch_size_val)
        logger.info(' Num steps = %d', len(test_dataloader))
        logger.info('***** Running val *****')
        logger.info(' Num examples = %d', val_length)
    if args.do_train:
        (train_dataloader, train_length, train_sampler) = DATALOADER_DICT[args.datatype]['train'](args, tokenizer)
        num_train_optimization_steps = ((int(((len(train_dataloader) + args.gradient_accumulation_steps) - 1)) / args.gradient_accumulation_steps) * args.epochs)
        coef_lr = args.coef_lr
        (optimizer, scheduler, model) = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.local_rank, coef_lr=coef_lr)
        if (args.local_rank == 0):
            logger.info('***** Running training *****')
            logger.info(' Num examples = %d', train_length)
            logger.info(' Batch size = %d', args.batch_size)
            logger.info(' Num steps = %d', (num_train_optimization_steps * args.gradient_accumulation_steps))
        best_score = 1e-05
        best_output_model_file = 'None'
        resumed_epoch = 0
        # Optionally resume optimizer state and epoch counter from checkpoint.
        if args.resume_model:
            checkpoint = torch.load(args.resume_model, map_location='cpu')
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            resumed_epoch = (checkpoint['epoch'] + 1)
            resumed_loss = checkpoint['loss']
        global_step = 0
        for epoch in range(resumed_epoch, args.epochs):
            train_sampler.set_epoch(epoch)
            (tr_loss, global_step) = train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=args.local_rank)
            if (args.local_rank == 0):
                logger.info('Epoch %d/%s Finished, Train Loss: %f', (epoch + 1), args.epochs, tr_loss)
                output_model_file = save_model(epoch, args, model, optimizer, tr_loss, type_name='')
                # Track the checkpoint with the best validation R1.
                logger.info('Eval on val dataset')
                R1 = eval_epoch(args, model, val_dataloader, device, n_gpu)
                if (best_score <= R1):
                    best_score = R1
                    best_output_model_file = output_model_file
                logger.info('The best model is: {}, the R1 is: {:.4f}'.format(best_output_model_file, best_score))
        # Final test pass with the best checkpoint.
        if (args.local_rank == 0):
            model = load_model((- 1), args, n_gpu, device, model_file=best_output_model_file)
            eval_epoch(args, model, test_dataloader, device, n_gpu)
    elif args.do_eval:
        if (args.local_rank == 0):
            eval_epoch(args, model, test_dataloader, device, n_gpu)
class CommandHandler(BaseHandler[(Update, CCT)]):
    """Handler for Telegram ``/command``-style messages.

    BUG FIX: ``check_update`` previously called ``command.split('')``, which
    raises ``ValueError: empty separator`` for every incoming command. The
    command text must be split on ``'@'`` so that ``/cmd@BotName`` separates
    into the command and the addressed bot's username.
    """

    __slots__ = ('commands', 'filters', 'has_args')

    def __init__(self, command: SCT[str], callback: HandlerCallback[(Update, CCT, RT)], filters: Optional[filters_module.BaseFilter]=None, block: DVType[bool]=DEFAULT_TRUE, has_args: Optional[Union[(bool, int)]]=None):
        """Validate and normalize the command name(s) and argument policy.

        Raises:
            ValueError: if a command is not 1-32 chars of [0-9a-z_], or if
                has_args is a negative integer.
        """
        super().__init__(callback, block=block)
        if isinstance(command, str):
            commands = frozenset({command.lower()})
        else:
            commands = frozenset((x.lower() for x in command))
        for comm in commands:
            if (not re.match('^[\\da-z_]{1,32}$', comm)):
                raise ValueError(f'Command `{comm}` is not a valid bot command')
        self.commands: FrozenSet[str] = commands
        # Default to handling ordinary message updates when no filter given.
        self.filters: filters_module.BaseFilter = (filters if (filters is not None) else filters_module.UpdateType.MESSAGES)
        self.has_args: Optional[Union[(bool, int)]] = has_args
        if (isinstance(self.has_args, int) and (self.has_args < 0)):
            raise ValueError('CommandHandler argument has_args cannot be a negative integer')

    def _check_correct_args(self, args: List[str]) -> Optional[bool]:
        """True iff the received args satisfy the has_args constraint
        (None = anything; True = at least one; False = none; int = exact count).
        """
        if ((self.has_args is None) or ((self.has_args is True) and args) or ((self.has_args is False) and (not args)) or (isinstance(self.has_args, int) and (len(args) == self.has_args))):
            return True
        return False

    def check_update(self, update: object) -> Optional[Union[(bool, Tuple[(List[str], Optional[Union[(bool, FilterDataDict)]])])]]:
        """Return (args, filter_result) when this handler should process the
        update, False when the filters reject it, and None when it is not a
        matching bot command at all.
        """
        if (isinstance(update, Update) and update.effective_message):
            message = update.effective_message
            # Only messages whose first entity is a bot command at offset 0.
            if (message.entities and (message.entities[0].type == MessageEntity.BOT_COMMAND) and (message.entities[0].offset == 0) and message.text and message.get_bot()):
                command = message.text[1:message.entities[0].length]
                args = message.text.split()[1:]
                # BUG FIX: split on '@' (was split('') -> ValueError) so that
                # '/cmd@BotName' yields ['cmd', 'BotName']; appending our own
                # username makes the check below work when no mention is given.
                command_parts = command.split('@')
                command_parts.append(message.get_bot().username)
                if (not ((command_parts[0].lower() in self.commands) and (command_parts[1].lower() == message.get_bot().username.lower()))):
                    return None
                if (not self._check_correct_args(args)):
                    return None
                filter_result = self.filters.check_update(update)
                if filter_result:
                    return (args, filter_result)
                return False
        return None

    def collect_additional_context(self, context: CCT, update: Update, application: 'Application[Any, CCT, Any, Any, Any, Any]', check_result: Optional[Union[(bool, Tuple[(List[str], Optional[bool])])]]) -> None:
        """Copy parsed args (and any filter data dict) onto the context."""
        if isinstance(check_result, tuple):
            context.args = check_result[0]
            if isinstance(check_result[1], dict):
                context.update(check_result[1])
def _run_segmentation_evaluation(all_predictions, all_labels, num_classes):
    """Accumulate per-class IoU across all videos and compute F1 scores.

    Args:
        all_predictions: per-video class-score arrays; argmax over axis 1
            yields class indices (assumes shape (frames, num_classes) — TODO
            confirm against callers).
        all_labels: per-video change labels, converted to segmentation
            targets via segmentation_targets_from_change_labels.
        num_classes: number of segmentation classes.

    Returns:
        (f1_macro, f1_micro, f1_manual, mean_iou, per_class_iou)
    """
    intersection_counts_per_class = np.zeros(num_classes, dtype=np.float32)
    union_counts_per_class = np.zeros(num_classes, dtype=np.float32)
    all_targets = [segmentation_targets_from_change_labels(labels) for labels in all_labels]
    for (video_predictions, video_targets) in zip(all_predictions, all_targets):
        video_predictions_integers = video_predictions.argmax(axis=1)
        video_targets_integers = video_targets.argmax(axis=1)
        for class_index in range(num_classes):
            target_mask = (video_targets_integers == class_index)
            prediction_mask = (video_predictions_integers == class_index)
            intersection_count = np.sum(np.logical_and(target_mask, prediction_mask), dtype=np.float32)
            union_count = np.sum(np.logical_or(target_mask, prediction_mask), dtype=np.float32)
            intersection_counts_per_class[class_index] += intersection_count
            union_counts_per_class[class_index] += union_count
    # NOTE(review): a class absent from both predictions and targets has
    # union 0, so this division yields NaN which propagates into mean_iou —
    # confirm whether that is intended.
    per_class_iou = np.divide(intersection_counts_per_class, union_counts_per_class)
    mean_iou = float(np.mean(per_class_iou))
    (f1_macro, f1_micro, f1_manual) = calculate_f1_scores(all_targets, all_predictions, num_classes)
    return (f1_macro, f1_micro, f1_manual, mean_iou, per_class_iou)
def train(is_training, logits, input_x, labels, sess, images_train, labels_train):
    """TF1 graph-mode training loop: builds loss/EMA/optimizer ops, then runs
    FLAGS.max_steps momentum-SGD steps with stepwise LR decay and periodic
    checkpointing every FLAGS.test_stride steps.
    """
    global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
    val_step = tf.get_variable('val_step', [], initializer=tf.constant_initializer(0), trainable=False)
    loss_ = loss(logits, labels)
    predictions = tf.nn.softmax(logits)
    (top1_error, top5_error) = top_1_and_5(predictions, labels)
    # EMA of the training loss, updated alongside the batchnorm ops.
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, ema.apply([loss_]))
    # Separate EMA for validation-time top-1 error.
    ema = tf.train.ExponentialMovingAverage(0.9, val_step)
    val_op = tf.group(val_step.assign_add(1), ema.apply([top1_error]))
    top1_error_avg = ema.average(top1_error)
    learning_rate = tf.placeholder(tf.float32, shape=[])
    # NOTE(review): the optimizer is constructed with the constant
    # FLAGS.learning_rate, so feeding the `learning_rate` placeholder below
    # has no effect on the LR actually applied — the decay schedule looks
    # inert; confirm before relying on it.
    opt = tf.train.MomentumOptimizer(FLAGS.learning_rate, MOMENTUM)
    grads = opt.compute_gradients(loss_)
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    batchnorm_updates = tf.get_collection(UPDATE_OPS_COLLECTION)
    batchnorm_updates_op = tf.group(*batchnorm_updates)
    train_op = tf.group(apply_gradient_op, batchnorm_updates_op, name='train_op')
    saver = tf.train.Saver(tf.all_variables(), max_to_keep=((FLAGS.max_steps / FLAGS.test_stride) + 1))
    # Checkpoint steps: every test_stride, excluding step 0.
    testSteps = range(0, (FLAGS.max_steps + 1), FLAGS.test_stride)[1:]
    sess.run(tf.initialize_all_variables())
    sess.graph.finalize()
    tf.train.start_queue_runners(sess=sess)
    lr = FLAGS.learning_rate
    for x in xrange((FLAGS.max_steps + 1)):
        # Stepwise LR decay at fixed milestones.
        if ((x == 32000) or (x == 48000) or (x == 56000)):
            lr = (lr * 0.1)
        start_time = time.time()
        step = sess.run(global_step)
        i = [train_op, loss_]
        o = sess.run(i, {is_training: True, learning_rate: lr})
        loss_value = o[1]
        duration = (time.time() - start_time)
        assert (not np.isnan(loss_value)), 'Model diverged with loss = NaN'
        # NOTE(review): unreachable — the assert above already aborts on NaN.
        if np.isnan(loss_value):
            continue
        if ((step % 500) == 0):
            examples_per_sec = (FLAGS.batch_size / float(duration))
            format_str = 'step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)'
            print((format_str % (step, loss_value, examples_per_sec, duration)))
        if ((step > 1) and (step in testSteps)):
            checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=global_step)
def test_drop_when_img(view, imgfilename3x3):
    """Dropping image mime data adds exactly one item, and it is selected."""
    mime = QtCore.QMimeData()
    mime.setImageData(QtGui.QImage(imgfilename3x3))

    drop = MagicMock()
    drop.mimeData.return_value = mime
    drop.position.return_value = QtCore.QPointF(10.0, 20.0)

    view.dropEvent(drop)

    scene_items = view.scene.items()
    assert (len(scene_items) == 1)
    assert (scene_items[0].isSelected() is True)
@pytest.mark.parametrize('arg, confval, used', [('webkit', 'webengine', usertypes.Backend.QtWebKit), (None, 'webkit', usertypes.Backend.QtWebKit)])
def test_get_backend(monkeypatch, args, config_stub, arg, confval, used):
    """The --backend CLI argument takes precedence over the configured value.

    NOTE(review): the decorator had been garbled to a bare '.parametrize(...)'
    statement (a syntax error); restored the '@pytest.mark.' prefix.
    """
    real_import = __import__

    def fake_import(name, *args, **kwargs):
        # Make only the QtWebKit wrapper module fail to import.
        if (name != 'qutebrowser.qt.webkit'):
            return real_import(name, *args, **kwargs)
        raise ImportError
    args.backend = arg
    config_stub.val.backend = confval
    monkeypatch.setattr(builtins, '__import__', fake_import)
    assert (configinit.get_backend(args) == used)
def register_model(fn):
    """Decorator: register a model entrypoint in the module-level registries.

    Records the function in _model_entrypoints, maps it to its defining
    module, appends it to that module's __all__, and — when the module
    declares a matching entry in default_cfgs — registers the pretrained
    configs for each tag (the first tag is the default).
    """
    mod = sys.modules[fn.__module__]
    module_name_split = fn.__module__.split('.')
    module_name = (module_name_split[(- 1)] if len(module_name_split) else '')
    model_name = fn.__name__
    # Expose the entrypoint via the defining module's __all__.
    if hasattr(mod, '__all__'):
        mod.__all__.append(model_name)
    else:
        mod.__all__ = [model_name]
    _model_entrypoints[model_name] = fn
    _model_to_module[model_name] = module_name
    _module_to_models[module_name].add(model_name)
    if (hasattr(mod, 'default_cfgs') and (model_name in mod.default_cfgs)):
        cfg = mod.default_cfgs[model_name]
        if (not isinstance(cfg, DefaultCfg)):
            # Plain dict configs are wrapped into a single, untagged DefaultCfg.
            assert isinstance(cfg, dict)
            cfg = PretrainedCfg(**cfg)
            cfg = DefaultCfg(tags=deque(['']), cfgs={'': cfg})
        for (tag_idx, tag) in enumerate(cfg.tags):
            is_default = (tag_idx == 0)
            pretrained_cfg = cfg.cfgs[tag]
            if is_default:
                # The first tag also registers under the bare model name.
                _model_pretrained_cfgs[model_name] = pretrained_cfg
                if pretrained_cfg.has_weights:
                    _model_has_pretrained.add(model_name)
            if tag:
                # Tagged variants register as 'model_name.tag'.
                model_name_tag = '.'.join([model_name, tag])
                _model_pretrained_cfgs[model_name_tag] = pretrained_cfg
                if pretrained_cfg.has_weights:
                    _model_has_pretrained.add(model_name_tag)
                _model_with_tags[model_name].append(model_name_tag)
            else:
                _model_with_tags[model_name].append(model_name)
        _model_default_cfgs[model_name] = cfg
    return fn
class ID2D1RenderTarget(ID2D1Resource, com.pIUnknown):
    # COM vtable for Direct2D's ID2D1RenderTarget. Entry ORDER must match the
    # interface declaration in d2d1.h exactly; methods this binding does not
    # call are declared as bare STDMETHODs with no argument signature.
    _methods_ = [
        ('CreateBitmap', com.STDMETHOD()),
        ('CreateBitmapFromWicBitmap', com.STDMETHOD()),
        ('CreateSharedBitmap', com.STDMETHOD()),
        ('CreateBitmapBrush', com.STDMETHOD()),
        # Used for text fills: color + properties -> solid color brush.
        ('CreateSolidColorBrush', com.STDMETHOD(POINTER(D2D1_COLOR_F), c_void_p, POINTER(ID2D1SolidColorBrush))),
        ('CreateGradientStopCollection', com.STDMETHOD()),
        ('CreateLinearGradientBrush', com.STDMETHOD()),
        ('CreateRadialGradientBrush', com.STDMETHOD()),
        ('CreateCompatibleRenderTarget', com.STDMETHOD()),
        ('CreateLayer', com.STDMETHOD()),
        ('CreateMesh', com.STDMETHOD()),
        ('DrawLine', com.STDMETHOD()),
        ('DrawRectangle', com.STDMETHOD()),
        ('FillRectangle', com.STDMETHOD()),
        ('DrawRoundedRectangle', com.STDMETHOD()),
        ('FillRoundedRectangle', com.STDMETHOD()),
        ('DrawEllipse', com.STDMETHOD()),
        ('FillEllipse', com.STDMETHOD()),
        ('DrawGeometry', com.STDMETHOD()),
        ('FillGeometry', com.STDMETHOD()),
        ('FillMesh', com.STDMETHOD()),
        ('FillOpacityMask', com.STDMETHOD()),
        ('DrawBitmap', com.STDMETHOD()),
        ('DrawText', com.STDMETHOD(c_wchar_p, UINT, IDWriteTextFormat, POINTER(D2D1_RECT_F), ID2D1Brush, D2D1_DRAW_TEXT_OPTIONS, DWRITE_MEASURING_MODE)),
        # void-returning methods use com.METHOD(c_void, ...) instead of STDMETHOD.
        ('DrawTextLayout', com.METHOD(c_void, D2D_POINT_2F, IDWriteTextLayout, ID2D1Brush, UINT32)),
        ('DrawGlyphRun', com.METHOD(c_void, D2D_POINT_2F, POINTER(DWRITE_GLYPH_RUN), ID2D1Brush, UINT32)),
        ('SetTransform', com.METHOD(c_void)),
        ('GetTransform', com.STDMETHOD()),
        ('SetAntialiasMode', com.METHOD(c_void, D2D1_TEXT_ANTIALIAS_MODE)),
        ('GetAntialiasMode', com.STDMETHOD()),
        ('SetTextAntialiasMode', com.METHOD(c_void, D2D1_TEXT_ANTIALIAS_MODE)),
        ('GetTextAntialiasMode', com.STDMETHOD()),
        ('SetTextRenderingParams', com.STDMETHOD(IDWriteRenderingParams)),
        ('GetTextRenderingParams', com.STDMETHOD()),
        ('SetTags', com.STDMETHOD()),
        ('GetTags', com.STDMETHOD()),
        ('PushLayer', com.STDMETHOD()),
        ('PopLayer', com.STDMETHOD()),
        ('Flush', com.STDMETHOD(c_void_p, c_void_p)),
        ('SaveDrawingState', com.STDMETHOD()),
        ('RestoreDrawingState', com.STDMETHOD()),
        ('PushAxisAlignedClip', com.STDMETHOD()),
        ('PopAxisAlignedClip', com.STDMETHOD()),
        ('Clear', com.METHOD(c_void, POINTER(D2D1_COLOR_F))),
        ('BeginDraw', com.METHOD(c_void)),
        ('EndDraw', com.STDMETHOD(c_void_p, c_void_p)),
        ('GetPixelFormat', com.STDMETHOD()),
        ('SetDpi', com.STDMETHOD()),
        ('GetDpi', com.STDMETHOD()),
        ('GetSize', com.STDMETHOD()),
        ('GetPixelSize', com.STDMETHOD()),
        ('GetMaximumBitmapSize', com.STDMETHOD()),
        ('IsSupported', com.STDMETHOD()),
    ]
class TPlayer(TestCase):
    """Base test case for player backends; subclasses set NAME to pick the
    backend module. Records song-started/ended events and pause signals, and
    verifies in tearDown that every 'ended' matches a prior 'started'.
    """

    # Backend name; None in the abstract base.
    NAME = None

    def setUp(self):
        config.init()
        # Use inert audio sinks so tests don't need real audio output.
        config.set('player', 'gst_pipeline', 'fakesink')
        config.set('settings', 'xine_driver', 'none')
        module = player.init_backend(self.NAME)
        lib = library.init()
        self.player = module.init(lib.librarian)
        source = PlaylistModel()
        source.set(FILES)
        self.events = []

        # Record ('started'|'ended', song) pairs; the label is passed as the
        # trailing connect() user-data argument.
        def start_end_handler(player, song, *args):
            self.events.append((args[(- 1)], song))
        self.player.connect('song-started', start_end_handler, 'started')
        self.player.connect('song-ended', start_end_handler, 'ended')
        self.player.setup(source, None, 0)
        self.signals = []

        def handler(type_, *args):
            self.signals.append(type_)
        connect_obj(self.player, 'unpaused', handler, 'unpaused')
        connect_obj(self.player, 'paused', handler, 'paused')

    def _check_events(self):
        # Every 'ended' must close the most recent unmatched 'started',
        # and no 'started' may be left open at the end.
        stack = []
        old = self.events[:]
        for (type_, song) in self.events:
            if (type_ == 'started'):
                stack.append(song)
            elif (type_ == 'ended'):
                self.assertTrue((stack.pop((- 1)) is song), msg=old)
        self.assertFalse(stack, msg=old)

    def tearDown(self):
        self.player.destroy()
        self._check_events()
        del self.events
        del self.signals
        config.quit()
@pytest.mark.parametrize('example', ('\n [project]\n name = "myproj"\n version = "1.2"\n\n [my-tool.that-disrespect.pep518]\n value = 42\n ',))
def test_ignore_unrelated_config(tmp_path, example):
    """Unrelated top-level tables in pyproject.toml must not break reading.

    NOTE(review): the decorator had been garbled to a bare '.parametrize(...)'
    statement (a syntax error); restored the '@pytest.mark.' prefix. The
    example string's leading spaces are stripped by cleandoc below.
    """
    pyproject = (tmp_path / 'pyproject.toml')
    pyproject.write_text(cleandoc(example))
    assert (read_configuration(pyproject) is not None)
class Geometry():
    """Abstract base for 2D geometric primitives.

    Concrete subclasses override the per-type dispatch slots
    (_intersect_*/_connect_*) for the combinations they support and implement
    intersect()/connect(); unsupported pairings raise AttributeError.
    """

    def _connect_unimplemented(self, other):
        # Fallback for type pairings with no connection rule.
        raise AttributeError('Cannot connect %s to %s' % (self.__class__, other.__class__))

    def _intersect_unimplemented(self, other):
        # Fallback for type pairings with no intersection rule.
        raise AttributeError('Cannot intersect %s and %s' % (self.__class__, other.__class__))

    # Dispatch slots default to the unimplemented fallbacks.
    _intersect_point2 = _intersect_unimplemented
    _intersect_line2 = _intersect_unimplemented
    _connect_point2 = _connect_unimplemented
    _connect_line2 = _connect_unimplemented

    def intersect(self, other):
        raise NotImplementedError

    def connect(self, other):
        raise NotImplementedError

    def distance(self, other):
        """Length of the connecting segment, or 0.0 when there is none."""
        connection = self.connect(other)
        if not connection:
            return 0.0
        return connection.length
# NOTE(review): the two bare expressions below look like stripped decorators
# (pyjanitor-style code uses '@pf.register_dataframe_method' and
# '@deprecated_alias(column="column_name")') — confirm against upstream
# before relying on this function being registered as a DataFrame method.
_dataframe_method
_alias(column='column_name')
def convert_excel_date(df: pd.DataFrame, column_name: Hashable) -> pd.DataFrame:
    """Convert an Excel serial-date column to pandas datetimes, in place.

    Excel stores dates as day counts with day zero 1899-12-30; the column is
    interpreted as a day-count timedelta added to that epoch.

    Args:
        df: the DataFrame to mutate (also returned).
        column_name: column holding numeric Excel serial dates.

    Raises:
        ValueError: if the column contains non-numeric values.
    """
    if (not is_numeric_dtype(df[column_name])):
        raise ValueError('There are non-numeric values in the column. All values must be numeric.')
    df[column_name] = (pd.TimedeltaIndex(df[column_name], unit='d') + dt.datetime(1899, 12, 30))
    return df
class F16_TestCase(F12_TestCase):
    """F16 autopart parsing: --nolvm is a bare flag that accepts no value."""

    def runTest(self):
        # Re-run all inherited F12 checks first.
        F12_TestCase.runTest(self)
        if '--type' in self.optionList:
            return
        # --nolvm round-trips as a bare flag...
        self.assert_parse('autopart --nolvm', 'autopart --nolvm\n')
        # ...and rejects any attached or trailing value.
        for invalid in ('autopart --nolvm=asdf', 'autopart --nolvm True', 'autopart --nolvm=1', 'autopart --nolvm 0'):
            self.assert_parse_error(invalid)
# NOTE(review): the two bare expressions below look like stripped decorators
# (e.g. '@csrf_exempt' and a tracking decorator from a manager object) —
# confirm against upstream before relying on them.
_exempt
_manager.tracked
def vote(request: WSGIRequest) -> HttpResponse:
    """Register an up-/down-vote for a queued or currently playing song.

    POST params: 'key' (queue key) and 'amount' (clamped to [-2, 2]).
    Returns 400 on missing/out-of-range params or when IP-based vote limiting
    rejects the request; otherwise applies the vote, skips the current song
    if it falls below the downvote-kick threshold, and updates client state.
    """
    key_param = request.POST.get('key')
    amount_param = request.POST.get('amount')
    if ((key_param is None) or (amount_param is None)):
        return HttpResponseBadRequest()
    key = int(key_param)
    amount = int(amount_param)
    # Votes outside [-2, 2] are rejected.
    if ((amount < (- 2)) or (amount > 2)):
        return HttpResponseBadRequest()
    if (storage.get('ip_checking') and (not user_manager.try_vote(user_manager.get_client_ip(request), key, amount))):
        return HttpResponseBadRequest('nice try')
    models.CurrentSong.objects.filter(queue_key=key).update(votes=(F('votes') + amount))
    try:
        current_song = models.CurrentSong.objects.get()
        # Kick the current song once its score drops to -downvotes_to_kick.
        if ((current_song.queue_key == key) and (current_song.votes <= (- storage.get('downvotes_to_kick')))):
            _skip()
    except models.CurrentSong.DoesNotExist:
        pass
    removed = playback.queue.vote(key, amount, (- storage.get('downvotes_to_kick')))
    # A queued song that was voted off may trigger autoplay of a related song.
    if (removed is not None):
        if (not removed.manually_requested):
            playback.handle_autoplay((removed.external_url or removed.title))
        else:
            playback.handle_autoplay()
    musiq.update_state()
    return HttpResponse()
class TestAssertError(TestNameCheckVisitorBase):
    """Tests for pyanalyze's assert_error() extension."""

    # NOTE(review): the bare '_passes()' below looks like a stripped decorator
    # (probably '@assert_passes()') — confirm against upstream.
    _passes()
    def test(self) -> None:
        from pyanalyze.extensions import assert_error

        def f(x: int) -> None:
            pass

        def capybara() -> None:
            # f('x') mismatches the int annotation inside assert_error().
            with assert_error():
                f('x')
            # f(1) is well-typed inside assert_error().
            with assert_error():
                f(1)
class Filter():
    """Regex-based text-cleaning pipeline for Chinese text with length,
    language, and frequency filters. Rejected/modified texts are logged.

    BUG FIXES:
      * pure_chinese_text and is_chinese were defined without `self` although
        they are invoked as instance methods (self.pure_chinese_text(...),
        self.is_chinese(c)), which raised TypeError; `self` added.
      * is_chinese compared against empty string literals (garbled source),
        making it always return False; restored the CJK Unified Ideographs
        range u'\\u4e00'..u'\\u9fa5'.
    """

    def __init__(self, config=None, regex_list=None, logging_fp=None):
        """Compile the regex task list and set thresholds from config."""
        if (not regex_list):
            regex_list = default_regex
            regex_list.append(kaomoji_regex_generator())
            regex_list.insert(5, crazy_fans_regex_generator())
        self.regex_list = []
        for (name, pattern, func) in regex_list:
            pattern = re.compile(pattern)
            self.regex_list.append([name, pattern, func])
        if (not config):
            config = filter_config
        self.min_len = config['min_len']
        self.max_len = config['max_len']
        self.max_text_freq = config['max_text_freq']
        self.max_reread_patience = config['max_reread_patience']
        self.balance_factor = config['balance_factor']
        self.config = config
        if (not logging_fp):
            logging_fp = 'logging/filter.log'
        log_formatter = logging.Formatter('%(message)s')
        log_handler = logging.FileHandler(logging_fp, mode='w')
        log_handler.setFormatter(log_formatter)
        logger = logging.getLogger('filter')
        logger.addHandler(log_handler)
        logger.setLevel(level=logging.INFO)
        self.logger = logger

    def clean_pattern_in_sentence(self, sentence, pattern, task_name=None, replace=None):
        """Repeatedly remove (or replace) all matches of `pattern` in the
        sentence, inserting spaces where needed so words don't fuse.
        Returns '' when the match times out or `replace` requests deletion.
        """
        s = sentence
        find_count = 1
        if (task_name == 'emoji'):
            s = emoji.demojize(s)
        while find_count:
            find_count = 0
            try:
                # NOTE(review): finditer(timeout=...) requires the third-party
                # `regex` module (stdlib `re` has no timeout) — confirm import.
                obj = pattern.finditer(s, timeout=5)
            except TimeoutError:
                self.logger.info(f'regex_unit time_out {sentence}')
                return ''
            # Iterate in reverse so earlier spans stay valid after edits.
            for match in reversed(list(obj)):
                find_count += 1
                (start, end) = match.span()
                if replace:
                    item = replace(match)
                    if (item == DELETE_TEXT):
                        return ''
                    s = ((((s[:start] + (' ' if ((start > 0) and (s[(start - 1)] not in string.whitespace)) else '')) + item) + (' ' if ((end < len(s)) and (s[end] not in string.whitespace)) else '')) + s[end:])
                else:
                    s = ((s[:start] + (' ' if ((start > 0) and (end < len(s)) and (s[(start - 1)] not in string.whitespace) and (s[end] not in string.whitespace)) else '')) + s[end:])
        return s

    def regex_unit(self, text):
        """Run every configured regex task over the text, logging changes."""
        for (task_name, pattern, replace_func) in self.regex_list:
            old = text
            text = self.clean_pattern_in_sentence(text, pattern, task_name, replace_func)
            if (old != text):
                self.logger.info(f'regex_unit {task_name} {old} {text}')
        return text

    def clip_length_unit(self, text):
        """Keep texts whose Chinese-core length and total length are in range."""
        if ((self.min_len <= len(self.pure_chinese_text(text))) and (len(text) <= self.max_len)):
            return text
        return ''

    def balance_length_unit(self, text1, text2):
        """Reject text2 when it is disproportionately long relative to text1."""
        t1 = self.pure_chinese_text(text1)
        t2 = self.pure_chinese_text(text2)
        if ((len(t1) * self.balance_factor) < len(t2)):
            self.logger.info(f'balance_length_unit {text1} {text2}')
            return ''
        return text2

    def language_unit(self, text, lan_model=False, min_freq=0.3):
        """Keep text that is (detected as) Chinese, or whose Chinese-character
        frequency is at least min_freq when using the frequency heuristic.
        """
        if lan_model:
            chinese_freq = 1.0
            try:
                lan = detect(text)
            except Exception:
                # Detection failure is treated as unknown, not an error.
                lan = 'unk'
        else:
            lan = 'freq'
            chinese_num = 0
            for c in list(text):
                if self.is_chinese(c):
                    chinese_num += 1
            chinese_freq = ((1.0 * chinese_num) / (len(text) + 1))
        if (((lan != 'zh-cn') and (lan != 'ko') and (lan != 'unk') and (lan != 'freq')) or (chinese_freq < min_freq)):
            if text:
                self.logger.info(f'language_unit: {chinese_freq} {text}')
            return ''
        return text

    def clip_text_freq_unit(self, text, text2freq):
        """Downsample over-frequent texts to roughly max_text_freq occurrences."""
        pure = self.pure_chinese_text(text)
        freq = text2freq.get(pure, 0)
        if (freq <= self.max_text_freq):
            return text
        roll = random.random()
        if (roll < ((1.0 * self.max_text_freq) / (freq + 1e-05))):
            return text
        self.logger.info(f'clip_text_freq_unit {text}')
        return ''

    def reread_machine_unit(self, text, text2freq):
        """Drop texts repeated at least max_reread_patience times."""
        pure = self.pure_chinese_text(text)
        if (text2freq.get(pure, 0) >= self.max_reread_patience):
            self.logger.info(f'reread_machine_unit {text}')
            return ''
        return text

    def single_text_pipe(self, text):
        """Whitespace-normalize, then apply regex, length and language units."""
        text = re.sub('\\s+', ' ', text).strip()
        if text:
            text = self.regex_unit(text)
        if text:
            text = self.clip_length_unit(text)
        if text:
            text = self.language_unit(text)
        return text

    def text2frequency(self, data, threshold):
        """Count occurrences of each Chinese-core text, keeping only entries
        above `threshold` in the returned mapping.
        """
        freq = {}
        for text in data:
            if (not text):
                continue
            pure = self.pure_chinese_text(text)
            freq[pure] = (freq.get(pure, 0) + 1)
        safe_text = set([text for (text, cnt) in freq.items() if (cnt <= threshold)])
        for k in safe_text:
            del freq[k]
        return freq

    def pure_chinese_text(self, text):
        """Strip non-word characters and underscores, keeping the word core.
        (FIX: `self` was missing although this is called as an instance method.)
        """
        return re.sub('\\W', '', text).replace('_', '')

    def is_chinese(self, uchar):
        """True iff `uchar` is a CJK Unified Ideograph.
        (FIX: `self` was missing; the range literals had been garbled to
        empty strings, making this always False.)
        """
        if ((uchar >= u'\u4e00') and (uchar <= u'\u9fa5')):
            return True
        else:
            return False
def Give(opt, datapath):
    """Build train/val/test/eval BaseDataset splits from a CUB-style layout
    (class folders named '<id>.<name>' under datapath/images; first half of
    the classes is train, second half test).

    Returns a dict of datasets keyed by 'training', 'validation', 'testing',
    'evaluation' and 'evaluation_train'.
    """
    image_sourcepath = (datapath + '/images')
    # Class folders sorted by their numeric id prefix; '._' filters out
    # macOS resource-fork artifacts.
    image_classes = sorted([x for x in os.listdir(image_sourcepath) if ('._' not in x)], key=(lambda x: int(x.split('.')[0])))
    # class index (0-based) -> human-readable class name.
    total_conversion = {(int(x.split('.')[0]) - 1): x.split('.')[(- 1)] for x in image_classes}
    image_list = {(int(key.split('.')[0]) - 1): sorted([((((image_sourcepath + '/') + key) + '/') + x) for x in os.listdir(((image_sourcepath + '/') + key)) if ('._' not in x)]) for key in image_classes}
    image_list = [[(key, img_path) for img_path in image_list[key]] for key in image_list.keys()]
    image_list = [x for y in image_list for x in y]
    # class index -> list of image paths.
    image_dict = {}
    for (key, img_path) in image_list:
        if (not (key in image_dict.keys())):
            image_dict[key] = []
        image_dict[key].append(img_path)
    keys = sorted(list(image_dict.keys()))
    # Zero-shot protocol: first half of the classes train, second half test.
    (train, test) = (keys[:(len(keys) // 2)], keys[(len(keys) // 2):])
    if opt.use_tv_split:
        if (not opt.tv_split_by_samples):
            # Split by classes: a fraction of the train classes becomes val.
            train_val_split = int((len(train) * opt.tv_split_perc))
            (train, val) = (train[:train_val_split], train[train_val_split:])
            train_image_dict = {i: image_dict[key] for (i, key) in enumerate(train)}
            val_image_dict = {i: image_dict[key] for (i, key) in enumerate(val)}
            test_image_dict = {i: image_dict[key] for (i, key) in enumerate(test)}
        else:
            # Split by samples: every train class contributes train and val
            # images, picked via evenly spaced indices.
            val = train
            (train_image_dict, val_image_dict) = ({}, {})
            for key in train:
                train_ixs = np.array(list(set(np.round(np.linspace(0, (len(image_dict[key]) - 1), int((len(image_dict[key]) * opt.tv_split_perc))))))).astype(int)
                val_ixs = np.array([x for x in range(len(image_dict[key])) if (x not in train_ixs)])
                train_image_dict[key] = np.array(image_dict[key])[train_ixs]
                val_image_dict[key] = np.array(image_dict[key])[val_ixs]
        val_dataset = BaseDataset(val_image_dict, opt, is_validation=True)
        val_conversion = {i: total_conversion[key] for (i, key) in enumerate(val)}
        val_dataset.conversion = val_conversion
    else:
        train_image_dict = {key: image_dict[key] for key in train}
        val_image_dict = None
        val_dataset = None
    train_conversion = {i: total_conversion[key] for (i, key) in enumerate(train)}
    test_conversion = {i: total_conversion[key] for (i, key) in enumerate(test)}
    # NOTE(review): this unconditional assignment overwrites the enumerated
    # test_image_dict built in the use_tv_split branch above — confirm which
    # keying the downstream BaseDataset expects.
    test_image_dict = {key: image_dict[key] for key in test}
    print('\nDataset Setup:\nUsing Train-Val Split: {0}\n#Classes: Train ({1}) | Val ({2}) | Test ({3})\n'.format(opt.use_tv_split, len(train_image_dict), (len(val_image_dict) if val_image_dict else 'X'), len(test_image_dict)))
    train_dataset = BaseDataset(train_image_dict, opt)
    test_dataset = BaseDataset(test_image_dict, opt, is_validation=True)
    eval_dataset = BaseDataset(train_image_dict, opt, is_validation=True)
    eval_train_dataset = BaseDataset(train_image_dict, opt, is_validation=False)
    train_dataset.conversion = train_conversion
    test_dataset.conversion = test_conversion
    eval_dataset.conversion = test_conversion
    eval_train_dataset.conversion = train_conversion
    return {'training': train_dataset, 'validation': val_dataset, 'testing': test_dataset, 'evaluation': eval_dataset, 'evaluation_train': eval_train_dataset}
class StripDiacriticals(FilterCheckButton):
    """Rename filter: remove combining (diacritical) marks from filenames."""

    _label = _('Strip _diacritical marks')
    _section = 'rename'
    _key = 'diacriticals'
    _order = 1.2

    def filter(self, original, filename):
        # Decompose via NFKD so accented characters split into base character
        # plus combining mark, then drop the combining marks.
        decomposed = unicodedata.normalize('NFKD', filename)
        return ''.join(ch for ch in decomposed if not unicodedata.combining(ch))
def profile(input, ops, n=10, device=None):
    """Benchmark params, GFLOPs, GPU memory and forward/backward latency of
    one or more ops over one or more inputs.

    BUG FIX: both unit-conversion denominators had been garbled to '.0'
    (i.e. 0.0 — a guaranteed ZeroDivisionError); restored the 1E9 divisors
    for GFLOPs and GB. Also narrowed the bare `except:` to `except Exception:`.

    Args:
        input: tensor or list of tensors to feed.
        ops: module/callable or list thereof.
        n: timing iterations per (input, op) pair.
        device: target device; auto-selected when None.

    Returns:
        List of [params, gflops, mem_gb, fwd_ms, bwd_ms, in_shape, out_shape]
        per (input, op) pair, or None entries on failure.
    """
    results = []
    logging.basicConfig(format='%(message)s', level=logging.INFO)
    device = (device or select_device())
    print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}{'input':>24s}{'output':>24s}")
    for x in (input if isinstance(input, list) else [input]):
        x = x.to(device)
        x.requires_grad = True
        for m in (ops if isinstance(ops, list) else [ops]):
            m = (m.to(device) if hasattr(m, 'to') else m)
            # Match op precision to half-precision inputs.
            m = (m.half() if (hasattr(m, 'half') and isinstance(x, torch.Tensor) and (x.dtype is torch.float16)) else m)
            (tf, tb, t) = (0.0, 0.0, [0.0, 0.0, 0.0])
            try:
                # FIX: was '/ .0' (division by zero); FLOPs -> GFLOPs is / 1E9.
                flops = ((thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9) * 2)
            except Exception:
                flops = 0
            try:
                for _ in range(n):
                    t[0] = time_sync()
                    y = m(x)
                    t[1] = time_sync()
                    try:
                        _ = (sum([yi.sum() for yi in y]) if isinstance(y, list) else y).sum().backward()
                        t[2] = time_sync()
                    except Exception as e:
                        # Backward may be unsupported for this op; record NaN.
                        print(e)
                        t[2] = float('nan')
                    tf += (((t[1] - t[0]) * 1000) / n)
                    tb += (((t[2] - t[1]) * 1000) / n)
                # FIX: was '/ .0' (ZeroDivisionError on CUDA); bytes -> GB is / 1E9.
                mem = ((torch.cuda.memory_reserved() / 1E9) if torch.cuda.is_available() else 0)
                s_in = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list')
                s_out = (tuple(y.shape) if isinstance(y, torch.Tensor) else 'list')
                p = (sum(list((x.numel() for x in m.parameters()))) if isinstance(m, nn.Module) else 0)
                print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')
                results.append([p, flops, mem, tf, tb, s_in, s_out])
            except Exception as e:
                print(e)
                results.append(None)
            torch.cuda.empty_cache()
    return results
# Regression test: pytest must not implicitly add <rootdir>/src to sys.path,
# so a package living only under src/ stays unimportable from the test run
# (the generated test asserts the ImportError, and the whole run must pass).
def test_does_not_put_src_on_path(pytester: Pytester) -> None: ensure_file((pytester.path / 'src/nope/__init__.py')) pytester.makepyfile('import pytest\ndef test():\n    with pytest.raises(ImportError):\n        import nope\n') result = pytester.runpytest() assert (result.ret == ExitCode.OK)
# Fairseq task for the Winograd Schema Challenge (WSC).
# NOTE(review): this block's formatting is collapsed (many statements per
# physical line) and decorators appear lost in extraction: the leading
# "_task('wsc')" looks like a truncated registration decorator, and
# add_args / load_dictionary / setup_task take `parser` / `cls` without
# `self`, so they were presumably @staticmethod / @classmethod — confirm
# against the original repository before relying on this text.
# Visible API:
#  * add_args: declares the 'data' directory and optional --init-token.
#  * __init__: stores vocab, registers a '<mask>' symbol, builds BPE and
#    tokenizer from args; GPT-2 BPE flips the leading/trailing-space flags.
#  * binarize: tokenizer -> BPE -> vocab.encode_line, optionally prepending
#    args.init_token.
#  * binarize_with_mask: binarizes prefix+text+suffix with EOS and marks the
#    text's token span in a uint8 mask.
#  * load_dataset: reads <split>.jsonl, builds query/candidate token+mask
#    ListDatasets plus labels, wraps them in a shuffled SortDataset.
#  * disambiguate_pronoun: scores each candidate by the average masked-LM
#    log-prob over its masked span and either compares against the query or
#    returns the best candidate's decoded text.
_task('wsc') class WSCTask(FairseqTask): def add_args(parser): parser.add_argument('data', metavar='DIR', help='path to data directory; we load <split>.jsonl') parser.add_argument('--init-token', type=int, default=None, help='add token at the beginning of each batch item') def __init__(self, args, vocab): super().__init__(args) self.vocab = vocab self.mask = vocab.add_symbol('<mask>') self.bpe = encoders.build_bpe(args) self.tokenizer = encoders.build_tokenizer(args) if (args.bpe == 'gpt2'): self.leading_space = True self.trailing_space = False else: self.leading_space = False self.trailing_space = True def load_dictionary(cls, filename): dictionary = Dictionary.load(filename) dictionary.add_symbol('<mask>') return dictionary def setup_task(cls, args, **kwargs): assert (args.criterion == 'wsc'), 'Must set --criterion=wsc' vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt')) print('| dictionary: {} types'.format(len(vocab))) return cls(args, vocab) def binarize(self, s: str, append_eos: bool=False): if (self.tokenizer is not None): s = self.tokenizer.encode(s) if (self.bpe is not None): s = self.bpe.encode(s) tokens = self.vocab.encode_line(s, append_eos=append_eos, add_if_not_exist=False).long() if (self.args.init_token is not None): tokens = torch.cat([tokens.new([self.args.init_token]), tokens]) return tokens def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space): toks = self.binarize(((((prefix + leading_space) + txt) + trailing_space) + suffix), append_eos=True) mask = torch.zeros_like(toks, dtype=torch.uint8) mask_start = len(self.binarize(prefix)) mask_size = len(self.binarize((leading_space + txt))) mask[mask_start:(mask_start + mask_size)] = 1 return (toks, mask) def load_dataset(self, split, epoch=0, combine=False, data_path=None, return_only=False, **kwargs): if (data_path is None): data_path = os.path.join(self.args.data, (split + '.jsonl')) if (not os.path.exists(data_path)): raise FileNotFoundError('Cannot find 
data: {}'.format(data_path)) query_tokens = [] query_masks = [] query_lengths = [] candidate_tokens = [] candidate_masks = [] candidate_lengths = [] labels = [] for (sentence, pronoun_span, query, label) in wsc_utils.jsonl_iterator(data_path): prefix = sentence[:pronoun_span.start].text suffix = sentence[pronoun_span.end:].text_with_ws leading_space = (' ' if sentence[:pronoun_span.start].text_with_ws.endswith(' ') else '') trailing_space = (' ' if pronoun_span.text_with_ws.endswith(' ') else '') cand_spans = wsc_utils.filter_noun_chunks(wsc_utils.extended_noun_chunks(sentence), exclude_pronouns=True, exclude_query=query, exact_match=False) if (query is not None): (query_toks, query_mask) = self.binarize_with_mask(query, prefix, suffix, leading_space, trailing_space) query_len = len(query_toks) else: (query_toks, query_mask, query_len) = (None, None, 0) query_tokens.append(query_toks) query_masks.append(query_mask) query_lengths.append(query_len) (cand_toks, cand_masks) = ([], []) for cand_span in cand_spans: (toks, mask) = self.binarize_with_mask(cand_span.text, prefix, suffix, leading_space, trailing_space) cand_toks.append(toks) cand_masks.append(mask) cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad()) cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0) assert (cand_toks.size() == cand_masks.size()) candidate_tokens.append(cand_toks) candidate_masks.append(cand_masks) candidate_lengths.append(cand_toks.size(1)) labels.append(label) query_lengths = np.array(query_lengths) query_tokens = ListDataset(query_tokens, query_lengths) query_masks = ListDataset(query_masks, query_lengths) candidate_lengths = np.array(candidate_lengths) candidate_tokens = ListDataset(candidate_tokens, candidate_lengths) candidate_masks = ListDataset(candidate_masks, candidate_lengths) labels = ListDataset(labels, ([1] * len(labels))) dataset = {'id': IdDataset(), 'query_tokens': query_tokens, 'query_masks': query_masks, 'candidate_tokens': 
candidate_tokens, 'candidate_masks': candidate_masks, 'labels': labels, 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(query_tokens, reduce=True)} nested_dataset = NestedDictionaryDataset(dataset, sizes=[query_lengths]) with data_utils.numpy_seed(self.args.seed): shuffle = np.random.permutation(len(query_tokens)) dataset = SortDataset(nested_dataset, sort_order=[shuffle]) if return_only: return dataset self.datasets[split] = dataset return self.datasets[split] def build_dataset_for_inference(self, sample_json): with tempfile.NamedTemporaryFile(buffering=0) as h: h.write((json.dumps(sample_json) + '\n').encode('utf-8')) dataset = self.load_dataset('disambiguate_pronoun', data_path=h.name, return_only=True) return dataset def disambiguate_pronoun(self, model, sentence, use_cuda=False): sample_json = wsc_utils.convert_sentence_to_json(sentence) dataset = self.build_dataset_for_inference(sample_json) sample = dataset.collater([dataset[0]]) if use_cuda: sample = utils.move_to_cuda(sample) def get_masked_input(tokens, mask): masked_tokens = tokens.clone() masked_tokens[mask] = self.mask return masked_tokens def get_lprobs(tokens, mask): (logits, _) = model(src_tokens=get_masked_input(tokens, mask)) lprobs = F.log_softmax(logits, dim=(- 1), dtype=torch.float) scores = lprobs.gather(2, tokens.unsqueeze((- 1))).squeeze((- 1)) mask = mask.type_as(scores) scores = ((scores * mask).sum(dim=(- 1)) / mask.sum(dim=(- 1))) return scores cand_lprobs = get_lprobs(sample['candidate_tokens'][0], sample['candidate_masks'][0]) if (sample['query_tokens'][0] is not None): query_lprobs = get_lprobs(sample['query_tokens'][0].unsqueeze(0), sample['query_masks'][0].unsqueeze(0)) return ((query_lprobs >= cand_lprobs).all().item() == 1) else: best_idx = cand_lprobs.argmax().item() full_cand = sample['candidate_tokens'][0][best_idx] mask = sample['candidate_masks'][0][best_idx] toks = full_cand[mask] return self.bpe.decode(self.source_dictionary.string(toks)).strip() def 
source_dictionary(self): return self.vocab def target_dictionary(self): return self.vocab
def mcad_svc(app: AppDef, svc_name: str, namespace: str, service_port: str) -> 'V1Service':
    """Build a headless ClusterIP Service selecting the pods of `app`.

    The service publishes not-ready addresses and forwards TCP traffic on
    `service_port` to the same port on pods labelled with `svc_name`.
    """
    from kubernetes.client.models import V1Container, V1ContainerPort, V1EmptyDirVolumeSource, V1EnvVar, V1HostPathVolumeSource, V1ObjectMeta, V1PersistentVolumeClaimVolumeSource, V1Pod, V1PodSpec, V1ResourceRequirements, V1SecurityContext, V1Service, V1ServicePort, V1ServiceSpec, V1ServiceStatus, V1Volume, V1VolumeMount

    port_number = int(service_port)
    metadata = V1ObjectMeta(
        name=svc_name,
        namespace=namespace,
        labels=object_labels(app, svc_name),
    )
    spec = V1ServiceSpec(
        cluster_ip='None',  # headless service
        publish_not_ready_addresses=True,
        ports=[V1ServicePort(protocol='TCP', port=port_number, target_port=port_number)],
        selector={LABEL_UNIQUE_NAME: svc_name},
        session_affinity='None',
        type='ClusterIP',
    )
    return V1Service(
        api_version='v1',
        kind='Service',
        metadata=metadata,
        spec=spec,
        status=V1ServiceStatus(load_balancer={}),
    )
def _get_dp_sharding_perf(batch_sizes: List[int], world_size: int, local_world_size: int, input_lengths: List[float], grad_num_elem: int, emb_dim: int, input_data_type_size: float, table_data_type_size: float, num_poolings: List[float], device_bw: float, inter_host_bw: float, bwd_compute_multiplier: float, is_pooled: bool, is_weighted: bool=False) -> Perf:
    """Estimate per-iteration cost of a data-parallel-sharded embedding table.

    Forward cost is modelled as bytes moved (id reads + row lookups + output
    writes) over the device bandwidth; backward adds the gradient all-reduce
    across hosts plus elementwise optimizer kernels.
    """
    # Total rows looked up per iteration, summed over features.
    batch_inputs = sum((length * pooling) * bsz for length, pooling, bsz in zip(input_lengths, num_poolings, batch_sizes))
    # Pooled output emits one row per (feature, sample); unpooled emits one per id.
    batch_outputs = sum(pooling * bsz for pooling, bsz in zip(num_poolings, batch_sizes)) if is_pooled else batch_inputs
    input_read_size = math.ceil(batch_inputs * input_data_type_size)
    if is_weighted:
        # Weighted features also read one per-id weight.
        input_read_size *= 2
    embedding_lookup_size = (batch_inputs * emb_dim) * table_data_type_size
    output_write_size = (batch_outputs * emb_dim) * table_data_type_size
    table_size = grad_num_elem * table_data_type_size
    fwd_compute = ((input_read_size + embedding_lookup_size) + output_write_size) / device_bw
    # Ring all-reduce over at most 2 modelled nodes; doubled when the job
    # actually spans more than two hosts.
    num_nodes = min(world_size / local_world_size, 2)
    all_reduce = ((table_size * (2 * num_nodes - 1)) / num_nodes) / (inter_host_bw * local_world_size)
    if world_size > 2 * local_world_size:
        all_reduce *= 2
    optimizer_kernels = (table_size * DP_ELEMENTWISE_KERNELS_PERF_FACTOR) / device_bw
    bwd_compute = fwd_compute * bwd_compute_multiplier
    bwd_grad_indice_weights_kernel = fwd_compute * WEIGHTED_KERNEL_MULTIPLIER if is_weighted else 0
    return Perf(fwd_compute=fwd_compute, fwd_comms=0, bwd_compute=bwd_compute + bwd_grad_indice_weights_kernel, bwd_comms=all_reduce + optimizer_kernels)
def _freeze_except_roi_heads_id(model):
    """Freeze all parameters of `model` except those under its ROI heads.

    Handles both plain models (`model.roi_heads`) and DataParallel/DDP-style
    wrappers (`model.module.roi_heads`). In the wrapped case, children named
    'Sequential' stay frozen (mirroring the original behavior).

    Fix: the original wrapped the whole unfreeze loop in a bare `except:`,
    which swallowed every error (including KeyboardInterrupt) and re-ran the
    loop without the Sequential skip. Only the `.module` attribute probe can
    legitimately fail, so only that is guarded now.

    Returns the same model instance with `requires_grad` flags updated.
    """
    for p in model.parameters():
        p.requires_grad = False
    try:
        roi_children = model.module.roi_heads.children()
        skip_sequential = True
    except AttributeError:
        # Not wrapped (no `.module`): unfreeze every ROI-head child.
        roi_children = model.roi_heads.children()
        skip_sequential = False
    for child in roi_children:
        if skip_sequential and child._get_name() == 'Sequential':
            continue
        print('unfreezing', child._get_name())
        for p in child.parameters():
            p.requires_grad = True
    return model
class DatabaseConfig():
    """Collects the components of a database URL and assembles them on demand."""

    def __init__(self, db_url_scheme):
        # Scheme such as 'postgresql' or 'sqlite'; all other parts default
        # to None and contribute nothing to the URL until set.
        self.db_url_scheme = db_url_scheme
        self.username = None
        self.password = None
        self.hostname = None
        self.port = None
        self.database_name = None

    def url(self):
        # Each component contributes its fragment only when truthy.
        fragments = [
            self.username if self.username else '',
            ':%s' % self.password if self.password else '',
            '%s' % self.hostname if self.hostname else '',
            ':%s' % self.port if self.port else '',
            '/%s' % self.database_name if self.database_name else '',
        ]
        return '%s://%s%s%s%s%s' % (self.db_url_scheme, fragments[0], fragments[1], fragments[2], fragments[3], fragments[4])

    def hostname_default(self):
        return ''

    def database_name_default(self):
        return 'reahl'

    def port_default(self):
        return ''

    def ask_detail_questions(self):
        # NOTE(review): the *_default callables are passed uncalled as
        # defaults; presumably `prompt` accepts a callable, or these were
        # @property in the original — confirm upstream.
        self.username = prompt('username ? ', default='')
        self.password = prompt('password (will be echoed to screen) ? ', default='')
        self.hostname = prompt('hostname ? ', default=self.hostname_default)
        self.port = prompt('port ? ', default=self.port_default)
        self.database_name = prompt('database name ? ', default=self.database_name_default)
# Backward-compatibility shim for old fairseq checkpoints: rewrites legacy
# layouts of `state` (a torch.load'ed checkpoint dict) into the current one
# and returns it. Successive `if` blocks each migrate one historical format
# change: optimizer state folded into optimizer_history, epoch/val_loss moved
# into extra_state, defaulted optimizer/lr-scheduler metadata, renamed args
# (max_positions, remove_bpe, min_lr, criterion, task, arch, log_keys),
# scalar lr -> list, list data path -> single string, dropped distillation
# keys, and finally conversion of `state['args']` into an omegaconf `cfg`
# with print_alignment bools rewritten to the newer 'hard' string form.
# NOTE(review): statement order matters — later migrations read fields
# written by earlier ones — so the code is kept byte-identical below.
# Formatting is collapsed in this extraction.
def _upgrade_state_dict(state): if ('optimizer_history' not in state): state['optimizer_history'] = [{'criterion_name': 'CrossEntropyCriterion', 'best_loss': state['best_loss']}] state['last_optimizer_state'] = state['optimizer'] del state['optimizer'] del state['best_loss'] if (('epoch' in state) and ('extra_state' not in state)): state['extra_state'] = {'epoch': state['epoch'], 'batch_offset': state['batch_offset'], 'val_loss': state['val_loss']} del state['epoch'] del state['batch_offset'] del state['val_loss'] if ('optimizer' in state['optimizer_history'][(- 1)]): state['last_optimizer_state'] = state['optimizer_history'][(- 1)]['optimizer'] for optim_hist in state['optimizer_history']: del optim_hist['optimizer'] if ('optimizer_name' not in state['optimizer_history'][(- 1)]): state['optimizer_history'][(- 1)]['optimizer_name'] = 'FairseqNAG' if ('lr_scheduler_state' not in state['optimizer_history'][(- 1)]): state['optimizer_history'][(- 1)]['lr_scheduler_state'] = {'best': state['optimizer_history'][(- 1)]['best_loss']} del state['optimizer_history'][(- 1)]['best_loss'] if ('num_updates' not in state['optimizer_history'][(- 1)]): state['optimizer_history'][(- 1)]['num_updates'] = 0 if (('args' in state) and hasattr(state['args'], 'max_positions') and (not hasattr(state['args'], 'max_source_positions'))): state['args'].max_source_positions = state['args'].max_positions state['args'].max_target_positions = state['args'].max_positions if ('train_iterator' not in state['extra_state']): state['extra_state']['train_iterator'] = {'epoch': state['extra_state']['epoch'], 'iterations_in_epoch': state['extra_state'].get('batch_offset', 0)} if (('args' in state) and (state['args'] is not None)): if (not hasattr(state['args'], 'task')): state['args'].task = 'translation' if getattr(state['args'], 'raw_text', False): state['args'].dataset_impl = 'raw' elif getattr(state['args'], 'lazy_load', False): state['args'].dataset_impl = 'lazy' if 
(state['extra_state']['train_iterator'] is not None): state['extra_state']['train_iterator']['epoch'] = max(state['extra_state']['train_iterator'].get('epoch', 1), 1) if hasattr(state['args'], 'remove_bpe'): state['args'].post_process = state['args'].remove_bpe if hasattr(state['args'], 'min_lr'): state['args'].stop_min_lr = state['args'].min_lr del state['args'].min_lr if (hasattr(state['args'], 'criterion') and (state['args'].criterion in ['binary_cross_entropy', 'kd_binary_cross_entropy'])): state['args'].criterion = 'wav2vec' if (hasattr(state['args'], 'log_keys') and (state['args'].log_keys is None)): delattr(state['args'], 'log_keys') if (hasattr(state['args'], 'task') and (state['args'].task == 'speech_pretraining')): state['args'].task = 'audio_pretraining' if (hasattr(state['args'], 'arch') and (state['args'].arch == 'audio_cpc')): state['args'].arch = 'wav2vec' if (hasattr(state['args'], 'lr') and isinstance(state['args'].lr, float)): state['args'].lr = [state['args'].lr] if (hasattr(state['args'], 'data') and isinstance(state['args'].data, list) and (len(state['args'].data) > 0)): state['args'].data = state['args'].data[0] for key in ['static_teachers', 'static_teacher_weights', 'dynamic_teachers', 'dynamic_teacher_weights']: if (key in state['args']): delattr(state['args'], key) state['cfg'] = convert_namespace_to_omegaconf(state['args']) if (('cfg' in state) and (state['cfg'] is not None)): cfg = state['cfg'] with open_dict(cfg): if (('task' in cfg) and ('eval_wer_config' in cfg.task) and isinstance(cfg.task.eval_wer_config.print_alignment, bool)): cfg.task.eval_wer_config.print_alignment = 'hard' if (('generation' in cfg) and isinstance(cfg.generation.print_alignment, bool)): cfg.generation.print_alignment = 'hard' if (('model' in cfg) and ('w2v_args' in cfg.model) and (cfg.model.w2v_args is not None) and (hasattr(cfg.model.w2v_args, 'task') or ('task' in cfg.model.w2v_args)) and isinstance(cfg.model.w2v_args.task.eval_wer_config.print_alignment, 
bool)): cfg.model.w2v_args.task.eval_wer_config.print_alignment = 'hard' return state
# Tastypie REST resource exposing downloadable release files at
# 'downloads/release_file': ToOne links to the OS and Release resources, a
# fixed field whitelist (checksums, signature/bundle files, size, URL, ...),
# exact filtering on name/slug, relation filtering on os/release, and
# substring filtering on description.
class ReleaseFileResource(GenericResource): os = fields.ToOneField(OSResource, 'os') release = fields.ToOneField(ReleaseResource, 'release') class Meta(GenericResource.Meta): queryset = ReleaseFile.objects.all() resource_name = 'downloads/release_file' fields = ['name', 'slug', 'creator', 'last_modified_by', 'os', 'release', 'description', 'is_source', 'url', 'gpg_signature_file', 'md5_sum', 'filesize', 'download_button', 'sigstore_signature_file', 'sigstore_cert_file', 'sigstore_bundle_file'] filtering = {'name': ('exact',), 'slug': ('exact',), 'os': ALL_WITH_RELATIONS, 'release': ALL_WITH_RELATIONS, 'description': ('contains',)} abstract = False
def live_node_waiter(min_live_nodes: int, poll_interval_seconds: float=0.5) -> None:
    """Block until at least `min_live_nodes` nodes report as live.

    Polls `live_node_count()` every `poll_interval_seconds` seconds.

    Fix: the original re-checked the count and then unconditionally logged a
    "Waiting" line and slept before re-testing the loop condition, so it
    always paid one extra poll interval (and emitted a misleading log line)
    after the target count had already been reached.
    """
    while True:
        live_nodes = live_node_count()
        if live_nodes >= min_live_nodes:
            return
        logger.info('Waiting for Live Nodes: %s/%s', live_nodes, min_live_nodes)
        time.sleep(poll_interval_seconds)
def get_mapping(src_dir='src'):
    """Map each websockets source module to its test module.

    A source file 'websockets/x/y.py' pairs with 'tests/x/test_y.py'.
    Files under a 'legacy' directory, package dunders, and compatibility.py
    are excluded on the source side; non-'test_*' files on the test side.
    Asserts that the leftovers match the known UNMAPPED_* allowlists.
    """
    def _is_mappable_src(path):
        if 'legacy' in os.path.dirname(path):
            return False
        return os.path.basename(path) not in ('__init__.py', '__main__.py', 'compatibility.py')

    def _is_mappable_test(path):
        name = os.path.basename(path)
        return 'legacy' not in os.path.dirname(path) and name != '__init__.py' and name.startswith('test_')

    src_files = [
        os.path.relpath(path, src_dir)
        for path in sorted(glob.glob(os.path.join(src_dir, 'websockets/**/*.py'), recursive=True))
        if _is_mappable_src(path)
    ]
    test_files = [
        path
        for path in sorted(glob.glob('tests/**/*.py', recursive=True))
        if _is_mappable_test(path)
    ]

    mapping = {}
    unmapped_test_files = set()
    for test_file in test_files:
        dir_name, file_name = os.path.split(test_file)
        assert dir_name.startswith('tests')
        assert file_name.startswith('test_')
        # tests/<sub>/test_<mod>.py -> websockets/<sub>/<mod>.py
        src_file = os.path.join('websockets' + dir_name[len('tests'):], file_name[len('test_'):])
        if src_file in src_files:
            mapping[src_file] = test_file
        else:
            unmapped_test_files.add(test_file)
    unmapped_src_files = set(src_files) - set(mapping)
    assert unmapped_src_files == set(UNMAPPED_SRC_FILES)
    assert unmapped_test_files == set(UNMAPPED_TEST_FILES)
    return mapping
# Verifies that MSRGameExportDialog.save_options persists the chosen target
# platform: with the Luma radio button checked, the stored MSRPerGameOptions
# must report MSRModPlatform.LUMA.
def test_save_options(skip_qtbot, tmp_path): options = Options(tmp_path) window = MSRGameExportDialog(options, {}, 'MyHash', True, []) window.luma_radio.setChecked(True) window.save_options() game_options = options.options_for_game(RandovaniaGame.METROID_SAMUS_RETURNS) assert isinstance(game_options, MSRPerGameOptions) assert (game_options.target_platform == MSRModPlatform.LUMA)
# Union (|) over Charclass and its negation (~), one assertion per polarity
# combination: pos|pos accumulates members, pos|neg and neg|pos shrink the
# complement to the characters missing from the positive side, and neg|neg
# keeps only characters excluded by both operands (here everything but 'b').
def test_charclass_union() -> None: assert ((Charclass('ab') | Charclass('bc')) == Charclass('abc')) assert ((Charclass('ab') | (~ Charclass('bc'))) == (~ Charclass('c'))) assert (((~ Charclass('ab')) | Charclass('bc')) == (~ Charclass('a'))) assert (((~ Charclass('ab')) | (~ Charclass('bc'))) == (~ Charclass('b')))
class F29_RaidData(F25_RaidData):
    """RAID kickstart data for Fedora 29: adds LUKS2 / PBKDF tuning options."""

    def __init__(self, *args, **kwargs):
        F25_RaidData.__init__(self, *args, **kwargs)
        # New encryption knobs; empty/zero means "not specified".
        self.luks_version = kwargs.get('luks_version', '')
        self.pbkdf = kwargs.get('pbkdf', '')
        self.pbkdf_memory = kwargs.get('pbkdf_memory', 0)
        self.pbkdf_time = kwargs.get('pbkdf_time', 0)
        self.pbkdf_iterations = kwargs.get('pbkdf_iterations', 0)

    def _getArgsAsStr(self):
        retval = F25_RaidData._getArgsAsStr(self)
        if self.encrypted:
            # Each option is emitted only when encryption is on and the
            # corresponding value was supplied.
            for option, value in (('--luks-version', self.luks_version),
                                  ('--pbkdf', self.pbkdf),
                                  ('--pbkdf-memory', self.pbkdf_memory),
                                  ('--pbkdf-time', self.pbkdf_time),
                                  ('--pbkdf-iterations', self.pbkdf_iterations)):
                if value:
                    retval += ' %s=%s' % (option, value)
        return retval
# Tests for W <-> adjacency-list round-tripping (libpysal spatial weights).
# NOTE(review): `self.knownW = io.open(examples.get_path('columbus.gal')).read()`
# — `io` here must be libpysal.io (whose reader returns a W object), not the
# stdlib io module; confirm the file's imports. Formatting is collapsed in
# this extraction; code kept byte-identical.
# Coverage: round-trips with drop_islands True/False, de-duplication of
# symmetric links on lattice weights (including string ids), element-wise
# adjlist_apply / multi-column adjlist_map over the columbus sample data,
# join sorting, custom integer and string ids, and lat2W neighbor agreement.
class TestAdjlist(): def setup_method(self): self.knownW = io.open(examples.get_path('columbus.gal')).read() def test_round_trip_drop_islands_true(self): adjlist = self.knownW.to_adjlist(remove_symmetric=False, drop_islands=True).astype(int) w_from_adj = weights.W.from_adjlist(adjlist) np.testing.assert_allclose(w_from_adj.sparse.toarray(), self.knownW.sparse.toarray()) def test_round_trip_drop_islands_false(self): adjlist = self.knownW.to_adjlist(remove_symmetric=False, drop_islands=True).astype(int) w_from_adj = weights.W.from_adjlist(adjlist) np.testing.assert_allclose(w_from_adj.sparse.toarray(), self.knownW.sparse.toarray()) def test_filter(self): grid = lat2W(2, 2) alist = grid.to_adjlist(remove_symmetric=True, drop_islands=True) assert (len(alist) == 4) with pytest.raises(AssertionError): alist_neighbors = alist.groupby('focal').neighbor.apply(list).to_dict() all_ids = set(alist_neighbors.keys()).union(*map(set, alist_neighbors.values())) for idx in set(all_ids).difference(set(alist_neighbors.keys())): alist_neighbors[idx] = [] badgrid = weights.W(alist_neighbors) np.testing.assert_allclose(badgrid.sparse.toarray(), grid.sparse.toarray()) assert (set(alist.focal.unique()) == {0, 1, 2}) assert (set(alist.neighbor.unique()) == {1, 2, 3}) assert (alist.weight.unique().item() == 1) grid = lat2W(2, 2, id_type='string') alist = grid.to_adjlist(remove_symmetric=True, drop_islands=True) assert (len(alist) == 4) with pytest.raises(AssertionError): alist_neighbors = alist.groupby('focal').neighbor.apply(list).to_dict() all_ids = set(alist_neighbors.keys()).union(*map(set, alist_neighbors.values())) for idx in set(all_ids).difference(set(alist_neighbors.keys())): alist_neighbors[idx] = [] badgrid = weights.W(alist_neighbors) np.testing.assert_allclose(badgrid.sparse.toarray(), grid.sparse.toarray()) tuples = {tuple(t) for t in alist[['focal', 'neighbor']].values} full_alist = grid.to_adjlist(drop_islands=True) all_possible = {tuple(t) for t in full_alist[['focal', 
'neighbor']].values} assert tuples.issubset(all_possible), 'the de-duped adjlist has links not in the duplicated adjlist.' complements = all_possible.difference(tuples) reversed_complements = {t[::(- 1)] for t in complements} assert (reversed_complements == tuples), 'the remaining links in the duplicated adjlist are not the reverse of the links in the deduplicated adjlist.' assert (alist.weight.unique().item() == 1) def apply_and_compare_columbus(self, col): import geopandas df = geopandas.read_file(examples.get_path('columbus.dbf')).head() w = weights.Queen.from_dataframe(df) alist = adj.adjlist_apply(df[col], W=w, to_adjlist_kws={'drop_islands': True}) right_hovals = alist.groupby('focal').att_focal.unique() assert (right_hovals == df[col]).all() allpairs = np.subtract.outer(df[col].values, df[col].values) flat_diffs = allpairs[w.sparse.toarray().astype(bool)] np.testing.assert_allclose(flat_diffs, alist['subtract'].values) return flat_diffs def test_apply(self): self.apply_and_compare_columbus('HOVAL') def test_mvapply(self): import geopandas df = geopandas.read_file(examples.get_path('columbus.dbf')).head() w = weights.Queen.from_dataframe(df) ssq = (lambda x_y: np.sum(((x_y[0] - x_y[1]) ** 2)).item()) ssq.__name__ = 'sum_of_squares' alist = adj.adjlist_apply(df[['HOVAL', 'CRIME', 'INC']], W=w, func=ssq, to_adjlist_kws={'drop_islands': True}) known_ssq = [1301., 3163., 1301., 499., 594., 3163., 499., 181., 436., 594., 181., 481., 436., 481.] 
np.testing.assert_allclose(alist.sum_of_squares.values, np.asarray(known_ssq), rtol=RTOL, atol=ATOL) def test_map(self): atts = ['HOVAL', 'CRIME', 'INC'] df = geopandas.read_file(examples.get_path('columbus.dbf')).head() w = weights.Queen.from_dataframe(df) (hoval, crime, inc) = list(map(self.apply_and_compare_columbus, atts)) mapped = adj.adjlist_map(df[atts], W=w, to_adjlist_kws={'drop_islands': True}) for (name, data) in zip(atts, (hoval, crime, inc), strict=True): np.testing.assert_allclose(data, mapped['_'.join(('subtract', name))].values) def test_sort(self): from libpysal import examples from libpysal.weights import Rook us = geopandas.read_file(examples.get_path('us48.shp')) w = Rook.from_dataframe(us.set_index('STATE_FIPS'), use_index=True) unsorted_al = w.to_adjlist(sort_joins=False) sorted_al = w.to_adjlist(sort_joins=True) sv = (['01'] * 4) sv.append('04') sv = np.array(sv) usv = np.array(['53', '53', '30', '30', '30']) np.testing.assert_array_equal(unsorted_al.focal.values[:5], usv) np.testing.assert_array_equal(sorted_al.focal.values[:5], sv) def test_ids(self): df = geopandas.read_file(examples.get_path('columbus.dbf')).head() df['my_id'] = range(3, (len(df) + 3)) w = weights.Queen.from_dataframe(df, ids='my_id') w_adj = w.to_adjlist(drop_islands=True) for i in range(3, 8): assert (i in w_adj.focal) assert (i in w_adj.neighbor) for i in w_adj.focal: assert (i in list(range(3, (len(df) + 3)))) for i in w_adj.neighbor: assert (i in list(range(3, (len(df) + 3)))) def test_str_ids(self): df = geopandas.read_file(examples.get_path('columbus.dbf')).head() snakes = ['mamba', 'boa', 'python', 'rattlesnake', 'cobra'] df['my_str_id'] = snakes w = weights.Queen.from_dataframe(df, ids='my_str_id') w_adj = w.to_adjlist(drop_islands=True) for i in snakes: (w_adj.focal == i).any() (w_adj.neighbor == i).any() for i in w_adj.focal: assert (i in snakes) for i in w_adj.neighbor: assert (i in snakes) def test_lat2w(self): w = lat2W(5, 5) manual_neighbors = 
w.to_adjlist().groupby('focal').neighbor.agg(list).to_dict() for (focal, neighbors) in w.neighbors.items(): assert (set(manual_neighbors[focal]) == set(neighbors))
def output_parent_function_json(rule_classification_data_bundle):
    """Write the parent-function -> crimes classification to rules_classification.json."""
    printable = _convert_to_printable_dict(*rule_classification_data_bundle)
    payload = {
        'rules_classification': [
            {'parent': parent, 'crime': crimes}
            for parent, crimes in printable.items()
        ]
    }
    with open('rules_classification.json', 'w') as outfile:
        json.dump(payload, outfile)
def input_parser(user_input):
    """Split a 'pattern/flags' command into [regex, flag_list, f].

    The flag section understands l/c/r/u letters, optional per-flag counts
    ('u3'), repetitions ('c*3' -> 'ccc'), parenthesised group repetitions
    ('(lc)*2' -> 'lclc'), and a trailing 'f' or 'fN' marker. Any parse
    failure falls back to [user_input, [['l', 1]], 0].
    """
    match = re.match(r'(.+)/([lcru*()0-9]*)(f[0-9]*)?', user_input)
    if not (match and (match.group(2) or match.group(3))):
        # No flag section at all: whole input is the regex, default flags.
        return [user_input, [['l', 1]], 0]
    regex, flag_text, f_text = match.group(1), match.group(2), match.group(3)
    try:
        group_repeat = re.compile(r'\(([^())]*)\)\*([0-9]+)')
        # Expand parenthesised repetitions until none remain.
        while group_repeat.search(flag_text):
            for hit in group_repeat.finditer(flag_text):
                flag_text = flag_text.replace(hit.group(0), hit.group(1) * int(hit.group(2)), 1)
        # Expand single-flag repetitions like 'c*3' -> 'ccc'.
        for hit in re.finditer(r'([lcru][0-9]*)\*([0-9]+)', flag_text):
            flag_text = flag_text.replace(hit.group(0), hit.group(1) * int(hit.group(2)), 1)
        tokens = re.findall('[lcru][0-9]*', flag_text)
        flags = [[tok[0], int(tok[1:])] if len(tok) > 1 else [tok[0], 1] for tok in tokens]
        if not flags:
            flags = [['l', 1]]
        # 'f' alone means 1; 'fN' means N; absent means 0.
        if not f_text:
            f_value = 0
        elif len(f_text) == 1:
            f_value = 1
        else:
            f_value = int(f_text[1:])
        return [regex, flags, f_value]
    except Exception:
        return [user_input, [['l', 1]], 0]
# V-Net style 3D decoder: UpsamplingDeconvBlocks with additive skip
# connections consuming encoder features (x1..x5), a 1x1x1 Conv3d head,
# optional Dropout3d(p=0.5), and a WarmStartGradientReverseLayer used by the
# "worst-case" variants.
#  * decode: plain decoding path.
#  * decode_worst: gradient reversal applied to all five input features.
#  * decode_worst_grl_last: reversal applied only once, before the last
#    conv block (appears unused by forward — presumably an experiment).
#  * forward: dispatches to decode_worst when self.worst_case else decode.
# Kept byte-identical; formatting is collapsed in this extraction.
class Decoder(nn.Module): def __init__(self, n_classes=2, n_filters=16, normalization=None, worst_case=False): super(Decoder, self).__init__() self.worst_case = worst_case self.block_five_up = UpsamplingDeconvBlock((n_filters * 16), (n_filters * 8), normalization=normalization) self.block_six = ConvBlock(3, (n_filters * 8), (n_filters * 8), normalization=normalization) self.block_six_up = UpsamplingDeconvBlock((n_filters * 8), (n_filters * 4), normalization=normalization) self.block_seven = ConvBlock(3, (n_filters * 4), (n_filters * 4), normalization=normalization) self.block_seven_up = UpsamplingDeconvBlock((n_filters * 4), (n_filters * 2), normalization=normalization) self.block_eight = ConvBlock(2, (n_filters * 2), (n_filters * 2), normalization=normalization) self.block_eight_up = UpsamplingDeconvBlock((n_filters * 2), n_filters, normalization=normalization) self.block_nine = ConvBlock(1, n_filters, n_filters, normalization=normalization) self.head = nn.Conv3d(n_filters, n_classes, 1, padding=0) self.grl_layer = WarmStartGradientReverseLayer(alpha=1.0, lo=0.0, hi=0.1, max_iters=500, auto_step=False) self.dropout = nn.Dropout3d(p=0.5, inplace=False) def decode(self, features, has_dropout): (x1, x2, x3, x4, x5) = features x5_up = self.block_five_up(x5) x5_up = (x5_up + x4) x6 = self.block_six(x5_up) x6_up = self.block_six_up(x6) x6_up = (x6_up + x3) x7 = self.block_seven(x6_up) x7_up = self.block_seven_up(x7) x7_up = (x7_up + x2) x8 = self.block_eight(x7_up) x8_up = self.block_eight_up(x8) x8_up = (x8_up + x1) x9 = self.block_nine(x8_up) if has_dropout: x9 = self.dropout(x9) out = self.head(x9) return out def decode_worst(self, features, has_dropout): (x1, x2, x3, x4, x5) = features x1 = self.grl_layer(x1) x2 = self.grl_layer(x2) x3 = self.grl_layer(x3) x4 = self.grl_layer(x4) x5 = self.grl_layer(x5) x5_up = self.block_five_up(x5) x5_up = (x5_up + x4) x6 = self.block_six(x5_up) x6_up = self.block_six_up(x6) x6_up = (x6_up + x3) x7 = self.block_seven(x6_up) x7_up = self.block_seven_up(x7) x7_up = (x7_up + x2) x8 = self.block_eight(x7_up) x8_up = self.block_eight_up(x8) x8_up = (x8_up + x1) x9 = self.block_nine(x8_up) if has_dropout: x9 = self.dropout(x9) out = self.head(x9) return out def decode_worst_grl_last(self, features, has_dropout): (x1, x2, x3, x4, x5) = features x5_up = self.block_five_up(x5) x5_up = (x5_up + x4) x6 = self.block_six(x5_up) x6_up = self.block_six_up(x6) x6_up = (x6_up + x3) x7 = self.block_seven(x6_up) x7_up 
= self.block_seven_up(x7) x7_up = (x7_up + x2) x8 = self.block_eight(x7_up) x8_up = self.block_eight_up(x8) x8_up = (x8_up + x1) x8_up = self.grl_layer(x8_up) x9 = self.block_nine(x8_up) if has_dropout: x9 = self.dropout(x9) out = self.head(x9) return out def forward(self, features, has_dropout): if self.worst_case: return self.decode_worst(features, has_dropout) else: return self.decode(features, has_dropout)
def test_n_slack_svm_as_crf_pickling():
    """Train an n-slack SSVM on iris samples cast as trivial one-node graphs
    and verify the model restored through SaveLogger scores like the live one.

    Fix: `np.int` was a deprecated alias removed in NumPy 1.24; the (empty)
    edge array now uses the builtin `int` dtype, which is equivalent.
    """
    iris = load_iris()
    X, y = iris.data, iris.target
    # Each sample becomes a graph with a single node and no edges.
    X_ = [(np.atleast_2d(x), np.empty((0, 2), dtype=int)) for x in X]
    Y = y.reshape(-1, 1)
    X_train, X_test, y_train, y_test = train_test_split(X_, Y, random_state=1)
    _, file_name = mkstemp()
    pbl = GraphCRF(n_features=4, n_states=3, inference_method=inference_method)
    logger = SaveLogger(file_name)
    svm = NSlackSSVM(pbl, C=100, n_jobs=1, logger=logger)
    svm.fit(X_train, y_train)
    # Both the fitted model and its pickled-and-reloaded copy must score well.
    assert_less(0.97, svm.score(X_test, y_test))
    assert_less(0.97, logger.load().score(X_test, y_test))
# Deformable-convolution offset layer: a Conv2d that predicts two offsets
# per spatial position (hence filters*2 output channels) and samples the
# input at the offset grid via the project helpers th_batch_map_offsets /
# th_generate_grid.
# NOTE(review): decorators appear lost in this extraction — _get_grid is
# invoked as self._get_grid(self, x), and _init_weights / _to_bc_h_w_2 /
# _to_bc_h_w / _to_b_c_h_w take no `self`, so all of these were presumably
# @staticmethod in the original; as plain methods this text would raise
# TypeError. Confirm against the original repository before fixing.
# Kept byte-identical; formatting is collapsed in this extraction.
class ConvOffset2D(nn.Conv2d): def __init__(self, filters, out_multi_number, init_normal_stddev=0.01, **kwargs): self.filters = filters self._grid_param = None super(ConvOffset2D, self).__init__(self.filters, (self.filters * 2), 3, padding=1, bias=False, **kwargs) self.weight.data.copy_(self._init_weights(self.weight, init_normal_stddev)) self.out_multi_number = out_multi_number def forward(self, x): x_shape = x.size() offsets = super(ConvOffset2D, self).forward(x) offsets = self._to_bc_h_w_2(offsets, x_shape) x = self._to_bc_h_w(x, x_shape) x_offset = th_batch_map_offsets(x, offsets, grid=self._get_grid(self, x)) x_offset = self._to_b_c_h_w(x_offset, x_shape, self.out_multi_number) return x_offset def _get_grid(self, x): (batch_size, input_height, input_width) = (x.size(0), x.size(1), x.size(2)) (dtype, cuda) = (x.data.type(), x.data.is_cuda) if (self._grid_param == (batch_size, input_height, input_width, dtype, cuda)): return self._grid self._grid_param = (batch_size, input_height, input_width, dtype, cuda) self._grid = th_generate_grid(batch_size, input_height, input_width, dtype, cuda) return self._grid def _init_weights(weights, std): fan_out = weights.size(0) fan_in = ((weights.size(1) * weights.size(2)) * weights.size(3)) w = np.random.normal(0.0, std, (fan_out, fan_in)) return torch.from_numpy(w.reshape(weights.size())) def _to_bc_h_w_2(x, x_shape): x = x.contiguous().view((- 1), int(x_shape[2]), int(x_shape[3]), 2) return x def _to_bc_h_w(x, x_shape): x = x.contiguous().view((- 1), int(x_shape[2]), int(x_shape[3])) return x def _to_b_c_h_w(x, x_shape, out_multi_number): x = x.contiguous().view((- 1), int((x_shape[1] * out_multi_number)), int(x_shape[2]), int(x_shape[3])) return x
def get_f1(key, prediction):
    """Micro-averaged precision/recall/F1 for relation extraction.

    Label 0 means "no relation" and is excluded from both the gold and the
    guessed tallies. By convention precision defaults to 1.0 when nothing
    was guessed. Returns (precision, recall, f1).
    """
    correct_by_relation = Counter()
    guessed_by_relation = Counter()
    gold_by_relation = Counter()
    for idx, gold in enumerate(key):
        guess = prediction[idx]
        if gold == 0 and guess == 0:
            continue
        if guess != 0:
            guessed_by_relation[guess] += 1
        if gold != 0:
            gold_by_relation[gold] += 1
        if gold != 0 and guess != 0 and gold == guess:
            correct_by_relation[guess] += 1
    n_correct = sum(correct_by_relation.values())
    n_guessed = sum(guessed_by_relation.values())
    n_gold = sum(gold_by_relation.values())
    prec_micro = float(n_correct) / float(n_guessed) if n_guessed > 0 else 1.0
    recall_micro = float(n_correct) / float(n_gold) if n_gold > 0 else 0.0
    f1_micro = 0.0
    if prec_micro + recall_micro > 0.0:
        f1_micro = ((2.0 * prec_micro) * recall_micro) / (prec_micro + recall_micro)
    return (prec_micro, recall_micro, f1_micro)
def _iter_namespace(nsp):
    """Yield fully-qualified names of modules found under package `nsp`.

    First walks `nsp.__path__` with pkgutil; then, for frozen-app importers
    (e.g. py2exe) that expose a `toc` table instead of a file system, yields
    any table entries living under the package prefix.
    """
    prefix = '{}.'.format(nsp.__name__)
    for _finder, mod_name, _is_pkg in pkgutil.iter_modules(nsp.__path__, prefix):
        yield mod_name
    toc = set()
    top_level = nsp.__name__.partition('.')[0]
    for importer in pkgutil.iter_importers(top_level):
        toc |= getattr(importer, 'toc', set())
    for entry in toc:
        if entry.startswith(prefix):
            yield entry
class SaveEpochEndCallback(TrainerCallback):
    """Trainer callback that requests a checkpoint at selected epoch ends."""

    def __init__(self, save_epochs: int=None) -> None:
        super().__init__()
        # Save every `save_epochs` epochs; None means save at every epoch end.
        self.save_epochs = save_epochs

    def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        if self.save_epochs is None:
            control.should_save = True
        else:
            # state.epoch may be fractional; round up to the epoch index and
            # save only on multiples of save_epochs.
            control.should_save = math.ceil(state.epoch) % self.save_epochs == 0
        return control
# pluggy.PluginManager variant that refuses to let a failing hook
# implementation crash the host application: any exception raised while
# executing a hook is logged with its traceback and an empty list is
# returned in place of the hook results.
# NOTE(review): for firstresult=True hooks pluggy normally yields a single
# value or None, so [] is a slightly surprising fallback — confirm callers
# tolerate it.
class PluginManager(pluggy.PluginManager): def _hookexec(self, hook_name: str, methods: Sequence[HookImpl], kwargs: Mapping[(str, object)], firstresult: bool) -> Union[(object, List[object])]: try: return self._inner_hookexec(hook_name, methods, kwargs, firstresult) except Exception as e: log.warning(f'Failed to load hook {hook_name}: {e}', exc_info=True) return []
def compute_f1_and_exact(metrics, preds, labels, loss_key):
    """Append averaged token F1 and per-action exact-match scores to *metrics*.

    For each (prediction, label) string pair a whole-string F1 is computed;
    additionally the lower-cased, space-split action tokens are compared
    pairwise for exact match.  The per-key means are appended to the lists in
    *metrics* under '<loss_key>/f1' and '<loss_key>/exact'.
    """
    f1_key = '{}/f1'.format(loss_key)
    exact_key = '{}/exact'.format(loss_key)
    scores = collections.defaultdict(list)
    for pred_str, label_str in zip(preds, labels):
        scores[f1_key].append(metric_util.compute_f1(label_str, pred_str))
        pred_actions = pred_str.lower().split(' ')
        label_actions = label_str.lower().split(' ')
        # Pairwise exact match on aligned action tokens (extra tokens ignored).
        for pred_action, label_action in zip(pred_actions, label_actions):
            scores[exact_key].append(metric_util.compute_exact(label_action, pred_action))
    for key, values in scores.items():
        metrics[key].append(sum(values) / len(values))
def test_distributionrange():
    """Equality, XML round-trip and version validation for DistributionRange."""
    base = OSC.DistributionRange(1, OSC.Range(0, 3))
    same = OSC.DistributionRange(1, OSC.Range(0, 3))
    other_step = OSC.DistributionRange(2, OSC.Range(0, 3))
    other_range = OSC.DistributionRange(1, OSC.Range(0, 4))
    prettyprint(base)
    # Equal only when both step width and range match.
    assert base == same
    assert base != other_step
    assert base != other_range
    # Serialising to XML and parsing back yields an equal object.
    reparsed = OSC.DistributionRange.parse(base.get_element())
    assert reparsed == base
    # Valid in both OpenSCENARIO minor versions.
    assert version_validation('DistributionRange', base, 1) == ValidationResponse.OK
    assert version_validation('DistributionRange', base, 2) == ValidationResponse.OK
class JciHitachiWindSwingableSwitchEntity(JciHitachiEntity, SwitchEntity):
    """Switch entity exposing the "wind swingable" feature of a Jci-Hitachi device."""
    # NOTE(review): name/is_on/unique_id read like Home Assistant @property
    # methods whose decorators may have been lost upstream — confirm.

    def __init__(self, thing, coordinator):
        super().__init__(thing, coordinator)

    def name(self):
        return f'{self._thing.name} Wind Swingable'

    def is_on(self):
        # Most recent pushed status for this device; None when nothing yet.
        status = self.hass.data[DOMAIN][UPDATED_DATA].get(self._thing.name, None)
        if not status:
            return None
        return status.wind_swingable != 'disabled'

    def unique_id(self):
        return f'{self._thing.gateway_mac_address}_wind_swingable_switch'

    def turn_on(self):
        _LOGGER.debug(f'Turn {self.name} on')
        self.put_queue(status_name='wind_swingable', status_str_value='enabled')
        self.update()

    def turn_off(self):
        _LOGGER.debug(f'Turn {self.name} off')
        self.put_queue(status_name='wind_swingable', status_str_value='disabled')
        self.update()
# NOTE(review): "_module()" looks like the truncated tail of a decorator —
# probably "@DATASETS.register_module()"; confirm against the original source.
_module()
class CityscapesDataset(CustomDataset):
    """Cityscapes semantic-segmentation dataset.

    Uses the 19 standard trainId classes with the official palette; image and
    segmentation-map files are located via the Cityscapes filename suffixes.
    """

    # The 19 evaluation classes, indexed by trainId.
    CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle')

    # Official RGB colour per class, same order as CLASSES.
    PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]]

    def __init__(self, img_suffix='_leftImg8bit.png', seg_map_suffix='_gtFine_labelTrainIds.png', **kwargs):
        """Forward the Cityscapes-specific file suffixes to CustomDataset."""
        super(CityscapesDataset, self).__init__(img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)

    def _convert_to_label_id(result):
        """Map trainIds in *result* back to raw Cityscapes label ids.

        NOTE(review): defined without ``self`` yet called as
        ``self._convert_to_label_id(result)`` below — presumably a
        ``@staticmethod`` whose decorator was lost; confirm upstream.
        """
        if isinstance(result, str):
            # Allow results stored on disk as .npy files.
            result = np.load(result)
        import cityscapesscripts.helpers.labels as CSLabels
        result_copy = result.copy()
        for (trainId, label) in CSLabels.trainId2label.items():
            result_copy[(result == trainId)] = label.id
        return result_copy

    def results2img(self, results, imgfile_prefix, to_label_id):
        """Write each result as an indexed-colour PNG under *imgfile_prefix*.

        Returns the list of written file paths (one per dataset sample).
        """
        mmcv.mkdir_or_exist(imgfile_prefix)
        result_files = []
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            if to_label_id:
                result = self._convert_to_label_id(result)
            filename = self.img_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            png_filename = osp.join(imgfile_prefix, f'{basename}.png')
            # 'P' mode: paletted 8-bit image, as expected by the CS tooling.
            output = Image.fromarray(result.astype(np.uint8)).convert('P')
            import cityscapesscripts.helpers.labels as CSLabels
            palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8)
            if to_label_id:
                # Colour by raw label id using the official colours.
                for (label_id, label) in CSLabels.id2label.items():
                    palette[label_id] = label.color
            else:
                # Colour by trainId using this dataset's palette.
                palette = np.array(self.PALETTE, dtype=np.uint8)
            output.putpalette(palette)
            output.save(png_filename)
            result_files.append(png_filename)
            prog_bar.update()
        return result_files

    def format_results(self, results, imgfile_prefix=None, to_label_id=True):
        """Render *results* to PNGs for official evaluation.

        Returns ``(result_files, tmp_dir)``; ``tmp_dir`` is a
        TemporaryDirectory the caller must keep alive (and clean up) when no
        explicit *imgfile_prefix* was given, else None.
        """
        assert isinstance(results, list), 'results must be a list'
        assert (len(results) == len(self)), f'The length of results is not equal to the dataset len: {len(results)} != {len(self)}'
        if (imgfile_prefix is None):
            tmp_dir = tempfile.TemporaryDirectory()
            imgfile_prefix = tmp_dir.name
        else:
            tmp_dir = None
        result_files = self.results2img(results, imgfile_prefix, to_label_id)
        return (result_files, tmp_dir)

    def evaluate(self, results, metric='mIoU', logger=None, imgfile_prefix=None, efficient_test=False):
        """Evaluate *results*; 'cityscapes' runs the official scripts, other
        metrics are delegated to CustomDataset.evaluate."""
        eval_results = dict()
        metrics = (metric.copy() if isinstance(metric, list) else [metric])
        if ('cityscapes' in metrics):
            eval_results.update(self._evaluate_cityscapes(results, logger, imgfile_prefix))
            metrics.remove('cityscapes')
        if (len(metrics) > 0):
            eval_results.update(super(CityscapesDataset, self).evaluate(results, metrics, logger, efficient_test))
        return eval_results

    def _evaluate_cityscapes(self, results, logger, imgfile_prefix):
        """Run the official cityscapesscripts pixel-level evaluation.

        Raises ImportError when cityscapesscripts is not installed.
        """
        try:
            import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval
        except ImportError:
            raise ImportError('Please run "pip install cityscapesscripts" to install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if (logger is None):
            msg = ('\n' + msg)
        print_log(msg, logger=logger)
        (result_files, tmp_dir) = self.format_results(results, imgfile_prefix)
        if (tmp_dir is None):
            result_dir = imgfile_prefix
        else:
            result_dir = tmp_dir.name
        eval_results = dict()
        print_log(f'Evaluating results under {result_dir} ...', logger=logger)
        CSEval.args.evalInstLevelScore = True
        CSEval.args.predictionPath = osp.abspath(result_dir)
        CSEval.args.evalPixelAccuracy = True
        CSEval.args.JSONOutput = False
        seg_map_list = []
        pred_list = []
        # Pair every ground-truth labelIds map with its predicted PNG.
        for seg_map in mmcv.scandir(self.ann_dir, 'gtFine_labelIds.png', recursive=True):
            seg_map_list.append(osp.join(self.ann_dir, seg_map))
            pred_list.append(CSEval.getPrediction(CSEval.args, seg_map))
        eval_results.update(CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args))
        if (tmp_dir is not None):
            tmp_dir.cleanup()
        return eval_results
class ReplayBuffer(object):
    """Fixed-capacity circular replay buffer for off-policy RL.

    Transitions live in pre-allocated numpy arrays; ``sample`` returns
    float32 torch tensors on the configured device.
    """

    def __init__(self, state_dim, action_dim, max_size=int(1000000.0), device=torch.device('cuda')):
        self.max_size = max_size
        self.ptr = 0    # next write slot (wraps around)
        self.size = 0   # number of valid transitions stored
        self.state = np.zeros((max_size, state_dim))
        self.action = np.zeros((max_size, action_dim))
        self.next_state = np.zeros((max_size, state_dim))
        self.reward = np.zeros((max_size, 1))
        self.not_done = np.zeros((max_size, 1))
        self.device = device

    def add(self, state, action, next_state, reward, done):
        """Store one transition, overwriting the oldest slot once full."""
        slot = self.ptr
        self.state[slot] = state
        self.action[slot] = action
        self.next_state[slot] = next_state
        self.reward[slot] = reward
        # Stored inverted so TD targets can multiply by it directly.
        self.not_done[slot] = 1.0 - done
        self.ptr = (slot + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample(self, batch_size):
        """Sample *batch_size* transitions uniformly with replacement.

        Returns (state, action, next_state, reward, not_done) tensors.
        """
        ind = np.random.randint(0, self.size, size=batch_size)

        def as_tensor(array):
            return torch.FloatTensor(array[ind]).to(self.device)

        return (
            as_tensor(self.state),
            as_tensor(self.action),
            as_tensor(self.next_state),
            as_tensor(self.reward),
            as_tensor(self.not_done),
        )
class TrotterStep(metaclass=abc.ABCMeta):
    """One step of a Trotter-Suzuki decomposition for simulating *hamiltonian*.

    A simulation calls ``prepare`` once, ``trotter_step`` repeatedly —
    tracking any qubit reordering via ``step_qubit_permutation`` — and
    ``finish`` after the last step.  Subclasses implement ``trotter_step``.
    """

    def __init__(self, hamiltonian: Hamiltonian) -> None:
        self.hamiltonian = hamiltonian

    def prepare(self, qubits: Sequence[cirq.Qid], control_qubit: Optional[cirq.Qid]=None) -> cirq.OP_TREE:
        """Operations to run once before the first step; default: none."""
        return ()

    def trotter_step(self, qubits: Sequence[cirq.Qid], time: float, control_qubit: Optional[cirq.Qid]=None) -> cirq.OP_TREE:
        """Operations implementing a single Trotter step of duration *time*.

        NOTE(review): this method has no body in this dump — an
        ``@abc.abstractmethod`` decorator and/or implementation appears to
        have been lost in extraction; confirm against the original source.
        """

    def step_qubit_permutation(self, qubits: Sequence[cirq.Qid], control_qubit: Optional[cirq.Qid]=None) -> Tuple[(Sequence[cirq.Qid], Optional[cirq.Qid])]:
        """Qubit ordering after one step; default: unchanged."""
        return (qubits, control_qubit)

    def finish(self, qubits: Sequence[cirq.Qid], n_steps: int, control_qubit: Optional[cirq.Qid]=None, omit_final_swaps: bool=False) -> cirq.OP_TREE:
        """Operations to run after the last of *n_steps* steps; default: none."""
        return ()
def relu_dropout(x, p=0, inplace=False, training=False):
    """Fused ReLU followed by inverted dropout with drop probability *p*.

    Outside training (or when ``p == 0``) this reduces to a plain ReLU.
    During training, negatives and a random fraction ``p`` of the remaining
    entries are zeroed and survivors are scaled by ``1 / (1 - p)``.
    With ``inplace=True`` the input tensor is modified and returned.
    """
    if not training or p == 0:
        return x.clamp_(min=0) if inplace else x.clamp(min=0)
    # Zero wherever the ReLU would (x < 0) or where dropout fires.
    drop_mask = (x < 0) | (torch.rand_like(x) > (1 - p))
    keep_prob = 1 - p
    if inplace:
        return x.masked_fill_(drop_mask, 0).div_(keep_prob)
    return x.masked_fill(drop_mask, 0).div(keep_prob)
def update_config_from_widgets(unscaled_config: UnscaledTrackerConfig, btrack_widget: btrack.napari.widgets.BtrackWidget) -> UnscaledTrackerConfig:
    """Copy the current widget values back into *unscaled_config* and return it."""
    checked = QtCore.Qt.CheckState.Checked
    tracker_config = unscaled_config.tracker_config
    motion = tracker_config.motion_model
    hypothesis = tracker_config.hypothesis_model

    # Top-level tracker settings.
    tracker_config.update_method = btrack_widget.update_method.currentIndex()
    tracker_config.max_search_radius = btrack_widget.max_search_radius.value()
    motion.max_lost = btrack_widget.max_lost.value()
    motion.prob_not_assign = btrack_widget.prob_not_assign.value()
    tracker_config.enable_optimisation = btrack_widget.enable_optimisation.checkState() == checked

    # Motion model: per-matrix sigmas plus accuracy.
    sigmas: Sigmas = unscaled_config.sigmas
    for matrix_name in sigmas:
        sigmas[matrix_name] = btrack_widget[f'{matrix_name}_sigma'].value()
    motion.accuracy = btrack_widget.accuracy.value()

    # Hypothesis model: which hypotheses are enabled plus all tunables.
    hypothesis.hypotheses = [
        hypothesis_name
        for i, hypothesis_name in enumerate(btrack.optimise.hypothesis.H_TYPES)
        if btrack_widget['hypotheses'].item(i).checkState() == checked
    ]
    for scaling_factor in btrack.napari.constants.HYPOTHESIS_SCALING_FACTORS:
        setattr(hypothesis, scaling_factor, btrack_widget[scaling_factor].value())
    for threshold in btrack.napari.constants.HYPOTHESIS_THRESHOLDS:
        setattr(hypothesis, threshold, btrack_widget[threshold].value())
    hypothesis.segmentation_miss_rate = btrack_widget.segmentation_miss_rate.value()
    hypothesis.relax = btrack_widget.relax.checkState() == checked

    return unscaled_config
def create_dataset(dataset, config, min_scale=0.5):
    """Build the training dataset named by *dataset* ('pretrain' or 'finetune').

    Both pipelines use random-resized-crop + horizontal flip + RandAugment;
    the finetune dataset additionally gets a fixed 224-input-size variant.
    Returns None for any other *dataset* value.
    """
    # NOTE(review): these Normalize constants look truncated/garbled — a std
    # of (0., 0., 0.) divides by zero.  Compare with the usual CLIP/ALBEF
    # values (mean (0.48145466, 0.4578275, 0.40821073),
    # std (0.26862954, 0.26130258, 0.27577711)) and confirm upstream.
    normalize = transforms.Normalize((0., 0.4578275, 0.), (0., 0., 0.))
    # Main train transform at the configured image size.
    transform_train = transforms.Compose([transforms.RandomResizedCrop(config['image_size'], scale=(min_scale, 1.0), interpolation=InterpolationMode.BICUBIC), transforms.RandomHorizontalFlip(), RandomAugment(2, 5, isPIL=True, augs=['Identity', 'AutoContrast', 'Brightness', 'Sharpness', 'Equalize', 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), transforms.ToTensor(), normalize])
    # Same pipeline but with a fixed 224x224 crop (finetune only).
    transform_inputsize_224 = transforms.Compose([transforms.RandomResizedCrop(224, scale=(min_scale, 1.0), interpolation=InterpolationMode.BICUBIC), transforms.RandomHorizontalFlip(), RandomAugment(2, 5, isPIL=True, augs=['Identity', 'AutoContrast', 'Brightness', 'Sharpness', 'Equalize', 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), transforms.ToTensor(), normalize])
    if (dataset == 'pretrain'):
        dataset = pretrain_dataset(config['train_file'], transform_train, class_num=config['class_num'], root=config['image_path_root'])
        return dataset
    elif (dataset == 'finetune'):
        dataset = finetune_dataset(config['train_file'], transform_train, transform_inputsize_224, class_num=config['class_num'], root=config['image_path_root'])
        return dataset
class Migration(migrations.Migration):
    """Split advertisement copy into headline/content/cta and relax the text field.

    All new fields are nullable/blank so existing rows stay valid.
    """

    dependencies = [('adserver', '0046_exclude_publishers')]

    operations = [migrations.AddField(model_name='advertisement', name='content', field=models.TextField(blank=True, help_text='For most ad types, the combined length of the headline, body, and call to action should be less than 100 characters.', null=True)), migrations.AddField(model_name='advertisement', name='cta', field=models.CharField(blank=True, help_text='An optional call to action displayed at the end of the ad usually in bold', max_length=200, null=True, verbose_name='Call to action')), migrations.AddField(model_name='advertisement', name='headline', field=models.CharField(blank=True, help_text='An optional headline at the end of the ad usually displayed in bold', max_length=200, null=True)), migrations.AlterField(model_name='advertisement', name='text', field=models.TextField(blank=True, help_text='For most ad types, the text should be less than 100 characters.', verbose_name='Text'))]
class CreatecloneTest(tf.test.TestCase):
    """Tests for model_deploy.create_clones: clone scopes, device placement of
    variables vs. compute, and update-op collection per clone."""

    def setUp(self):
        # Deterministic toy dataset: 16 one-hot rows whose hot index encodes
        # the (binary) label, so the classifiers are trivially learnable.
        np.random.seed(0)
        self._inputs = np.zeros((16, 4))
        self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
        self._logdir = self.get_temp_dir()
        for i in range(16):
            j = int(((2 * self._labels[i]) + np.random.randint(0, 2)))
            self._inputs[(i, j)] = 1

    def testCreateLogisticClassifier(self):
        """Single clone, no batch norm: 2 vars on CPU, compute on GPU, no update ops."""
        g = tf.Graph()
        with g.as_default():
            tf.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = LogisticClassifier
            clone_args = (tf_inputs, tf_labels)
            deploy_config = model_deploy.DeploymentConfig(num_clones=1)
            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
            clone = clones[0]
            self.assertEqual(len(slim.get_variables()), 2)
            for v in slim.get_variables():
                self.assertDeviceEqual(v.device, 'CPU:0')
                self.assertDeviceEqual(v.value().device, 'CPU:0')
            self.assertEqual(clone.outputs.op.name, 'LogisticClassifier/fully_connected/Sigmoid')
            self.assertEqual(clone.scope, '')
            self.assertDeviceEqual(clone.device, 'GPU:0')
            self.assertEqual(len(slim.losses.get_losses()), 1)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.assertEqual(update_ops, [])

    def testCreateSingleclone(self):
        """Single clone with batch norm: 5 vars and 2 moving-average update ops."""
        g = tf.Graph()
        with g.as_default():
            tf.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = BatchNormClassifier
            clone_args = (tf_inputs, tf_labels)
            deploy_config = model_deploy.DeploymentConfig(num_clones=1)
            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
            clone = clones[0]
            self.assertEqual(len(slim.get_variables()), 5)
            for v in slim.get_variables():
                self.assertDeviceEqual(v.device, 'CPU:0')
                self.assertDeviceEqual(v.value().device, 'CPU:0')
            self.assertEqual(clone.outputs.op.name, 'BatchNormClassifier/fully_connected/Sigmoid')
            self.assertEqual(clone.scope, '')
            self.assertDeviceEqual(clone.device, 'GPU:0')
            self.assertEqual(len(slim.losses.get_losses()), 1)
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            self.assertEqual(len(update_ops), 2)

    def testCreateMulticlone(self):
        """Four clones share the 5 variables; each clone gets its own scope,
        GPU and per-scope update ops."""
        g = tf.Graph()
        with g.as_default():
            tf.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = BatchNormClassifier
            clone_args = (tf_inputs, tf_labels)
            num_clones = 4
            deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)
            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
            self.assertEqual(len(slim.get_variables()), 5)
            for v in slim.get_variables():
                self.assertDeviceEqual(v.device, 'CPU:0')
                self.assertDeviceEqual(v.value().device, 'CPU:0')
            self.assertEqual(len(clones), num_clones)
            for (i, clone) in enumerate(clones):
                self.assertEqual(clone.outputs.op.name, ('clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i))
                update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
                self.assertEqual(len(update_ops), 2)
                self.assertEqual(clone.scope, ('clone_%d/' % i))
                self.assertDeviceEqual(clone.device, ('GPU:%d' % i))

    def testCreateOnecloneWithPS(self):
        """One clone plus a parameter server: variables live on the PS task."""
        g = tf.Graph()
        with g.as_default():
            tf.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = BatchNormClassifier
            clone_args = (tf_inputs, tf_labels)
            deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)
            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
            self.assertEqual(len(clones), 1)
            clone = clones[0]
            self.assertEqual(clone.outputs.op.name, 'BatchNormClassifier/fully_connected/Sigmoid')
            self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:0')
            self.assertEqual(clone.scope, '')
            self.assertEqual(len(slim.get_variables()), 5)
            for v in slim.get_variables():
                self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
                self.assertDeviceEqual(v.device, v.value().device)

    def testCreateMulticloneWithPS(self):
        """Two clones, two PS tasks: variables are round-robined across tasks."""
        g = tf.Graph()
        with g.as_default():
            tf.set_random_seed(0)
            tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
            tf_labels = tf.constant(self._labels, dtype=tf.float32)
            model_fn = BatchNormClassifier
            clone_args = (tf_inputs, tf_labels)
            deploy_config = model_deploy.DeploymentConfig(num_clones=2, num_ps_tasks=2)
            self.assertEqual(slim.get_variables(), [])
            clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
            self.assertEqual(len(slim.get_variables()), 5)
            for (i, v) in enumerate(slim.get_variables()):
                t = (i % 2)
                self.assertDeviceEqual(v.device, ('/job:ps/task:%d/device:CPU:0' % t))
                self.assertDeviceEqual(v.device, v.value().device)
            self.assertEqual(len(clones), 2)
            for (i, clone) in enumerate(clones):
                self.assertEqual(clone.outputs.op.name, ('clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i))
                self.assertEqual(clone.scope, ('clone_%d/' % i))
                self.assertDeviceEqual(clone.device, ('/job:worker/device:GPU:%d' % i))
def collect_default_updates(outputs: Sequence[Variable], *, inputs: Optional[Sequence[Variable]]=None, must_be_shared: bool=True) -> Dict[(Variable, Variable)]:
    """Infer default update expressions for the RNG variables feeding *outputs*.

    For each RNG input of the graph (shared variables only unless
    ``must_be_shared`` is False), follow it forward through the op that
    consumes it to find the next RNG state, producing an
    ``{old_rng: next_rng}`` mapping.  An explicit ``default_update``
    attribute on the RNG takes precedence over the inferred one.
    """
    from pymc.distributions.distribution import SymbolicRandomVariable

    def find_default_update(clients, rng: Variable) -> Union[(None, Variable)]:
        # Recursively walk the graph from `rng` through its single client op
        # until an op that emits an updated RNG state is found.
        rng_clients = clients.get(rng, None)
        # RNG unused in the graph: its state is unchanged, update is itself.
        if (not rng_clients):
            return rng
        # Multiple consumers of one RNG state are ambiguous — refuse to guess.
        if (len(rng_clients) > 1):
            warnings.warn(f'RNG Variable {rng} has multiple clients. This is likely an inconsistent random graph.', UserWarning)
            return None
        [client, _] = rng_clients[0]
        # The RNG itself is a graph output: nothing further to follow.
        if (client == 'output'):
            return rng
        if isinstance(client.op, RandomVariable):
            # RandomVariable ops return the next RNG state as output 0.
            next_rng = client.outputs[0]
        elif isinstance(client.op, SymbolicRandomVariable):
            next_rng = client.op.update(client).get(rng)
            if (next_rng is None):
                raise ValueError(f'No update found for at least one RNG used in SymbolicRandomVariable Op {client.op}')
        elif isinstance(client.op, Scan):
            # Map the Scan outer-input position to its outer output, if any.
            rng_idx = client.inputs.index(rng)
            io_map = client.op.get_oinp_iinp_iout_oout_mappings()['outer_out_from_outer_inp']
            out_idx = io_map.get(rng_idx, (- 1))
            if (out_idx != (- 1)):
                next_rng = client.outputs[out_idx]
            else:
                raise ValueError(f'''No update found for at least one RNG used in Scan Op {client.op}. You can use `pytensorf.collect_default_updates` inside the Scan function to return updates automatically.''')
        else:
            # Op that consumes the RNG without producing a new state (e.g. a
            # view): no default update can be inferred.
            return None
        return find_default_update(clients, next_rng)

    if (inputs is None):
        inputs = []
    # Build a FunctionGraph view (no clone) just to obtain client mappings.
    outputs = makeiter(outputs)
    fg = FunctionGraph(outputs=outputs, clone=False)
    clients = fg.clients
    rng_updates = {}
    for input_rng in (inp for inp in graph_inputs(outputs, blockers=inputs) if (((not must_be_shared) or isinstance(inp, SharedVariable)) and isinstance(inp.type, RandomType))):
        default_update = find_default_update(clients, input_rng)
        # A user-set default_update on the variable wins over inference.
        if getattr(input_rng, 'default_update', None):
            rng_updates[input_rng] = input_rng.default_update
        elif (default_update is not None):
            rng_updates[input_rng] = default_update
    return rng_updates
def build_stages(command):
    """Build a click command that runs *command*'s stages in sequence.

    The generated callback invokes each stage's command with its CLI
    parameters remapped (per ``stage.remap_params``) and overridden (per
    ``stage.params``), forwarding any declared inject values, and returns the
    concatenated outputs of all stages.
    """

    def run(ctx, **cli_params):
        outputs = []
        for stage in command.stages:
            # Translate the outer CLI option names into this stage's names,
            # then apply the stage's fixed overrides.
            stage_params = {
                remap.old.lstrip('-'): cli_params[remap.new.lstrip('-')]
                for remap in stage.remap_params
            }
            stage_params.update(stage.params)
            # Fresh dict per invocation so stages cannot share mutations.
            inject_namespace = {
                key: value for key, value in cli_params.items()
                if key in command.inject_values
            }
            stage_cmd = cli.get_command(ctx, stage.command)
            outputs.extend(ctx.invoke(stage_cmd, **stage_params, inject_values=inject_namespace))
        return outputs

    return cli_tools.DocumentedCommand(
        name=command.name,
        params=(command.arguments + command.options),
        callback=click.pass_context(run),
        short_help=command.short_help,
        help=command.help,
        section=getattr(command, 'section', None),
        hidden=command.hidden,
    )
def parse_checkpoints(files):
    """Extract ``(number, filename)`` pairs from checkpoint file names.

    The epoch-based pattern is tried first, falling back to the update-based
    one; files matching neither are skipped.
    """
    entries = []
    for filename in files:
        match = (pt_regexp_epoch_based.fullmatch(filename)
                 or pt_regexp_update_based.fullmatch(filename))
        if match is not None:
            entries.append((int(match.group(1)), match.group(0)))
    return entries
class ResNet(MetaModule):
    """CIFAR-style ResNet (depth = 6n + 2) built from meta-learning layers.

    Three stages of 16/32/64 channels, Bottleneck blocks for depth >= 44 and
    BasicBlock otherwise, an 8x8 average pool, then a linear classifier.
    """

    def __init__(self, depth, n_outputs):
        super(ResNet, self).__init__()
        assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
        blocks_per_stage = (depth - 2) // 6
        block = Bottleneck if depth >= 44 else BasicBlock
        self.inplanes = 16
        self.conv1 = MetaConv2d(3, 16, kernel_size=3, padding=1, bias=False)
        self.bn1 = MetaBatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, blocks_per_stage)
        self.layer2 = self._make_layer(block, 32, blocks_per_stage, stride=2)
        self.layer3 = self._make_layer(block, 64, blocks_per_stage, stride=2)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = MetaLinear(64 * block.expansion, n_outputs)
        # He-style init for convolutions; identity-like init for batch norms.
        for module in self.modules():
            if isinstance(module, MetaConv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(module, MetaBatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack *blocks* residual blocks, downsampling in the first if the
        stride or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                MetaConv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                MetaBatchNorm2d(planes * block.expansion),
            )
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        layers.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return self.fc(x)
def fold_all_batch_norms_to_scale(sim: QuantizationSimModel) -> List[Tuple[(QcQuantizeWrapper, QcQuantizeWrapper)]]:
    """Fold every foldable batch norm in *sim* into its neighbouring conv's scale.

    Handles both conv→bn and bn→conv orderings; the returned list is
    normalised to ``(conv_wrapper, bn_wrapper)`` pairs for all folds applied.
    """
    assert sim.model is not None
    assert sim.connected_graph is not None

    # Map each wrapped module back to its quantization wrapper.
    wrapper_of = {
        wrapper._module_to_wrap: wrapper
        for _, wrapper in sim.quant_wrappers()
    }
    conv_bn_pairs, bn_conv_pairs, _ = _find_all_batch_norms_to_fold(sim.connected_graph)
    conv_bn_pairs = [(wrapper_of[conv], wrapper_of[bn]) for conv, bn in conv_bn_pairs]
    bn_conv_pairs = [(wrapper_of[bn], wrapper_of[conv]) for bn, conv in bn_conv_pairs]

    _fold_given_batch_norms(sim.model, conv_bn_pairs, bn_conv_pairs)

    # Flip the bn→conv pairs so every returned tuple reads (conv, bn).
    return conv_bn_pairs + [(conv, bn) for bn, conv in bn_conv_pairs]
class TestSimpleStubModule():
    """End-to-end check that autoapi renders the ``example`` .pyi stub package:
    class/attribute/method anchors exist and docstrings surface in the HTML."""

    # NOTE(review): "(autouse=True, scope='class')" appears to be the
    # truncated tail of "@pytest.fixture(autouse=True, scope='class')";
    # as written this is not valid syntax — confirm against the original.
    (autouse=True, scope='class')
    def built(self, builder):
        # Build the docs once for the whole class; warnings are errors.
        builder('pyiexample', warningiserror=True)

    def test_integration(self, parse):
        example_file = parse('_build/html/autoapi/example/index.html')
        # Names guarded by TYPE_CHECKING/私 stubs must not leak into the docs.
        assert ('DoNotFindThis' not in example_file)
        foo_sig = example_file.find(id='example.Foo')
        assert foo_sig
        foo = foo_sig.parent
        assert foo.find(id='example.Foo.Meta')
        # Class variables keep their docstrings, with or without a value.
        class_var = foo.find(id='example.Foo.another_class_var')
        class_var_docstring = class_var.parent.find('dd').contents[0].text
        assert (class_var_docstring.strip() == 'Another class var docstring')
        class_var = foo.find(id='example.Foo.class_var_without_value')
        class_var_docstring = class_var.parent.find('dd').contents[0].text
        assert (class_var_docstring.strip() == 'A class var without a value.')
        method_okay = foo.find(id='example.Foo.method_okay')
        assert method_okay
        method_multiline = foo.find(id='example.Foo.method_multiline')
        assert method_multiline
        method_without_docstring = foo.find(id='example.Foo.method_without_docstring')
        assert method_without_docstring
        # __init__ parameter docs are rendered as a field list on the class.
        init_args = foo.parent.find_next(class_='field-list')
        assert ('Set an attribute' in init_args.text)
class SportTest(unittest.TestCase):
    """Column-level validation tests for the Sport model.

    Each test runs against a fresh schema (setUp/tearDown) and checks that
    valid values are coerced and stored while invalid ones are rejected at
    flush/commit time by the database layer.
    """

    def setUp(self):
        # Fresh database per test, without the default sport rows.
        self.ddbb = DDBB()
        self.ddbb.connect()
        self.ddbb.create_tables(add_default=False)

    def tearDown(self):
        self.ddbb.disconnect()
        self.ddbb.drop_tables()

    # ---- id column ----------------------------------------------------
    def test_id_should_default_to_none(self):
        sport = Sport()
        self.assertEqual(None, sport.id)

    def test_id_should_accept_integer(self):
        sport = Sport()
        sport.id = 1
        self.assertEqual(1, sport.id)

    def test_id_should_accept_integer_string(self):
        sport = Sport()
        sport.id = '1'
        self.ddbb.session.add(sport)
        self.ddbb.session.commit()
        sport = self.ddbb.session.query(Sport).filter((Sport.id == 1)).one()
        self.assertEqual(1, sport.id)

    def test_id_should_not_accept_non_integer_string(self):
        sport = Sport()
        try:
            sport.id = 'test'
            self.ddbb.session.add(sport)
            self.ddbb.session.flush()
        except (IntegrityError, DataError, OperationalError):
            pass
        else:
            self.fail()

    # ---- name column --------------------------------------------------
    def test_name_should_default_to_empty_string(self):
        sport = Sport()
        self.assertEqual(u'', sport.name)

    def test_name_should_accept_unicode_string(self):
        sport = Sport()
        sport.name = u'Unicycling'
        self.assertEqual(u'Unicycling', sport.name)

    # NOTE(review): this bare tuple appears to be the truncated tail of
    # "@unittest.skipIf(sys.version_info > (3, 0), 'All strings are unicode
    # in Python 3')"; confirm against the original source.
    ((sys.version_info > (3, 0)), 'All strings are unicode in Python 3')
    def test_name_should_not_accept_non_unicode_string(self):
        sport = Sport()
        sport.name = ('Juggling' + chr(255))
        try:
            self.ddbb.session.add(sport)
            self.ddbb.session.flush()
        except (ProgrammingError, DataError, OperationalError):
            pass
        else:
            self.fail()

    def test_name_should_not_accept_none(self):
        sport = Sport()
        sport.name = None
        try:
            self.ddbb.session.add(sport)
            self.ddbb.session.commit()
        except (IntegrityError, OperationalError):
            pass
        else:
            self.fail()

    # ---- met column ---------------------------------------------------
    def test_met_should_default_to_None(self):
        sport = Sport()
        self.assertEqual(None, sport.met)

    def test_met_should_accept_float(self):
        sport = Sport()
        sport.met = 22.5
        self.ddbb.session.add(sport)
        self.ddbb.session.flush()
        self.assertEqual(22.5, sport.met)

    def test_met_should_accept_float_string(self):
        sport = Sport()
        sport.name = 'test1'
        sport.met = '22.5'
        self.ddbb.session.add(sport)
        self.ddbb.session.commit()
        sport = self.ddbb.session.query(Sport).filter((Sport.id == 1)).one()
        self.assertEqual(22.5, sport.met)

    def test_met_should_not_accept_non_float_string(self):
        sport = Sport()
        sport.met = '22.5kg'
        try:
            self.ddbb.session.add(sport)
            self.ddbb.session.flush()
        except (ValueError, StatementError):
            pass
        else:
            self.fail()

    def test_met_should_not_accept_negative_value(self):
        # MySQL ignores CHECK constraints, so this cannot fail there.
        if (self.ddbb.engine.name == 'mysql'):
            self.skipTest('Check constraints not available on Mysql')
        sport = Sport()
        sport.met = (- 1)
        try:
            self.ddbb.session.add(sport)
            self.ddbb.session.flush()
        except (IntegrityError, InterfaceError):
            pass
        else:
            self.fail()

    def test_met_should_accept_none(self):
        sport = Sport()
        sport.met = None
        self.assertEqual(None, sport.met)

    # ---- weight column ------------------------------------------------
    def test_weight_should_default_to_zero(self):
        sport = Sport()
        self.assertEqual(0, sport.weight)

    def test_weight_should_accept_float(self):
        sport = Sport()
        sport.weight = 22.5
        self.assertEqual(22.5, sport.weight)

    def test_weight_should_accept_float_string(self):
        sport = Sport()
        sport.weight = '22.5'
        self.ddbb.session.add(sport)
        self.ddbb.session.commit()
        self.assertEqual(22.5, sport.weight)

    def test_weight_should_not_accept_non_float_string(self):
        sport = Sport()
        sport.weight = '22.5kg'
        try:
            self.ddbb.session.add(sport)
            self.ddbb.session.flush()
        except StatementError:
            pass
        else:
            self.fail()

    def test_weight_should_not_accept_negative_value(self):
        if (self.ddbb.engine.name == 'mysql'):
            self.skipTest('Check constraints not available on Mysql')
        sport = Sport()
        sport.weight = (- 1)
        try:
            self.ddbb.session.add(sport)
            self.ddbb.session.flush()
        except (IntegrityError, InterfaceError):
            pass
        else:
            self.fail()

    def test_weight_should_not_accept_none(self):
        sport = Sport()
        sport.weight = None
        try:
            self.ddbb.session.add(sport)
            self.ddbb.session.flush()
        except (IntegrityError, OperationalError):
            pass
        else:
            self.fail()

    # ---- max_pace column ----------------------------------------------
    def test_max_pace_should_default_to_none(self):
        sport = Sport()
        self.assertEqual(None, sport.max_pace)

    def test_max_pace_should_accept_integer(self):
        sport = Sport()
        sport.max_pace = 220
        self.ddbb.session.add(sport)
        self.ddbb.session.flush()
        self.assertEqual(220, sport.max_pace)

    def test_max_pace_should_accept_integer_string(self):
        sport = Sport()
        sport.max_pace = '220'
        self.ddbb.session.add(sport)
        self.ddbb.session.commit()
        self.assertEqual(220, sport.max_pace)

    def test_max_pace_should_not_accept_non_integer_string(self):
        sport = Sport()
        sport.max_pace = '225s'
        try:
            self.ddbb.session.add(sport)
            self.ddbb.session.flush()
        except (ValueError, StatementError):
            pass
        else:
            self.fail()

    def test_max_pace_should_take_floor_of_float(self):
        sport = Sport()
        sport.max_pace = 220.6
        self.ddbb.session.add(sport)
        self.ddbb.session.commit()
        sport = self.ddbb.session.query(Sport).filter((Sport.id == 1)).one()
        self.assertEqual(220, sport.max_pace)

    def test_max_pace_should_not_accept_negative_value(self):
        if (self.ddbb.engine.name == 'mysql'):
            self.skipTest('Check constraints not available on Mysql')
        sport = Sport()
        sport.max_pace = (- 1)
        try:
            self.ddbb.session.add(sport)
            self.ddbb.session.flush()
        except (IntegrityError, InterfaceError):
            pass
        else:
            self.fail()

    def test_max_pace_should_accept_none(self):
        sport = Sport()
        sport.max_pace = None
        self.assertEqual(None, sport.max_pace)

    # ---- color column -------------------------------------------------
    def test_color_should_default_to_blue(self):
        # 0x0000FF == 255: pure blue.
        sport = Sport()
        self.assertEqual(255, sport.color.rgb_val)

    def test_color_should_not_accept_none(self):
        sport = Sport()
        sport.color = None
        try:
            self.ddbb.session.add(sport)
            self.ddbb.session.commit()
        except StatementError:
            pass
        else:
            self.fail()
def _call_ll2cr(lons, lats, target_geo_def):
    """Project swath lon/lat arrays into *target_geo_def* grid col/row space.

    Returns a stacked ``(2, ...)`` array of columns and rows, or — when no
    swath point falls inside the grid — a pair of ``(shape, fill, dtype)``
    blank-chunk descriptors, one per coordinate array.
    """
    swath_def = SwathDefinition(lons, lats)
    points_in_grid, cols, rows = ll2cr(swath_def, target_geo_def)
    if points_in_grid == 0:
        return (
            (lons.shape, np.nan, lons.dtype),
            (lats.shape, np.nan, lats.dtype),
        )
    return np.stack([cols, rows], axis=0)
# NOTE(review): "_inside_iff(...)" looks like the truncated tail of the RPython
# decorator "@jit.look_inside_iff(lambda keys: ...)"; confirm upstream.
_inside_iff((lambda keys: jit.loop_unrolling_heuristic(keys, len(keys), values.UNROLLING_CUTOFF)))
def _find_strategy_class(keys):
    """Pick the hashmap storage strategy matching the keys' (single) type.

    Falls back to the generic object strategy when strategies are disabled,
    the keys have mixed types, or no specialised strategy exists; an empty
    key list gets the empty-hashmap strategy.
    """
    if (not config.strategies):
        return ObjectHashmapStrategy.singleton
    if (len(keys) == 0):
        return EmptyHashmapStrategy.singleton
    # Specialised strategies require all keys to share one exact class.
    single_class = type(keys[0])
    for elem in keys:
        if (not isinstance(elem, single_class)):
            return ObjectHashmapStrategy.singleton
    if (single_class is values.W_Fixnum):
        return FixnumHashmapStrategy.singleton
    if (single_class is values.W_Symbol):
        return SymbolHashmapStrategy.singleton
    if (single_class is values_string.W_String):
        return StringHashmapStrategy.singleton
    if (single_class is values.W_ImmutableBytes):
        return ImmutableByteHashmapStrategy.singleton
    if (single_class is values.W_MutableBytes):
        return MutableByteHashmapStrategy.singleton
    return ObjectHashmapStrategy.singleton
class FEVEROUS(datasets.GeneratorBasedBuilder):
    """FEVEROUS fact-verification dataset builder, keeping only claims whose
    evidence involves at least one table (looked up in the wiki SQLite db)."""

    def _info(self):
        """Declare features: claim text, linearised table(s), context sentences and label."""
        return datasets.DatasetInfo(description=_DESCRIPTION, features=datasets.Features({'id': datasets.Value('string'), 'statement': datasets.Value('string'), 'table': datasets.features.Sequence({'header': datasets.features.Sequence(datasets.Value('string')), 'rows': datasets.features.Sequence(datasets.features.Sequence(datasets.Value('string')))}), 'context': datasets.features.Sequence(datasets.Value('string')), 'label': datasets.Value('string')}), supervised_keys=None, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION)

    def _split_generators(self, dl_manager):
        """Download claims plus the wiki database; expose train/validation splits."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': downloaded_files['train'], 'database': os.path.join(downloaded_files['database'], 'feverous_wikiv1.db')}), datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepath': downloaded_files['dev'], 'database': os.path.join(downloaded_files['database'], 'feverous_wikiv1.db')})]

    def _generate_examples(self, filepath, database):
        """Yield (index, example) pairs for table-involving claims only.

        Evidence context is retrieved from the FEVEROUS wiki SQLite database;
        non-table claims are skipped, hence the separate running counter.
        """
        con = sqlite3.connect(database)
        cur = con.cursor()
        with open(filepath, 'r') as f:
            count = (- 1)
            for (idx, line) in enumerate(f):
                example = json.loads(line)
                statement = example['claim']
                label = example['label']
                if is_table_involved(example):
                    (tables, contexts) = retrieve_context(example, cur)
                    count += 1
                    (yield (count, {'id': str(example['id']), 'statement': statement, 'table': tables, 'context': contexts, 'label': label}))
class FIDInceptionA(models.inception.InceptionA):
    """InceptionA block patched for FID computation.

    Identical to torchvision's InceptionA except the pooling branch uses
    ``count_include_pad=False``, matching the original TensorFlow Inception
    weights used for FID.
    """

    def __init__(self, in_channels, pool_features):
        super(FIDInceptionA, self).__init__(in_channels, pool_features)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)

        out_5x5 = self.branch5x5_2(self.branch5x5_1(x))

        out_3x3dbl = self.branch3x3dbl_1(x)
        out_3x3dbl = self.branch3x3dbl_2(out_3x3dbl)
        out_3x3dbl = self.branch3x3dbl_3(out_3x3dbl)

        # Patch: exclude padded zeros from the average, as TF does.
        out_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
        out_pool = self.branch_pool(out_pool)

        return torch.cat([out_1x1, out_5x5, out_3x3dbl, out_pool], 1)
class Effect5922(BaseEffect):
    """Projected passive beacon effect: scales the speedFactor of every fitted
    'Stasis Web' module by the beacon's stasisWebStrengthMultiplier attribute,
    with stacking penalties in the 'postMul' group."""

    runTime = 'early'
    type = ('projected', 'passive')

    # NOTE(review): no `self` parameter — the effect framework appears to call
    # handler as an unbound function; confirm before adding one.
    def handler(fit, beacon, context, projectionRange, **kwargs):
        fit.modules.filteredItemMultiply((lambda mod: (mod.item.group.name == 'Stasis Web')), 'speedFactor', beacon.getModifiedItemAttr('stasisWebStrengthMultiplier'), stackingPenalties=True, penaltyGroup='postMul', **kwargs)
def compact_engines(stdscr, pos_y, pos_x, width, height, jetson):
    """Draw the compact '[HW engines]' status table on a curses screen.

    Returns the number of screen rows consumed (0 when no engines exist).
    NOTE(review): `height` is accepted but unused here — presumably kept for a
    uniform page-drawing signature; confirm against the other compact_* views.
    """
    center_x = (pos_y := pos_y) and None  # placeholder removed
    return None
class UserPreferences(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
    """Settings page for the logged-in user; edits their Author record."""

    model = Author
    form_class = PreferencesForm
    template_name = 'dictionary/user/preferences/index.html'
    success_message = _('settings are saved, dear')
    success_url = reverse_lazy('user_preferences')

    def get_object(self, queryset=None):
        # Always edit the current user, ignoring any URL-supplied pk/slug.
        return self.request.user

    def form_invalid(self, form):
        # Surface a generic failure notification, then let the normal
        # invalid-form rendering run.
        notifications.error(self.request, gettext("we couldn't handle your request. try again later."))
        return super().form_invalid(form)
class TestSessions(BaseTestCase):
    """Verify that the backend's available/missing session classes match what
    the locally importable optional dependencies actually permit."""

    def test_sessions(self):
        available = [d for (d, _) in Session.iter_valid_session_classes()]
        missing = [d for (d, _) in Session.iter_session_classes_issues()]
        # TCPIP sessions need no optional dependency, so they are always expected.
        expected = [(InterfaceType.tcpip, 'INSTR'), (InterfaceType.tcpip, 'SOCKET')]
        exp_missing = []
        # USB sessions require pyusb with a working backend.
        usbs = [(InterfaceType.usb, 'INSTR'), (InterfaceType.usb, 'RAW')]
        try:
            import usb
            _ = usb.core.find()
            expected.extend(usbs)
        except Exception:
            exp_missing.extend(usbs)
        # GPIB sessions require gpib_ctypes (or the legacy linux-gpib bindings).
        gpibs = [(InterfaceType.gpib, 'INSTR'), (InterfaceType.gpib, 'INTFC')]
        try:
            try:
                from gpib_ctypes import gpib
                from gpib_ctypes.Gpib import Gpib
                from gpib_ctypes.gpib.gpib import _lib as gpib_lib
            except ImportError:
                import gpib
                from Gpib import Gpib
            else:
                # gpib_ctypes lacks a few bindings; register them manually.
                extra_funcs = [('ibcac', [ctypes.c_int, ctypes.c_int], ctypes.c_int), ('ibgts', [ctypes.c_int, ctypes.c_int], ctypes.c_int), ('ibpct', [ctypes.c_int], ctypes.c_int)]
                for (name, argtypes, restype) in extra_funcs:
                    libfunction = gpib_lib[name]
                    libfunction.argtypes = argtypes
                    libfunction.restype = restype
            expected.extend(gpibs)
        except Exception:
            exp_missing.extend(gpibs)
        # Serial sessions require pyserial.
        asrl = (InterfaceType.asrl, 'INSTR')
        try:
            import serial
            expected.append(asrl)
        except Exception:
            exp_missing.append(asrl)
        # VICP sessions require pyvicp.
        vicp = (InterfaceType.vicp, 'INSTR')
        try:
            import pyvicp
            expected.append(vicp)
        except Exception:
            exp_missing.append(vicp)
        assert (sorted(available) == sorted(expected))
        assert (sorted(missing) == sorted(exp_missing))
def compute_K_c(Xsamples, x_minimum, num_of_obser, sigma, noise, l_vec):
    """Assemble the joint covariance matrix over the observations, the gradient
    at the minimizer, and the off-diagonal Hessian entries.

    The result is a symmetric 3x3 grid of covariance sub-blocks with a tiny
    diagonal jitter added for numerical stability.
    """
    dim = len(x_minimum)  # kept for parity with the original (unused below)
    obs_obs = covNobeservations(Xsamples, num_of_obser, sigma, noise, l_vec)
    obs_grad = cov_nObser_maxGrad(Xsamples, x_minimum, num_of_obser, sigma, noise, l_vec)
    obs_hess = cov_nObser_off_maxHess(Xsamples, x_minimum, num_of_obser, sigma, l_vec)
    grad_grad = cov_maxGrad_maxGrad(x_minimum, sigma, l_vec)
    grad_hess = cov_maxGrad_off_maxHess(x_minimum, sigma, l_vec)
    hess_hess = cov_nonDiaHess_nonDiaHess(x_minimum, sigma, l_vec)
    # Lower-triangular blocks mirror the upper ones via transposition.
    joint = np.block([
        [obs_obs, obs_grad, obs_hess],
        [obs_grad.T, grad_grad, grad_hess],
        [obs_hess.T, grad_hess.T, hess_hess],
    ])
    # Small diagonal jitter for numerical stability.
    return joint + ((sigma * (10 ** (- 10))) * np.eye(joint.shape[0]))
class PSPAtmosphericalCorrection(ModifierBase):
    """Satpy modifier applying pyspectral's IR atmospheric (limb-cooling) correction."""

    def __call__(self, projectables, optional_datasets=None, **info):
        from pyspectral.atm_correction_ir import AtmosphericalCorrection
        band = projectables[0]
        # Use a provided satellite-zenith-angle dataset when available,
        # otherwise derive it from the band's metadata.
        if optional_datasets:
            satz = optional_datasets[0]
        else:
            satz = get_satellite_zenith_angle(band)
        satz = satz.data  # drop the xarray wrapper; work on the underlying array
        logger.info('Correction for limb cooling')
        corrector = AtmosphericalCorrection(band.attrs['platform_name'], band.attrs['sensor'])
        # Apply the correction lazily, one dask block at a time.
        atm_corr = da.map_blocks(_call_mapped_correction, satz, band.data, corrector=corrector, band_name=band.attrs['name'], meta=np.array((), dtype=band.dtype))
        # Re-wrap with the original band's metadata/coords.
        proj = xr.DataArray(atm_corr, attrs=band.attrs, dims=band.dims, coords=band.coords)
        self.apply_modifier_info(band, proj)
        return proj
def obtain_fitness(disc_enc_type, smiles_here, selfies_here, oracle, discriminator, generation_index, max_molecules_len, device, generation_size, num_processors, beta, image_dir, data_dir, max_fitness_collector, impose_time_adapted_pen):
    """Compute fitness for the current generation and order molecules by it.

    The discriminator encoding type selects whether fitness is evaluated on
    the SMILES or the SELFIES representation.

    Returns:
        (fitness_here, order, fitness_ordered, smiles_ordered, selfies_ordered)

    Raises:
        ValueError: if disc_enc_type is not one of 'smiles',
            'properties_rdkit', or 'selfies'.
    """
    # NOTE: image_dir is accepted but unused here; kept for interface parity.
    if ((disc_enc_type == 'smiles') or (disc_enc_type == 'properties_rdkit')):
        (fitness_here, discriminator_predictions) = fitness(smiles_here, oracle, discriminator, disc_enc_type, generation_index, max_molecules_len, device, num_processors, beta, data_dir, max_fitness_collector, impose_time_adapted_pen)
    elif (disc_enc_type == 'selfies'):
        (fitness_here, discriminator_predictions) = fitness(selfies_here, oracle, discriminator, disc_enc_type, generation_index, max_molecules_len, device, num_processors, beta, data_dir, max_fitness_collector, impose_time_adapted_pen)
    else:
        # BUG FIX: previously an unknown encoding type fell through and raised
        # an opaque NameError on fitness_here below.
        raise ValueError(f'Unsupported disc_enc_type: {disc_enc_type!r}')
    fitness_here = fitness_here.reshape((generation_size,))
    (order, fitness_ordered, smiles_ordered, selfies_ordered) = order_based_on_fitness(fitness_here, smiles_here, selfies_here)
    return (fitness_here, order, fitness_ordered, smiles_ordered, selfies_ordered)
def test_transfer_statechange_operators():
    """Equality of state-change objects must depend on their payload fields."""
    block_hash = factories.make_transaction_hash()
    a = Block(block_number=2, gas_limit=1, block_hash=block_hash)
    b = Block(block_number=2, gas_limit=1, block_hash=block_hash)
    c = Block(block_number=3, gas_limit=1, block_hash=factories.make_transaction_hash())
    # Identical fields compare equal; a different number/hash compares unequal,
    # and __ne__ must be consistent with __eq__.
    assert (a == b)
    assert (not (a != b))
    assert (a != c)
    assert (not (a == c))
    a = ActionCancelPayment(2)
    b = ActionCancelPayment(2)
    c = ActionCancelPayment(3)
    assert (a == b)
    assert (not (a != b))
    assert (a != c)
    assert (not (a == c))
# NOTE(review): the four lines below look like decorators whose '@' was lost in
# extraction (presumably "@_benchmark.command(name='start')", "@_option",
# "@_range_option", "@_option"); confirm against the original CLI module.
_benchmark.command(name='start')
_option
_range_option
_option
def start_command(workflow: str, workflow_range: (int, int), concurrency: int) -> NoReturn:
    """CLI entry: launch a benchmark run, logging (not raising) any failure."""
    # NOTE(review): the annotation "(int, int)" is a plain tuple, not a typing
    # annotation — presumably Tuple[int, int] was intended; confirm.
    try:
        start(workflow, workflow_range, concurrency)
    except Exception as e:
        # Best-effort launcher: report the failure instead of crashing the CLI.
        logger.error(f'Something went wrong during benchmark launch: {e}')
class MPEncdecMultiheadAttn(nn.Module):
    """Encoder-decoder multi-head attention whose projection weights are
    materialized per call from low-rank factor storage.

    Weights are stored flattened as (out_features * in_features, rank) matrices
    and specialized by multiplying with a factor vector (src_factor for K/V,
    tgt_factor for Q and the output projection).
    """

    def __init__(self, num_heads, embed_dim, attn_drop=0.0, factor_size=8, rank_size=(- 1)):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = attn_drop
        self.head_dim = (embed_dim // num_heads)
        self.factor_size = factor_size
        assert ((self.head_dim * num_heads) == self.embed_dim), 'embed_dim must be divisible by num_heads'
        self.bias = False
        self.scaling = (self.head_dim ** (- 0.5))  # 1/sqrt(d_k)
        if (rank_size == (- 1)):
            rank_size = factor_size  # default: rank equals the factor size
        self.rank_size = rank_size
        # NOTE(review): factor_to_rank is created but not referenced in
        # forward() — confirm whether callers apply it to the factors first.
        self.factor_to_rank = nn.Linear(self.factor_size, self.rank_size)
        # Flattened projection weights, one column per rank component.
        self.in_proj_weight_q = Parameter(torch.Tensor((embed_dim * embed_dim), rank_size))
        self.in_proj_weight_kv = Parameter(torch.Tensor(((2 * embed_dim) * embed_dim), rank_size))
        self.out_proj_weight = Parameter(torch.Tensor((embed_dim * embed_dim), rank_size))
        # Biases are disabled for this variant.
        self.in_proj_bias_q = None
        self.in_proj_bias_kv = None
        self.out_proj_bias = None
        self.attn_func = encdec_attn_func
        # Prefer the fused CUDA kernel when the optimized extension builds.
        try:
            from ..optimized.encdec_attention_func_bias import fast_encdec_attn_func
            self.attn_func_fast = fast_encdec_attn_func
            self.optimized = 1
        except ModuleNotFoundError as e:
            self.optimized = 2
            self.attn_func_fast = None
        self.reset_parameters()

    def reset_parameters(self, init='normal'):
        """Initialize the factored weights (normal or uniform Xavier-style)."""
        if (init == 'normal'):
            std_ = math.sqrt((2.0 / (self.embed_dim + self.embed_dim)))
            nn.init.normal_(self.in_proj_weight_q, 0.0, std_)
            nn.init.normal_(self.in_proj_weight_kv, 0.0, std_)
            nn.init.normal_(self.out_proj_weight, 0.0, std_)
        else:
            std_ = math.sqrt((6.0 / (self.embed_dim + self.embed_dim)))
            nn.init.uniform_(self.in_proj_weight_q, (- std_), std_)
            nn.init.uniform_(self.in_proj_weight_kv, (- std_), std_)
            nn.init.uniform_(self.out_proj_weight, (- std_), std_)

    def forward(self, query, key, value, src_factor=None, tgt_factor=None, attn_mask=None, incremental=False, incremental_cache=None):
        """Returns (outputs, coverage); coverage is None on the fused path."""
        assert (value is key), 'ERROR: Keys and values must be the same.'
        is_training = self.training
        time_masking = False
        len_key = key.size(0)
        # Materialize this sample's projection weights from low-rank storage:
        # (out*in, rank) @ (rank,) -> reshape to (out, in).
        in_proj_weight_q = torch.mv(self.in_proj_weight_q, tgt_factor).view(self.embed_dim, self.embed_dim)
        in_proj_weight_kv = torch.mv(self.in_proj_weight_kv, src_factor).view((self.embed_dim * 2), self.embed_dim)
        out_proj_weight = torch.mv(self.out_proj_weight, tgt_factor).view(self.embed_dim, self.embed_dim)
        # Fused kernel path: only for fp16 CUDA training, non-incremental
        # decoding, and key length <= 1024 (kernel limit).
        if ((self.optimized == 1) and (self.training and (not incremental)) and (len_key <= 1024) and query.is_cuda and (in_proj_weight_q.dtype == torch.half)):
            if (attn_mask is not None):
                if (attn_mask.dim() == 3):
                    attn_mask = attn_mask.squeeze(1)
                attn_mask = attn_mask.byte()  # fused kernel expects byte mask
            outputs = self.attn_func_fast(time_masking, is_training, self.num_heads, query, key.type_as(in_proj_weight_q), in_proj_weight_q, in_proj_weight_kv, out_proj_weight, attn_mask, self.dropout)
            coverage = None  # fused path does not return attention weights
        else:
            (outputs, coverage) = self.attn_func(time_masking, is_training, self.num_heads, query, key, in_proj_weight_q, in_proj_weight_kv, out_proj_weight, attn_mask, self.dropout, incremental, incremental_cache)
        return (outputs, coverage)
class PyTensorConfigParser():
    """Registry of PyTensor configuration variables.

    Values are resolved with the flags dict (PYTENSOR_FLAGS) taking precedence
    over the parsed config files; 'section__option' keys map to INI sections.
    """

    def __init__(self, flags_dict: dict, pytensor_cfg, pytensor_raw_cfg):
        self._flags_dict = flags_dict
        self._pytensor_cfg = pytensor_cfg
        self._pytensor_raw_cfg = pytensor_raw_cfg
        self._config_var_dict: dict = {}
        super().__init__()

    def __str__(self, print_doc=True):
        sio = StringIO()
        self.config_print(buf=sio, print_doc=print_doc)
        return sio.getvalue()

    def config_print(self, buf, print_doc=True):
        """Dump every registered config variable (optionally with its doc)."""
        for cv in self._config_var_dict.values():
            print(cv, file=buf)
            if print_doc:
                print(' Doc: ', cv.doc, file=buf)
            print(' Value: ', cv.__get__(self, self.__class__), file=buf)
            print('', file=buf)

    def get_config_hash(self):
        # Only variables flagged in_c_key participate in the compilation key.
        all_opts = sorted([c for c in self._config_var_dict.values() if c.in_c_key], key=(lambda cv: cv.name))
        return hash_from_code('\n'.join([f'{cv.name} = {cv.__get__(self, self.__class__)}' for cv in all_opts]))

    def add(self, name, doc, configparam, in_c_key=True):
        """Register a new config variable and expose it as a class attribute."""
        if ('.' in name):
            raise ValueError(f'Dot-based sections were removed. Use double underscores! ({name})')
        if (name in dir(self)):
            raise AttributeError(f"A config parameter with the name '{name}' was already registered on another config instance.")
        configparam.doc = doc
        configparam.name = name
        configparam.in_c_key = in_c_key
        self._config_var_dict[name] = configparam
        # Eagerly resolve non-callable defaults so flag typos fail early;
        # callable defaults are only resolved when a value was explicitly given.
        if (not callable(configparam.default)):
            configparam.__get__(self, type(self), delete_key=True)
        else:
            try:
                self.fetch_val_for_key(name)
                configparam.__get__(self, type(self), delete_key=True)
            except KeyError:
                _logger.info(f"Suppressed KeyError in PyTensorConfigParser.add for parameter '{name}'!")
        setattr(self.__class__, name, configparam)

    def fetch_val_for_key(self, key, delete_key=False):
        """Return the raw value for `key`; raises KeyError when unset."""
        # Flags dict has highest priority.
        if (key in self._flags_dict):
            if delete_key:
                return self._flags_dict.pop(key)
            return self._flags_dict[key]
        # 'section__option' -> [section] option; bare keys live in [global].
        key_tokens = key.rsplit('__', 1)
        if (len(key_tokens) > 2):
            raise KeyError(key)
        if (len(key_tokens) == 2):
            (section, option) = key_tokens
        else:
            (section, option) = ('global', key)
        try:
            try:
                return self._pytensor_cfg.get(section, option)
            except InterpolationError:
                # Fall back to the raw (non-interpolating) parser.
                return self._pytensor_raw_cfg.get(section, option)
        except (NoOptionError, NoSectionError):
            raise KeyError(key)

    def change_flags(self, *args, **kwargs) -> _ChangeFlagsDecorator:
        # Context manager / decorator for temporarily overriding flags.
        return _ChangeFlagsDecorator(*args, _root=self, **kwargs)

    def warn_unused_flags(self):
        # Anything left in the flags dict was never claimed by a config var.
        for key in self._flags_dict.keys():
            warnings.warn(f'PyTensor does not recognise this flag: {key}')
def results2csv(dataset, results, out_file, custom_classes=None):
    """Convert detection results to CSV rows and write them to `out_file`.

    Args:
        dataset: dataset object forwarded to det2csv.
        results: per-image results; each element must be a list of
            per-class detections.
        out_file: destination CSV path.
        custom_classes: optional class subset forwarded to det2csv.

    Raises:
        TypeError: if `results` is not in the expected list-of-lists format.
    """
    # BUG FIX: previously a non-list result format fell through and raised an
    # opaque NameError on csv_results below.
    if not isinstance(results[0], list):
        raise TypeError(f'unsupported results format: expected list of lists, got element of type {type(results[0]).__name__}')
    csv_results = det2csv(dataset, results, custom_classes)

    def to_str(item):
        # Floats are written with 3 decimal places; everything else verbatim.
        if isinstance(item, float):
            return f'{item:.3f}'
        return str(item)

    with open(out_file, 'w') as f:
        for csv_result in csv_results:
            f.write(','.join(map(to_str, csv_result)))
            f.write('\n')
def _test_ucx_infiniband_nvlink(skip_queue, protocol, enable_infiniband, enable_nvlink, enable_rdmacm):
    """Worker: spin up a LocalCUDACluster with the requested UCX transports,
    run a small dask-cupy reduction, and verify the UCX configuration.

    Skip decisions are reported through `skip_queue` ('ok' or a reason string).
    """
    cupy = pytest.importorskip('cupy')
    if (protocol == 'ucx'):
        ucp = pytest.importorskip('ucp')
    elif (protocol == 'ucxx'):
        ucp = pytest.importorskip('ucxx')
    if (enable_infiniband and (not any([at.startswith('rc') for at in ucp.get_active_transports()]))):
        skip_queue.put("No support available for 'rc' transport in UCX")
        return
    else:
        skip_queue.put('ok')
    # All-None means "let UCX auto-select"; otherwise pin TCP and the
    # requested connection-manager transport priority.
    if ((enable_infiniband is None) and (enable_nvlink is None) and (enable_rdmacm is None)):
        enable_tcp_over_ucx = None
        cm_tls = ['all']
        cm_tls_priority = ['rdmacm', 'tcp', 'sockcm']
    else:
        enable_tcp_over_ucx = True
        cm_tls = ['tcp']
        if (enable_rdmacm is True):
            cm_tls_priority = ['rdmacm']
        else:
            cm_tls_priority = ['tcp']
    initialize(protocol=protocol, enable_tcp_over_ucx=enable_tcp_over_ucx, enable_infiniband=enable_infiniband, enable_nvlink=enable_nvlink, enable_rdmacm=enable_rdmacm)
    with LocalCUDACluster(protocol=protocol, interface='ib0', enable_tcp_over_ucx=enable_tcp_over_ucx, enable_infiniband=enable_infiniband, enable_nvlink=enable_nvlink, enable_rdmacm=enable_rdmacm, rmm_pool_size='1 GiB') as cluster:
        with Client(cluster) as client:
            res = da.from_array(cupy.arange(10000), chunks=(1000,), asarray=False)
            res = res.sum().compute()
            # BUG FIX: the expected value was missing (syntax error).
            # sum(range(10000)) == 9999 * 10000 / 2 == 49995000.
            assert (res == 49995000)

            def check_ucx_options():
                # Runs on every worker: the effective UCX config must reflect
                # the transports we requested.
                conf = ucp.get_config()
                assert ('TLS' in conf)
                assert all(((t in conf['TLS']) for t in cm_tls))
                assert all(((p in conf['SOCKADDR_TLS_PRIORITY']) for p in cm_tls_priority))
                if (cm_tls != ['all']):
                    assert ('tcp' in conf['TLS'])
                    assert ('cuda_copy' in conf['TLS'])
                    if enable_nvlink:
                        assert ('cuda_ipc' in conf['TLS'])
                    if enable_infiniband:
                        assert ('rc' in conf['TLS'])
                return True

            assert all(client.run(check_ucx_options).values())
def main():
    """Report per-document line/token statistics for a corpus whose documents
    are separated by blank lines (optionally gzip-compressed)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('--gzip', action='store_true')
    args = parser.parse_args()

    def gopen():
        # NOTE: gzip.open in 'r' mode yields bytes; strip()/split() below work
        # on bytes as well as str.
        if args.gzip:
            return gzip.open(args.input, 'r')
        else:
            return open(args.input, 'r', encoding='utf-8')

    num_lines = []
    num_toks = []
    with gopen() as h:
        num_docs = 1
        num_lines_in_doc = 0
        num_toks_in_doc = 0
        for (i, line) in enumerate(h):
            if (len(line.strip()) == 0):
                # Blank line = document boundary: flush the current doc.
                num_docs += 1
                num_lines.append(num_lines_in_doc)
                num_toks.append(num_toks_in_doc)
                num_lines_in_doc = 0
                num_toks_in_doc = 0
            else:
                num_lines_in_doc += 1
                num_toks_in_doc += len(line.rstrip().split())
            # Progress markers on stderr.
            if ((i % 1000000) == 0):
                print(i, file=sys.stderr, end='', flush=True)
            elif ((i % 100000) == 0):
                print('.', file=sys.stderr, end='', flush=True)
    # BUG FIX: when the file does not end with a blank line, the final
    # document's statistics were never flushed; include them here.
    if ((num_lines_in_doc > 0) or (num_toks_in_doc > 0)):
        num_lines.append(num_lines_in_doc)
        num_toks.append(num_toks_in_doc)
    print(file=sys.stderr, flush=True)
    print('found {} docs'.format(num_docs))
    print('average num lines per doc: {}'.format(np.mean(num_lines)))
    print('average num toks per doc: {}'.format(np.mean(num_toks)))
def generate_model_output_test2() -> Dict[(str, torch._tensor.Tensor)]:
    """Build a fixed fake model-output batch for metric tests.

    The batch holds 12 positions: the prediction/label pattern repeats three
    times, the session mask marks the first 7 positions valid (1) and the rest
    invalid (-1), and all weights are 1. The expected recall for this setup
    is 0.5.
    """
    prediction_row = [1.0, 0.0, 0.51, 0.8] * 3
    session_row = ([1] * 7) + ([(- 1)] * 5)
    label_row = [1.0, 1.0, 0.0, 0.0] * 3
    weight_row = [1.0] * 12
    return {
        'predictions': torch.tensor([prediction_row]),
        'session': torch.tensor([session_row]),
        'labels': torch.tensor([label_row]),
        'weights': torch.tensor([weight_row]),
        'expected_recall': torch.tensor([0.5]),
    }
class TestEvolve():
    """Tests for attr.evolve().

    NOTE(review): several decorator lines below appear mangled by extraction —
    bare parenthesized expressions like "(slots=st.booleans(), ...)" and
    "(simple_classes())" were presumably "@given(...)"/"@attr.s(...)"
    decorators, and the bare "class C():" definitions were presumably
    "@attr.s"-decorated; confirm against the original attrs test suite.
    """

    (slots=st.booleans(), frozen=st.booleans())
    def test_empty(self, slots, frozen):
        # Evolving an attribute-less instance yields a distinct but equal copy.
        (slots=slots, frozen=frozen)
        class C():
            pass
        i1 = C()
        i2 = evolve(i1)
        assert (i1 is not i2)
        assert (i1 == i2)

    (simple_classes())
    def test_no_changes(self, C):
        # No kwargs: evolve returns an equal copy, never the same object.
        i1 = C()
        i2 = evolve(i1)
        assert (i1 is not i2)
        assert (i1 == i2)

    (simple_classes(), st.data())
    def test_change(self, C, data):
        # Changed fields take the new value on the evolved copy.
        assume(fields(C))
        field_names = [a.name for a in fields(C)]
        original = C()
        chosen_names = data.draw(st.sets(st.sampled_from(field_names)))
        # Private attrs lose their leading underscore as __init__ kwargs.
        change_dict = {name.replace('_', ''): data.draw(st.integers()) for name in chosen_names}
        changed = evolve(original, **change_dict)
        for name in chosen_names:
            assert (getattr(changed, name) == change_dict[name.replace('_', '')])

    (simple_classes())
    def test_unknown(self, C):
        # Unknown kwargs surface as the underlying __init__'s TypeError.
        with pytest.raises(TypeError) as e:
            evolve(C(), aaaa=2)
        if hasattr(C, '__attrs_init__'):
            expected = "__attrs_init__() got an unexpected keyword argument 'aaaa'"
        else:
            expected = "__init__() got an unexpected keyword argument 'aaaa'"
        assert e.value.args[0].endswith(expected)

    def test_validator_failure(self):
        # Validators run on evolve, not only on construction.
        class C():
            a = attr.ib(validator=instance_of(int))
        with pytest.raises(TypeError) as e:
            evolve(C(a=1), a='some string')
        m = e.value.args[0]
        assert m.startswith("'a' must be <class 'int'>")

    def test_private(self):
        # Private attributes are addressed without their underscore.
        class C():
            _a = attr.ib()
        assert (evolve(C(1), a=2)._a == 2)
        with pytest.raises(TypeError):
            evolve(C(1), _a=2)
        with pytest.raises(TypeError):
            evolve(C(1), a=3, _a=2)

    def test_non_init_attrs(self):
        # init=False attributes are simply ignored by evolve.
        class C():
            a = attr.ib()
            b = attr.ib(init=False, default=0)
        assert (evolve(C(1), a=2).a == 2)

    def test_regression_attrs_classes(self):
        # Nested attrs instances passed as replacement values are kept as-is.
        class Cls1():
            param1 = attr.ib()
        class Cls2():
            param2 = attr.ib()
        obj2a = Cls2(param2='a')
        obj2b = Cls2(param2='b')
        obj1a = Cls1(param1=obj2a)
        assert (Cls1(param1=Cls2(param2='b')) == attr.evolve(obj1a, param1=obj2b))

    def test_dicts(self):
        # Dict replacement values are not treated specially.
        class Cls1():
            param1 = attr.ib()
        class Cls2():
            param2 = attr.ib()
        obj2a = Cls2(param2='a')
        obj2b = {'foo': 42, 'param2': 42}
        obj1a = Cls1(param1=obj2a)
        assert (Cls1({'foo': 42, 'param2': 42}) == attr.evolve(obj1a, param1=obj2b))

    def test_inst_kw(self):
        # Passing the instance via the legacy `inst=` kwarg warns.
        class C():
            pass
        with pytest.warns(DeprecationWarning) as wi:
            evolve(inst=C())
        assert (__file__ == wi.list[0].filename)

    def test_no_inst(self):
        # Missing the instance entirely raises TypeError.
        with pytest.raises(TypeError, match='evolve\\(\\) missing 1'):
            evolve(x=1)

    def test_too_many_pos_args(self):
        with pytest.raises(TypeError, match='evolve\\(\\) takes 1 positional argument, but 2 were given'):
            evolve(1, 2)

    def test_can_change_inst(self):
        # An attribute literally named "inst" must still be evolvable.
        class C():
            inst: int
        assert (C(42) == evolve(C(23), inst=42))
def test_obtain_input_shape():
    """Exercise _obtain_input_shape's validation and default-resolution logic."""
    # Explicit input_shape conflicting with imagenet weights is rejected.
    with pytest.raises(ValueError):
        utils._obtain_input_shape(input_shape=(224, 224, 3), default_size=299, min_size=139, data_format='channels_last', require_flatten=True, weights='imagenet')
    for data_format in ['channels_last', 'channels_first']:
        # Unusual channel count with (fake) pretrained weights only warns.
        shape = (139, 139)
        input_shape = ((shape + (99,)) if (data_format == 'channels_last') else ((99,) + shape))
        with pytest.warns(UserWarning):
            utils._obtain_input_shape(input_shape=input_shape, default_size=None, min_size=139, data_format=data_format, require_flatten=False, weights='fake_weights')
        # Spatial dims below min_size are rejected.
        shape = (100, 100)
        input_shape = ((shape + (3,)) if (data_format == 'channels_last') else ((3,) + shape))
        with pytest.raises(ValueError):
            utils._obtain_input_shape(input_shape=input_shape, default_size=None, min_size=139, data_format=data_format, require_flatten=False)
        # Wrong rank is rejected.
        shape = (100,)
        input_shape = ((shape + (3,)) if (data_format == 'channels_last') else ((3,) + shape))
        with pytest.raises(ValueError):
            utils._obtain_input_shape(input_shape=input_shape, default_size=None, min_size=139, data_format=data_format, require_flatten=False)
        # Invalid channel count without weights is rejected.
        shape = (100, 100)
        input_shape = ((shape + (5,)) if (data_format == 'channels_last') else ((5,) + shape))
        with pytest.raises(ValueError):
            utils._obtain_input_shape(input_shape=input_shape, default_size=None, min_size=139, data_format=data_format, require_flatten=False)
    # require_flatten=True needs a fully specified shape.
    with pytest.raises(ValueError):
        utils._obtain_input_shape(input_shape=None, default_size=None, min_size=139, data_format='channels_first', require_flatten=True)
    # Valid shapes pass through unchanged; unspecified ones resolve to
    # 3-channel placeholders in the requested layout.
    assert (utils._obtain_input_shape(input_shape=(3, 200, 200), default_size=None, min_size=139, data_format='channels_first', require_flatten=True) == (3, 200, 200))
    assert (utils._obtain_input_shape(input_shape=None, default_size=None, min_size=139, data_format='channels_last', require_flatten=False) == (None, None, 3))
    assert (utils._obtain_input_shape(input_shape=None, default_size=None, min_size=139, data_format='channels_first', require_flatten=False) == (3, None, None))
    assert (utils._obtain_input_shape(input_shape=None, default_size=None, min_size=139, data_format='channels_last', require_flatten=False) == (None, None, 3))
    assert (utils._obtain_input_shape(input_shape=(150, 150, 3), default_size=None, min_size=139, data_format='channels_last', require_flatten=False) == (150, 150, 3))
    assert (utils._obtain_input_shape(input_shape=(3, None, None), default_size=None, min_size=139, data_format='channels_first', require_flatten=False) == (3, None, None))
class Solution():
    def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
        """Merge sorted nums2 into sorted nums1 in place.

        nums1 has length m + n, with its last n slots unused. The previous
        implementation built a temporary merged list (O(m + n) extra space);
        merging backwards from the end needs no extra storage and cannot
        overwrite elements of nums1 that are still unread.
        """
        i = m - 1          # last valid element of nums1
        j = n - 1          # last element of nums2
        k = m + n - 1      # next write position (from the back)
        # Once nums2 is exhausted, the remaining nums1 prefix is already in place.
        while j >= 0:
            if (i >= 0) and (nums1[i] > nums2[j]):
                nums1[k] = nums1[i]
                i -= 1
            else:
                nums1[k] = nums2[j]
                j -= 1
            k -= 1
def freshen_function_type_vars(callee: F) -> F:
    """Substitute fresh unification type variables for a function's own
    type variables; overloads are freshened item by item."""
    if isinstance(callee, CallableType):
        if (not callee.is_generic()):
            # Nothing to freshen for a non-generic callable.
            return cast(F, callee)
        tvs = []
        tvmap: dict[(TypeVarId, Type)] = {}
        for v in callee.variables:
            tv = v.new_unification_variable(v)
            tvs.append(tv)
            tvmap[v.id] = tv
        # Expand with the fresh variables and record them as the callable's
        # own variables on the copy.
        fresh = expand_type(callee, tvmap).copy_modified(variables=tvs)
        return cast(F, fresh)
    else:
        # Only CallableType and Overloaded are expected here.
        assert isinstance(callee, Overloaded)
        fresh_overload = Overloaded([freshen_function_type_vars(item) for item in callee.items])
        return cast(F, fresh_overload)
class TFSegformerDWConv(tf.keras.layers.Layer):
    """Depthwise 3x3 convolution applied to a flattened (batch, H*W, C) sequence."""

    def __init__(self, dim: int=768, **kwargs):
        super().__init__(**kwargs)
        # groups == filters makes the convolution depthwise (one filter per channel).
        self.depthwise_convolution = tf.keras.layers.Conv2D(filters=dim, kernel_size=3, strides=1, padding='same', groups=dim, name='dwconv')

    def call(self, hidden_states: tf.Tensor, height: int, width: int) -> tf.Tensor:
        """Un-flatten to (B, H, W, C), convolve, then flatten back to (B, H*W, C)."""
        in_shape = shape_list(hidden_states)
        batch_size = in_shape[0]
        channels = in_shape[(- 1)]
        spatial = tf.reshape(hidden_states, (batch_size, height, width, channels))
        convolved = self.depthwise_convolution(spatial)
        out_shape = shape_list(convolved)
        new_height = out_shape[1]
        new_width = out_shape[2]
        out_channels = out_shape[3]
        return tf.reshape(convolved, (batch_size, (new_height * new_width), out_channels))
def main():
    """Entry point: train/evaluate an XLNet permutation-language-model (run_plm)."""
    # --- Argument parsing: a single JSON config file, or CLI flags. ---
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Usage telemetry for the example script.
    send_example_telemetry('run_plm', model_args, data_args)
    # --- Logging setup ---
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    if training_args.should_log:
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    logger.info(f'Training/evaluation parameters {training_args}')
    # --- Detect an existing checkpoint to resume from. ---
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    set_seed(training_args.seed)
    # --- Load data: hub dataset, or local text/csv/json files. ---
    if (data_args.dataset_name is not None):
        raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
        if ('validation' not in raw_datasets.keys()):
            # No validation split: carve one out of train by percentage.
            raw_datasets['validation'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'train[:{data_args.validation_split_percentage}%]', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
            raw_datasets['train'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'train[{data_args.validation_split_percentage}%:]', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
    else:
        data_files = {}
        if (data_args.train_file is not None):
            data_files['train'] = data_args.train_file
        if (data_args.validation_file is not None):
            data_files['validation'] = data_args.validation_file
        # The loader name is derived from the file extension; .txt -> 'text'.
        extension = data_args.train_file.split('.')[(- 1)]
        if (extension == 'txt'):
            extension = 'text'
        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
        if ('validation' not in raw_datasets.keys()):
            raw_datasets['validation'] = load_dataset(extension, data_files=data_files, split=f'train[:{data_args.validation_split_percentage}%]', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
            raw_datasets['train'] = load_dataset(extension, data_files=data_files, split=f'train[{data_args.validation_split_percentage}%:]', cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
    # --- Config / tokenizer / model (pretrained or from scratch). ---
    config_kwargs = {'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': (True if model_args.use_auth_token else None)}
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = XLNetConfig()
        logger.warning('You are instantiating a new config instance from scratch.')
        if (model_args.config_overrides is not None):
            logger.info(f'Overriding config: {model_args.config_overrides}')
            config.update_from_string(model_args.config_overrides)
            logger.info(f'New config: {config}')
    tokenizer_kwargs = {'cache_dir': model_args.cache_dir, 'use_fast': model_args.use_fast_tokenizer, 'revision': model_args.model_revision, 'use_auth_token': (True if model_args.use_auth_token else None)}
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script.You can do it from another script, save it, and load it from here, using --tokenizer_name.')
    if model_args.model_name_or_path:
        model = XLNetLMHeadModel.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None), low_cpu_mem_usage=model_args.low_cpu_mem_usage)
    else:
        logger.info('Training new model from scratch')
        model = XLNetLMHeadModel(config)
    # Grow the embedding matrix if the tokenizer vocabulary is larger.
    embedding_size = model.get_input_embeddings().weight.shape[0]
    if (len(tokenizer) > embedding_size):
        model.resize_token_embeddings(len(tokenizer))
    # --- Preprocessing: tokenize (line-by-line, or concatenate + chunk). ---
    if training_args.do_train:
        column_names = raw_datasets['train'].column_names
    else:
        column_names = raw_datasets['validation'].column_names
    text_column_name = ('text' if ('text' in column_names) else column_names[0])
    if (data_args.max_seq_length > tokenizer.model_max_length):
        logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for themodel ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    if data_args.line_by_line:
        # Each non-empty line is its own example, padded/truncated to length.
        padding = ('max_length' if data_args.pad_to_max_length else False)

        def tokenize_function(examples):
            examples['text'] = [line for line in examples['text'] if ((len(line) > 0) and (not line.isspace()))]
            return tokenizer(examples['text'], padding=padding, truncation=True, max_length=max_seq_length)
        with training_args.main_process_first(desc='dataset map tokenization'):
            tokenized_datasets = raw_datasets.map(tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on dataset line_by_line')
    else:
        # Tokenize everything, then concatenate and split into fixed chunks.
        def tokenize_function(examples):
            return tokenizer(examples[text_column_name])
        with training_args.main_process_first(desc='dataset map tokenization'):
            tokenized_datasets = raw_datasets.map(tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on every text in dataset')

        def group_texts(examples):
            # Concatenate all sequences, then cut into max_seq_length blocks;
            # the trailing remainder (< max_seq_length) is dropped.
            concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
            total_length = len(concatenated_examples[list(examples.keys())[0]])
            if (total_length >= max_seq_length):
                total_length = ((total_length // max_seq_length) * max_seq_length)
            result = {k: [t[i:(i + max_seq_length)] for i in range(0, total_length, max_seq_length)] for (k, t) in concatenated_examples.items()}
            return result
        with training_args.main_process_first(desc='grouping texts together'):
            tokenized_datasets = tokenized_datasets.map(group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache), desc=f'Grouping texts in chunks of {max_seq_length}')
    # --- Train/eval dataset selection (with optional subsampling). ---
    if training_args.do_train:
        if ('train' not in tokenized_datasets):
            raise ValueError('--do_train requires a train dataset')
        train_dataset = tokenized_datasets['train']
        if (data_args.max_train_samples is not None):
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
    if training_args.do_eval:
        if ('validation' not in tokenized_datasets):
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = tokenized_datasets['validation']
        if (data_args.max_eval_samples is not None):
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
    # Collator builds the permutation-LM objective (span masking).
    data_collator = DataCollatorForPermutationLanguageModeling(tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length)
    trainer = Trainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator)
    # --- Training ---
    if training_args.do_train:
        checkpoint = None
        if (training_args.resume_from_checkpoint is not None):
            checkpoint = training_args.resume_from_checkpoint
        elif (last_checkpoint is not None):
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    # --- Evaluation (perplexity = exp(eval loss)) ---
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_eval_samples = (data_args.max_eval_samples if (data_args.max_eval_samples is not None) else len(eval_dataset))
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        try:
            perplexity = math.exp(metrics['eval_loss'])
        except OverflowError:
            perplexity = float('inf')
        metrics['perplexity'] = perplexity
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    # --- Model card / hub upload metadata ---
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'language-modeling'}
    if (data_args.dataset_name is not None):
        kwargs['dataset_tags'] = data_args.dataset_name
        if (data_args.dataset_config_name is not None):
            kwargs['dataset_args'] = data_args.dataset_config_name
            kwargs['dataset'] = f'{data_args.dataset_name} {data_args.dataset_config_name}'
        else:
            kwargs['dataset'] = data_args.dataset_name
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def decoding_latent_code(encoded_code):
    """Decode a binary-encoded latent vector back into signed float values.

    Each decoded value occupies 9 consecutive entries of ``encoded_code``:
    the first entry is the sign bit (1 -> negative) and the remaining eight
    entries are the binary digits of the magnitude, most significant first.

    Parameters
    ----------
    encoded_code : 1-D array of 0/1 integers whose length is a multiple of 9.

    Returns
    -------
    numpy.ndarray
        float32 array with ``len(encoded_code) // 9`` decoded values.
    """
    n_values = int(encoded_code.shape[0] / 9)
    # Scratch buffer is fully overwritten below, so use empty() instead of
    # the original random initialisation.
    decoded = np.empty(n_values, dtype='float32')
    for i in range(n_values):
        chunk = encoded_code[9 * i:9 * (i + 1)]
        # Magnitude: the eight digits after the sign bit, parsed base-2.
        # (Replaces the original eval() of a hand-built string, which was
        # both slower and unsafe.)
        magnitude = int(''.join(str(bit) for bit in chunk[1:]), 2)
        # Sign bit: 1 means negative.
        decoded[i] = -magnitude if chunk[0] == 1 else magnitude
    return decoded
def make_call(*items: tuple[str, str | None]) -> CallExpr:
    """Build a ``CallExpr`` for ``f(...)`` from (fullname, keyword) pairs.

    Each pair contributes one argument: a ``NameExpr`` whose short name is
    the last dotted component of ``fullname``.  A pair whose second element
    is ``None`` (or empty) becomes a positional argument; otherwise it is a
    named argument under that keyword.
    """
    call_args: list[Expression] = []
    names = []
    kinds = []
    for fullname, keyword in items:
        ref = NameExpr(fullname.split('.')[-1])
        ref.fullname = fullname
        call_args.append(ref)
        names.append(keyword)
        kinds.append(ARG_NAMED if keyword else ARG_POS)
    return CallExpr(NameExpr('f'), call_args, kinds, names)
# FIX: the patch target string was left bare — the function receives the
# injected mock as `keys_int_or_tuple`, so it must be a @patch decorator.
@patch('pyresample.spherical_utils.check_keys_int_or_tuple')
def test_merge_overlapping_and_nonoverlapping_objects(keys_int_or_tuple):
    """Merging a mix of overlapping and disjoint sets yields the expected
    union polygons and merged id tuples."""
    mysets = [SET_A, SET_B, SET_C, SET_D, SET_E, SET_F, SET_G]
    myobjects = GetNonOverlapUnionsBaseClass(mysets)
    # NOTE(review): `return_code` looks like it should be `return_value` on a
    # Mock — as written it just sets an unused attribute; confirm intent.
    keys_int_or_tuple.return_code = None
    with patch('pyresample.spherical_utils.merge_tuples') as mypatch:
        mypatch.side_effect = fake_merge_tuples
        myobjects.merge()
    polygons = myobjects.get_polygons()
    ids = myobjects.get_ids()
    polygons_expected = [{1, 3, 5, 7, 9},
                         {2, 4, 6, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19},
                         {20, 21, 22, 23, 24, 26, 27, 28, 29}]
    assert (polygons == polygons_expected)
    # Disjoint input index 0 keeps its plain id; merged groups become tuples.
    ids_expected = [0, (2, 1, 3), (5, 4, 6)]
    assert (ids == ids_expected)
def infer(valid_queue, model, criterion):
    """Run one evaluation pass of ``model`` over ``valid_queue``.

    Computes loss plus top-1/top-5 accuracy per batch under ``torch.no_grad``
    and logs running averages every ``args.report_freq`` steps.

    Returns:
        (top1_average, loss_average) over the whole validation queue.
    """
    # FIX: removed `global is_multi_gpu` — the name was never referenced here.
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    for step, (input, target) in enumerate(valid_queue):
        with torch.no_grad():
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            logits, _ = model(input)
            loss = criterion(logits, target)
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        # Weight running averages by batch size so partial final batches
        # are accounted for correctly.
        n = input.size(0)
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)
        if (step % args.report_freq) == 0:
            logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    return (top1.avg, objs.avg)
def test_categorical_basic():
    """A `size` inconsistent with the shape of `p` raises ValueError."""
    probs = np.array([[100000, 1, 1],
                      [1, 100000, 1],
                      [1, 1, 100000]], dtype=config.floatX)
    probs = probs / probs.sum(axis=-1)
    rng = np.random.default_rng()

    # size larger than the batch of probability rows
    with pytest.raises(ValueError):
        categorical.rng_fn(rng, probs, size=(10,))

    expected_msg = re.escape('`size` is incompatible with the shape of `p`')
    # size smaller than the batch of probability rows
    with pytest.raises(ValueError, match=expected_msg):
        categorical.rng_fn(rng, probs, size=(1,))
    # extra leading axis on `p` conflicting with the requested size
    with pytest.raises(ValueError, match=expected_msg):
        categorical.rng_fn(rng, probs[None], size=(3,))
def asynq(pure=False, sync_fn=None, cls=async_task.AsyncTask, asyncio_fn=None, **kwargs):
    """Decorator factory that turns a function into an async task.

    With ``pure=True`` the function is wrapped as a pure async function
    (extra ``kwargs`` are forwarded to the decorator; ``sync_fn`` is
    disallowed).  Otherwise the function becomes a regular async task,
    optionally paired with an explicit synchronous implementation
    ``sync_fn``.  Applying the decorator twice to one function is an error.
    """
    if kwargs:
        assert pure, 'custom kwargs are only supported with pure=True'
    if pure:
        assert sync_fn is None, 'sync_fn is not supported for pure async functions'

    def decorate(fn):
        # Guard against double application of the decorator.
        assert not (is_pure_async_fn(fn) or has_async_fn(fn)), '() decorator can be applied just once'
        if pure:
            return qcore.decorators.decorate(PureAsyncDecorator, cls, kwargs)(fn)
        if sync_fn is None:
            return qcore.decorators.decorate(AsyncDecorator, cls, kwargs, asyncio_fn)(fn)
        return qcore.decorators.decorate(AsyncAndSyncPairDecorator, cls, sync_fn)(fn)

    return decorate
class Stoned_Optimizer(BaseOptimizer):
    """STONED-style molecular optimizer: a (1, lambda) evolutionary loop that
    repeatedly mutates the SELFIES string of the current best molecule.

    NOTE(review): original formatting was collapsed onto one line; the
    indentation here is the standard layout of this optimizer — confirm
    against the upstream source.
    """

    def __init__(self, args=None):
        super().__init__(args)
        self.model_name = 'stoned'

    def _optimize(self, oracle, config):
        # Attach the scoring oracle; self.oracle tracks calls and buffers scores.
        self.oracle.assign_evaluator(oracle)
        # Seed population: random SMILES from the pool, converted to SELFIES.
        population = np.random.choice(self.all_smiles, size=config['generation_size']).tolist()
        population = [encoder(smi) for smi in population]
        # Longest SELFIES in the seed population bounds the mutation length.
        len_random_struct = max([len(get_selfie_chars(s)) for s in population])
        patience = 0
        while True:
            # Snapshot top-100 scores for the later convergence comparison
            # (0 is a placeholder until the buffer has enough entries).
            if (len(self.oracle) > 100):
                self.sort_buffer()
                old_scores = [item[1][0] for item in list(self.mol_buffer.items())[:100]]
            else:
                old_scores = 0
            fitness = self.oracle([decoder(i) for i in population])
            if self.finish:
                print('max oracle hit, abort ...... ')
                break
            # Elitist step: next generation = mutants of the single best + the best itself.
            best_idx = np.argmax(fitness)
            best_selfie = population[best_idx]
            new_population = []
            for i in range((config['generation_size'] - 1)):
                (selfie_mutated, _) = mutate_selfie(best_selfie, len_random_struct, write_fail_cases=True)
                new_population.append(selfie_mutated)
            new_population.append(best_selfie)
            population = new_population[:]
            # Convergence check: once enough oracle calls accumulated, stop if
            # the top-100 scores stay unchanged for args.patience generations.
            if (len(self.oracle) > 2000):
                self.sort_buffer()
                new_scores = [item[1][0] for item in list(self.mol_buffer.items())[:100]]
                if (new_scores == old_scores):
                    patience += 1
                    if (patience >= self.args.patience):
                        self.log_intermediate(finish=True)
                        print('convergence criteria met, abort ...... ')
                        break
                else:
                    patience = 0
                old_scores = new_scores
def process_for_clause(tree):
    """Translate every ``for_clause_entry`` node under ``tree`` into a
    ``For(vars, unpack_expr, expression)`` token sequence.

    Returns a list with one token sequence per clause, in source order.
    """
    entries = [child for child in tree.children[1].children
               if isinstance(child, Node) and child.label == 'for_clause_entry']
    result = []
    for entry in entries:
        # Quoted loop-variable names, joined with commas inside brackets.
        name_toks = [mk_tok(['"%s"' % term.value])
                     for term in entry.children[0].terms() if term.type == 'NAME']
        joined = reduce(lambda left, right: left + mk_tok([',']) + right, name_toks)
        var_list = mk_tok(['[', joined, ']'])
        # The raw unpacking target text, quoted as a single string.
        unpack_expr = '"' + ' '.join(term.value for term in entry.children[0].terms()) + '"'
        # The iterated expression, escaped for embedding.
        expr_toks = getTermsEsc(entry.children[2], True)
        result.append(mk_tok(['For(', var_list, ',', unpack_expr, ',', expr_toks, ')']))
    return result