code
stringlengths
101
5.91M
def check_attribute_being_used(config_class, attributes, default_value, source_strings): attribute_used = False for attribute in attributes: for modeling_source in source_strings: if ((f'config.{attribute}' in modeling_source) or (f'getattr(config, "{attribute}"' in modeling_source) or (f'ge...
class LaionDataset(BaseDataset): def __init__(self, vis_processor, text_processor, location): super().__init__(vis_processor=vis_processor, text_processor=text_processor) self.inner_dataset = wds.DataPipeline(wds.ResampledShards(location), wds.tarfile_to_samples(handler=wds.warn_and_continue), wds.s...
def make_dedup_key(outer_type, item_nodes): item_keys = [((py_object_type, None, type(None)) if (node is None) else (make_dedup_key(node.type, ([(node.mult_factor if node.is_literal else None)] + node.args)) if node.is_sequence_constructor else (make_dedup_key(node.type, (node.start, node.stop, node.step)) if node....
def html_quote(value, force=True): if ((not force) and hasattr(value, '__html__')): return value.__html__() if (value is None): return '' if (not isinstance(value, basestring_)): value = coerce_text(value) if ((sys.version >= '3') and isinstance(value, bytes)): value = cg...
def avito1k_train_test(nrows=None):
    """Load the bundled 1k-row Avito sample and split it into train/test sets.

    Args:
        nrows: optional cap on the number of CSV rows to read; ``None``
            reads the whole file.

    Returns:
        (train_data, test_data): test partition is 500 rows, split with a
        fixed random seed for reproducibility.
    """
    # Bug fix: ``nrows`` was accepted but never forwarded to read_csv,
    # so the parameter had no effect.
    data = pd.read_csv('./examples/data/avito1k_train.csv', nrows=nrows)
    train_data, test_data = train_test_split(data, test_size=500, random_state=42)
    return (train_data, test_data)
def main(args): wpt = nltk.WordPunctTokenizer() write_list = [] with open(args.input_file, 'r') as in_file: for nlg in in_file: nlg_words = wpt.tokenize(nlg.strip()) dialogue_instance = ' '.join(nlg_words) write_list.append(dialogue_instance) with open(args.to...
def experiment(variant): task = generate_task(task_generator_id='picking', dense_reward_weights=np.array([250, 0, 125, 0, 750, 0, 0, 0.005]), fractional_reward_weight=1, goal_height=0.15, tool_block_mass=0.02) eval_env = CausalWorld(task=task, skip_frame=3, enable_visualization=False, seed=0, max_episode_length...
class Bird(Benchmark): def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip(([((- 2.0) * pi)] * self.N), ([(2.0 * pi)] * self.N))) self.global_optimum = [[4., 3.], [(- 1.), (- 3.)]] self.fglob = (- 106.) def fun(self, x, *args): s...
def set_layer_from_config(layer_config): if (layer_config is None): return None name2layer = {ConvLayer.__name__: ConvLayer, DepthConvLayer.__name__: DepthConvLayer, PoolingLayer.__name__: PoolingLayer, IdentityLayer.__name__: IdentityLayer, LinearLayer.__name__: LinearLayer, MBInvertedConvLayer.__name_...
class Window(): def __init__(self, name, res, vsync=False, show_window=True, fps_limit=1000, pos=(100, 100)): check_ggui_availability() package_path = str(pathlib.Path(__file__).parent.parent) ti_arch = default_cfg().arch self.window = _ti_core.PyWindow(get_runtime().prog, name, res,...
def _execute_1D(func_str, pocketfft_func, x, n, axis, norm, overwrite_x, workers, plan): xp = array_namespace(x) if is_numpy(xp): return pocketfft_func(x, n=n, axis=axis, norm=norm, overwrite_x=overwrite_x, workers=workers, plan=plan) norm = _validate_fft_args(workers, plan, norm) if hasattr(xp,...
class MatplotlibPdfManager(): def __init__(self, path, plt, pad_inches=None): self.path = path print('Creating {}'.format(self.path)) self.pdf = PdfPages(self.path) self.plt = plt self.ncount = 0 self.pad_inches = pad_inches def generate_from(self): self.n...
def test_normalize_layer(): rgb_mean = (1, 2, 3) rgb_std = (1, 0.5, 0.25) layer = ImgNormalize(1, rgb_mean, rgb_std) x = torch.randn((2, 3, 64, 64)) y = layer(x) x = x.permute((1, 0, 2, 3)).reshape((3, (- 1))) y = y.permute((1, 0, 2, 3)).reshape((3, (- 1))) rgb_mean = torch.tensor(rgb_me...
def test_dlne_simulate(group):
    """A simulated DLNotEqual transcript must pass simulation-consistency
    checks yet fail real verification."""
    gen = group.generator()
    secret = Secret()
    point = 3 * gen
    other_point = 397474 * gen
    other_gen = 1397 * gen
    statement = DLNotEqual([point, gen], [other_point, other_gen], secret, bind=True)
    secret_dict = {secret: 3}
    transcript = statement.simulate()
    assert statement.verify_simulation_consistency(transcript)
    assert not statement.verify(transcript)
.parametrize('bilinear_type', ['all', 'each', 'interaction']) def test_BilinearInteraction(bilinear_type): with CustomObjectScope({'BilinearInteraction': layers.BilinearInteraction}): layer_test(layers.BilinearInteraction, kwargs={'bilinear_type': bilinear_type}, input_shape=([(BATCH_SIZE, 1, EMBEDDING_SIZE...
class OptimizerAndSchedulerConfig(FairseqDataclass):
    """Per-group optimizer and LR-scheduler settings."""

    # Optimizer config for this group (resolved by the registry at runtime).
    optimizer: Any = None
    # Optional LR-scheduler config for this group.
    lr_scheduler: Optional[Any] = None
    # Learning-rate schedule; II(...) is an OmegaConf-style interpolation
    # that reads the top-level `optimization.lr` value.
    lr: List = II('optimization.lr')
    # Scalar LR override; presumably takes precedence over `lr` when set —
    # confirm against the consuming optimizer code.
    lr_float: Optional[float] = None
def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')): (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: (model_args, data_args,...
def create_vocabulary(vocabulary_path, data_paths, max_vocabulary_size, tokenizer=None, normalize_digits=False): if (not gfile.Exists(vocabulary_path)): print(('Creating vocabulary %s from data %s' % (vocabulary_path, str(data_paths)))) vocab = {} for path in data_paths: with gfi...
def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
    """Dispatch to the Python-version-appropriate unittest regex assertion."""
    __tracebackhide__ = True
    # assertRaisesRegexp was renamed to assertRaisesRegex in Python 3.
    checker = (_d.assertRaisesRegex
               if sys.version_info.major >= 3
               else _d.assertRaisesRegexp)
    return checker(exception_class, expected_regexp, *args, **kwargs)
def train(epoch): model.train() for (batch_idx, (image1, image2, image3, mask)) in enumerate(train_loader): if args.cuda: (image1, image2, image3, mask) = (image1.cuda(), image2.cuda(), image3.cuda(), mask.cuda()) (image1, image2, image3, mask) = (Variable(image1), Variable(image2), ...
class BoundingBox(): def __init__(self, imageName, classId, x, y, w, h, typeCoordinates=CoordinatesType.Absolute, imgSize=None, bbType=BBType.GroundTruth, classConfidence=None, format=BBFormat.XYWH): self._imageName = imageName self._typeCoordinates = typeCoordinates if ((typeCoordinates == ...
def generateBatesPDF(n): def f(x): coeff = (n / (2.0 * fact((n - 1)))) summation = 0 lead = 1.0 for k in range((n + 1)): v1 = binom(n, k) v2 = (((n * x) - k) ** (n - 1)) nx = (n * x) s = 0 if (nx < k): s = (-...
class EfficientNet(tf.keras.Model): def __init__(self, input_shape, model_name, checkpoint=None, override_params=None, normalization_op_params=None): (blocks_args, global_params) = get_model_params(model_name, override_params=override_params, normalization_op_params=normalization_op_params) input_la...
_cache(maxsize=1024) def _unit_nhops_to_fil(layer, filter_nodes, fil_dict, fwd=False): nhops = 0 for (filrng, coord_list) in fil_dict.items(): fil_size = ((filrng[0].size() * filrng[1].size()) * layer.filter_size()) if fwd: src_set = set(filter_nodes) dst_set = set(coord_...
def bnd_values(alf, bet, k=0, gn=1): if (gn == 1): gn = (lambda a, b, n: 1) if (k == 0): return ((lambda i: ((gn(alf, bet, i) * ((- 1) ** i)) * sp.binomial((i + bet), i))), (lambda i: (gn(alf, bet, i) * sp.binomial((i + alf), i)))) elif (k == 1): gam = (lambda i: (sp.rf((((i + alf) +...
def lazy_groups_of(iterator: Iterator[A], group_size: int) -> Iterator[List[A]]:
    """Lazily yield successive lists of ``group_size`` items from ``iterator``.

    The final group may be shorter than ``group_size``; iteration stops once
    the underlying iterator is exhausted.
    """
    def _next_group():
        # Pull up to group_size items; an empty list signals exhaustion.
        return list(islice(iterator, group_size))

    # iter(callable, sentinel) keeps calling until the sentinel [] comes back.
    return iter(_next_group, [])
class RandomImportanceMetric(BaseImportanceMetric): def __init__(self, graph: Graph, representative_data_gen: Callable, fw_impl: FrameworkImplementation, pruning_config: PruningConfig, fw_info: FrameworkInfo): self.float_graph = graph self.representative_data_gen = representative_data_gen se...
def _pre_loading(args, training, validation): training_set = {} fl = h5py.File(args['input_hdf5'], 'r') print('Loading the training data into the memory ...') pbar = tqdm(total=len(training)) for ID in training: pbar.update() if (ID.split('_')[(- 1)] == 'EV'): dataset = f...
def worker_init_envs(g, alloc, scope, env): logger.log(('initializing environment on worker %d' % g.worker_id)) if (not hasattr(g, 'parallel_vec_envs')): g.parallel_vec_envs = dict() g.parallel_vec_env_template = dict() g.parallel_vec_envs[scope] = [(idx, pickle.loads(pickle.dumps(env))) for...
class DataLoaderX(DataLoader):
    """DataLoader whose iterator is wrapped in a BackgroundGenerator
    (presumably so batches are prefetched on a background thread — see
    the BackgroundGenerator implementation)."""

    def __iter__(self):
        base_iterator = super().__iter__()
        return BackgroundGenerator(base_iterator)
def analyze_proto(proto): def analyze(p): fields = {} for f in p.fields: child_fields = None if (f.type == FieldDescriptor.TYPE_MESSAGE): child_fields = analyze(f.message_type) fields[f.name] = {'type': f.type, 'message': (getattr(protobufs, f.mess...
def submit_local_explain(datasource, original_sql, select, model, model_params, result_table, explainer='TreeExplainer', user=''): model = Model.load_from_db(datasource, model) if (model.get_type() == EstimatorType.XGBOOST): explain_func = xgboost_explain else: explain_func = tf_explain ...
class HomogenizationApp(HomogenizationEngine): def process_options(options): get = options.get volume = get('volume', None) volumes = get('volumes', None) if ((volume is None) and (volumes is None)): raise ValueError('missing "volume" in options!') return Struct(p...
def get_name(rules, model, enc_dim, dim, seed):
    """Build the experiment run path for a given model configuration.

    ``seed`` packs two values: data seed ``seed % 5`` and split seed
    ``seed // 5``.
    """
    split_seed, data_seed = divmod(seed, 5)
    return (
        f'Sequence_10_Order_1_Dim_32/Data-Seed_{data_seed}'
        f'/GT_Rules_{rules}/{model}_{enc_dim}_{dim}_{rules}_{split_seed}'
    )
class NetState(message.Message):
    """Generated protobuf message class bound to the _NETSTATE descriptor."""
    # Python-2-style metaclass declaration: the reflection metaclass fills in
    # field accessors and (de)serialization from DESCRIPTOR.
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETSTATE
def eval_avg_halluci(sessions) -> list:
    """Return the mean hallucination score for each session.

    NOTE(review): assumes get_halluci never returns an empty sequence
    (division by its length) — confirm at the call sites.
    """
    return [sum(scores) / len(scores) for scores in map(get_halluci, sessions)]
class EUCKRProber(MultiByteCharSetProber): def __init__(self): super(EUCKRProber, self).__init__() self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL) self.distribution_analyzer = EUCKRDistributionAnalysis() self.reset() def charset_name(self): return 'EUC-KR' def lan...
def SaveMatlabSparseMtx_PNEANet(Graph, OutFNm):
    """SWIG wrapper: save ``Graph`` as a MATLAB sparse-matrix file at ``OutFNm``.

    Delegates directly to the native _snap extension.
    """
    return _snap.SaveMatlabSparseMtx_PNEANet(Graph, OutFNm)
def eval_checkpoint(model_object, eval_dataloader, config, device, n_gpu, label_list, eval_sign='dev'): model_object.eval() eval_loss = 0 start_pred_lst = [] end_pred_lst = [] span_pred_lst = [] mask_lst = [] start_gold_lst = [] span_gold_lst = [] end_gold_lst = [] eval_steps = 0...
def convert_to_complex_array(x_float2, dtype=np.complex64):
    """Pack a (..., 2) real/imag array into a complex array.

    Args:
        x_float2: array whose last axis has size 2, holding (real, imag).
        dtype: complex dtype of the result (default ``np.complex64``).

    Returns:
        Complex array of shape ``x_float2.shape[:-1]`` with dtype ``dtype``.
    """
    x_real = x_float2[..., 0]
    x_imag = x_float2[..., 1]
    # Bug fix: the ``dtype`` parameter was previously ignored, so float64
    # input silently produced complex128. copy=False makes the cast a no-op
    # when the arithmetic already yields the requested dtype.
    return (x_real + 1j * x_imag).astype(dtype, copy=False)
def single_prompt_wordnet(prompt, nums_lst): original_prompt = prompt synonyms_prompt_lst = [] keywords_dict = extract_keywords_and_POS(prompt) if (keywords_dict == False): return [] keywords_lst = list(keywords_dict.keys()) num_keywords = len(keywords_lst) prompt_synonym = original_...
class FlowNetSD(nn.Module): def __init__(self, batchNorm=True): super(FlowNetSD, self).__init__() self.batchNorm = batchNorm self.conv0 = conv(self.batchNorm, 6, 64) self.conv1 = conv(self.batchNorm, 64, 64, stride=2) self.conv1_1 = conv(self.batchNorm, 64, 128) self....
class RegNet(nn.Module): def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0.0, zero_init_last_bn=True): super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate stem_width = cfg['stem_width'] self.stem = ConvBnAct(in_chans...
def main(): parser = argparse.ArgumentParser(description='Convert keys in official pretrained STDC1/2 to MMSegmentation style.') parser.add_argument('src', help='src model path') parser.add_argument('dst', help='save path') parser.add_argument('type', help='model type: STDC1 or STDC2') args = parser...
def flatten(dictionary: dict, parent_key: str='', sep: str='.'): import collections items = [] for (k, v) in dictionary.items(): new_key = (((parent_key + sep) + k) if parent_key else k) if isinstance(v, collections.abc.MutableMapping): items.extend(flatten(v, new_key, sep=sep).i...
class NeuralIR_Encoder_PassThrough(nn.Module): def __init__(self, word_embeddings: TextFieldEmbedder, neural_ir_model: nn.Module): super(NeuralIR_Encoder_PassThrough, self).__init__() self.word_embeddings = word_embeddings self.neural_ir_model = neural_ir_model def forward(self, query: D...
def register_Ns3CriticalSection_methods(root_module, cls):
    """Register ns3::CriticalSection constructors on the pybindgen wrapper.

    Auto-generated binding code: adds the SystemMutex-taking constructor
    and the copy constructor.
    """
    cls.add_constructor([param('ns3::SystemMutex &', 'mutex')])
    cls.add_constructor([param('ns3::CriticalSection const &', 'arg0')])
    return
def input_user(prompt: str, prefill='') -> str:
    """Prompt the user (cyan/bold) and re-ask until a non-blank reply is given."""
    styled_prompt = bcolors.OKCYAN + bcolors.BOLD + prompt
    user_utterance = input_with_prefill(styled_prompt, prefill)
    # Keep prompting while the reply is empty or whitespace-only.
    while not user_utterance.strip():
        user_utterance = input_with_prefill(styled_prompt, prefill)
    # Reset terminal styling after the user's entry.
    print(bcolors.ENDC)
    return user_utterance
_module() class RealBasicVSR(RealESRGAN): def __init__(self, generator, discriminator=None, gan_loss=None, pixel_loss=None, cleaning_loss=None, perceptual_loss=None, is_use_sharpened_gt_in_pixel=False, is_use_sharpened_gt_in_percep=False, is_use_sharpened_gt_in_gan=False, is_use_ema=True, train_cfg=None, test_cfg=N...
def read_test_ds(data_path):
    """Yield ``{'source': words}`` records from a tab-separated test file.

    Each line is expected to begin with two tab-separated columns
    (ids, words); any further columns are ignored.
    """
    with open(data_path, 'r', encoding='utf-8') as data_file:
        for raw_line in data_file:
            _ids, words = raw_line.strip('\n').split('\t')[:2]
            yield {'source': words}
def cluster_feature(feature_list, zone_number=16):
    """Cluster the features into ``zone_number`` zones via mc.k_means.

    The constant 300 is passed straight through to mc.k_means —
    presumably an iteration cap; confirm at its definition.

    Returns:
        (record, centers): the assignment record and the zone centers.
    """
    assignments, zone_centers = mc.k_means(feature_list, zone_number, 300)
    return (assignments, zone_centers)
class Wav2Vec2ConformerForCTC(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable.

    Constructing it delegates to requires_backends, which enforces the
    'torch' dependency.
    """

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class SuperResolutionNet(nn.Module): def __init__(self, upscale_factor): super(SuperResolutionNet, self).__init__() self.relu = nn.ReLU() self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2)) self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)) self.conv3 = nn.Conv2d(64, 32...
_module class Classification(nn.Module): def __init__(self, backbone, with_sobel=False, head=None, pretrained=None): super(Classification, self).__init__() self.with_sobel = with_sobel if with_sobel: self.sobel_layer = Sobel() self.backbone = builder.build_backbone(backbo...
def _add_instances_conversion_methods(newInstances): cls_name = newInstances.__name__ .unused def from_instances(instances: Instances): fields = instances.get_fields() image_size = instances.image_size ret = newInstances(image_size) for (name, val) in fields.items(): ...
.hypothesis_nested def test_curl_command_validity(curl, loose_schema): (case=loose_schema['/test/{key}']['POST'].as_strategy()) (max_examples=30, suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much], deadline=None) def test(case): command = case.as_curl_command() curl.as...
_grad() def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None): if (config_path is not None): config = FlavaConfig.from_pretrained(config_path) else: config = FlavaConfig() hf_model = FlavaForPreTraining(config).eval() codebook_state_dict...
def dump_ndarray(data, path_to_file):
    """Serialize ``data`` to ``path_to_file`` in NumPy ``.npy`` format.

    Raises:
        OSError: if the file cannot be opened for writing.
    """
    # The previous try/except only did ``raise e``, which adds nothing but
    # an extra traceback frame — removed; exceptions still propagate.
    with open(path_to_file, 'wb') as f:
        np.save(f, data)
class TFCLIPModelTester(): def __init__(self, parent, is_training=True): self.parent = parent self.text_model_tester = TFCLIPTextModelTester(parent) self.vision_model_tester = TFCLIPVisionModelTester(parent) self.is_training = is_training def prepare_config_and_inputs(self): ...
def _block_name_base(stage, block): if (block < 27): block = ('%c' % (block + 97)) conv_name_base = ((('res' + str(stage)) + str(block)) + '_branch') bn_name_base = ((('bn' + str(stage)) + str(block)) + '_branch') return (conv_name_base, bn_name_base)
class LayerFusingTest3(BaseLayerFusingTest): def __init__(self, unit_test): super().__init__(unit_test) self.expected_fusions = [[Conv2d, ReLU]] def get_tpc(self): (generated_tp, mixed_precision_configuration_options) = super().get_tpc() with generated_tp: conv = tp.O...
def test_multivi(): data = synthetic_iid() MULTIVI.setup_anndata(data, batch_key='batch') vae = MULTIVI(data, n_genes=50, n_regions=50) vae.train(1, save_best=False) vae.train(1, adversarial_mixing=False) vae.train(3) vae.get_elbo(indices=vae.validation_indices) vae.get_accessibility_est...
def get_sentence_indices(range_param, src_sentences): ids = [] if args.range: try: if (':' in args.range): (from_idx, to_idx) = args.range.split(':') else: from_idx = int(args.range) to_idx = from_idx ids = range((int(fr...
def get_addr(f, indices):
    """Build a Taichi expression for the address of field ``f`` at ``indices``."""
    ast_builder = impl.get_runtime().compiling_callable.ast_builder()
    index_group = expr.make_expr_group(indices)
    address_expr = ast_builder.expr_snode_get_addr(f._snode.ptr, index_group)
    # Attach source-location debug info for error reporting.
    dbg_info = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info())
    return expr.Expr(address_expr, dbg_info=dbg_info)
def attrs_of_obj(obj, attrs: (Mapping | None)=None) -> (Mapping | None):
    """Return ``attrs`` if given, else the ``_attrs`` of an awkward object.

    Falls back to ``None`` when ``obj`` is not an awkward
    Array/Record/ArrayBuilder instance.
    """
    from awkward.highlevel import Array, ArrayBuilder, Record

    if attrs is not None:
        return attrs
    if isinstance(obj, (Array, Record, ArrayBuilder)):
        return obj._attrs
    return None
class Dotdict(dict): __getattr__ = dict.__getitem__ __setattr__ = dict.__setitem__ __delattr__ = dict.__delitem__ def __init__(self, dct=None): dct = (dict() if (not dct) else dct) for (key, value) in dct.items(): if hasattr(value, 'keys'): value = Dotdict(val...
class PickAndPlaceSimpleTask(BaseTask): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def goal_satisfied(self, state): pcs = self.goal_conditions_met(state) return (pcs[0] == pcs[1]) def goal_conditions_met(self, state): ts = 1 s = 0 t...
.parametrize('ctx, func_name', ctxs) .parametrize('axis', [0, 1, 2, (- 1), (- 2), (- 3)]) .parametrize('seed', [313]) .parametrize('different_size', [False, True]) .parametrize('num_inputs', [2, 3]) def test_concatenate_double_backward(seed, axis, different_size, num_inputs, ctx, func_name): from nbla_test_utils im...
class DistanceAggregator(QuasipartitionAggregatorBase): def __init__(self, num_quasipartition_mixtures: int): super().__init__(num_quasipartition_mixtures) self.alpha_net = DeepLinearNet(input_dim=num_quasipartition_mixtures, output_dim=1, non_negative=True) def forward(self, expected_quasiparti...
def vorticity(vf: ti.template()):
    """Write the (scaled) curl of velocity field ``vf`` into velocity_curls.

    Uses a central difference: 0.5 * ((dv_y/dx) - (dv_x/dy)) per cell.
    """
    for i, j in vf:
        left = sample(vf, i - 1, j)
        right = sample(vf, i + 1, j)
        below = sample(vf, i, j - 1)
        above = sample(vf, i, j + 1)
        velocity_curls[i, j] = 0.5 * (right.y - left.y - above.x + below.x)
class Trainer(): def __init__(self, dataset, args, config): Model = BC.Model self.model = Model(config, args, pre_embed=dataset.vec.embeddings) self.metrics = calc_metrics_classification self.display_metrics = True def train_standard(self, train_data, test_data, args, save_on_met...
def main(args=sys.argv[1:]): p = argparse.ArgumentParser() p.add_argument('cdbg_prefix', help='cdbg prefix') p.add_argument('catlas_prefix', help='catlas prefix') p.add_argument('output') p.add_argument('--contigs-db', required=True) p.add_argument('--maxsize', type=float, default=20000) p.a...
class Encoder(nn.Module): def __init__(self): super(Encoder, self).__init__() resnet = models.resnet50(pretrained=True) self.conv1 = resnet.conv1 self.bn1 = resnet.bn1 self.relu = resnet.relu self.maxpool = resnet.maxpool self.res2 = resnet.layer1 self...
def full_pandas_dataset_nonunique_columns(): events = pd.DataFrame({'user_id': [0, 0, 1, 1, 1, 2], 'item_id': [0, 1, 0, 2, 3, 1], 'timestamp': [0, 1, 2, 3, 4, 5], 'rating': [1.1, 1.2, 1.3, 2, 3, 4]}) users = pd.DataFrame({'user_id': [0, 1, 2], 'gender': [0, 1, 0], 'feature1': [0.1, 0.2, 0.3]}) items = pd.Da...
def make_spark_mapper(mapper: Mapper) -> Mapper:
    """Adapt ``mapper`` for Spark and return the same (mutated) instance.

    NOTE(review): monkey-patches the instance's ``_update_fields`` with the
    module-level ``_update_fields`` — presumably a Spark-safe variant of the
    method; confirm at its definition.
    """
    mapper._update_fields = _update_fields
    return mapper
def dice_per_img(score, target): target = np.atleast_2d(target.astype(bool)) B = target.shape[0] target = target.reshape((B, (- 1))) score = np.atleast_2d(score.astype(bool)).reshape((B, (- 1))) intersection = np.count_nonzero((target & score), axis=1) size_i1 = np.count_nonzero(target, axis=1)....
def _test_pretrained(tmp_path, model_name, test_image, model_type=StarDist2D, test_image_norm_axes='ZYX'): model = model_type.from_pretrained(model_name) assert (model is not None) export_path = (tmp_path / f'{model_name}.zip') export_bioimageio(model, export_path, test_input=test_image, test_input_norm...
def register_Ns3DsrDsrReceivedRreqEntry_methods(root_module, cls): cls.add_binary_comparison_operator('==') cls.add_constructor([param('ns3::dsr::DsrReceivedRreqEntry const &', 'arg0')]) cls.add_constructor([param('ns3::Ipv4Address', 'd', default_value='ns3::Ipv4Address()'), param('uint16_t', 'i', default_v...
def reprocess_lines(processed_lines): reprocessed_lines = [] for line in processed_lines: text = ''.join(line) try: chunks = sent_tokenize(text) except NameError as e: raise NameError('Sentences cannot be reprocessed without first installing pythainlp') from e ...
class BleuScorer(object): __slots__ = ('n', 'crefs', 'ctest', '_score', '_ratio', '_testlen', '_reflen', 'special_reflen') def copy(self): new = BleuScorer(n=self.n) new.ctest = copy.copy(self.ctest) new.crefs = copy.copy(self.crefs) new._score = None return new def _...
class MobileViTImageProcessor(metaclass=DummyObject):
    """Import-time placeholder used when the vision backend is unavailable.

    Constructing it delegates to requires_backends, which enforces the
    'vision' dependency.
    """

    _backends = ['vision']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
class ModelStack(ProbabilisticModel, Generic[ProbabilisticModelType]): def __init__(self, model_with_event_size: tuple[(ProbabilisticModelType, int)], *models_with_event_sizes: tuple[(ProbabilisticModelType, int)]): (self._models, self._event_sizes) = zip(*((model_with_event_size,) + models_with_event_sizes...
def getargspec(func):
    """Return ``(args, varargs, varkw, defaults)`` for a Python function.

    Bound methods are first unwrapped to their underlying function.

    Raises:
        TypeError: if ``func`` is not a pure-Python function.
    """
    if ismethod(func):
        func = func.__func__
    if not isfunction(func):
        raise TypeError('arg is not a Python function')
    arg_names, varargs_name, varkw_name = getargs(func.__code__)
    return arg_names, varargs_name, varkw_name, func.__defaults__
def save_images(img_tensors, img_names, save_dir): for (img_tensor, img_name) in zip(img_tensors, img_names): tensor = (((img_tensor.clone() + 1) * 0.5) * 255) tensor = tensor.cpu().clamp(0, 255) try: array = tensor.numpy().astype('uint8') except: array = tens...
def filter_unique_options(options, allow_kwarg, type_to_signature, remove_self): def exclude_arg(arg): return (arg['type'] == 'CONSTANT') def exclude_arg_with_self_check(arg): return (exclude_arg(arg) or (remove_self and (arg['name'] == 'self'))) def signature(option, kwarg_only_count): ...
class EmailNoScrollOracle(LinearProgramPolicy): def __init__(self, config): labeled_demos = [LabeledDemonstration.from_oracle_programs([[WeightedProgram(ClickToken(LikeToken(FieldsValueSelectorToken(0))), 1)], [WeightedProgram(ClickToken(LikeToken(StringToken(u'Forward'))), 1)], [WeightedProgram(FocusAndTyp...
def save_model_card(args, repo_id: str, images=None, repo_folder=None): img_str = '' if (len(images) > 0): image_grid = make_image_grid(images, 1, len(args.validation_prompts)) image_grid.save(os.path.join(repo_folder, 'val_imgs_grid.png')) img_str += '![val_imgs_grid](./val_imgs_grid.pn...
class TSADEvaluator(EvaluatorBase): config_class = TSADEvaluatorConfig def __init__(self, model, config): from merlion.models.anomaly.base import DetectorBase assert isinstance(model, DetectorBase) super().__init__(model=model, config=config) def max_early_sec(self): return s...
class SoftMax(object):
    """Callable softmax over the last axis of a Theano tensor.

    With ``keepdims=True`` the output is reshaped back to the input shape;
    otherwise the flattened 2-D softmax result is returned.
    """

    def __init__(self, keepdims=False):
        self.keepdims = keepdims

    def __call__(self, x):
        # T.nnet.softmax expects a 2-D input, so flatten all leading axes.
        flattened = x.reshape((-1, x.shape[-1]))
        probs = T.nnet.softmax(flattened)
        return probs.reshape(x.shape) if self.keepdims else probs
def log(message, level=LogLevel.INFO, end='\n', context=True):
    """Record ``message`` on the singleton Log and dispatch it immediately."""
    entry = Log.get_instance().log(message, level=level, end=end, context=context)
    entry.dispatch()
def execute(chunk: Chunk, only_array: bool=True):
    """Transpose ``chunk`` and return it wrapped in a single-element list."""
    transposed = chunk.transpose(only_array=only_array)
    return [transposed]
def construct_config(opts: typing.List[str]):
    """Build the base OmegaConf config and overlay dotlist overrides.

    Args:
        opts: dotlist-style override strings (e.g. ``a.b=c``).
    """
    base_config = OmegaConf.create({'checkpoint_path': ''})
    return _merge_with_dotlist(base_config, opts)
def main():
    """Run the keyness pipeline, then optionally the dependency-bigram one."""
    run_keyness_pipeline()
    # run_dependency_bigram is a module-level flag defined elsewhere in the file.
    if run_dependency_bigram:
        run_dep_bigram_pipeline()
def has_file_allowed_extension(filename, extensions):
    """Check whether ``filename`` ends with one of the allowed extensions.

    Only the filename is lower-cased, so the extensions themselves are
    expected to be lowercase (e.g. '.jpg').

    Args:
        filename: path or file name to test.
        extensions: iterable of extension suffixes.

    Returns:
        bool: True if the lower-cased filename ends with any extension.
    """
    # str.endswith accepts a tuple of suffixes, replacing the per-extension
    # Python-level any(...) loop with one C-level call.
    return filename.lower().endswith(tuple(extensions))
def create_ast_list(elts, store: bool=False) -> ast.List:
    """Build an ast.List node holding ``elts``.

    Args:
        elts: the element AST nodes.
        store: if True the list gets a Store context (assignment target),
            otherwise a Load context.
    """
    context = ast.Store() if store else ast.Load()
    return ast.List(ctx=context, elts=elts)
def print_bonds(cgbeads, molecule, partitioning, cgbead_coords, ringatoms, trial=False): logger.debug('Entering print_bonds()') bondlist = [] constlist = [] text = '' if (len(cgbeads) > 1): for i in range(len(cgbeads)): for j in range((i + 1), len(cgbeads)): dist ...
def parse_requirements(requirements):
    """Read a requirements file, skipping blank lines and '#' comment lines."""
    with open(requirements) as req_file:
        stripped = (line.strip('\n') for line in req_file)
        return [entry for entry in stripped
                if entry and not entry.startswith('#')]
def _to_bytes(value: Any) -> bytes: if isinstance(value, bytes): return value if isinstance(value, Binary): return value.data return str(value).encode(errors='ignore')
class UpSample(nn.Module): def __init__(self, in_channels, scale_factor, stride=2, kernel_size=3): super(UpSample, self).__init__() self.scale_factor = int(np.log2(scale_factor)) modules_body = [] for i in range(self.scale_factor): modules_body.append(ResidualUpSample(in_...
class GcpDistributedDocker(Compiler): def getName(self) -> str: return 'GcpDistributedDocker' def __init_tf(self): self._log('initializing terraform environment...') mkdir('_tf_scripts') for file in ['_tf_scripts/get-swmtkn', '_tf_scripts/ssh-keygen', 'variables.tf', 'main.tf', '...