code stringlengths 101 5.91M |
|---|
def _blender_get_text_name(filename: str):
if (filename.startswith(os.path.sep) and (filename.count(os.path.sep) == 1)):
return filename[1:]
index = filename.rfind(('.blend' + os.path.sep))
if (index != (- 1)):
return filename[(index + 7):]
return None |
class TFMobileViTForImageClassification(metaclass=DummyObject):
    """Import-time placeholder used when the TensorFlow backend is absent.

    NOTE(review): presumably the DummyObject metaclass makes any use of this
    class raise an informative error about the missing 'tf' backend — confirm
    against DummyObject's definition.
    """
    # Backends required for the real implementation.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class HallLittlewood_q(HallLittlewood_generic):
class Element(HallLittlewood_generic.Element):
pass
def __init__(self, hall_littlewood):
HallLittlewood_generic.__init__(self, hall_littlewood)
self._P = self._hall_littlewood.P()
category = sage.categories.all.ModulesWithBasis(self... |
class ConvNextConfig(BackboneConfigMixin, PretrainedConfig):
model_type = 'convnext'
def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-12, layer_scale_init_value=1e-06, drop_path_rate=0.0, image_size=224, o... |
def reduce(tensor, dst, op=reduce_op.SUM, group=group.WORLD):
    """Reduce *tensor* across the process group onto rank *dst*.

    Thin wrapper over the legacy C binding ``torch._C._dist_reduce``; only
    valid after process-group-mode initialization (asserted below).
    """
    assert (torch.distributed._initialized == _INITIALIZED_PG), 'collective only supported in process-group mode'
    return torch._C._dist_reduce(tensor, dst, op, group)
def set_lr(optimizer, new_lr):
    """Overwrite the learning rate of every parameter group in *optimizer*."""
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def tf_efficientnet_lite0(pretrained=False, **kwargs):
    """EfficientNet-Lite0 with TF-port defaults (TF BN epsilon, 'same' pad)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet_lite(
        'tf_efficientnet_lite0',
        channel_multiplier=1.0,
        depth_multiplier=1.0,
        pretrained=pretrained,
        **kwargs,
    )
def add_shortcut(model, prefix, blob_in, dim_in, dim_out, stride):
    """Residual shortcut: identity when channel counts match, otherwise a
    1x1 strided conv (no bias) followed by an affine-channel op."""
    if dim_in == dim_out:
        return blob_in
    branch = model.Conv(
        blob_in, prefix + '_branch1', dim_in, dim_out,
        kernel=1, stride=stride, no_bias=1,
    )
    return model.AffineChannel(branch, prefix + '_branch1_bn')
def concat_langid(family):
for mode in ['trn', 'dev']:
with open(f'{OUT_DIR}/{family}.{mode}', 'w') as fp:
for lang in sorted(LANGS[family]):
for toks in read_file(f'{IN_DIR}/{family}/{lang}.{mode}'):
print(toks[0], toks[1], f'{lang};{toks[2]}', sep='\t', file... |
class Attri2Vec():
def __init__(self, layer_sizes, generator=None, bias=False, activation='sigmoid', normalize=None, input_dim=None, node_num=None, multiplicity=None):
if ((activation == 'linear') or (activation == 'relu') or (activation == 'sigmoid')):
self.activation = activation
else:... |
class Node():
balance = 0.5
def __init__(self, state, parent, action):
self.state = state
self.parent = parent
self.action = action
self.depth = 0
if (self.parent != None):
self.depth = (parent.depth + 1)
def getChildren(self):
children = []
... |
def fractional_translation(img, p, r=0.125):
if (random.random() < (1 - p)):
return img
tx = np.random.uniform((- r), r)
ty = np.random.uniform((- r), r)
if isinstance(img, PIL.Image.Image):
H = img.size[0]
W = img.size[1]
elif torch.is_tensor(img):
H = img.size()[(- ... |
def read_gt_label(gt_label_path, mapping_dict=None):
df_gt = pd.read_csv(gt_label_path, sep=' ', header=None)
gt = df_gt[0].tolist()
if (mapping_dict is not None):
gt_label = [mapping_dict[i] for i in gt]
gt_label = np.array(gt_label)
n_labels = len(mapping_dict)
else:
(_... |
.parametrize('seed', [313])
.parametrize('shape_a, shape_b', [((1,), (1,)), ((100,), (100,)), ((1, 1), (1, 1)), ((3, 2), (2, 3)), ((2, 3, 1), (1,)), ((1,), (2, 1, 2)), ((2, 3, 2), (2, 2, 2))])
def test_backward_dot_muti_array_out(seed, shape_a, shape_b):
rng = np.random.RandomState(seed)
a = rng.randn(*shape_a)... |
class ConvLSTM(nn.Module):
def __init__(self, inp_dim, oup_dim, kernel, dilation):
super().__init__()
pad_x = int(((dilation * (kernel - 1)) / 2))
self.conv_xf = nn.Conv2d(inp_dim, oup_dim, kernel, padding=pad_x, dilation=dilation)
self.conv_xi = nn.Conv2d(inp_dim, oup_dim, kernel, p... |
def validate_yaml(path: Union[(str, Path)]):
if (not _HAS_YAMALE):
raise RuntimeError('The Yamale library is required for YAML schema validation. You could install it by `pip install muspy[schema]`.')
data = yamale.make_data(str(path))
schema = yamale.make_schema(str(get_yaml_schema_path()))
yam... |
class CLIPScoreMetric(Metric):
def __init__(self, multilingual: bool=False):
self._multilingual: bool = multilingual
def __repr__(self):
return f'CLIPScoreMetric(multilingual={self._multilingual})'
def evaluate_generation(self, adapter_spec: AdapterSpec, request_state: RequestState, metric_s... |
def test_arraytype_8():
    """Round-trip check: a record array type carrying a custom parameter must
    survive str() -> from_datashape() unchanged."""
    records = ak.Array([{'x': 1, 'y': 1.1}, {'x': 2, 'y': 2.2}, {'x': 3, 'y': 3.3}])
    text = str(ak.with_parameter(records, 'wonky', 'string').type)
    assert str(ak.types.from_datashape(text, highlevel=False)) == text
def test_approx_predict_same_clusters():
n_clusters = 5
clusterer = HDBSCAN_flat(X, cluster_selection_method='eom', n_clusters=n_clusters)
(labels_flat, proba_flat) = approximate_predict_flat(clusterer, X_test, n_clusters=None)
n_clusters_out = n_clusters_from_labels(labels_flat)
assert (n_clusters_... |
class TrOCRConfig(PretrainedConfig):
model_type = 'trocr'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'decoder_layers'}
def __init__(self, vocab_size=50265, d_model=1024, decoder_layer... |
def Generator():
down_stack = [downsample_block(64, 4, batch_norm=False, use_config_activation=GENERATOR_ACTIVATION_INDEX[0]), downsample_block(128, 4, use_config_activation=GENERATOR_ACTIVATION_INDEX[1]), downsample_block(256, 4, use_config_activation=GENERATOR_ACTIVATION_INDEX[2]), downsample_block(512, 4, use_co... |
class SquadExample(object):
def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, all_answers=None, start_position=None, end_position=None, switch=None):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text ... |
def evaluate(config):
os.chdir(config.load_from_checkpoint)
original_overrides = OmegaConf.load(os.path.join(config.load_from_checkpoint, '.hydra/overrides.yaml'))
current_overrides = HydraConfig.get().overrides.task
hydra_config = OmegaConf.load(os.path.join(config.load_from_checkpoint, '.hydra/hydra.y... |
def encode_type_id(b, ext_id):
    """Return *b* unchanged when *ext_id* is None; otherwise the upper-cased
    tag followed by the length-encoded UTF-8 bytes of *ext_id*."""
    if ext_id is None:
        return b
    payload = ext_id.encode('UTF-8')
    return b.upper() + lencode(len(payload)) + payload
def pretrain_and_evaluate(args, model, tokenizer, eval_only, model_path):
val_dataset = TextDataset(tokenizer=tokenizer, file_path=args.val_datapath, block_size=tokenizer.max_len)
if eval_only:
train_dataset = val_dataset
else:
logger.info(f'Loading and tokenizing training data is usually sl... |
class BatchNorm(nn.Module):
    """BatchNorm2d that threads an auxiliary value (e.g. a mask) through
    unchanged: forward takes and returns an (x, m) pair."""

    def __init__(self, out_channels):
        super(BatchNorm, self).__init__()
        self.batch_norm = nn.BatchNorm2d(num_features=out_channels)

    def forward(self, input):
        x, m = input
        return self.batch_norm(x), m
def find_factors_UCI():
fname = 'datasets/UCI_processed/OCnodeslinks_chars.txt'
max_nodes = 1901
G_times = UCI_loader.load_temporarl_edgelist(fname, max_nodes=max_nodes)
T = toTensor(G_times, max_nodes)
dim = 3
print('CPD starts')
print(datetime.datetime.now())
factors = apply_parafac(T,... |
class DataParameter(message.Message):
    """Auto-generated protocol-buffer message class — do not edit by hand;
    the metaclass builds all fields and accessors from DESCRIPTOR."""
    # Python-2 style metaclass hook used by legacy protobuf codegen.
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATAPARAMETER
class IndexQuantizationRanker(_object):
__swig_setmethods__ = {}
__setattr__ = (lambda self, name, value: _swig_setattr(self, IndexQuantizationRanker, name, value))
__swig_getmethods__ = {}
__getattr__ = (lambda self, name: _swig_getattr(self, IndexQuantizationRanker, name))
__repr__ = _swig_repr
... |
()
def calculate_pavpu(prediction, label, uncertainty, accuracy_threshold=0.5, uncertainty_threshold=0.2, window_size=3):
accurate_certain = 0.0
inaccurate_certain = 0.0
accurate_uncertain = 0.0
inaccurate_uncertain = 0.0
anchor = (0, 0)
last_anchor = ((prediction.shape[0] - window_size), (predi... |
def test_turn_right_2(env_single_agent):
env = env_single_agent
env.agents[0].x = 4
env.agents[0].y = 25
env.agents[0].dir = Direction.DOWN
env._recalc_grid()
env.step([Action.RIGHT])
assert (env.agents[0].x == 4)
assert (env.agents[0].y == 25)
assert (env.agents[0].dir == Direction.... |
class AST_ArrayAccess(AST_Node):
def __init__(self, context, arrayname, accdims):
AST_Node.__init__(self, context)
self.arrayname = arrayname
self.accdims = accdims
def __repr__(self):
return (((('AST_ArrayAccess(' + str(self.arrayname)) + ', ') + str(self.accdims)) + ')')
de... |
def train(config: dict):
net = ResMLP(dropout=config['dropout'], num_residuals_per_block=config['num_residuals_per_block'], num_blocks=config['num_blocks'], num_classes=config['num_classes'], num_initial_features=512, add_residual=config['add_residual'], add_IC=config['add_IC'])
device = 'cpu'
if torch.cuda... |
class VGG16(Network):
def setup(self):
self.feed('data').conv(3, 3, 64, 1, 1, name='conv1_1').conv(3, 3, 64, 1, 1, name='conv1_2').max_pool(2, 2, 2, 2, name='pool1').conv(3, 3, 128, 1, 1, name='conv2_1').conv(3, 3, 128, 1, 1, name='conv2_2').max_pool(2, 2, 2, 2, name='pool2').conv(3, 3, 256, 1, 1, name='con... |
def CCompiler_show_customization(self):
if 0:
for attrname in ['include_dirs', 'define', 'undef', 'libraries', 'library_dirs', 'rpath', 'link_objects']:
attr = getattr(self, attrname, None)
if (not attr):
continue
log.info(("compiler '%s' is set to %s" % (... |
def load_or_generate_inception_embedding(directory, cache_dir, inception_path):
hash = hashlib.md5(directory.encode('utf-8')).hexdigest()
path = os.path.join(cache_dir, (hash + '.npy'))
if os.path.exists(path):
embeddings = np.load(path)
return embeddings
imgs = load_images_from_dir(dire... |
class CCodeConfig(object):
    """Plain container for C code-generation flags: the constructor simply
    records its three arguments as same-named attributes."""

    def __init__(self, emit_linenums=True, emit_code_comments=True, c_line_in_traceback=True):
        self.emit_linenums = emit_linenums
        self.emit_code_comments = emit_code_comments
        self.c_line_in_traceback = c_line_in_traceback
def _make_text_stream(stream, encoding, errors, force_readable=False, force_writable=False):
    """Wrap a binary stream in a non-closing text wrapper, defaulting the
    encoding to the stream's best guess and errors to 'replace'."""
    return _NonClosingTextIOWrapper(
        stream,
        encoding if encoding is not None else get_best_encoding(stream),
        errors if errors is not None else 'replace',
        line_buffering=True,
        force_readable=force_readable,
        force_writable=force_writable,
    )
def train(fold=0, data_name='dstc8', model_name='HiGRU+ATTN'):
print('[TRAIN ACTION] JDDC')
dialog_used = 10
data_name = data_name.replace('\r', '')
model_name = model_name.replace('\r', '')
print('dialog used', dialog_used)
name = f'act_{data_name}_{model_name}_{fold}'
print('TRAIN ::', nam... |
_function_dispatch(_is_type_dispatcher)
def iscomplex(x):
    """Element-wise test for a nonzero imaginary part.

    Returns a boolean array (0-d input yields a scalar via ``[()]``); arrays
    of non-complex dtype map to all-False without inspecting values.
    """
    arr = asanyarray(x)
    if not issubclass(arr.dtype.type, _nx.complexfloating):
        # Real dtype: nothing can be complex.
        return zeros(arr.shape, bool)[()]
    return arr.imag != 0
class FunnelModelTester():
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act='gelu_new', hidden_dropout=0.1, atten... |
def find_matching_files_in_dir(search_dir, filepattern):
if (type(filepattern) == str):
logging.info(f'Searching for files containing {filepattern} in directory tree at {search_dir}')
filepattern = re.compile((('.*' + re.escape(filepattern)) + '.*'))
else:
logging.info(f'Searching for fi... |
def construct_dialog_fact(example, para_generator, hallu_generator, dataset):
if (example['fact'] == ''):
return None
assert (dataset in ['persona_chat_fact', 'topical_chat_fact'])
if (dataset == 'persona_chat_fact'):
n_fact_sents = np.random.randint(1, 4)
example_fact_sents = sent_t... |
class BaseAgent():
def __init__(self, question: str, key: str, llm: BaseLLM, context_len: int=2000, max_steps: int=10, docstore: Docstore=Wikipedia()) -> None:
self.question = question
self.answer = ''
self.key = key
self.max_steps = max_steps
self.agent_prompt = ''
s... |
class BasicStem(nn.Module):
def __init__(self, in_channels: int=3, out_channels: int=64, norm: str='BN', caffe_maxpool: bool=False):
super().__init__()
self.conv1 = Conv2d(in_channels, out_channels, kernel_size=7, stride=2, padding=3, bias=False, norm=get_norm(norm, out_channels))
self.caffe... |
def get_interactions(x_train, x_test, model, interaction_function):
    """Compute feature interactions for *model* on *x_test* with *x_train*
    as the baseline, and return them.

    Bug fix: the original assigned the result to a local and implicitly
    returned None; the computed interactions are now returned.
    """
    return interaction_function(model, x_test, baseline=x_train)
def run_model(model_name, model, tokenizer, input_string, **generator_args):
    """Tokenize *input_string*, move it to the device chosen for *model_name*,
    generate with *model*, and return the decoded output strings."""
    encoded = tokenizer.encode(input_string, return_tensors='pt')
    encoded = allocate2gpu(encoded, model_name)
    generated = model.generate(encoded, **generator_args)
    return tokenizer.batch_decode(generated, skip_special_tokens=True)
class Pendulum():
def __init__(self, nbJoint=1):
self.viewer = Display()
self.visuals = []
self.model = pin.Model()
self.createPendulum(nbJoint)
self.data = self.model.createData()
self.q0 = zero(self.model.nq)
self.DT = 0.05
self.NDT = 2
self.... |
def LSTMWithAttention(model, decoder_inputs, decoder_input_lengths, initial_decoder_hidden_state, initial_decoder_cell_state, initial_attention_weighted_encoder_context, encoder_output_dim, encoder_outputs, encoder_lengths, decoder_input_dim, decoder_state_dim, scope, attention_type=AttentionType.Regular, outputs_with_... |
def main():
setup_logging()
p = argparse.ArgumentParser(description='')
p.add_argument('--render-only', action='store_true')
p.add_argument('--validate-only', action='store_true')
p.add_argument('--spacetime-only', action='store_true')
p.add_argument('--test-optim', action='store_true')
p.ad... |
def reduce_sum(seq_batch):
    """Sum a sequence batch using uniform (all-ones) weights shaped like its
    mask."""
    uniform_weights = tf.ones(shape=tf.shape(seq_batch.mask))
    return weighted_sum(seq_batch, uniform_weights)
class Identity(nn.Module):
    """No-op module: forwards its input untouched while advertising *c_mid*
    as ``out_channels`` (extra positional/keyword args are ignored)."""

    def __init__(self, c_mid, *args, **kwargs):
        super(Identity, self).__init__()
        self.out_channels = c_mid

    def forward(self, x):
        return x
def test_tac_prepare():
    """PrepareTac output must match the stored combined files for both the
    gold and the system link/query pairs."""
    for linkf, queryf, preparedf in (
        (TAC_GOLD_LINKS, TAC_GOLD_QUERIES, TAC_GOLD_COMB),
        (TAC_SYS_LINKS, TAC_SYS_QUERIES, TAC_SYS_COMB),
    ):
        prepared = PrepareTac(linkf, queryf)()
        # `with` fixes the file handle the original left unclosed.
        with open(preparedf) as expected:
            assert prepared == expected.read().rstrip('\n')
class TestICEWindowService():
TEST_TOKEN_IDS: List[int] = [20123, 21490, 20108, 22581, 20111, 22430, 48828, 20019, 21172, 27993, 20014, 20107, 20125, 20105, 44550, 27193, 22258, 20165, 20101, 20100, 33572, 22661, 20108, 24235, 20011, 28882, 20201, 59599, 30558, 20019, 68731, 20014, 20109, 24853, 20103, 20238, 24878... |
def register_Ns3LteFfrSapProvider_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::LteFfrSapProvider const &', 'arg0')])
cls.add_method('GetAvailableDlRbg', 'std::vector< bool >', [], is_pure_virtual=True, is_virtual=True)
cls.add_method('GetAvailableUlRbg', 'std::vect... |
def create_env_instance(args, instance, decorr_steps):
instance_seed = (args.seed + instance)
decorr_steps = (None if (decorr_steps is None) else (decorr_steps * instance))
if args.env_name.startswith('retro:'):
env = create_retro_env(args, instance_seed, instance, decorr_steps)
elif args.env_na... |
def run(data_shape: tuple, reshaped_shape: tuple, vec_width=1, queue=None):
ptmodel = Model(reshaped_shape)
x = torch.rand(data_shape)
torch_output = ptmodel(x)
import daceml.onnx as donnx
with dace.library.change_default(donnx.ONNXReshape, 'pure'):
dace_model = DaceModule(ptmodel, auto_opti... |
def __surface_distances(result, reference, voxelspacing=None, connectivity=1):
result = numpy.atleast_1d(result.astype(numpy.bool))
reference = numpy.atleast_1d(reference.astype(numpy.bool))
if (voxelspacing is not None):
voxelspacing = _ni_support._normalize_sequence(voxelspacing, result.ndim)
... |
()
def response(request, response_factory):
    # Pytest-style fixture (its decorator is cut off above this view): builds
    # a response object for the parametrized content type.
    # NOTE(review): semantics of response_factory.requests are assumed from
    # its name — confirm at the factory's definition.
    return response_factory.requests(content_type=request.param)
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args,... |
class ProductOfSimplicialSets_finite(ProductOfSimplicialSets, PullbackOfSimplicialSets_finite):
def __init__(self, factors=None):
PullbackOfSimplicialSets_finite.__init__(self, [space.constant_map() for space in factors])
self._factors = tuple([f.domain() for f in self._maps])
def projection_map... |
class ResNet101(nn.Module):
def __init__(self, block, layers, num_classes, phase):
self.inplanes = 64
self.phase = phase
super(ResNet101, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64, affine=affine... |
class SingleStageDetector(BaseDetector, RPNTestMixin, BBoxTestMixin, MaskTestMixin):
def __init__(self, backbone, neck=None, bbox_head=None, extra_head=None, train_cfg=None, test_cfg=None, pretrained=None):
super(SingleStageDetector, self).__init__()
self.backbone = builder.build_backbone(backbone)
... |
class capfilt_dataset(Dataset):
def __init__(self, ann_path, transform):
f = open(ann_path, 'r')
self.ann = json.load(f)
print(('loading %s' % len(self.ann)))
self.transform = transform
def __len__(self):
return len(self.ann)
def __getitem__(self, index):
ann ... |
def test():
field_to_content = {'x': {'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': None}}
form = ak.forms.from_dict({'class': 'RecordArray', 'fields': field_to_content.keys(), 'contents': field_to_content.values(), 'parameters': {}, 'form_key': None})
assert ... |
def evaluate_val(df_val):
assert (len(df_val) == len(df_val.index.unique()))
mse = np.mean(np.square((df_val['true_mos'] - df_val['pred_mos'])))
utt_srcc = scipy.stats.spearmanr(df_val['true_mos'], df_val['pred_mos'])[0]
print('CV UTT MSE: {:f}'.format(mse))
print('CV UTT SRCC: {:f}'.format(utt_srcc... |
class NegatableFlag(argparse.Action):
def __init__(self, option_strings, dest, default=False, required=False, help=None):
neg_options = []
for opt in option_strings:
if opt.startswith('--no-'):
raise ValueError('Flags cannot begin with "--no-"')
if opt.startsw... |
class _GreaterThanEq(Constraint):
def __init__(self, lower_bound):
self.lower_bound = lower_bound
def check(self, value):
return (self.lower_bound <= value)
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(lower_bound={})'.format(self.lower_bound)
... |
def get_scope_id_to_label(filename):
    """Parse a whitespace-delimited file into a {scop_id: label} dict.

    Column 0 of each line is the SCOP id and column 2 the label.  Fixes the
    original's double ``split()`` per line and skips blank lines instead of
    raising IndexError on them.
    """
    scop_id_to_label = {}
    with open(filename, 'r') as f:
        for line in f:
            fields = line.split()
            if not fields:
                continue  # tolerate blank / whitespace-only lines
            scop_id_to_label[fields[0]] = fields[2]
    return scop_id_to_label
def get_journal_reference(entry, string=''):
if ('journal_ref' in entry):
return entry['journal_ref']
if ('arxiv_journal_ref' in entry):
return entry['arxiv_journal_ref']
if ('arxiv_doi' in entry):
try:
return get_journal_reference_from_doi(entry['arxiv_doi'], string)
... |
class TypeSpace(SageObject):
def __init__(self, f, p, base_extend=True):
self._p = p
self._f = f
if (f.level() % p):
raise ValueError('p must divide level')
amb = ModularSymbols(self.group(), f.weight())
self.e_space = find_in_space(f, amb, base_extend=base_extend... |
def get_actions(obs):
    """Available actions for an observation: a 'search' action when the page
    shows a [Search] box, otherwise 'click' with the page's buttons."""
    if '[Search]' in obs:
        return {'search': []}
    return {'click': get_buttons(obs)}
_keyword(color='rgbcolor')
(alpha=1, rgbcolor=(0, 0, 1), edgecolor=None, thickness=None, legend_label=None, legend_color=None, aspect_ratio=1.0, fill=True)
def polygon2d(points, **options):
from sage.plot.plot import xydata_from_point_list
from sage.plot.all import Graphics
if (options['thickness'] is None)... |
def _replicatable_module(module, memo=None):
def descendant_modules(module):
gen = module.modules()
next(gen)
return gen
if (not _is_jit_enabled()):
return True
if (memo is None):
memo = set()
memo.add(module)
if _is_script_module(module):
memo.update(... |
def _compute_node_activation_memory(n: BaseNode, node_nbits: int) -> float:
    """Activation memory in bytes for node *n*: the origin activation node's
    total output size times the bit width, converted bits -> bytes."""
    origin = _get_origin_activation_node(n)
    return origin.get_total_output_params() * node_nbits / BITS_TO_BYTES
def qfsolve(G):
    """Solve the quadratic form *G* via PARI's qfsolve.

    A ``t_COL`` result becomes a rational vector; any other result is
    returned as a Sage integer.
    """
    solution = G.__pari__().qfsolve()
    if solution.type() != 't_COL':
        return ZZ(solution)
    return vector(QQ, solution)
def get_cfg(existing_cfg, _log):
_sanity_check(existing_cfg, _log)
import ntpath, os, ruamel.yaml as yaml
with open(os.path.join(os.path.dirname(__file__), '{}.yml'.format(ntpath.basename(__file__).split('.')[0])), 'r') as stream:
try:
ret = yaml.load(stream, Loader=yaml.Loader)
... |
def construct_icl_examples(examples, k):
    """Build an in-context-learning prompt from *k* demonstrations sampled
    (with a fixed seed, hence reproducibly) from *examples*."""
    np.random.seed(88)
    demos = np.random.choice(examples, size=k)
    return ''.join(
        f"{ex['options'][0]['premise']}{ex['label_list'][ex['label']]}\n"
        for ex in demos
    )
class ConformerEncoder(nn.Module):
def __init__(self, input_dim: int=80, encoder_dim: int=512, num_layers: int=17, num_attention_heads: int=8, feed_forward_expansion_factor: int=4, conv_expansion_factor: int=2, input_dropout_p: float=0.1, feed_forward_dropout_p: float=0.1, attention_dropout_p: float=0.1, conv_dropo... |
def get_examples_book(input_dir, output_dir, book_id):
filename = ((input_dir + str(book_id)) + '.xml')
parser = ET.XMLParser(huge_tree=True)
tree = ET.parse(filename, parser=parser)
book = tree.getroot()
b = book.find('.//body')
headers = b.findall('.//header')
start_para_nums = list()
... |
def drop_connect(inputs, p, training):
if (not training):
return inputs
batch_size = inputs.shape[0]
keep_prob = (1 - p)
random_tensor = keep_prob
random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
binary_tensor = torch.floor(random_tensor)
o... |
def preprocess(snakemake_args=(), cores=1, conda_frontend='conda'):
    """Run the showyourwork preprocessing Snakemake workflow (prep.smk)."""
    prep_smk = paths.showyourwork().workflow / 'prep.smk'
    run_snakemake(
        prep_smk.as_posix(),
        run_type='preprocess',
        cores=cores,
        conda_frontend=conda_frontend,
        extra_args=snakemake_args,
        check=True,
    )
def test_lof_novelty_true():
n_neighbors = 4
rng = np.random.RandomState(0)
X1 = rng.randn(40, 2)
X2 = rng.randn(40, 2)
est_chain = make_pipeline(KNeighborsTransformer(n_neighbors=n_neighbors, mode='distance'), LocalOutlierFactor(metric='precomputed', n_neighbors=n_neighbors, novelty=True, contamina... |
def indent_files(files, diff=False, debug=False, level=0, inplace=False):
output = []
for f in files:
dst = indent(f, debug=debug, level=level)
output.append([f, dst])
if inplace:
for (src, dst) in output:
shutil.copyfile(dst, src)
return True
failed = []
... |
def _layer_norm_fwd(x, weight, bias, eps, residual=None, out_dtype=None, residual_dtype=None, is_rms_norm=False):
if (residual is not None):
residual_dtype = residual.dtype
(M, N) = x.shape
assert (x.stride((- 1)) == 1)
if (residual is not None):
assert (residual.stride((- 1)) == 1)
... |
class TestTabularTransforms(unittest.TestCase):
def test_standardize_transform_class(self):
data = np.array([[(- 0.), 0., (- 2.)], [(- 0.), (- 0.6893588), (- 1.)], [0., 1., 0.], [(- 0.), 0., 1.], [(- 1.), (- 0.), (- 0.)]])
transform = StandardizeTransform(with_mean=True, with_std=True)
trans... |
class DatasetSplit():
def __init__(self, processed_filename, raw_filename, load_function):
if os.path.exists(processed_filename):
print(('Loading preprocessed data from ' + processed_filename))
with open(processed_filename, 'rb') as infile:
self.examples = pickle.load... |
class KR_type_A2(KirillovReshetikhinGenericCrystal):
def module_generator(self):
R = self.weight_lattice_realization()
Lambda = R.fundamental_weights()
r = self.r()
s = self.s()
weight = ((s * Lambda[r]) - (s * Lambda[0]))
if (r == (self.cartan_type().rank() - 1)):
... |
def register_functions(root_module):
    """Register free functions for the FatalImpl and Hash C++ namespaces."""
    for namespace, register in (
        ('FatalImpl', register_functions_ns3_FatalImpl),
        ('Hash', register_functions_ns3_Hash),
    ):
        register(root_module.add_cpp_namespace(namespace), root_module)
class KLConstantSchedule(KLSchedule):
    """No-op KL schedule: every hook does nothing, so the KL weight stays
    constant.  NOTE(review): the weight itself presumably lives on the
    KLSchedule base class — confirm there."""
    def __init__(self):
        pass
    def on_train_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
        # Intentionally empty: a constant schedule needs no per-epoch update.
        pass
    def _anneal_fn(self, epoch: int) -> None:
        # No annealing for a constant schedule.
        pass
def test_stitch_boxes_into_lines():
boxes = [[0, 0, 1, 0, 1, 1, 0, 1], [2, 0.5, 3, 0.5, 3, 1.5, 2, 1.5], [3, 1.2, 4, 1.2, 4, 2.2, 3, 2.2], [5, 0.5, 6, 0.5, 6, 1.5, 5, 1.5], [6, 1.5, 7, 1.25, 7, 1.75, 6, 1.75]]
raw_input = [{'box': boxes[i], 'text': str(i)} for i in range(len(boxes))]
result = stitch_boxes_i... |
def main(hparams):
model = AffWild2VA(hparams)
checkpoint = torch.load(hparams.checkpoint, map_location=(lambda storage, loc: storage))
model.load_state_dict(checkpoint['state_dict'])
print('Loaded pretrained weights')
trainer = Trainer(gpus=hparams.gpus, nb_gpu_nodes=hparams.nodes, distributed_back... |
def execute(chunk: Chunk):
    """For segmentation chunks, print the object count and the id range;
    other chunk types are ignored."""
    if not chunk.is_segmentation:
        return
    uniq = fastremap.unique(chunk.array, return_counts=False)
    print(f'{len(uniq)} objects with min id {uniq.min()} and max id {uniq.max()}')
def preprocess(id_file, output_dir):
with open(id_file) as f:
all_ids = json.load(f)
if (not os.path.exists(output_dir)):
os.makedirs(output_dir)
for split in all_ids.keys():
file_dir = os.path.join(output_dir, split)
if (not os.path.exists(file_dir)):
os.makedirs... |
def cppclasstype(name, base_classes):
    """Construct a CppClassType named *name* whose C-level name is the
    original name prefixed with 'CPP_'."""
    c_name = 'CPP_' + name
    return pt.CppClassType(name, None, c_name, base_classes)
def nvidia_modified():
model = Sequential()
model.add(Conv2D(24, kernel_size=(5, 5), strides=(2, 2), input_shape=(WIDTH, HEIGHT, 1), activation='elu'))
model.add(Conv2D(36, kernel_size=(5, 5), strides=(2, 2), activation='elu'))
model.add(Conv2D(48, kernel_size=(5, 5), strides=(2, 2), activation='elu'))
... |
def resnet_v2_50(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, reuse=None, scope='resnet_v2_50'):
blocks = [resnet_utils.Block('block1', bottleneck, (([(256, 64, 1)] * 2) + [(256, 64, 2)])), resnet_utils.Block('block2', bottleneck, (([(512, 128, 1)] * 3) + [(512, 128, 2)])), resn... |
('data.caltech101', 'class')
class Caltech101(base.ImageTfdsData):
def __init__(self, data_dir=None):
dataset_builder = tfds.builder('caltech101:3.0.1', data_dir=data_dir)
dataset_builder.download_and_prepare()
trainval_count = dataset_builder.info.splits['train'].num_examples
train_... |
class NudityCheckClient():
MODEL_DOWNLOAD_URL: str = '
def __init__(self, cache_config: CacheConfig):
try:
from nudenet import NudeClassifier
except ModuleNotFoundError as e:
handle_module_not_found_error(e, ['heim'])
self.cache = Cache(cache_config)
self.... |
def schechter_vdf(alpha, beta, vd_star, vd_min, vd_max, size=None, resolution=1000):
if (np.ndim(alpha) > 0):
raise NotImplementedError('only scalar alpha is supported')
alpha_prime = ((alpha / beta) - 1)
(x_min, x_max) = (((vd_min / vd_star) ** beta), ((vd_max / vd_star) ** beta))
samples = sch... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.