code stringlengths 101 5.91M |
|---|
class ArikiKoikeAlgebra(Parent, UniqueRepresentation):
def __classcall_private__(cls, r, n, q=None, u=None, R=None):
if (u is None):
if (q is not None):
R = q.parent()
if (R is None):
R = PolynomialRing(ZZ, 'u', r)
u = R.gens()
... |
def test_foldx():
test_dir = Path(__file__).parent.resolve()
test_pdb_asset = (test_dir / './files/1ggx.pdb')
pdb_path = extract_chain(test_pdb_asset, chain='A')
work_dir = (Path(__file__).parent / 'tmp')
work_dir.mkdir(parents=True, exist_ok=True)
(residue_seq, pos_seq) = pdb_to_residues(pdb_pa... |
def prepareClassifier(module, outFeatures):
    """Instantiate ``module`` and replace its final ``fc`` layer with a
    fresh linear head producing ``outFeatures`` outputs.

    The new head keeps the input width of the original ``fc`` layer, so the
    backbone weights are reused unchanged while the classifier is resized.
    """
    net = module()
    width = net.fc.in_features
    net.fc = torch.nn.Linear(width, outFeatures)
    return net
def rogue2_bleu(gt, pred):
tokens = nltk.word_tokenize(gt)
bigramgt = set(nltk.bigrams(tokens))
tokens = nltk.word_tokenize(pred)
bigrampred = set(nltk.bigrams(tokens))
return ((len(bigramgt.intersection(bigrampred)) / (len(bigramgt) * 1.0)), (len(bigramgt.intersection(bigrampred)) / (len(bigrampred... |
class physicalvolume(geomobject):
    """A named physical volume: a group of 3-D entities in a geometry."""

    def __init__(self, g, n, v):
        # Keep a handle on the owning geometry, the volume's name, and the
        # indices of its constituent 3-D entities.
        self.geom = g
        self.n = n
        self.volumes = v

    def getvolumes(self):
        """Resolve the stored indices against the geometry's ``d3`` table."""
        return [self.geom.d3[idx] for idx in self.volumes]
.experimental
.parametrize('dataset', [pytest.param('simple_dataframe_array'), pytest.param('simple_dataframe_array_pandas')])
def test_array_columns(dataset, request):
simple_dataframe_array = request.getfixturevalue(dataset)
generator = SequenceGenerator(groupby_column=['user_id'], transform_columns=['item_id... |
class RandomWindow(FixedWindow):
def __init__(self, low=None, high=None, windowlen=None, **kwargs):
super().__init__(windowlen=windowlen, **kwargs)
self.low = low
self.high = high
if (high is not None):
if (low is None):
low = 0
if (windowlen i... |
.openapi_version('3.0')
.operations('success', 'text')
def test_conditional(testdir, app_schema, openapi3_base_url):
if (sys.version_info < (3, 9)):
dec1 = '\nauth = schema.auth()\_to(method="GET", path="/text")'
dec2 = '\nauth = schema.auth()\_to(method="GET", path="/success")'
else:
de... |
def process_variant(variant):
rl_variant = variant['rl_variant']
if args.debug:
rl_variant['algo_kwargs']['base_kwargs']['num_epochs'] = 4
rl_variant['algo_kwargs']['base_kwargs']['batch_size'] = 128
rl_variant['vis_kwargs']['num_samples_for_video'] = 2
rl_variant['vae_wrapped_en... |
_level_function()
def categories(array, highlevel=True, *, behavior=None, attrs=None):
    """Extract the categories of a categorical ``array``.

    NOTE(review): the ``yield (array,)`` makes this a generator driven by a
    dispatch decorator (truncated above this block) — presumably the
    dispatcher consumes the yielded arguments first, then ``_impl`` performs
    the actual work; confirm against the decorator's contract.
    """
    (yield (array,))
    return _impl(array, highlevel, behavior, attrs)
_sentencepiece
_tokenizers
class FNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = FNetTokenizer
rust_tokenizer_class = FNetTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
test_sentencepiece_ignore_case = True
test_seq2seq = False
def setUp(s... |
def get_version(version):
    """Normalize a two-component version string to three components.

    ``"1.2"`` becomes ``"1.2.0"``; any other format is returned untouched.
    """
    is_two_part = re.match(r'^\d+\.\d+$', version)
    return version + '.0' if is_two_part else version
class ViewNode(ScheduleTreeNode):
    """Schedule-tree node that creates a data view: ``target`` aliases data
    from ``source`` through ``memlet``."""

    # Name of the view being created (left-hand side in as_string).
    target: str
    # Name of the viewed container — presumably the backing array; confirm.
    source: str
    # Subset/access descriptor rendered after `view` in as_string.
    memlet: Memlet
    # Descriptor of the source container.
    src_desc: data.Data
    # Descriptor of the view; its shape is shown in as_string.
    view_desc: data.Data

    def as_string(self, indent: int=0):
        # Render e.g. `t = view <memlet> as <shape>` at the given indent level.
        return ((indent * INDENTATION) + f'{self.target} = view {self.memlet} as {self.view_desc.shape}')
def torch_attack_serializer():
    """Fixture-style generator: yield a ``SkorchSerializer`` that writes
    under a throwaway directory, then remove that directory on teardown.

    The yield/teardown shape is the pytest fixture pattern — presumably a
    ``@pytest.fixture`` decorator precedes this in the original file.
    """
    prefix = '__serialized_torch_attack_test_dir'
    serializer = SkorchSerializer(torch_attack_model_fn, prefix)
    (yield serializer)
    # Teardown: delete everything the serializer wrote during the test.
    shutil.rmtree(prefix)
def build_parser():
    """Construct the command-line parser.

    Options: --model_dir (str), --min_size / --max_size (ints, default
    None), and --ft_name (default 'pool1_output').
    """
    parser = argparse.ArgumentParser()
    for flag, options in (
        ('--model_dir', {}),
        ('--min_size', {'type': int, 'default': None}),
        ('--max_size', {'type': int, 'default': None}),
        ('--ft_name', {'default': 'pool1_output'}),
    ):
        parser.add_argument(flag, **options)
    return parser
def format_end2end_prompt(q, ans, info=False):
    """Build a judge prompt for a question/answer pair.

    ``info=True`` asks the judge whether the answer is helpful; otherwise
    it asks whether the answer is true.
    """
    template = 'Q: {0}\nA: {1}\nHelpful:' if info else 'Q: {0}\nA: {1}\nTrue:'
    return template.format(q, ans)
def wald_pdf(x):
    """Evaluate exp(-(x-1)^2 / (2x)) / sqrt(x^3) at ``x``.

    This is proportional to the Wald (inverse Gaussian, mu=1, lambda=1)
    density — the constant 1/sqrt(2*pi) factor is omitted.  Returns 0.0
    outside the support (x <= 0).
    """
    if x <= 0:
        return 0.0
    return math.exp(-(x - 1) ** 2 / (2 * x)) / math.sqrt(x ** 3)
def test_itruediv():
    # Trace in-place true division through an ObjectProxy: the proxy must
    # track the plain value and record the argument type it saw.
    plain = 42
    tracked = tt.ObjectProxy(plain)
    original_proxy = tracked  # `/=` rebinds `tracked`; keep the traced object
    plain /= 2
    tracked /= 2
    assert plain == tracked
    trace = tt.UsageTraceNode.from_proxy(original_proxy)
    assert int in trace.children['__itruediv__'].arg_types[0]
class PIDLockFile(LockBase):
def __init__(self, path, threaded=False, timeout=None):
LockBase.__init__(self, path, False, timeout)
self.unique_name = self.path
def read_pid(self):
return read_pid_from_pidfile(self.path)
def is_locked(self):
return os.path.exists(self.path)
... |
def train(cfg, output_dir=''):
logger = logging.getLogger('fastmvsnet.trainer')
set_random_seed(cfg.RNG_SEED)
(model, loss_fn, metric_fn) = build_model(cfg)
logger.info('Build model:\n{}'.format(str(model)))
model = nn.DataParallel(model).cuda()
optimizer = build_optimizer(cfg, model)
schedu... |
class DepthwiseDenseAffineQuantize(Model):
def __init__(self, output_shape=None, *, input_shape=None, input_point_size=0, depth_size=0, quantize=True, weight_bits=8, output_bits=16, input_bits=0, weight_scale=(1 / (1 << 8)), output_scale=(1 / (1 << 8)), input_scale=(1 / (1 << 8)), initialize_std=0.01, initializer='... |
class LambdaLR(_LRScheduler):
def __init__(self, optimizer, lr_lambda, last_epoch=(- 1)):
self.optimizer = optimizer
if ((not isinstance(lr_lambda, list)) and (not isinstance(lr_lambda, tuple))):
self.lr_lambdas = ([lr_lambda] * len(optimizer.param_groups))
else:
if (... |
class MLDocParser():
def __call__(self, file_path: str):
with open(file_path, 'r', encoding='utf-8') as f:
for line in f:
(label, sentence) = line.strip().split('\t')
sentence = re.sub('\\u3000+', '\u3000', sentence)
sentence = re.sub(' +', ' ', se... |
class A000124(SloaneSequence):
    """OEIS A000124, the central polygonal numbers: a(n) = n(n+1)/2 + 1
    (the Lazy Caterer's sequence)."""

    def __init__(self):
        # The sequence is indexed starting at n = 0.
        SloaneSequence.__init__(self, offset=0)
    def _repr_(self):
        return "Central polygonal numbers (the Lazy Caterer's sequence): n(n+1)/2 + 1."
    def _eval(self, n):
        # n(n+1)/2 + 1 as a Sage integer; // is exact since n(n+1) is even.
        return ZZ((((n * (n + 1)) // 2) + 1))
def visualization_opts(parser):
group = parser.add_argument_group('Visualization options')
group.add_argument('--im-or-file', type=str, required=True, help='Name of the image or list of images in file to be visualized')
group.add_argument('--is-type-file', action='store_true', default=False, help='Is it a f... |
_start_docstrings('\n PoolFormer Model transformer with an image classification head on top\n ', POOLFORMER_START_DOCSTRING)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.... |
def plot_roundabout():
(fig, axes) = plt.subplots(1, 8, figsize=((8 * 3.5), 3.5))
plot_uniform(axes[0], 'radius', 20.0, 40.0)
plot_categorical(axes[1], 'num_roads', [0.25, 0.25, 0.25, 0.25], [2, 3, 4, 5])
plot_categorical(axes[2], 'num_lanes', [0.5, 0.5], [1, 2])
plot_normal(axes[3], 'angle_offset',... |
def get_correct(ngrams_ref, ngrams_test, correct, total):
for rank in ngrams_test:
for chain in ngrams_test[rank]:
total[rank] += ngrams_test[rank][chain]
if (chain in ngrams_ref[rank]):
correct[rank] += min(ngrams_test[rank][chain], ngrams_ref[rank][chain])
retur... |
def get_train_val_loader(train_year: Union[(str, int)], valid_year: Union[(str, int)], split: int, batch_size: int, root: str=C.ROOT, num_workers: Optional[int]=None) -> Tuple[(Any, Any)]:
label_dir_name = f'{train_year}-{valid_year}-split{split}'
iqon_outfits = IQONOutfits(train_year=train_year, valid_year=val... |
def glorot_uniform_unit(tensor, scale=1):
size = tensor.size()
if (len(size) == 2):
fan_in = size[0]
fan_out = size[1]
elif (len(size) == 3):
fan_in = size[1]
fan_out = size[2]
else:
raise Exception('Shape not supported')
bound = np.sqrt((6.0 / (fan_in + fan_o... |
class DCN_TraDeS(DCNv2):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, deformable_groups=1):
super(DCN_TraDeS, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, deformable_groups)
def forward(self, input_feat, offset, mask):
... |
def _run_in_process(target, *args, **kwargs):
process = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
process.daemon = True
try:
process.start()
process.join(timeout=10)
return process.exitcode
finally:
if process.is_alive():
process.termina... |
class ImageDataset(Dataset):
def __init__(self, dataset, transform=None):
self.dataset = dataset
self.transform = transform
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
(img, (pid, camid)) = self.dataset[index]
if (self.transform is not No... |
def make_cost_matrix(profit_matrix, inversion_function):
    """Convert a profit matrix into a cost matrix by applying
    ``inversion_function`` to every entry (row/column order preserved)."""
    return [[inversion_function(cell) for cell in row] for row in profit_matrix]
def valid_boundary(x, with_score=True):
    """Check that ``x`` is a plausible boundary sequence.

    A boundary needs at least 8 numbers, and its length parity must match
    the format: odd when a trailing score value is expected
    (``with_score=True``), even otherwise.
    """
    count = len(x)
    if count < 8:
        return False
    if count % 2 == 0:
        return not with_score
    return bool(with_score)
def search_core_object(object_name, dtypes):
    """Look up a registered core object by name and dtypes.

    Builds the canonical registry key via ``make_core_object_name`` and
    returns the matching entry from the module-level ``_core_object_dict``.
    Raises ``KeyError`` if nothing was registered under that key.
    """
    core_object_name = make_core_object_name(object_name, dtypes)
    return _core_object_dict[core_object_name]
def equal():
    """Return a binary predicate that is true when two intervals share both
    endpoints (equal 't1' and equal 't2')."""
    def _same_span(intrvl1, intrvl2):
        return intrvl1['t1'] == intrvl2['t1'] and intrvl1['t2'] == intrvl2['t2']
    return _same_span
class Dirichlet(object):
def __init__(self, gamma):
assert ((np.all(gamma) >= 0) and (gamma.shape[(- 1)] >= 1))
self.gamma = gamma
def log_probability(self, x):
assert (np.allclose(x.sum(axis=(- 1)), 1.0) and (np.amin(x) >= 0.0))
return ((gammaln(self.gamma.sum()) - gammaln(self.... |
def main(config):
logger = config.get_logger('test')
data_loader = getattr(module_data, config['data_loader']['type'])(config['data_loader']['args']['data_dir'], batch_size=512, shuffle=False, validation_split=0.0, training=False, num_workers=2).split_validation()
model = config.initialize('arch', module_ar... |
def read_run_separate_aggregate(pred_dir: str, aggregation='interleave', scores='ranks'):
if ((aggregation == 'overlap_scores') or (aggregation == 'mean_scores')):
scores = 'scores'
run = read_run_separate(pred_dir, scores)
if (aggregation == 'overlap_docs'):
print('aggregate overlapping doc... |
class Model(LinearSeq):
def __init__(self, user_size, item_size, size, batch_size, learning_rate, learning_rate_decay_factor, user_attributes=None, item_attributes=None, item_ind2logit_ind=None, logit_ind2item_ind=None, n_input_items=0, loss_function='ce', logit_size_test=None, dropout=1.0, top_N_items=100, use_sep... |
def final_cleanup_and_write(filename, res):
orig = res
res = [x for x in res if (('\t' not in x[0]) and ('\t' not in x[1]) and (len(x[0]) >= 2) and (len(x[1]) >= 2))]
print('Length before levenshtein: ', len(res))
res = [x for x in res if (jamo_levenshtein(x[0], x[1]) > 10)]
print('Length after leve... |
class SwedishStemmer(_ScandinavianStemmer):
__vowels = 'aeiouyaao'
__s_ending = 'bcdfghjklmnoprtvy'
__step1_suffixes = ('heterna', 'hetens', 'heter', 'heten', 'anden', 'arnas', 'ernas', 'ornas', 'andes', 'andet', 'arens', 'arna', 'erna', 'orna', 'ande', 'arne', 'aste', 'aren', 'ades', 'erns', 'ade', 'are', ... |
def extract_db_history(dialog_id, turn_id) -> List[str]:
db_dialog_history = []
if (turn_id > 0):
previous_turns = list(dialog_db_collection.find({'dialog_id': dialog_id, 'turn_id': {'$lt': turn_id}}, {'turn_id': 1, 'system_name': 1, 'user_utterance': 1, 'agent_utterance': 1}).sort('turn_id', pymongo.AS... |
def validate_ie_pps(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(pps.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
if (column != ''):
... |
class ConcatDataset(FairseqDataset):
def cumsum(sequence, sample_ratios):
(r, s) = ([], 0)
for (e, ratio) in zip(sequence, sample_ratios):
curr_len = int((ratio * len(e)))
r.append((curr_len + s))
s += curr_len
return r
def __init__(self, datasets, sam... |
def tosa_to_llvm(tosa_mlir: str, objfile: str):
cmd = ['mlir-opt', tosa_mlir]
lower_param = '--pass-pipeline="builtin.module(func.func(tosa-to-linalg-named, tosa-to-linalg, tosa-to-arith, tosa-to-tensor, tosa-to-scf), convert-tensor-to-linalg, func.func(canonicalize, linalg-bufferize, convert-linalg-to-affine-l... |
def _getdatatransformswm(is_imgnet32=False):
if is_imgnet32:
transform_wm = transforms.Compose([transforms.CenterCrop(32), transforms.ToTensor(), transforms.Normalize((0.4811, 0.4575, 0.4079), (0.2604, 0.2532, 0.2682))])
else:
transform_wm = transforms.Compose([transforms.CenterCrop(32), transfo... |
def test_method_statement_eq(default_test_case, method_mock):
var1 = vr.VariableReference(default_test_case, default_test_case.test_cluster.type_system.convert_type_hint(MagicMock))
var2 = vr.VariableReference(default_test_case, default_test_case.test_cluster.type_system.convert_type_hint(MagicMock))
args =... |
class Stage2Hparams():
embed_dim: int = 1536
n_layers: int = 42
n_heads: int = 24
n_dense_layers: int = 42
ctx_len_img: int = 256
ctx_len_txt: int = 64
embd_pdrop: float = 0.0
resid_pdrop: float = 0.0
attn_pdrop: float = 0.0
mlp_bias: bool = True
attn_bias: bool = True
ge... |
class Scorer(object):
def keys(self) -> Set[str]:
raise NotImplementedError
def default_scores(self) -> Dict[(str, float)]:
return {key: 0.0 for key in self.keys()}
def score_single_ref(self, context: str, questions: List[str], answers: List[str], predictions: List[str], probabilities: List[... |
def init_backend(backend, *args, **kwargs):
    """Initialize the given backend.

    ``backend`` is presumably an Enum member whose ``value`` exposes an
    ``init_backend_handler`` callable — confirm against the backend enum.
    All remaining arguments are forwarded and its result returned.
    """
    return backend.value.init_backend_handler(*args, **kwargs)
class BaseEigModelScheme(BaseAdjModelScheme):
def get_default_config(self):
config_dict = super().get_default_config()
config_dict.update(model_name='dc_eig', cache_dir=HDict.L('c:f"data_cache/{c.dataset_name.upper()}/eig_{c.num_eig_features}"'), num_eig_features=20, sel_eig_features=8, use_eig=True... |
def compute_discrete_imitation_loss(policy: CategoricalPolicy, x: TorchObservation, action: torch.Tensor, beta: float) -> torch.Tensor:
    """Discrete imitation (behavior-cloning) loss with a logit penalty.

    The loss is the NLL of the expert ``action`` under the policy's
    categorical distribution, plus ``beta`` times the mean squared logit
    (a regularizer that keeps logits from growing without bound).
    """
    dist = policy(x)
    regularizer = dist.logits.pow(2).mean()
    log_probs = F.log_softmax(dist.logits, dim=1)
    nll = F.nll_loss(log_probs, action.view(-1))
    return nll + beta * regularizer
def download_image_from_url_val(url):
    """Download one validation image into ``<storage_dir>/val/``.

    The local filename is the basename of ``url``; the transfer itself is
    delegated to the module-level ``download_file`` helper.  Relies on the
    module-level ``storage_dir`` setting.
    """
    basename = os.path.basename(url)
    filename = os.path.join(storage_dir, 'val', basename)
    download_file(url, filename)
class InterpolationBlock(nn.Module):
def __init__(self, in_channel, output_dim, out_channel=None, activate=True):
super(InterpolationBlock, self).__init__()
self.activate = activate
out_channel = (out_channel if (out_channel is not None) else in_channel)
self.block = nn.Sequential(nn... |
class TestScaledGaussianMixture():
    """Placeholder test suite for ScaledGaussianMixture.

    All cases are stubs and currently assert nothing.
    """
    def test_init(self):
        # TODO: exercise construction.
        pass
    def test_fit(self):
        # TODO: exercise fitting behavior.
        pass
    def test_transform(self):
        # TODO: exercise the transform step.
        pass
class dummy_context_mgr():
    """No-op context manager.

    Binds ``None`` on entry and never suppresses exceptions — a stand-in
    for places where a real context manager is optional (compare
    ``contextlib.nullcontext``, which this predates or avoids importing).
    """

    def __enter__(self):
        # Nothing to set up; callers receive None as the `as` target.
        return None

    def __exit__(self, exc_type, exc_value, traceback):
        # Returning False lets any in-flight exception propagate.
        return False
def check_entities_equal(doc, expected):
    """Assert that ``doc.ents`` matches ``expected`` entity-for-entity.

    ``expected`` is a sequence of dicts mapping attribute names to expected
    values; every listed attribute of each entity must compare equal, and
    the entity counts must agree.
    """
    assert len(doc.ents) == len(expected)
    for entity, spec in zip(doc.ents, expected):
        for attr in spec:
            assert getattr(entity, attr) == spec[attr]
def get_json_from_tarfile():
    """Factory: return a loader that reads one JSON member out of a
    gzip-compressed tar archive and parses it."""
    def _load(archive_data_path, json_name):
        # extractfile() yields a binary stream for the named member.
        with tarfile.open(archive_data_path, 'r:gz', encoding='utf-8') as archive:
            member = archive.getmember(json_name)
            raw = archive.extractfile(member).read()
        return json.loads(raw.decode('utf8'))
    return _load
def flatten_shape(shape):
    """Merge the two leading dimensions of ``shape`` into one.

    A 1-D shape flattens to the empty (scalar) shape; otherwise the first
    two axes are multiplied together and the remaining axes are kept.
    """
    if len(shape) == 1:
        return ()
    merged = shape[0] * shape[1]
    return (merged,) + shape[2:]
def train_pinsage(model, device, loader, optimizer, weight_decay, config_dict):
model.train()
loss_accum = 0
for (step, batch) in enumerate(tqdm(loader, desc='Iteration')):
(user, item, item_neg) = batch
(user, item, item_neg) = (user.to(device), item.to(device), item_neg.to(device))
... |
def args_sanity_check(config, _log):
if (config['use_cuda'] and (not th.cuda.is_available())):
config['use_cuda'] = False
_log.warning('CUDA flag use_cuda was switched OFF automatically because no CUDA devices are available!')
assert (((config['run_mode'] in ['parallel_subproc']) and config['use... |
class AttentionWeightComputation(Function):
def forward(ctx, query_batch_cnt: torch.Tensor, key_batch_cnt: torch.Tensor, index_pair_batch: torch.Tensor, index_pair: torch.Tensor, query_features: torch.Tensor, key_features: torch.Tensor):
assert query_batch_cnt.is_contiguous()
assert key_batch_cnt.is... |
class Paraphraser():
def __init__(self, batch_size=16, max_length=128, num_return_sequences=[20, 20], beam_size=30):
self.batch_size = batch_size
self.num_return_sequences = num_return_sequences
self.beam_size = beam_size
self.max_length = max_length
self.ranker = ParaphraseR... |
def get_train_val_indices(train_dataset, val_split=0.2):
train_classes = np.unique(train_dataset.target)
train_idxs = []
val_idxs = []
for cls in train_classes:
cls_idxs = np.where((train_dataset.target == cls))[0]
v_ = np.random.choice(cls_idxs, replace=False, size=(int((val_split * len... |
def register_namespace_handler(importer_type, namespace_handler):
    """Register ``namespace_handler`` for the given ``importer_type``.

    Stores the handler in the module-level ``_namespace_handlers`` registry,
    overwriting any handler previously registered for that importer type.
    """
    _namespace_handlers[importer_type] = namespace_handler
_args('v', 'f', 'i', 'v', 'v', 'v', 'v')
def full_like(g, input, fill_value, dtype, layout, device, pin_memory=False, memory_format=None):
    """ONNX symbolic for ``torch.full_like``.

    Emits a Shape op for ``input`` and delegates to ``_constant_fill`` to
    build a constant tensor of that shape filled with ``fill_value``.
    ``layout``, ``device``, ``pin_memory`` and ``memory_format`` are
    accepted for signature parity but unused in the emitted graph.
    """
    shape = g.op('Shape', input)
    return _constant_fill(g, shape, dtype, fill_value)
class NumpyOutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
ret = doctest.OutputChecker.check_output(self, want, got, optionflags)
if (not ret):
if ('#random' in want):
return True
got = got.replace("'>", "'<")
... |
class PlanePartitions_CSSCPP(PlanePartitions):
def __init__(self, box_size):
if ((box_size[0] != box_size[1]) or (box_size[1] != box_size[2])):
raise ValueError('x, y, and z dimensions ({},{},{}) must all be equal'.format(*box_size))
if ((box_size[0] % 2) == 1):
raise ValueEr... |
def pytest_configure(config):
generate = config.getoption('generate', default=False)
output = config.getoption('output', default=serial.DATA_DIR)
disable = config.getoption('disable', default=False)
disable_coverage = config.getoption('disable_coverage', default=False)
serial._output_context.__setat... |
def safeMakeDirs(dir):
    """Create ``dir`` (and any missing parents) if it does not already exist.

    Best-effort: on failure (e.g. permissions) a message is printed instead
    of raising, preserving the original contract of never propagating an
    error to the caller.
    """
    try:
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists() guard and makes repeated calls idempotent.
        os.makedirs(dir, exist_ok=True)
    except OSError:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; filesystem failures are all OSError.
        print('Failed to make dirs at {}'.format(dir))
class ClassDataset(object):
def __init__(self, meta_train=False, meta_val=False, meta_test=False, meta_split=None, class_augmentations=None):
if (((meta_train + meta_val) + meta_test) == 0):
if (meta_split is None):
raise ValueError('The meta-split is undefined. Use either the ar... |
def get_dataset_names() -> List[str]:
    """Return the bundled dataset names: stems of the ``.csv`` files shipped
    in this package's ``data/`` directory."""
    data_dir = f'{dirname(__file__)}/data'
    return [
        os.path.splitext(entry)[0]
        for entry in os.listdir(data_dir)
        if entry.endswith('.csv')
    ]
def _step(state: State, action: Array) -> State:
state = state.replace(_board=state._board.at[action].set(state._turn))
won = _win_check(state._board, state._turn)
reward = jax.lax.cond(won, (lambda : jnp.float32([(- 1), (- 1)]).at[state.current_player].set(1)), (lambda : jnp.zeros(2, jnp.float32)))
ret... |
def conv(x, *args, pad=1, **kwargs):
    """2-D convolution with explicit manual padding (TF slim).

    Pads ``x`` by ``pad`` via the module-level ``padding`` helper, then runs
    ``slim.conv2d`` under padding='VALID' so only the explicit padding
    applies.  Remaining args/kwargs are forwarded to ``slim.conv2d``.
    """
    with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], padding='VALID'):
        x = padding(x, pad)
        return slim.conv2d(x, *args, **kwargs)
def main(input_file, output_file, prob_lst, fnc):
main_prompt_lst = []
with open(input_file) as in_f:
input_prompts = in_f.readlines()
counter = 0
for prompt in input_prompts:
new_prompt_lst = synthetic_noise_main(prompt.strip('\n'), prob_lst, fnc)
if (new_prompt_lst is not None)... |
def catalan_number(n):
    """Return the ``n``-th Catalan number as a Sage integer.

    Computed as binomial(2n, n) / (n + 1).  Negative indices follow the
    convention C(-1) = -1/2 and C(n) = 0 for n < -1.
    """
    n = ZZ(n)
    if (n < (- 1)):
        # Catalan numbers vanish for indices below -1.
        return ZZ.zero()
    if (n == (- 1)):
        # Special value at -1 (rational, not integer).
        return QQ(((- 1), 2))
    # binomial(2n, n) is always divisible by n+1; use the fast exact division.
    return (2 * n).binomial(n).divide_knowing_divisible_by((n + 1))
def max_pool(x, ksize=2, stride=2):
    """Square max-pooling over the spatial dimensions of ``x`` with 'SAME'
    padding (TF1 ``tf.nn.max_pool``).

    The window/stride of 1 in the first and last positions leaves the batch
    and channel dimensions untouched — assumes the NHWC layout that
    ``tf.nn.max_pool`` uses by default.
    """
    return tf.nn.max_pool(x, ksize=[1, ksize, ksize, 1], strides=[1, stride, stride, 1], padding='SAME')
class TestXGBoostMultiClassifierModelSaving(TestXGBoostModelSavingBase):
def filename(self):
return 'xgboost_multi_classifier_model'
def test_main(self):
num_class = 4
batch_size = self.batch_size()
feature_size = self.feature_size()
params = {'objective': 'multi:softmax'... |
class MPolynomial_element(MPolynomial):
def __init__(self, parent, x):
CommutativeRingElement.__init__(self, parent)
self.__element = x
def _repr_(self):
return ('%s' % self.__element)
def __call__(self, *x, **kwds):
if (len(kwds) > 0):
f = self.subs(**kwds)
... |
def train(model, reglog, optimizer, loader, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
losses = AverageMeter()
end = time.perf_counter()
model.eval()
reglog.train()
criterion = nn.CrossEntropyLoss().cuda()
for (iter_... |
class PongDuel(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self, step_cost=0, reward=1, max_rounds=10):
self._grid_shape = (40, 30)
self.n_agents = 2
self.reward = reward
self._max_rounds = max_rounds
self.action_space = MultiAgentActionSpac... |
class Dimshuffle(object):
    """Callable wrapper around a tensor's ``dimshuffle`` method: applies a
    fixed axis pattern to every tensor it is called with."""

    def __init__(self, new_axes):
        # Axis pattern forwarded verbatim to x.dimshuffle on each call.
        self.new_axes = new_axes

    def __call__(self, x):
        return x.dimshuffle(self.new_axes)
def top500_female_dominant(topicsDF):
    """Return the 500 rows with the most female-dominant sourcing.

    Drops the 'topicDistribution' column, keeps rows where female sources
    outnumber male sources, orders by 'sourcesFemaleCount' descending, and
    truncates to 500 rows.  Expects a Spark DataFrame with
    'sourcesFemaleCount' / 'sourcesMaleCount' columns.
    """
    sortedDF = topicsDF.drop('topicDistribution').filter('sourcesFemaleCount - sourcesMaleCount >= 1').orderBy(f.col('sourcesFemaleCount'), ascending=False).limit(500)
    return sortedDF
def get_args():
    """Parse command-line arguments: optional paths to a score file and a
    qrels file (both default to ``None``)."""
    parser = argparse.ArgumentParser()
    for flag in ('--score_file', '--qrels_file'):
        parser.add_argument(flag, type=str, default=None)
    return parser.parse_args()
def features_2d():
    """Fixture: an 8x2 integer array of the consecutive values 1..16 in
    row-major order."""
    return np.arange(1, 17).reshape(8, 2)
class InceptionBackbone(nn.Module):
def __init__(self, input_channels, kss, depth, bottleneck_size, nb_filters, use_residual):
super().__init__()
self.depth = depth
assert ((depth % 3) == 0)
self.use_residual = use_residual
n_ks = (len(kss) + 1)
self.im = nn.ModuleLis... |
class Control(ControlBase):
def __init__(self, statestore):
super().__init__(statestore)
self.name = 'DefaultControl'
def session(self, config):
if (self._state == ReducerState.instructing):
print('Controller already in INSTRUCTING state. A session is in progress.', flush=Tru... |
class BasicDecoderTest(tf.test.TestCase, DecoderTests):
def setUp(self):
tf.test.TestCase.setUp(self)
tf.logging.set_verbosity(tf.logging.INFO)
DecoderTests.__init__(self)
def create_decoder(self, helper, mode):
params = BasicDecoder.default_params()
params['max_decode_le... |
class Rx2Wormhole(LoRaWormhole):
class FrameMeta():
def __init__(self, entry_node: LoRaModule, ts: int, frame: dict):
self.entry_node = entry_node
self.ts = ts
self.frame = frame
self.ts_local = time.time()
class Rx2NodeEventType(Enum):
PREPARE_RX2... |
class SequenceStorageOps():
def storage_dim(a: T.SequenceElement) -> int:
return sum((StorageOps.storage_dim(v) for v in a))
def to_storage(a: T.SequenceElement) -> T.List[T.Scalar]:
return [scalar for v in a for scalar in StorageOps.to_storage(v)]
def from_storage(a: T.SequenceElement, elem... |
class MultiDatasetSampler(Sampler):
def __init__(self, cfg, dataset_dicts, sizes, seed: Optional[int]=None):
self.sizes = sizes
self.sample_epoch_size = cfg.MULTI_DATASET.SAMPLE_EPOCH_SIZE
assert ((self.sample_epoch_size % cfg.SOLVER.IMS_PER_BATCH) == 0), ((self.sample_epoch_size % cfg.SOLVE... |
def sampling(G, cpa, sfunc, null_model):
    """Run the detector ``cpa`` on one randomized copy of graph ``G``.

    Draws a null-model graph, runs detection on its sparse adjacency
    matrix, and returns the detector's quality scores ('q') plus the
    statistic ('s') computed by ``sfunc`` from the detection results.
    ``cpa.c_`` / ``cpa.x_`` are presumably the detected community/core
    assignments — confirm against the detector's API.
    """
    Gr = null_model(G)
    # CSR wrapper of the networkx adjacency matrix, as the detector expects.
    Ar = sparse.csr_matrix(nx.adjacency_matrix(Gr))
    cpa.detect(Ar)
    q_rand = cpa.qs_
    s_rand = sfunc(Ar, cpa.c_, cpa.x_)
    return {'q': q_rand, 's': s_rand}
def parse_scorer_output():
a_scores_file = open(os.path.join(args.out_dir, 'a_scores.txt'), 'w')
b_scores_file = open(os.path.join(args.out_dir, 'b_scores.txt'), 'w')
for i in range(1000):
out_dir = os.path.join(args.out_dir, 'run_{}'.format(i))
conll_file_a = os.path.join(out_dir, 'conll_a_... |
class MinFilter(RankFilter):
    """Rank filter selecting the minimum pixel in each ``size`` x ``size``
    window (rank 0 of the sorted neighborhood)."""
    name = 'Min'
    def __init__(self, size=3):
        # size: edge length of the square neighborhood window.
        self.size = size
        # Rank 0 picks the smallest value in the sorted window.
        self.rank = 0
class WiderResNetA2(nn.Module):
def __init__(self, structure, norm_act=bnrelu, classes=0, dilation=False, dist_bn=False):
super(WiderResNetA2, self).__init__()
self.dist_bn = dist_bn
nn.Dropout = nn.Dropout2d
norm_act = bnrelu
self.structure = structure
self.dilation ... |
def configuration(parent_package='', top_path=None):
config = Configuration('metrics', parent_package, top_path)
libraries = []
if (os.name == 'posix'):
libraries.append('m')
config.add_extension('_confusion_matrix', sources=['_confusion_matrix.pyx'], include_dirs=[numpy.get_include()], librarie... |
class Attention(nn.Module):
def __init__(self, dim) -> None:
super().__init__()
self.proj_1 = nn.Conv2d(dim, dim, 1)
self.activation = nn.GELU()
self.spatial_gating_unit = LKA(dim)
self.proj_2 = nn.Conv2d(dim, dim, 1)
def forward(self, x: Tensor) -> Tensor:
shortc... |
_model
def convformer_b36_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_b36_in21ft1k']
if pretrained:
state_dict = torch.hub.load_state_dict_from... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.