code stringlengths 101 5.91M |
|---|
def binarize(args, filename, dict, output_prefix, lang, offset, end):
ds = indexed_dataset.IndexedDatasetBuilder(dataset_dest_file(args, output_prefix, lang, 'bin'))
def consumer(tensor):
ds.add_item(tensor)
res = Tokenizer.binarize(filename, dict, consumer, offset=offset, end=end)
ds.finalize(d... |
class TestCythonUtilityLoader(TestTempitaUtilityLoader):
expected = (None, 'test {{cy_loader}} impl')
expected_tempita = (None, 'test CyLoader impl')
required = (None, 'req {{cy_loader}} impl')
required_tempita = (None, 'req CyLoader impl')
context = dict(cy_loader='CyLoader')
name = 'TestCyUtil... |
def backend_of(*objects, default: (D | Sentinel)=UNSET, coerce_to_common: bool=True) -> (Backend | D):
unique_backends = frozenset((b for b in (backend_of_obj(o, default=None) for o in objects) if (b is not None)))
if (len(unique_backends) == 0):
if (default is UNSET):
raise ValueError('coul... |
class RotatedCOCOEvaluator(COCOEvaluator):
def process(self, inputs, outputs):
for (input, output) in zip(inputs, outputs):
prediction = {'image_id': input['image_id']}
if ('instances' in output):
instances = output['instances'].to(self._cpu_device)
pr... |
class _MutantInfo():
    """Bookkeeping record for one mutant during a mutation-testing run.

    NOTE(review): the ``dataclasses.field`` defaults only take effect if a
    ``@dataclass`` decorator is applied at the definition site (outside this
    view) — confirm before relying on them.
    """
    # Identifier of the mutant this record describes.
    mut_num: int
    # IDs of the test cases that caused this mutant to time out.
    timed_out_by: list[int] = dataclasses.field(default_factory=list)
    # IDs of the test cases that killed (detected) this mutant.
    killed_by: list[int] = dataclasses.field(default_factory=list)
class sage__rings__finite_rings(JoinFeature):
    """Join feature for the availability of ``sage.rings.finite_rings``.

    Present only when both finite-field Python modules (the PARI ``ffelt``
    element implementation and algebraic closures of finite fields) and the
    ``sage.libs.pari`` feature are available.
    """
    def __init__(self):
        # 'standard' marks this as part of the standard Sage distribution.
        JoinFeature.__init__(self, 'sage.rings.finite_rings', [PythonModule('sage.rings.finite_rings.element_pari_ffelt'), PythonModule('sage.rings.algebraic_closure_finite_field'), sage__libs__pari()], type='standard')
class solver():
def __init__(self, model, lmdb, optimizer, scheduler, total_epoch, model_path, last_epoch):
self.model = model
print(self.model)
(self.lmdb_train, self.lmdb_test) = lmdb
self.optimizer = optimizer
self.scheduler = scheduler
self.total_epoch = total_epo... |
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('p', [0.5])
def test_dropout_grad_dependency(p, seed, ctx, func_name):
from nnabla._dropout_workaround import _get_dropout_mask
atol_f = 0.0001
with nn.context_scope(ctx):
rng = np.random.RandomState(seed)
init_x =... |
def loss_hinge_dis(dis_fake, dis_real):
    """Hinge loss for the discriminator: real scores are pushed above +1
    and fake scores below -1; violations are penalized linearly."""
    real_term = F.mean(F.relu(1.0 - dis_real))
    fake_term = F.mean(F.relu(1.0 + dis_fake))
    return real_term + fake_term
def load_pretrained(identifier, config_file, ckpt_file, root='pretrained', **kwargs):
config_path = os.path.join(root, identifier, config_file)
ckpt_path = os.path.join(root, identifier, ckpt_file)
cfg = OmegaConf.load(config_path)
model_name = cfg['model']['arch']
model = get_model(cfg)
ckpt = ... |
def test_likelihood_with_masking_entire_sequence_skip_gap(msa_sampler, msa_batch_example):
msa_batch_example[0][(- 1)] = 'MTSPDELAAARARIDELDARLVALLAE-'
(seq_prob, pos_probs) = msa_sampler.log_likelihood(msa_batch_example[0], target_index=4, with_masking=True, mask_distance=1, count_gaps=False)
assert (seq_p... |
def extract_cumsum_train_times(loaded, time_units='seconds'):
    """Return the per-epoch training times of *loaded* as a running total,
    converted to *time_units*."""
    epoch_times = extract_train_epoch_times(loaded)
    return times_to_cumsum_and_units(time_units, epoch_times)
def get_model_instance_segmentation(num_classes):
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
anchor_generator = AnchorGenerator(sizes=tuple([(4, 8, 16, 32, 64, 128, 256, 512) for _ in range(5)]), aspect_ratios=tuple([(0.25, 0.5, 1.0, 2.0) for _ in range(5)]))
model.rpn.ancho... |
class TextVQAAccuracyEvaluator():
def __init__(self):
self.answer_processor = EvalAIAnswerProcessor()
def _compute_answer_scores(self, raw_answers):
answers = [self.answer_processor(a) for a in raw_answers]
assert (len(answers) == 10)
gt_answers = list(enumerate(answers))
... |
def gens_to_basis_matrix(syms, relation_matrix, mod, field, sparse):
from sage.structure.element import is_Matrix
if (not is_Matrix(relation_matrix)):
raise TypeError('relation_matrix must be a matrix')
if (not isinstance(mod, list)):
raise TypeError('mod must be a list')
verbose(str(rel... |
.parametrize('nn_version', ['1.12.0'])
.parametrize('nntxt_idx', [1, 3, 4])
def test_nnp_to_nnp_with_version_supported(nn_version, nntxt_idx):
class Args():
pass
args = Args()
set_default_value(args)
nntxt_str = N_ARRAY[nntxt_idx]
with generate_case_from_nntxt_str(nntxt_str, nnp_file_name(),... |
class DerivedRec(Recommender):
    """Minimal concrete ``Recommender`` whose hooks are all no-ops
    (useful e.g. as a test double for the abstract base)."""
    def _init_args(self):
        # No constructor arguments to expose.
        return {}
    def _fit(self, dataset: Dataset) -> None:
        # Training is intentionally a no-op.
        pass
    def _predict(self, dataset: PandasDataFrame, k: int, queries: PandasDataFrame, items: PandasDataFrame, filter_seen_items: bool=True) -> PandasDataFrame:
        # No-op: returns None despite the annotated return type.
        pass
def test_override_static():
    """Factory methods should produce instances of the class they are called on,
    whether the factory is overridden (make2) or inherited (make)."""
    base_obj = m.MyBase.make()
    derived_a = m.MyDerived.make2()
    derived_b = m.MyDerived.make()
    assert isinstance(base_obj, m.MyBase)
    assert isinstance(derived_a, m.MyDerived)
    assert isinstance(derived_b, m.MyDerived)
class TFConvBertPreTrainedModel():
    """Placeholder standing in for the real TFConvBert model class.

    Both construction and ``from_pretrained`` delegate to ``requires_tf``,
    which presumably reports that TensorFlow is unavailable — confirm at
    the ``requires_tf`` definition site.
    """
    def __init__(self, *args, **kwargs):
        requires_tf(self)
    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
class LayoutLMv2ForSequenceClassification(metaclass=DummyObject):
    """Dummy placeholder used when the real implementation's backend is absent."""
    # Backends that must be importable for the real class to be usable.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Checks the listed backends; presumably raises when torch is missing —
        # see requires_backends for the exact behavior.
        requires_backends(self, ['torch'])
def _to_sequence_example(image, decoder, vocab):
with tf.gfile.FastGFile(image.filename, 'r') as f:
encoded_image = f.read()
try:
decoder.decode_jpeg(encoded_image)
except (tf.errors.InvalidArgumentError, AssertionError):
print(('Skipping file with invalid JPEG data: %s' % image.file... |
def _seg_49():
return [(64944, 'M', u''), (64945, 'M', u''), (64946, 'M', u''), (64947, 'M', u''), (64948, 'M', u''), (64949, 'M', u''), (64950, 'M', u''), (64951, 'M', u''), (64952, 'M', u''), (64953, 'M', u''), (64954, 'M', u''), (64955, 'M', u''), (64956, 'M', u''), (64957, 'M', u''), (64958, 'M', u''), (64959, ... |
def get_monitor_pos(monitor):
    """Return the (x, y) screen position of *monitor* as reported by GLFW."""
    x_out = ctypes.c_int(0)
    y_out = ctypes.c_int(0)
    # glfwGetMonitorPos writes its results through the two int pointers.
    _glfw.glfwGetMonitorPos(monitor, ctypes.pointer(x_out), ctypes.pointer(y_out))
    return (x_out.value, y_out.value)
def prepareSlotValuesIndependent():
domains = ['restaurant', 'hotel', 'attraction', 'train', 'taxi', 'hospital', 'police']
requestables = ['phone', 'address', 'postcode', 'reference', 'id']
dic = []
dic_area = []
dic_food = []
dic_price = []
for domain in domains:
try:
fi... |
class DataAnalyzer(DataMixin):
def __init__(self):
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
self.logger.addHandler(dash_logger)
def get_stats(df):
stats = {'': OrderedDict({'NO. of Variables': len(df.columns), 'Time Series Length': len(df), 'H... |
def _under_prefix(location):
if ('install' not in sys.argv):
return True
args = sys.argv[(sys.argv.index('install') + 1):]
for (index, arg) in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith(('%s=' % option)):
top_dir = arg.split('root=')[... |
def test_split_model_name():
(lang, package, processor) = prepare_resources.split_model_name('ro_nonstandard_tagger.pt')
assert (lang == 'ro')
assert (package == 'nonstandard')
assert (processor == 'pos')
(lang, package, processor) = prepare_resources.split_model_name('en_ncbi_disease_nertagger.pt')... |
def num_tech_eval(translation: str, target: str, total_trans: int, total_gold: int, correct_trans: int, correct_gold: int, english_term: list):
trans_num = re.findall(num_regex, translation)
gold_num = re.findall(num_regex, target)
trans_english_term = []
gold_english_term = []
for elm in re.findall... |
class SEModule(nn.Module):
REDUCTION = 4
def __init__(self, channel):
super(SEModule, self).__init__()
self.channel = channel
self.reduction = SEModule.REDUCTION
num_mid = make_divisible((self.channel // self.reduction), divisor=8)
self.fc = nn.Sequential(OrderedDict([('r... |
class TryExceptStatNode(StatNode):
child_attrs = ['body', 'except_clauses', 'else_clause']
in_generator = False
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
for except_clause in self.except_clauses:
except_clause.analyse_declarations(env)
if se... |
def get_tokenizer(pretrained_tokenizer: Optional[str], tokenizer_class: Optional[str], vocab_file: str, merges_file: str, special_tokens_dict: Optional[str]) -> PreTrainedTokenizerBase:
tokenizer = None
model_config = None
if ((pretrained_tokenizer is None) and (tokenizer_class is None)):
pretrained... |
def find_positions(tokens: List[str], mask: List[bool]) -> List[int]:
    """Return the indices at which *mask* is truthy.

    Iteration covers only the zipped prefix, i.e. min(len(tokens), len(mask))
    positions, matching the original pairing of tokens with mask flags.
    """
    return [idx for idx, (_, flag) in enumerate(zip(tokens, mask)) if flag]
def loss_computation(logits_list, labels, losses, edges=None):
check_logits_losses(logits_list, losses)
loss_list = []
for i in range(len(logits_list)):
logits = logits_list[i]
loss_i = losses['types'][i]
if ((loss_i.__class__.__name__ in ('BCELoss',)) and loss_i.edge_label):
... |
def upsert_cache(gender_cache_col, name, gender):
    """Upsert *gender* for *name* in the cache collection.

    Writes two entries: one under the lower-cased name, and one under the
    cleaned, accent-stripped form, so lookups hit either spelling.
    """
    set_gender = {'$set': {'gender': gender}}
    lowered = name.lower()
    gender_cache_col.update_one({'name': lowered}, set_gender, upsert=True)
    normalized = utils.remove_accents(utils.clean_ne(lowered))
    gender_cache_col.update_one({'name': normalized}, set_gender, upsert=True)
def create_dataset(X, Y, split, dataset_name, input_name, task_name):
    """Wrap features *X* and labels *Y* in a DictDataset, keyed by
    *input_name* and *task_name* respectively."""
    features = {input_name: X}
    labels = {task_name: Y}
    return DictDataset(name=dataset_name, split=split, X_dict=features, Y_dict=labels)
class Model(nn.Module):
def __init__(self, input_size=1, hidden_size=2, n_layers=1, activation='ReLU', p=0.0):
super(Model, self).__init__()
self.n_layers = n_layers
if (self.n_layers == 1):
self.layers = [nn.Linear(input_size, 1)]
else:
size = (([input_size] ... |
def byte_decode(x: str) -> str:
    """Decode a byte-char string *x* back to text.

    Each character of *x* is mapped through ``BCHAR_TO_BYTE`` to a raw byte
    value and the byte string is decoded as UTF-8.  Returns '' on failure.

    Fix: a character missing from ``BCHAR_TO_BYTE`` used to escape as a
    ``KeyError``; it now also yields '' like invalid UTF-8 does
    (``UnicodeDecodeError`` is a subclass of ``ValueError``).
    """
    try:
        return bytes([BCHAR_TO_BYTE[bc] for bc in x]).decode('utf-8')
    except (KeyError, ValueError):
        return ''
class OptimizeclonesTest(tf.test.TestCase):
def setUp(self):
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
self._logdir = self.get_temp_dir()
for i in range(16):
j = int(((2 * self._label... |
class ShardDirectorClient():
def __init__(self, *, director_host, director_port, shard_name, tls=True, root_certificate=None, private_key=None, certificate=None) -> None:
self.shard_name = shard_name
director_addr = f'{director_host}:{director_port}'
logger.info(f'Director address: {director... |
def embed_data(train_set, gen_set):
encoder = CNN_Metric(8, 57)
encoder.eval()
train_embed = []
for (batch_idx, x_train) in enumerate(train_set):
z_train = encoder(x_train)
train_embed.append(z_train.data.cpu().numpy())
gen_embed = []
for (batch_idx, x_gen) in enumerate(gen_set):... |
def orthogonal_procrustes(A, B, check_finite=True):
if check_finite:
A = np.asarray_chkfinite(A)
B = np.asarray_chkfinite(B)
else:
A = np.asanyarray(A)
B = np.asanyarray(B)
if (A.ndim != 2):
raise ValueError(('expected ndim to be 2, but observed %s' % A.ndim))
if ... |
def __resolve_dependencies(root_module: _ModuleParseResult, type_inference_strategy: TypeInferenceStrategy, test_cluster: ModuleTestCluster, query_type4py: bool=False) -> None:
parse_results: dict[(str, _ModuleParseResult)] = _ParseResults(query_type4py=query_type4py)
parse_results[root_module.module_name] = ro... |
def register_Ns3MgtReassocRequestHeader_methods(root_module, cls):
cls.add_constructor([param('ns3::MgtReassocRequestHeader const &', 'arg0')])
cls.add_constructor([])
cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
cls.add_method('GetCapabilities', ... |
class ParallelContinuousThompsonSampling(SingleModelVectorizedAcquisitionBuilder[HasTrajectorySampler]):
def __init__(self, select_output: Callable[([TensorType], TensorType)]=select_nth_output):
self._select_output = select_output
def __repr__(self) -> str:
return f'ParallelContinuousThompsonSa... |
class LocalSession(Session):
def __init__(self, ws=None):
Session.__init__(self)
self._ws = (ws or workspace.C.Workspace.current)
def _compile_task_group(cls, task_group, setup_net_list=None):
with Cluster():
task = task_group.to_task()
plan = core.Plan('task_group_pl... |
class DebertaConverter(Converter):
def converted(self) -> Tokenizer:
ot = self.original_tokenizer
vocab = ot.encoder
merges = list(ot.bpe_ranks.keys())
tokenizer = Tokenizer(BPE(vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix='', end_of_word_suffix='', fuse_unk=Fa... |
def _make_cross_attention_qkv(d, db, input, keys_input, output, num_heads=8, key_dim=64, value_dim=64, ff_init=("variance_scaling_initializer(mode='fan_in', distribution='uniform', scale=%s)" % 1.0)):
d[(output + '_query0')] = {'class': 'linear', 'activation': None, 'with_bias': False, 'from': [input], 'n_out': (nu... |
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
val = str(val)
result: Any = []
if (val in NULL_VALUES):
return [np.nan]
if (not validate_fi_alv(val)):
if (errors == 'raise'):
raise ValueError(f'Unable to parse value {val}')
error_re... |
class MSC(nn.Module):
def __init__(self, base, scales=None):
super(MSC, self).__init__()
self.base = base
if scales:
self.scales = scales
else:
self.scales = [0.5, 0.75]
def forward(self, x):
logits = self.base(x)
(_, _, H, W) = logits.shap... |
def auc(mask, seg):
    """ROC-AUC of the *mask* scores against the binarized *seg* labels.

    Returns None when the score is undefined (roc_auc_score raises
    ValueError, e.g. when *seg* contains only one class).

    Fix: the original bare ``except:`` swallowed every exception, including
    KeyboardInterrupt and genuine programming errors; only ValueError is
    expected here and is now the only one converted to None.
    """
    labels = (seg.flatten() > 0) * 1.0
    try:
        return roc_auc_score(labels, mask.flatten())
    except ValueError:
        return None
def checkpoint_sequential(functions, segments, *inputs):
def run_function(start, end, functions):
def forward(*inputs):
input = inputs[0]
for j in range(start, (end + 1)):
input = functions[j](input)
return input
return forward
if isinstance(fu... |
class LifoQueue(queue.Queue):
    """Thread-safe last-in-first-out queue built on queue.Queue's machinery.

    Only the storage hooks are overridden; blocking, timeouts and maxsize
    enforcement come from the base class.
    """
    def _init(self, _):
        # The ignored argument is maxsize; the base class enforces it.
        self.queue = collections.deque()
    def _put(self, item):
        self.queue.append(item)
    def _get(self):
        # Appending and popping at the same end yields LIFO order.
        return self.queue.pop()
    def _qsize(self, len=len):
        # `len` bound as a default argument for a fast local lookup.
        return len(self.queue)
def build_model(name, num_classes, loss='softmax', pretrained=True, use_gpu=True):
    """Instantiate the registered model *name* with the given options.

    Raises KeyError listing the registered names when *name* is unknown.
    """
    available = list(__model_factory.keys())
    if name not in available:
        # Message format preserved verbatim; callers may match on it.
        raise KeyError('Unknown model: {}. Must be one of {}'.format(name, available))
    factory = __model_factory[name]
    return factory(num_classes=num_classes, loss=loss, pretrained=pretrained, use_gpu=use_gpu)
def check_return_X_y(bunch, dataset_func):
    """Assert that dataset_func(return_X_y=True) yields an (X, y) tuple whose
    shapes match bunch.data / bunch.target."""
    result = dataset_func(return_X_y=True)
    assert isinstance(result, tuple)
    X, y = result[0], result[1]
    assert X.shape == bunch.data.shape
    assert y.shape == bunch.target.shape
def test_is_datetime_type_with_pandas_datetime():
    """A pandas Timestamp must be recognized as a datetime type."""
    value = pd.to_datetime('2020-01-01')
    result = is_datetime_type(value)
    assert result
def mean_pool(x, lengths, gpu):
    """Mean-pool each sequence x[i] over its first lengths[i] timesteps.

    x is a float tensor of shape (batch, time, dim); the result has shape
    (batch, dim).  When gpu >= 0 the output tensor is moved to CUDA.
    """
    pooled = torch.FloatTensor(x.size(0), x.size(2)).zero_()
    if gpu >= 0:
        pooled = pooled.cuda()
    for idx, seq_len in enumerate(lengths):
        pooled[idx] = torch.mean(x[idx][:seq_len], 0)
    return pooled
def _get_specific(match_parse, basic_ontology, type_, constant):
assert isinstance(match_parse, MatchParse)
assert isinstance(constant, Constant)
packs = []
if (type_.name == 'line'):
label_a = constant.content[0]
label_b = constant.content[(- 1)]
keys_a = match_parse.match_graph... |
def eval_str_list(x, x_type=float):
    """Coerce *x* into a list of *x_type* values.

    Strings are parsed with ast.literal_eval first ('' maps to []);
    non-iterable results become a one-element list; None passes through.
    """
    if x is None:
        return None
    if isinstance(x, str):
        if not x:
            return []
        x = ast.literal_eval(x)
    try:
        return [x_type(item) for item in x]
    except TypeError:
        # x was not iterable — wrap the scalar instead.
        return [x_type(x)]
def enable_progress_bar():
    """Re-enable tqdm progress bars, both for this module's flag and in
    huggingface_hub's own progress-bar utilities."""
    global _tqdm_active
    _tqdm_active = True
    # Keep the hub's global progress-bar state in sync with ours.
    hf_hub_utils.enable_progress_bars()
def test_plane_power_grad():
space = Simspace(TESTDATA, optplan.SimulationSpace(pml_thickness=[0, 0, 0, 0, 0, 0], mesh=optplan.UniformMesh(dx=40), sim_region=optplan.Box3d(center=[0, 0, 0], extents=[80, 80, 80]), eps_bg=optplan.GdsEps(gds='straight_waveguide.gds', mat_stack=optplan.GdsMaterialStack(background=optpl... |
def track_iter_progress(tasks, bar_width=50, file=sys.stdout, **kwargs):
if isinstance(tasks, tuple):
assert (len(tasks) == 2)
assert isinstance(tasks[0], Iterable)
assert isinstance(tasks[1], int)
task_num = tasks[1]
tasks = tasks[0]
elif isinstance(tasks, Iterable):
... |
def find_smaller_factor(num):
    """Return the largest factor of *num* that is <= sqrt(num).

    For any num >= 1 this always terminates with a factor (1 divides
    everything).

    Fix: uses math.isqrt (exact integer square root, Python 3.8+) instead of
    int(math.sqrt(num)), which can be off by one for large integers due to
    float rounding and would then miss or overshoot the true sqrt factor.
    """
    sqrt_int = math.isqrt(num)
    if num % sqrt_int == 0:
        return sqrt_int
    # Walk downward from just below sqrt(num) to the first divisor.
    for factor in range(sqrt_int - 1, 0, -1):
        if num % factor == 0:
            return factor
def flat_cfg(x):
    """Flatten the nested config *x* into a single-level dict from the
    (key, value) pairs produced by _flat_cfg (later keys win)."""
    return dict(_flat_cfg(x))
class _ChannelSummaryMixin():
def __init__(self, *args: Any, **kwargs: Sequence[Channel]):
channels = kwargs.pop('channels')
super().__init__(*args, **kwargs)
self._channels: list[str] = []
self._samples: list[str] = []
self._modifiers: list[tuple[(str, str)]] = []
se... |
def get_installer(dist):
    """Return the first non-blank line of *dist*'s INSTALLER metadata, or ''."""
    if not dist.has_metadata('INSTALLER'):
        return ''
    for raw in dist.get_metadata_lines('INSTALLER'):
        stripped = raw.strip()
        if stripped:
            return stripped
    return ''
def experiment_training(env, training_agent: experiment.ExperimentTraining, path_body: str) -> None:
if env.attacker_retrain:
(attacker_model, scores, watermark_logit, ground_truth_logit, full_watermark) = training_agent.train_attacker(log_interval=1000)
date = datetime.datetime.today().strftime('%Y... |
class TestDDPG(TfGraphTestCase):
.mujoco_long
def test_ddpg_double_pendulum(self):
with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
env = GarageEnv(gym.make('InvertedDoublePendulum-v2'))
policy = ContinuousMLPPolicy(env_spec=env.spec, hidden_sizes=[64, 64], hidden_n... |
def make_analyzer(cfg):
    """Load lib/analyzers/<cfg.task>.py and return an instance of its
    Analyzer class.

    Fix: replaces ``imp.load_source`` (deprecated since Python 3.4, removed
    in 3.12) with the importlib equivalent.  The loaded module is registered
    in sys.modules under the same dotted name, matching imp.load_source's
    observable behavior.
    """
    import importlib.util
    import sys
    module_name = '.'.join(['lib.analyzers', cfg.task])
    path = os.path.join('lib/analyzers', cfg.task + '.py')
    spec = importlib.util.spec_from_file_location(module_name, path)
    module = importlib.util.module_from_spec(spec)
    sys.modules[module_name] = module
    spec.loader.exec_module(module)
    return module.Analyzer()
class MMDistributedDataParallel(DistributedDataParallel):
def to_kwargs(self, inputs, kwargs, device_id):
return scatter_kwargs(inputs, kwargs, [device_id], dim=self.dim)
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def train_... |
class EvalHook(Hook):
rule_map = {'greater': (lambda x, y: (x > y)), 'less': (lambda x, y: (x < y))}
init_value_map = {'greater': (- inf), 'less': inf}
_default_greater_keys = ['acc', 'top', '', 'auc', 'precision', 'mAP', 'mDice', 'mIoU', 'mAcc', 'aAcc']
_default_less_keys = ['loss']
def __init__(se... |
def loadDict(fpr):
    """Read tab-separated lines from the open file *fpr* into a dict
    mapping column 0 to column 1 (extra columns are ignored, later
    duplicate keys win)."""
    result = {}
    for raw in fpr:
        fields = raw.strip('\n').split('\t')
        result[fields[0]] = fields[1]
    return result
class DiceLoss(nn.Module):
def __init__(self, loss_weight=1.0):
super(DiceLoss, self).__init__()
self.loss_weight = loss_weight
def forward(self, input, target, mask, reduce=True):
batch_size = input.size(0)
input = torch.sigmoid(input)
input = input.contiguous().view(bat... |
class HypLinear(nn.Module):
def __init__(self, in_features, out_features, c, bias=True):
super(HypLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.c = c
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
if... |
class GaussianChannel(Channel):
def __init__(self, var=1):
self.var = var
self.repr_init()
self.sigma = np.sqrt(var)
self.a = (1 / var)
def sample(self, Z):
noise = (self.sigma * np.random.standard_normal(Z.shape))
X = (Z + noise)
return X
def math(sel... |
class Local(object):
__slots__ = ('__storage__', '__ident_func__')
def __init__(self):
object.__setattr__(self, '__storage__', {})
object.__setattr__(self, '__ident_func__', get_ident)
def __iter__(self):
return iter(self.__storage__.items())
def __call__(self, proxy):
re... |
def compute_video(lst):
    """Aggregate per-clip scores for one video and compute accuracy flags.

    lst is (index, video_id, per-clip score arrays, label).  Returns
    [predicted class, top-1 hit (0.0/1.0), top-5 hit (0.0/1.0), int label].
    """
    _, _, clip_scores, label = lst
    avg_scores = np.mean(list(clip_scores), axis=0)
    pred = np.argmax(avg_scores)
    label_int = int(label)
    top1 = 1.0 if int(pred) == label_int else 0.0
    top5 = 1.0 if label_int in np.argsort(-avg_scores)[:5] else 0.0
    return [pred, top1, top5, label_int]
(scope='module')
def filename_meshes():
    """Paths to the element test meshes plus the special tri-quad square mesh."""
    geoms = ['1_2', '2_3', '2_4', '3_4', '3_8']
    paths = [data_dir + ('/meshes/elements/%s_2.mesh' % geom) for geom in geoms]
    paths.append(data_dir + '/meshes/2d/special/square_triquad.mesh')
    return paths
def run_atax(device_type: dace.dtypes.DeviceType):
(M, N) = sizes['small']
(A, x, y_ref) = init_data(M, N)
if (device_type in {dace.dtypes.DeviceType.CPU, dace.dtypes.DeviceType.GPU}):
sdfg = kernel.to_sdfg()
sdfg = auto_optimize(sdfg, device_type)
y = sdfg(A, x, M=M, N=N)
elif (... |
class score_Parser():
    """Builds the CLI argument parser for network scoring; exposed as .parser."""
    def __init__(self):
        self.parser = argparse.ArgumentParser(description='Eval topx networks.')
        self.parser.add_argument('--debug', type=int, default=-1)
def fn(batch):
(X, _, Y, _, _, _) = list(zip(*batch))
X = [mfcc(x[0]).T for x in X]
Y = [torch.tensor(tokenizer.encode(y.lower())) for y in Y]
x_len = torch.tensor([x.shape[0] for x in X])
Mx = max(x_len)
y_len = torch.tensor([len(y) for y in Y])
My = max(y_len)
return {'x': nn.utils.rnn... |
def main(args):
config = parse_args_to_config(args)
emmental.init(log_dir=config['meta_config']['log_path'], config=config)
cmd_msg = ' '.join(sys.argv)
logger.info(f'COMMAND: {cmd_msg}')
write_to_file(f'{emmental.Meta.log_path}/cmd.txt', cmd_msg)
logger.info(f'Config: {emmental.Meta.config}')
... |
def get_concat_2levelmel_model(**kwargs):
mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
net = get_model_passt(arch='passt_s_swa_p16_128_ap476')
model = PasstBasicWrapper... |
def usec_to_str(usec: int):
    """Render a microsecond count as a human-readable string,
    scaling to us, ms or s."""
    if usec < 1000:
        return '%dus' % usec
    if usec < 1000000:
        return '%.3fms' % (usec / 1000.0)
    return '%.3fs' % (usec / 1000000.0)
def _coeff_smooth(lam):
xi = ((1 - (96 * lam)) + ((24 * lam) * sqrt((3 + (144 * lam)))))
omeg = arctan2(sqrt(((144 * lam) - 1)), sqrt(xi))
rho = ((((24 * lam) - 1) - sqrt(xi)) / (24 * lam))
rho = (rho * sqrt((((48 * lam) + ((24 * lam) * sqrt((3 + (144 * lam))))) / xi)))
return (rho, omeg) |
def ptb_detokenizer(string):
string = string.replace(" '", "'")
string = string.replace(' \n', '\n')
string = string.replace('\n ', '\n')
string = string.replace(" n't", "n't")
string = string.replace(' N ', '1 ')
string = string.replace('$ 1', '$1')
string = string.replace('# 1', '#1')
... |
def test_flow_equality():
    """Two Flow instances built from identical field values must compare equal."""
    class NewFlow(flows.Flow):
        pos: np.ndarray = flows.np_zero_field(3)
        priority: int = flows.constant_field(default=0)
    first = NewFlow(pos=np.array([3, 1, 2]), priority=3)
    second = NewFlow(pos=np.array([3, 1, 2]), priority=3)
    assert first == second
_model_architecture('transformer_lm', 'transformer_lm')
def base_lm_architecture(args):
if hasattr(args, 'no_tie_adaptive_proj'):
args.no_decoder_final_norm = True
if (args.no_tie_adaptive_proj is False):
args.tie_adaptive_proj = True
if hasattr(args, 'decoder_final_norm'):
a... |
class PreActBlock(nn.Module):
def __init__(self, in_planes, planes, stride=1):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
... |
def batchnorm_5d(data, height, width, name, fix_gamma, eps, momentum):
data = mx.symbol.reshape(data, shape=(0, 0, (- 1), width))
data = mx.sym.BatchNorm(data, name=name, fix_gamma=fix_gamma, eps=eps, momentum=momentum, use_global_stats=cfg.MODEL.DECONVBASELINE.BN_GLOBAL_STATS)
return mx.symbol.reshape(data... |
class PoseHeaderDimensions():
def __init__(self, width: int, height: int, depth: int=0, *args):
self.width = math.ceil(width)
self.height = math.ceil(height)
self.depth = math.ceil(depth)
def read(version: float, reader: BufferReader):
(width, height, depth) = reader.unpack(Const... |
def get_sizes(args):
    """Scan args.train_file — tab-separated integer triples "h<TAB>r<TAB>t" —
    and return (num_relations, num_entities), each computed as max id + 1.

    Fix: the file handle was opened and never closed; it is now managed with
    a context manager.
    """
    max_relation = -1
    max_entity = -1
    with open(args.train_file) as handle:
        for line in handle:
            h, r, t = map(int, line.strip().split('\t'))
            max_relation = max(max_relation, r)
            max_entity = max(max_entity, h, t)
    return (max_relation + 1, max_entity + 1)
def grid2_width(nx=4, ny=2, width=TEXTWIDTH, large_margin=0.14, small_margin=0.03, sep=0.03, cbar_width=0.06):
left = large_margin
right = large_margin
top = small_margin
bottom = large_margin
panel_size = ((((1.0 - top) - bottom) - ((ny - 1) * sep)) / ny)
height = (width / ((((left + (nx * pane... |
class LearnedTimeDiffusion(nn.Module):
def __init__(self, C_inout, method='spectral'):
super(LearnedTimeDiffusion, self).__init__()
self.C_inout = C_inout
self.diffusion_time = nn.Parameter(torch.Tensor(C_inout))
self.method = method
nn.init.constant_(self.diffusion_time, 0.0... |
def compare_outputs(quote_objects, entities, text):
pos = neg = 0
for entity in entities:
for mention_span in entities[entity][0]:
(start_mention, end_mention) = (mention_span[0][0], mention_span[(- 1)][(- 1)])
for quote in quote_objects:
if (not quote['speaker_in... |
class BatchNorm2dWithId(nn.BatchNorm2d):
_id = count(0)
def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True):
super(BatchNorm2dWithId, self).__init__(num_features, eps, momentum, affine, track_running_stats)
self.id = next(self._id)
def forward(sel... |
class InputFeatures(object):
def __init__(self, input_ids, input_mask, segment_ids, label_id, valid_ids=None, label_mask=None, ori_label=None, subword=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.v... |
class MultiObserver(BaseObserver):
def __init__(self, *args, config, vehicle, traffic_manager):
super().__init__(config, vehicle, traffic_manager)
self.obs_members = {'vector': [], 'image': []}
self.obs_spaces = {'vector': None, 'image': None}
self.multi_observer_type = None
... |
def test_nan():
    """NaN values must sort to the front in awkward's argsort/sort."""
    values = ak.Array([1, 2, np.nan, 3, 0, np.nan])
    order = ak.operations.argsort(values).to_list()
    assert order == [2, 5, 4, 0, 1, 3]
    # Compare via str() because nan != nan under direct equality.
    assert str(ak.operations.sort(values).to_list()) == '[nan, nan, 0.0, 1.0, 2.0, 3.0]'
def format_data_with_default(training_row: dd.Series, test_row: dd.Series, cat_imputation: str='constant', cat_null_value: Optional[List[Any]]=None, fill_val: str='missing_value', num_imputation: str='mean', num_null_value: Optional[List[Any]]=None, cat_encoding: str='one_hot', variance_threshold: bool=True, variance: ... |
def find_eggs_in_zip(importer, path_item, only=False):
if importer.archive.endswith('.whl'):
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
(yield Distribution.from_filename(path_item, metadata=metadata))
if only:
return
for subitem in metadata.... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.