code stringlengths 101 5.91M |
|---|
class NDCG(object):
def __init__(self):
self._ndcg_numerator = 0.0
self._ndcg_denominator = 0.0
def observe(self, predicted_scores: torch.Tensor, target_relevance: torch.Tensor):
predicted_scores = predicted_scores.detach()
predicted_scores = predicted_scores.unsqueeze(1)
... |
def test_option_numpy_2():
    """An option-of-int64 datashape with nested parameters must round-trip through str()."""
    text = '?int64[parameters={"wonky": [1, 2, 3]}]'
    parsed = ak.types.from_datashape(text, highlevel=False)
    assert isinstance(parsed, ak.types.OptionType)
    assert str(parsed) == text
class JaxTrainingMixin():
_data_splitter_cls = DataSplitter
_training_plan_cls = JaxTrainingPlan
_train_runner_cls = TrainRunner
_dsp.dedent
def train(self, max_epochs: (int | None)=None, accelerator: str='auto', devices: ((int | list[int]) | str)='auto', train_size: float=0.9, validation_size: (flo... |
_spec_function('commonsense')
def get_commonsense_spec(dataset: str, method: str) -> RunSpec:
if (dataset == HellaSwagScenario.name):
scenario_spec = ScenarioSpec(class_name='helm.benchmark.scenarios.commonsense_scenario.HellaSwagScenario', args={})
elif (dataset == OpenBookQA.name):
scenario_sp... |
class BLEUScorer(object):
def score(self, hypothesis, corpus, n=1):
count = [0, 0, 0, 0]
clip_count = [0, 0, 0, 0]
r = 0
c = 0
weights = [0.25, 0.25, 0.25, 0.25]
for (hyps, refs) in zip(hypothesis, corpus):
hyps = [hyps]
for (idx, hyp) in enume... |
('/get_balance_with_name/', methods=('GET',))
def get_balance_with_name():
web3 = connect_to_geth(app.web3_url, app.consensus)
balance = {}
for addr in app.eth_accounts:
node = {}
caddr = Web3.toChecksumAddress(addr)
node['balance'] = web3.fromWei(web3.eth.get_balance(caddr), 'ether'... |
class AggregationBlock(nn.Module):
def __init__(self, dim_input, num_enc_sab=3, num_outputs=1, dim_hidden=384, dim_feedforward=1024, num_heads=8, ln=False, attention_dropout=0.1, use_efficient_attention=False):
super(AggregationBlock, self).__init__()
self.num_outputs = num_outputs
self.dim_... |
def process_wiki_sections(data):
processed_wiki = {}
for key in data.keys():
wiki = data[key]
processed_wiki[key] = {}
for i in range(4):
sec = wiki[str(i)]
if (i == 0):
sentences = process_sec_0(sec)
else:
sentences = n... |
class TestFiniteDifferenceHvp(TfGraphTestCase):
def test_finite_difference_hvp(self):
policy = HelperPolicy(n_vars=1)
x = policy.get_params()[0]
a_val = np.array([5.0])
a = tf.constant([0.0])
f = (a * (x ** 2))
expected_hessian = (2 * a_val)
vector = np.array(... |
def _global_config(proto, default_context=None):
class GlobalConfig():
pass
config = GlobalConfig()
if (proto is not None):
config.default_context = _context(proto.global_config.default_context)
nn.set_default_context(config.default_context)
else:
config.default_context =... |
def _lang_id(dic: Dictionary, lang: str):
    """Return the dictionary index of *lang*, asserting the symbol is known."""
    lang_idx = dic.index(lang)
    # An unknown symbol maps to the <unk> index; treat that as a hard error.
    assert lang_idx != dic.unk_index, 'cannot find language ID for lang {}'.format(lang)
    return lang_idx
class DLNotEqual(ExtendedProofStmt):
def __init__(self, valid_pair, invalid_pair, x, bind=False, simulated=False):
if ((len(valid_pair) != 2) or (len(invalid_pair) != 2)):
raise TypeException('The valid_pair and invalid_pair must be pairs')
self.x = x
(self.alpha, self.beta) = (S... |
def getintegrator(rhs, u0, solver, context):
    """Bind the integration arguments into a zero-argument stepper callable."""
    def step():
        # params.dt is read lazily at call time, so later changes are honoured.
        return solver.integrate(u0, rhs, params.dt, solver, context)
    return step
def sel_grid_roulette(collection: Sequence[Any]) -> Sequence[Any]:
assert isinstance(collection, containers.Grid)
assert len(collection)
tmp_idx = random.randint(0, (len(collection) - 1))
tmp_ind: IndividualLike = collection[tmp_idx]
bin_coord = collection.index_grid(tmp_ind.features)
bin_pop = ... |
class Column():
ATTRIBUTE_TXT = 'TXT'
ATTRIBUTE_NUM = 'NUM'
ATTRIBUTE_GROUP_BY_ABLE = 'GROUPBY'
def __init__(self, name, natural_name, table=None, attributes=None):
self.name = name
self.natural_name = natural_name
self.table = table
if (attributes is not None):
... |
def measure_model_quality(model, loss_function, X_test, y_test, prev_best_f1=0, with_save=True):
avg_loss = 0
y_pred = []
y_test_for_loss = Variable(torch.LongTensor(y_test))
for (ind, dialog) in tqdm(enumerate(X_test)):
out = forward_pass(model, dialog)
(top_n, top_i) = out.data.topk(1)... |
class FeverOrFNCTermFrequencyFeatureFunction(TermFrequencyFeatureFunction):
def __init__(self, fever_db, fnc_db, lim_unigram=5000):
self.fnc_db = fnc_db
self.fever_db = fever_db
super().__init__(fever_db, lim_unigram)
self.ename = 'evidence'
def bodies(self, data):
ret = ... |
def load_partitions(shards_dir):
log_paths = sorted(list(Path(shards_dir).glob('log_*.json')), key=(lambda x: str(x).split('.')[(- 2)].split('_')[(- 1)]))
partitions = {}
for (i, log_path) in enumerate(log_paths):
print('loading partition {}/{}'.format(i, len(log_paths)))
log = load_json(log... |
def reduce_hamiltonian(ham, retain_explicit=[]):
state = ham.state
new_params = ham.H_params.copy()
(untracked_q, untracked_p) = ([], [])
(new_q, new_p) = ([], [])
(new_qvals, new_pvals) = ([], [])
qp_pairs = state.qp_pairs
for (i, qp_pair) in enumerate(qp_pairs):
(q, p) = qp_pair
... |
class EarlyStopping():
def __init__(self, patience=7, verbose=False, delta=0):
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
def __call__(self, ... |
def requirements():
    """Build the ``-r/--requirement`` option (repeatable requirements-file flag)."""
    return Option(
        '-r',
        '--requirement',
        dest='requirements',
        action='append',
        default=[],
        metavar='file',
        help='Install from the given requirements file. This option can be used multiple times.',
    )
def test_ppc_init():
adata = synthetic_iid()
(ppc, models_dict) = get_ppc_with_samples(adata, n_samples=42)
assert isinstance(ppc.raw_counts, GCXS)
assert isinstance(ppc.samples_dataset, Dataset)
assert (ppc.n_samples == 42)
assert (ppc.models is models_dict)
assert (ppc.metrics == {})
a... |
class _TransformMod(_TransformHrepresentation):
def __init__(self, inequalities, equations, B, mod):
self.mod = mod
super().__init__(inequalities, equations, B)
def _transform_(self):
from sage.matrix.constructor import matrix
from sage.modules.free_module_element import vector
... |
def make_net():
    """Write the trainval and test network prototxt definitions to disk."""
    tops = ['color', 'depth', 'label']
    for split, path in (('trainval', 'trainval.prototxt'), ('test', 'test.prototxt')):
        with open(path, 'w') as f:
            f.write(str(fcn(split, tops)))
def test_register_invalid_hook_name(dispatcher):
    # NOTE(review): as written this only *defines* `hook` and never registers
    # it with `dispatcher`, so nothing inside the context should raise --
    # presumably a decorator such as `@dispatcher.hook(...)` was lost in
    # transcription; verify against the original test before trusting it.
    with pytest.raises(TypeError, match="There is no hook with name 'hook'"):
        def hook():
            pass
_utils.test(require=ti.extension.quant_basic)
def test_quant_int_full_struct():
qit = ti.types.quant.int(32, True)
x = ti.field(dtype=qit)
bitpack = ti.BitpackedFields(max_num_bits=32)
bitpack.place(x)
ti.root.dense(ti.i, 1).place(bitpack)
x[0] = 15
assert (x[0] == 15)
x[0] = 12
asse... |
def check_bc(existing_schemas):
new_schemas = torch._C._jit_get_all_schemas()
new_schemas += torch._C._jit_get_custom_class_schemas()
new_schema_dict = defaultdict(list)
for s in new_schemas:
new_schema_dict[s.name].append(s)
is_bc = True
broken_ops = []
for existing_schema in existi... |
def loss_entropy_ssl(netC, netC_T, it, iter_l, iter_u, device):
(data, label) = iter_l.__next__()
(data, label) = (data.to(device), label.to(device))
(data_u, _) = iter_u.__next__()
data_u = data_u.to(device)
logit_l = netC(data)
logit_u = netC(data_u)
logit_ut = netC_T(data_u).detach()
... |
def get_local_rank():
    """Return this process's rank within the local process group.

    Falls back to 0 when torch.distributed is unavailable or has not been
    initialized (i.e. single-process execution).
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    assert _LOCAL_PROCESS_GROUP is not None
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
class TestGetPytorchTPC(unittest.TestCase):
def test_get_pytorch_models(self):
tpc = mct.get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
model = mobilenet_v2(pretrained=True)
def rep_data():
(yield [np.random.randn(1, 3, 224, 224)])
(quantized_model, _) = mct.... |
.parametrize('valid_when', [False, True])
def test_ByteMasked(valid_when):
builder = ByteMasked(Numpy('float64', ''), valid_when, '')
subbuilder = builder.append_valid()
subbuilder.append(1.1)
subbuilder = builder.append_null()
subbuilder.append((- 1000))
subbuilder = builder.extend_valid(3)
... |
def update_default(dict_special, dict_default):
    """Recursively fill *dict_special* in place with entries it is missing.

    Values come from *dict_default*; keys already present in dict_special
    always win. Nested dicts are merged key-by-key.

    Parameters
    ----------
    dict_special : dict
        The override mapping, mutated in place.
    dict_default : dict
        The defaults to fall back on.
    """
    for key, default_value in dict_default.items():
        if isinstance(default_value, dict):
            if key not in dict_special:
                dict_special[key] = dict()
            # Only descend when the override is itself a dict; a scalar
            # override replaces the whole default subtree. (The original
            # recursed unconditionally and raised TypeError on scalars.)
            if isinstance(dict_special[key], dict):
                update_default(dict_special[key], default_value)
        elif key not in dict_special:
            dict_special[key] = default_value
def augment_db(db, templates, templates_one_table, sql_components, aug_limit):
count = 1
augment_pairs = []
while ((count < aug_limit) or ((count == (int(aug_limit) + 1)) and (random.random() < ((aug_limit + 1) - count)))):
(sql_gen, question_gen, column_lables) = populate_one(db, templates, templat... |
def load_who_dataset():
module_path = dirname(__file__)
target_filename = join(module_path, 'data', 'who_dataset.csv')
pd = check_pandas_support('load_who_dataset')
raw_data = pd.read_csv(target_filename)
with open(join(module_path, 'descr', 'who_dataset.rst')) as rst_file:
fdescr = rst_file... |
class TensorRef2D_(ctypes.Structure):
    # C-ABI mirror of a 2-D tensor reference: a raw device/host data pointer
    # plus a single leading-dimension stride (int). Presumably mirrors a
    # CUTLASS-style TensorRef -- confirm against the native struct layout.
    _fields_ = [('ptr', ctypes.c_void_p), ('stride', ctypes.c_int)]
def do_delete_command(service: RemoteService, auth: Authentication, args):
    """Delete the account identified by ``args.api_key`` and print the result."""
    deleted = service.delete_account(auth, args.api_key)
    hlog('Deleted account:')
    table_header = render_header(show_model_groups=args.show_model_groups)
    print_item(table_header, render_account(deleted))
(10, 1, exceptions=CookieGenException)
def get_cookies():
(tid, c, w) = get_tid_and_c(POST_URL)
r_tid = parse.quote_plus(tid)
inrarnate_url = INRARNATE_URL.format(r_tid, w, c, format(random.random(), '.17f'))
cookies = {'tid': ((tid + '__') + c)}
resp = requests.get(inrarnate_url, headers=headers, c... |
def main(args=None):
if (args is None):
args = sys.argv[1:]
args = parse_args(args)
check_keras_version()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
keras.backend.tensorflow_backend.set_session(get_session())
if ((args.save_path is not None) and (not os.path.exist... |
def get_src_words(src_indices, index2str):
    """Map source token indices to strings and join them with spaces,
    stopping at the first padding token.

    Parameters
    ----------
    src_indices : iterable of int
        Token indices of the source sentence.
    index2str : mapping
        Index-to-token lookup (e.g. a vocabulary's itos table).
    """
    # The original assigned `words = []` and immediately overwrote it;
    # the dead assignment has been removed.
    raw_words = (index2str[i] for i in src_indices)
    words = takewhile(lambda w: w != onmt.IO.PAD_WORD, raw_words)
    return ' '.join(words)
def get_full_repo_name(model_id: str, organization: Optional[str]=None, token: Optional[str]=None):
    """Return the fully-qualified Hub repo name ``<namespace>/<model_id>``.

    The namespace is *organization* when given; otherwise the username of
    the token owner (the stored token is used when none is passed).
    """
    if token is None:
        token = HfFolder.get_token()
    namespace = organization if organization is not None else whoami(token)['name']
    return f'{namespace}/{model_id}'
class AprilTagCodes():
t16h5 = [8987, 11941, 13418, 17849, 31142, 32619, 45912, 59205, 65113, 5485, 14347, 61611, 3460, 18230, 35954, 44816, 2364, 37812, 42243, 18063, 57655, 22421, 57154, 7197, 59868, 29613, 44383, 54576, 1994, 44846]
t25h7 = [4945677, , , , , , , 2895789, , , , , 2342890, , , , 8722037, , 722... |
_start_docstrings(VISION_TEXT_DUAL_ENCODER_START_DOCSTRING)
class FlaxVisionTextDualEncoderModel(FlaxPreTrainedModel):
config_class = VisionTextDualEncoderConfig
module_class = FlaxVisionTextDualEncoderModule
def __init__(self, config: VisionTextDualEncoderConfig, input_shape: Optional[Tuple]=None, seed: in... |
def prepare_data(x, label):
    """Sort survival data by descending event time.

    Parameters
    ----------
    x : np.ndarray
        Covariate rows; reordered together with the labels.
    label : dict
        Must contain event indicators under 'e' and event times under 't'.

    Returns
    -------
    tuple
        ``(x, e, t)`` reordered so that ``t`` is non-increasing.

    Raises
    ------
    TypeError
        If *label* is not a dict. (The original silently fell through to a
        NameError on ``e``/``t`` in that case; fail loudly instead.)
    """
    if not isinstance(label, dict):
        raise TypeError("label must be a dict with 'e' and 't' keys")
    e, t = label['e'], label['t']
    order = np.argsort(t)[::-1]
    return x[order], e[order], t[order]
def test_isotonic_regression_output_predict():
pd = pytest.importorskip('pandas')
(X, y) = make_regression(n_samples=10, n_features=1, random_state=42)
regressor = IsotonicRegression()
with sklearn.config_context(transform_output='pandas'):
regressor.fit(X, y)
X_trans = regressor.transfo... |
class ANSI():
_bold = '\x1b[1m'
_red = '\x1b[31m'
_gray = '\x1b[90m'
_reset = '\x1b[0m'
def bold(cls, s):
return '{}{}{}'.format(cls._bold, s, cls._reset)
def red(cls, s):
return '{}{}{}'.format((cls._bold + cls._red), s, cls._reset)
def gray(cls, s):
return '{}{}{}'.... |
class get_pybind_include(object):
    """Lazy include-path helper for setup.py.

    The pybind11 import is deferred until ``str()`` is taken, so this object
    can appear in ``include_dirs`` before pybind11 is actually installed.
    """

    def __init__(self, user=False):
        # When True, report the per-user include directory instead.
        self.user = user

    def __str__(self):
        import pybind11
        return pybind11.get_include(self.user)
class MixtureTable(Module):
def __init__(self, dim=1):
super(MixtureTable, self).__init__()
self.dim = dim
self.size = torch.Size()
self.size2 = torch.Size()
self.batchSize = 0
self.backwardSetup = False
self.gradInput = []
self._gaterView = None
... |
def visualize_pos(writer, pos_maps, iteration):
global num_vis_pos
num_vis_pos += 1
stage = 'valid'
for i in range(len(pos_maps)):
pos_map = pos_maps[i]
if isinstance(pos_map, tuple):
num_pos = 2
else:
num_pos = 1
for j in range(num_pos):
... |
class Particle():
def __init__(self, pose, weight):
self.pose = pose
self.w = weight
self.mapMu = []
self.mapID = []
self.mapSigma = []
self.hashMap = {}
def print(self):
print('pose: ', self.pose, ' weight: ', self.w)
def printMap(self):
print... |
def _izip_records(seqarrays, fill_value=None, flatten=True):
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
if (sys.version_info[0] >= 3):
zip_longest = itertools.zip_longest
else:
zip_longest = itertools.izip_longest
for tup in zip_longest(*seqa... |
def pil2cv(image):
    """Convert a PIL image (or array-like) to an OpenCV-ordered uint8 ndarray.

    Grayscale input is returned unchanged; 3-channel input is converted
    RGB->BGR and 4-channel input RGBA->BGRA to match OpenCV's channel order.
    """
    new_image = np.array(image, dtype=np.uint8)
    if new_image.ndim == 2:
        pass  # single channel: nothing to reorder
    elif new_image.shape[2] == 3:
        # BUG FIX: the RGB case previously skipped the channel swap even
        # though the RGBA case performed one, yielding inconsistent output.
        new_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR)
    elif new_image.shape[2] == 4:
        new_image = cv2.cvtColor(new_image, cv2.COLOR_RGBA2BGRA)
    return new_image
def get_model(method, data_conf, instance_seg=True, embedded_dim=16, direction_pred=True, angle_class=36):
if (method == 'lidar2map'):
model = LiDAR2Map(data_conf, instance_seg=instance_seg, embedded_dim=embedded_dim, direction_pred=direction_pred, direction_dim=angle_class)
elif (method == 'lift_splat'... |
class BuiltinMethod(_BuiltinOverride):
def declare_in_type(self, self_type):
(method_type, sig) = (self.func_type, self.sig)
if (method_type is None):
self_arg = PyrexTypes.CFuncTypeArg('', self_type, None)
self_arg.not_none = True
self_arg.accept_builtin_subtypes... |
def parse_tdp_config(s):
    """Turn a whitespace-padded multi-line TDP config into CLI override flags."""
    # Strip all spaces/tabs first, then emit one wildcard flag per
    # remaining non-empty line.
    cleaned = s.replace(' ', '').replace('\t', '')
    flags = []
    for line in cleaned.splitlines():
        line = line.strip()
        if line:
            flags.append('--*.tdp.%s' % line)
    return flags
def uri_to_iri(uri, charset='utf-8', errors='werkzeug.url_quote'):
if isinstance(uri, tuple):
uri = url_unparse(uri)
uri = url_parse(to_unicode(uri, charset))
path = url_unquote(uri.path, charset, errors, _to_iri_unsafe)
query = url_unquote(uri.query, charset, errors, _to_iri_unsafe)
fragmen... |
class GoogleSheetsEvaluator(BaseEvaluator):
_exception(backoff.constant, APIError, jitter=None, interval=2, on_backoff=on_error)
def __init__(self, generator):
super().__init__(generator)
api_json = os.environ.get('GOOGLE_CLOUD_CREDENTIAL')
test_sheet_key = '1dgsg17hqRHkrJnKvWQyFwinMJNrs... |
class InstallRequirement(object):
def __init__(self, req, comes_from, editable=False, link=None, markers=None, use_pep517=None, isolated=False, install_options=None, global_options=None, hash_options=None, constraint=False, extras=(), user_supplied=False):
assert ((req is None) or isinstance(req, Requiremen... |
def get_minibatch(doc_iter, size, pos_class=positive_class):
data = [('{title}\n\n{body}'.format(**doc), (pos_class in doc['topics'])) for doc in itertools.islice(doc_iter, size) if doc['topics']]
if (not len(data)):
return (np.asarray([], dtype=int), np.asarray([], dtype=int))
(X_text, y) = zip(*da... |
class VqApcLayer(nn.Module):
def __init__(self, input_size, codebook_size, code_dim, gumbel_temperature):
super(VqApcLayer, self).__init__()
self.codebook_size = codebook_size
self.vq_logits = nn.Linear(input_size, codebook_size)
self.gumbel_temperature = gumbel_temperature
s... |
class TestScoreCAM(unittest.TestCase):
def setUp(self) -> None:
directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../datasets')
self.img = Resize((224, 224)).transform(Image(PilImage.open(os.path.join(directory, 'images/dog_cat_2.png')).convert('RGB')))
self.model = mo... |
def get_audio(audio_path, duration=10, target_sr=16000):
n_samples = int((duration * target_sr))
(audio, sr) = load_audio(path=audio_path, ch_format=STR_CH_FIRST, sample_rate=target_sr, downmix_to_mono=True)
if (len(audio.shape) == 2):
audio = audio.mean(0, False)
input_size = int(n_samples)
... |
_grad()
def test(model, data, split_idx, evaluator, device):
model = model.to(device)
data = data.to(device)
model.eval()
out = model(data.x, data.adj_t)
y_pred = out.argmax(dim=(- 1), keepdim=True)
train_acc = evaluator.eval({'y_true': data.y[split_idx['train']['paper']], 'y_pred': y_pred[split... |
class ArgParser(ap.ArgumentParser):
def __init__(self, *args, **kwargs):
super(ArgParser, self).__init__(*args, **kwargs)
def convert_arg_line_to_args(self, line):
line = line.split()
if (line and (line[0][0] not in ('#', ';'))):
if (line[0][0] != '-'):
line[0... |
def test_option_regular_axis1():
a1 = ak.from_json('[[0.0, 1.1], null, [2.2, 3.3]]')
a2 = ak.from_json('[[4.4, 5.5, 6.6], [7, 8, 9], [7.7, 8.8, 9.9]]')
a1 = ak.to_regular(a1, axis=1)
a2 = ak.to_regular(a2, axis=1)
c = ak.concatenate([a1, a2], axis=1)
assert (c.to_list() == [[0.0, 1.1, 4.4, 5.5, ... |
class Trefethen(Benchmark):
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip(([(- 10.0)] * self.N), ([10.0] * self.N)))
self.custom_bounds = [((- 5), 5), ((- 5), 5)]
self.global_optimum = [[(- 0.), 0.]]
self.fglob = (- 3.)
def... |
def clean_cache(c):
    """Remove Python bytecode artifacts, editor backups, and the mypy cache."""
    for pattern in ("'*.pyc'", "'*.pyo'", "'*~'"):
        c.run("find . -name %s -exec rm -f {} +" % pattern)
    c.run("find . -name '__pycache__' -exec rm -fr {} +")
    c.run('rm -fr .mypy_cache')
class RandomSplitter(AbstractPOPSplitter):
def __init__(self, num_subproblems, split_fraction=0.1):
super().__init__(num_subproblems)
self.split_fraction = split_fraction
def split(self, problem):
sub_problems = [problem.copy() for _ in range(self._num_subproblems)]
for sp in sub... |
def dataio_prepare(hparams, tokenizer):
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(csv_path=os.path.join(hparams['data_folder'], 'train.csv'), replacements={'data_root': hparams['data_folder']})
if (hparams['sorting'] in ['descending', 'ascending']):
train_data = train_data.filtered_sort... |
class GTLayer(nn.Module):
def __init__(self, in_channels, out_channels, num_nodes, first=True):
super(GTLayer, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.first = first
self.num_nodes = num_nodes
if (self.first == True):
... |
_tokenizers
class HerbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = HerbertTokenizer
rust_tokenizer_class = HerbertTokenizerFast
test_rust_tokenizer = True
def setUp(self):
super().setUp()
with open(f'{get_tests_dir()}/fixtures/sample_text_no_unicode.txt... |
def resolver_factory(rootdir: Path, mounts: MountPathType) -> ResolverType:
def resolver(filename: str) -> Path:
path = Path(filename)
for (host_path, mount_path) in mounts:
if ((mount_path == path) or (mount_path in path.parents)):
path = host_path.joinpath(path.relative... |
def p_comp_if(s, body):
    # Parse the `if <condition>` clause of a comprehension. `s` is the token
    # stream positioned on the `if` token; the condition uses the
    # no-conditional-expression grammar (p_test_nocond), and the remainder of
    # the comprehension (p_comp_iter) becomes the body of the if-clause.
    pos = s.position()
    s.next()
    test = p_test_nocond(s)
    return Nodes.IfStatNode(pos, if_clauses=[Nodes.IfClauseNode(pos, condition=test, body=p_comp_iter(s, body))], else_clause=None)
_model
def efficientnet_b4(pretrained=False, **kwargs):
    """EfficientNet-B4: channel multiplier 1.4, depth multiplier 1.8."""
    return _gen_efficientnet('efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
def setup_fake_data(node, sdfg, state, bwd) -> Tuple[(str, str)]:
free_fake_data_code = ''
init_code = ''
for (edge, is_input) in node.iter_edges(state):
conn = (edge.dst_conn if is_input else edge.src_conn)
desc = (in_desc_with_name(node, state, sdfg, conn) if is_input else out_desc_with_na... |
def _worker_loop(dataset, index_queue, data_queue, done_event, collate_fn, seed, init_fn, worker_id):
try:
global _use_shared_memory
_use_shared_memory = True
_set_worker_signal_handlers()
torch.set_num_threads(1)
random.seed(seed)
torch.manual_seed(seed)
data... |
class Depth_Loss(nn.Module):
def __init__(self):
super(Depth_Loss, self).__init__()
def forward(self, pred, label):
(n, c, h, w) = pred.size()
assert (c == 1)
pred = pred.squeeze()
label = label.squeeze()
adiff = torch.abs((pred - label))
batch_max = (0.2 ... |
def kaldi_dir_to_csv(data_dir: str, csv: str):
logger.info(f'Convert kaldi data directory {data_dir} into csv {csv}')
data_dir: Path = Path(data_dir)
assert (data_dir / 'wav.scp').is_file()
assert (data_dir / 'segments').is_file()
assert (data_dir / 'utt2spk').is_file()
assert (data_dir / 'reco2... |
class RRInterpreter(StackInterpreter):
name = 'rr'
def __init__(self):
mc_retval = MemoryChunkRRRetval('retval', ty_mpfr)
super(RRInterpreter, self).__init__(ty_mpfr, mc_retval=mc_retval)
self.err_return = '0'
self.mc_py_constants = MemoryChunkConstants('py_constants', ty_python)... |
def get_backend(backend, approx_type):
if (backend == 'kazuki'):
if (approx_type == 'ggn'):
return AsdlGGN
else:
return AsdlEF
elif (backend == 'backpack'):
if (approx_type == 'ggn'):
return BackPackGGN
else:
return BackPackEF
e... |
def get_regularizer(config_data):
    """Build the weight regularizer described by config_data['regularizer'].

    Only 'l2' is supported; any other name yields None (no regularization).
    """
    reg_conf = config_data['regularizer']
    if reg_conf['name'] == 'l2':
        return regs.l2(l=reg_conf['l2'])
    return None
class TFRobertaModel():
    """Placeholder that raises a helpful error when TensorFlow is missing."""

    def __init__(self, *args, **kwargs):
        # Any attempt to use this dummy immediately reports the absent backend.
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
def count_doc(paragraphs: dict):
text_intro = paragraphs.get('intro')
no_intro = count_words(text_intro)
text_summary = paragraphs.get('Summary:')
if (text_summary and (not ('contains no summary' in text_summary))):
no_summ = count_words(text_summary)
else:
no_summ = None
lengths... |
def _shear_level_to_arg(level, _hparams):
    """Scale a [0, _MAX_LEVEL] magnitude to a randomly-signed shear in [-0.3, 0.3]."""
    shear = (level / _MAX_LEVEL) * 0.3
    return (_randomly_negate(shear),)
.spark
.parametrize('data_dict', ['wrong_item_pandas_dataset', 'wrong_user_pandas_dataset'])
def test_wrong_features_type(data_dict, request):
    # Building a dataset from a fixture whose feature column has the wrong
    # type must be rejected with a TypeError. `data_dict` is parametrized
    # with fixture names and resolved lazily via request.getfixturevalue.
    with pytest.raises(TypeError):
        create_dataset(request.getfixturevalue(data_dict))
class ADMMSLIM(NeighbourRec):
def _get_ann_infer_params(self) -> Dict[(str, Any)]:
return {'features_col': None}
rho: float
threshold: float = 5
multiplicator: float = 2
eps_abs: float = 0.001
eps_rel: float = 0.001
max_iteration: int = 100
_mat_c: np.ndarray
_mat_b: np.ndarr... |
_module()
class ResizeNoImg():
def __init__(self, img_scale, keep_ratio=True):
self.img_scale = img_scale
self.keep_ratio = keep_ratio
def __call__(self, results):
(w, h) = (results['img_info']['width'], results['img_info']['height'])
if self.keep_ratio:
(new_w, new_h... |
def extract(filename, out_filename, fps):
    """Re-encode *filename* to *out_filename* at *fps* frames/s via ffmpeg.

    Returns the input filename (handy for chaining / progress reporting).
    """
    cmd = ['ffmpeg', '-loglevel', 'error', '-y', '-i', filename, '-r', str(fps), out_filename]
    subprocess.check_output(cmd)
    return filename
_model
def adv_inception_v3(pretrained=False, **kwargs):
    """Adversarially-trained Inception-V3 variant."""
    return _inception_v3('adv_inception_v3', pretrained=pretrained, **kwargs)
class RTU2a(RTU):
def pre_loop(self, sleep=0.6):
time.sleep(sleep)
def main_loop(self):
assert ((len(wadi1_bin) / 8) == len(wadi1))
count = 0
while True:
if (count >= len(wadi1_bin)):
count = 0
if (wadi1_bin[count] == '1'):
... |
def test_hypoglycemia(tmp_path: pathlib.Path):
outcome_codes = {'child_2', 'child_1_1', 'SNOMED/', 'LOINC/LP416145-3', 'child_1', 'LOINC/14749-6', 'LOINC/15074-8'}
labeler = _create_specific_labvalue_labeler(HypoglycemiaLabValueLabeler, 'severe', outcome_codes)
_assert_value_to_label_correct(labeler, 2.9, 3... |
class EnsembleBertForQuestionAnswering(nn.Module):
def __init__(self, model_1: BertForQuestionAnswering, model_2: BertForQuestionAnswering):
super().__init__()
self.model_1 = model_1
self.model_2 = model_2
self.config = self.model_1.config
self.qa_outputs = nn.Linear((self.mo... |
def req_grad_dict_to_tuple(req_grad: Dict[(Any, bool)]) -> Tuple[bool]:
    """Collapse a requires-grad mapping into a tuple of its flag values,
    in the dict's (insertion) iteration order.
    """
    # The original iterated .items() and discarded every key; the values
    # view expresses the intent directly.
    return tuple(req_grad.values())
class Property(Trigger):
def __init__(self, value=None):
super().__init__()
self.set(value)
def handle(self, value):
self.value = value
self.notify(value)
def set(self, value):
if isinstance(value, Property):
super().set(value)
self.handle(valu... |
class OrderedPartitions(Partitions):
def __classcall_private__(cls, n, k=None):
if (k is not None):
k = Integer(k)
return super().__classcall__(cls, Integer(n), k)
def __init__(self, n, k):
Partitions.__init__(self)
self.n = n
self.k = k
def __contains__(s... |
class SequenceStartTimeObserver(so.SearchObserver):
def __init__(self) -> None:
self._search_start_time_ns = 0
def before_search_start(self, start_time_ns: int) -> None:
stat.set_sequence_start_time(start_time_ns)
self._search_start_time_ns = start_time_ns
def before_first_search_ite... |
class GmailDeleteContact(VirtualFunctionTool):
name = 'GmailDeleteContact'
summary = 'Delete a contact from the contact list.'
parameters: List[ArgParameter] = [{'name': 'contact_id', 'type': 'string', 'description': 'The unique identifier of the contact.', 'required': True}]
returns: List[ArgReturn] = ... |
def largest_memory_first_greedy_best_fit_v1(graph: Graph, P, node_weight_function, node_mem_estimator: NodeMemoryEstimator):
bins = {i: list() for i in range(P)}
bin_weights = heapdict({i: 0 for i in range(P)})
bin_memory = heapdict({i: 0 for i in range(P)})
node_to_weight = {n: node_weight_function(n) ... |
class ImageDataset(Dataset):
def __init__(self, train, query, gallery, **kwargs):
super(ImageDataset, self).__init__(train, query, gallery, **kwargs)
def __getitem__(self, index):
(img_path, pid, camid, dsetid) = self.data[index]
img = read_image(img_path)
if (self.transform is n... |
def build_mae_model(model_type, crop_size, prompt_cfg, model_root, adapter_cfg=None):
if (not (model_type in ['mae_vitb16', 'mae_vitl16'])):
raise ValueError('Does not support other arch')
if (prompt_cfg is not None):
model = prompt_mae_vit(model_type, prompt_cfg)
else:
model = mae_v... |
def emotify_resampler(path):
(src, _) = load_audio(path=os.path.join(DATASET, 'emotify', 'audio', path), ch_format=STR_CH_FIRST, sample_rate=MUSIC_SAMPLE_RATE, downmix_to_mono=True)
if (src.shape[(- 1)] < DATA_LENGTH):
pad = np.zeros(DATA_LENGTH)
pad[:src.shape[(- 1)]] = src
src = pad
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.