code stringlengths 281 23.7M |
|---|
def _logprob_helper(rv, *values, **kwargs):
logprob = _logprob(rv.owner.op, values, *rv.owner.inputs, **kwargs)
name = rv.name
if ((not name) and (len(values) == 1)):
name = values[0].name
if name:
if isinstance(logprob, (list, tuple)):
for (i, term) in enumerate(logprob):
... |
def test_official_languages():
assert (get_official_languages('FI') == ('fi', 'sv'))
assert (get_official_languages('SE') == ('sv',))
assert (get_official_languages('CH') == ('de', 'fr', 'it'))
assert (get_official_languages('CH', de_facto=True) == ('de', 'gsw', 'fr', 'it'))
assert (get_official_lan... |
def main():
    """Build a ResNet-v1-152 inference graph on a synthetic batch and dump its TAC trace."""
    with tf.variable_scope('resnet'):
        # Random placeholder batch; real image data is not needed just to build the graph.
        batch = tf.random_uniform([BATCH_SIZE, 299, 299, 3], name='Inputs')
        logit, _ = nets.resnet_v1.resnet_v1_152(batch, 1000, scope=None)
    # Analyze the graph from the logits endpoint and persist the ordering report.
    analyzer = TAC(endpoint=logit, timeline_file='timeline.pickle')
    analyzer.save('tac_rpc_orders.txt')
def get_distributable(sender: NettingChannelEndState, receiver: NettingChannelEndState) -> TokenAmount:
(_, _, transferred_amount, locked_amount) = get_current_balanceproof(sender)
distributable = (get_balance(sender, receiver) - get_amount_locked(sender))
overflow_limit = max(((UINT256_MAX - transferred_am... |
class SCScoreModifier(SAModifier):
def __init__(self, mu: float=3, sigma: float=1):
self.mu = mu
self.sigma = sigma
def __call__(self, smi, x):
sc_score = scscorer.apply(scscorer.smi_to_fp(smi))
mod_score = np.maximum(sc_score, self.mu)
return (np.exp(((- 0.5) * np.power(... |
def get_uncertainty(models, unlabeled_loader):
models['backbone'].eval()
uncertainty = torch.tensor([]).cuda()
criterion = nn.CrossEntropyLoss()
for j in range(1):
for (inputs, labels) in unlabeled_loader:
inputs = inputs.cuda()
scores = models['backbone'](inputs)[0]
... |
class BertModel(nn.Module):
def __init__(self, args, embedding, encoder, target):
super(BertModel, self).__init__()
self.embedding = embedding
self.encoder = encoder
self.target = target
def forward(self, src, tgt_mlm, tgt_nsp, seg):
emb = self.embedding(src, seg)
... |
class XLNetTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
padding_side = 'left'
def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_ac... |
class Reduction_A(nn.Module):
def __init__(self):
super(Reduction_A, self).__init__()
self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(BasicConv2d(384, 192, kernel_size=1, stride=1), BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1), BasicCon... |
def test_multi_index():
widget = QgridWidget(df=create_multi_index_df())
event_history = init_event_history(['filter_dropdown_shown', 'filter_changed', 'sort_changed'], widget=widget)
widget._handle_qgrid_msg_helper({'type': 'show_filter_dropdown', 'field': 'level_0', 'search_val': None})
widget._handle... |
def test_loading_simple_extension(extensionregistry, mocker):
class SimpleExtension(object):
LOAD_IF = staticmethod((lambda config: True))
extensionregistry.load(mocker.MagicMock())
assert (len(extensionregistry.extensions) == 1)
assert (extensionregistry.extensions[0] == SimpleExtension)
as... |
class MedrxivClusteringS2S(AbsTaskClustering):
def description(self):
return {'name': 'MedrxivClusteringS2S', 'hf_hub_name': 'mteb/medrxiv-clustering-s2s', 'description': 'Clustering of titles from medrxiv. Clustering of 10 sets, based on the main category.', 'reference': ' 'type': 'Clustering', 'category':... |
class DummyEncoder(Encoder):
    """Stub Encoder for use where the interface is required but no real model is.

    Reports a fixed embedding size, is never trainable, and treats forward,
    save and load as no-ops.
    """
    def trainable(self) -> bool:
        # Frozen: this encoder never participates in training.
        return False
    def embedding_size(self) -> int:
        # Fixed, arbitrary embedding width reported to consumers.
        return 42
    def forward(self, batch: TensorInterchange) -> Tensor:
        # No-op: returns None despite the Tensor annotation — callers must not
        # rely on the output of this dummy.
        pass
    def save(self, output_path: str):
        # Persistence is intentionally a no-op.
        pass
    def load(cls, input_path: str) -> Encoder:
        # NOTE(review): first parameter is named `cls` but there is no
        # @classmethod decorator visible here — confirm against the Encoder
        # base class whether the decorator is inherited or missing.
        pass
class TestOptimizer(unittest.TestCase):
def testExpandParamsGroups(self):
params = [{'params': ['p1', 'p2', 'p3', 'p4'], 'lr': 1.0, 'weight_decay': 3.0}, {'params': ['p2', 'p3', 'p5'], 'lr': 2.0, 'momentum': 2.0}, {'params': ['p1'], 'weight_decay': 4.0}]
out = _expand_param_groups(params)
gt... |
def test_exporter_handles_extras_next_to_non_extras(tmp_path: Path, poetry: Poetry) -> None:
poetry.locker.mock_lock_data({'package': [{'name': 'localstack', 'python-versions': '*', 'version': '1.0.0', 'optional': False, 'dependencies': {'localstack-ext': [{'version': '>=1.0.0'}, {'version': '>=1.0.0', 'extras': ['... |
def merge_hydrobasins_shape(config_hydrobasin, hydrobasins_level):
basins_path = config_hydrobasin['destination']
output_fl = config_hydrobasin['output'][0]
files_to_merge = ['hybas_{0:s}_lev{1:02d}_v1c.shp'.format(suffix, hydrobasins_level) for suffix in config_hydrobasin['urls']['hydrobasins']['suffixes']... |
def test_mutvars():
p = expr_ast('(lambda (x) (set! x 2))')
assert (len(p.mutated_vars()) == 0)
assert p.lams[0]._mutable_var_flags[0]
p = expr_ast('(lambda (y) (set! x 2))')
assert variables_equal(p.mutated_vars(), make_symbols({'x': None}))
assert (p.lams[0]._mutable_var_flags is None)
p =... |
class EquipmentStore(Gtk.ListStore):
def __init__(self, equipment_service):
super(EquipmentStore, self).__init__(int, str, float, str, bool)
self._equipment_service = equipment_service
for equipment in equipment_service.get_all_equipment():
self._append_row(equipment)
sel... |
def haar_random_vector(n, seed=None):
    """Draw a Haar-random complex unit vector of dimension ``n``.

    Real and imaginary parts are sampled i.i.d. standard normal and the
    result is scaled to unit norm, which yields the Haar (uniform) measure
    on the complex unit sphere.

    Args:
        n: Dimension of the vector.
        seed: Optional seed fed to the global NumPy RNG for reproducibility.

    Returns:
        A complex ndarray of shape ``(n,)`` with unit norm.
    """
    if seed is not None:
        numpy.random.seed(seed)
    # Draw real then imaginary components (order matters for seeded runs).
    real_part = numpy.random.randn(n)
    imag_part = numpy.random.randn(n)
    unnormalized = real_part.astype(complex) + 1j * imag_part.astype(complex)
    # vdot conjugates its first argument, so this is sum(|v_i|^2) as a complex scalar.
    norm = numpy.sqrt(numpy.vdot(unnormalized, unnormalized))
    return unnormalized / norm
class CirruLexer(RegexLexer):
name = 'Cirru'
url = '
aliases = ['cirru']
filenames = ['*.cirru']
mimetypes = ['text/x-cirru']
version_added = '2.0'
flags = re.MULTILINE
tokens = {'string': [('[^"\\\\\\n]+', String), ('\\\\', String.Escape, 'escape'), ('"', String, '#pop')], 'escape': [('... |
class AlbertTrainingArguments(TrainingArguments):
dataloader_num_workers: int = 4
per_device_train_batch_size: int = 4
per_device_eval_batch_size: int = 4
gradient_accumulation_steps: int = 2
seq_length: int = 512
max_steps: int = 1000000
learning_rate: float = 0.00176
warmup_steps: int ... |
class Dataset_ETT_hour(Dataset):
def __init__(self, root_path, flag='train', size=None, features='S', data_path='ETTh1.csv', target='OT', scale=True):
if (size == None):
self.seq_len = ((24 * 4) * 4)
self.label_len = (24 * 4)
self.pred_len = (24 * 4)
else:
... |
class attention_up_block(nn.Module):
def __init__(self, in_ch, out_ch, num_block, block=BasicBlock, norm=nn.BatchNorm2d):
super().__init__()
self.attn = AttentionBlock(in_ch, out_ch, (out_ch // 2))
block_list = []
block_list.append(block((in_ch + out_ch), out_ch))
for i in ra... |
class TypesOracle(walkers.DagWalker):
def get_types(self, formula, custom_only=False):
types = self.walk(formula)
exp_types = self.expand_types(types)
assert (len(types) <= len(exp_types))
if custom_only:
exp_types = [x for x in exp_types if ((not x.is_bool_type()) and (n... |
def _dict_from_weights(weights: str) -> dict:
if (weights in _weights2pairs()):
pairs = _weights2pairs()[weights]
return {'langs': tuple((pair[0] for pair in pairs)), 'codes': tuple((pair[1] for pair in pairs)), 'pairs': dict(pairs)}
elif (weights.lower() in _weights2pairs()):
pairs = _w... |
class TestFreeGC(EndianTest):
def setUp(self):
self.req_args_0 = {'gc': }
self.req_bin_0 = b'<\x00\x02\x00IJ\xf6\x16'
def testPackRequest0(self):
bin = request.FreeGC._request.to_binary(*(), **self.req_args_0)
self.assertBinaryEqual(bin, self.req_bin_0)
def testUnpackRequest0... |
class Struct(object):
STRUCT = ''
ATTRS = ()
ZONES = {}
def unpack(cls, buffer):
try:
return cls(*buffer.unpack(cls.STRUCT))
except BufferError as e:
raise DNSError(('Error unpacking %s [offset=%d]: %s' % (cls.__name__, buffer.offset, e)))
def fromZone(cls, rd... |
class TestModel(unittest.TestCase):
def test_to_qubo(self):
(a, b) = (Binary('a'), Binary('b'))
exp = (((1 + (a * b)) + a) - 2)
model = exp.compile()
(qubo, offset) = model.to_qubo()
assert_qubo_equal(qubo, {('a', 'a'): 1.0, ('a', 'b'): 1.0})
self.assertTrue((offset =... |
class ether_header_t(ctypes.Structure):
_fields_ = (('ether_dhost', (ctypes.c_ubyte * 6)), ('ether_shost', (ctypes.c_ubyte * 6)), ('ether_type', ctypes.c_ushort))
def __init__(self, ql, base):
self.ql = ql
self.base = base
def updateToMem(self):
self.ql.mem.write(self.base, bytes(sel... |
def test():
avg_psnr = 0
with torch.no_grad():
for batch in testing_data_loader:
(input, target) = (batch[0].to(device), batch[1].to(device))
prediction = model(input)
mse = criterion(prediction, target)
psnr = (10 * log10((1 / mse.item())))
av... |
_config
def test_ratiotile_add_windows(manager):
for i in range(12):
manager.test_window(str(i))
if (i == 0):
assert (manager.c.layout.info()['layout_info'] == [(0, 0, 800, 600)])
elif (i == 1):
assert (manager.c.layout.info()['layout_info'] == [(0, 0, 400, 600), (400... |
def batchmining_specific_parameters(parser):
parser.add_argument('--miner_distance_lower_cutoff', default=0.5, type=float, help='Lower cutoff on distances - values below are sampled with equal prob.')
parser.add_argument('--miner_distance_upper_cutoff', default=1.4, type=float, help='Upper cutoff on distances -... |
class Flan_T5(LLM):
def __init__(self, config, needs_confirmation=False, disable_tqdm=True):
self.device = 'cuda:1'
self.config = config
self.needs_confirmation = needs_confirmation
self.disable_tqdm = disable_tqdm
self.model = AutoModelForSeq2SeqLM.from_pretrained('google/fl... |
class JumpToShip(ContextMenuUnconditional):
def __init__(self):
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
def display(self, callingWindow, srcContext):
if (srcContext != 'fittingShip'):
return False
fitTabSelected = (self.mainFrame.notebookBrowsers.GetSelection()... |
class TestBOPES(unittest.TestCase):
def test_h2_bopes_sampler(self):
seed = 50
aqua_globals.random_seed = seed
dof = partial(Molecule.absolute_distance, atom_pair=(1, 0))
m = Molecule(geometry=[['H', [0.0, 0.0, 1.0]], ['H', [0.0, 0.45, 1.0]]], degrees_of_freedom=[dof])
f_t = ... |
.parametrize('sampler', [sample_blackjax_nuts, sample_numpyro_nuts])
.parametrize('postprocessing_backend', [None, 'cpu'])
.parametrize('chains', [pytest.param(1), pytest.param(2, marks=pytest.mark.skipif((len(jax.devices()) < 2), reason='not enough devices'))])
.parametrize('postprocessing_vectorize', ['scan', 'vmap']... |
def test_derive_private_key_errors(backend):
curve = ec.SECP256K1()
_skip_curve_unsupported(backend, curve)
with pytest.raises(TypeError):
ec.derive_private_key('one', curve, backend)
with pytest.raises(TypeError):
ec.derive_private_key(10, 'five', backend)
with pytest.raises(ValueEr... |
def test_message_with_multiline_comment():
buf = BytesIO("/* NOTE: hello\nand bonjour\n and servus */\nmsg = _('Bonjour a tous')\n".encode('utf-8'))
messages = list(extract.extract_javascript(buf, ('_',), ['NOTE:'], {}))
assert (messages[0][2] == 'Bonjour a tous')
assert (messages[0][3] == ['NOTE: hell... |
class ResNetBlockBase(nn.Module):
def __init__(self, in_channels, out_channels, stride):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = stride
def freeze(self):
for p in self.parameters():
p.requires_grad = Fal... |
def parse_value(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[(Pos, Any)]:
try:
char: Optional[str] = src[pos]
except IndexError:
char = None
if (char == '"'):
if src.startswith('"""', pos):
return parse_multiline_str(src, pos, literal=False)
return parse_... |
class QueryVersion(rq.ReplyRequest):
_request = rq.Struct(rq.Card8('opcode'), rq.Opcode(0), rq.RequestLength(), rq.Card8('major_version'), rq.Card8('minor_version'), rq.Pad(2))
_reply = rq.Struct(rq.ReplyCode(), rq.Pad(1), rq.Card16('sequence_number'), rq.ReplyLength(), rq.Card16('major_version'), rq.Card16('mi... |
def save_results_charts(G, deformator, params, out_dir):
deformator.eval()
G.eval()
z = make_noise(3, G.dim_z, params.truncation).cuda()
inspect_all_directions(G, deformator, os.path.join(out_dir, 'charts_s{}'.format(int(params.shift_scale))), zs=z, shifts_r=params.shift_scale)
inspect_all_direction... |
class DiffusionPipeline(ConfigMixin):
config_name = 'model_index.json'
def register_modules(self, **kwargs):
from diffusers import pipelines
for (name, module) in kwargs.items():
library = module.__module__.split('.')[0]
pipeline_dir = module.__module__.split('.')[(- 2)]
... |
def convert_bunit(bunit):
bunit_lower = re.sub('\\s', '', bunit.lower())
if (bunit_lower == 'jy/beam'):
unit = (u.Jy / u.beam)
else:
try:
unit = u.Unit(bunit)
except ValueError:
warnings.warn("Could not parse unit {0}. If you know the correct unit, try u.add_e... |
class RegChannel(TourneyButton):
def __init__(self, ctx: Context, letter: str):
super().__init__(emoji=ri(letter))
self.ctx = ctx
async def callback(self, interaction: discord.Interaction):
(await interaction.response.defer())
m = (await self.ctx.simple('Mention the channel where... |
def test_nest_components_weight_init():
model_cfg = dict(type='FooModel', init_cfg=[dict(type='Constant', val=1, bias=2, layer='Linear', override=dict(type='Constant', name='reg', val=13, bias=14)), dict(type='Constant', val=3, bias=4, layer='Conv1d'), dict(type='Constant', val=5, bias=6, layer='Conv2d')], componen... |
def selectDevice():
devices = [d for d in usb.find(find_all=True) if (d.bDeviceClass in {0, 2, 255})]
if (not devices):
print('No devices detected')
return None
selection = (- 1)
selected = False
print('PyUSB VCP Terminal: use ctrl+c or ctrl+d to exit')
while (not selected):
... |
def timeout(sec, raise_sec=1):
def decorator(func):
(func)
def wrapped_func(*args, **kwargs):
err_msg = f'Function {func.__name__} timed out after {sec} seconds'
if (sys.platform != 'win32'):
def _handle_timeout(signum, frame):
raise Timeou... |
class ArgumentAdder(_ArgumentChanger):
def __init__(self, index, name, default=None, value=None):
self.index = index
self.name = name
self.default = default
self.value = value
def change_definition_info(self, definition_info):
for pair in definition_info.args_with_default... |
class CodebookReassign(EpochFinishHook):
def __init__(self, freq) -> None:
super().__init__()
self._freq = freq
def epochFinish(self, step: int, epoch: int, trainer: _baseTrainer, *args: Any, logger: Saver, **kwds: Any) -> Any:
if ((epoch % self._freq) != 0):
return
l... |
class ResNet(nn.Module):
def __init__(self, block: Type[Union[(BasicBlock, Bottleneck)]], layers: List[int], num_classes: int=1000, zero_init_residual: bool=False, use_last_fc: bool=False, groups: int=1, width_per_group: int=64, replace_stride_with_dilation: Optional[List[bool]]=None, norm_layer: Optional[Callable[... |
.parametrize(parameter_string, scenarios)
def test_find_mip(direction, subsystem, cut, mechanism, purview, expected):
result = subsystem.find_mip(direction, mechanism, purview)
if expected:
expected = [RepertoireIrreducibilityAnalysis(direction=direction, partition=expected_partition, mechanism=mechanis... |
class NetVLAD(nn.Module):
def __init__(self, num_clusters=16, dim=512, alpha=100.0, normalize_input=True):
super(NetVLAD, self).__init__()
self.num_clusters = num_clusters
self.dim = dim
self.alpha = alpha
self.normalize_input = normalize_input
self.conv = nn.Conv2d(d... |
def validate(model, data_loader, loss_func):
device = next(model.parameters()).device
(all_preds, all_labels) = ({}, {})
with torch.no_grad():
val_loss = 0
for (inputs, labels) in tqdm(data_loader):
inputs['sequence'] = inputs['sequence'].to(device)
preds = model(inpu... |
class _CollectionsApi():
def __init__(self, api_client: 'Union[ApiClient, AsyncApiClient]'):
self.api_client = api_client
def _build_for_collection_cluster_info(self, collection_name: str):
path_params = {'collection_name': str(collection_name)}
headers = {}
return self.api_clien... |
class BotCommitAndPullTest(TestCase):
def test_multiple_updates_in_file(self):
bot = bot_factory()
bot.provider.create_branch = Mock()
bot.provider.create_commit.side_effect = ['sha1', 'sha2', 'sha3']
bot.create_pull_request = Mock()
requirement = Mock()
requirement.u... |
def getAllBuiltinHooks() -> Dict[(HookType, ChainHook)]:
    """Collect every built-in hook and split the collection by hook type.

    NOTE(review): the ``raise NotImplementedError`` below makes everything
    after it unreachable dead code — confirm whether this stub guard should
    be removed (re-enabling the body) or the body deleted.
    """
    raise NotImplementedError
    # --- unreachable below this line while the guard above remains ---
    allHooks = list()
    for hook in BuiltInHooks.values():
        if hasattr(hook, 'hookType'):
            # Entry already carries its hookType; use it directly.
            allHooks.append(hook)
        else:
            # Entry must be called to obtain a hook carrying hookType.
            allHooks.append(hook())
    return splitHooks(*allHooks)
def esplugin(a):
def _call(s):
args = shlex.split(s)
if (args[0] in es.commands):
try:
es.command(args)
except Exception as e:
print(('error: %s' % str(e)))
return 1
return 0
return {'name': 'ESILSolve', 'license': 'GPL'... |
def setUpModule():
global mol, rhf
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom.extend([['C', ((- 0.), 0., (- 0.))], ['C', (0., 0., (- 0.))], ['C', (1., 1., (- 0.))], ['C', (0., 3., (- 0.))], ['C', ((- 0.), 3., (- 0.))], ['C', ((- 1.), 1., (- 0.))], ['H', ((- 1.), (- 0.), (- 0.))], ['... |
class RogueRecordInfo(Struct):
name: str
finish_time: RogueTime
score: int
final_lineup: List[RogueAvatar]
base_type_list: List[RogueBaseType]
cached_avatars: List[RogueAvatar]
buffs: List[RogueBuffs]
miracles: List[RogueMiracles]
difficulty: int
progress: int
detail_h: Union... |
def qtwebengine_versions(*, avoid_init: bool=False) -> WebEngineVersions:
override = os.environ.get('QUTE_QTWEBENGINE_VERSION_OVERRIDE')
if (override is not None):
return WebEngineVersions.from_pyqt(override, source='override')
if machinery.IS_QT6:
try:
from qutebrowser.qt.webeng... |
class ModelBase():
def save_network(self, network, optimizer, epoch, lr_scheduler, save_dir):
checkpoint = {'network': network.state_dict()}
if (not os.path.exists(save_dir)):
os.makedirs(save_dir)
save_filename = ('%s_net_CR.pth' % str(epoch))
save_path = os.path.join(sa... |
class URIScenarios(Fixture):
def all_settings_given(self):
self.uri = 'myprefix://theuser::123/thedb'
self.database_name = 'thedb'
self.user_name = 'theuser'
self.password = 'thepasswd'
self.host = 'thehost'
self.port = 123
def not_all_settings_given(self):
... |
class BaseModelOutputWithPastAndCrossAttentions(ModelOutput):
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions:... |
def make_dataset(dir):
images = []
assert os.path.isdir(dir), ('%s is not a valid directory' % dir)
train_root = os.path.join(dir, 'train')
if (not os.path.exists(train_root)):
os.mkdir(train_root)
test_root = os.path.join(dir, 'test')
if (not os.path.exists(test_root)):
os.mkdir... |
def create_ema_and_scales_fn(target_ema_mode, start_ema, scale_mode, start_scales, end_scales, total_steps, distill_steps_per_iter):
def ema_and_scales_fn(step):
if ((target_ema_mode == 'fixed') and (scale_mode == 'fixed')):
target_ema = start_ema
scales = start_scales
elif (... |
def _test_preset(rdvgame_file: Path, expected_results_file: Path, mocker):
description = LayoutDescription.from_file(rdvgame_file)
players_config = PlayersConfiguration(0, {0: 'Prime', 1: 'Echoes'})
cosmetic_patches = PrimeCosmeticPatches(use_hud_color=True, hud_color=(255, 0, 0), suit_color_rotations=(0, 4... |
class Saver(object):
def __init__(self, args):
self.args = args
self.directory = os.path.join('run', args.dataset, args.checkname)
self.runs = sorted(glob.glob(os.path.join(self.directory, 'experiment_*')))
run_id = ((int(self.runs[(- 1)].split('_')[(- 1)]) + 1) if self.runs else 0)
... |
class Migration(migrations.Migration):
dependencies = [('sponsors', '0070_auto__2055')]
operations = [migrations.AddField(model_name='requiredimgasset', name='due_date', field=models.DateField(blank=True, default=None, null=True)), migrations.AddField(model_name='requiredimgassetconfiguration', name='due_date',... |
def build_circuit(qubit_pairs: List[List[cirq.Qid]], pauli: str, n_shots: int, rand_state: np.random.RandomState) -> Tuple[(cirq.Circuit, List[Dict[(str, int)]])]:
a_qubits = [pair[0] for pair in qubit_pairs]
b_qubits = [pair[1] for pair in qubit_pairs]
all_qubits = np.concatenate(qubit_pairs)
flip_para... |
class EmulateCallableRedirected():
def __init__(self, conn_number, routing_conn, name):
(self.conn_number, self.routing_conn) = (conn_number, routing_conn)
self.call_name = name
def __call__(self, *args):
return self.routing_conn.reval(*(('RedirectedRun', self.conn_number, self.call_name... |
class Vocab(object):
def __init__(self, src_sents=None, trg_sents=None, src_vocab_size=50000, trg_vocab_size=50000, remove_singleton=True, share_vocab=False):
if ((src_sents is not None) and (trg_sents is not None)):
if share_vocab:
print('initialize share vocabulary ..')
... |
def read_audio(filename, header_only=False, channel=0):
if isinstance(filename, Path):
filename = str(filename)
wf = wave.open(filename)
audio = Audio()
channel_number = wf.getnchannels()
assert (channel < channel_number)
audio.set_header(sample_rate=wf.getframerate(), sample_size=wf.get... |
class Event():
id: strawberry.ID
conference: Annotated[('Conference', strawberry.lazy('api.conferences.types'))]
title: str = strawberry.field(resolver=make_localized_resolver('title'))
slug: str = strawberry.field(resolver=make_localized_resolver('slug'))
content: str = strawberry.field(resolver=ma... |
def get_projects(cache_name):
    """Load the ``(projects, public)`` pair from the JSON cache *cache_name*.

    If the cache file does not exist yet, it is built via ``cache_projects``;
    any other I/O error propagates unchanged to the caller.

    Args:
        cache_name: Path of the JSON cache file.

    Returns:
        tuple: ``(projects, public)`` as read from (or written to) the cache.
    """
    try:
        f = open(cache_name)
    except FileNotFoundError:
        # Cache miss: (re)build the cache from scratch.
        # FileNotFoundError is exactly OSError-with-ENOENT, so this preserves
        # the original errno check while re-raising every other I/O failure.
        (projects, public) = cache_projects(cache_name)
    else:
        with f:
            (projects, public) = json.load(f)
    return (projects, public)
def dataloader_msvd_test(args, tokenizer, subset='test'):
msvd_testset = MSVD_DataLoader(subset=subset, data_path=args.data_path, features_path=args.features_path, max_words=args.max_words, feature_framerate=args.feature_framerate, tokenizer=tokenizer, max_frames=args.max_frames, frame_order=args.eval_frame_order, ... |
def test_SKCByCriteriaFilterABC_not_implemented_make_mask():
dm = skc.mkdm(matrix=[[7, 5, 35], [5, 4, 26], [5, 6, 28], [1, 7, 30], [5, 8, 30]], objectives=[max, max, min], weights=[2, 4, 1], alternatives=['PE', 'JN', 'AA', 'MM', 'FN'], criteria=['ROE', 'CAP', 'RI'])
class FooFilter(filters.SKCByCriteriaFilterAB... |
def knn(Mxx, Mxy, Myy, k, sqrt):
n0 = Mxx.size(0)
n1 = Myy.size(0)
label = torch.cat((torch.ones(n0), torch.zeros(n1)))
M = torch.cat((torch.cat((Mxx, Mxy), 1), torch.cat((Mxy.transpose(0, 1), Myy), 1)), 0)
if sqrt:
M = M.abs().sqrt()
INFINITY = float('inf')
(val, idx) = (M + torch.d... |
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
slow_tokenizer_class = Ret... |
class Scalar(pybamm.Symbol):
def __init__(self, value, name=None):
self.value = value
if (name is None):
name = str(self.value)
super().__init__(name)
def _from_json(cls, snippet: dict):
instance = cls.__new__(cls)
instance.__init__(snippet['value'], name=snip... |
def precise_wait(t_end: float, slack_time: float=0.001, time_func=time.monotonic):
t_start = time_func()
t_wait = (t_end - t_start)
if (t_wait > 0):
t_sleep = (t_wait - slack_time)
if (t_sleep > 0):
time.sleep(t_sleep)
while (time_func() < t_end):
pass
ret... |
.parametrize('page_size', [10, 20, 50, 100, 200, 500, 1000])
.parametrize('descending', [False, True])
def test_paginate(page_size, descending, initialized_db):
for i in range(0, 522):
Role.create(name=('testrole%s' % i))
query = Role.select().where((Role.name ** 'testrole%'))
all_matching_roles = l... |
def test_mdp_parent():
mdp = MolMDPExtended('./data/blocks_PDB_105.json')
mdp.build_translation_table()
import tqdm
rng = np.random.RandomState(142)
nblocks = mdp.num_blocks
for i in tqdm.tqdm(range(10000)):
mdp.molecule = mol = BlockMoleculeDataExtended()
nblocks = rng.randint(1... |
def parse(content, strict=False, custom_tags_parser=None):
data = {'media_sequence': 0, 'is_variant': False, 'is_endlist': False, 'is_i_frames_only': False, 'is_independent_segments': False, 'playlist_type': None, 'playlists': [], 'segments': [], 'iframe_playlists': [], 'media': [], 'keys': [], 'rendition_reports':... |
class Effect6352(BaseEffect):
type = 'passive'
def handler(fit, src, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Weapon Disruption')), 'falloffEffectiveness', src.getModifiedItemAttr('roleBonus'), **kwargs)
fit.modules.filteredItemBoost... |
def show_list_of_products(update, context):
product = context.user_data[products_data_key]['products'].next()
markup = tamplate_for_show_a_list_of_products(pattern_identifier, context)
text = get_text_for_product(product, context)
update.message.reply_photo(product.image_id, caption=text, reply_markup=m... |
def test_get_accounts(keystore_mock):
account_manager = AccountManager(keystore_mock)
expected_accounts = {'0x0d5a0e4FECE4b84365b9B8DbA6e6D41348C73645': os.path.join(keystore_mock, 'UTC--2016-10-26T16-55-53.Z--0d5a0e4fece4b84365b9b8dba6e6d41348c73645'), '0xd18b82f7b4a0F18e1ED24623D23b20': os.path.join(keystore_... |
class _InvalidRange():
def __init__(self):
self.start = sys.maxsize
self.end = 0
def insert(self, start, length):
if (self.start >= start):
self.start += length
if (self.end >= start):
self.end += length
self.invalidate(start, (start + length))
... |
class Statistics(object):
def __init__(self, loss=0, n_words=0, n_correct=0):
self.loss = loss
self.n_words = n_words
self.n_docs = 0
self.n_correct = n_correct
self.n_src_words = 0
self.start_time = time.time()
def all_gather_stats(stat, max_size=4096):
s... |
def copy_and_replace(original, replace=None, do_not_copy=None):
(replace, do_not_copy) = ((replace or {}), (do_not_copy or {}))
memo = dict(DEFAULT_MEMO)
for item in do_not_copy:
memo[id(item)] = item
for (item, replacement) in replace.items():
memo[id(item)] = replacement
return dee... |
def scan_qrcode(*, parent: Optional[QWidget], config: 'SimpleConfig', callback: Callable[([bool, str, Optional[str]], None)]) -> None:
if ((sys.platform == 'darwin') or (sys.platform in ('windows', 'win32'))):
_scan_qrcode_using_qtmultimedia(parent=parent, config=config, callback=callback)
else:
... |
def test_window_by_interval():
ds = simulate_genotype_call_dataset(n_variant=5, n_sample=3, seed=0)
assert (not has_windows(ds))
ds['variant_position'] = (['variants'], np.array([1, 4, 6, 8, 12]))
ds['interval_contig_name'] = (['intervals'], np.array(['0', '0']))
ds['interval_start'] = (['intervals'... |
class SystemInfo():
WinDir = environ.get('WinDir', '')
ProgramFiles = environ.get('ProgramFiles', '')
ProgramFilesx86 = environ.get('ProgramFiles(x86)', ProgramFiles)
def __init__(self, registry_info, vc_ver=None):
self.ri = registry_info
self.pi = self.ri.pi
self.known_vs_paths ... |
class HierarchicalMachine(Machine):
state_cls = NestedState
transition_cls = NestedTransition
event_cls = NestedEvent
def __init__(self, model=Machine.self_literal, states=None, initial='initial', transitions=None, send_event=False, auto_transitions=True, ordered_transitions=False, ignore_invalid_trigge... |
def start_stats_collection(batched_delta_stats_compute_list: List[DeltaAnnotated], columns: List[str], stat_results_s3_bucket: Optional[str]=None, metastats_results_s3_bucket: Optional[str]=None, deltacat_storage=unimplemented_deltacat_storage, **kwargs) -> Dict[(str, List[DeltaStats])]:
delta_stats_compute_pending... |
def local_gamma(filepath_ref, filepath_eval, result, random_subset=None, max_gamma=1.1, dose_threshold=1, distance_threshold=1):
gamma = run_gamma(filepath_ref, filepath_eval, random_subset, max_gamma, dose_threshold, distance_threshold)
gamma_pass = calculate_pass_rate(gamma)
assert (np.round(gamma_pass, d... |
def get_all_leaf_targets(file: MypyFile) -> list[TargetInfo]:
    """Return every function-like definition in *file*.

    Each entry is a ``(fullname, node, active_type)`` triple taken from the
    file's local definitions, keeping only function-like nodes.
    """
    # Only concrete function-like symbol nodes count as leaf targets.
    function_like = (FuncDef, OverloadedFuncDef, Decorator)
    return [
        (fullname, symbol.node, active_type)
        for fullname, symbol, active_type in file.local_definitions()
        if isinstance(symbol.node, function_like)
    ]
_sentencepiece
_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = XLMRobertaTokenizer
rust_tokenizer_class = XLMRobertaTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
tokenizer =... |
def make_dict_structure_fn(cl: type[T], converter: BaseConverter, _cattrs_forbid_extra_keys: (bool | Literal['from_converter'])='from_converter', _cattrs_use_linecache: bool=True, _cattrs_prefer_attrib_converters: bool=False, _cattrs_detailed_validation: (bool | Literal['from_converter'])='from_converter', _cattrs_use_... |
class AnnotationTransform(object):
def __init__(self, keep_difficult=True):
self.class_to_ind = dict(zip(VOC_CLASSES, range(len(VOC_CLASSES))))
self.keep_difficult = keep_difficult
def __call__(self, target):
width = float(target.find('size').find('width').text)
height = float(ta... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.