code stringlengths 281 23.7M |
|---|
def LoadSystemModule(lib_dir, modname):
import imp
for suffix_item in imp.get_suffixes():
if (suffix_item[0] == '_d.pyd'):
suffix = '_d'
break
else:
suffix = ''
filename = ('%s%d%d%s.dll' % (modname, sys.version_info[0], sys.version_info[1], suffix))
filename ... |
def distributed_main(i, main, cfg: FairseqConfig, kwargs):
cfg.distributed_training.device_id = i
if (torch.cuda.is_available() and (not cfg.common.cpu) and (not cfg.common.tpu)):
torch.cuda.set_device(cfg.distributed_training.device_id)
if (cfg.distributed_training.distributed_rank is None):
... |
def correctModule(module):
    """Normalize a module name to its canonical public package.

    Maps the private '_io' module to 'io' and collapses any submodule of
    pyunity.physics / pyunity.scenes / pyunity.values down to the package
    itself; every other name is returned unchanged.
    """
    if module == '_io':
        return 'io'
    for package in ('pyunity.physics', 'pyunity.scenes', 'pyunity.values'):
        if module.startswith(package):
            return package
    return module
def get_selection(args):
if (not args):
return [(layout, sorted(specs[layout].keys())) for layout in sorted(specs.keys())]
errors = []
selection = []
for arg in args:
if (':' in arg):
(layout, names) = arg.split(':')
if (layout not in specs):
error... |
class CamembertTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>',... |
_api()
class pluck(Stream):
def __init__(self, upstream, pick, **kwargs):
self.pick = pick
super(pluck, self).__init__(upstream, **kwargs)
def update(self, x, who=None, metadata=None):
if isinstance(self.pick, list):
return self._emit(tuple([x[ind] for ind in self.pick]), met... |
class JobItem(models.Model):
title = models.CharField('', max_length=255)
link = models.URLField('')
description = models.TextField(' ', null=True, blank=True)
created_at = models.DateTimeField(' ', auto_now_add=True, null=True, blank=True)
updated_at = models.DateTimeField(' ', auto_now=True, null=... |
class Trainer(nn.Module):
def __init__(self, args, device, rank):
super(Trainer, self).__init__()
self.args = args
self.batch_size = args.batch_size
self.gen = Generator(args.size, args.latent_dim_style, args.latent_dim_motion, args.channel_multiplier).to(device)
self.dis = D... |
def get_node_list(cfg, action, core_v1):
def get_node(node_name, label_selector, instance_kill_count, action, core_v1):
list_nodes_func = (list_startable_nodes if (action == Actions.START) else list_killable_nodes)
if (node_name in list_nodes_func(core_v1)):
return [node_name]
el... |
class BinOpNode(CtrlNode):
_dtypes = ['float64', 'float32', 'float16', 'int64', 'int32', 'int16', 'int8', 'uint64', 'uint32', 'uint16', 'uint8']
uiTemplate = [('outputType', 'combo', {'values': (['no change', 'input A', 'input B'] + _dtypes), 'index': 0})]
def __init__(self, name, fn):
self.fn = fn
... |
class DeeplabV3(nn.Module):
def __init__(self, in_channels, out_channels, hidden_channels=256, dilations=(12, 24, 36), norm_act=ABN, pooling_size=None):
super(DeeplabV3, self).__init__()
self.pooling_size = pooling_size
self.map_convs = nn.ModuleList([nn.Conv2d(in_channels, hidden_channels, ... |
class Migration(migrations.Migration):
dependencies = [('options', '0016_meta')]
operations = [migrations.AddField(model_name='option', name='text_lang3', field=models.CharField(blank=True, help_text='The text for this option in the tertiary language.', max_length=256, null=True, verbose_name='Text (tertiary)')... |
class Migration(migrations.Migration):
dependencies = [('questions', '0059_question_default_external_id')]
operations = [migrations.AlterField(model_name='question', name='widget_type', field=models.CharField(choices=[('text', 'Text'), ('textarea', 'Textarea'), ('yesno', 'Yes/No'), ('checkbox', 'Checkboxes'), (... |
def test_force_continue(hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins['default']['tests'] = False
config_file.save()
project_name = 'My.App'
with temp_dir.as_cwd():
result = hatch('new', project_name)
assert (result.exit_code == 0), result.output
project_path... |
def simulate_genotype_call_dataset(n_variant: int, n_sample: int, n_ploidy: int=2, n_allele: int=2, n_contig: int=1, seed: Optional[int]=0, missing_pct: Optional[float]=None, phased: Optional[bool]=None, additional_variant_fields: Optional[dict]=None) -> Dataset:
if (missing_pct and ((missing_pct < 0.0) or (missing... |
def argparser(prefix=None, method=None, batch_size=50, epochs=60, verbose=200, lr=0.001, thres=0.02, ratio=0.2, seed=0, epsilon=0.1, starting_epsilon=None, l1_proj=None, l1_train='exact', l1_test='exact', opt='sgd', momentum=0.9, weight_decay=0.0005):
parser = argparse.ArgumentParser()
parser.add_argument('--op... |
class TestClientRefund(ClientTestCase):
def setUp(self):
super(TestClientRefund, self).setUp()
self.base_url = '{}/refunds'.format(self.base_url)
def test_refund_all(self):
result = mock_file('refund_collection')
url = self.base_url
responses.add(responses.GET, url, statu... |
def integrate_bodies(body_q: wp.array(dtype=wp.transform), body_qd: wp.array(dtype=wp.spatial_vector), body_f: wp.array(dtype=wp.spatial_vector), body_com: wp.array(dtype=wp.vec3), m: wp.array(dtype=float), I: wp.array(dtype=wp.mat33), inv_m: wp.array(dtype=float), inv_I: wp.array(dtype=wp.mat33), gravity: wp.vec3, dt:... |
class HarmonicSTFT(nn.Module):
def __init__(self, sample_rate=16000, n_fft=513, win_length=None, hop_length=None, pad=0, power=2, normalized=False, n_harmonic=6, semitone_scale=2, bw_Q=1.0, learn_bw=None):
super(HarmonicSTFT, self).__init__()
self.sample_rate = sample_rate
self.n_harmonic = ... |
def initilize_modules(modules):
for m in modules:
if (isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Linear)):
torch.nn.init.xavier_uniform(m.weight)
if (m.bias is not None):
torch.nn.init.constant(m.bias, 0)
elif isinstance(... |
def omega_ratio(returns_tms: SimpleReturnsSeries, threshold: float=0) -> float:
returns_tms = returns_tms.to_simple_returns()
downside = 0
upside = 0
for ret in returns_tms.values:
if (ret < threshold):
downside += (threshold - ret)
else:
upside += (ret - threshol... |
class SamePad(nn.Module):
def __init__(self, filter_size, pad_mode='constant', **kwargs):
super(SamePad, self).__init__()
self.pad_size = [int(((filter_size - 1) / 2.0)), int(math.ceil(((filter_size - 1) / 2.0))), int(((filter_size - 1) / 2.0)), int(math.ceil(((filter_size - 1) / 2.0)))]
sel... |
class TestResponseUnmarshaller():
host_url = '
(scope='session')
def spec_dict(self, v30_petstore_content):
return v30_petstore_content
(scope='session')
def spec(self, v30_petstore_spec):
return v30_petstore_spec
(scope='session')
def response_unmarshaller(self, spec):
... |
def make_dot(role, out_path):
with open(out_path, 'w') as f:
f.write(HEADER)
f.write('\n IDLE [label=<IDLE<BR/><i>start state</i>>]\n // move ERROR down to the bottom\n {rank=same CLOSED ERROR}\n')
edges = Edges()
CORE_EVENTS = {Request, InformationalResponse, Response, Data, EndO... |
def normal_ordered(operator, hbar=1.0):
kwargs = {}
if isinstance(operator, FermionOperator):
ordered_operator = FermionOperator()
order_fn = normal_ordered_ladder_term
kwargs['parity'] = (- 1)
elif isinstance(operator, BosonOperator):
ordered_operator = BosonOperator()
... |
class ImageMsg(Msg):
def __init__(self, toUserName, fromUserName, mediaId):
self.__dict = dict()
self.__dict['ToUserName'] = toUserName
self.__dict['FromUserName'] = fromUserName
self.__dict['CreateTime'] = int(time.time())
self.__dict['MediaId'] = mediaId
def send(self):... |
class UefiConfTable():
_struct_systbl: STRUCT
_fname_arrptr: str
_fname_nitems: str
def __init__(self, ql: Qiling):
self.ql = ql
self.__arrptr_off = self._struct_systbl.offsetof(self._fname_arrptr)
self.__nitems_off = self._struct_systbl.offsetof(self._fname_nitems)
def syste... |
def get_benchmark_returns_from_file(filelike):
log.info('Reading benchmark returns from {}', filelike)
df = pd.read_csv(filelike, index_col=['date'], parse_dates=['date']).tz_localize('utc')
if ('return' not in df.columns):
raise ValueError("The column 'return' not found in the benchmark file \nExpe... |
_tag(takes_context=True)
def associated(context, backend):
user = context.get('user')
context['association'] = None
if (user and user.is_authenticated()):
try:
context['association'] = user.social_auth.filter(provider=backend.name)[0]
except IndexError:
pass
retur... |
class Sampler_SA(Sampler):
def __init__(self, config, proposal, oracle):
super().__init__(config, proposal, oracle)
self.k = 0
self.step_cur_T = 0
self.T = self.T_k(self.k)
def T_k(self, k):
T_0 = 1.0
BETA = self.config['beta']
ALPHA = self.config['alpha']... |
def parse_file_format(path):
if (os.path.isdir(path) or path.endswith('/')):
if path.rstrip('/').lower().endswith('lmdb'):
return 'lmdb'
return 'dir'
if (os.path.isfile(path) and (os.path.splitext(path)[1] == '')):
return 'txt'
path = path.lower()
if path.endswith('.t... |
def plot_segment_duration(round_summary, path, mode_name):
save_path = os.path.join(path, 'segments')
if (not os.path.exists(save_path)):
os.makedirs(save_path)
for key in round_summary.keys():
if ('duration' in key):
(f, ax) = plt.subplots(1, 1, figsize=(12, 9), dpi=100)
... |
def unpack_bip32_root_fingerprint_and_int_path(path: bytes) -> Tuple[(bytes, Sequence[int])]:
if ((len(path) % 4) != 0):
raise Exception(f'unexpected packed path length. path={path.hex()}')
xfp = path[0:4]
int_path = [int.from_bytes(b, byteorder='little', signed=False) for b in chunks(path[4:], 4)]
... |
class FreeTypeFont(base.Font):
glyph_renderer_class = FreeTypeGlyphRenderer
_memory_faces = MemoryFaceStore()
def __init__(self, name, size, bold=False, italic=False, stretch=False, dpi=None):
if stretch:
warnings.warn('The current font render does not support stretching.')
super... |
('beeref.view.BeeGraphicsView.clear_scene')
def test_open_from_file(clear_mock, view, qtbot):
root = os.path.dirname(__file__)
filename = os.path.join(root, 'assets', 'test1item.bee')
view.on_loading_finished = MagicMock()
view.open_from_file(filename)
view.worker.wait()
qtbot.waitUntil((lambda ... |
def compute_integrals(atom, unit, charge, spin, basis, hf_method='rhf', conv_tol=1e-09, max_cycle=50, init_guess='minao', max_memory=None):
atom = _check_molecule_format(atom)
hf_method = hf_method.lower()
if (max_memory is None):
max_memory = param.MAX_MEMORY
try:
verbose = pylogger.QUI... |
def fix_database_soccer_1(sqlite_file):
print('Editing database', sqlite_file)
conn = sqlite3.connect(sqlite_file)
conn.text_factory = (lambda b: b.decode(errors='ignore'))
c = conn.cursor()
query_get_all_tables = "SELECT name FROM sqlite_master WHERE type='table'"
c.execute(query_get_all_tables... |
def get_root_logger(log_level=logging.INFO):
    """Return the process-wide root logger, configuring it on first use.

    ``basicConfig`` is applied only when the root logger has no handlers
    yet, so repeated calls do not attach duplicate handlers.  Non-zero
    ranks in a distributed run are raised to ERROR level so that only
    rank 0 emits normal-priority log records.
    """
    root = logging.getLogger()
    if not root.hasHandlers():
        logging.basicConfig(
            format='%(asctime)s - %(levelname)s - %(message)s',
            level=log_level)
    rank = get_dist_info()[0]
    if rank != 0:
        root.setLevel('ERROR')
    return root
def digit_match(x, nl):
found = 0
if (x in nl):
found = 1
elif (x == '1'):
found = 1
elif (int(x) <= 10):
if (re.search(order_dict[int(x)], nl) or re.search(number_dict[str(int(x))], nl)):
found = 1
elif ((format(int(x), ',') in nl) or (x in nl)):
found = ... |
class LeaveOneOutSelectionMethod(SelectionMethod):
name = 'leave-one-domain-out cross-validation'
def _step_acc(self, records):
test_records = get_test_records(records)
if (len(test_records) != 1):
return None
test_env = test_records[0]['args']['test_envs'][0]
n_envs ... |
def test_load_nist_gcm_vectors():
vector_data = textwrap.dedent('\n [Keylen = 128]\n [IVlen = 96]\n [PTlen = 0]\n [AADlen = 0]\n [Taglen = 128]\n\n Count = 0\n Key = 11754cd72aec309bf52f7687212e8957\n IV = 3c819d9a9bedb65\n PT =\n AAD =\n ... |
def validate_result_collection_keys(*args):
invalid_keys = []
for key in args:
if ((not isinstance(key, str)) or bool(re.search('[^\\w+-.]', key))):
invalid_keys.append(key)
if (len(invalid_keys) > 0):
raise KeyError(f"Invalid key(s) provided for ResultCollection. ResultCollectio... |
_model('latent_multilingual_transformer')
class LatentMultilingualTransformerModel(MultilingualTransformerModel):
def add_args(parser):
MultilingualTransformerModel.add_args(parser)
parser.add_argument('--soft-select', action='store_true', help='use soft samples in training an inference')
pa... |
_LAYERS.register_module()
class ConvModule(nn.Module):
_abbr_ = 'conv_block'
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias='auto', conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), inplace=True, with_spectral_norm=False, padding_mode='zeros', o... |
class GAASPGriddedFileHandler(GAASPFileHandler):
y_dims = ('Number_of_Y_Dimension',)
x_dims = ('Number_of_X_Dimension',)
dim_resolutions = {'Number_of_X_Dimension': 10000}
is_gridded = True
def _get_extents(data_shape, res):
x_min = ((- (data_shape[1] / 2.0)) * res)
x_max = ((data_sh... |
def get_static_or_dynamic_shape(tensor):
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(tensor)
combined_shape = []
for (index, dim) in enumerate(static_tensor_shape):
if (dim is not None):
combined_shape.append(dim)
else:
combined_sh... |
def get_preprocessing(name, is_training=False):
preprocessing_fn_map = {'reidnet': reidnet_preprocessing}
if (name not in preprocessing_fn_map):
raise ValueError(('Preprocessing name [%s] was not recognized' % name))
def preprocessing_fn(image, output_height, output_width):
return preprocess... |
def decode_predict(example):
features = tf.parse_single_example(example, features={'label': tf.FixedLenFeature([], tf.int64), 'FEA_SrcItemId': tf.FixedLenFeature([], tf.string), 'FEA_SrcItemCp': tf.FixedLenFeature([], tf.string), 'FEA_SrcItemFirstCat': tf.FixedLenFeature([], tf.string), 'FEA_SrcItemSecondCat': tf.F... |
def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes):
for managedcluster_scenario_config in scenarios_list:
with open(managedcluster_scenario_config, 'r') as f:
managedcluster_scenario_config = yaml.full_load(f)
for managedcluster_scenario in managedcluster_scenari... |
('pymodbus-repl')
_option(str(pymodbus_version), message=TITLE)
('--verbose', is_flag=True, default=False, help='Verbose logs')
('--broadcast-support', is_flag=True, default=False, help='Support broadcast messages')
('--retry-on-empty', is_flag=True, default=False, help='Retry on empty response')
('--retry-on-error', i... |
class Task(dict):
FIELDS = {'annotations': AnnotationArrayField(label='Annotations'), 'depends': CommaSeparatedUUIDField(label='Depends Upon'), 'description': StringField(label='Description'), 'due': DateField(label='Due'), 'end': DateField(label='Ended'), 'entry': DateField(label='Entered'), 'id': NumericField(lab... |
def test_samefile(tmpdir):
assert tmpdir.samefile(tmpdir)
p = tmpdir.ensure('hello')
assert p.samefile(p)
with p.dirpath().as_cwd():
assert p.samefile(p.basename)
if (sys.platform == 'win32'):
p1 = p.__class__(str(p).lower())
p2 = p.__class__(str(p).upper())
assert p1... |
('pypyr.cache.stepcache.step_cache.get_step')
def test_stop_step_group_with_success_handler_while(mock_step_cache):
mock11 = DeepCopyMagicMock()
def step11(context):
mock11(context)
if (context['whileCounter'] == 2):
raise StopStepGroup()
mock_step_cache.side_effect = [nothing_st... |
class MockVLLMEngine():
def __init__(self, llm_app: VLLMApp, *, node_initializer: VLLMNodeInitializer=None):
assert isinstance(llm_app, VLLMApp), f'Got invalid config {llm_app} of type {type(llm_app)}'
self.llm_app = llm_app.copy(deep=True)
self.engine_config = llm_app.engine_config
... |
_ingredient.config
def cfg_eval():
exp = None
checkpoint = 'latest.pth'
split = 'valid_unseen'
shuffle = False
max_steps = 1000
max_fails = 10
subgoals = 'all'
smooth_nav = False
no_model_unroll = False
no_teacher_force = False
debug = False
x_display = '0'
eval_range... |
.skipif((pytest.config.new_pycket and (not pytest.config.load_expander)), reason='normalizer issues')
def test_caselambda():
run('(case-lambda [(x) 1])')
run('(case-lambda [(x) x])')
run('(case-lambda [() 0])')
run('(case-lambda [() 0] [(x) x])')
run('(case-lambda [x 0])')
run('((case-lambda [()... |
def test_sliding_window():
source = Stream()
L = source.sliding_window(2).sink_to_list()
for i in range(10):
source.emit(i)
assert (L == [(0,), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9)])
L = source.sliding_window(2, return_partial=False).sink_to_list()
for i... |
class DDECCharges(ChargeBase):
type: Literal['DDECCharges'] = 'DDECCharges'
program: Literal['gaussian'] = 'gaussian'
ddec_version: Literal[(3, 6)] = Field(6, description='The version of DDEC partitioning that should be used.')
solvent_settings: Optional[SolventGaussian] = Field(SolventGaussian(), descr... |
class EPICSWidget(object):
_attribute_decorator('WidgetSpecific', 'The PV name', str, {})
def epics_pv_name(self):
return self.__epics_pv_name
_pv_name.setter
def epics_pv_name(self, v):
self.__epics_pv_name = v
self.disconnect()
try:
self.epics_pv = epics.PV(... |
def KUKSpU(cell, *args, **kwargs):
for arg in args:
if isinstance(arg, libkpts.KPoints):
return kukspu_ksymm.KUKSpU(cell, *args, **kwargs)
if ('kpts' in kwargs):
if isinstance(kwargs['kpts'], libkpts.KPoints):
return kukspu_ksymm.KUKSpU(cell, *args, **kwargs)
return k... |
class CassandraConfig(AbstractWriteConfig):
def __init__(self, username: str=None, password: str=None, host: str=None, keyspace: str=None, mode: str=None, format_: str=None, stream_processing_time: str=None, stream_output_mode: str=None, stream_checkpoint_path: str=None, read_consistency_level: str=None, write_cons... |
class LogicalComparison(BinaryScalarOp):
def __init__(self, *args, **kwargs):
BinaryScalarOp.__init__(self, *args, **kwargs)
self.bool = True
def __eq__(self, other):
return (BinaryScalarOp.__eq__(self, other) and (getattr(self, 'bool', False) == getattr(other, 'bool', False)))
def _... |
def import_Market1501(dataset_dir):
market1501_dir = os.path.join(dataset_dir, 'Market-1501')
if (not os.path.exists(market1501_dir)):
print('Please Download Market1501 Dataset')
data_group = ['train', 'query', 'gallery']
for group in data_group:
if (group == 'train'):
name_d... |
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    """Deprecated alias for ``OwlViTImageProcessor``.

    Kept only for backwards compatibility: constructing an instance emits
    a ``FutureWarning`` and otherwise delegates everything to the parent
    image processor.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Emit the deprecation notice before delegating so it fires even
        # if the parent initializer raises.
        deprecation_message = 'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use OwlViTImageProcessor instead.'
        warnings.warn(deprecation_message, FutureWarning)
        super().__init__(*args, **kwargs)
class ViewProviderAsmElementLink(ViewProviderAsmOnTop):
def __init__(self, vobj):
vobj.OverrideMaterial = True
vobj.ShapeMaterial.DiffuseColor = self.getDefaultColor()
vobj.ShapeMaterial.EmissiveColor = self.getDefaultColor()
super(ViewProviderAsmElementLink, self).__init__(vobj)
... |
def to_tensor(data):
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif (isinstance(data, Sequence) and (not mmcv.is_str(data))):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTenso... |
def load_vqvae(vq_conf, vq_ckpt, opt=None):
assert (type(vq_ckpt) == str)
mparam = vq_conf.model.params
n_embed = mparam.n_embed
embed_dim = mparam.embed_dim
ddconfig = mparam.ddconfig
n_down = (len(ddconfig.ch_mult) - 1)
vqvae = VQVAE(ddconfig, n_embed, embed_dim)
map_fn = (lambda stora... |
class BirthdayParty(QObject):
def __init__(self, parent=None):
super(BirthdayParty, self).__init__(parent)
self._host = None
self._guests = []
(Person)
def host(self):
return self._host
def host(self, host):
self._host = host
(QQmlListProperty)
def guests(... |
class TestQuteConfigdiff():
(autouse=True)
def prepare_config(self, config_stub):
config_stub.set_obj('content.javascript.enabled', True, pattern=urlmatch.UrlPattern('chrome-devtools://*'), hide_userconfig=True)
.parametrize('url, expected', [('qute://configdiff/', b'<Default configuration>'), ('qut... |
_grad()
def evaluate_a2d(model, data_loader, postprocessor, device, args):
model.eval()
predictions = []
metric_logger = utils.MetricLogger(delimiter=' ')
header = 'Test:'
for (samples, targets) in metric_logger.log_every(data_loader, 10, header):
image_ids = [t['image_id'] for t in targets... |
def send_pfs_update(raiden: 'RaidenService', canonical_identifier: CanonicalIdentifier, update_fee_schedule: bool=False) -> None:
if (raiden.routing_mode == RoutingMode.PRIVATE):
return
channel_state = views.get_channelstate_by_canonical_identifier(chain_state=views.state_from_raiden(raiden), canonical_... |
class SummarizationModule(BaseTransformer):
mode = 'summarization'
loss_names = ['loss']
metric_names = ROUGE_KEYS
default_val_metric = 'rouge2'
def __init__(self, hparams, **kwargs):
if (hparams.sortish_sampler and (hparams.gpus > 1)):
hparams.replace_sampler_ddp = False
... |
_REGISTRY.register()
class Vimeo90KDataset(data.Dataset):
def __init__(self, opt):
super(Vimeo90KDataset, self).__init__()
self.opt = opt
(self.gt_root, self.lq_root) = (Path(opt['dataroot_gt']), Path(opt['dataroot_lq']))
with open(opt['meta_info_file'], 'r') as fin:
self... |
class FitTestThread(threading.Thread):
def __init__(self, fitIDs, mainFrame):
threading.Thread.__init__(self)
self.name = 'FitTestThread'
self.mainFrame = mainFrame
self.stopRunning = False
self.fits = fitIDs
def stop(self):
self.stopRunning = True
def run(sel... |
class BaseEmbeddingConfig():
num_embeddings: int
embedding_dim: int
name: str = ''
data_type: DataType = DataType.FP32
feature_names: List[str] = field(default_factory=list)
weight_init_max: Optional[float] = None
weight_init_min: Optional[float] = None
pruning_indices_remapping: Optiona... |
def test_remove_also_deactivates(tmp_path: Path, manager: EnvManager, poetry: Poetry, config: Config, mocker: MockerFixture, venv_name: str) -> None:
config.merge({'virtualenvs': {'path': str(tmp_path)}})
(tmp_path / f'{venv_name}-py3.7').mkdir()
(tmp_path / f'{venv_name}-py3.6').mkdir()
mocker.patch('s... |
def test_iterative_find_nets():
class Top(ComponentLevel3):
def construct(s):
s.w = Wire(Bits32)
s.x = Wire(Bits32)
s.y = Wire(Bits32)
s.z = Wire(Bits32)
connect(s.w[0:16], s.x[8:24])
connect(s.x[16:32], s.y[0:16])
connect(s... |
def _test_initialize_ucx_tcp(protocol):
if (protocol == 'ucx'):
ucp = pytest.importorskip('ucp')
elif (protocol == 'ucxx'):
ucp = pytest.importorskip('ucxx')
kwargs = {'enable_tcp_over_ucx': True}
initialize(protocol=protocol, **kwargs)
with LocalCluster(protocol=protocol, dashboard_... |
def pairwise_distance(feature, squared=False):
pairwise_distances_squared = (math_ops.add(math_ops.reduce_sum(math_ops.square(feature), axis=[1], keep_dims=True), math_ops.reduce_sum(math_ops.square(array_ops.transpose(feature)), axis=[0], keep_dims=True)) - (2.0 * math_ops.matmul(feature, array_ops.transpose(featu... |
class TestConfigureWindow(EndianTest):
def setUp(self):
self.req_args_0 = {'attrs': {'x': (- 27539), 'y': (- 17512), 'width': 39387, 'height': 57679, 'border_width': (- 14551), 'sibling': , 'stack_mode': 2}, 'window': }
self.req_bin_0 = b'\x0c\x00\n\x00t\xd9\xd2\x14\x7f\x00\x00\x00m\x94\x00\x00\x98\... |
def test_hamiltonian_objectives_consistent():
n = 10
graph = nx.complete_graph(n=n)
graph = random_plus_minus_1_weights(graph)
bitstrings = np.random.choice([True, False], size=(100, n))
expected_energies = [hamiltonian_objective(bitstring, graph) for bitstring in bitstrings]
actual_energies = h... |
class SvgDraggableRectangleResizePoint(gui.SvgRectangle, DraggableItem):
def __init__(self, app_instance, compatibility_iterable, **kwargs):
self.w = 15
self.h = 15
super(SvgDraggableRectangleResizePoint, self).__init__(0, 0, self.w, self.h, **kwargs)
DraggableItem.__init__(self, app... |
(frozen=True)
class EnemyAttributeRandomizer(BitPackDataclass, JsonDataclass):
enemy_rando_range_scale_low: float = dataclasses.field(metadata={'min': 0.01, 'max': 25.0, 'precision': 1.0})
enemy_rando_range_scale_high: float = dataclasses.field(metadata={'min': 0.01, 'max': 25.0, 'precision': 1.0})
enemy_ra... |
class WMS_CAMS(WMSBase):
layer_prefix = 'CAMS_'
name = 'CAMS'
def __init__(self, m=None):
self.m = m
try:
self.wmslayers = [key for key in self.m.add_wms.CAMS.add_layer.__dict__.keys() if (not ((key in ['m']) or key.startswith('_')))]
except Exception:
self.wm... |
class YadageSteering(object):
def __init__(self, metadir, controller):
self.metadir = metadir
self.controller = controller
self.adage_kwargs = dict(workdir=os.path.join(metadir, 'adage'))
def connect(cls, metadir, ctrlstring, ctrlopts=None, modelsetup=None, modelopts=None, accept_metadir... |
class AudioSCPDataset(Dataset):
def __init__(self, wav_scp, segments=None, audio_length_threshold=None, return_utt_id=False, return_sampling_rate=False, allow_cache=False):
audio_loader = kaldiio.load_scp(wav_scp, segments=segments)
audio_keys = list(audio_loader.keys())
if (audio_length_thr... |
def test_elevator_echoes_shuffled(echoes_game_patches):
EchoesBasePatchesFactory()
rng = random.Random(1000)
result = elevator_echoes_shuffled(echoes_game_patches.game, rng)
gt = 'Great Temple'
tg = 'Temple Grounds'
sf = 'Sanctuary Fortress'
aw = 'Agon Wastes'
simpler = {source.as_string... |
def download_from_url(url, dst):
download = True
if os.path.exists(dst):
download = query_yes_no(('Seems you have downloaded %s to %s, overwrite?' % (url, dst)), default='no')
if download:
os.remove(dst)
if download:
response = requests.get(url, stream=True)
total... |
class OnDisconnect():
def on_disconnect(self=None) -> Callable:
def decorator(func: Callable) -> Callable:
if isinstance(self, pyrogram.Client):
self.add_handler(pyrogram.handlers.DisconnectHandler(func))
else:
if (not hasattr(func, 'handlers')):
... |
class ResNetEmbeddings(nn.Module):
def __init__(self, config: ResNetConfig):
super().__init__()
self.embedder = ResNetConvLayer(config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act)
self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
... |
()
('instance')
('vizpdf')
('--viewscope', default='')
('-v', '--verbosity', default='INFO')
def viz(instance, vizpdf, viewscope, verbosity):
logging.basicConfig(level=getattr(logging, verbosity), format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
import yadage.visualize as visualize
wflow = wfl... |
def round3(value):
if (type(value) == type([])):
return list(map(round3, value))
elif (type(value) == type({})):
ret = {}
for each in value:
ret[round3(each)] = round3(value[each])
return ret
elif (type(value) == type(1.0)):
return round(value, 3)
retu... |
class TestResultsName(PyparsingExpressionTestCase):
tests = [PpTestSpec(desc='Match with results name', expr=pp.Literal('xyz').set_results_name('value'), text='xyz', expected_dict={'value': 'xyz'}, expected_list=['xyz']), PpTestSpec(desc='Match with results name - using naming short-cut', expr=pp.Literal('xyz')('va... |
class Effect5485(BaseEffect):
    """Passive hull effect: boosts the maximum range of small projectile
    turrets, scaled per level of the Minmatar Frigate skill."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        # Only modules whose item requires the Small Projectile Turret
        # skill receive the range boost.
        def usesSmallProjectileTurret(mod):
            return mod.item.requiresSkill('Small Projectile Turret')

        fit.modules.filteredItemBoost(
            usesSmallProjectileTurret,
            'maxRange',
            ship.getModifiedItemAttr('shipBonusMF'),
            skill='Minmatar Frigate',
            **kwargs)
def new_node_type(node_id=None, node_str=None):
if (node_id is None):
if (len(CUSTOM_NODE_TYPES) == 0):
node_id = (ALL_TYPES[(- 1)] + 1)
else:
node_id = (CUSTOM_NODE_TYPES[(- 1)] + 1)
assert (node_id not in ALL_TYPES)
assert (node_id not in CUSTOM_NODE_TYPES)
CUST... |
def test_mapping_errors(c: Converter) -> None:
try:
c.structure({'a': 1, 'b': 'str'}, Dict[(str, int)])
except Exception as exc:
assert (transform_error(exc) == ["invalid value for type, expected int $['b']"])
class C():
a: Dict[(str, int)]
try:
c.structure({'a': {'a': '... |
class TestAdmin(CommandTest):
def test_emit(self):
self.call(admin.CmdEmit(), 'Char2 = Test', 'Emitted to Char2:\nTest')
def test_perm(self):
self.call(admin.CmdPerm(), 'Obj = Builder', "Permission 'Builder' given to Obj (the Object/Character).")
self.call(admin.CmdPerm(), 'Char2 = Build... |
def add_subcommand(subparsers, parents):
epilog = textwrap.dedent('\n Examples:\n qtile cmd-obj\n qtile cmd-obj -o cmd\n qtile cmd-obj -o cmd -f prev_layout -i\n qtile cmd-obj -o cmd -f prev_layout -a 3 # prev_layout on group 3\n qtile cmd-obj -o group 3 -f focus_back\... |
_db
def test_add_slot_add_slot(conference_factory, day_factory, slot_factory, admin_graphql_client):
conference = conference_factory(start=datetime(2020, 4, 2, tzinfo=pytz.UTC), end=datetime(2020, 4, 2, tzinfo=pytz.UTC))
day = day_factory(conference=conference, day=date(2020, 4, 2))
slot_factory(day=day, ho... |
class Effect6475(BaseEffect):
type = 'passive'
def handler(fit, src, context, projectionRange, **kwargs):
fit.modules.filteredItemIncrease((lambda mod: (mod.item.group.name == 'Structure Doomsday Weapon')), 'lightningWeaponTargetAmount', src.getModifiedItemAttr('structureRigDoomsdayTargetAmountBonus'), ... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.