code stringlengths 281 23.7M |
|---|
class Bottleneck(nn.Module):
def __init__(self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0.0):
super().__init__()
first_dilation = (first_dilation or dilation)
... |
def main():
args = parse_args()
root_path = args.root_path
print('Processing training set...')
training_infos = collect_hiertext_info(root_path, args.level, 'train')
convert_annotations(training_infos, osp.join(root_path, 'instances_training.json'))
print('Processing validation set...')
val_... |
def run_scipy():
stochastic_model = pr.StochasticModel()
zeta = (np.log((1 + ((100 / 500) ** 2))) ** 0.5)
lamb = (np.log(500) - (0.5 * (zeta ** 2)))
stochastic_model.addVariable(pr.ScipyDist('X1', lognorm(s=zeta, scale=np.exp(lamb))))
stochastic_model.addVariable(pr.ScipyDist('X2', norm(loc=2000, sc... |
class Migration(migrations.Migration):
dependencies = [('options', '0026_optionset_option_locked'), ('questions', '0057_question_default_text')]
operations = [migrations.AddField(model_name='question', name='default_option', field=models.ForeignKey(blank=True, help_text='The default option for this question. To... |
class TFAutoModelWithLMHead(object):
def __init__(self):
raise EnvironmentError('TFAutoModelWithLMHead is designed to be instantiated using the `TFAutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` or `TFAutoModelWithLMHead.from_config(config)` methods.')
def from_config(cls, config):
... |
def choose_layers(model, candidate_layers):
chosen_layers = []
counter = ([0] * len(candidate_layers))
for (nm, m) in model.named_modules():
for (candidate_idx, candidate) in enumerate(candidate_layers):
if isinstance(m, candidate):
counter[candidate_idx] += 1
... |
def maybe_download(archive_name, target_dir, archive_url):
archive_path = path.join(target_dir, archive_name)
if (not path.exists(target_dir)):
print(('No path "%s" - creating ...' % target_dir))
makedirs(target_dir)
if (not path.exists(archive_path)):
print(('No archive "%s" - downl... |
class TestFakeModeA(FakeStatTestBase):
def setUp(self):
super(TestFakeModeA, self).setUp()
self.mode = 'a'
def test_open_close_new_file(self):
self.check_open_close_new_file()
def test_open_write_close_new_file(self):
self.check_open_write_close_new_file()
def test_open_c... |
class PegasusTokenizerFast(PreTrainedTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = PegasusTokenizer
model_input_names = ['input_ids', 'attention_mask'... |
def dump_tagged(nodes: Sequence[object], tag: (str | None), str_conv: StrConv) -> str:
from mypy.types import Type, TypeStrVisitor
a: list[str] = []
if tag:
a.append((tag + '('))
for n in nodes:
if isinstance(n, list):
if n:
a.append(dump_tagged(n, None, str_c... |
def add_callbacks(args, dataloaders):
vars(args)['logger'] = WandbLogger(project='esasuperres', entity='whyhowltd', config=args)
vars(args)['callbacks'] = [ImagePredictionLogger(train_dataloader=dataloaders['train'], val_dataloader=dataloaders['val'], test_dataloader=dataloaders['test'], log_every_n_epochs=1, w... |
class STFTLoss(nn.Module):
def __init__(self, fft_size=1024, hop_size=120, win_size=600):
super(STFTLoss, self).__init__()
self.fft_size = fft_size
self.hop_size = hop_size
self.win_size = win_size
self.register_buffer('window', torch.hann_window(win_size))
self.sc_lo... |
def pytask_extend_command_line_interface(cli: click.Group) -> None:
additional_parameters = [click.Option(['--n-entries-in-table'], default=15, type=click.IntRange(min=0), help='How many entries to display in the table during the execution. Tasks which are running are always displayed.'), click.Option(['--sort-tabl... |
class Effect7086(BaseEffect):
    """Passive effect: boosts tracking speed of Medium Precursor Weapon modules.

    The boost amount comes from the ship's 'shipBonusPC2' attribute and is
    scaled by the pilot's 'Precursor Cruiser' skill.
    """

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        # Only modules whose item requires the Medium Precursor Weapon
        # skill receive the boost.
        def requires_medium_precursor(mod):
            return mod.item.requiresSkill('Medium Precursor Weapon')

        bonus = ship.getModifiedItemAttr('shipBonusPC2')
        fit.modules.filteredItemBoost(
            requires_medium_precursor,
            'trackingSpeed',
            bonus,
            skill='Precursor Cruiser',
            **kwargs,
        )
def modify_boundary(image, regional_sample_rate=0.1, sample_rate=0.1, move_rate=0.0, iou_target=0.8):
if (int(cv2.__version__[0]) >= 4):
(contours, _) = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
else:
(_, contours, _) = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_... |
class CollaborativeCallback(transformers.TrainerCallback):
def __init__(self, dht: hivemind.DHT, optimizer: hivemind.CollaborativeOptimizer, model: torch.nn.Module, local_public_key: bytes, statistics_expiration: float):
super().__init__()
self.model = model
(self.dht, self.collaborative_opt... |
def read_cc_block(fid, pointer):
if ((pointer != 0) and (pointer is not None)):
temp = dict()
fid.seek(pointer)
(temp['BlockType'], temp['BlockSize'], temp['valueRangeKnown'], temp['valueRangeMinimum'], temp['valueRangeMaximum'], temp['physicalUnit'], temp['cc_type'], temp['numberOfValuePair... |
def score(ref, hypo):
scorers = [(Bleu(4), ['Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4']), (Rouge(), 'ROUGE_L')]
final_scores = {}
for (scorer, method) in scorers:
(score, scores) = scorer.compute_score(ref, hypo)
if (type(score) == list):
for (m, s) in zip(method, score):
... |
def is_config_or_test(example, scan_width=5, coeff=0.05):
keywords = ['unit tests', 'test file', 'configuration file']
lines = example['content'].splitlines()
count_config = 0
count_test = 0
for (_, line) in zip(range(scan_width), lines):
for keyword in keywords:
if (keyword in l... |
class QlArchRISCV(QlArch):
type = QL_ARCH.RISCV
bits = 32
_property
def uc(self) -> Uc:
return Uc(UC_ARCH_RISCV, UC_MODE_RISCV32)
_property
def regs(self) -> QlRegisterManager:
regs_map = dict(**riscv_const.reg_map, **riscv_const.reg_csr_map, **riscv_const.reg_float_map)
... |
class BertForRetriever(nn.Module):
def __init__(self, config, args):
super(BertForRetriever, self).__init__()
self.bert_q = BertModel.from_pretrained(args.bert_model_name)
self.bert_c = BertModel.from_pretrained(args.bert_model_name)
self.proj_q = nn.Linear(config.hidden_size, 128)
... |
class Baker(DynMap):
def _rhs(x, y, a):
eps2 = (2.0 - 1e-10)
x_flr = ((eps2 * x) // 1)
xp = ((eps2 * x) - x_flr)
yp = (((a * y) + x_flr) / 2)
return (xp, yp)
def _rhs_inv(xp, yp, a):
eps2 = (2.0 - 1e-10)
if (yp > 0.5):
xflr = (0.5 + ((yp * a) /... |
def get_outdir(path, *paths, inc=False):
outdir = os.path.join(path, *paths)
if (not os.path.exists(outdir)):
os.makedirs(outdir)
elif inc:
count = 1
outdir_inc = ((outdir + '-') + str(count))
while os.path.exists(outdir_inc):
count = (count + 1)
outdi... |
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
super(NLayerDiscriminator, self).__init__()
self.gpu_ids = gpu_ids
if (type(norm_layer) == functools.partial):
use_bias = (norm_layer.... |
def _save_tofile(version, tmp_path, use_str):
handler = _get_saving_handler(version)
(pdf, kwargs) = next(handler)
path = (tmp_path / 'test_save_tofile.pdf')
dest = (str(path) if use_str else path)
pdf.save(dest, **kwargs)
assert path.is_file()
saved_pdf = pdfium.PdfDocument(path)
handle... |
def clamp(input, min=None, max=None):
ndim = input.ndimension()
if (min is None):
pass
elif isinstance(min, (float, int)):
input = torch.clamp(input, min=min)
elif isinstance(min, torch.Tensor):
if ((min.ndimension() == (ndim - 1)) and (min.shape == input.shape[1:])):
... |
class MainTest(unittest.TestCase):
def test_random(self) -> None:
min_rating = 4
min_uc = 5
min_sc = 0
name = 'random'
max_len = 4
mask_prob = 0.2
random_user_count = 5
random_item_count = 40
random_size = 200
dupe_factor = 1
ra... |
class RINEX_input(unittest.TestCase):
def test(self):
run_test(self, ['-R', '05 06 1985 13:50:02'], ' Month/Day/Year H:M:S 11/06/2010 13:00:00 GPS\n Modified Julian Date 55506. GPS\n GPSweek DayOfWeek SecOfWeek 584 6 565200.000000\n FullGPSweek Zcount ... |
def create_nested_marker(name: str, constraint: (BaseConstraint | VersionConstraint)) -> str:
from poetry.core.constraints.generic import Constraint
from poetry.core.constraints.generic import MultiConstraint
from poetry.core.constraints.generic import UnionConstraint
from poetry.core.constraints.versio... |
class Extract():
def __init__(self, argv=sys.argv[1:]):
inputdir = None
outputfile = None
subset_list = None
batch_size = 1
(opts, args) = getopt.getopt(argv, 'i:o:b:s', ['inputdir=', 'outfile=', 'batch_size=', 'subset_list='])
for (opt, arg) in opts:
if (... |
class ClimateFEVER(AbsTaskRetrieval, BeIRTask):
def description(self):
return {'name': 'ClimateFEVER', 'beir_name': 'climate-fever', 'description': 'CLIMATE-FEVER is a dataset adopting the FEVER methodology that consists of 1,535 real-world claims regarding climate-change.', 'reference': ' 'type': 'Retrieva... |
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('--device', default='cuda:0', help='Device used for inference')
parser.add_arg... |
class XmlLexer(RegexLexer):
flags = (re.MULTILINE | re.DOTALL)
name = 'XML'
aliases = ['xml']
filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf']
mimetypes = ['text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml']
url = '
vers... |
.usefixtures('current_fastest')
def test_create_long_path(tmp_path):
if (sys.platform == 'darwin'):
max_shebang_length = 512
else:
max_shebang_length = 127
count = (max_shebang_length - len(str(tmp_path)))
folder = (((tmp_path / ('a' * (count // 2))) / ('b' * (count // 2))) / 'c')
fo... |
def after_branch_decrefs(label: BasicBlock, pre_live: AnalysisDict[Value], source_defined: set[Value], source_borrowed: set[Value], source_live_regs: set[Value], ordering: dict[(Value, int)], omitted: Iterable[Value]) -> tuple[(tuple[(Value, bool)], ...)]:
target_pre_live = pre_live[(label, 0)]
decref = ((sourc... |
class ql_file():
def __init__(self, path: AnyStr, fd: int):
self.__path = path
self.__fd = fd
self.__closed = False
self._is_map_shared = False
self._mapped_offset = (- 1)
self.close_on_exec = False
def open(cls, path: AnyStr, flags: int, mode: int, dir_fd: Option... |
class Buffer():
def __init__(self, initial_bytes: Optional[bytes]=None) -> None:
self.buffer = bytearray()
self.bytes_used = 0
if initial_bytes:
self.feed(initial_bytes)
def feed(self, new_bytes: bytes) -> None:
self.buffer += new_bytes
def consume_at_most(self, n... |
class Calculations():
def __init__(self, session, display='loss', loss_keys=['loss'], selections=['raw'], avg_samples=500, smooth_amount=0.9, flatten_outliers=False, is_totals=False):
logger.debug('Initializing %s: (session: %s, display: %s, loss_keys: %s, selections: %s, avg_samples: %s, smooth_amount: %s,... |
class VQVAEModel(BaseModel):
def name(self):
return 'VQVAE-Model'
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.isTrain = opt.isTrain
self.model_name = self.name()
self.device = opt.device
assert (opt.vq_cfg is not None)
configs = omegaco... |
_model
def test_kappa_wild():
Monomer('A', ['site'])
Monomer('B', ['site'])
Initial(A(site=None), Parameter('A_0', 100))
Initial(B(site=None), Parameter('B_0', 100))
Initial((A(site=1) % B(site=1)), Parameter('AB_0', 1000))
Rule('deg_A', (A(site=pysb.WILD) >> None), Parameter('k', 1))
Observ... |
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=False):
super().__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, stride, ... |
class SongList(AllTreeView, SongListDnDMixin, DragScroll, util.InstanceTracker):
__gsignals__: GSignals = {'songs-removed': (GObject.SignalFlags.RUN_LAST, None, (object,)), 'orders-changed': (GObject.SignalFlags.RUN_LAST, None, [])}
headers: list[str] = []
star = list(Query.STAR)
def menu(self, header: ... |
def parse_args():
parser = argparse.ArgumentParser(description='Train Reddit-Multi-5k Model')
parser.add_argument('--data_path', nargs='?', default='../../Data/Reddit5k', help='Input data path.')
parser.add_argument('--model_path', nargs='?', default='../../params/', help='path for saving trained model.')
... |
def test_event_loop_fixture_finalizer_handles_loop_set_to_none_async_with_fixture(pytester: Pytester):
pytester.makepyfile(dedent(' import asyncio\n import pytest\n\n .asyncio\n async def test_async_with_explicit_fixture_request(event_loop):\n asyncio.get_e... |
class SimilarityDataLoader(DataLoader, Generic[T_co]):
def __init__(self, dataset: Dataset, **kwargs):
if ('collate_fn' not in kwargs):
kwargs['collate_fn'] = self.__class__.pre_collate_fn
self._original_dataset = dataset
self._original_params = kwargs
self._indexing_data... |
def launch_experiments(variant_generator):
variants = variant_generator.variants()
for (i, variant) in enumerate(variants):
print('Launching {} experiments.'.format(len(variants)))
run_sac_experiment(run_experiment, mode=args.mode, variant=variant, exp_prefix=((variant['prefix'] + '/') + args.ex... |
def str2int(strtab):
inttab = []
for i in strtab:
inttab.append(int(i, 16))
ba = bytearray(inttab)
if (len(strtab) == 4):
fmt = 'I'
elif (len(strtab) == 8):
fmt = 'Q'
else:
raise Exception(("String array of len %d can't be unpacked to an int" % len(strtab)))
r... |
.parametrize('version, expected_version', [(None, LATEST_PYSCRIPT_VERSION), ('2022.9.1', '2022.9.1')])
def test_wrap_pyscript_version(invoke_cli: CLIInvoker, version: Optional[str], expected_version: str, tmp_path: Path, app_details_args: list[str]) -> None:
command = 'print("Hello World!")'
args = ['create', '... |
def worker(proc_id, gpu_ranks, args, model):
set_seed(args.seed)
if args.dist_train:
rank = gpu_ranks[proc_id]
gpu_id = proc_id
elif args.single_gpu:
rank = None
gpu_id = proc_id
else:
rank = None
gpu_id = None
if args.dist_train:
train_loader ... |
class TrainProgressMonitor(Callback):
def __init__(self, loggers: Union[(MetricLogger, List[MetricLogger])]) -> None:
if (not isinstance(loggers, list)):
loggers = [loggers]
self._loggers: List[MetricLogger] = loggers
def on_train_start(self, state: State, unit: TTrainUnit) -> None:
... |
class SawyerHandlePressSideV2Policy(Policy):
_fully_parsed
def _parse_obs(obs):
return {'hand_pos': obs[:3], 'gripper': obs[3], 'handle_pos': obs[4:7], 'unused_info': obs[7:]}
def get_action(self, obs):
o_d = self._parse_obs(obs)
action = Action({'delta_pos': np.arange(3), 'grab_effo... |
def calculate_activation_statistics(imgs, model, batch_size=32, dims=2048, cuda=False, normalize=False, verbose=0, is_ref=False):
model.eval()
if cuda:
device = torch.device('cuda')
else:
device = torch.device('cpu')
model.to(device)
with torch.no_grad():
features = []
... |
class EpochBatchIterating(object):
def __len__(self) -> int:
raise NotImplementedError
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
raise NotImplementedError
def end_of_epoch(self) -> bool:
raise NotImplementedError
def iterations_in_epoch(self) -> int:
... |
.parametrize('username,password', users)
.parametrize('export_format', export_formats)
def test_detail_export(db, client, username, password, export_format):
client.login(username=username, password=password)
instance = View.objects.first()
url = ((reverse(urlnames['detail_export'], args=[instance.pk]) + ex... |
def set_any_tvars(node: TypeAlias, newline: int, newcolumn: int, options: Options, *, from_error: bool=False, disallow_any: bool=False, special_form: bool=False, fail: (MsgCallback | None)=None, unexpanded_type: (Type | None)=None) -> TypeAliasType:
if (from_error or disallow_any):
type_of_any = TypeOfAny.f... |
def read_system_cpu(path, cpu_status={}):
cpu_status['online'] = True
if os.path.isfile((path + '/online')):
with open((path + '/online'), 'r') as f:
cpu_status['online'] = (f.read().strip() == '1')
if os.path.isdir((path + '/cpufreq')):
with open((path + '/cpufreq/scaling_govern... |
class TestAssertNotAlmostEqual(TestCase):
def test_simple(self):
self.assertNotAlmostEqual(100, klm)
self.assertNotAlmostEqual(456, (aaa and bbb))
self.assertNotAlmostEqual(789, (ccc or ddd))
self.assertNotAlmostEqual(123, (True if You else False))
def test_simple_msg(self):
... |
_env_with_credentials
def open(fp, mode='r', driver=None, width=None, height=None, count=None, crs=None, transform=None, dtype=None, nodata=None, sharing=False, opener=None, **kwargs):
if (not isinstance(fp, str)):
if (not (hasattr(fp, 'open') or hasattr(fp, 'read') or hasattr(fp, 'write') or isinstance(fp,... |
class GSLS(Optimizer):
_OPTIONS = ['maxiter', 'max_eval', 'disp', 'sampling_radius', 'sample_size_factor', 'initial_step_size', 'min_step_size', 'step_size_multiplier', 'armijo_parameter', 'min_gradient_norm', 'max_failed_rejection_sampling']
def __init__(self, maxiter: int=10000, max_eval: int=10000, disp: boo... |
def test_android_defaults(env_android):
mock_jnius = get_jnius_mock()
with patch.dict('sys.modules', {'jnius': mock_jnius}):
pp = platform.get_platform_paths('pypyr', 'config.yaml')
mock_jnius.autoclass.assert_called_once_with('android.content.Context')
assert (pp == platform.PlatformPaths(confi... |
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, dat... |
def test_setting_fields_to_self_do_nothing():
options = Options(MagicMock())
initial_serialize = options._serialize_fields()
with options:
for field in randovania.interface_common.options._SERIALIZER_FOR_FIELD.keys():
setattr(options, field, getattr(options, field))
assert (options._... |
.parametrize('n_splits, axis, values, sizes', [(0, 0, set_test_value(pt.vector(), rng.normal(size=20).astype(config.floatX)), set_test_value(pt.vector(dtype='int64'), [])), (5, 0, set_test_value(pt.vector(), rng.normal(size=5).astype(config.floatX)), set_test_value(pt.vector(dtype='int64'), rng.multinomial(5, (np.ones(... |
def muti_loss_fusion(preds, target):
loss0 = 0.0
loss = 0.0
for i in range(0, len(preds)):
if ((preds[i].shape[2] != target.shape[2]) or (preds[i].shape[3] != target.shape[3])):
tmp_target = F.interpolate(target, size=preds[i].size()[2:], mode='bilinear', align_corners=True)
... |
def put_radio(name: str, options: List[Union[(Dict[(str, Any)], Tuple, List, str)]]=None, *, label: str='', inline: bool=None, value: str=None, help_text: str=None, scope: str=None, position: int=OutputPosition.BOTTOM) -> Output:
from pywebio.input import radio
check_dom_name_value(name, 'pin `name`')
singl... |
def test_recipe_access():
class RetortWithRecipe(Retort):
recipe = [PlaceholderProvider(1)]
with pytest.raises(AttributeError, match=full_match_regex_str("Can not read 'recipe' attribute")):
RetortWithRecipe.recipe
with pytest.raises(AttributeError, match=full_match_regex_str("Can not set 'r... |
class DataFM(object):
def __init__(self, fm_model_file):
self.name_field = {'weekday': 0, 'hour': 1, 'useragent': 2, 'IP': 3, 'region': 4, 'city': 5, 'adexchange': 6, 'domain': 7, 'slotid': 8, 'slotwidth': 9, 'slotheight': 10, 'slotvisibility': 11, 'slotformat': 12, 'creative': 13, 'advertiser': 14, 'slotpr... |
class Dict(NodeNG, Instance):
_astroid_fields = ('items',)
def __init__(self, lineno: (int | None), col_offset: (int | None), parent: (NodeNG | None), *, end_lineno: (int | None), end_col_offset: (int | None)) -> None:
self.items: list[tuple[(InferenceResult, InferenceResult)]] = []
super().__in... |
def eval(net, vocab, data_iter, criterion):
net.eval()
total_loss = 0
batch_num = 0
for batch in data_iter:
(features, targets, _, doc_lens, _) = batch
(features, targets) = (Variable(features), Variable(targets.float()))
if use_gpu:
features = features.cuda()
... |
def get_entity_bios(seq, id2label):
chunks = []
chunk = [(- 1), (- 1), (- 1)]
for (indx, tag) in enumerate(seq):
if (not isinstance(tag, str)):
tag = id2label[tag]
if tag.startswith('S-'):
if (chunk[2] != (- 1)):
chunks.append(chunk)
chunk ... |
class FloatPred(Codec):
codec_id = 'imagecodecs_floatpred'
def __init__(self, shape, dtype, axis=(- 1), dist=1):
self.shape = tuple(shape)
self.dtype = numpy.dtype(dtype).str
self.axis = axis
self.dist = dist
def encode(self, buf):
buf = protective_squeeze(numpy.asarr... |
class RawTokenLexer(Lexer):
name = 'Raw token data'
aliases = []
filenames = []
mimetypes = ['application/x-pygments-tokens']
url = '
version_added = ''
def __init__(self, **options):
self.compress = get_choice_opt(options, 'compress', ['', 'none', 'gz', 'bz2'], '')
Lexer.__i... |
def test_get_cache_dir_old_pip(monkeypatch):
    """With an old pip pinned, an explicit cache dir is used verbatim and the
    no-argument fallback is the legacy ``~/.pip-audit-cache`` location."""
    # Pretend the installed pip is ancient (predates usable HTTP caching).
    monkeypatch.setattr(cache, '_PIP_VERSION', Version('1.0.0'))

    explicit = _get_cache_dir(Path('/tmp/foo/cache_dir'))
    assert str(explicit) == '/tmp/foo/cache_dir'

    fallback = _get_cache_dir(None)
    assert fallback == Path.home() / '.pip-audit-cache'
(frozen=True)
class NoAny(CustomCheck):
deep: bool = False
allowed_sources: Container['AnySource'] = field(default_factory=(lambda : frozenset({pyanalyze.value.AnySource.unreachable})))
def can_assign(self, value: 'Value', ctx: 'CanAssignContext') -> 'CanAssign':
if self.deep:
vals = val... |
def _format_subcommand(command):
(yield '.. object:: {}'.format(command.name))
if (CLICK_VERSION < (7, 0)):
short_help = command.short_help
else:
short_help = command.get_short_help_str()
if short_help:
(yield '')
for line in statemachine.string2lines(short_help, tab_widt... |
_environment_variables(model=MyHandlerEnvVars)
def my_handler(event: dict[(str, Any)], context: LambdaContext) -> dict[(str, Any)]:
env_vars = get_environment_variables(model=MyHandlerEnvVars)
return {'statusCode': HTTPStatus.OK, 'headers': {'Content-Type': 'application/json'}, 'body': json.dumps({'message': 's... |
class StlPairTransform():
def __init__(self, train_transform=True, pair_transform=True):
if (train_transform is True):
self.transform = transforms.Compose([transforms.RandomApply([transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)], p=0.8), transforms.RandomGrayscale(p... |
def test_conference_ranking_does_not_exists(conference_factory, graphql_client):
conference = conference_factory(topics=['Sushi'])
query = '\n query($code: String!, $topic: ID!) {\n conference(code: $code) {\n ranking(topic: $topic) {\n isPublic\n ... |
class encoder(nn.Module):
def __init__(self, in_dim=(17 * 3), out_dim=128, h_dim=128):
super(encoder, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.h_dim = h_dim
self.fc1 = residual_linear(in_dim, h_dim)
self.fc2 = residual_linear(h_dim, h_dim)
... |
class TestDOTAKF(TestDOTA):
def eval(self):
txt_name = '{}.txt'.format(self.cfgs.VERSION)
real_test_img_list = self.get_test_image()
kf = build_whole_network.DetectionNetworkKF(cfgs=self.cfgs, is_training=False)
self.test_dota(det_net=kf, real_test_img_list=real_test_img_list, txt_na... |
class F10_Rescue(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.r... |
def main():
args = parse_args()
if (len(args.shape) == 1):
input_shape = (3, args.shape[0], args.shape[0])
elif (len(args.shape) == 2):
input_shape = ((3,) + tuple(args.shape))
else:
raise ValueError('invalid input shape')
model = init_pose_model(args.config)
if (args.inp... |
class FHBlock(dict):
def __init__(self, fid=None, pointer=None):
if (fid is not None):
self.read(fid, pointer)
def read(self, fid, pointer):
fid.seek(pointer)
(self['id'], reserved, self['length'], self['link_count'], self['fh_fh_next'], self['fh_md_comment'], self['fh_time_n... |
class TestResourceData(TestCase):
def test_no_duplicate_links(self):
for path in RESOURCES_PATH.rglob('*.yaml'):
with self.subTest(resource=path.stem):
content = yaml.safe_load(path.read_text())
url_links = tuple((item['url'] for item in content.get('urls', ())))
... |
def test_write_paged(tmpdir):
    """Round-trip check: data written with ``write_bin`` reads back unchanged."""
    data_fname = tmpdir.join('test_read.sigmf-data')
    # 100 random GPU samples stored as complex64.
    written = cp.random.rand(100).astype(cp.complex64)
    cusignal.write_bin(str(data_fname), written)
    restored = cusignal.read_bin(str(data_fname), dtype=cp.complex64)
    cp.testing.assert_array_equal(written, restored)
class _CaeMeshGmsh():
known_element_dimensions = ['From Shape', '1D', '2D', '3D']
known_element_orders = ['1st', '2nd']
known_mesh_algorithm_2D = ['Automatic', 'MeshAdapt', 'Delaunay', 'Frontal', 'BAMG', 'DelQuad']
known_mesh_algorithm_3D = ['Automatic', 'Delaunay', 'New Delaunay', 'Frontal', 'Frontal D... |
def get_grad_norm(model):
grads = []
for p in model.parameters():
if (p.grad is not None):
grads.append(p.grad.data.view((- 1), 1))
if (len(grads) == 0):
grads.append(torch.FloatTensor([0]))
grad_norm = torch.norm(torch.cat(grads))
if grad_norm.is_cuda:
grad_norm ... |
class SingleFieldLinearNormalizer(DictOfTensorMixin):
avaliable_modes = ['limits', 'gaussian']
_grad()
def fit(self, data: Union[(torch.Tensor, np.ndarray, zarr.Array)], last_n_dims=1, dtype=torch.float32, mode='limits', output_max=1.0, output_min=(- 1.0), range_eps=0.0001, fit_offset=True):
self.pa... |
def test_reductions_with_start_state(stream):
example = pd.DataFrame({'name': [], 'amount': []})
sdf = DataFrame(stream, example=example)
output0 = sdf.amount.mean(start=(10, 2)).stream.gather().sink_to_list()
output1 = sdf.amount.count(start=3).stream.gather().sink_to_list()
output2 = sdf.amount.su... |
class SmilesType(click.ParamType):
name = 'SMILES'
def convert(self, value, param, ctx):
if (not isinstance(value, str)):
return value
try:
if ('*' in value):
raise MolProcessingError("SMILES must not contain a '*' term")
(mol, frags) = parse_s... |
_fixtures(WebFixture)
def test_carousel_basics(web_fixture):
widget = Carousel(web_fixture.view, 'my_carousel_id')
[main_div] = widget.children
assert (main_div.get_attribute('id') == 'my_carousel_id')
assert (main_div.get_attribute('class') == 'carousel slide')
[indicator_list, carousel_inner, left... |
class RandomKCompressor(TopKCompressor):
def __init__(self):
super().__init__()
self.name = 'randomk'
self.counter = 0
def compress(self, tensor, name=None, sigma_scale=3, ratio=0.05):
with torch.no_grad():
numel = tensor.numel()
k = max(int((numel * ratio... |
def test_for_loop_nested_if_grad(test, device):
n = 32
val = np.ones(n, dtype=np.float32)
expected_val = [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0]
expected_grad = [2.0, 2.0, 2.0, 2.0, 2... |
_REGISTRY.register()
class VideoRecurrentSplitClipsTestDataset(VideoTestDataset):
def __init__(self, opt):
super(VideoRecurrentSplitClipsTestDataset, self).__init__(opt)
ori_folders = sorted(list(self.imgs_lq.keys()))
ori_num_frames_per_folder = {}
ori_imgs_lq_paths = {}
ori_... |
def test_cpm_block():
    """CpmBlock validates its channel/kernel lists and preserves spatial shape."""
    # Mismatched list lengths must trip the constructor's assertion.
    with pytest.raises(AssertionError):
        CpmBlock(3, channels=[3, 3, 3], kernels=[1])

    block = CpmBlock(3, channels=[3, 3, 3], kernels=[1, 1, 1])
    block.train()

    batch = torch.randn(1, 3, 10, 10)
    out = block(batch)
    # Output keeps the input's (N, C, H, W) dimensions.
    assert out.shape == torch.Size([1, 3, 10, 10])
def draw_indexed(size, mode, indices, **data):
vao_id = GLuint()
glGenVertexArrays(1, vao_id)
glBindVertexArray(vao_id)
program = get_default_shader()
program.use()
buffers = []
for (name, (fmt, array)) in data.items():
location = program.attributes[name]['location']
count = ... |
def test_signal_reference():
signal1 = xodr.SignalReference(s=10.0, t=(- 2), orientation=xodr.Orientation.positive)
signal2 = xodr.SignalReference(s=20.0, t=(- 2), orientation=xodr.Orientation.positive)
signal2_wRev = xodr.SignalReference(s=20.0, t=(- 2), orientation=xodr.Orientation.positive)
road = xo... |
def test_concatenation_ab(a: FixtureA, b: FixtureB) -> None:
concAB = a.concatenate(b)
assert (not concAB.accepts(''))
assert (not concAB.accepts('a'))
assert (not concAB.accepts('b'))
assert (not concAB.accepts('aa'))
assert concAB.accepts('ab')
assert (not concAB.accepts('ba'))
assert ... |
class BiorxivClusteringS2S(AbsTaskClustering):
def description(self):
return {'name': 'BiorxivClusteringS2S', 'hf_hub_name': 'mteb/biorxiv-clustering-s2s', 'description': 'Clustering of titles from biorxiv. Clustering of 10 sets, based on the main category.', 'reference': ' 'type': 'Clustering', 'category':... |
class RegionAllocator():
def __init__(self, capacity):
self.allocator = allocation.Allocator(capacity)
self.regions = []
def capacity(self):
return self.allocator.capacity
def check_region(self, region):
for other in self.regions:
if (other is region):
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.