code stringlengths 101 5.91M |
|---|
_start_docstrings('Xxx Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of\n the hidden-states output to compute `span start logits` and `span end logits`). ', XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING)
class XxxForQuestionAnswering(XxxPreTrained... |
def test_hdbscan_best_balltree_metric():
(labels, p, persist, ctree, ltree, mtree) = hdbscan(X, metric='seuclidean', V=np.ones(X.shape[1]))
n_clusters_1 = (len(set(labels)) - int(((- 1) in labels)))
assert (n_clusters_1 == n_clusters)
labels = HDBSCAN(metric='seuclidean', V=np.ones(X.shape[1])).fit(X).l... |
def test_Subscription():
    """POSTing a v2 subscription to the broker must return HTTP 201 Created."""
    endpoint = brokerIp + '/v2/subscriptions'
    payload = json.dumps(v2data.subscription_data)
    response = requests.post(endpoint, data=payload, headers={'Content-Type': 'application/json'})
    assert response.status_code == 201
class Tree(nn.Module):
def __init__(self, block, in_channels, out_channels, level=1, stride=1):
super(Tree, self).__init__()
self.level = level
if (level == 1):
self.root = Root((2 * out_channels), out_channels)
self.left_node = block(in_channels, out_channels, stride... |
def get_global_rank():
    """Return this process's global rank from the MPI launcher environment.

    Checks PMI (MPICH-style) first, then Open MPI; falls back to 0 for
    single-process runs.  An empty-string value is treated as rank 0.
    """
    for env_key in ('PMI_RANK', 'OMPI_COMM_WORLD_RANK'):
        raw = os.environ.get(env_key)
        if raw is not None:
            # `raw or 0` guards against an empty-string value.
            return int(raw or 0)
    return 0
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Double_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Packet const >, double, ns3::empty, ns3::empty, ns3::e... |
def main(xargs):
assert torch.cuda.is_available(), 'CUDA is not available.'
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.set_num_threads(xargs.workers)
prepare_seed(xargs.rand_seed)
logger = prepare_logger(args)
... |
def register_Ns3CallbackImpl__Void_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::emp... |
class TestClass(object):
    """Tiny fixture exposing a plain list and a list containing one dict."""

    def arr(self):
        """Return a fresh three-element integer list."""
        return [1, 2, 3]

    def compound_arr(self):
        """Return a fresh single-element list holding one dict."""
        return [{'a': 1}]
class LogClustering():
def __init__(self, config):
self.config = config
self._loglines = pd.DataFrame()
self._timestamps = pd.DataFrame()
self._attributes = pd.DataFrame()
self._feature_df = pd.DataFrame()
self._clusters = pd.DataFrame()
self.MAX_LEN = 100
... |
class VSALoss(GANLoss):
def __init__(self, cfg):
super(VSALoss, self).__init__(cfg)
self.cfg = cfg
self.loss_map.update({'vsa': self.loss_vsa})
def loss_vsa(self, output, target):
loss = torch.mean((1.0 - torch.cosine_similarity(output['source'], target['source'].detach(), dim=1)... |
def GetAPOption():
opt = TestOptions().parse()
opt.num_threads = 1
opt.batch_size = 1
opt.serial_batches = True
opt.no_flip = True
opt.display_id = (- 1)
opt.dataroot = 'APDrawingGAN/dataset/data'
opt.name = 'formal_author'
opt.model = 'test'
opt.dataset_mode = 'single'
opt.n... |
class WordStemmer(Registrable):
    """Interface for reducing a token to its stem/base form.

    Concrete implementations are looked up through the ``Registrable``
    machinery; the default key is 'pass_through', which presumably
    returns the token unchanged -- confirm against that subclass.
    """

    # Registry key used when no explicit implementation is configured.
    default_implementation = 'pass_through'

    def stem_word(self, word: Token) -> Token:
        # Subclasses must override this and return the stemmed token.
        raise NotImplementedError
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-gt-dir', default='../mmediting/data/mfqe_v2/test_gt')
parser.add_argument('-enh-dir', default='../mmediting/data/mfqe_v2/test_lq')
parser.add_argument('-save-dir', default='log')
parser.add_argument('-ignored-frms', type=json.loads... |
class FocalLoss(nn.Module):
def __init__(self, apply_nonlin=None, alpha=None, gamma=2, balance_index=0, smooth=1e-05, size_average=True):
super(FocalLoss, self).__init__()
self.apply_nonlin = apply_nonlin
self.alpha = alpha
self.gamma = gamma
self.balance_index = balance_inde... |
def namedtupledict(typename, field_names, *args, **kwargs):
field_names_map = {n: i for (i, n) in enumerate(field_names)}
kwargs.setdefault('rename', True)
data = namedtuple(typename, field_names, *args, **kwargs)
def getitem(self, key):
if isinstance(key, string_types):
key = field_... |
class QuiverMutationType_abstract(UniqueRepresentation, SageObject):
def _repr_(self):
return self._description
def plot(self, circular=False, directed=True):
return self.standard_quiver().plot(circular=circular, directed=directed)
def show(self, circular=False, directed=True):
self.... |
_REGISTRY.register()
class Charades(torch.utils.data.Dataset):
def __init__(self, cfg, mode, num_retries=10):
assert (mode in ['train', 'val', 'test']), "Split '{}' not supported for Charades ".format(mode)
self.mode = mode
self.cfg = cfg
self._video_meta = {}
self._num_retri... |
class MVTec(AnomalibDataModule):
def __init__(self, root: (Path | str), category: str, image_size: ((int | tuple[(int, int)]) | None)=None, center_crop: ((int | tuple[(int, int)]) | None)=None, normalization: (str | InputNormalizationMethod)=InputNormalizationMethod.IMAGENET, train_batch_size: int=32, eval_batch_si... |
.parametrize('num_experts, tower_dnn_hidden_units, task_types, sparse_feature_num, dense_feature_num', [(3, (32, 16), ['binary', 'binary'], 3, 3)])
def test_ESMM(num_experts, tower_dnn_hidden_units, task_types, sparse_feature_num, dense_feature_num):
model_name = 'ESMM'
sample_size = SAMPLE_SIZE
(x, y_list,... |
def main(instanc_size=511, num_threads=24):
crop_path = './crop{:d}'.format(instanc_size)
if (not isdir(crop_path)):
mkdir(crop_path)
for sub_set in sub_sets:
sub_set_base_path = join(ann_base_path, sub_set)
videos = sorted(listdir(sub_set_base_path))
n_videos = len(videos)
... |
def get_val_dataloader(cfg):
val_dataset = Scan3RDataset(cfg, split='val')
val_dataloader = torch_util.build_dataloader(val_dataset, batch_size=cfg.val.batch_size, num_workers=cfg.num_workers, shuffle=False, collate_fn=val_dataset.collate_fn, pin_memory=True, drop_last=True)
return (val_dataset, val_dataloa... |
def eval_fn(hparams, scope=None, target_session=''):
log_device_placement = hparams.log_device_placement
out_dir = hparams.out_dir
num_train_steps = hparams.num_train_steps
steps_per_stats = hparams.steps_per_stats
steps_per_external_eval = hparams.steps_per_external_eval
steps_per_eval = (10 * ... |
def targeted_vals(model, dataset, title, attack, lowind, upind, real_dir, adv_dir, targeted_lr, t_radius):
vals = np.zeros(0)
if (attack == 'real'):
for i in range(lowind, upind):
image_dir = os.path.join(real_dir, (str(i) + '_img.pt'))
assert os.path.exists(image_dir)
... |
class Tiger(environment.Environment):
default_listen_accuracy = 0.85
def __init__(self, options={}):
environment.Environment.__init__(self, options=options)
self.valid_actions = list(tiger_action_enum.keys())
self.valid_observations = list(tiger_observation_enum.keys())
self.vali... |
class ZeroForm(FormsSpace_abstract, Module, UniqueRepresentation):
def __classcall__(cls, group=HeckeTriangleGroup(3), base_ring=ZZ, k=QQ(0), ep=None, n=None):
(group, base_ring, k, ep, n) = canonical_parameters(group, base_ring, k, ep, n)
return super().__classcall__(cls, group=group, base_ring=bas... |
def set_cfg_roland(cfg):
cfg.gnn.only_update_top_state = False
cfg.gnn.embed_update_method = 'moving_average'
cfg.gnn.gru_kernel = 'linear'
cfg.gnn.mlp_update_layers = 2
cfg.meta = CN()
cfg.meta.is_meta = False
cfg.meta.method = 'moving_average'
cfg.meta.alpha = 0.9
cfg.remark = ''
... |
class ConvPushforward3(nn.Module):
def __init__(self, input_size=28, channels=1, nlayers_conv=2, nlayers_mlp=3, **kwargs):
super(ConvPushforward3, self).__init__()
self.input_size = input_size
self.channels = channels
self.upconv1 = nn.Conv2d(1, 128, 3, 1, 2, dilation=2)
self... |
def train_model(config_file, sub_output_dir, data_iter=None, all_datasets=None):
params = Params.from_file(config_file, '')
serialization_dir = sub_output_dir
prepare_environment(params)
create_serialization_dir(params, serialization_dir, False)
prepare_global_logging(serialization_dir, False)
c... |
def test_single_label_warning():
    """An int-typed label image with a single foreground label should make
    remove_small_objects warn, suggesting a boolean array instead."""
    labelled = np.array(
        [[0, 0, 0, 1, 0],
         [1, 1, 1, 0, 0],
         [1, 1, 1, 0, 0]], int)
    with expected_warnings(['use a boolean array?']):
        remove_small_objects(labelled, min_size=6)
def test():
    """Building a 'point' record keeps the behavior mapping, and the live
    builder compares equal to its own snapshot."""
    behavior = {('*', 'point'): Point}
    builder = ak.ArrayBuilder(behavior=behavior)
    with builder.record('point'):
        for field_name, value in (('x', 1.0), ('y', 2.0), ('z', 3.0)):
            builder.field(field_name).real(value)
    assert ak.almost_equal(builder, builder.snapshot())
def difficult_branches(a: str, x: int, y: int) -> None:
if (x == 1337):
if (y == 42):
print('Yes')
else:
print('No')
if (a == 'a'):
if (y == (- 1)):
print('Maybe')
else:
print("I don't know")
if (str(x) == a):
print('Can... |
class CosineWarmUpScheduler(LRScheduler):
def __init__(self, optimizer, warmup, total_steps, last_epoch=(- 1)):
self.warmup = warmup
self.total_steps = total_steps
super(CosineWarmUpScheduler, self).__init__(optimizer, last_epoch)
def get_lr(self):
progress = (self.last_epoch / s... |
def test(args):
model = model_fn(args.model_dir, args.device)
testdata_path = glob(os.path.join(args.input_dir, '*.json.gz'))
ans_list = []
for p in tqdm(testdata_path):
data = load_data(p)
result = inference(model, data, args.device)
ans = is_correct_answer(result)
ans_l... |
class ChordREMI(BaseEventREMI):
    """REMI 'chord' event carrying the chord's root, type and slash note."""

    def __init__(self, tone, typ, slash, bar, position):
        # Timing (bar/position) is handled by the generic event base class.
        super().__init__('chord', bar, position)
        self.tone = tone    # chord root
        self.typ = typ      # chord quality -- TODO confirm encoding
        self.slash = slash  # slash-chord bass note
class AdaLearningNode(ActiveLearningNodeNBA, AdaNode):
def __init__(self, initial_stats=None, random_state=None):
super().__init__(initial_stats)
self._adwin = ADWIN()
self.error_change = False
self._random_state = check_random_state(random_state)
def n_leaves(self):
retu... |
class FortranFormatParser():
def __init__(self):
self.tokenizer = Tokenizer()
def parse(self, s):
self.tokenizer.input(s)
tokens = []
try:
while True:
t = self.tokenizer.next_token()
if (t is None):
break
... |
def load_pretrained_model(path, device, which_type):
if (which_type == 'obj'):
categories = len(object_detector_objs)
elif (which_type == 'recep'):
categories = 32
mask_rcnn = get_model_instance_segmentation((categories + 1))
mask_rcnn.load_state_dict(torch.load(path, map_location=device... |
def calc_distance(z_continuous, codebook, dim_dict):
    """Squared Euclidean distance from every input vector to every codebook
    entry, via the ||a||^2 + ||b||^2 - 2*a.b expansion.

    Args:
        z_continuous: tensor reshapeable to (-1, dim_dict).
        codebook: (num_codes, dim_dict) tensor.
        dim_dict: embedding dimensionality.

    Returns:
        (N, num_codes) tensor of squared distances.
    """
    flat = z_continuous.view(-1, dim_dict)
    sq_inputs = torch.sum(flat ** 2, dim=1, keepdim=True)
    sq_codes = torch.sum(codebook ** 2, dim=1)
    cross = torch.matmul(flat, codebook.t())
    return sq_inputs + sq_codes - 2 * cross
def process_utterance(wav: np.ndarray, text: str, out_dir: Path, basename: str, skip_existing: bool, hparams):
mel_fpath = out_dir.joinpath('mels', ('mel-%s.npy' % basename))
wav_fpath = out_dir.joinpath('audio', ('audio-%s.npy' % basename))
if (skip_existing and mel_fpath.exists() and wav_fpath.exists()):
... |
class ConvBnAct(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, norm_layer=nn.BatchNorm2d, norm_kwargs=None, act_layer=nn.ReLU, apply_act=True, drop_block=None, aa_layer=None):
super(ConvBnAct, self).__init__()
use_aa = (aa_layer i... |
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3UanTxMode_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Packet const >, ns3::UanTxMode, ns3::empty, ns3:... |
def astar_len(adj, start, target):
    """Length of the A* shortest path between two nodes of a dense graph.

    Args:
        adj: dense adjacency matrix (numpy array); nonzero entries become
            weighted edges.
        start, target: node indices.

    Returns:
        The A* path length; with no heuristic supplied this equals the
        Dijkstra shortest-path length.
    """
    # nx.from_numpy_matrix was deprecated in NetworkX 2.x and removed in
    # 3.0; from_numpy_array is the drop-in replacement (available since 2.x).
    G = nx.from_numpy_array(adj)
    return nx.astar_path_length(G, start, target)
def add_factor_ids(factors):
    """Assign each factor a positional id of the form 'f_<index>', in place."""
    for position, factor in enumerate(factors):
        factor.id = 'f_{}'.format(position)
_builder('scienceqa')
class ScienceQABuilder(BaseDatasetBuilder):
    """Dataset builder for ScienceQA.

    NOTE(review): both split classes come from the IconQA dataset module;
    presumably the loading logic is shared between the two datasets --
    confirm this is intentional and not a copy-paste slip.
    """

    # Dataset classes used for the train and eval splits.
    train_dataset_cls = IconQADataset
    eval_dataset_cls = IconQAEvalDataset

    # Registry key -> default config file for this dataset.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/scienceqa/defaults.yaml'}
def sentence_to_token_ids(sentence, vocabulary, tokenizer=None, normalize_digits=False):
if tokenizer:
words = tokenizer(sentence)
else:
words = basic_tokenizer(sentence)
if (not normalize_digits):
return [vocabulary.get(w, UNK_ID) for w in words]
return [vocabulary.get(re.sub(_D... |
class Network(BaseSearchSpace):
def __init__(self, init_ch, dataset, config, groups=1, base_width=64, dilation=1, norm_layer=None):
super(Network, self).__init__(init_ch, dataset, config)
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
if ((groups != 1) or (base_width != 64)... |
class TestByteBounds(object):
def test_byte_bounds(self):
a = arange(12).reshape(3, 4)
(low, high) = utils.byte_bounds(a)
assert_equal((high - low), (a.size * a.itemsize))
def test_unusual_order_positive_stride(self):
a = arange(12).reshape(3, 4)
b = a.T
(low, hig... |
_utils.test()
def test_multiple_calls():
N = 5
a = ti.field(float, shape=N)
b = ti.field(float, shape=N)
loss_1 = ti.field(float, shape=())
loss_2 = ti.field(float, shape=())
ti.root.lazy_dual()
for i in range(N):
a[i] = i
b[i] = i
def multiple_calls():
loss_1[Non... |
def c_sub_decl(name, return_type, args):
    """Render the C subroutine declaration template for *name*.

    NOTE(review): *return_type* is accepted but never passed to the
    template -- confirm whether the template actually needs it.
    """
    formatted_args = make_c_args(args)
    return c_sub_template.format(name=name, upname=name.upper(), args=formatted_args)
class Sampler(torch.utils.data.Sampler, metaclass=ABCMeta):
    """Abstract base for samplers; records the dataset and its split tag.

    Extra keyword arguments are accepted (and ignored) so subclasses can
    forward a shared configuration dict.
    """

    def __init__(self, dataset, data_type, **kwargs):
        # Dataset the sampler draws indices from.
        self.dataset = dataset
        # Split this sampler serves (e.g. train/val) -- TODO confirm values.
        self.data_type = data_type
class Conv2dBnRelu(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, pooling=None, activation=nn.ReLU(inplace=True)):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
self.bn = nn... |
def is_cr_lf(fname):
    """Return True if the first line of *fname* ends with CRLF ('\\r\\n').

    Bug fix: the file was previously opened in default text mode, where
    Python 3's universal-newline handling translates '\\r\\n' to '\\n'
    before the code can inspect it, so the check could never succeed.
    Opening with newline='' preserves the original line ending.  The
    `with` block also guarantees the handle is closed on error.
    """
    with open(fname, 'r', newline='') as f:
        first_line = f.readline()
    # Equivalent to the original length-2 suffix check.
    return first_line.endswith('\r\n')
.experimental
def test_ncis_activations_sigmoid(spark, prev_relevance):
res = NCISPrecision._sigmoid(prev_relevance, 'relevance')
gt = spark.createDataFrame(data=[[0, 0, (1 / (1 + (math.e ** (- 100))))], [0, 4, (1 / (1 + (math.e ** 0)))], [1, 10, (1 / (1 + (math.e ** 5)))], [4, 6, (1 / (1 + (math.e ** (- 11.5))... |
.parametrize('dtype, storage_format', [(ti.f32, 'col_major'), (ti.f32, 'row_major'), (ti.f64, 'col_major'), (ti.f64, 'row_major')])
_utils.test(arch=ti.cpu)
def test_sparse_matrix_addition(dtype, storage_format):
n = 8
Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100, dtype=dtype, storage_for... |
class BasicWideBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicWideBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,... |
_model
def SMPConv_B(pretrained=False, **kwargs):
    """Build the SMPConv-B variant with its fixed architecture settings.

    NOTE(review): *pretrained* and **kwargs are accepted for registry
    compatibility but currently ignored -- no checkpoint is loaded.
    """
    return SMPConvNet(
        large_kernel_sizes=[31, 29, 27, 13],
        layers=[2, 2, 20, 2],
        channels=[128, 256, 512, 1024],
        n_points_divide=4,
        drop_path_rate=0.5,
    )
def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'):
    """Resize *img* to the spatial size (H, W) of *dst_img*.

    Thin wrapper around imresize(), which expects a (width, height) pair.
    """
    target_h, target_w = dst_img.shape[:2]
    return imresize(img, (target_w, target_h), return_scale, interpolation)
class DataArguments():
dataset_path: str = field(default='tatsu-lab/alpaca_farm')
dataset_name: Literal[('alpaca_human_preference', 'alpaca_gpt4_preference', 'alpaca_noisy_multi_preference')] = field(default='alpaca_noisy_multi_preference', metadata={'help': 'Name of the dataset. Fetches the human or GPT-4 pref... |
def PC_S_calc(classes):
    """Return the reciprocal of the number of classes, 1/|classes|.

    Follows the library's convention of returning the string 'None'
    instead of raising when *classes* is empty or has no length.
    """
    try:
        class_count = len(classes)
        return 1 / class_count
    except Exception:
        return 'None'
_utils.test(arch=[ti.cpu, ti.cuda, ti.vulkan], exclude=[vk_on_mac], debug=True)
def test_print_string_format_with_spec_mismatch():
def foo1(x):
return (x + 1)
def test_i(i: ti.i32):
print('{:u}'.format(foo1(i)))
def test_u(u: ti.u32):
print('{:d}'.format(foo1(u)))
def test_f(u: t... |
class MultiLevelModel(nn.Module):
def __init__(self, bacth_size, img_shape, num_class):
super(MultiLevelModel, self).__init__()
pass
def forward(self, x):
pass
def reinit_hidden(self):
self.bottom_clstm.reinit_hidden()
self.middle_clstm.reinit_hidden()
self.to... |
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
size = (size if (size is not None) else {'height': 18, 'width': 18})
self.parent = pare... |
def _indices_product(indices: _Indices) -> List[List[int]]:
empty_list = torch.jit.annotate(List[int], [])
result = [empty_list]
for idx in indices:
result_temp = torch.jit.annotate(List[List[int]], [])
for res in result:
for i in range(idx):
result_temp.append((r... |
class TestExperimentWrapper():
def test_experiment_wrapper_method_call(self):
data = base64.b64encode(pickle.dumps(method_call)).decode('utf-8')
args = ['', '--args_data', data, '--log_dir', 'data/', '--resume_from_dir', 'resume_dir/', '--resume_from_epoch', 'first']
run_experiment(args)
... |
def get_default_args(fn):
    """Map parameter name -> default value for *fn*'s signature.

    Parameters without a default are omitted; returns {} when *fn* is None.
    """
    if fn is None:
        return {}
    defaults = {}
    for param_name, param in inspect.signature(fn).parameters.items():
        if param.default is not inspect.Parameter.empty:
            defaults[param_name] = param.default
    return defaults
class MediumPayloadRateQuantitiesWithMag():
SIZE = 34
def from_reader(reader: _ResponseReader):
assert (reader.remaining() >= MediumPayloadRateQuantitiesWithMag.SIZE)
rv = MediumPayloadRateQuantitiesWithMag()
rv.timestamp = Timestamp.from_reader(reader)
rv.acceleration = Accelera... |
def partial_manual_bn(x, mask, gain=None, bias=None, return_mean_var=False, eps=1e-05):
float_x = x.float()
m = (torch.sum(float_x, [0, 2, 3], keepdim=True) / (torch.sum(mask, [0, 2, 3], keepdim=True) + eps))
m2 = (torch.sum((float_x ** 2), [0, 2, 3], keepdim=True) / (torch.sum(mask, [0, 2, 3], keepdim=True... |
def test_visualize_dumped_camera_parameter():
dumped_dir = 'tests/data/camera/dumped'
non_json_file_path = os.path.join(dumped_dir, 'non_json_file.txt')
with open(non_json_file_path, 'w') as f_write:
f_write.write('test string\n')
visualize_dumped_camera_parameter(dumped_dir, interactive=False, ... |
def _copy_array_if_base_present(a):
if (a.base is not None):
return a.copy()
return a |
class Sequential(torch.nn.ModuleDict):
def __init__(self, *layers, input_shape=None, **named_layers):
super().__init__()
if ((not layers) and (input_shape is None) and (not named_layers)):
raise ValueError('Must pass either layers or input shape')
self.length_layers = []
... |
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super().__init__()
self.npoint = None
self.groupers = None
self.mlps = None
self.pool_method = 'max_pool'
def forward(self, xyz: torch.Tensor, features: torch.Tensor=None, new_xyz=None):
new_features_list... |
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, conv_layer=None, norm_layer=None, activation_layer=None):
super(BasicBlock, self).__init__()
self.conv1 = conv_layer(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.nor... |
class TestDisturbingFunction(unittest.TestCase):
def setUp(self):
self.alpha = 0.5
self.LaskarRobutel = dict()
self.LaskarRobutel['C1'] = {(0, ((1 / 2), 0, 0)): (1 / 2)}
self.LaskarRobutel['C2'] = {(1, ((3 / 2), 0, 0)): ((+ 3) / 8), (0, ((3 / 2), 1, 0)): ((- 1) / 4), (2, ((3 / 2), 1,... |
def get_precision(capsule1_path, region1_path, capsule2_path, region2_path):
class_coefs = []
capsules = []
regions = []
capsules.append(cv2.imread(capsule1_path))
capsules.append(cv2.imread(capsule2_path))
regions.append(cv2.imread(region1_path))
regions.append(cv2.imread(region2_path))
... |
class RetryOnRpcErrorClientInterceptor(grpc.UnaryUnaryClientInterceptor, grpc.StreamUnaryClientInterceptor):
def __init__(self, sleeping_policy, status_for_retry: Optional[Tuple[grpc.StatusCode]]=None):
self.sleeping_policy = sleeping_policy
self.status_for_retry = status_for_retry
def _intercep... |
class VotingHeadTemplate(nn.Module):
def __init__(self, model_cfg):
super().__init__()
self.model_cfg = model_cfg
self.num_class = 1
self.build_losses(self.model_cfg.LOSS_CONFIG)
self.forward_ret_dict = None
def build_losses(self, losses_cfg):
self.add_module('cls... |
def batch_counter_hook(module, inputs, output):
    """Forward hook that accumulates the number of samples seen by *module*.

    The batch size is read from the leading dimension of the first
    positional input; the running total lives on the module itself in
    ``__batch_counter__`` (initialised elsewhere before the hook runs).
    """
    first_input = inputs[0]
    module.__batch_counter__ += first_input.shape[0]
class LossLLE(nn.Module):
    """Loss combining an outlier-aware term with (1 - cosine similarity)
    between the [0, 1]-clipped prediction and the ground truth."""

    def __init__(self):
        super(LossLLE, self).__init__()
        self.loss_cs = nn.CosineSimilarity()
        self.loss_oa = OutlierAwareLoss()

    def forward(self, out, gt):
        reconstruction_term = self.loss_oa(out, gt)
        # Clip the prediction into [0, 1] before measuring direction agreement.
        similarity = self.loss_cs(out.clip(0, 1), gt)
        return reconstruction_term + (1 - similarity).mean()
class VCTK_VCC2020Dataset(Dataset):
def __init__(self, split, trdev_data_root, eval_data_root, spk_embs_root, lists_root, eval_lists_root, fbank_config, spk_emb_source, num_ref_samples, train_dev_seed=1337, **kwargs):
super(VCTK_VCC2020Dataset, self).__init__()
self.split = split
self.fbank_... |
def test_function_resampler_fit():
    """FunctionSampler with validate=False must accept data containing
    NaN/inf through both fit() and fit_resample()."""
    X = np.array([[1, np.nan], [2, 3], [np.inf, 4]])
    y = np.array([0, 1, 1])
    take_first = lambda X, y: (X[:1], y[:1])
    sampler = FunctionSampler(func=take_first, validate=False)
    sampler.fit(X, y)
    sampler.fit_resample(X, y)
class ResnetAdaILNBlock(nn.Module):
def __init__(self, dim, use_bias):
super(ResnetAdaILNBlock, self).__init__()
self.pad1 = nn.ReflectionPad2d(1)
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias)
self.norm1 = adaILN(dim)
self.relu1 = nn.ReLU... |
_utils.test()
def test_atan2():
N = 1
x = ti.field(ti.i32, shape=(N,))
y = ti.field(ti.i32, shape=(N,))
def test_case_0() -> ti.f32:
i = ti.i32(2)
return ti.atan2(i, 1)
def test_case_1() -> ti.f32:
x[0] = ti.i32(2)
return ti.atan2(x[0], 1)
def test_case_2() -> ti.... |
class ZipProjectCheckout(ProjectCheckout):
def __init__(self, name: str, version: str, revision_url: str, md5_checksum: str, base_path: str):
super().__init__(revision_url, base_path, name)
self.md5_checksum = md5_checksum
self.version = version
self.__base_checkout_dir = self.checko... |
class SparseHalfCheetahEnv(HalfCheetahEnv, Serializable):
FILE = 'half_cheetah.xml'
def __init__(self, *args, **kwargs):
super(SparseHalfCheetahEnv, self).__init__(*args, **kwargs)
Serializable.__init__(self, *args, **kwargs)
def step(self, action):
self.forward_dynamics(action)
... |
def require_bs4(test_case):
    """Decorator that skips *test_case* unless BeautifulSoup4 is installed."""
    skip_if_missing = unittest.skipUnless(is_bs4_available(), 'test requires BeautifulSoup4')
    return skip_if_missing(test_case)
def test_long():
    """The longs stored in long_test_data.avro must decode to the expected
    Python list."""
    expected = [12, 435, 56, 12, 67, 34, 89, 2345, 536, 8769]
    avro_path = os.path.join(SAMPLES_DIR, 'long_test_data.avro')
    assert ak.from_avro_file(file=avro_path).to_list() == expected
def test_broadcast_kwargs():
    """broadcast_arrays() must reject unknown keyword arguments with a
    TypeError naming the offending call."""
    first = np.arange(10)
    second = np.arange(10)
    message = 'broadcast_arrays\\(\\) got an unexpected keyword*'
    with assert_raises_regex(TypeError, message):
        broadcast_arrays(first, second, dtype='float64')
class Bottleneck_depthwise_ir(Bottleneck):
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(Bottleneck_depthwise_ir, self).__init__(inplanes, planes, stride, downsample)
self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False, grou... |
def count_vars(scope=''):
    """Total number of scalar parameters across the variables in *scope*."""
    # Multiply out each variable's static shape and sum the products.
    return sum(np.prod(v.shape.as_list()) for v in get_vars(scope))
.skip
def test_starred_target():
a = np.zeros((1,), dtype=np.float32)
a[0] = np.pi
(b, c, d, e) = starred_target(a=a)
assert (b[0] == np.float32(np.pi))
assert (c[0] == (np.float32(2) * np.float32(np.pi)))
assert (c[1] == (np.float32(3) * np.float32(np.pi)))
assert (c[2] == (np.float32(4) * ... |
def save_load_progress(progress, update=[], filename=SAVED_PROGRESS):
print('Saving sweep progress to "{}", please be patient...'.format(filename))
if os.path.exists(filename):
with open(filename, 'rb') as f:
progress = pickle.load(f)
progress += update
with open(filename, 'wb') as f... |
class InsecureCacheControlAdapter(CacheControlAdapter):
    """Cache-control adapter that disables TLS certificate verification.

    Security note: every request through this adapter skips certificate
    checks (verify is forced to False), so it should only serve hosts the
    caller has explicitly marked as trusted/insecure.
    """

    def cert_verify(self, conn, url, verify, cert):
        # Discard whatever *verify* the caller passed and force it off.
        super(InsecureCacheControlAdapter, self).cert_verify(conn=conn, url=url, verify=False, cert=cert)
def ensure_binary(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to bytes: pass binary through, encode text, reject the rest.

    Raises:
        TypeError: if *s* is neither ``binary_type`` nor ``text_type``.
    """
    # text_type and binary_type are disjoint (six compat aliases), so the
    # order of the two isinstance checks does not matter.
    if isinstance(s, text_type):
        return s.encode(encoding, errors)
    if isinstance(s, binary_type):
        return s
    raise TypeError("not expecting type '%s'" % type(s))
class NgramVocab(SubtokenVocab):
def __init__(self, n, token_vocab, *args, **kwargs):
recount = kwargs.pop('recount', False)
initialize_zero = kwargs.pop('initialize_zero', False)
super(TokenVocab, self).__init__(*args, **kwargs)
self._n = n
self._token_vocab = token_vocab
... |
class SawyerBlockPickingEnv(SawyerXYZEnv):
def __init__(self):
liftThresh = 0.1
hand_low = ((- 0.5), 0.4, 0.07)
hand_high = (0.5, 1, 0.5)
obj_low = ((- 0.5), 0.4, 0.07)
obj_high = (0.5, 1, 0.5)
super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
... |
class QuasiWeakModularFormsRing(FormsRing_abstract, UniqueRepresentation):
def __classcall__(cls, group=HeckeTriangleGroup(3), base_ring=ZZ, red_hom=False, n=None):
(group, base_ring, red_hom, n) = canonical_parameters(group, base_ring, red_hom, n)
return super().__classcall__(cls, group=group, base... |
def dataio_prepare(hparams):
data_folder = hparams['data_folder']
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(csv_path=hparams['csv_train'], replacements={'data_root': data_folder})
if (hparams['sorting'] == 'ascending'):
train_data = train_data.filtered_sorted(sort_key='duration')
... |
class LLama2_QA(AbstractLLama2):
def prompt(self):
return ' [INST] I want you to act as a question answering model for tabular data.\n I will pass you a table with one question. \n I want you to only reply with the output of the question executed on the table.\n I want you to ret... |
def _compute_p_max(m_max):
sqrt_m_max = np.sqrt(m_max)
p_low = int(np.floor(sqrt_m_max))
p_high = int(np.ceil((sqrt_m_max + 1)))
return max((p for p in range(p_low, (p_high + 1)) if ((p * (p - 1)) <= (m_max + 1)))) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.