code stringlengths 101 5.91M |
|---|
class WideRepresentation(Representation):
def __init__(self, **kwargs):
super().__init__(**kwargs)
'\n Gets the action space used by the wide representation\n\n Parameters:\n width: the current map width\n height: the current map height\n num_tiles: the total number of the til... |
class RTEPipe(MatchingPipe):
    """Matching pipeline specialised for the RTE (textual entailment) dataset."""

    def process_from_file(self, paths=None):
        """Load RTE data from *paths* (or the default location) and process it.

        Returns the processed data bundle produced by ``self.process``.
        """
        bundle = RTELoader().load(paths)
        return self.process(bundle)
('/download_package', methods=['GET'])
def download_package():
    """HTTP view: return the compute package named by the ``name`` query arg.

    NOTE(review): missing-``name`` handling is delegated to
    ``api.download_compute_package`` — confirm it tolerates ``None``.
    """
    package_name = request.args.get('name', None)
    return api.download_compute_package(package_name)
def register_Ns3SsServiceFlowManager_methods(root_module, cls):
cls.add_constructor([param('ns3::SsServiceFlowManager const &', 'arg0')])
cls.add_constructor([param('ns3::Ptr< ns3::SubscriberStationNetDevice >', 'device')])
cls.add_method('AddServiceFlow', 'void', [param('ns3::ServiceFlow *', 'serviceFlow')... |
def convert_examples_to_features_lm(examples, label_map, max_seq_length, tokenizer, subword_map):
ori_label_map = {key: (idx + 1) for (idx, key) in enumerate(label_map.keys())}
ori_label_map['O'] = 0
features = []
for (ex_index, example) in tqdm(enumerate(examples), desc='Examples2Features'):
te... |
class PoolFormerModel(metaclass=DummyObject):
    """Import-time placeholder: raises unless the torch backend is available."""

    # Backends this dummy stands in for.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails fast with an informative error when torch is missing.
        requires_backends(self, ['torch'])
def _prepare_ldflags(extra_ldflags, with_cuda, verbose, is_standalone):
if IS_WINDOWS:
python_path = os.path.dirname(sys.executable)
python_lib_path = os.path.join(python_path, 'libs')
extra_ldflags.append('c10.lib')
if with_cuda:
extra_ldflags.append('c10_cuda.lib')
... |
def resnext50(**kwargs):
    """Build a ResNeXt-50 model (bottleneck blocks in the 3-4-6-3 layout).

    Extra keyword arguments are forwarded to the ``ResNeXt`` constructor.
    """
    stage_blocks = [3, 4, 6, 3]
    return ResNeXt(ResNeXtBottleneck, stage_blocks, **kwargs)
class BlipImageBaseProcessor(BaseProcessor):
def __init__(self, mean=None, std=None, do_normalize=True):
if (mean is None):
mean = (0., 0.4578275, 0.)
if (std is None):
std = (0., 0., 0.)
if do_normalize:
self.normalize = transforms.Normalize(mean, std)
... |
def add_while_op(while_net, cond_blob, lexical_scope, loop_body_net, condition_body_net=None):
(input_blob_names, output_blob_names) = get_external_blob_names(loop_body_net, lexical_scope)
input_blob_names |= output_blob_names
loop_inputs = [core.BlobReference(name=b, net=None) for b in input_blob_names]
... |
class ProcessGroupNCCLTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
self.num_gpus = torch.cuda.device_count()
if (self.num_gpus < 2):
raise un... |
class blocked_grad(torch.autograd.Function):
    """Identity in the forward pass; backward gates the gradient by *mask*."""

    def forward(ctx, x, mask):
        # Stash inputs so backward can gate the incoming gradient.
        ctx.save_for_backward(x, mask)
        return x

    def backward(ctx, grad_output):
        x, mask = ctx.saved_tensors
        gated = grad_output * mask
        # Gradient w.r.t. the mask is zero; built by multiplication so the
        # dtype/device of the original expression are preserved.
        return gated, mask * 0.0
_CASCADE_OUTPUTS.register('box_output')
class Box_output(nn.Module):
def __init__(self, dim_in):
super().__init__()
self.dim_in = dim_in
self.cls_score = nn.Linear(self.dim_in, cfg.MODEL.NUM_CLASSES)
if cfg.FAST_RCNN.CLS_AGNOSTIC_BBOX_REG:
self.bbox_pred = nn.Linear(self.... |
.slow
_version(sympy, '0.7')
_version(mp, '0.19')
class TestInversion(object):
.xfail_on_32bit('rtol only 2e-9, see gh-6938')
def test_log(self):
with mp.workdps(30):
logcoeffs = mp.taylor((lambda x: mp.log((1 + x))), 0, 10)
expcoeffs = mp.taylor((lambda x: (mp.exp(x) - 1)), 0, 1... |
def init_nlp_model(special_chars=SPECIAL_CHARACTERS, model_name='en_core_web_lg'):
    """Load a spaCy pipeline and register each special character as one token.

    Parameters:
        special_chars: iterable of strings to keep as single tokens.
        model_name: name of the spaCy model package to load.
    Returns the configured ``Language`` object.
    """
    nlp = spacy.load(model_name)
    for token in special_chars:
        # Force the tokenizer to treat the character as an atomic token.
        nlp.tokenizer.add_special_case(token, [dict(ORTH=token)])
    return nlp
class SVHN(data.Dataset):
url = ''
filename = ''
file_md5 = ''
split_list = {'train': [' 'train_32x32.mat', 'e26dedcc434d2e4c54c9b2d4a06d8373'], 'test': [' 'test_32x32.mat', 'eb5a983be6af1b164d9cef3'], 'extra': [' 'extra_32x32.mat', 'a93ce644f1a588dc4d68dda5feec44a7']}
def __init__(self, root, split... |
def get_default_config():
config = ml_collections.ConfigDict()
config.actor_lr = 0.0003
config.value_lr = 0.0003
config.critic_lr = 0.0003
config.hidden_dims = (256, 256)
config.discount = 0.99
config.expectile = 0.9
config.temperature = 10.0
config.dropout_rate = None
config.tau... |
def train(argv=None):
print('Reading data...')
(X_train_total, y_train_total, X_train, y_train, X_vald, y_vald, X_test, y_test) = data.load_data()
learning_rates = [1e-06, 5e-06, 1e-05, 5e-05, 0.0001, 0.0005, 0.001]
num_epochs = 500
num_components = 500
batch_size = 128
try:
model = ... |
def get_pai_tf_cmd(cluster_config, tarball, params_file, entry_file, model_name, oss_model_path, train_table, val_table, res_table, project):
job_name = '_'.join(['sqlflow', model_name]).replace('.', '_')
cf_quote = json.dumps(cluster_config).replace('"', '\\"')
submit_tables = _max_compute_table_url(train_... |
def dim_cmp_value(obj):
if isinstance(obj, _d.Dim):
obj = obj.get_same_base()
return ('', obj.description, obj.kind, obj.dimension, (obj.dyn_size_ext.dims if (obj.dyn_size_ext is not None) else None))
if isinstance(obj, _m.MarkedDim):
return (obj.__class__.__name__, obj.tag)
return o... |
def parse_finish_time(log_f):
lines = open(log_f, 'r').readlines()
for k in range(1, min(1000, len(lines))):
line = lines[(- k)].lower()
if (('(speed:' in line) and ('per timing, total_time:' in line)):
finish_time = lines[(- k)].split('(speed:')[0].split()[(- 1)].strip()
... |
def test_count_nonzero():
assert (ak.count_nonzero(array, axis=None) == 11)
assert ak.almost_equal(ak.count_nonzero(array, axis=None, keepdims=True, mask_identity=False), ak.to_regular([[11]]))
assert ak.almost_equal(ak.count_nonzero(array, axis=None, keepdims=True, mask_identity=True), ak.to_regular(ak.Arr... |
def _make_env(rank):
    """Return a thunk that builds a CausalWorld 'reaching' env seeded by *rank*.

    Also resets the global RNG seeds to 0 as a side effect (done once per call,
    before the thunk is returned).
    """
    def _init():
        reaching_task = generate_task(task_generator_id='reaching')
        return CausalWorld(task=reaching_task, enable_visualization=False, seed=rank)

    set_global_seeds(0)
    return _init
class Sharpness(_Enhance):
    """Sharpness enhancer: interpolates between the image and a smoothed copy."""

    def __init__(self, image):
        # The smoothed image is the zero-sharpness reference point.
        smoothed = image.filter(ImageFilter.SMOOTH)
        if 'A' in image.getbands():
            # Keep the original alpha channel so only color data is blended.
            smoothed.putalpha(image.getchannel('A'))
        self.image = image
        self.degenerate = smoothed
def register_methods(root_module):
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3CallbackBase_methods(root_module, root_... |
def f_saliency_whitebox_weighted_subtree(wb, im):
img_probe = wb.net.preprocess(im.pil())
(img_saliency, P_img, P_subtree, k_subtree) = wb.weighted_subtree_ebp(img_probe, k_poschannel=0, k_negchannel=1, topk=64, do_max_subtree=False, subtree_mode='all', do_mated_similarity_gating=True, verbose=False)
img_sa... |
class FileIdManager():
def __init__(self, *attrs):
self.attrs = attrs[0]
self.nattr = len(self.attrs)
def get_id_from_args(self, args):
tmp = list()
for attr in self.attrs:
if (attr == '*'):
tmp.append('*')
elif (type(attr) != str):
... |
def test_translation(basic_translation_scenario):
    """Check SHAP additivity for the PartitionExplainer on a translation model."""
    scenario = basic_translation_scenario  # (model, tokenizer, data) fixture
    common.test_additivity(shap.explainers.PartitionExplainer, *scenario)
class BaseLoss():
def __init__(self, config: LossConfig, metrics_fn: Callable, loss_fns: dict[(str, Callable)], loss_weights: Optional[dict[(str, float)]]=None) -> None:
self.config = config
self.loss_fns = loss_fns
self.metrics_fn = metrics_fn
assert callable(self.metrics_fn)
... |
def get_digits_in_number(number):
    """Return the count of decimal digits in *number*.

    Fixes two defects of the naive loop: 0 now reports one digit (the loop
    never ran, returning 0), and negative numbers are counted via their
    absolute value (the loop condition ``number > 0`` was never true).
    """
    number = abs(number)
    if number == 0:
        return 1  # zero is written with a single digit
    count = 0
    while number > 0:
        count += 1
        number //= 10
    return count
_task('cross_lingual_lm')
class CrossLingualLMTask(LegacyFairseqTask):
def add_args(parser):
parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner')
parser.add_argument('--tokens-per-samp... |
def main():
args = parse_args()
print('Called with args:')
print(args)
if (not torch.cuda.is_available()):
sys.exit('Need a CUDA device to run the code.')
if (args.cuda or (cfg.NUM_GPUS > 0)):
cfg.CUDA = True
else:
raise ValueError('Need Cuda device to run !')
if (arg... |
class TestLocalRunner(TfGraphTestCase):
def test_session(self):
with LocalTFRunner(snapshot_config):
assert (tf.compat.v1.get_default_session() is not None), 'LocalTFRunner() should provide a default tf session.'
sess = tf.compat.v1.Session()
with LocalTFRunner(snapshot_config, s... |
class CountNonzero(KernelReducer):
name: Final = 'count_nonzero'
preferred_dtype: Final = np.float64
needs_position: Final = False
def apply(self, array: ak.contents.NumpyArray, parents: ak.index.Index, starts: ak.index.Index, shifts: (ak.index.Index | None), outlength: ShapeItem) -> ak.contents.NumpyAr... |
class Backbone(BackboneBase):
def __init__(self, name: str, train_backbone: bool, return_interm_layers: bool, dilation: bool):
backbone = getattr(torchvision.models, name)(replace_stride_with_dilation=[False, False, dilation], pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)
num_channels ... |
def save_images_with_nll(images, nlls):
num_images = images.shape[0]
num_images_per_row = 4
num_images_per_column = (((num_images + num_images_per_row) - 1) // num_images_per_row)
idx = 0
for i in range(num_images_per_column):
for j in range(num_images_per_row):
plt.subplot2grid(... |
_function_dispatch(_put_dispatcher)
def put(a, ind, v, mode='raise'):
    """Replace elements of array *a* at indices *ind* with values *v*.

    Thin dispatcher that delegates to ``a.put``; raises TypeError when *a*
    does not expose a ``put`` method (i.e. is not an ndarray).
    """
    try:
        bound_put = a.put
    except AttributeError:
        raise TypeError(
            'argument 1 must be numpy.ndarray, not {name}'.format(
                name=type(a).__name__))
    return bound_put(ind, v, mode=mode)
def eval_step_ema(apply_fn, state, batch):
    """Run one evaluation step using the EMA (shadow) parameters.

    ``batch`` is a mapping with 'image' and 'label' entries; returns the
    metrics computed from the model logits.
    """
    ema_params = state.ema.variables
    logits = apply_fn(ema_params, batch['image'], training=False, mutable=False)
    labels = batch['label']
    return compute_metrics(logits, labels)
class _Dataset(Dataset):
def __init__(self, paths, config, device):
self._paths = paths
self._config = config
self._angle_lim = (np.pi / 4)
self._device = device
def __getitem__(self, item):
scene_id = (item // 50)
paths = np.random.choice(self._paths[scene_id], 2... |
def train(batch_hg, dg_node_feat_discrete, lg_node_feat_continuous, lg_node_feat_discrete, dg_edge_feat, lg_edge_feat, y, model, optimizer):
model.train()
batch_size = batch_hg.batch_size
optimizer.zero_grad()
(dg_y_hat, lg_y_hat, y_hat, _) = model(batch_hg, dg_node_feat_discrete, lg_node_feat_continuou... |
def save_output(batch_size, rootdir, samples, outputs, is_testtime=False):
for i in range(batch_size):
is_match = outputs['match'][i].item()
if True:
sdf_scan = samples['sdf_scan'][i].numpy()
df_cad = samples['df_cad'][i].numpy()
heatmap_pred = outputs['heatmap'][... |
(frozen=True)
class Annotation():
alias_set: Sequence[str]
is_write: bool
def parse(ann: str) -> 'Annotation':
m = re.match('^([a-z])(!?)$', ann)
assert (m is not None), f'unrecognized alias annotation {ann}'
alias_set = [m.group(1)]
is_write = (m.group(2) == '!')
r =... |
def label_(name, options, answer):
    """Map *answer* to its index in *options*.

    For the negation template the answer carries a trailing ' review.'
    suffix that must be stripped before the lookup.
    """
    key = answer
    if name == 'Negation template for positive and negative':
        key = answer.replace(' review.', '')
    return options.index(key)
def test_expand2():
    """Regression checks for expand(): full expansion and deep=False behavior."""
    x, y, z = (Symbol(name) for name in ('x', 'y', 'z'))
    yz = y * z
    assert ((1 / yz - yz) * y * z).expand() == 1 - yz ** 2
    assert (2 * (x + 2 * (y + z))).expand(deep=False) == 2 * x + 4 * (y + z)
    ex = x + 2 * (y + z)
    # deep=False must leave the inner sum untouched.
    assert ex.expand(deep=False) == ex
class StrategyChain(object):
def __init__(self, acceptance_param, visit_dist, func_wrapper, minimizer_wrapper, rand_state, energy_state):
self.emin = energy_state.current_energy
self.xmin = np.array(energy_state.current_location)
self.energy_state = energy_state
self.acceptance_param... |
def myfunctrosenbrock(p, fjac=None):
    """Rosenbrock residual vector in the mpfit convention.

    Returns ``[status, residuals]`` where status 0 means success; *fjac*
    is accepted for interface compatibility and unused (no analytic Jacobian).
    """
    d0 = 1 - p[0]
    d1 = 10 * (p[1] - p[0] ** 2)
    residuals = N.array([d0, -d0, d1, -d1])
    status = 0
    return [status, residuals]
def to_sparse(spmat):
    """Convert a scipy sparse matrix to a torch sparse FloatTensor.

    Fix: the original called ``spmat.tocoo()`` four separate times,
    rebuilding the COO representation for every field; convert once and
    reuse it.
    """
    coo = spmat.tocoo()
    indices = torch.LongTensor([coo.row, coo.col])
    values = torch.FloatTensor(coo.data)
    return torch.sparse.FloatTensor(indices, values, torch.Size(coo.shape))
def Dx(test, x, k=1):
assert isinstance(test, (Expr, BasisFunction))
if (k == 0):
return test
if (k > 1):
for _ in range(k):
test = Dx(test, x, 1)
return test
if isinstance(test, BasisFunction):
test = Expr(test)
dtest = Expr(test._basis, copy.deepcopy(tes... |
class WeiboSenti100kPipe(CLSBasePipe):
def __init__(self, bigrams=False, trigrams=False):
super().__init__()
self.bigrams = bigrams
self.trigrams = trigrams
def _chracter_split(self, sent):
return list(sent)
def _tokenize(self, data_bundle, field_name=Const.INPUT, new_field_n... |
def get_session(gpu_fraction=0.75):
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
if num_threads:
print(('with nthreads=%s' % num_threads))
return tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options, intra_op_parallelism_threads=num_thr... |
def sync_grad_sum(network):
    """Sum-all-reduce every parameter gradient of *network* across workers.

    No-op when running single-process (world size 1).
    """
    if misc.get_world_size() == 1:
        return
    grads = [p.grad.data for p in network.parameters() if p.grad is not None]
    misc.all_reduce_sum(grads)
def main(argv=None):
if (argv is None):
argv = sys.argv[1:]
from optparse import OptionParser
parser = OptionParser(usage='usage: %prog [options] input_xml_file')
parser.add_option('--rootpath', type=str, default=ROOT_PATH, help=('path to datasets. (default: %s)' % ROOT_PATH))
parser.add_opt... |
def home_location(traj, start_night='22:00', end_night='07:00', show_progress=True):
if (constants.UID not in traj.columns):
return pd.DataFrame([_home_location_individual(traj, start_night=start_night, end_night=end_night)], columns=[constants.LATITUDE, constants.LONGITUDE])
if show_progress:
d... |
class DistEvalHook(EvalHook):
def __init__(self, dataloader, interval=1, gpu_collect=False, **eval_kwargs):
if (not isinstance(dataloader, DataLoader)):
raise TypeError('dataloader must be a pytorch DataLoader, but got {}'.format(type(dataloader)))
self.dataloader = dataloader
se... |
class MusicAVQADataset(BaseDataset):
def __init__(self, **kwargs):
super().__init__(kwargs['vis_processor'], kwargs['text_processor'], kwargs['vis_root'], kwargs['ann_paths'])
self.modalities = kwargs['modalities']
for modality in self.modalities:
if ('image' in modality):
... |
def dict_update(d, u):
    """Recursively merge mapping *u* into dict *d* in place and return *d*.

    Nested mappings are merged key-by-key; any other value overwrites the
    existing entry.
    """
    for key in u:
        value = u[key]
        if not isinstance(value, collections.abc.Mapping):
            d[key] = value
        else:
            d[key] = dict_update(d.get(key, {}), value)
    return d
class ResizeImage():
    """Callable that resizes a PIL-style image to a fixed (h, w) size.

    An int *size* is normalized to a square target.
    """

    def __init__(self, size):
        self.size = (int(size), int(size)) if isinstance(size, int) else size

    def __call__(self, img):
        height, width = self.size
        return img.resize((height, width))
.parametrize('comp', [1, 3])
.parametrize('resx,resy', [(91, 81)])
.parametrize('scale', [1, 2, 3])
_utils.test(arch=get_host_arch_list())
def test_image_resize_sum(resx, resy, comp, scale):
shape = (resx, resy)
if (comp != 1):
shape = (shape + (comp,))
old_img = np.random.rand(*shape).astype(np.flo... |
def _bn_relu(x, bn_name=None, relu_name=None):
    """Apply batch normalization then ReLU to tensor *x* (Keras functional API).

    Optional layer names allow the graph nodes to be addressed later.
    """
    bn_layer = BatchNormalization(axis=CHANNEL_AXIS, name=bn_name)
    relu_layer = Activation('relu', name=relu_name)
    return relu_layer(bn_layer(x))
def run_rand_seq_delete(algo, X_train, delete_count):
times = []
X_train_del = X_train.copy()
for i in range(delete_count):
print(('Delete %d/%d...' % ((i + 1), delete_count)))
id = numpy.random.randint(0, len(X_train_del))
start = time.time()
algo.delete(id, X_train_del)
... |
class EdgeResidual(nn.Module):
def __init__(self, in_chs, out_chs, exp_kernel_size=3, exp_ratio=1.0, fake_in_chs=0, stride=1, pad_type='', act_layer=nn.ReLU, noskip=False, pw_kernel_size=1, se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_connect_rate=0.0):
super(EdgeResidual,... |
def boomerang_mobility(locations, orientations):
    """Force/torque mobility for the first boomerang in the given lists.

    Only ``locations[0]`` and ``orientations[0]`` are used.
    """
    location = locations[0]
    r_vectors = get_boomerang_r_vectors_15(location, orientations[0])
    return force_and_torque_boomerang_mobility(r_vectors, location)
def recursive_build_binary(seq):
if ((seq[0] == '(') and (seq[(- 1)] == ')') and len(seq)):
node = TreeNode(is_leaf=False)
children_seqs = []
children_seq = []
counter = 0
for token in seq[1:(- 1)]:
children_seq.append(token)
if (token == '('):
... |
_lr_scheduler('tri_stage')
class TriStageLRSchedule(LegacyFairseqLRScheduler):
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
if (len(args.lr) > 1):
raise ValueError('Cannot use a fixed learning rate schedule with tri-stage lr. Consider --lr-scheduler=fixed instea... |
class TFBartForConditionalGeneration(metaclass=DummyObject):
    """Import-time placeholder: raises unless the TensorFlow backend is available."""

    # Backends this dummy stands in for.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Fails fast with an informative error when TensorFlow is missing.
        requires_backends(self, ['tf'])
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=123, help='Random seed.')
parser.add_argument('--dataset_str', type=str, default='dblp', help="['dblp','example']")
parser.add_argument('--epoch_num', type=int, default=30, help='Number of epochs to trai... |
class ObjectProxy(metaclass=_ObjectProxyMetaType):
def __init__(self, wrapped, usage_trace: (UsageTraceNode | None)=None, is_kwargs: bool=False) -> None:
object.__setattr__(self, '__wrapped__', wrapped)
object.__setattr__(self, '_self_usage_trace_node', (UsageTraceNode(name='ROOT') if (usage_trace i... |
class MEInitStatesArchive(InitStatesArchive, MEGrid):
def __init__(self, bin_sizes, bin_bounds, n_init_states, map_dims, **kwargs):
InitStatesArchive.__init__(self, bin_sizes, bin_bounds, n_init_states, map_dims, **kwargs)
MEGrid.__init__(self, bin_sizes, bin_bounds, **kwargs)
def add(self, item... |
def calculate_frechet_distance(activations_pred, activations_target, eps=1e-06):
(mu1, sigma1) = fid_calculate_activation_statistics(activations_pred)
(mu2, sigma2) = fid_calculate_activation_statistics(activations_target)
diff = (mu1 - mu2)
(covmean, _) = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
... |
def conv3x3(in_channels, out_channels, stride=1, padding=1, bias=True, groups=1):
    """3x3 2-D convolution with configurable stride, padding, bias and groups."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        bias=bias,
        groups=groups,
    )
class MLP(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, num_layers, dropout):
super(MLP, self).__init__()
self.lins = torch.nn.ModuleList()
self.lins.append(torch.nn.Linear(in_channels, hidden_channels))
for _ in range((num_layers - 2)):
... |
class ConstantInit(InitialConditions):
def __init__(self, a=0, b=0):
self.a = a
self.b = b
self.repr_init()
def init_a(self, shape, id, direction):
return self.a
def init_b(self, shape, id, direction):
assert (shape is not None)
return (self.b * np.ones(shape)... |
def multiscale_lossL2(flows_gt, flows_pyramid, conf, weights, name='multiscale_loss'):
with tf.name_scope(name) as ns:
loss = 0.0
for (l, (weight, fs)) in enumerate(zip(weights, flows_pyramid)):
(_, h, w, _) = tf.unstack(tf.shape(fs))
fs_gt_down = tf.image.resize_nearest_neig... |
class CarromTable(Benchmark):
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip(([(- 10.0)] * self.N), ([10.0] * self.N)))
self.global_optimum = [(9., 9.), ((- 9.), 9.), (9., (- 9.)), ((- 9.), (- 9.))]
self.fglob = (- 24.)
def fun(self... |
.parametrize('ratio, user_answer, item_answer, min_interactions_per_group, split_by_fraqtions', [(0.5, [[1, 1, 2, 2, 3, 3], [1, 1, 1, 2, 2, 2, 3, 3, 3]], [[1, 2, 1, 2, 1, 5], [3, 4, 5, 3, 9, 10, 3, 1, 2]], 5, True), (0.5, [[1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3], []], [[1, 2, 3, 4, 5, 1, 2, 3, 9, 10, 1, 5, 3, 1, ... |
def random_embeddings(vocabulary_size, embedding_size):
    """Randomly initialized embedding table with one extra row.

    The extra row is presumably reserved for padding/OOV — TODO confirm
    against the caller's indexing convention.
    """
    num_rows = vocabulary_size + 1
    return nn.Embedding(num_rows, embedding_size)
class _DatasetCatalog(UserDict):
def register(self, name, func):
assert callable(func), 'You must register a function with `DatasetCatalog.register`!'
assert (name not in self), "Dataset '{}' is already registered!".format(name)
self[name] = func
def get(self, name):
try:
... |
class OneHotEncoder(Encoder):
def __init__(self) -> None:
super().__init__()
self._build_indexes()
def _build_indexes(self):
self.P_NAME_IDX_INC_OH = 2
self.A_TYPE_IDX_INC_OH = 3
self.HEALTH_IDX_INC_OH = 5
self.CARRY_IDX_INC_OH = 1
self.MONEY_IDX_INC_OH = ... |
def mandelbrot_plot(f=None, **kwds):
parameter = kwds.pop('parameter', None)
x_center = kwds.pop('x_center', 0.0)
y_center = kwds.pop('y_center', 0.0)
image_width = kwds.pop('image_width', 4.0)
max_iteration = kwds.pop('max_iteration', None)
pixel_count = kwds.pop('pixel_count', 500)
level_s... |
class TestHypot(object):
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
def test_reduce(self):
assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0)
assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0)
... |
class TestLatexRepr(object):
def as_latex(self, obj):
obj._repr_latex_scalar = (lambda x: str(x))
try:
return obj._repr_latex_()
finally:
del obj._repr_latex_scalar
def test_simple_polynomial(self):
p = Polynomial([1, 2, 3])
assert_equal(self.as_la... |
def register_Ns3PointToPointNetDevice_methods(root_module, cls):
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_constructor([])
cls.add_method('SetDataRate', 'void', [param('ns3::DataRate', 'bps')])
cls.add_method('SetInterframeGap', 'void', [param('ns3::Time', 't')])
cls.add... |
def main(args):
    """Build a retrieval index from the raw training set.

    Creates ``args.index_dir`` if missing, maps posts to ids, and writes
    the index there.
    """
    index_dir = args.index_dir
    if not os.path.exists(index_dir):
        os.makedirs(index_dir)
    training_data = load_raw_trainset(args.data_dir)
    id2post = build_mapping(training_data, index_dir)
    Indexer(index_dir).build_index(id2post)
_torch
class ChineseCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = ((ChineseCLIPVisionModel,) if is_torch_available() else ())
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_te... |
class MultiAgentObservationSpace(list):
def __init__(self, ma_space):
for x in ma_space:
assert isinstance(x, gym.spaces.space.Space)
super().__init__(ma_space)
def sample(self):
return [sa_space.sample() for sa_space in self]
def contains(self, obs):
for (space, ... |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--num-batches', dest='num_batches', help='Number of minibatches to run', default=200, type=int)
parser.add_argument('--sleep', dest='sleep_time', help='Seconds sleep to emulate a network running', default=0.1, type=float)
parser.a... |
class TransductiveExperiment():
def __init__(self, run_cfg: RunConfiguration, data_cfg: DataConfiguration, model_cfg: ModelConfiguration, train_cfg: TrainingConfiguration, ex: Optional[Experiment]=None):
self.run_cfg = run_cfg
self.model_cfg = model_cfg
self.data_cfg = data_cfg
self.... |
class TimeOut(contextlib.ContextDecorator):
def __init__(self, seconds: float):
self.seconds = seconds
def _timeout_handler(self, signum, frame):
raise TimeoutError('Code timed out.')
def __enter__(self):
if hasattr(signal, 'SIGALRM'):
signal.signal(signal.SIGALRM, self._... |
def main():
frame = np.zeros((300, 600, 3), np.uint8)
cvui.init(WINDOW_NAME)
while True:
frame[:] = (49, 52, 49)
rectangle = cvui.Rect(50, 50, 100, 100)
cvui.rect(frame, rectangle.x, rectangle.y, rectangle.width, rectangle.height, )
status = cvui.iarea(rectangle.x, rectangle.... |
def add_scalar_to_space(observation_space: Union[(spaces.Box, spaces.Dict)]) -> spaces.Box:
if isinstance(observation_space, spaces.Dict):
observation_space = observation_space['observation']
if (not isinstance(observation_space, spaces.Box)):
raise ValueError('This method can only add reward to... |
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('xshape, ishape, axis, batch_dims', [((2, 3, 4), (2,), 0, 0), ((2, 3, 4), (2,), 1, 0), ((2, 3, 4), (2,), 2, 0), ((2, 3, 4), (2,), (- 1), 0), ((2, 3, 4), (2,), (- 2), 0), ((2, 3, 4), (2,), (- 3), 0), ((2, 3, 4), (2, 2), 0, 0), ((2, 3, 4), (2, ... |
def program_for_node(program, sdfg: SDFG, state: SDFGState, node: onnx_op.ONNXOp, extra_vars: Optional[Dict[(str, Any)]]=None) -> SDFG:
input_names = node.schema.non_variadic_inputs()
variadic_input_names = node.schema.variadic_inputs()
output_names = node.schema.non_variadic_outputs()
variadic_output_n... |
class EWC(Regularizer):
def task_start_do(self, freeze_layers=[]):
self.load_reg_params()
task_start_do(self.config, self.model, freeze_layers)
def task_end_do(self):
updater = Omega_update(self.config, self.model.parameters(), lr=0.0001, momentum=0.9)
compute_importance(self.con... |
_model_architecture('lstm_lm', 'lstm_lm')
def base_architecture(args):
args.dropout = getattr(args, 'dropout', 0.1)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_hidden_size = getattr(args, 'decoder_hidde... |
def build_transformer(cfg, default_args=None):
    """Instantiate a transformer from *cfg* via the TRANSFORMER registry."""
    registry = TRANSFORMER
    return build_from_cfg(cfg, registry, default_args)
def watch_hs(filename, number):
print(' { 0, 0}, /* 0 */')
num = (number - 1)
hall_symbols = read_spg_csv(filename)
hs = HallSymbol(hall_symbols[num][0])
for (char, vals) in zip(('L', 'N', 'V'), hs.get_LNV()):
print(('%s: %s' % (char, vals)))
(G_R, G_T) = hs.get_operations()
... |
def test_shichi_consistency():
def shichi(x):
(shi, chi) = sc.shichi((x + 0j))
return (shi.real, chi.real)
x = np.r_[((- np.logspace(np.log10(700), (- 30), 200)), 0, np.logspace((- 30), np.log10(700), 200))]
(shi, chi) = sc.shichi(x)
dataset = np.column_stack((x, shi, chi))
FuncData(... |
def _copy_python_package(module, dest):
    """Copy an installed Python package's directory tree under *dest*.

    Raises SQLFlowDiagnostic when the module cannot be located.
    """
    module_path = _find_python_module_path(module)
    if not module_path:
        raise SQLFlowDiagnostic(("Can't find module %s" % module))
    target = path.join(dest, path.basename(module_path))
    shutil.copytree(module_path, target)
class TestMLPConcat(TfGraphTestCase):
def setup_method(self):
super(TestMLPConcat, self).setup_method()
self.obs_input = np.array([[1, 2, 3, 4]])
self.act_input = np.array([[1, 2, 3, 4]])
input_shape_1 = self.obs_input.shape[1:]
input_shape_2 = self.act_input.shape[1:]
... |
def move_chexpert_single_target(root_folder, destination_root, disease_name):
actual_val_ratio = 0.1
fine_tune_ratio_list = ALL_SEMI_RATIO
root_path = Path(root_folder)
def move_to_category(category, folder, df):
dst_path = (((Path(destination_root) / 'data') / folder) / category)
os.mak... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.