code stringlengths 101 5.91M |
|---|
def gen_pairs(grid_size: int, pair_num: int, stride: int=1) -> np.ndarray:
neighbors = [(i, j) for i in range((- stride), (stride + 1)) for j in range((- stride), (stride + 1)) if ((i != 0) or (j != 0))]
total_pairs = []
for _ in range(pair_num):
while True:
x1 = np.random.randint(0, gri... |
def ctcBeamSearch(mat, classes, lm, beamWidth=10):
blankIdx = len(classes)
(maxT, maxC) = mat.shape
last = BeamState()
labeling = ()
last.entries[labeling] = BeamEntry()
last.entries[labeling].prBlank = 1
last.entries[labeling].prTotal = 1
for t in range(maxT):
curr = BeamState()... |
def hf_bucket_url(identifier, postfix=None, cdn=False) -> str:
    """Build the hub URL for a model identifier, optionally with a postfix path.

    Uses the CloudFront mirror when ``cdn`` is True, the raw S3 bucket otherwise.
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if cdn else S3_BUCKET_PREFIX
    parts = [endpoint, identifier]
    if postfix is not None:
        parts.append(postfix)
    return '/'.join(parts)
def weight_srcfocus(model, src_coords, delta=0.01, full=True):
w_dim = as_tuple((model.grid.dimensions if full else model.grid.dimensions[(- 1)]))
isrc = tuple(((np.float32(model.padsizes[i][0]) + (src_coords[(0, i)] / model.spacing[i])) for i in range(model.dim)))
h = (np.prod(model.spacing) ** (1 / model.... |
.timeout(10)
def test_pickle():
max_path_length = 16
env = GarageEnv(PointEnv())
policy = FixedPolicy(env.spec, scripted_actions=[env.action_space.sample() for _ in range(max_path_length)])
tasks = SetTaskSampler(PointEnv)
n_workers = 8
workers = WorkerFactory(seed=100, max_path_length=max_path_... |
class AnomalibCometLogger(ImageLoggerBase, CometLogger):
def __init__(self, api_key: (str | None)=None, save_dir: (str | None)=None, project_name: (str | None)=None, rest_api_key: (str | None)=None, experiment_name: (str | None)=None, experiment_key: (str | None)=None, offline: bool=False, prefix: str='', **kwargs)... |
_args('v', 'i', 'none')
def log_softmax(g, input, dim, dtype=None):
input_dim = input.type().dim()
if (input_dim is None):
return _unimplemented('dim', 'ONNX and PyTorch use different strategies to split the input. Input rank must be known at export time.')
if (dim < 0):
dim = (input_dim + d... |
def empty_dataset(query_point_shape: ShapeLike, observation_shape: ShapeLike) -> Dataset:
    """Build a Dataset holding zero query points and zero observations.

    Both tensors are float64 with a leading batch dimension of 0 followed by
    the given per-point shapes.
    """
    query_points = tf.zeros(tf.TensorShape([0]) + query_point_shape, tf.float64)
    observations = tf.zeros(tf.TensorShape([0]) + observation_shape, tf.float64)
    return Dataset(query_points, observations)
_config
def task_finetune_irtr_f30k():
    """Config: fine-tune image-text retrieval on Flickr30k (ITM + IRTR losses).

    NOTE(review): the stripped decorator above this function (shown as
    `_config` in the extraction) was presumably `@ex.config` (sacred), which
    captures these local assignments as experiment configuration — confirm
    against the original file before restructuring.
    """
    exp_name = 'finetune_irtr_f30k'
    datasets = ['f30k']
    # ITM loss weighted 0.5 alongside the IRTR loss at weight 1.
    loss_names = _loss_names({'itm': 0.5, 'irtr': 1})
    batch_size = 256
    max_epoch = 10
    max_steps = None
    # Value < 1 — presumably a warmup *ratio* of total steps; verify against
    # the scheduler that consumes it.
    warmup_steps = 0.1
    get_recall_metric = True
    # Number of false texts sampled per image for the matching objective.
    draw_false_text = 15
    learning_rate = 0.0001
def test_stdc_module():
    """Smoke-test STDCModule wiring: layer channel counts and output shape."""
    module = STDCModule(in_channels=32, out_channels=32, stride=4)
    assert module.layers[0].conv.in_channels == 32
    assert module.layers[3].conv.out_channels == 4
    inputs = torch.randn(2, 32, 32, 64)
    outputs = module(inputs)
    # Spatial dims are preserved despite stride=4 internals.
    assert outputs.shape == torch.Size([2, 32, 32, 64])
_module
class MultiClsHead(nn.Module):
FEAT_CHANNELS = {'resnet50': [64, 256, 512, 1024, 2048]}
FEAT_LAST_UNPOOL = {'resnet50': ((2048 * 7) * 7)}
def __init__(self, pool_type='adaptive', in_indices=(0,), with_last_layer_unpool=False, backbone='resnet50', norm_cfg=dict(type='BN'), num_classes=1000):
... |
class OLSQ_qiskit(OLSQ):
def __init__(self, objective_name, if_transition_based):
super().__init__(objective_name, if_transition_based)
def setdevice(self, device, mode: str=None):
if (mode == 'ibm'):
config = device.configuration()
edges = config.coupling_map
... |
class TestMbartCc25Enro(TestCasePlus):
def setUp(self):
super().setUp()
data_cached = cached_path(' extract_compressed_file=True)
self.data_dir = f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
_torch_gpu
def test_model_download(self):
MarianMTModel.from_pretrained(MARIAN_MODE... |
class PositionwiseFeedForward(nn.Module):
def __init__(self, d_model: int=512, d_ff: int=2048, dropout_p: float=0.3) -> None:
super(PositionwiseFeedForward, self).__init__()
self.feed_forward = nn.Sequential(Linear(d_model, d_ff), nn.Dropout(dropout_p), nn.ReLU(), Linear(d_ff, d_model), nn.Dropout(d... |
def colon_ish(text: Optional[str]) -> bool:
    """Return True if *text* ends (after stripping whitespace) with a
    continuation-like character: '-', ';', ':' or ','.

    Fix: the original raised IndexError on an empty or whitespace-only
    string (``text.strip()[-1]`` on ''); such inputs now return False.
    """
    if text is None:
        return False
    stripped = text.strip()
    if not stripped:
        return False
    return stripped[-1] in {'-', ';', ':', ','}
class SchemeMorphism_point_projective_field(SchemeMorphism_point_projective_ring):
def __init__(self, X, v, check=True):
SchemeMorphism.__init__(self, X)
if check:
from sage.schemes.elliptic_curves.ell_point import EllipticCurvePoint_field
from sage.rings.ring import Commutat... |
class MockedDataLoader():
def __init__(self, train_val, configs, num_workers=4, pin_memory=False, prefetch_factor=2):
assert (train_val in {'train', 'validate'})
F_bin = configs['n_mels']
segn = int((configs['segment_size'] * configs['sample_rate']))
T = (((segn + configs['stft_hop']... |
def cdf2(D, grid):
    """Evaluate the standard bivariate normal CDF at grid points transformed by D.

    The grid is right-multiplied by D before evaluation; the distribution is
    a zero-mean bivariate normal with identity covariance.
    """
    standard_bvn = multivariate_normal([0, 0], [[1, 0], [0, 1]])
    transformed = np.dot(grid, D)
    return standard_bvn.cdf(transformed)
class Batch_generator(data.Dataset):
def __init__(self, nb_answer, ori_img, img_dir, box_dir, que_dir, prep_dir, mode='train'):
self.mode = mode
self.ori_img = ori_img
self.img_dir = img_dir
self.nb_answer = nb_answer
self.box_dir = box_dir
self.top_answer = json.load... |
class FitParamT(BaseEstimator):
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def fit_predict(self, X, y, should_succeed=False):
self.fit(X, y, should_su... |
def _check_round_over(state, action):
fold = (action == FOLD)
call = ((state._last_action != INVALID_ACTION) & (action == CALL))
_continue = ((state._round == 0) & call)
round_over = (fold | call)
terminated = (round_over & (~ _continue))
reward = jax.lax.select(fold, jnp.float32([(- 1), (- 1)])... |
class PVRCNNPlusPlus(Detector3DTemplate):
def __init__(self, model_cfg, num_class, dataset):
super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
self.module_list = self.build_networks()
def forward(self, batch_dict):
batch_dict = self.vfe(batch_dict)
batch... |
def get_vendor_version_from_module(module_name):
module = get_module_from_module_name(module_name)
version = getattr(module, '__version__', None)
if (not version):
pkg_set = pkg_resources.WorkingSet([os.path.dirname(module.__file__)])
package = pkg_set.find(pkg_resources.Requirement.parse(mo... |
def test_case38():
url = (brokerIp + '/ngsi-ld/v1/entities/')
headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
r = requests.post(url, data=json.dumps(ld_data.subdata26), headers=headers)
print(r.content)
print(r.s... |
def write_arg(cmd, basename, filename, force=False):
argname = os.path.splitext(basename)[0]
value = getattr(cmd.distribution, argname, None)
if (value is not None):
value = ('\n'.join(value) + '\n')
cmd.write_or_delete_file(argname, filename, value, force) |
def main():
args = parse_args()
if (args.job_dir == ''):
args.job_dir = (get_shared_folder() / '%j')
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition... |
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
val = str(val)
result: Any = []
if (val in NULL_VALUES):
return [np.nan]
if (not validate_it_aic(val)):
if (errors == 'raise'):
raise ValueError(f'Unable to parse value {val}')
error_re... |
.parametrize('val', [0, 1])
_utils.test(ti.cpu)
def test_static_if(val):
    """Check that ti.static() selects the written branch for val in {0, 1}."""
    x = ti.field(ti.i32)
    ti.root.dense(ti.i, 1).place(x)
    # NOTE(review): this inner function was presumably decorated with
    # @ti.kernel in the original source; the decorator was lost in extraction.
    def static():
        # ti.static evaluates the condition when the kernel is compiled, so
        # only the taken branch is materialized.
        if ti.static((val > 0.5)):
            x[0] = 1
        else:
            x[0] = 0
    static()
    assert (x[0] == val)
def traced_forward_context_manager(model, with_submodules=False):
forward_trace = ForwardTrace()
context_manager = ForwardTracer(model, forward_trace, with_submodules=with_submodules)
return (context_manager, forward_trace) |
def adjusted_prec(p, prec):
    """Return a working precision adjusted upward for logarithm defects.

    For prec <= 2 the defect is zero and the base precision is 2; otherwise
    the defect is the integral p-adic log of (2*prec - 3). The result is then
    bumped until (adjusted - defect - 1) reaches prec.
    """
    if prec <= 2:
        defect, adjusted = 0, 2
    else:
        defect = Integer(2 * prec - 3).exact_log(p)
        adjusted = prec + defect - 1
    # Increase until the effective precision covers the request.
    while adjusted - defect - 1 < prec:
        adjusted += 1
    return adjusted
def url_quote(string, charset='utf-8', errors='strict', safe='/:', unsafe=''):
if (not isinstance(string, (text_type, bytes, bytearray))):
string = text_type(string)
if isinstance(string, text_type):
string = string.encode(charset, errors)
if isinstance(safe, text_type):
safe = safe.... |
class NeuralBanditModel(BayesianNN):
def __init__(self, optimizer, hparams, name):
self.opt_name = optimizer
self.name = name
self.hparams = hparams
self.verbose = getattr(self.hparams, 'verbose', True)
self.times_trained = 0
self.build_model()
def build_layer(sel... |
def accimage_loader(path):
    """Load an image via accimage, falling back to the PIL loader on IOError."""
    import accimage
    try:
        image = accimage.Image(path)
    except IOError:
        # accimage could not decode this file; let PIL have a go.
        return pil_loader(path)
    return image
class WatermarkDetector():
MODEL_URL: str = '
WATERMARK_THRESHOLD: float = 0.9
def load_model():
try:
import timm
except ModuleNotFoundError as e:
handle_module_not_found_error(e, ['heim'])
model = timm.create_model('efficientnet_b3a', pretrained=True, num_cla... |
def train_nn(dataset, neurons=(20,), **kwargs):
(train_x, train_y, test_x, test_y) = (dataset['train_x'], dataset['train_y'], dataset['test_x'], dataset['test_y'])
is_categorical = dataset.get('is_categorical', None)
model = MLPClassifier(hidden_layer_sizes=neurons, **kwargs)
if (is_categorical is not N... |
def recall(y_true: np.ndarray, y_pred: np.ndarray, average: str='micro'):
    """Compute recall with the requested averaging ('micro' or 'macro').

    Inputs are validated first; an unknown ``average`` raises KeyError.
    """
    y_true, y_pred = _validate_input(y_true, y_pred)
    dispatch = {'micro': _recall_micro, 'macro': _recall_macro}
    return dispatch[average](y_true, y_pred)
class DataCollatorForMultipleChoice():
tokenizer: PreTrainedTokenizerBase
padding: Union[(bool, str, PaddingStrategy)] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
def __call__(self, features):
label_name = ('label' if ('label' in features[0].keys()) else ... |
class LukeForQuestionAnswering(metaclass=DummyObject):
    """Import-time placeholder that raises unless the torch backend is installed."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises a helpful error telling the user to install torch.
        requires_backends(self, ['torch'])
def user_features(spark):
    """Build a small demo user-feature DataFrame: (user_idx, age, mood, gender)."""
    rows = [(1, 20.0, -3.0, 1), (2, 30.0, 4.0, 0), (3, 40.0, 0.0, 1)]
    columns = ['user_idx', 'age', 'mood', 'gender']
    return spark.createDataFrame(rows).toDF(*columns)
class TruncInst(ConversionInst):
    """Integer-truncation conversion: operand and result are both integers."""

    code = 'trunc'

    def type_constraints(self, tcs):
        # Both the result and the source operand must be integer-typed.
        tcs.integer(self)
        tcs.integer(self.arg)
        # Pin each to its declared type...
        tcs.specific(self, self.ty)
        tcs.specific(self.arg, self.src_ty)
        # ...and require the result to be no wider than the operand.
        tcs.width_order(self, self.arg)
def get_output_nodes():
    """Return the single linear-activation output node of the network graph."""
    return State('Dense', units=1, activation='linear', name='output')
def _create_dummy_ann_file(ann_file):
ann_info1 = {'file_name': '1.png', 'height': 200, 'width': 200, 'annotations': [{'text': 'store', 'box': [11.0, 0.0, 22.0, 0.0, 12.0, 12.0, 0.0, 12.0], 'label': 1, 'edge': 1}, {'text': 'MyFamily', 'box': [23.0, 2.0, 31.0, 1.0, 24.0, 11.0, 16.0, 11.0], 'label': 2, 'edge': 1}]}
... |
('tasks.implementations.dataset_check_misuse.Project.repository')
('tasks.implementations.dataset_check_misuse._get_all_misuses')
class TestDatasetCheckMisuseLocation():
def setup(self):
version_meta = {'build': {'src': '-source_dir-'}}
self.project = create_project('-project-')
self.version... |
def standalone_TradeoffWordSplitter():
nlp = spacy.load('en_core_web_sm')
matcher = Matcher(nlp.vocab)
matcher.add('trade-off', None, [{'ORTH': 'trade'}, {'ORTH': '-'}, {'ORTH': 'off'}])
matcher.add('trade-offs', None, [{'ORTH': 'trade'}, {'ORTH': '-'}, {'ORTH': 'offs'}])
matcher.add('Trade-off', No... |
class Dataset(data.Dataset):
def __init__(self, datasets, max_samples=None, **defaults):
self.max_sampels = max_samples
self.datasets = {}
self.num_samples = 0
for ds in datasets:
coco = hydra.utils.get_class(f'datasets.{ds.name}.Dataset')
params = {**defaults... |
class GenerationAdapter(InContextLearningAdapter):
def generate_requests(self, eval_instance: Instance, train_trial_index: int, training_instances: List[Instance]) -> List[RequestState]:
prompt: Prompt = self.construct_prompt(training_instances, eval_instance, include_output=False, reference_index=None)
... |
class TrainingArguments():
output_dir: str = field(metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'})
overwrite_output_dir: bool = field(default=False, metadata={'help': 'Overwrite the content of the output directory. Use this to continue training if output_d... |
def head_rel_to_tree(head, rel, len_):
head = head[:len_].tolist()
root = None
nodes = [Tree() for _ in head]
for i in range(len(nodes)):
h = head[i]
nodes[i].idx = i
nodes[i].rel = rel[i]
nodes[i].dist = (- 1)
if (h == 0):
root = nodes[i]
else... |
def choose_boundary():
    """Return a random 32-character hex string for use as a MIME boundary.

    Improvement: the original went through ``six.PY3`` and returned ``bytes``
    on Python 2 but ``str`` on Python 3. On Python 3 (the only supported
    runtime here) the decode is unconditional, so the ``six`` indirection is
    dropped and the return type is always ``str``.
    """
    return binascii.hexlify(os.urandom(16)).decode('ascii')
class EWCParamsComputer(ASR):
def on_fit_start(self):
(self.params, self.fisher) = ({}, {})
self.num_samples = 0
def fit_batch(self, batch):
outputs = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
with self.no_s... |
def test_root_labels():
text = '( (SBARQ-FOO (WHNP-BAR (WP Who)) (SQ#ASDF (VP=1 (VBZ sits) (PP (IN in) (NP (DT this) (NN seat))))) (. ?)))'
trees = tree_reader.read_trees(text)
assert (['ROOT'] == Tree.get_root_labels(trees))
text = (('( (SBARQ-FOO (WHNP-BAR (WP Who)) (SQ#ASDF (VP=1 (VBZ sits) (PP (IN i... |
class AlignedDataset(BaseDataset):
def __init__(self, opt):
BaseDataset.__init__(self, opt)
self.dir_AB = os.path.join(opt.dataroot, opt.phase)
self.AB_paths = sorted(make_dataset(self.dir_AB))
assert (self.opt.load_size >= self.opt.crop_size)
self.input_nc = (self.opt.output... |
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=(lambda x: x)):
executor = ProcessPoolExecutor(max_workers=num_workers)
futures = []
index = 1
with open(os.path.join(in_dir, 'metadata.csv'), encoding='utf-8') as f:
for line in f:
parts = line.strip().split('|')
w... |
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args,... |
def register_Ns3TcpYeah_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::TcpYeah const &', 'sock')])
cls.add_method('CongestionStateSet', 'void', [param('ns3::Ptr< ns3::TcpSocketState >', 'tcb'), param('ns3::TcpSocketState::TcpCongState_t const', 'newState')], is_virtual=T... |
def trainStep(network, criterion, optimizer, X, y):
optimizer.zero_grad()
outputs = network(X)
loss = criterion(outputs, y)
loss.backward()
optimizer.step()
accuracy = (float(torch.sum((torch.argmax(outputs, dim=1) == y)).item()) / y.shape[0])
return (loss, accuracy) |
class VectorPartitions(UniqueRepresentation, Parent):
def __classcall_private__(cls, vec, min=None, parts=None, distinct=False, is_repeatable=None):
if (min is None):
min = find_min(vec)
if (parts is None):
parts = list(IntegerVectorsIterator(vec, min=min))
if (([0] *... |
class TFHubertPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder that raises unless TensorFlow is installed."""

    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Raises a helpful error telling the user to install TensorFlow.
        requires_backends(self, ['tf'])
def obfuscate_observation(obs):
    """Collapse a one-hot observation so every non-zero class becomes class 1.

    The argmax along the last axis is clamped to {0, 1} and then re-expanded
    to one-hot with the original number of classes.
    """
    class_idx = obs.argmax(-1)
    class_idx = np.where(class_idx != 0, 1, class_idx)
    return np.eye(obs.shape[-1])[class_idx]
def mlp_actor_critic(x, a, hidden_sizes=(400, 300), activation=tf.nn.relu, output_activation=None, policy=mlp_gaussian_policy, action_space=None):
with tf.variable_scope('pi'):
(mu, pi, logp_pi) = policy(x, a, hidden_sizes, activation, output_activation)
(mu, pi, logp_pi) = apply_squashing_func(mu, ... |
def main():
args = parse_args()
models_root = args.root
models_out = args.out
mmcv.mkdir_or_exist(models_out)
raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True))
used_configs = []
for raw_config in raw_configs:
if osp.exists(osp.join(models_root, raw_config)):
... |
def wdn_ky2():
graph = nx.Graph()
with open((graph_dir + 'ky2.txt')) as f:
lines = f.readlines()
for line in lines:
if (len(line.split('\t')) == 9):
(u, v) = line.strip().split('\t')[1:3]
u = u.strip()
v = v.strip()
if (... |
def get_status_code_and_reason(response: (Response | None)) -> str:
    """Format an HTTP response as 'CODE - REASON', or '' when there is none."""
    return '' if response is None else f'{response.status_code} - {response.reason}'
def _good_shape(x, shape, axes):
if (shape and (not axes)):
shape = _helper._iterable_of_int(shape, 'shape')
if (len(shape) != np.ndim(x)):
raise ValueError('when given, axes and shape arguments have to be of the same length')
return shape |
def get_flow(im1, im2):
im1 = np.array(im1)
im2 = np.array(im2)
im1 = (im1.astype(float) / 255.0)
im2 = (im2.astype(float) / 255.0)
alpha = 0.012
ratio = 0.75
minWidth = 20
nOuterFPIterations = 7
nInnerFPIterations = 1
nSORIterations = 30
colType = 0
(u, v, im2W) = pyflow... |
def _compute_aspect_ratios_slow(dataset, indices=None):
print("Your dataset doesn't support the fast path for computing the aspect ratios, so will iterate over the full dataset and load every image instead. This might take some time...")
if (indices is None):
indices = range(len(dataset))
class Subs... |
class DropImputer():
def __init__(self, null_values: Optional[List[Any]], fill_value: str='') -> None:
self.null_values = null_values
self.fill_value = fill_value
self.isdrop = False
def fit(self, col_df: dd.Series) -> Any:
self.isdrop = (True in col_df.map(self.check_isdrop).val... |
def register_Ns3Ipv6OptionRouterAlert_methods(root_module, cls):
cls.add_constructor([param('ns3::Ipv6OptionRouterAlert const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetOptionNumber', 'uint8_t', [], is_const=True, is_virtual=True)
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=Tr... |
.parametrize('action_by_evaluation_policy, estimated_rewards_by_reg_model, description', invalid_input_of_create_estimator_inputs)
def test_meta_create_estimator_inputs_using_invalid_input_data(action_by_evaluation_policy, estimated_rewards_by_reg_model, description: str, synthetic_continuous_bandit_feedback: BanditFee... |
class GraphConvolution(Layer):
def __init__(self, input_dim, output_dim, placeholders, index=0, dropout=0.0, sparse_inputs=False, act=tf.nn.relu, bias=False, featureless=False, norm=False, **kwargs):
super(GraphConvolution, self).__init__(**kwargs)
self.dropout = dropout
self.act = act
... |
# NOTE(review): the line below lost its '@pytest.mark' prefix during
# extraction; as written it is not a valid decorator.
.parametrize('input_meters, expected_resolution', [(50, 10), (500, 8), (5000, 6)])
def test__meters_to_resolution(h3_tess, input_meters, expected_resolution):
    # Each physical distance in meters should map to the expected H3 resolution.
    assert (h3_tess._meters_to_resolution(input_meters) == expected_resolution)
def load_mldoc_dataset(dataset_path, lang, dev_size=0.05, seed=1):
data = {}
instances = []
lcode_to_lang = {'en': 'english', 'fr': 'french', 'de': 'german', 'ja': 'japanese', 'zh': 'chinese', 'it': 'italian', 'ru': 'russian', 'es': 'spanish'}
lang = lcode_to_lang[lang]
modes = ['train.1000', 'dev',... |
class SymmetricTensorDescription():
def __init__(self, element, layout, fill_mode, alignment=1, complex_transform=cutlass.complex_transform.none, side_mode=SideMode.Left):
self.element = element
self.layout = layout
self.fill_mode = fill_mode
self.alignment = alignment
self.c... |
def test_integer_dtype(int_func):
    """Check an integer sampler's output dtype is the platform default ('l').

    ``int_func`` is a (function_name, args, md5) triple; the md5 entry is
    unused here.
    """
    random.seed()
    fname, args, _md5 = int_func
    sampler = getattr(random, fname)
    samples = sampler(*args, size=2)
    assert_(samples.dtype == np.dtype('l'))
_utils.test(arch=[ti.cuda])
def test_ndarray_caching_allocator():
    """Exercise the caching allocator: release one ndarray, then allocate another."""
    length = 8
    first = ti.ndarray(ti.i32, shape=length)
    first.fill(2)
    # Drop the only reference so the allocator may recycle the buffer.
    first = 1
    second = ti.ndarray(ti.i32, shape=length)
    second.fill(2)
class mask_rcnn_fcn_head_v1upXconvs_gn(nn.Module):
def __init__(self, dim_in, roi_xform_func, spatial_scale, num_convs):
super().__init__()
self.dim_in = dim_in
self.roi_xform = roi_xform_func
self.spatial_scale = spatial_scale
self.num_convs = num_convs
dilation = cf... |
class Dataset(object):
def add_cmdline_argument(cls, parser):
group = parser.add_argument_group('Dataset')
group.add_argument('--data_dir', type=str, required=False, help='The dataset dir.')
group.add_argument('--data_name', type=str, required=True, choices=['uniDAunDial', 'camrest', 'kvret'... |
class TooManyRequests(_RetryAfter):
    """HTTP 429: the client has issued too many requests in the allotted window."""

    code = 429
    description = 'This user has exceeded an allotted request count. Try again later.'
def mse_loss_per_tensor(y: tf.Tensor, x: tf.Tensor, normalized: bool=False, p: int=2) -> tf.Tensor:
    """Mean p-th-power error between y and x, optionally normalized.

    When ``normalized`` is True the loss is divided by the mean p-th power
    of ``|x|`` (a relative error).
    """
    _loss = tf.reduce_mean(tf.pow(tf.abs(y - x), p))
    if normalized:
        return _loss / tf.reduce_mean(tf.pow(tf.abs(x), p))
    return _loss
class FiniteWordPath_hexagonal_grid_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_hexagonal_grid, FiniteWord_class):
    """Finite hexagonal-grid word path backed by a caching iterator datatype.

    Pure mix-in composition: all behavior comes from the three base classes.
    """
    pass
class TensorboardSummary(object):
def __init__(self, directory, use_dist=False):
self.directory = directory
self.use_dist = use_dist
def create_summary(self):
writer = SummaryWriter(logdir=os.path.join(self.directory))
return writer
def visualize_image(self, writer, dataset, ... |
def _kendall_tau_nxn(df: EDAFrame) -> da.Array:
    """Compute the full pairwise Kendall-tau correlation matrix as a dask array.

    The frame is collapsed to a single partition so pandas' ``corr`` sees
    all rows at once.
    """
    corr_kendall = partial(pd.DataFrame.corr, method='kendall')
    single_partition = df.frame.repartition(npartitions=1)
    return single_partition.map_partitions(corr_kendall).to_dask_array()
def main():
parser = argparse.ArgumentParser(description='script to convert superpoint model from pytorch to onnx')
parser.add_argument('--weight_file', default='weights/superglue_outdoor.pth', help='pytorch weight file (.pth)')
parser.add_argument('--output_dir', default='output', help='output directory')
... |
def _get_local_path(openml_path: str, data_home: str) -> str:
return os.path.join(data_home, 'openml.org', (openml_path + '.gz')) |
def distributed_main(i, args, start_rank=0):
    """Run ``main`` as distributed worker ``i``.

    Fills in the worker's device id and, when unset, derives its global rank
    from the node's starting rank.
    """
    args.device_id = i
    if args.distributed_rank is None:
        args.distributed_rank = start_rank + i
    main(args, init_distributed=True)
def parking_spaces_query(bboxes_ism, params={}):
magic_numbers = {'EMPTY_SPOT_IOU': 0.25, 'COALESCE_IOU': 0.5, 'MIN_TIME': 30}
magic_numbers.update(params)
EMPTY_SPOT_IOU = magic_numbers['EMPTY_SPOT_IOU']
COALESCE_IOU = magic_numbers['COALESCE_IOU']
MIN_TIME = magic_numbers['MIN_TIME']
first_key... |
class MHSA_stage_adapt(nn.Module):
def __init__(self, seq_length, dim, num_layers, num_heads, mlp_ratio, qkv_bias=True, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, num_domains=4, norm_layer=nn.LayerNorm, adapt_method=None, crpe_window={3: 2, 5: 3, 7: 3}):
super(MHSA_stage_adapt, se... |
def job_fssdJ5q_imqb2_optv(p, data_source, tr, te, r):
    """FSSD job: 5 test locations, IMQ kernel with b = -2.0, validation-optimized."""
    return job_fssdJ1q_imq_optv(p, data_source, tr, te, r, J=5, b=-2.0)
def sdae_coil100(dropout=0.2, slope=0.0, dim=10):
    """Build the COIL-100 stacked denoising autoencoder (49152-500-500-2000-dim)."""
    layer_dims = [49152, 500, 500, 2000, dim]
    return SDAE(dim=layer_dims, dropout=dropout, slope=slope)
def confirmAuth(headers):
    """Validate the 'AuthToken' header and return its signed payload.

    Raises AuthFail when the header is missing or malformed, or when the
    token's HMAC signature does not verify.

    SECURITY: cPickle.loads runs on client-controlled bytes *before* the
    HMAC is checked, so a forged token can execute arbitrary code during
    deserialization. Verify the HMAC over the raw bytes first (or switch
    to a safe format such as JSON) before using this in production.

    Fix: the original bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; only genuine errors are now converted to AuthFail.
    """
    try:
        token = cPickle.loads(base64.b64decode(headers['AuthToken']))
        if not check_hmac(token['signature'], token['data'], getSecretKey()):
            raise AuthFail
        secure_data = token['data']
        return secure_data
    except AuthFail:
        raise
    except Exception:
        # Missing header, bad base64, unpicklable payload, wrong token
        # structure: all collapse to an authentication failure.
        raise AuthFail
class OpusState():
def __init__(self, source_dir, eos_token_id=0):
npz_path = find_model_file(source_dir)
self.state_dict = np.load(npz_path)
cfg = load_config_from_state_dict(self.state_dict)
if (cfg['dim-vocabs'][0] != cfg['dim-vocabs'][1]):
raise ValueError
if ... |
def create_vit(model_cfg):
model_cfg = model_cfg.copy()
backbone = model_cfg.pop('backbone')
normalization = model_cfg.pop('normalization')
model_cfg['n_cls'] = 1000
mlp_expansion_ratio = 4
model_cfg['d_ff'] = (mlp_expansion_ratio * model_cfg['d_model'])
if (backbone in default_cfgs):
... |
class TFGPT2ModelTest(TFCommonTestCases.TFCommonModelTester):
all_model_classes = ((TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel) if is_tf_available() else ())
class TFGPT2ModelTester(object):
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, us... |
def Normalize(in_channels):
    """Return a 32-group GroupNorm over ``in_channels`` (eps=1e-6, affine)."""
    group_norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-06, affine=True)
    return group_norm
def _extract_labels(filename, num_labels):
print('Extracting labels from: ', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read((1 * num_labels))
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels |
.parametrize('lr', [0.0001])
.parametrize('module', [torch.nn.Linear(2, 3)])
def test_rmsprop_factory(lr: float, module: torch.nn.Module) -> None:
factory = RMSpropFactory()
optim = factory.create(module.named_modules(), lr)
assert isinstance(optim, RMSprop)
assert (optim.defaults['lr'] == lr)
RMSpr... |
class MpnnArxivConfig(ArxivConfig):
def __init__(self, hidden, aggr) -> None:
super().__init__(hidden)
self.aggr = aggr
def model(self, hparams):
return MpnnArxivNet(hidden_dim=self.hidden, num_graph_layers=NUM_LAYERS, dropout=hparams['dropout'], residual=True, aggr=self.aggr)
def pr... |
def search_absorbe_bn(model):
    """Recursively fold BatchNorm children into their preceding absorbing layer.

    Walks direct children in order; whenever a BN follows a layer that can
    absorb it, the BN statistics are folded in, then recursion continues
    into the child.
    """
    previous = None
    for child in model.children():
        if is_bn(child) and is_absorbing(previous):
            absorb_bn(previous, child)
        search_absorbe_bn(child)
        previous = child
def theta_series_degree_2(Q, prec):
from sage.arith.misc import integer_floor as floor
from sage.misc.functional import sqrt
from sage.misc.timing import cputime
from sage.misc.verbose import verbose
if (Q.base_ring() != ZZ):
raise TypeError('the quadratic form must be integral')
if (not... |
class ImageMujocoEnv(ProxyEnv, Env):
def __init__(self, wrapped_env, imsize=32, keep_prev=0, init_camera=None, camera_name=None, transpose=False, grayscale=False, normalize=False):
self.quick_init(locals())
super().__init__(wrapped_env)
self.imsize = imsize
if grayscale:
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.