code stringlengths 101 5.91M |
|---|
class Stacking2TaskGenerator(BaseTask):
def __init__(self, variables_space='space_a_b', fractional_reward_weight=1, dense_reward_weights=np.array([750, 250, 250, 125, 0.005]), activate_sparse_reward=False, tool_block_mass=0.02, tool_block_size=0.065, joint_positions=None, tool_block_1_position=np.array([0, 0, 0.032... |
class Inequality(Hrepresentation):
def type(self):
return self.INEQUALITY
def is_inequality(self):
return True
def is_facet_defining_inequality(self, other):
from sage.geometry.polyhedron.base import Polyhedron_base
if (not isinstance(other, Polyhedron_base)):
rai... |
def test_smooth_l1_loss():
loss_cfg = dict(type='SmoothL1Loss')
loss = build_loss(loss_cfg)
fake_pred = torch.zeros(1, 3, 2)
fake_target = torch.zeros(1, 3, 2)
assert torch.allclose(loss(fake_pred, fake_target), torch.tensor(0.0))
fake_pred = torch.ones(1, 3, 2)
fake_target = torch.zeros(1, ... |
_start_docstrings('Xxx Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ', XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING)
class XxxForSequenceClassification(XxxPreTrainedModel):
def __init__(self, config):
super().__in... |
class SR_gf2_2(SR_gf2):
def inversion_polynomials_single_sbox(self, x=None, w=None, biaffine_only=None, correct_only=None, groebner=False):
e = self.e
if ((x is None) and (w is None)):
names = ([('w%d' % i) for i in reversed(range(e))] + [('x%d' % i) for i in reversed(range(e))])
... |
_model
def ecaresnet50(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
default_cfg = default_cfgs['ecaresnet50']
block_args = dict(attn_layer='eca')
model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, in_chans=in_chans, block_args=block_args, **kwargs)
model.default_cfg = defaul... |
def plot_curves(xy_list, xaxis, title):
plt.figure(figsize=(8, 2))
maxx = max((xy[0][(- 1)] for xy in xy_list))
minx = 0
for (i, (x, y)) in enumerate(xy_list):
color = COLORS[i]
plt.scatter(x, y, s=2)
(x, y_mean) = window_func(x, y, EPISODES_WINDOW, np.mean)
plt.plot(x, y... |
class LineByLineTextDataset_shuffle_belief(Dataset):
def __init__(self, tokenizer, args, file_path, block_size=512):
assert os.path.isfile(file_path)
logger.info('Creating features from dataset file at %s', file_path)
with open(file_path, encoding='utf-8') as f:
lines = [line for... |
class QuotientOfSimplicialSet_finite(QuotientOfSimplicialSet, PushoutOfSimplicialSets_finite):
def __init__(self, inclusion, vertex_name='*'):
subcomplex = inclusion.domain()
PushoutOfSimplicialSets_finite.__init__(self, [inclusion, subcomplex.constant_map()], vertex_name=vertex_name)
ambien... |
def forward_variable_and_check_equal(variable_a, variable_b):
def forward_output(variable):
if isinstance(variable, nn.Variable):
variable.forward()
else:
y = F.sink(*variable)
for v in variable:
v.persistent = True
y.forward()
for ... |
def rename_keys(s_dict):
keys = list(s_dict.keys())
for key in keys:
layer_to_block_of_layer = '.*/layers_(\\d+)'
new_key = key
if re.match(layer_to_block_of_layer, key):
new_key = re.sub('layers_(\\d+)', 'block/\\1/layer', new_key)
layer_to_block_of_layer = '(encoder... |
def get_test_data(input_shape):
if (input_shape == (1,)):
paths = {'observations': [[((- np.pi) / 2)], [((- np.pi) / 3)], [((- np.pi) / 4)], [(np.pi / 4)], [(np.pi / 3)], [(np.pi / 4)]]}
expected = [[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]]
elif (input_shape == (2,)):
paths = {'obs... |
def save_ckpt(path):
    """Serialize the current training state (iteration counter, model,
    optimizer, scheduler, and best score) to *path*.

    Relies on the enclosing module's globals (cur_itrs, model, optimizer,
    scheduler, best_score).
    """
    state = {
        'cur_itrs': cur_itrs,
        'model_state': model.module.state_dict(),
        'optimizer_state': optimizer.state_dict(),
        'scheduler_state': scheduler.state_dict(),
        'best_score': best_score,
    }
    torch.save(state, path)
    print('Model saved as %s' % path)
def get_score_locations() -> Tuple[(str, str)]:
pid = os.getpid()
project_root = os.path.dirname(os.path.abspath(__file__))
filename_current_score = os.path.join(project_root, f'{CURRENT_SCORE_LOCATION}.json')
filename_new_score = os.path.join(project_root, f'{NEW_SCORE_LOCATION}_{pid}.json')
return... |
class CoreNLPClient(RobustService):
DEFAULT_ENDPOINT = '
DEFAULT_TIMEOUT = 60000
DEFAULT_THREADS = 5
DEFAULT_OUTPUT_FORMAT = 'serialized'
DEFAULT_MEMORY = '5G'
DEFAULT_MAX_CHAR_LENGTH = 100000
def __init__(self, start_server=StartServer.FORCE_START, endpoint=DEFAULT_ENDPOINT, timeout=DEFAULT... |
def get_frozen_and_tunable_parameter_names(model: torch.nn.Module) -> List:
frozen_parameter_names = []
tunable_parameter_names = []
for (name, parameter) in model.named_parameters():
if (not parameter.requires_grad):
frozen_parameter_names.append(name)
else:
tunable_... |
def clean_ismn(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', split: bool=False, inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
if (output_format not in {'compact', 'standard'}):
raise ValueError(f'output_format {output_format} is inval... |
def format_index(x) -> str:
    """Render an index cell: floats mean "No RMM"; strings hold a rate in
    [0, 1] and are shown as a truncated integer percentage.

    Other types fall through and yield None, matching the original contract.
    """
    if isinstance(x, str):
        pct = 100 * float(x)
        return f'{int(pct)}%'
    if isinstance(x, float):
        return 'No RMM'
def test_warm_start_smaller_n_estimators():
    """Shrinking n_estimators on a warm-started ensemble must raise ValueError."""
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    classifier = EasyEnsembleClassifier(n_estimators=5, warm_start=True)
    classifier.fit(X, y)
    # Requesting fewer estimators than are already fitted is invalid.
    classifier.set_params(n_estimators=4)
    with pytest.raises(ValueError):
        classifier.fit(X, y)
def add_run_args(parser: argparse.ArgumentParser):
parser.add_argument('-o', '--output-path', type=str, help='Where to save all the output', default='benchmark_output')
parser.add_argument('-n', '--num-threads', type=int, help='Max number of threads to make requests', default=4)
parser.add_argument('--skip-... |
class FunctionFieldOrderInfinite_basis(FunctionFieldOrderInfinite):
def __init__(self, basis, check=True):
if (len(basis) == 0):
raise ValueError('basis must have positive length')
field = basis[0].parent()
if (len(basis) != field.degree()):
raise ValueError('length o... |
def run_inference(model, tokenizer, data, setting, k_samples, num_examplars, prefix, orig_style, opp_style, full_style_description, max_examples, save_path, max_model_token_length, delim_left, delim_right, device):
with open(save_path, 'w') as f:
pass
pbar = tqdm(data, desc='Generating examples')
fo... |
class CosineLRScheduler(Scheduler):
def __init__(self, optimizer: torch.optim.Optimizer, t_initial: int, t_mul: float=1.0, lr_min: float=0.0, decay_rate: float=1.0, warmup_t=0, warmup_lr_init=0, warmup_prefix=True, cycle_limit=0, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, in... |
def report_score(golds, preds, mode='test'):
res = {}
res['Acc_SA'] = accuracy_score(golds['total'], preds['total'])
res['F1_SA'] = f1_score(golds['total'], preds['total'], labels=[0, 1, 2], average='macro')
res['F1_ESA'] = f1_score(golds['explicits'], preds['explicits'], labels=[0, 1, 2], average='macr... |
_function(pre=[textblob_polarity])
def polarity_positive(x):
    """Labeling function: +1 when the candidate's polarity exceeds 0.3, else -1."""
    if x.polarity > 0.3:
        return 1
    return -1
def cache_cfg_urls():
__C.TRAIN.WEIGHTS = cache_url(__C.TRAIN.WEIGHTS, __C.DOWNLOAD_CACHE)
__C.TEST.WEIGHTS = cache_url(__C.TEST.WEIGHTS, __C.DOWNLOAD_CACHE)
__C.TRAIN.PROPOSAL_FILES = tuple((cache_url(f, __C.DOWNLOAD_CACHE) for f in __C.TRAIN.PROPOSAL_FILES))
__C.TEST.PROPOSAL_FILES = tuple((cache_url(... |
def parse_args():
parser = argparse.ArgumentParser(description='Convert ctw1500 annotations to COCO format')
parser.add_argument('root_path', help='ctw1500 root path')
parser.add_argument('-o', '--out-dir', help='output path')
parser.add_argument('--split-list', nargs='+', help='a list of splits. e.g., ... |
def obj_from_dict(info, parent=None, default_args=None):
assert (isinstance(info, dict) and ('type' in info))
assert (isinstance(default_args, dict) or (default_args is None))
args = info.copy()
obj_type = args.pop('type')
if mmcv.is_str(obj_type):
if (parent is not None):
obj_ty... |
def srwl_uti_parse_str2list(_str):
sLoc = copy(_str)
sLoc = sLoc.replace('[', '')
sLoc = sLoc.replace(']', '')
sLoc = sLoc.replace('(', '')
sLoc = sLoc.replace(')', '')
sLoc = sLoc.replace('{', '')
sLoc = sLoc.replace('}', '')
resList = []
if (',' in sLoc):
resList = sLoc.spl... |
def test__build_events_df_events():
events = np.array([[.0, .0, 0.], [.0, .0, 0.], [.0, .0, 0.]])
returned = analysis._build_events_df(events)
expected = pd.DataFrame({'start': [, , ], 'end': [, , ], 'score': [0.572644, 0.572644, 0.572644]})
pd.testing.assert_frame_equal(returned, expected) |
class ClassificationModel():
def __init__(self, num_labels=2, max_length=256, model_name_or_path='albert-large-v2', config_name=None, tokenizer_name=None):
NUM_LABELS = num_labels
self.max_seq_length = 256
self.model_name_or_path = model_name_or_path
self.config_name = config_name
... |
def get_position(schema: ONNXSchema, is_input: bool, parameter_name: str):
if ('__' in parameter_name):
(parameter_name, variadic_number) = parse_variadic_param(parameter_name)
else:
variadic_number = None
matches = [(i, param) for (i, param) in enumerate((schema.inputs if is_input else sche... |
class TestQATModule(TestCase):
(batch_size=st.integers(2, 4), input_channels_per_group=st.sampled_from([2, 3, 4]), height=st.integers(5, 10), width=st.integers(5, 10), output_channels_per_group=st.sampled_from([2, 3]), groups=st.integers(1, 3), kernel_h=st.integers(1, 3), kernel_w=st.integers(1, 3), stride_h=st.int... |
class FiniteWordPath_hexagonal_grid_iter(WordDatatype_iter, FiniteWordPath_hexagonal_grid, FiniteWord_class):
    """Iterator-backed finite word path on the hexagonal grid.

    All behavior is inherited from the mixin bases (iterator datatype,
    hexagonal-grid path semantics, finite-word interface); no body is needed.
    """
    pass
def cluster_positions(positions, thresh: float):
positions = sorted(positions)
clusters = [Cluster([positions[0]], thresh)]
mappings = {positions[0]: 0}
for p in positions[1:]:
if clusters[(- 1)].assess(p):
clusters[(- 1)].add(p)
else:
clusters.append(Cluster([p],... |
class TempModelStorage(ModelStorage):
def __init__(self):
self.default_dir = os.environ.get('FEDN_MODEL_DIR', '/tmp/models')
if (not os.path.exists(self.default_dir)):
os.makedirs(self.default_dir)
self.models = {}
self.models_metadata = {}
def exist(self, model_id):
... |
def scandir_SIDD(dir_path, keywords=None, recursive=False, full_path=False):
if ((keywords is not None) and (not isinstance(keywords, (str, tuple)))):
raise TypeError('"keywords" must be a string or tuple of strings')
root = dir_path
def _scandir(dir_path, keywords, recursive):
for entry in ... |
def load_txt_info(gt_file, img_info):
(contours, words) = get_contours_txt(gt_file)
anno_info = []
for contour in contours:
if (contour.shape[0] == 2):
continue
category_id = 1
coordinates = np.array(contour).reshape((- 1), 2)
polygon = Polygon(coordinates)
... |
class Partition12(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[12]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[13]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[14]']
TENSORS = []
def __init__(self,... |
class sNFW(MassProfile):
def __init__(self, b=None, rs=None, x=None, y=None):
self.b = b
self.rs = rs
self.x = x
self.y = y
self.q = 1.0
self.pa = 0.0
self.theta = 0.0
def deflections(self, xin, yin):
from numpy import arctanh, arctan, arctan2, log... |
.parametrize('argv', argv_cases)
def test_join_matches_subprocess(Parser, runner, argv):
    """Parser.join must produce a command string that round-trips through a
    real subprocess: the child echoes its argv as JSON, which must equal *argv*."""
    echo_cmd = [sys.executable, '-c', 'import json, sys; print(json.dumps(sys.argv[1:]))']
    joined_cmd = Parser.join(echo_cmd + argv)
    stdout = runner(joined_cmd).decode()
    assert json.loads(stdout) == argv
def coverage(prob, p_v):
    """Coverage score of the partition vector *p_v* on ``prob.G`` (delegates to ``cov``)."""
    def _as_sets(labels):
        # One set of member indices per distinct partition id.
        parts = []
        for part_id in np.unique(labels):
            members = np.argwhere(labels == part_id).flatten()
            parts.append(set(members))
        return parts
    return cov(prob.G, _as_sets(p_v))
def _dump_parameters(outdir, params):
    """Write each parameter set in *params* to ``outdir/conf_NNNN.yml`` (1-based).

    Args:
        outdir: target directory; created if missing.
        params: iterable of YAML-serializable parameter mappings.
    """
    # exist_ok replaces the old broad `except OSError: pass`, which also
    # swallowed genuine failures such as permission errors.
    os.makedirs(outdir, exist_ok=True)
    for i, r in enumerate(params):
        o = os.path.join(outdir, 'conf_%04d.yml' % (i + 1))
        with open(o, 'w') as fout:
            yaml.dump(r, fout)
class BackboneEncoderUsingLastLayerIntoW(Module):
def __init__(self, num_layers, mode='ir', opts=None):
super(BackboneEncoderUsingLastLayerIntoW, self).__init__()
print('Using BackboneEncoderUsingLastLayerIntoW')
assert (num_layers in [50, 100, 152]), 'num_layers should be 50,100, or 152'
... |
class TestParam():
def __init__(self, value, required_extensions):
self._value = value
self._required_extensions = required_extensions
def __repr__(self):
return f'Param({self._value}, {self._required_extensions})'
def value(self):
return self._value
def required_extensio... |
class RegexMatchEach(RegexMatch):
    def _f(self, c):
        """Return True iff *c* has at least one token for ``self.attrib`` and
        every token matches the compiled pattern ``self.r``."""
        tokens = c.get_attrib_tokens(self.attrib)
        # bool(...) replaces the redundant `True if ... else False` ternary;
        # the generator lets all() short-circuit instead of building a list.
        return bool(tokens) and all(self.r.match(t) is not None for t in tokens)
def _generate_random_int(low: int=10, high: int=20, forbidden_values: Optional[List[int]]=None):
if (forbidden_values is None):
forbidden_values = []
value = random.randint(low, high)
while (value in forbidden_values):
value = random.randint(low, high)
return value |
def train_detector(model, dataset, cfg, distributed=False, validate=False, logger=None):
    """Dispatch detector training to the distributed or single-process path."""
    # Lazily create a root logger only when the caller did not supply one.
    logger = logger if logger is not None else get_root_logger(cfg.log_level)
    trainer = _dist_train if distributed else _non_dist_train
    trainer(model, dataset, cfg, validate=validate)
class NetworksTest(tf.test.TestCase):
def testGetNetworkFn(self):
batch_size = 5
num_classes = 1000
for net in nets_factory.networks_map:
with self.test_session():
net_fn = nets_factory.get_network_fn(net, num_classes)
image_size = getattr(net_fn, ... |
def compute_score_with_logits(logits, labels):
    """Per-sample score: for each row, the label weight at the argmax logit.

    Args:
        logits: (N, C) float tensor of class scores.
        labels: (N, C) float tensor of (soft) target weights.

    Returns:
        (N, C) tensor that is zero everywhere except each row's predicted
        class, where it carries that class's label weight.
    """
    # argmax replaces the dated `torch.max(...)[1].data` idiom.
    pred = logits.argmax(dim=1)
    one_hots = torch.zeros(labels.size(), device=logits.device)
    one_hots.scatter_(1, pred.view(-1, 1), 1)
    return one_hots * labels
def save_error_tensor(data, index=0, scale=1, mask=None):
while (len(data.shape) > 2):
data = data[0]
data = data.detach().cpu().numpy()
colored_data = color_error_image(data, scale=scale, mask=(mask.detach().cpu().numpy() if (mask is not None) else None))
out_path = os.path.join(str(folder), ('... |
def ADR_dataset(args=None):
    """Build the ADR tweet dataset (lengths clipped to [5, 100]) with a
    balanced positive-class weight applied in place."""
    ds = Dataset(
        name='tweet',
        path='preprocess/Tweets/vec_adr.p',
        min_length=5,
        max_length=100,
        args=args,
    )
    set_balanced_pos_weight(ds)
    return ds
class Struct(dummy):
    def __getattribute__(self, key):
        # Every attribute lookup is answered from the instance __dict__ with a
        # default of 0 for missing names. '__dict__' itself must be fetched via
        # the normal object protocol, otherwise `self.__dict__` below would
        # recurse infinitely through this override.
        if (key == '__dict__'):
            return super(dummy, self).__getattribute__('__dict__')
        return self.__dict__.get(key, 0)
class BlobShape(_message.Message):
    # Auto-generated protocol-buffer message class; the metaclass synthesizes
    # all fields and methods from the compiled DESCRIPTOR at class creation.
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBSHAPE
def unpack_windows_zip(fname):
with zipfile.ZipFile(fname, 'r') as zf:
lib = [x for x in zf.namelist() if ((OPENBLAS_LONG in x) and x.endswith('a') and (not x.endswith('dll.a')) and (not x.endswith('dev.a')))]
if (not lib):
return ('could not find libopenblas_%s*.a in downloaded zipfile'... |
class TestSum(unittest.TestCase):
def test_objective_function(self):
param = None
obj = objective.Sum([objective.Constant(5)])
self.assertEqual(obj.calculate_objective_function(param), 5)
obj = objective.Sum([objective.Constant(2), objective.Constant(4)])
self.assertEqual(obj... |
def test_capacitor_unit():
    """Capacitor energy must stay consistent when switching units GHz -> F."""
    capacitor = Capacitor(10)
    raw_value = capacitor.value()
    assert capacitor.energy() == 10
    assert capacitor.unit == 'GHz'
    # Re-create from the raw value under the Farad unit system.
    sq.set_unit_cap('F')
    capacitor = Capacitor(raw_value)
    assert capacitor.energy() == 10
    assert capacitor.unit == 'F'
def imnormalize(img, mean, std, to_rgb=True):
    """Normalize *img* out of place: a float32 copy is handed to ``imnormalize_``."""
    if img.dtype != np.float32:
        img = np.float32(img)
    else:
        # Already float32: copy so the in-place helper cannot mutate the input.
        img = img.copy()
    return imnormalize_(img, mean, std, to_rgb)
class ThreadDown(Thread):
def __init__(self, dict_name, pos, data_queue, res_queue):
Thread.__init__(self)
self.dict_name = dict_name
self.pos = pos
self.data_queue = data_queue
self.res_queue = res_queue
def run(self):
while (not exitFlag):
if (not se... |
class SpikeFunction():
def __init__(self, v, eps=1e-07):
if (not v):
v = [(0, 0)]
v = sorted([(float(x[0]), float(x[1])) for x in v])
notify = False
for i in reversed(range((len(v) - 1))):
if ((v[(i + 1)][0] - v[i][0]) <= eps):
notify = True
... |
def train(argv=None):
set_up_environment(visible_devices=FLAGS.visible_devices)
(train_set, test_set, vald_set) = higgs_dataset(batch_size=FLAGS.batch_size, num_parallel_calls=8, buffer_size=10000, seed=0, scale=True, include_vald=True, flip_indices=FLAGS.flip_indices)
if FLAGS.evaluate:
print('Eval... |
def to_torch(ndarray):
    """Convert a numpy array to a torch tensor; pass tensors through unchanged.

    Raises:
        ValueError: for inputs that are neither numpy arrays nor torch tensors.
    """
    # Module-name check avoids importing numpy just to test the type.
    if type(ndarray).__module__ == 'numpy':
        return torch.from_numpy(ndarray)
    if torch.is_tensor(ndarray):
        return ndarray
    raise ValueError('Cannot convert {} to torch tensor'.format(type(ndarray)))
def test_username_password():
with corenlp.CoreNLPClient(properties=USERNAME_PASS_PROPS, username='user-1234', password='1234', server_id='test_server_username_pass') as client:
ann = client.annotate(EN_DOC, output_format='text', username='user-1234', password='1234')
assert (ann.strip() == USERNAME... |
class RecordGenerator(Generator, ak._lookup.RecordLookup):
def __init__(self, contents, fields, parameters, flatlist_as_rvec):
self.contents = tuple(contents)
self.fields = (None if (fields is None) else tuple(fields))
self.parameters = parameters
self.flatlist_as_rvec = flatlist_as_... |
class SOLOCheckpointer(Callback):
def __init__(self, args: Namespace, logdir: Union[(str, Path)]=Path('trained_models'), frequency: int=1, keep_previous_checkpoints: bool=False):
super().__init__()
self.args = args
self.logdir = Path(logdir)
self.frequency = frequency
self.ke... |
def test_vector_constraint():
def quad(x):
x = np.asarray(x)
return [np.sum((x ** 2))]
nlc = NonlinearConstraint(quad, [2.2], [3])
oldc = new_constraint_to_old(nlc, np.array([1.0, 1.0]))
res = shgo(rosen, [(0, 10), (0, 10)], constraints=oldc, sampling_method='sobol')
assert np.all((n... |
def full_pip_freeze():
    """Log the full `pip freeze --all` output for run reproducibility."""
    frozen = pip(['freeze', '--all']).decode('utf-8')
    logging.info('pip freeze --all:\n%s', frozen)
def make_sdfg(implementation, dtype, storage=dace.StorageType.Default):
n = dace.symbol('n')
suffix = ('_device' if (storage != dace.StorageType.Default) else '')
transient = (storage != dace.StorageType.Default)
sdfg = dace.SDFG('matrix_lufact_getrf_{}_{}'.format(implementation, str(dtype)))
state ... |
def remap_input(op, blob_name_remapping):
    """Rewrite *op*'s input blob names in place using *blob_name_remapping*;
    names absent from the mapping are kept as-is."""
    remapped = []
    for blob in op.input:
        remapped.append(blob_name_remapping.get(blob, blob))
    # Clear-then-extend keeps the same repeated-field container object.
    del op.input[:]
    op.input.extend(remapped)
def gpt3wrapper(max_repeat=20, **arguments):
i = 0
while (i < max_repeat):
try:
response = openai.Completion.create(**arguments)
return response
except KeyboardInterrupt:
raise KeyboardInterrupt
except Exception as e:
print(e)
p... |
class kstwobign_gen(rv_continuous):
def _shape_info(self):
return []
def _pdf(self, x):
return (- scu._kolmogp(x))
def _cdf(self, x):
return scu._kolmogc(x)
def _sf(self, x):
return sc.kolmogorov(x)
def _ppf(self, q):
return scu._kolmogci(q)
def _isf(self,... |
def get_reward(session):
last_session = session[(- 1)]
observation = last_session['observation']
if observation.startswith('Your score'):
tokens = observation.split(':')
for (idx, token) in enumerate(tokens):
if ('Your score (min 0.0, max 1.0)' in token):
cur_idx ... |
def all_gather_list(data, group=None, max_size=16384):
rank = get_rank()
world_size = get_world_size()
buffer_size = (max_size * world_size)
if ((not hasattr(all_gather_list, '_buffer')) or (all_gather_list._buffer.numel() < buffer_size)):
all_gather_list._buffer = torch.cuda.ByteTensor(buffer_s... |
class TimeAccuRecorder():
def __init__(self, dataset_type, val_index, answer_dir):
self.file_path = os.path.join(answer_dir, ('time_vs_accu_data_%s_idx_%d.txt' % (dataset_type, val_index)))
self.data = []
def add_data(self, time, val_accu):
self.data.append((time, val_accu))
def save... |
def test_2d_access_sdfgapi():
sdfg = dace.SDFG('access2d_sdfg')
sdfg.add_array('A', [4, 2], dace.float64)
begin_state = sdfg.add_state()
state_true = sdfg.add_state()
state_false = sdfg.add_state()
state_true.add_edge(state_true.add_tasklet('assign', {}, {'a'}, 'a = 100.0'), 'a', state_true.add_... |
class Backend():
def __init__(self, args):
self.is_gpu = args.use_gpu
if args.use_gpu:
from chainer import cuda
import cupy
cuda.get_device(args.gpu_device).use()
self.lib = cuda.cupy
else:
self.lib = numpy
if (args.seed != ... |
class Call(Item):
def __init__(self, function, args=[], kwargs={}):
if (not callable(function)):
raise TypeError('function is not callable')
if (not isinstance(args, Sequence)):
raise TypeError('args is not a sequence')
if (not isinstance(kwargs, Mapping)):
... |
def InceptionV3_pre(imgs, scope):
    """Rescale [0, 255] images to [-1, 1] as InceptionV3 expects, via a Lambda layer."""
    def _rescale(x):
        return ((x / 255.0) - 0.5) * 2.0
    return Lambda(_rescale, name=(scope + 'inceptionv3_pre'))(imgs)
def test_long_dense_vector():
feature_columns = [SparseFeat('user_id', 4), SparseFeat('item_id', 5), DenseFeat('pic_vec', 5)]
fixlen_feature_names = get_feature_names(feature_columns)
user_id = np.array([[1], [0], [1]])
item_id = np.array([[3], [2], [1]])
pic_vec = np.array([[0.1, 0.5, 0.4, 0.3, 0.2... |
class Regularizer(object):
def __init__(self, model, value=0.001, filter={}, log=False):
self._model = model
self._named_parameters = list(FilterParameters(model, **filter).named_parameters())
self.value = value
self.log = log
if self.log:
logging.debug('Applying ... |
def build_scalers_with_transition_picker(algo: LearnableBase[(Any, Any)], dataset: ReplayBuffer) -> None:
if (algo.observation_scaler and (not algo.observation_scaler.built)):
LOG.debug('Fitting observation scaler...', observation_scaler=algo.observation_scaler.get_type())
algo.observation_scaler.fi... |
_toolkit()
class CiscoUmbrella(FunctionToolkit):
name_for_human = 'Cisco Umbrella'
description_for_human = 'Toolkit for managing a cloud security platform.'
name_for_model = 'CiscoUmbrella'
description_for_model = 'The CiscoUmbrella toolkit provides a suite of tools for managing a cloud security platfor... |
.parametrize('input_dim, output_dim, hidden_sizes', plain_settings)
def test_exp_max_std(input_dim, output_dim, hidden_sizes):
max_value = 1.0
module = GaussianMLPModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, init_std=10.0, max_std=max_value, hidden_nonlinearity=None, std_paramet... |
.parametrize('value, expected', (({'foo': True}, {'foo': 'true'}), ({'foo': False}, {'foo': 'false'}), ({'foo': None}, {'foo': 'null'}), ([{'foo': None}], [{'foo': 'null'}]), ([{'foo': {'bar': True}}], [{'foo': {'bar': 'true'}}])))
def test_jsonify_python_specific_types(value, expected):
assert (jsonify_python_spec... |
def train(config):
np.random.seed(2019)
tf.random.set_seed(2019)
model_dir = config['model.save_path'][:config['model.save_path'].rfind('/')]
if (not os.path.exists(model_dir)):
os.makedirs(model_dir)
data_dir = f"data/{config['data.dataset']}"
ret = load(data_dir, config, ['train', 'val... |
def test_gmm_correct_covariance_type():
    """learn_gmm must accept a 'diag' covariance_type and expose fitted params."""
    model = learn_gmm(np.random.random((10, 10)), n_modes=2, gm_args={'covariance_type': 'diag'})
    for attr in ('means_', 'covariances_', 'weights_'):
        assert getattr(model, attr) is not None
def is_continuous_seq(move):
    """Return True iff *move* is a run of consecutive integers (step +1).

    Empty and single-element sequences are trivially continuous, matching
    the original index-walking loop.
    """
    # zip pairs each element with its successor; all() short-circuits on the
    # first gap, replacing the manual while/index bookkeeping.
    return all(b - a == 1 for a, b in zip(move, move[1:]))
def square(t, duty=0.5):
(t, w) = (asarray(t), asarray(duty))
w = asarray((w + (t - t)))
t = asarray((t + (w - w)))
if (t.dtype.char in ['fFdD']):
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
mask1 = ((w > 1) | (w < 0))
place(y, mask1, nan)
tmod = ... |
def cost_matrix_cosine(x: Tensor, y: Tensor, eps: float=1e-05) -> Tensor:
assert (x.dim() == y.dim())
assert (x.size(0) == y.size(0))
assert (x.size(2) == y.size(2))
x_norm = F.normalize(x, p=2, dim=(- 1), eps=eps)
y_norm = F.normalize(y, p=2, dim=(- 1), eps=eps)
cosine_sim = x_norm.matmul(y_nor... |
class BrazilBandCollection(namedtuple('BrazilBandCollection', ('cls_obs', 'cls_exp', 'one_sigma_band', 'two_sigma_band', 'test_size', 'clsb', 'clb', 'axes'))): |
def get_model():
    """Pretrained ResNet-18 prefixed with ImageNet normalization, in eval mode."""
    backbone = torchvision.models.resnet18(pretrained=True).eval()
    normalizer = torchvision.transforms.Normalize(
        mean=torch.Tensor([0.485, 0.456, 0.406]),
        std=torch.Tensor([0.229, 0.224, 0.225]),
    )
    return torch.nn.Sequential(normalizer, backbone).eval()
def collate_fn(samples):
    """Batch (image_seq, original_dim, idx) triples into an ImageList plus the indices."""
    image_seqs, original_dims, idxes = zip(*samples)
    # Wrap each image in its own single-element sequence before batching.
    wrapped = [[image] for image in image_seqs]
    batched = ImageList.from_image_sequence_list(wrapped, original_dims)
    return (batched, idxes)
def default_output_fn(preds):
batch_size = jax.tree_util.tree_leaves(preds)[0].shape[0]
assert jax.tree_util.tree_all(jax.tree_util.tree_map((lambda x: (x.shape[0] == batch_size)), preds))
outputs = []
for i in range(batch_size):
outputs.append(jax.tree_util.tree_map((lambda x: x[i]), preds))
... |
def download_file_parallel(args):
    """Pool-worker entry point: unpack one job tuple and download one file (resumable)."""
    download_url, download_path, split_name, filename, resume_byte_pos = args
    download_file(
        download_url,
        download_path,
        split_name,
        filename,
        resume_byte_pos=resume_byte_pos,
    )
def _test_matmul(implementation, dtype, impl_name, storage, data_layout='CCC', eps=0.0001):
sdfg = make_sdfg(impl_name, dtype, storage, data_layout)
csdfg = sdfg.compile()
(m, n, k) = (32, 31, 30)
x = np.ndarray([m, k], dtype=dtype.type, order=data_layout[0])
y = np.ndarray([k, n], dtype=dtype.type,... |
class Transition(nn.Module):
def __init__(self, nChannels, nOutChannels, use_dropout):
super(Transition, self).__init__()
self.bn1 = nn.BatchNorm2d(nOutChannels)
self.conv1 = nn.Conv2d(nChannels, nOutChannels, kernel_size=1, bias=False)
self.use_dropout = use_dropout
self.dro... |
def save_and_load_tester(algo: QLearningAlgoBase[(QLearningAlgoImplBase, LearnableConfig)], observation_shape: Shape, action_size: int, deterministic_best_action: bool=True) -> None:
algo.create_impl(observation_shape, action_size)
algo.save_model(os.path.join('test_data', 'model.pt'))
try:
algo2 = ... |
def test_read_write_consistency_conll2003():
conll_io = ConllIO(text_col_id=0, tag_col_id=3, scheme='BIO1', document_sep_starts=['-DOCSTART-'])
data = conll_io.read('data/conll2003/demo.eng.train')
brat_io = BratIO(tokenize_callback='space', max_len=None, token_sep=' ', line_sep='\r\n', sentence_seps=[], ph... |
class DenseNet(nn.Module):
def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=100):
super(DenseNet, self).__init__()
self.growth_rate = growth_rate
num_planes = (2 * growth_rate)
self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
... |
def dict_to_stats(cfg_dict):
    # Temporarily overlay the global cfg with *cfg_dict*, collect stats, then
    # restore defaults so later callers see a clean config.
    set_cfg(cfg)  # reset global cfg to its defaults before merging
    cfg_new = CN(cfg_dict)
    cfg.merge_from_other_cfg(cfg_new)
    stats = get_stats()
    set_cfg(cfg)  # restore defaults again after reading the stats
    return stats
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.