code stringlengths 101 5.91M |
|---|
# NOTE(review): the line below is a decorator garbled by extraction -- most
# likely `@compare_numpy_output(check_dtype=True)` from the dace ufunc test
# suite; restore the `@` prefix (and any stripped name prefix) when merging.
_numpy_output(check_dtype=True)
def test_ufunc_right_shift_cc(A: dace.complex64[10], B: dace.complex64[10]):
    # right_shift is not defined for complex operands in NumPy; presumably the
    # harness asserts the dace build matches NumPy's error behavior -- confirm
    # against the surrounding test module.
    return np.right_shift(A, B)
class FGP_Morphism(Morphism):
def __init__(self, parent, phi, check=True):
Morphism.__init__(self, parent)
M = parent.domain()
N = parent.codomain()
if isinstance(phi, FGP_Morphism):
if check:
if (phi.parent() != parent):
raise TypeErro... |
def calculate_md5(fpath: str, chunk_size: int=(1024 * 1024)) -> str:
    """Return the hexadecimal MD5 digest of the file at *fpath*.

    The file is consumed in chunks of ``chunk_size`` bytes so that large
    files never need to be held in memory at once.
    """
    digest = hashlib.md5()
    with open(fpath, 'rb') as stream:
        while True:
            block = stream.read(chunk_size)
            if block == b'':
                break
            digest.update(block)
    return digest.hexdigest()
def pdfparser(in_path, out_path):
fp = open(in_path, 'rb')
rsrcmgr = PDFResourceManager()
retstr = io.StringIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
for page in PDFPage.get_page... |
def main():
global start_epoch, label_map, epoch, checkpoint, decay_lr_at
if (checkpoint is None):
start_epoch = 0
model = SSD300(n_classes=n_classes)
biases = list()
not_biases = list()
for (param_name, param) in model.named_parameters():
if param.requires_gr... |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', required=True, type=str, help='Input directory with images.')
parser.add_argument('--output_file', required=True, type=str, help='Output directory with images.')
parser.add_argument('--input_dir_mode', default='test', type... |
class GenericProduct(CartesianProductPoset, GenericGrowthGroup):
__classcall__ = CartesianProductPoset.__classcall__
def __init__(self, sets, category, **kwds):
order = kwds.pop('order')
CartesianProductPoset.__init__(self, sets, category, order, **kwds)
vars = sum(iter((factor.variable_... |
def load_model(hparams):
    """Construct a Tacotron2 model on the GPU and apply run-mode adjustments.

    When ``hparams.fp16_run`` is set, the attention mask value is clamped to
    the smallest representable float16 (fp16 cannot hold -inf safely); when
    ``hparams.distributed_run`` is set, gradient all-reduce hooks are added.
    """
    model = Tacotron2(hparams).cuda()
    if hparams.fp16_run:
        attention = model.decoder.attention_layer
        attention.score_mask_value = finfo('float16').min
    return apply_gradient_allreduce(model) if hparams.distributed_run else model
def test_vectors_rotations():
with pytest.raises(TypeError):
Vector3D.rotate(Vector3D(), pi, 1)
with pytest.raises(TypeError):
Vector3D.rotate(Vector3D(), pi, [1, 2])
with pytest.raises(TypeError):
Vector3D.rotate(Vector3D(), pi, 1, 2, 3, 4)
with pytest.raises(ValueError):
... |
class _CustomLinearOperator(LinearOperator):
def __init__(self, shape, matvec, rmatvec=None, matmat=None, dtype=None, rmatmat=None):
super().__init__(dtype, shape)
self.args = ()
self.__matvec_impl = matvec
self.__rmatvec_impl = rmatvec
self.__rmatmat_impl = rmatmat
s... |
def get_wandb_logger(trainer: Trainer) -> WandbLogger:
if isinstance(trainer.logger, WandbLogger):
return trainer.logger
if isinstance(trainer.logger, LoggerCollection):
for logger in trainer.logger:
if isinstance(logger, WandbLogger):
return logger
raise Exceptio... |
def saveCompressed(fh, **namedict):
with zipfile.ZipFile(fh, mode='w', compression=zipfile.ZIP_STORED, allowZip64=True) as zf:
for (k, v) in namedict.items():
with zf.open((k + '.npy'), 'w', force_zip64=True) as buf:
numpy.lib.npyio.format.write_array(buf, numpy.asanyarray(v), al... |
def ENSO_34(filepath, df_splits=None, get_ENSO_states: bool=True):
kwrgs_pp = {'selbox': (190, 240, (- 5), 5), 'format_lon': 'only_east', 'seldates': None}
ds = core_pp.import_ds_lazy(filepath, **kwrgs_pp)
dates = pd.to_datetime(ds.time.values)
data = functions_pp.area_weighted(ds).mean(dim=('latitude',... |
def make_mask(height=100, width=100, r_inner=10, r_outer=20):
    """Build a binary annulus (ring) mask.

    Pixels whose distance from the image centre lies strictly between
    ``r_inner`` and ``r_outer`` are set to 1; all others are 0.

    Improvements over the original: the per-pixel Python double loop is
    replaced by a vectorized NumPy computation, and the hard-coded 100x100
    size and 10/20 radii become parameters whose defaults reproduce the old
    behavior exactly.

    Args:
        height, width: mask dimensions (default 100x100, as before).
        r_inner, r_outer: open-interval radius bounds of the ring.

    Returns:
        np.ndarray of shape (height, width), dtype uint8, values in {0, 1}.
    """
    # Centre matches the original code: np.array([W, H]) / 2.
    ys, xs = np.ogrid[:height, :width]
    dist = np.hypot(xs - width / 2, ys - height / 2)
    return ((dist > r_inner) & (dist < r_outer)).astype(np.uint8)
def process_idx(idx):
f = files[idx]
fname = f.split('/')[(- 1)].split('.')[0]
(x, sr) = sf.read(f)
min_clip_duration = int((sr * 1))
parts = []
if (len(x) < min_clip_duration):
x = replicate_if_needed(x, min_clip_duration)
parts.append(x)
else:
overlap = int((sr * 0.... |
# NOTE(review): the line below is a decorator garbled by extraction --
# originally `@test_utils.test(debug=True)` (Taichi test harness); restore
# the `@` and the stripped module prefix when merging.
_utils.test(debug=True)
def test_kernel_too_many():
    # presumably `foo` carries a @ti.kernel decorator that extraction dropped;
    # without it the TaichiSyntaxError below would not be raised -- confirm.
    def foo(a: ti.i32, b: ti.i32):
        assert (a == 1)
        assert (b == 2)
    # Calling a two-argument kernel with three arguments must fail loudly.
    with pytest.raises(ti.TaichiSyntaxError, match='Too many arguments'):
        foo(1, 2, 3)
class YolosPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed.

    Instantiating it calls ``requires_backends``, which raises a helpful
    "install torch" error instead of an opaque ImportError.
    """

    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
def register_Ns3Ssid_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_constructor([param('ns3::Ssid const &', 'arg0')])
cls.add_constructor([])
cls.add_constructor([param('std::string', 's')])
cls.add_method('DeserializeInformationField', 'uint8_t', [param('ns3::Buffer::Iterator',... |
_utils.test(arch=[ti.cpu, ti.cuda])
def test_real_func_tuple_ret_typing_tuple():
s0 = ti.types.struct(a=ti.math.vec3, b=ti.i16)
_func
def foo() -> Tuple[(ti.f32, s0)]:
return (1, s0(a=ti.math.vec3([100, 0.2, 3]), b=65537))
def bar() -> Tuple[(ti.f32, s0)]:
return foo()
(ret_a, ret_b)... |
def load_audio(path, sr, len_audio):
(audio, _) = librosa.load(path, sr=sr, mono=True, duration=len_audio, dtype=np.float32, res_type='kaiser_best')
total_samples = (sr * len_audio)
if (len(audio) < total_samples):
audio = np.repeat(audio, ((total_samples // len(audio)) + 1))[:total_samples]
ret... |
def all_reduce(tensor, op=reduce_op.SUM, group=group.WORLD):
    """All-reduce *tensor* across the (deprecated) torch process group.

    NOTE(review): relies on `torch.distributed.deprecated` internals that no
    longer exist in modern PyTorch; also validates state with a bare `assert`,
    which is stripped under `python -O` -- consider raising RuntimeError.
    """
    # Collectives are only valid once the process group has been initialized.
    assert (torch.distributed.deprecated._initialized == _INITIALIZED_PG), 'collective only supported in process-group mode'
    return torch._C._dist_all_reduce(tensor, op, group)
class BoxBetts(Benchmark):
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self._bounds = ([0.9, 1.2], [9.0, 11.2], [0.9, 1.2])
self.global_optimum = [[1.0, 10.0, 1.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
i = arange(1, 1... |
class ExceptionDescr(object):
    """Plain value object describing an exception handler's entry points."""

    def __init__(self, entry_point, finally_enter=None, finally_exit=None):
        # Where the handler starts, plus the optional enter/exit points of
        # an associated `finally` region (None when there is no finally).
        self.finally_exit = finally_exit
        self.finally_enter = finally_enter
        self.entry_point = entry_point
def require_backends_available(backends):
def check(backend):
if (backend == dist.Backend.GLOO):
return dist.is_gloo_available()
if (backend == dist.Backend.NCCL):
return dist.is_nccl_available()
if (backend == dist.Backend.MPI):
return dist.is_mpi_availab... |
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.model = nn.Sequential(FirstResBlockDiscriminator(channels, DISC_SIZE, stride=2), ResBlockDiscriminator(DISC_SIZE, DISC_SIZE, stride=2), ResBlockDiscriminator(DISC_SIZE, DISC_SIZE), ResBlockDiscriminator(DI... |
class ResNet12(nn.Module):
def __init__(self, channels):
super().__init__()
self.inplanes = 3
self.layer1 = self._make_layer(channels[0])
self.layer2 = self._make_layer(channels[1])
self.layer3 = self._make_layer(channels[2])
self.layer4 = self._make_layer(channels[3]... |
class RepeatFactorTrainingSampler(Sampler):
def __init__(self, repeat_factors, *, shuffle=True, seed=None):
self._shuffle = shuffle
if (seed is None):
seed = comm.shared_random_seed()
self._seed = int(seed)
self._rank = comm.get_rank()
self._world_size = comm.get_... |
class IntegrationTestSnipsNLUEngine(SnipsTest):
def test_pure_python_engine_performance(self):
dataset_path = str(PERFORMANCE_DATASET_PATH)
results = compute_cross_val_metrics(dataset_path, engine_class=TrainingEngine, nb_folds=5, train_size_ratio=1.0, slot_matching_lambda=_slot_matching_lambda, pro... |
class GraphPlot(SageObject):
def __init__(self, graph, options):
for (k, value) in DEFAULT_PLOT_OPTIONS.items():
if (k not in options):
options[k] = value
self._plot_components = {}
self._nodelist = list(graph)
self._graph = graph
self._options = o... |
class MultiLoraInjectedLinear(nn.Module):
def __init__(self, in_features, out_features, bias=False, r=4, dropout_p=0.1, lora_num=1, scales=[1.0]):
super().__init__()
if (r > min(in_features, out_features)):
print(f'LoRA rank {r} is too large. setting to: {min(in_features, out_features)}'... |
class SequentialSampler(Sampler[int]):
    """Sampler that yields dataset indices 0..len(data_source)-1 in order."""

    data_source: Sized

    def __init__(self, data_source: Sized) -> None:
        self.data_source = data_source

    def __iter__(self) -> Iterator[int]:
        # Length is re-evaluated on each iteration, so the sampler tracks
        # a data source whose size changes between epochs.
        total = len(self.data_source)
        return iter(range(total))

    def __len__(self) -> int:
        return len(self.data_source)
def test_check_input8():
with pytest.raises(ValueError, match=('The validation_metrics are None,' + ' please make sure to give valid validation_metrics!')):
trainer = SingleObjectiveTrainer(dataHandler, model, correctness_loss, None, save_to_path, yaml_path)
trainer.train()
validation_metrics_tm... |
class LlamaLoraInt8Engine(CausalLoraEngine):
config_name: str = 'llama_lora_int8_engine'
def __init__(self, weights_path: Optional[Union[(str, Path)]]=None):
model_name = 'aleksickx/llama-7b-hf'
device_map = {'': int((os.environ.get('LOCAL_RANK') or 0))}
model = LlamaForCausalLM.from_pre... |
class InteractingLayer(nn.Module):
def __init__(self, in_features, att_embedding_size=8, head_num=2, use_res=True, seed=1024, device='cpu'):
super(InteractingLayer, self).__init__()
if (head_num <= 0):
raise ValueError('head_num must be a int > 0')
self.att_embedding_size = att_e... |
def _format_values(key: str, value: Any) -> str:
if (not isinstance(value, (int, float))):
return str(value)
if ('Memory' in key):
ind = 0
unit = dict(enumerate(['B', 'KB', 'MB', 'GB', 'TB'], 0))
while (value > 1024):
value /= 1024
ind += 1
return ... |
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('ignore_border', [True, False])
.parametrize('channel_last', [False, True])
.parametrize('inshape, kernel, stride, pad', [((2, 3, 4, 6), (2, 2, 2), (1, 1, 2), (0, 1, 0)), ((2, 2, 3, 4, 6), (2, 2, 2), (2, 1, 1), (1, 0, 1)), ((2, 2, 2, 3, 4, 6)... |
class DoNothingAgent(Agent):
    """Baseline agent that always emits the all-zeros action."""

    def __init__(self, observation_space, action_space):
        # observation_space is accepted for interface compatibility but unused;
        # the fixed action vector is sized to the action space.
        n_actions = action_space.shape[0]
        self.action = [0 for _ in range(n_actions)]

    def act(self, observation):
        # The observation is ignored entirely.
        return self.action
class ClassifierTest(unittest.TestCase):
def setUpClass(cls):
cls.task1 = create_task('task1', module_suffixes=['A', 'A'])
cls.task2 = create_task('task2', module_suffixes=['B', 'B'])
cls.dataloader = create_dataloader('task1')
def setUp(self):
random.seed(123)
np.random.... |
def test_none_correct():
from pysad.evaluation import PrecisionMetric, AUPRMetric, AUROCMetric, RecallMetric
import numpy as np
metric_classes = {PrecisionMetric: 0.0, AUROCMetric: 0.0, RecallMetric: 0.0}
y_true = np.random.randint(0, 2, size=(25,), dtype=np.int32)
y_true[0] = 1
y_true[1] = 0
... |
def fft2(x, s=None, axes=((- 2), (- 1)), norm=None, overwrite_x=False, workers=None):
    """2-D FFT over the last two axes, as a thin wrapper around ``fftn``."""
    return fftn(x, s=s, axes=axes, norm=norm,
                overwrite_x=overwrite_x, workers=workers)
def evaluate_default(data, inferred_lines):
metrics = data.Metrics(data)
for (inferred_code, infer_results) in load_from_lines(inferred_lines):
if ('index' in infer_results):
metrics.add(data[infer_results['index']], inferred_code)
else:
metrics.add(None, inferred_code, o... |
class tuneNDT(tune.Trainable):
def setup(self, config):
yacs_cfg = self.convert_tune_cfg(config)
self.epochs_per_generation = yacs_cfg.TRAIN.TUNE_EPOCHS_PER_GENERATION
self.warmup_epochs = yacs_cfg.TRAIN.TUNE_WARMUP
self.runner = Runner(config=yacs_cfg)
self.runner.load_devic... |
def _load_c2_pickled_weights(file_path):
with open(file_path, 'rb') as f:
if torch._six.PY3:
data = pickle.load(f, encoding='latin1')
else:
data = pickle.load(f)
if ('blobs' in data):
weights = data['blobs']
else:
weights = data
return weights |
class Attention(nn.Module):
def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
super().__init__()
inner_dim = (dim_head * heads)
context_dim = default(context_dim, query_dim)
self.scale = (dim_head ** (- 0.5))
self.heads = heads
self.t... |
# NOTE(review): the line below is a garbled decorator (probably
# `@pytest.mark.mpi` or a dace MPI test marker); it is not valid Python as
# written -- restore the stripped prefix when merging.
.mpi
def test_isnot_commworld_0():
    """A dace program must agree with host Python on COMM_WORLD identity.

    Presumably `isnot_commworld_0` carries a @dace.program decorator that
    extraction dropped -- confirm against the surrounding test module.
    """
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    def isnot_commworld_0(out: dace.bool[1]):
        out[0] = (comm is MPI.COMM_WORLD)
    res = np.zeros((1,), dtype=np.bool_)
    isnot_commworld_0(res)
    # The program's view of the identity test must match the host's.
    assert (res[0] == (comm is MPI.COMM_WORLD))
def singleton(items: List):
    """Return the single element of *items*.

    Raises:
        ValueError: if *items* does not contain exactly one element.
    """
    count = len(items)
    if count == 1:
        return items[0]
    raise ValueError(f'Expected 1 item, got {count} items: {items}')
def osnet_ibn_x1_0(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """Build OSNet-IBN (width x1.0), optionally loading pretrained weights."""
    model = OSNet(
        num_classes,
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[64, 256, 384, 512],
        loss=loss,
        IN=True,
        **kwargs,
    )
    if pretrained:
        init_pretrained_weights(model, key='osnet_ibn_x1_0')
    return model
class Order_relative(Order):
def __init__(self, K, absolute_order):
self._absolute_order = absolute_order
self._module_rep = absolute_order._module_rep
Order.__init__(self, K)
def _element_constructor_(self, x):
x = self._K(x)
abs_order = self._absolute_order
to_a... |
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, NL='relu', same_padding=False, bn=False):
super(Conv2d, self).__init__()
padding = (int(((kernel_size - 1) / 2)) if same_padding else 0)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,... |
def test_tiny_range():
    """Smoke test: expand a 4-shard brace range, run the command, check output.

    `prepare`, `check` and `tiny_kwargs` are helpers defined elsewhere in
    this test module.
    """
    # Brace-expansion pattern covers shards 000000..000003.
    (cmd, meta_path, out_path) = prepare('samples_tiny_range/shard-{000000..000003}.tar', **tiny_kwargs)
    print('running')
    # NOTE(review): cmd.split() breaks on arguments containing spaces, and
    # the subprocess return code is not checked -- consider check=True.
    subprocess.run(cmd.split())
    print('testing')
    check(meta_path, out_path)
def test_fit_digraph(digraph_logistic_regression):
classifiers = {'a': {'classifier': LogisticRegression()}}
digraph_logistic_regression.n_jobs = 2
nx.set_node_attributes(digraph_logistic_regression.hierarchy_, classifiers)
digraph_logistic_regression._fit_digraph(local_mode=True)
try:
check... |
class InPlaceABN(autograd.Function):
def forward(ctx, x, weight, bias, running_mean, running_var, training=True, momentum=0.1, eps=1e-05, activation=ACT_LEAKY_RELU, slope=0.01):
ctx.training = training
ctx.momentum = momentum
ctx.eps = eps
ctx.activation = activation
ctx.slop... |
def _find_missing_pyi(source_dir, target_dir):
    """Return sorted full paths of .pyi stubs present under *source_dir*
    but absent from *target_dir* (set difference of the two globs)."""
    missing = _glob_pyis(source_dir) - _glob_pyis(target_dir)
    return sorted(os.path.join(source_dir, rel) for rel in missing)
def skipIfOnGHA(fn):
    """Decorator: skip the wrapped test when running on GitHub Actions.

    Fix: the stray ``(fn)`` line in the original was a garbled
    ``@functools.wraps(fn)`` decorator; restoring it preserves the wrapped
    test's name/docstring so test reporters identify it correctly.
    """
    import functools

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        # ON_GHA is a module-level flag set from the CI environment.
        if ON_GHA:
            raise unittest.SkipTest('Test disabled for GHA')
        fn(*args, **kwargs)

    return wrapper
def main():
N = 500
P = 500
S = 40
T = 100
nperms = 5000
fdr_threshold = 0.1
trial = int(sys.argv[1])
sample = int(sys.argv[2])
TRIAL_PATH = 'data/{}'.format(trial)
X_PATH = 'data/{}/X.csv'.format(trial)
Y_PATH = 'data/{}/Y.csv'.format(trial)
TRUTH_PATH = 'data/{}/truth.c... |
def test_broadcast_fail():
    """_broadcast must reject these mutually incompatible shapes.

    (5, 1) and (1, 6) broadcast to (5, 6), which (5,) cannot join, so a
    SyntaxError is expected; reaching the final assert means it never came.
    """
    shapes = [(5, 1), (1, 6), (5,), (1,)]
    array_shapes = [np.empty(shape).shape for shape in shapes]
    try:
        repl._broadcast(array_shapes)
    except SyntaxError:
        return
    assert False
def scale_images(filepaths, zoom_factor=(1, 1, 1), zoom_order=3, save_identifier=''):
for (num_file, filepath) in enumerate(filepaths):
print_timestamp('Processing file {0}/{1}: {2}', [(num_file + 1), len(filepaths), os.path.split(filepath)[1]])
img = io.imread(filepath)
img = zoom(img, zoom... |
class TestModel(TestCase):
def test_smoke(self):
mbtr = MBTR1(start=0, stop=4, num=5, geomf='count', weightf='unity', broadening=0.001, eindexf='noreversals', aindexf='noreversals', elems=[0, 1, 2, 3], flatten=True)
model = Model(representation=[mbtr, mbtr.get_config()], regression={'krr': {'kernel'... |
class PaddedScatterTest(parameterized.TestCase):
(*_PADDED_SCATTER_BENCHMARK)
def testPaddedScatter(self, sl, hs, ne, top_k):
x = torch.randn((sl, hs)).cuda().half()
top_expert = torch.randint(0, ne, ((sl * top_k),)).cuda().int()
(bin_ids, indices) = ops.sort(top_expert)
tokens_p... |
def parse_args():
parser = argparse.ArgumentParser(description='Initialize ADE20K dataset.', epilog='Example: python prepare_cityscapes.py', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--download-dir', default=None, help='dataset directory on disk')
args = parser.parse_args(... |
def verify_checkpoint_directory(save_dir: str) -> None:
if (not os.path.exists(save_dir)):
os.makedirs(save_dir, exist_ok=True)
temp_file_path = os.path.join(save_dir, 'dummy')
try:
with open(temp_file_path, 'w'):
pass
except OSError as e:
print('| Unable to access ch... |
class BidirectionalRNNEncoder(object):
def __init__(self, config, mode):
self.params = copy.deepcopy(config)
if (mode != tf.contrib.learn.ModeKeys.TRAIN):
self.params['keep_prob'] = 1.0
LogInfo.logs('Show Bi-RNN param: %s', self.params)
def encode(self, inputs, sequence_lengt... |
def count_params(model):
    """Print and return the total number of parameters in *model*.

    Improvements over the original: the total is now returned (the old
    version only printed it and returned None), so callers can use the value
    programmatically; the printed line is unchanged for compatibility.
    Iterates state_dict values directly since the keys were unused.
    """
    num_params = 0.0
    for tensor in model.state_dict().values():
        num_params += np.prod(tensor.shape)
    print(('Number of Parameters = %.2f M' % (num_params / 1000000.0)))
    return num_params
def iscomplex(var):
    """Return True when *var* describes a scalar of a complex Fortran type."""
    # Guard first to mirror the original short-circuit order: var.get is
    # only consulted for scalar specs.
    if not isscalar(var):
        return False
    return var.get('typespec') in ('complex', 'double complex')
def require_soundfile(test_case):
    """Return *test_case* unchanged when soundfile is available; otherwise
    wrap it with unittest.skip so it is reported as skipped, not failed."""
    if is_soundfile_availble():
        return test_case
    return unittest.skip('test requires soundfile')(test_case)
class VanPreTrainedModel(PreTrainedModel):
config_class = VanConfig
base_model_prefix = 'van'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
def _init_weights(self, module):
if isinstance(module, nn.Linear):
nn.init.trunc_normal_(module.weight, std=self.c... |
def parse_graph(core_parse):
assert isinstance(core_parse, CoreParse)
circle_dict = _get_circle_dict(core_parse)
line_graph = _get_line_graph(core_parse)
arc_graphs = {}
for (center_key, d) in circle_dict.iteritems():
for radius_key in d:
circle = circle_dict[center_key][radius_k... |
def setup_experiment(args):
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.dry_run:
args.no_save = args.no_log = args.no_visual = True
return
args.experimen... |
def pdb_loader(dict, mode='train', path='../../split-by-protein/data/', dist_bar=6):
dataset = LMDBDataset((path + mode))
(x, pos, y) = ([], [], [])
for i in range(len(dataset)):
struct = dataset[i]
if (struct['label'] == 'A'):
y += [1]
key = 'atoms_active'
el... |
def add_prefix(log_dict: OrderedDict, prefix: str, divider=''):
    """Return a new OrderedDict whose keys are ``prefix + divider + key``.

    The input mapping is not modified; insertion order is preserved.
    """
    stem = prefix + divider
    return OrderedDict((stem + key, val) for (key, val) in log_dict.items())
def get_qac_raw_file_path(self, data_dir: str, data_type: str, split: str='1.0', ext: str='pkl') -> str:
    """Compose the path of the raw-text VisDial file for a given split/type."""
    filename = f'visdial_{split}_{data_type}_raw_text.{ext}'
    return f'{data_dir}/{filename}'
def make_train_data(args):
xgrid = np.linspace((- 0.5), 0.5, args.fr_size, endpoint=False)
if (args.kernel_type == 'triangle'):
kernel_param = (args.triangle_slope / args.signal_dim)
else:
kernel_param = (args.gaussian_std / args.signal_dim)
return load_dataloader(args.n_training, signal... |
def mse(y_true, y_pred, weights=None):
    """Weighted mean squared error between targets and predictions."""
    squared_error = (y_true - y_pred) ** 2
    return np.average(squared_error, weights=weights)
def split_era(amr):
while True:
index = None
(year, era) = (None, None)
for (i, token) in enumerate(amr.tokens):
if re.search('^\\d{4}BC$', token):
index = i
(year, era) = (token[:4], token[4:])
break
else:
break... |
def load_network(network):
    """Load saved weights into *network* and return it.

    The checkpoint path is built from the module-level globals ``name``
    (experiment name) and ``opt.which_epoch`` (epoch selector).
    """
    checkpoint = 'net_%s.pth' % opt.which_epoch
    save_path = os.path.join('./model', name, checkpoint)
    network.load_state_dict(torch.load(save_path))
    return network
def mock_docx_file():
    """Create a temporary .docx containing ``plain_text_str``; return its path.

    NOTE: created with delete=False -- the caller (or test teardown) is
    responsible for removing the file.
    """
    with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.docx') as handle:
        doc = docx.Document()
        doc.add_paragraph(plain_text_str)
        doc.save(handle.name)
    return handle.name
def __getspaninfo(lnode, rnode):
    """Return the EDU span jointly covered by *lnode* and *rnode*:
    (left node's span start, right node's span end).

    Fix: on TypeError (e.g. an eduspan is None) the original printed
    diagnostics and then executed ``return eduspan`` with ``eduspan``
    unbound, masking the real error behind an UnboundLocalError.  The
    diagnostics are still printed, but the original TypeError is re-raised.
    """
    try:
        eduspan = (lnode.eduspan[0], rnode.eduspan[1])
    except TypeError:
        print(lnode.prop, rnode.prop)
        print(lnode.nucspan, rnode.nucspan)
        raise
    return eduspan
class Subset(Dataset):
    """View over ``dataset`` restricted to the given ``indices``.

    No data is copied; indexing translates a subset-local position into
    the parent dataset's index.
    """

    def __init__(self, dataset, indices):
        self.indices = indices
        self.dataset = dataset

    def __getitem__(self, idx):
        parent_idx = self.indices[idx]
        return self.dataset[parent_idx]

    def __len__(self):
        return len(self.indices)
def get_train_val_loader(train_year: Union[(str, int)], valid_year: Union[(str, int)], split: int, batch_size: int, n_comb: int, root: str=C.ROOT, num_workers: Optional[int]=None) -> Tuple[(Any, Any)]:
label_dir_name = f'{train_year}-{valid_year}-split{split}'
iqon_outfits = IQONOutfits(train_year=train_year, v... |
class MultiEpochsDataLoader(torch.utils.data.DataLoader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._DataLoader__initialized = False
self.batch_sampler = _RepeatSampler(self.batch_sampler)
self._DataLoader__initialized = True
self.iterator = ... |
class ConvTranspose1d(_ConvTransposeNd):
__doc__ = 'Applies a 1D transposed convolution operator over an input image\n composed of several input planes.\n\n This module can be seen as the gradient of Conv1d with respect to its input.\n It is also known as a fractionally-strided convolution or\n a deconv... |
def box_transform(box):
    """Convert a box from extent form to [x, y, w, h] with inclusive widths.

    NOTE(review): the index layout is inferred from usage -- box[0]/box[1]
    behave as the y extent and box[2]/box[3] as the x extent; confirm
    against callers.
    """
    y_min, y_max, x_min, x_max = box[0], box[1], box[2], box[3]
    width = (x_max - x_min) + 1
    height = (y_max - y_min) + 1
    return [x_min, y_min, width, height]
def test_spinner_can_be_used_as_context_manager():
    """The spinner must be running inside its `with` block and stopped after."""
    with Spinner() as active_spinner:
        assert (active_spinner.running == True)
    # Exiting the context must have stopped it.
    assert (active_spinner.running == False)
def get_pip_packages(run_lambda):
def run_with_pip(pip):
if (get_platform() == 'win32'):
system_root = os.environ.get('SystemRoot', 'C:\\Windows')
findstr_cmd = os.path.join(system_root, 'System32', 'findstr')
grep_cmd = '{} /R "numpy torch"'.format(findstr_cmd)
e... |
class RNNTransducer(TransducerModel):
def __init__(self, num_classes: int, input_dim: int, num_encoder_layers: int=4, num_decoder_layers: int=1, encoder_hidden_state_dim: int=320, decoder_hidden_state_dim: int=512, output_dim: int=512, rnn_type: str='lstm', bidirectional: bool=True, encoder_dropout_p: float=0.2, de... |
def test_from_networkx_weights():
expected_nodes = {'a': pd.DataFrame(columns=range(0), index=[0, 2, 1])}
x_edges = [(0, 2, {'w': 2.0}), (0, 2), (1, 2), (1, 2)]
xs = len(x_edges)
y_edges = [(0, 1, {'w': 3.0, 'e': 'y'})]
ys = len(y_edges)
def df_edge(edge_tuple):
(src, tgt) = edge_tuple[:... |
def test_named_record_int32_float64_parameters():
    """Round-trip: a named, parameterized RecordType must survive
    str -> parse -> str unchanged."""
    # The third argument carries metadata: a '__record__' name and a
    # custom parameter 'p'; both must be preserved by the parser.
    t = RecordType([NumpyType('int32'), NumpyType('float64')], None, {'__record__': 'Name', 'p': [123]})
    assert (str(parser.parse(str(t))) == str(t))
class Track():
def __init__(self):
self.dates = []
self.categories = []
self.latitudes = []
self.longitudes = []
self.windspeeds = []
self.pressures = []
self.stormid = 0
self.Ninstants = 0
self.month = None
self.maxcategory = 0
def... |
def register_Ns3CallbackImpl__Void_Ns3Time_Ns3Time_Ns3WifiPhyState_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Time, ns3::Time, ns3::WifiPhy::State, ns3::empty, ns3::empty, ns3::empty, ns3::emp... |
def calc_f1_score(answers, prediction):
f1_scores = []
for ans in answers:
ans_segs = mixed_segmentation(ans, rm_punc=True)
prediction_segs = mixed_segmentation(prediction, rm_punc=True)
(lcs, lcs_len) = find_lcs(ans_segs, prediction_segs)
if (lcs_len == 0):
f1_scores... |
.parametrize('slate_id, reward, pscore_item_position, position, evaluation_policy_pscore_item_position, description', invalid_input_of_iips)
def test_iips_using_invalid_input_data(slate_id, reward, pscore_item_position, position, evaluation_policy_pscore_item_position, description) -> None:
with pytest.raises(Value... |
_kwargs(**val_custom_params)
_fl_task(model='model', data_loader='val_loader', device='device')
def validate(model, val_loader, device, criterion, task, acc_fn):
val_loader = tqdm.tqdm(val_loader, desc='validate')
model.eval()
model.to(device)
val_score = 0
total_samples = 0
total_loss = []
... |
def grouper(iterable, n, fillvalue=None):
    """Collect items into fixed-length tuples of size *n*.

    Example: grouper('ABCDE', 2, '-') -> ('A','B'), ('C','D'), ('E','-').
    """
    # One shared iterator repeated n times: zip_longest draws n consecutive
    # items per output tuple, padding the last one with fillvalue.
    shared = iter(iterable)
    return itertools.zip_longest(*([shared] * n), fillvalue=fillvalue)
class Node(pretty.PrettyRepr):
__metaclass__ = MetaNode
def pretty(self):
args = (getattr(self, s) for s in self._allslots)
return pretty.pfun(type(self).__name__, args)
def args(self):
return tuple((getattr(self, s) for s in self._allslots))
def copy(self, **kws):
args =... |
class Zef_3d(Evaluator):
def __init__(self):
self.type = 'ZF3D'
def eval(self):
print('Check prediction files')
error_message = ''
for pred_file in self.tsfiles:
df = pd.read_csv(pred_file, header=None)
count = df.groupby([0, 1]).size().reset_index(name='c... |
class TreeNode(object):
    """Node of a parse/derivation tree.

    Leaves carry a token; internal nodes carry a tag.  Tree links
    (children, parent/node indices, leaf index sequence) start empty and
    are filled in later by the tree builder.
    """

    def __init__(self, is_leaf, tag=None, token=None):
        self.tag = tag
        self.is_leaf = is_leaf
        self.token = token
        # Structural links, populated after construction.
        self.children_nodes = []
        self.parent_index = None
        self.node_index = None
        self.leaf_node_index_seq = []
class PermutationLayer(TransformerLayer):
def __init__(self, permuted_indices: np.ndarray):
super(PermutationLayer, self).__init__(units=permuted_indices.shape[0])
self.units = permuted_indices.shape[0]
self.permuted_indices = np.asarray(permuted_indices, dtype=np.long)
self.inv_perm... |
def register_Ns3DataCollector_methods(root_module, cls):
cls.add_constructor([param('ns3::DataCollector const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AddDataCalculator', 'void', [param('ns3::Ptr< ns3::DataCalculator >', 'datac')])
cls.add_method('AddMetadata', 'void', [param('std::string',... |
def create_functions(deployment, cache_client, code_package, experiment_config, benchmark, language, memory, times_begin_idx, times_end_idx, sleep_time, extend=None):
times = [1, 2, 4, 8, 15, 30, 60, 120, 180, 240, 300, 360, 480, 600, 720, 900, 1080, 1200, 420, 540, 660, 780, 840, 960, 1020, 1140, 1260, 1320]
i... |
def calc_loss_and_acc(logits, labels, loss_type, loss_func):
bs = labels.size(0)
if (loss_type == 'margin_rank'):
num_choice = logits.size(1)
flat_logits = logits.view((- 1))
correct_mask = F.one_hot(labels, num_classes=num_choice).view((- 1))
correct_logits = flat_logits[(correc... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.