code stringlengths 101 5.91M |
|---|
class KGEmbedding():
def __init__(self, device):
self.device = device
self.emb = None
self.is_train = False
def init(self, emb_init, lr, async_threads, num=(- 1), dim=(- 1)):
if (self.emb is None):
self.emb = th.empty(num, dim, dtype=th.float32, device=self.device)
... |
def cross(cp, size):
    """Count zero crossings in the sample stream derived from *cp*.

    A crossing is recorded whenever consecutive samples change sign;
    a sample of exactly zero pairs with a strictly positive (or strictly
    negative) previous sample.
    """
    _check_params(len(cp), size)
    count = 0
    prev = 0
    for cur in _get_samples(cp, size):
        crossed_down = cur <= 0 < prev
        crossed_up = cur >= 0 > prev
        if crossed_down or crossed_up:
            count += 1
        prev = cur
    return count
def common_part_of_commuters(values1, values2, numerator_only=False):
    """Common Part of Commuters (Sorensen-Dice) similarity between two flows.

    CPC = 2 * sum(min(v1, v2)) / (sum(v1) + sum(v2)).

    Parameters
    ----------
    values1, values2 : array-like of non-negative numbers.
    numerator_only : if True, skip the denominator (divide by 1.0).

    Returns
    -------
    float in [0, 1] (0.0 when both totals are zero).
    """
    if numerator_only:
        tot = 1.0
    else:
        # BUG FIX: the original summed values2 twice. The CPC denominator
        # must combine BOTH arrays, otherwise the score is biased whenever
        # the two arrays have different totals.
        tot = np.sum(values1) + np.sum(values2)
    if tot > 0:
        return 2.0 * np.sum(np.minimum(values1, values2)) / tot
    else:
        return 0.0
def test__loss_function():
data = pd.DataFrame({'1': [float(i) for i in range(1000)], '2': [float((2 * i)) for i in range(1000)]})
tvae = TVAE(epochs=300)
tvae.fit(data)
num_samples = 1000
sampled = tvae.sample(num_samples)
error = 0
for (_, row) in sampled.iterrows():
error += abs((... |
def changeTwoStar(G, A, i):
    """Change statistic for two-stars at node *i*: C(degree(i), 2).

    Returns 0 for nodes of degree 0 or 1 (no two-star possible).
    The *A* argument is accepted for interface parity but unused here.
    """
    deg = G.degree(i)
    if deg <= 1:
        return 0
    return deg * (deg - 1) / 2.0
class TestNativeFunctions(TestCase):
def do_test_optional_floatlist_with_module(self, module):
values = torch.tensor([1.5, 2.5], dtype=torch.float)
returned = module(values, None)
self.assertEqual(values, returned)
values[0] = 3.5
self.assertEqual(values, returned)
re... |
class TLogRegPredict(object):
thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_snap.TLogRegPredict_swiginit(self, _snap.new_TLogRegPredict(*args))
def Load(SIn):
return _snap.TLog... |
class TestTensorBoardUtils(BaseTestCase):
def test_to_HWC(self):
test_image = np.random.randint(0, 256, size=(3, 32, 32), dtype=np.uint8)
converted = convert_to_HWC(test_image, 'chw')
self.assertEqual(converted.shape, (32, 32, 3))
test_image = np.random.randint(0, 256, size=(16, 3, 3... |
def main(languages, args=None, print_generated=True):
options = parse_args(languages, args)
package_map = parse_lcmtypes(options.source_path, verbose=options.verbose, print_debug_tokens=options.debug_tokens, cache_parser=True, include_source_paths=(not options.no_source_paths))
packages = list(package_map.v... |
def test_bytes_primitive_statement_random_insertion(test_case_mock):
    """Random insertion into a byte sample must never shrink it."""
    original = list(b'Test')
    mutated = stmt.BytesPrimitiveStatement._random_insertion(original)
    assert len(mutated) >= len(original)
def filter_broken_tags(train_sentences):
    """Drop every sentence that contains a token whose tag (index 1) is None."""
    kept = []
    for sentence in train_sentences:
        if all(token[1] is not None for token in sentence):
            kept.append(sentence)
    return kept
def run_gemver(device_type: dace.dtypes.DeviceType):
N = sizes['small']
(alpha, beta, A, u1, v1, u2, v2, w, x, y, z) = initialize(N)
A_ref = np.copy(A)
w_ref = np.copy(w)
x_ref = np.copy(x)
if (device_type in {dace.dtypes.DeviceType.CPU, dace.dtypes.DeviceType.GPU}):
sdfg = gemver_kernel... |
class CaptureVariable(Capture):
value = None
name = None
calculated_value = None
names_idx = 0
def __init__(self, value, ctx):
self.ctx = ctx
self.value = value
self.name = ('var_%s' % CaptureVariable.names_idx)
CaptureVariable.names_idx += 1
self.ctx['variabl... |
def S2():
    """Benchmark: time the symbolic expansion of a large mixed power.

    Builds (x**sin(x) + y**cos(y) + z**(x+y))**100 and returns the
    wall-clock seconds spent in expand().
    """
    var('x y z')
    expr = ((x ** sin(x)) + (y ** cos(y)) + (z ** (x + y))) ** 100
    start = clock()
    expr.expand()
    stop = clock()
    return stop - start
def forward_step(self, model_output, timestep: int, sample):
if (self.num_inference_steps is None):
raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
prev_timestep = (timestep - (self.config.num_train_timesteps / self.num_inference_step... |
class OneColorBreakoutWorld(BreakoutWorld):
    """Breakout variant whose ball, paddle, and bricks are all white sprites."""
    # Sprite classes consumed by the BreakoutWorld base (defined elsewhere).
    ball_class = WhiteBall
    paddle_class = WhitePaddle
    brick_class = WhiteBrick
class TFXLNetLMHeadModel():
    """Placeholder for the TensorFlow XLNet LM-head model.

    Every entry point calls ``requires_tf`` — presumably a guard that
    raises an informative error when TensorFlow is not installed
    (defined elsewhere; TODO confirm).
    """
    def __init__(self, *args, **kwargs):
        requires_tf(self)
    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
def parse_common_args():
parser = argparse.ArgumentParser(description='Common arguments')
parser.add_argument('--data', type=str, default='mtop', help='Type of dataset to train')
parser.add_argument('--max_epochs', type=int, default=100, help='Total number of epochs to train')
parser.add_argument('--deb... |
class ParamProduction(Production):
_param_id: int
def __init__(self, id: int, lhs: ValueType, param_id: int):
super().__init__(id, lhs)
if (not isinstance(lhs, ValueType)):
raise ValueError('LHS of ParamProduction must be a value type')
self._param_id = param_id
def rhs(s... |
def try_real_annotations(fn, loc):
try:
sig = inspect.signature(fn)
except ValueError:
return None
all_annots = ([sig.return_annotation] + [p.annotation for p in sig.parameters.values()])
if all(((ann is sig.empty) for ann in all_annots)):
return None
def as_ann(ann):
... |
def demo_preprocess(args, example, vocabs=None, schema_graph=None):
(text_tokenize, program_tokenize, post_process, tu) = tok.get_tokenizers(args)
if (not schema_graph):
schema_graphs = load_schema_graphs(args)
schema_graph = schema_graphs.get_schema(example.db_id)
schema_graph.lexicalize_gr... |
_properties
class HorizontalEinsumFusion(transformation.SingleStateTransformation):
top = transformation.PatternNode(donnx.ONNXEinsum)
access = transformation.PatternNode(nodes.AccessNode)
bot = transformation.PatternNode(donnx.ONNXEinsum)
allow_nonblas = Property(dtype=bool, default=False, desc='Allow ... |
_properties
class NestSDFG(transformation.MultiStateTransformation):
promote_global_trans = Property(dtype=bool, default=False, desc='Promotes transients to be allocated once')
def annotates_memlets():
return True
def expressions(cls):
return [nx.DiGraph()]
def can_be_applied(self, graph... |
def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
arch_def = [['ds_r1_k3_s1_e1_c16_noskip'], ['ir_r2_k3_s2_e6_c24'], ['ir_r3_k5_s2_e3_c40_se0.25'], ['ir_r4_k3_s2_e6_c80'], ['ir_r2_k3_s1_e6_c112_se0.25'], ['ir_r3_k5_s2_e6_c160_se0.25'], ['ir_r1_k3_s1_e6_c320']]
with layer_config_k... |
def matrix_similarity_classes_length_two(n, q=None, selftranspose=False, invertible=False):
    """Sum, over similarity class types of size *n*, of the class count
    times the corresponding external orbit count.

    When *q* is not supplied, the generator of Frac(QQ[q]) is used so the
    result is a rational function in q.
    """
    if q is None:
        q = FractionField(QQ['q']).gen()
    total = 0
    for tau in SimilarityClassTypes(n):
        classes = tau.number_of_classes(invertible=invertible, q=q)
        orbits = ext_orbits(tau, q=q, selftranspose=selftranspose)
        total += classes * orbits
    return total
class PBWBasisCrossProduct(CombinatorialFreeModule):
def __init__(self, base_ring):
I = IndexedFreeAbelianMonoid(['x', 'y', 'z'], prefix='U')
CombinatorialFreeModule.__init__(self, base_ring, I, bracket=False, prefix='', sorting_key=self._sort_key, category=FilteredAlgebrasWithBasis(base_ring))
... |
def OA_20_416():
from sage.rings.finite_rings.finite_field_constructor import FiniteField
Z = None
A = [[(0, Z), (0, Z), (0, Z), (0, Z), (0, Z), (0, Z), (0, Z), (0, Z), (0, Z), (0, Z), (0, Z), (0, Z), (0, Z), (0, Z), (1, Z), (4, Z), (9, Z), (3, Z), (12, Z)], [(0, Z), (1, Z), (2, 18), (3, 2), (4, 20), (5, 22... |
def get_ckpt_path_from_folder(folder) -> str:
ckpts = []
allowed_ckpt_types = [f'*{ext}' for ext in ALLOWED_CHECKPOINT_EXTS]
for ckpt_type in allowed_ckpt_types:
ckpts.extend(glob.glob(os.path.join(folder, ckpt_type)))
assert (len(ckpts) == 1), "None or multiple checkpoints files. MMF doesn't kn... |
def test_get_dependencies_chained(default_test_case, function_mock):
unused_float = st.FloatPrimitiveStatement(default_test_case, 5.5)
default_test_case.add_statement(unused_float)
float0 = st.FloatPrimitiveStatement(default_test_case, 5.5)
default_test_case.add_statement(float0)
func0 = st.Function... |
class _NeuralF(torch.nn.Module):
def __init__(self, width, oscillate):
super(_NeuralF, self).__init__()
self.linears = torch.nn.Sequential(torch.nn.Linear(2, width), torch.nn.Tanh(), torch.nn.Linear(width, 2), torch.nn.Tanh())
self.nfe = 0
self.oscillate = oscillate
def forward(s... |
def select_salient_terms(corpus_w_svo_pickle, verb_freq_file, all_lemma_freq_file, spacy_model, min_verb_freq, top_verb_ratio, min_obj_freq, top_obj_ratio):
print('Loading Spacy model...')
nlp = spacy.load(spacy_model)
nlp.tokenizer = WhitespaceTokenizer(nlp.vocab)
print('Loading Corpus...')
with op... |
.parametrize('observation_shape', [(100,), (4, 84, 84), ((100,), (200,))])
.parametrize('scalers', [None, 'min_max'])
def test_iql(observation_shape: Shape, scalers: Optional[str]) -> None:
(observation_scaler, action_scaler, reward_scaler) = create_scaler_tuple(scalers, observation_shape)
config = IQLConfig(ac... |
class AliveTest(FileBasedTest):
def __init__(self):
self.regex = re.compile(';\\s*(ERROR:.*)')
self.regex_args = re.compile(';\\s*TEST-ARGS:(.*)')
def execute(self, test, litConfig):
test = test.getSourcePath()
cmd = ['python', 'run.py', test]
input = readFile(test)
... |
def extract_index_access(baseviewer, subviewer, indices):
(tensorlib, _) = get_backend()
index_selection = []
stitched = None
indices_concatenated = None
if subviewer:
index_selection = baseviewer.split(indices, selection=subviewer.names)
stitched = subviewer.stitch(index_selection)
... |
class TrackableObject():
    """Per-object tracking state: identifier, centroid history, counted flag."""

    def __init__(self, objectID, centroid):
        # Identifier assigned by the tracker.
        self.objectID = objectID
        # Centroid history; the initial centroid seeds the list.
        self.centroids = [centroid]
        # Set by the caller once the object has been tallied.
        self.counted = False
def detokenize(sents, reverse_vocab):
    """Map each sentence of token ids back to a string.

    Ids below len(nlc_data._START_VOCAB) are reserved symbols and are
    skipped; the remaining ids are looked up in *reverse_vocab* and
    concatenated.
    """
    reserved = len(nlc_data._START_VOCAB)

    def detok_sent(sent):
        return ''.join(reverse_vocab[tok] for tok in sent if tok >= reserved)

    return [detok_sent(sentence) for sentence in sents]
def evaluation(model, dataloader, device):
model.eval()
sim_tensor = torch.tensor([], device=device)
label_array = np.array([])
with torch.no_grad():
for (source, target, label) in dataloader:
source_input_ids = source.get('input_ids').squeeze(1).to(device)
source_attenti... |
def assign_gpu_idx(num_parallels, num_gpus):
    """Round-robin *num_parallels* workers onto the available GPUs.

    *num_gpus* may be an int, or a comma-separated device string such as
    "0,1,2" (its element count is used).

    Returns (worker_ids, gpu_ids), both lists of length num_parallels.
    """
    if isinstance(num_gpus, str):
        # Accept CUDA_VISIBLE_DEVICES-style strings.
        num_gpus = len(num_gpus.strip().split(','))
    worker_ids = list(range(num_parallels))
    gpu_ids = [worker % num_gpus for worker in worker_ids]
    return (worker_ids, gpu_ids)
class ClassificationHead(nn.Module):
    """Linear classification head: projects features to class logits."""

    def __init__(self, in_channels, num_classes):
        super(ClassificationHead, self).__init__()
        # Single fully-connected projection; no activation applied here.
        self.classifier = nn.Linear(in_channels, num_classes)

    def forward(self, x):
        """Map features of shape (..., in_channels) to (..., num_classes)."""
        logits = self.classifier(x)
        return logits
def test_skip_non_negated_headers(empty_open_api_3_schema):
empty_open_api_3_schema['paths'] = {'/test': {'get': {'parameters': [{'in': 'header', 'name': 'If-Modified-Since', 'schema': {'type': 'string'}}], 'responses': {'200': {'description': ''}}}}}
schema = schemathesis.from_dict(empty_open_api_3_schema, dat... |
def save_obj(obj, name, save_dir):
    """Pickle *obj* to ``<save_dir>/<name>.pkl``, creating the directory if needed.

    Parameters
    ----------
    obj : any picklable object.
    name : file stem; '.pkl' is appended.
    save_dir : target directory (created on demand).
    """
    # exist_ok avoids the check-then-create race of the original
    # exists()/makedirs pair.
    os.makedirs(save_dir, exist_ok=True)
    # os.path.join handles trailing separators portably, replacing the
    # manual rstrip('\\/') + '/' concatenation.
    objfile = os.path.join(save_dir, name + '.pkl')
    with open(objfile, 'wb') as f:
        pk.dump(obj, f, pk.HIGHEST_PROTOCOL)
class ResNetPreTrainedModel(PreTrainedModel):
config_class = ResNetConfig
base_model_prefix = 'resnet'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
def _init_weights(self, module):
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight,... |
class Optional(ParseElementEnhance):
def __init__(self, expr, default=_optionalNotMatched):
super(Optional, self).__init__(expr, savelist=False)
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
try:
(loc, token... |
def spatial_bn(model, blob_in, blob_out, dim_in, init_scale=1.0, init_bias=0.0, ScaleInitializer=None, BiasInitializer=None, RunningMeanInitializer=None, RunningVarianceInitializer=None, order='NCHW', **kwargs):
blob_out = (blob_out or model.net.NextName())
if model.init_params:
scale_init = ('ConstantF... |
def pad_tensor_dict(tensor_dict, max_len):
    """Recursively pad every tensor in a (possibly nested) dict to *max_len*.

    Nested dicts are processed recursively; leaves are passed to
    pad_tensor (defined elsewhere). Returns a new dict with the same keys.
    """
    return {
        key: pad_tensor_dict(value, max_len) if isinstance(value, dict)
        else pad_tensor(value, max_len)
        for key, value in tensor_dict.items()
    }
class DirectedGraph(object):
def __init__(self):
self._vertices = set()
self._forwards = {}
self._backwards = {}
def __iter__(self):
return iter(self._vertices)
def __len__(self):
return len(self._vertices)
def __contains__(self, key):
return (key in self.... |
()
def dummy_network():
    """Build a tiny stand-in network: flatten -> adaptive-avg-pool(10) -> linear(10, 5)."""
    layers = [
        nn.Flatten(),
        nn.AdaptiveAvgPool1d(output_size=10),
        nn.Linear(10, 5),
    ]
    return nn.Sequential(*layers)
class MSAFNet(nn.Module):
def __init__(self, model_param):
super(MSAFNet, self).__init__()
self.msaf_locations = {'video': [], 'audio': []}
self.msaf = nn.ModuleList([])
self.num_msaf = len(self.msaf)
self.fc = nn.Linear(3712, 8)
if ('video' in model_param):
... |
def register_Ns3EdcaParameterSetValue_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::EdcaParameterSet const &', 'value')])
cls.add_constructor([param('ns3::EdcaParameterSetValue const &', 'arg0')])
cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_cons... |
def gumbel_max_sample(x):
    """Draw one categorical sample from logits *x* via the Gumbel-max trick.

    Adds standard Gumbel noise to x and returns the argmax, ignoring NaN
    entries. Consumes exactly one np.random.gumbel call of x.shape.
    """
    noise = np.random.gumbel(loc=0, scale=1, size=x.shape)
    perturbed = x + noise
    return np.nanargmax(perturbed)
def set_log(args):
res_dir = os.path.join(args.res_save_dir, 'normals')
if (not os.path.exists(res_dir)):
os.makedirs(res_dir)
suffix = (('-mr' + '_'.join([str(mr) for mr in args.missing_rate])) if (args.diff_missing is not None) else f'-mr{args.missing_rate[0]}')
log_file_path = os.path.join(re... |
def json_prec_dump(data, prec=6):
    """Serialize *data* to JSON with every float rounded to *prec* digits.

    Round-trips the data through JSON so that rounding is applied to
    floats at any nesting depth via the parse_float hook.
    """
    def _round_float(text):
        return round(float(text), prec)

    truncated = json.loads(json.dumps(data), parse_float=_round_float)
    return json.dumps(truncated)
def strip_config_spec(config_spec):
    """Remove the '__class__' entry from *config_spec* if present.

    Mutates the dict in place and returns it for call chaining.
    """
    config_spec.pop('__class__', None)
    return config_spec
def test_hub_metadata(request, save_path):
hm = HubMetadata('0.17.4', '0.8.0', 'SCVI')
assert (hm.scvi_version == '0.17.4')
assert (hm.anndata_version == '0.8.0')
assert (hm.training_data_url is None)
assert (hm.model_parent_module == 'scvi.model')
d = {'scvi_version': '0.15.4', 'anndata_version... |
def removeAllapostrophe(string):
string = string.replace('.', '')
string = string.replace(',', '')
string = string.replace('_', ' ')
string = string.replace('?', '')
string = string.replace('"', '')
string = string.replace('/', ' ')
string = string.replace('\\', '')
string = string.repla... |
class Decoder(nn.Module):
def __init__(self, input_size, embedding_size, hidden_size, output_size, num_layers, p):
super(Decoder, self).__init__()
self.dropout = nn.Dropout(p)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.embedding = nn.Embedding(input_size... |
class RteProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, 'train.jsonl'), 'train')
def get_dev_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, 'val.jsonl'), 'dev')
def get_test_examples(self, d... |
def text_stats(posts):
    """Per-post features: character length and a crude sentence count.

    Sentences are approximated by counting '.' characters.
    """
    def _features(text):
        return {'length': len(text), 'num_sentences': text.count('.')}

    return [_features(text) for text in posts]
class PretrainDataset(Dataset):
    """Dataset that records the current training epoch via set_epoch."""
    def set_epoch(self, epoch: int) -> None:
        # Stored for consumers elsewhere (e.g. epoch-dependent shuffling
        # or augmentation) — not used within this class as visible here.
        self.epoch = epoch
def test__upgrade_constraints_no_constraints():
    """_upgrade_constraints yields None for legacy metadata with no fields."""
    legacy_metadata = {'fields': {}}
    result = _upgrade_constraints(legacy_metadata)
    assert result is None
class TorchSTFT(nn.Module):
def __init__(self, n_fft: int=4096, n_hop: int=1024, center: bool=False, window: Optional[nn.Parameter]=None):
super(TorchSTFT, self).__init__()
if (window is None):
self.window = nn.Parameter(torch.hann_window(n_fft), requires_grad=False)
else:
... |
def file_check(file_name):
    """Ensure the parent directory of *file_name* exists, creating it if needed.

    A bare file name (no directory component) requires no action.
    """
    path = os.path.dirname(file_name)
    # Guard the empty-path case: os.makedirs('') raises FileNotFoundError
    # when file_name has no directory component. exist_ok also removes the
    # check-then-create race of the original exists()/makedirs pair.
    if path:
        os.makedirs(path, exist_ok=True)
class StackFilter(Filter):
def __init__(self, length):
self.stack = deque(maxlen=length)
def reset(self):
self.stack.clear()
def __call__(self, x, update=True):
self.stack.append(x)
while (len(self.stack) < self.stack.maxlen):
self.stack.append(x)
return n... |
def compute_metrics(predictions, references, xlingual=False):
assert (len(predictions) == len(references)), f"# of predictions {len(predictions)} doesn't match # of references {len(references)}."
(exact_match, rouge1, rougeL) = (0, 0, 0)
for (pred, gold) in zip(predictions, references):
if (END_SEQ ... |
def main(args):
random.seed(12345)
Queries = OrderedDict()
print_message(f'#> Loading queries from {args.input}..')
with open(args.input) as f:
for line in f:
(qid, query) = line.strip().split('\t')
assert (qid not in Queries)
Queries[qid] = query
'\n A... |
def get_images(filename):
    """Load an image file and resize it to 299x299 pixels.

    NOTE(review): relies on scipy.misc.imread/imresize, which were removed
    in modern SciPy — presumably this file pins an old SciPy; verify.
    """
    image = misc.imread(filename)
    image = misc.imresize(image, size=[299, 299])
    return image
def efficientnet_el(pretrained=False, **kwargs):
    """EfficientNet-EL (Edge-Large): channel x1.2, depth x1.4."""
    return _gen_efficientnet_edge(
        'efficientnet_el',
        channel_multiplier=1.2,
        depth_multiplier=1.4,
        pretrained=pretrained,
        **kwargs,
    )
def get_test_transform():
    """Test-time transform pipeline: normalization only, no augmentation."""
    return albu.Compose([albu.Normalize()])
class LeftOversCollator():
def __init__(self, tokenizer, device, max_segment_len):
self.tokenizer = tokenizer
self.device = device
self.max_segment_len = max_segment_len
def __call__(self, batch):
batch = self.tokenizer.pad(batch)
batch['leftovers'] = {'input_ids': [], 'a... |
.unit
def test_backpressure_queue():
helpers.setup()
pbar_ref = (0, u.MockQueue(helpers.MockTQDM()))
n_parallel_jobs = 1
f_args = [[None], [None], [None]]
hit_all_queue = [False, False, False]
wait_one = [True]
def wait_f(in_progress: List[Any]):
still_running = (in_progress[1:] if (... |
def load_glove(data_dir_path=None):
if (data_dir_path is None):
data_dir_path = 'very_large_data'
download_glove(data_dir_path)
_word2em = {}
glove_model_path = (((data_dir_path + '/glove.6B.') + str(GLOVE_EMBEDDING_SIZE)) + 'd.txt')
file = open(glove_model_path, mode='rt', encoding='utf8')
... |
def sharp_if(extr, testValue, valueIfTrue, valueIfFalse=None, *args):
    """Template '#if' helper.

    When *testValue* is non-blank, expand the true branch and return it if
    the expansion is non-empty. Otherwise expand and return the false
    branch when given. Falls back to '' in all remaining cases.
    """
    if testValue.strip():
        expanded = extr.expand(valueIfTrue.strip())
        if expanded:
            return expanded
        return ''
    if valueIfFalse:
        return extr.expand(valueIfFalse.strip())
    return ''
.mujoco
def test_set_task_task_sampler_half_cheetah_vel_env():
tasks = task_sampler.SetTaskSampler(HalfCheetahVelEnv)
assert (tasks.n_tasks is None)
updates = tasks.sample(10)
envs = [update() for update in updates]
action = envs[0].action_space.sample()
rewards = [env.step(action)[1] for env in... |
class HRModule(BaseModule):
def __init__(self, num_branches, blocks, num_blocks, in_channels, num_channels, multiscale_output=True, with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), block_init_cfg=None, init_cfg=None):
super(HRModule, self).__init__(init_cfg)
self.block_init_cfg = block_init_c... |
class _SplitDataset(torch.utils.data.Dataset):
def __init__(self, underlying_dataset, keys, train_flag):
super(_SplitDataset, self).__init__()
self.underlying_dataset = underlying_dataset
self.keys = keys
self.train_flag = train_flag
def __getitem__(self, key):
self.under... |
class MpoImageFile(JpegImagePlugin.JpegImageFile):
format = 'MPO'
format_description = 'MPO (CIPA DC-007)'
_close_exclusive_fp_after_loading = False
def _open(self):
self.fp.seek(0)
JpegImagePlugin.JpegImageFile._open(self)
self._after_jpeg_open()
def _after_jpeg_open(self, m... |
def get_arg_return_types_from_interface(module_interface):
assert getattr(module_interface, '__torch_script_interface__', False), 'Expect a TorchScript class interface decorated by .interface.'
qualified_name = torch._jit_internal._qualified_name(module_interface)
cu = torch.jit._state._python_cu
module... |
def plot_results(testmus, cls_obs, cls_exp, test_size=0.05):
    """Plot observed CLs (black), the five expected-CLs bands (grey), and
    the test-size reference line (red) against the mu test points.

    Parameters
    ----------
    testmus : sequence of mu values (x axis).
    cls_obs : observed CLs values, same length as testmus.
    cls_exp : indexable of five expected-CLs series.
    test_size : significance level drawn as a horizontal red line.

    BUG FIX: the original plotted against an undefined global ``mutests``;
    the parameter is named ``testmus``.
    """
    plt.plot(testmus, cls_obs, c='k')
    for i, color in zip(range(5), ['grey'] * 5):
        plt.plot(testmus, cls_exp[i], c=color)
    plt.plot(testmus, [test_size] * len(testmus), c='r')
    plt.ylim(0, 1)
class ActiveLeaf(metaclass=ABCMeta):
def new_nominal_attribute_observer():
pass
def new_numeric_attribute_observer():
pass
def attribute_observers(self):
try:
return self._attribute_observers
except AttributeError:
self._attribute_observers = {}
... |
def get_sched_predictor(optimizer, sched_creator_cls, **kw):
    """Build a SchedulerPredictor mirroring *optimizer*'s parameter groups.

    Collects the per-group learning rates and group count, then forwards
    them (plus any extra keyword overrides) to SchedulerPredictor.
    """
    param_groups = optimizer.param_groups
    kwargs = {
        'lrs': [group['lr'] for group in param_groups],
        'sched_creator_cls': sched_creator_cls,
        'n_param_groups': len(param_groups),
        **kw,
    }
    return SchedulerPredictor(**kwargs)
def test_evaluate_prequential_classifier(tmpdir, test_path):
stream = RandomTreeGenerator(tree_random_state=23, sample_random_state=12, n_classes=4, n_cat_features=2, n_num_features=5, n_categories_per_cat_feature=5, max_tree_depth=6, min_leaf_depth=3, fraction_leaves_per_level=0.15)
nominal_attr_idx = [x for x... |
class DotWriter():
def __init__(self, file):
self.file = file
self._write('graph G {\n')
def _write(self, string):
self.file.write(string)
def add_vertex(self, id):
self._write(' {};\n'.format(id))
def add_edge(self, src, dest):
self._write(' {} -- {};\n'.format... |
def _command_line_ok(_cache=None):
if _cache:
return _cache[0]
elif (_cache is None):
_cache = []
ok = True
display_opts = [('--' + n) for n in Distribution.display_option_names]
for o in Distribution.display_options:
if o[1]:
display_opts.append(('-' + o[1]))
... |
class TestClassifierData():
def test_read_data(self, train_file):
train_set = data.read_dataset(str(train_file), WVType.OTHER, 1)
assert (len(train_set) == 60)
def test_read_data_with_trees(self, train_file, train_file_with_trees):
train_trees_set = data.read_dataset(str(train_file_with_... |
class FreeScale(object):
    """Resize an (image, mask) pair to a fixed size.

    *size* is given as (height, width); PIL's resize expects
    (width, height), hence the reversal at construction time.
    """

    def __init__(self, size):
        self.size = tuple(reversed(size))

    def __call__(self, img, mask):
        # Image and mask must be aligned before resizing.
        assert (img.size == mask.size)
        resized_img = img.resize(self.size, Image.BILINEAR)
        # Nearest-neighbour for the mask keeps labels discrete.
        resized_mask = mask.resize(self.size, Image.NEAREST)
        return (resized_img, resized_mask)
def test_random_under_sampling_datetime():
pd = pytest.importorskip('pandas')
X = pd.DataFrame({'label': [0, 0, 0, 1], 'td': ([datetime.now()] * 4)})
y = X['label']
rus = RandomUnderSampler(random_state=0)
(X_res, y_res) = rus.fit_resample(X, y)
pd.testing.assert_series_equal(X_res.dtypes, X.dty... |
def add_distributed_training_args(parser):
group = parser.add_argument_group('Distributed training')
group.add_argument('--distributed-world-size', type=int, metavar='N', default=torch.cuda.device_count(), help='total number of GPUs across all nodes (default: all visible GPUs)')
group.add_argument('--distri... |
class TestCorefReader(AllenNlpTestCase):
def setUp(self):
super(TestCorefReader, self).setUp()
self.span_width = 5
def test_read_from_file(self):
conll_reader = ConllCorefReader(max_span_width=self.span_width)
dataset = conll_reader.read('tests/fixtures/data/coref/sample.gold_con... |
def change_strides_test():
sdfg = dace.SDFG('change_strides_test')
N = dace.symbol('N')
M = dace.symbol('M')
sdfg.add_array('A', [N, M], dace.float64)
sdfg.add_array('B', [N, M, 3], dace.float64)
state = sdfg.add_state()
(task1, mentry1, mexit1) = state.add_mapped_tasklet(name='map1', map_ra... |
class IntModel(nn.Module):
def __init__(self, head, body, classifier, block_setting):
super(IntModel, self).__init__()
self.block_setting = block_setting
self.head = head
if getattr(FLAGS, 'quant_maxpool', False):
self.head[(- 1)] = FXQMaxPool2d(self.head[(- 1)].kernel_si... |
class MaskedCrossEntropyLayer(torch.nn.Module):
def __init__(self):
super(MaskedCrossEntropyLayer, self).__init__()
self.epsilon = 1e-08
def forward(self, y_pred, targets, seq_mask, weight=None):
shape = y_pred.size()
label_size = shape[(- 1)]
y_pred = y_pred.view((- 1), ... |
def _pixel_num(partial_primitives, pixels_dict):
if (len(partial_primitives) == 0):
return 0
num = sum((len(pixels_dict[key]) for key in partial_primitives))
return num |
def start_collab_storyline(system_id, topic, storyline, kw_temp, dedup, max_len):
    """Enqueue a 'collab_storyline' job on the worker queue for *system_id*."""
    payload = dict(
        action='collab_storyline',
        topic=topic,
        storyline=storyline,
        kw_temp=kw_temp,
        dedup=dedup,
        max_len=max_len,
    )
    request_queues[system_id].put(payload)
class Model(rf.Module):
def __init__(self, in_dim: Dim, encoder_in_dim: Dim, *, num_enc_layers: int=12, target_dim: Dim, eos_idx: int, bos_idx: int, enc_model_dim: Dim=Dim(name='enc', dimension=512), enc_ff_dim: Dim=Dim(name='enc-ff', dimension=2048), enc_att_num_heads: int=4, enc_conformer_layer_opts: Optional[Dic... |
class A001221(SloaneSequence):
    """OEIS A001221: omega(n), the number of distinct primes dividing n."""
    def __init__(self):
        # The sequence is defined for n >= 1.
        SloaneSequence.__init__(self, offset=1)
    def _repr_(self):
        return 'Number of distinct primes dividing n (also called omega(n)).'
    def _eval(self, n):
        # omega(n) = number of distinct prime divisors of n.
        return len(arith.prime_divisors(n))
class TestGPTJWindowService():
def setup_method(self):
self.path: str = tempfile.mkdtemp()
service: TokenizerService = get_tokenizer_service(self.path)
self.window_service = WindowServiceFactory.get_window_service('together/gpt-j-6b', service)
def teardown_method(self, method):
s... |
class LSTM(nn.Module):
def __init__(self, n_in, n_hidden, n_out):
super(LSTM, self).__init__()
self.rnn = nn.LSTM(n_in, n_hidden, bidirectional=True, batch_first=True)
self.linear = nn.Linear((2 * n_hidden), n_out)
def forward(self, x):
if (type(x) is not PackedSequence):
... |
class MultiscaleDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, n_frames=16, norm_layer=nn.InstanceNorm2d, num_D=2, getIntermFeat=True):
super(MultiscaleDiscriminator, self).__init__()
self.num_D = num_D
self.n_layers = n_layers
self.getIntermFeat = getInte... |
class BidirectionalGRU(nn.Module):
def __init__(self, rnn_dim, hidden_size, dropout, batch_first):
super(BidirectionalGRU, self).__init__()
self.BiGRU = nn.GRU(input_size=rnn_dim, hidden_size=hidden_size, num_layers=1, batch_first=batch_first, bidirectional=True)
self.layer_norm = nn.LayerNo... |
def save_results(result, out_dir, img_name, score_thr=0.3):
assert ('boundary_result' in result)
assert ((score_thr > 0) and (score_thr < 1))
txt_file = gen_target_path(out_dir, img_name, '.txt')
valid_boundary_res = [res for res in result['boundary_result'] if (res[(- 1)] > score_thr)]
lines = [','... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.