code stringlengths 101 5.91M |
|---|
def is_float(x):
    """Return True if *x* can be converted to a float, else False.

    Accepts anything ``float()`` accepts (numeric strings, ints, floats).
    Non-convertible values -- including non-string/non-numeric inputs such
    as ``None`` or a list -- yield ``False`` instead of raising.
    """
    try:
        float(x)
        return True
    except (TypeError, ValueError):
        # ValueError: malformed strings like "abc".
        # TypeError: types float() cannot even attempt (None, list, ...),
        # which the original version let propagate to the caller.
        return False
def gap_console():
from sage.repl.rich_output.display_manager import get_display_manager
if (not get_display_manager().is_in_terminal()):
raise RuntimeError('Can use the console only in the terminal. Try %%gap magics instead.')
(cmd, _) = gap_command(use_workspace_cache=False)
cmd += (' ' + os.p... |
def root_scalar(f, args=(), method=None, bracket=None, fprime=None, fprime2=None, x0=None, x1=None, xtol=None, rtol=None, maxiter=None, options=None):
if (not isinstance(args, tuple)):
args = (args,)
if (options is None):
options = {}
is_memoized = False
if ((fprime2 is not None) and (no... |
class BatchMasking(Data):
def __init__(self, batch=None, **kwargs):
super(BatchMasking, self).__init__(**kwargs)
self.batch = batch
def from_data_list(data_list):
keys = [set(data.keys) for data in data_list]
keys = list(set.union(*keys))
assert ('batch' not in keys)
... |
def to_2d_array_level(file_name):
level = []
with open(file_name, 'r') as f:
rows = f.readlines()
for row in rows:
new_row = []
for char in row:
if (char != '\n'):
new_row.append(TILES_MAP[char])
level.append(new_row)
tr... |
def func_attention(query, context, gamma1):
(batch_size, queryL) = (query.size(0), query.size(2))
(ih, iw) = (context.size(2), context.size(3))
sourceL = (ih * iw)
context = context.view(batch_size, (- 1), sourceL)
contextT = torch.transpose(context, 1, 2).contiguous()
attn = torch.bmm(contextT,... |
_dataset(NAME)
class PascalVOCDataset(FileListDataset):
def __init__(self, config, name, subset, num_classes):
data_dir = config.string('data_dir', DEFAULT_PATH)
super().__init__(config, name, subset, data_dir, num_classes)
def read_inputfile_lists(self):
data_list = ('train.txt' if (sel... |
def generate_meshs(bop_path, dataset_name, divide_number_each_iteration, number_of_itration, executable_path):
(dataset_dir, source_dir, model_plys, model_info, model_ids, rgb_files, depth_files, mask_files, mask_visib_files, gts, gt_infos, cam_param_global) = bop_io.get_dataset(bop_path, dataset_name)
if (not ... |
def get_nae(**model_cfg):
arch = model_cfg.pop('arch')
x_dim = model_cfg['x_dim']
z_dim = model_cfg['z_dim']
encoder = get_net(in_dim=x_dim, out_dim=z_dim, **model_cfg['encoder'])
decoder = get_net(in_dim=z_dim, out_dim=x_dim, **model_cfg['decoder'])
if (arch == 'nae'):
ae = NAE(encoder,... |
def locate_with_hint(class_path, prefix_hints=()):
    """Resolve *class_path* to a module/class object via ``locate``.

    First tries the path as given; if that fails and *prefix_hints* is
    non-empty, retries with the hints joined as a dotted prefix
    (e.g. hints ``['a', 'b']`` + path ``'C'`` -> ``'a.b.C'``).

    Args:
        class_path: dotted path of the object to locate.
        prefix_hints: optional sequence of package-name components used as
            a fallback prefix. Defaults to an immutable empty tuple -- the
            original mutable ``[]`` default was a shared-state hazard.

    Returns:
        The located module or class, or ``None`` if resolution failed.
    """
    module_or_class = locate(class_path)
    if module_or_class is None and prefix_hints:
        # Only retry when there is an actual prefix; the original built
        # '.<class_path>' from empty hints, which can never resolve.
        hint = '.'.join(prefix_hints)
        module_or_class = locate(hint + '.' + class_path)
    return module_or_class
.parametrize('a,expected', [([[]], []), ([[0]], []), ([[1]], [0]), ([[0, 1], [10, 0]], [1, 0]), ([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 3, 4, 1, 2])])
def test_make_edge_pointers(a, expected):
    """Reverse-edge pointers of the CSR graph must match the expected order."""
    graph = csr_matrix(a, dtype=np.int32)
    assert_array_equal(_make_edge_pointers(graph), expected)
.parametrize('classifier', classifiers)
def test_knn(classifier):
    """A hierarchical classifier wrapping a 2-NN base estimator fits cleanly."""
    base_estimator = KNeighborsClassifier(n_neighbors=2)
    model = classifier(local_classifier=base_estimator)
    X = np.array([[1, 2], [3, 4]])
    y = np.array([['a', 'b'], ['a', 'c']])
    model.fit(X, y)
    check_is_fitted(model)
class Test_HinSAGELinkGenerator(object):
n_feat = {'B': 5, 'A': 10}
batch_size = 2
num_samples = [2, 3]
def test_HinSAGELinkGenerator_constructor(self):
G = example_HIN_homo(self.n_feat)
links = [(1, 4), (1, 5), (0, 4), (5, 0)]
link_labels = ([0] * len(links))
gen = HinSA... |
def main():
global dataset_video_root_path, save_images_root_path
video_path_list = sorted(glob.glob(('%s/*.mp4' % dataset_video_root_path)))
for video_path in tqdm(video_path_list):
video_name = os.path.split(video_path)[(- 1)][:(- 4)]
(actor_id, cloth_id, action_type) = video_name.split('_... |
def p2_2partitions_16x4(model='wrn_16x4_c100_p2'):
csv = '2partitions.csv'
out_file_name = f'{model}_output.png'
out_file_name = os.path.join('.', out_file_name)
df = pd.read_csv(csv).query("dataset == 'cifar100' and model == ").query('epoch == 200')
ax = sns.barplot(x='epoch', y='test_acc', hue='al... |
def main(_):
print('Parsed arguments: ', FLAGS.__flags)
if (not os.path.exists(FLAGS.save_path)):
os.makedirs(FLAGS.save_path)
if (not os.path.exists(FLAGS.synthesis_path)):
os.makedirs(FLAGS.synthesis_path)
np.random.seed(FLAGS.seed)
config = tf.ConfigProto()
config.gpu_options.... |
def predict_step(datasource, select, result_table, result_column_names, train_label_idx, model, extra_result_cols=[], pai_table=None):
if isinstance(model, six.string_types):
model = Model.load_from_db(datasource, model)
else:
assert isinstance(model, Model), ('not supported model type %s' % typ... |
class WebvisionDataLoader(BaseDataLoader):
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_batches=0, training=True, num_workers=4, pin_memory=True, num_class=50):
self.batch_size = batch_size
self.num_workers = num_workers
self.num_batches = num_batches
... |
def main():
print(header.lstrip())
print('all_bfs = {')
for (dim, _dbfs) in all_bfs.items():
print(' {} : {{'.format(dim))
for (order, _bfs) in _dbfs.items():
print(' {} : ['.format(order))
vs = [x, y, z][:dim]
bfs = [sm.horner(sm.simplify(bf.sub... |
def unpack(gzip_file: str, out_file: str):
    """Decompress *gzip_file* and write the raw bytes to *out_file*.

    Streams the data with ``shutil.copyfileobj`` so large archives do not
    have to fit in memory (the original read the whole file into one
    bytes object), and uses context managers so both file handles are
    closed even on error. Also drops the original's shadowing of the
    builtin ``input``.
    """
    import shutil  # local stdlib import: streaming copy helper

    print('Uncompressing ', gzip_file)
    with gzip.GzipFile(gzip_file, 'rb') as src, open(out_file, 'wb') as dst:
        shutil.copyfileobj(src, dst)
    print('Saved to ', out_file)
def load_learnable_tester(algo: LearnableBase[(ImplBase, LearnableConfig)], observation_shape: Shape, action_size: int) -> None:
algo.create_impl(observation_shape, action_size)
path = os.path.join('test_data', 'algo.d3')
algo.save(path)
new_algo = load_learnable(path)
assert algo.impl
assert ne... |
class SuperbProblem(Problem, Trainer):
_cfg(workspace=field('???', "\nWill put the following keys into this workspace:\n 'train_dataset', 'train_sampler', 'valid_dataset', 'valid_sampler', and 'task'", 'str or Path or Workspace'), corpus=dict(CLS=field('???', '\nThe corpus class. You can add the **kwargs right bel... |
def block_inception_c(inputs, scope=None, reuse=None):
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d], stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv... |
def test_from_regular():
array = ak.contents.NumpyArray(np.arange(((2 * 3) * 5), dtype=np.int64).reshape(2, 3, 5))
irregular1 = ak.operations.from_regular(array, axis=1, highlevel=False)
irregular2 = ak.operations.from_regular(array, axis=2, highlevel=False)
irregularNone = ak.operations.from_regular(ar... |
class Parameter():
def __init__(self):
self.args = self.extract_args()
def extract_args(self):
self.parser = argparse.ArgumentParser(description='Video Deblurring')
self.parser.add_argument('--description', type=str, default='develop', help='experiment description')
self.parser.a... |
class CloseObjectAction(BaseAction):
valid_actions = {'CloseObject'}
def get_reward(self, state, prev_state, expert_plan, goal_idx):
if (state.metadata['lastAction'] not in self.valid_actions):
(reward, done) = (self.rewards['invalid_action'], False)
return (reward, done)
... |
def test_optplan_context_stack_get_node_model():
ctx = optplan.OptplanContext()
ctx.register_node_type('testmeta', 'dummy', DummyModel, dummy_creator)
ctx_stack = optplan.OptplanContextStack()
ctx_stack.push(ctx)
assert (ctx_stack.get_node_model('testmeta', 'dummy') == DummyModel)
assert (ctx_st... |
def wait_for_report_handler(queue: Queue, title: str, timeout: float=service.WORKER_FINISH_TIMEOUT) -> service.Event:
start = time.monotonic()
spinner = create_spinner(SPINNER_REPETITION_NUMBER)
while queue.empty():
if ((time.monotonic() - start) >= timeout):
return service.Timeout()
... |
def discrim(pols):
flist = tuple(pols)
(x, y) = flist[0].parent().gens()
field = flist[0].base_ring()
pol_ring = PolynomialRing(field, (x,))
def discrim_pairs(f, g):
if (g is None):
return pol_ring(f.discriminant(y))
return pol_ring(f.resultant(g, y))
pairs = ([(f, No... |
def maybe_download(directory, filename, url):
if (not os.path.exists(directory)):
print(('Creating directory %s' % directory))
os.mkdir(directory)
filepath = os.path.join(directory, filename)
if (not os.path.exists(filepath)):
print(('Downloading %s to %s' % (url, filepath)))
... |
def load_alpaca_gpt4():
dataset_dict = load_dataset('shibing624/alpaca-zh')
if isinstance(dataset_dict, Dataset):
dataset_dict = DatasetDict({'train': dataset_dict})
dataset_dict = cast(DatasetDict, dataset_dict)
def concat_instruction_and_input(batch):
return {'text1': [f'{instruction} ... |
class SceneManager(object):
def __init__(self, stats_manager=None):
self._cutting_list = []
self._event_list = []
self._detector_list = []
self._sparse_detector_list = []
self._stats_manager = stats_manager
self._num_frames = 0
self._start_frame = 0
def ad... |
class LSTMAttention(nn.Module):
def __init__(self, input_size, hidden_size, batch_first=True, attn_type='soft'):
super(LSTMAttention, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.batch_first = batch_first
self.lstm_cell = nn.LSTMCell(input... |
_model
def resnet50t(pretrained=False, **kwargs):
    """ResNet-50-T: deep tiered 32-channel stem with avg-pool downsampling."""
    cfg = dict(
        block=Bottleneck,
        layers=[3, 4, 6, 3],
        stem_width=32,
        stem_type='deep_tiered',
        avg_down=True,
        **kwargs,
    )
    return _create_resnet('resnet50t', pretrained, **cfg)
def __make_power_2(img, base, method=Image.BICUBIC):
    """Resize *img* so both sides are the nearest multiple of *base*.

    Returns the image unchanged when it already conforms; otherwise warns
    once about the size change and resizes with *method*.
    """
    ow, oh = img.size
    w = int(round(ow / base) * base)
    h = int(round(oh / base) * base)
    if (w, h) == (ow, oh):
        return img
    __print_size_warning(ow, oh, w, h)
    return img.resize((w, h), method)
def initialize_model_parallel(model_parallel_size_):
if (torch.distributed.get_rank() == 0):
print('> initializing model parallel with size {}'.format(model_parallel_size_))
assert torch.distributed.is_initialized()
world_size = torch.distributed.get_world_size()
model_parallel_size = min(model_... |
def _get_polygons(graph_parse, name, is_variable, *args):
assert isinstance(graph_parse, GraphParse)
line_graph = graph_parse.line_graph
if all((line_graph.has_edge(args[(idx - 1)], arg) for (idx, arg) in enumerate(args))):
convex = polygon_is_convex(tuple((graph_parse.intersection_points[key] for k... |
class DuplicateIntRequestingTask(MockTask):
    """Mock task that records each (i, j) call and echoes preset results."""

    def run(self, i: int, j: int):
        """Append the argument pair to the call log, return canned results."""
        pair = (i, j)
        self.calls.append(pair)
        return self.results
def log_metrics(mode, step, metrics):
    """Log every metric in *metrics* at INFO level.

    Args:
        mode: label for the phase being logged (e.g. 'train', 'eval').
        step: integer step number the metrics belong to.
        metrics: mapping of metric name -> numeric value.
    """
    # Iterate items() to avoid a second dict lookup per key, and pass lazy
    # %-style args so formatting is skipped entirely when INFO is disabled.
    for name, value in metrics.items():
        logging.info('%s %s at step %d: %f', mode, name, step, value)
def make_conv_layer(hidden_ch, output_ch, padding='same', bin_dtype=bb.DType.BIT):
return bb.Sequential([bb.Convolution2d(bb.Sequential([bb.DifferentiableLut([(hidden_ch * 6), 1, 1], bin_dtype=bin_dtype), bb.DifferentiableLut([hidden_ch, 1, 1], connection='serial', bin_dtype=bin_dtype)]), filter_size=(1, 1), fw_dty... |
_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
feature_extraction_class = (MarkupLMFeatureExtractor if is_bs4_available() else None)
def setUp(self):
self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)
def feat_extract_dict(self):
... |
class build_ext(old_build_ext):
description = 'build C/C++/F extensions (compile/link to build directory)'
user_options = (old_build_ext.user_options + [('fcompiler=', None, 'specify the Fortran compiler type'), ('parallel=', 'j', 'number of parallel jobs')])
help_options = (old_build_ext.help_options + [('... |
def data_folder():
    """Return the kgof experiment data path from sbibm's bundled config."""
    from sbibm.third_party.kgof import config
    return config.expr_configs['data_path']
def pick_quantile_value_by_action(values: torch.Tensor, action: torch.Tensor, keepdim: bool=False) -> torch.Tensor:
assert (values.ndim == 3)
action_size = values.shape[1]
one_hot = F.one_hot(action.view((- 1)), num_classes=action_size)
mask = cast(torch.Tensor, one_hot.view((- 1), action_size, 1).float... |
def generate_code(sdfg, validate=True) -> List[CodeObject]:
from dace.codegen.targets.target import TargetCodeGenerator
if validate:
sdfg.validate()
if Config.get_bool('testing', 'serialization'):
from dace.sdfg import SDFG
import difflib
import filecmp
import shutil
... |
def _same_ImageCollection(collection1, collection2):
if (len(collection1) != len(collection2)):
return False
for (ext1, ext2) in zip(collection1, collection2):
if (not np.all((ext1 == ext2))):
return False
return True |
class BBPSSW(EntanglementProtocol):
circuit = Circuit(2)
circuit.cx(0, 1)
circuit.measure(1)
def __init__(self, own: 'Node', name: str, kept_memo: 'Memory', meas_memo: 'Memory'):
assert (kept_memo != meas_memo)
EntanglementProtocol.__init__(self, own, name)
self.memories: List[Me... |
def fc_stack_dropout(nc_inp, nc_out, nlayers):
modules = []
modules.append(nn.Linear(nc_inp, 1024, bias=True))
modules.append(nn.ReLU())
modules.append(nn.Dropout())
modules.append(nn.Linear(1024, 1024, bias=True))
modules.append(nn.ReLU())
modules.append(nn.Dropout())
modules.append(nn.... |
def cocostuff_palette():
return [[0, 192, 64], [0, 192, 64], [0, 64, 96], [128, 192, 192], [0, 64, 64], [0, 192, 224], [0, 192, 192], [128, 192, 64], [0, 192, 96], [128, 192, 64], [128, 32, 192], [0, 0, 224], [0, 0, 64], [0, 160, 192], [128, 0, 96], [128, 0, 192], [0, 32, 192], [128, 128, 224], [0, 0, 192], [128, 1... |
def get_measurements(domain, workload):
lookup = {}
for attr in domain:
n = domain.size(attr)
lookup[attr] = Identity(n)
lookup['age'] = EkteloMatrix(np.load('prefix-85.npy'))
lookup['fnlwgt'] = EkteloMatrix(np.load('prefix-100.npy'))
lookup['capital-gain'] = EkteloMatrix(np.load('pr... |
def get_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--asr-transcript', type=str, help='Path to the transcript file.')
parser.add_argument('--manifest', required=True)
parser.add_argument('--prompts-description', required=True)
parser.add_argument('--cut-id', a... |
class Item():
    """Base pipeline item; subclasses override these no-op hooks."""

    def infer(self, context):
        """Run inference for this item; the base implementation does nothing."""
        return None

    def depend(self, pipeline):
        """Return this item's dependencies; none by default."""
        return []

    def evaluate(self, pipeline):
        """Evaluate this item; the base implementation yields no result."""
        return None
class StatementCheckedCoverageTestFitness(ff.TestCaseFitnessFunction):
def __init__(self, executor: AbstractTestCaseExecutor, goal: CheckedCoverageGoal):
super().__init__(executor, goal.code_object_id)
self._goal = goal
def compute_fitness(self, individual: tcc.TestCaseChromosome) -> float:
... |
_model_architecture('model_parallel_transformer_lm', 'transformer_lm_megatron_big')
def transformer_lm_megatron_big(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 3072)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', (3072 * 6))
args.decoder_layers = getattr(args, 'de... |
def get_param_space(trial):
trial.suggest_float('learning_rate', 0.0001, 0.001, log=True)
trial.suggest_float('lr_decay_rate', 0.7, 1.0, log=True)
trial.suggest_categorical('weight_decay', [1e-06, 1e-07, 0])
trial.suggest_categorical('batch_size', [16, 32, 64])
trial.suggest_int('pe_embed_k', 0, 20)... |
def tucker_rank(layer):
W = layer.weight.data
mode3 = tl.base.unfold(W, 0)
mode4 = tl.base.unfold(W, 1)
diag_0 = EVBMF(mode3)
diag_1 = EVBMF(mode4)
d1 = diag_0.shape[0]
d2 = diag_1.shape[1]
del mode3
del mode4
del diag_0
del diag_1
return [int((np.ceil((d1 / 16)) * 16)), ... |
class Function_arctan(GinacFunction):
    """Symbolic inverse tangent backed by GiNaC's ``arctan``."""

    def __init__(self):
        """Register the function along with its per-system conversion names."""
        conversions = dict(
            maxima='atan',
            sympy='atan',
            mathematica='ArcTan',
            fricas='atan',
            giac='atan',
        )
        GinacFunction.__init__(
            self, 'arctan', latex_name='\\arctan', conversions=conversions
        )
class MixedPrecisionActivationOnlyBopsTest(MixedPrecisionBopsAllWeightsLayersTest):
def __init__(self, unit_test):
super().__init__(unit_test, mixed_precision_candidates_list=[(8, 8), (8, 4), (8, 2)])
def get_kpi(self):
return KPI(bops=)
def compare(self, quantized_model, float_model, input_... |
def create_arg_parser(allow_derivatives=False):
from pyparsing import Literal, Word, OneOrMore, delimitedList, Group, StringStart, StringEnd, Combine, Optional, nums, alphas, alphanums
ident = Word(alphas, (alphanums + '_'))
inumber = Word(('+-' + nums), nums)
history = Optional(((Literal('[').suppress(... |
def WeakTableau(t, k, inner_shape=[], representation='core'):
if (representation == 'core'):
return WeakTableau_core(t, k)
elif (representation == 'bounded'):
return WeakTableau_bounded(t, k)
elif (representation == 'factorized_permutation'):
return WeakTableau_factorized_permutation... |
def test_validator_init_no_dataloader():
    """Validator construction must fail when the dataloader is None."""
    expected = 'Argument: dataloader must be set.'
    with pytest.raises(TypeError, match=expected):
        Validator(model, None, metrics, objectives)
class _CSC(Function):
def forward(ctx, cpgs, labels, preds, rois, tau, debug_info, fg_threshold, mass_threshold, density_threshold, area_sqrt, context_scale):
PL = labels.clone().detach()
NL = torch.zeros(labels.size(), dtype=labels.dtype, device=labels.device)
W = _C.csc_forward(cpgs, label... |
def conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
in_channels = input.shape[1]
out_channels = grad_output.shape[1]
min_batch = input.shape[0]
grad_output = grad_output.cont... |
def OP_surgery_head(model, gpu_id):
num_op = len(model.net._net.op)
for i in range(num_op):
if (('gpu_' + str(gpu_id)) not in model.net._net.op[i].input[0]):
continue
if ((cfg.WSL.CPG_PRE_BLOB + '_grad') in model.net._net.op[i].input[0]):
start_op = i
break
... |
class _BackendRetriever():
__slots__ = ['_array_types', '_array_subtypes', 'numpy_backend', 'jax_backend', 'pytorch_backend', 'tensorflow_backend']
def __init__(self):
self._array_types = set()
self._array_subtypes = set()
def __getattr__(self, name):
if (name == 'numpy_backend'):
... |
class TestConvolution(hu.HypothesisTestCase):
((not workspace.has_gpu_support), 'No gpu support')
(stride=st.integers(1, 3), pad=st.integers(0, 3), kernel=st.integers(1, 5), dilation=st.integers(1, 3), size=st.integers(7, 10), input_channels=st.integers(1, 8), output_channels=st.integers(1, 8), batch_size=st.in... |
def create_caffe2_op_test_case(op_bench, test_config):
    """Wrap *op_bench* in a Caffe2 test case and derive its display name.

    Returns a (name, test_case) pair; the name concatenates module name,
    framework tag, and the stringified (possibly normalized) config.
    """
    test_case = Caffe2OperatorTestCase(op_bench, test_config)
    # Read the config/bench back off the test case rather than the inputs,
    # in case construction normalized them.
    bench = test_case.op_bench
    config = test_case.test_config
    func_name = '{}{}{}'.format(bench.module_name(), test_case.framework, str(config))
    return (func_name, test_case)
class LevelMapper(object):
def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-06):
self.k_min = k_min
self.k_max = k_max
self.s0 = canonical_scale
self.lvl0 = canonical_level
self.eps = eps
self.anchor_sizes = torch.tensor(cfg.MODEL.RPN.AN... |
_utils.test(exclude=[ti.opengl, ti.gles])
def test_arg_read():
    # Scalar (0-d) i32 field the kernel writes into.
    x = ti.field(ti.i32, shape=())
    # NOTE(review): this def presumably carried a @ti.kernel decorator that
    # was lost in extraction (the mangled line above) -- confirm.
    def func(a: ti.i8, b: ti.i32):
        x[None] = b
    # Only `b` is stored; `a` (255 passed as i8) is deliberately unused,
    # exercising argument reads of both widths.
    func(255, 2)
    assert (x[None] == 2)
class RestSource(SourceLanguage):
start_finish_can_overlap = True
def _init(self):
self.link_all = False
self.last_line = ''
self.last_indent = (- 1)
self.first_line = False
self.skipping = False
def starting_docstring(self, line):
if link_all.match(line):
... |
def painting(clip, saturation=1.4, black=0.006):
    """Apply the painting effect frame-by-frame to *clip* via fl_image."""
    def apply_effect(frame):
        # Per-frame transform with the chosen saturation/black levels.
        return to_painting(frame, saturation, black)
    return clip.fl_image(apply_effect)
def quat_to_ee(quaternions: Union[(torch.Tensor, numpy.ndarray)], convention: str='xyz') -> Union[(torch.Tensor, numpy.ndarray)]:
if (quaternions.shape[(- 1)] != 4):
raise ValueError(f'Invalid input quaternions f{quaternions.shape}.')
t = Compose([quaternion_to_matrix, matrix_to_euler_angles])
retur... |
(branch_distance=st.floats())
def test_branch_distance(branch_distance, control_flow_distance):
    # Hypothesis st.floats() may generate negatives or NaN; both fail this
    # guard (NaN >= 0 is False), so only valid distances reach the body.
    assume((branch_distance >= 0))
    control_flow_distance.branch_distance = branch_distance
    # Property round-trip: the setter must preserve the value exactly.
    assert (control_flow_distance.branch_distance == branch_distance)
def get_activation(activation):
if isinstance(activation, list):
return [get_activation(act) for act in activation]
elif isinstance(activation, str):
if hasattr(k.activations, activation):
return getattr(k.activations, activation)
elif hasattr(k.backend, activation):
... |
class Bottleneck(nn.Module):
def __init__(self, in_planes, growth_rate):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, (4 * growth_rate), kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d((4 * growth_rate))
self.c... |
def main():
(args, cfg) = parse_config()
if (args.launcher == 'none'):
dist_test = False
total_gpus = 1
else:
(total_gpus, cfg.LOCAL_RANK) = getattr(common_utils, ('init_dist_%s' % args.launcher))(args.tcp_port, args.local_rank, backend='nccl')
dist_test = True
if (args.b... |
class ActorNet():
def __init__(self, num_states, num_actions):
self.g = tf.Graph()
with self.g.as_default():
self.sess = tf.InteractiveSession()
(self.W1_a, self.B1_a, self.W2_a, self.B2_a, self.W3_a, self.B3_a, self.actor_state_in, self.actor_model) = self.create_actor_net(n... |
def test_step_without_reset():
    """Stepping a freshly built TMazeEnv (never reset) must trip an assert."""
    environment = TMazeEnv()
    with pytest.raises(AssertionError):
        environment.step(1)
class BaseOpenAIPlugin(AutoGPTPluginTemplate):
def __init__(self, manifests_specs_clients: dict):
self._name = manifests_specs_clients['manifest']['name_for_model']
self._version = manifests_specs_clients['manifest']['schema_version']
self._description = manifests_specs_clients['manifest']['... |
def GenerateSM80_TensorOp_1688_trmm_complex(manifest, cuda_version):
if (not CudaToolkitVersionSatisfies(cuda_version, 11, 0)):
return
layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor)]
side_mode... |
def _wrap_method_for_model_recording(model, method_name, cache_name):
method = getattr(torch.Tensor, method_name)
(method)
def wrapped(*args, **kwargs):
if (not hasattr(model, cache_name)):
setattr(model, cache_name, [])
cache = getattr(model, cache_name)
res = method(*ar... |
def adadelta(opfunc, x, config, state=None):
if ((config is None) and (state is None)):
raise ValueError('adadelta requires a dictionary to retain state between iterations')
state = (state if (state is not None) else config)
rho = config.get('rho', 0.9)
eps = config.get('eps', 1e-06)
wd = co... |
def distributed_init(args):
if (args.distributed_world_size == 1):
raise ValueError('Cannot initialize distributed with distributed_world_size=1')
if torch.distributed.is_initialized():
warnings.warn('Distributed is already initialized, cannot initialize twice!')
else:
print('| distr... |
class ArrayInput(Input):
    """Input variant that coerces assigned output values to numpy arrays."""

    def _set_output_value(self, output, value):
        """Store *value* on attribute *output* as an ndarray.

        Uses ``np.asarray`` instead of ``np.array(value, copy=False)``:
        the two are equivalent on NumPy 1.x (reuse the buffer when
        possible, copy only if needed), but ``copy=False`` became a hard
        "never copy" error in NumPy 2.0, which would make this raise for
        any input that requires conversion.
        """
        setattr(self, output, np.asarray(value))
def get_segment_name(tree):
    """Build a '/'-joined name for *tree* from each node's name attribute.

    Nodes lacking a 'name' attribute are tolerated only for 'segment'
    tags, which contribute the literal component '1'; anything else is
    treated as a bug.
    """
    def component(node):
        attrs = node.attrib
        if 'name' in attrs:
            return attrs['name']
        if node.tag == 'segment':
            return '1'
        assert False, ('unknown name: %r, %r' % (node, vars(node)))
    return '/'.join(component(node) for node in tree)
def test_len():
    """len(EventList) tracks pushes/pops, and pop returns what was pushed."""
    events = EventList()
    assert len(events) == 0
    pushed = Event(0, None)
    events.push(pushed)
    assert len(events) == 1
    popped = events.pop()
    assert len(events) == 0
    assert popped == pushed
def Mixed(geometry):
    """Build a two-field mixed CG1 function space on geometry's mesh."""
    mesh = geometry.mesh
    scalar = FiniteElement('CG', mesh.ufl_cell(), 1)
    return FunctionSpace(mesh, MixedElement([scalar, scalar]))
.parametrize('sparse_format', ['sparse_csr', 'sparse_csc'])
def test_smoten_sparse_input(data, sparse_format):
(X, y) = data
X = OneHotEncoder().fit_transform(X)
X = _convert_container(X, sparse_format)
with pytest.warns(DataConversionWarning, match='is not really efficient'):
(X_res, y_res) = S... |
def crossover(parent_A, parent_B):
parent_smiles = [Chem.MolToSmiles(parent_A), Chem.MolToSmiles(parent_B)]
try:
Chem.Kekulize(parent_A, clearAromaticFlags=True)
Chem.Kekulize(parent_B, clearAromaticFlags=True)
except:
pass
for i in range(10):
if (random.random() <= 0.5):... |
class TestOntonotes(AllenNlpTestCase):
def test_dataset_iterator(self):
reader = Ontonotes()
annotated_sentences = list(reader.dataset_iterator('tests/fixtures/conll_2012/'))
annotation = annotated_sentences[0]
assert (annotation.document_id == 'test/test/01/test_001')
assert... |
def _symbol2py(ctx, s):
    """Convert a Z3 symbol *s* (in context *ctx*) to its Python form."""
    if Z3_get_symbol_kind(ctx.ref(), s) != Z3_INT_SYMBOL:
        # Named symbols map straight to their string representation.
        return Z3_get_symbol_string(ctx.ref(), s)
    # Numbered symbols render as k!<n>, matching Z3's own printing.
    return ('k!%s' % Z3_get_symbol_int(ctx.ref(), s))
def DownloadProgressProvider(progress_bar, max=None):
    """Return the iter method of the appropriate progress-bar instance.

    With an unknown total (max is None or 0) the indeterminate variant
    (index 1) is used; otherwise the bounded variant (index 0) is built
    with the given total.
    """
    # Keep the explicit None/0 test: other falsy values (e.g. '') must
    # still take the bounded branch, exactly as before.
    indeterminate = (max is None) or (max == 0)
    bar_pair = BAR_TYPES[progress_bar]
    if indeterminate:
        return bar_pair[1]().iter
    return bar_pair[0](max=max).iter
def main():
print('Starting run task.')
parser = argparse.ArgumentParser(description='Downstream tasks')
parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')
parser.add_argument('--dataset', type=str, default='Amazon-Video_Games', help='dataset name (default:... |
class Specification(Freezable):
def __init__(self, mat, rhs):
if (not isinstance(mat, np.ndarray)):
assert isinstance(rhs, list)
mat = np.array(mat, dtype=float)
if (not isinstance(rhs, np.ndarray)):
assert isinstance(rhs, list)
rhs = np.array(rhs, dty... |
def test_detr_head_loss():
s = 256
img_metas = [{'img_shape': (s, s, 3), 'scale_factor': 1, 'pad_shape': (s, s, 3), 'batch_input_shape': (s, s)}]
config = ConfigDict(dict(type='DETRHead', num_classes=80, in_channels=200, transformer=dict(type='Transformer', encoder=dict(type='DetrTransformerEncoder', num_la... |
def calculate_activation_statistics(images, sess, batch_size=200, verbose=False):
    """Compute the mean vector and covariance matrix of activations.

    Runs *images* through the network via ``get_activations`` and returns
    (mu, sigma) with sigma computed over rows (one activation per image).
    """
    activations = get_activations(images, sess, batch_size, verbose)
    mu = np.mean(activations, axis=0)
    sigma = np.cov(activations, rowvar=False)
    return (mu, sigma)
class XxxLayer(nn.Module):
def __init__(self, config):
super(XxxLayer, self).__init__()
self.attention = XxxAttention(config)
self.intermediate = XxxIntermediate(config)
self.output = XxxOutput(config)
def forward(self, hidden_states, attention_mask=None, head_mask=None):
... |
class Cnn1DC3(Convolution1DArchitectureBase, NeuralNetworkTrainingDefault):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.use_gpu = False
def build_model(self, x_shape, y_shape):
self.assert_shapes(x_shape, y_shape)
assert (x_shape[1:] == (606, 1, 1)... |
class TIntPrV(object):
thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
__repr__ = _swig_repr
__swig_destroy__ = _snap.delete_TIntPrV
def __init__(self, *args):
_snap.TIntPrV_swiginit(self, _snap.new_TIntPrV(*args))
def Load(self, SI... |
class PoissonNLLLoss(_Loss):
def __init__(self, log_input=True, full=False, size_average=None, eps=1e-08, reduce=None, reduction='elementwise_mean'):
super(PoissonNLLLoss, self).__init__(size_average, reduce, reduction)
self.log_input = log_input
self.full = full
self.eps = eps
d... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.