code stringlengths 101 5.91M |
|---|
class DifferentiableLut(SparseModel):
def __init__(self, output_shape=None, *, input_shape=None, connection='random', binarize=True, batch_norm=True, momentum=0.0, gamma=0.3, beta=0.5, seed=1, name=None, N=6, bin_dtype=bb.DType.FP32, real_dtype=bb.DType.FP32, core_model=None):
if (output_shape is None):
... |
def get_musicxml_schema_path() -> str:
    """Return the absolute path to the bundled MusicXML XSD schema.

    The schema file is expected to sit next to this module on disk.
    """
    here = Path(__file__).resolve().parent
    return str(here / 'musicxml.xsd')
.xfail(reason='Needs to be implemented')
def test_trace_vpacket_volley(packet, verysimple_packet_collection, verysimple_3vpacket_collection, verysimple_numba_radial_1d_geometry, verysimple_numba_model, verysimple_opacity_state):
np.random.seed(1)
packet.initialize_line_id(verysimple_opacity_state, verysimple_nu... |
class TFSegformerMixFFN(tf.keras.layers.Layer):
def __init__(self, config: SegformerConfig, in_features: int, hidden_features: int=None, out_features: int=None, **kwargs):
super().__init__(**kwargs)
out_features = (out_features or in_features)
self.dense1 = tf.keras.layers.Dense(hidden_featu... |
def job_fssdJ5p_opt(p, data_source, tr, te, r):
    """FSSD-opt job variant using J=5 test locations.

    Delegates to job_fssdJ1q_opt, supplying a covariance-draw null
    simulator seeded with the trial index r.
    """
    return job_fssdJ1q_opt(
        p, data_source, tr, te, r, J=5,
        null_sim=gof.FSSDH0SimCovDraw(n_draw=2000, n_simulate=2000, seed=r),
    )
def register_Ns3TimeChecker_methods(root_module, cls):
    """Register ns3::TimeChecker constructors on the wrapped class."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])
class TinyImageNet(torchvision.datasets.ImageFolder):
def __init__(self, root, transform):
super(TinyImageNet, self).__init__(root, transform)
self.uq_idxs = np.array(range(len(self)))
def __getitem__(self, item):
(img, label) = super().__getitem__(item)
uq_idx = self.uq_idxs[ite... |
class SRUCell(tf.contrib.rnn.RNNCell):
def __init__(self, num_units, activation=None, reuse=None):
super(SRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = (activation or tf.nn.tanh)
def state_size(self):
return self._num_units
def output_siz... |
@_numpy_output(positive=True)
def test_auglshift(A: dace.int64[(5, 5)], B: dace.int64[(5, 5)]):
    """Augmented left-shift on whole arrays: B <<= A, then return B."""
    # Fix: the decorator call above was a bare statement (missing '@'),
    # so it never actually decorated the function.
    B <<= A
    return B
def add_multi_constructor(tag_prefix, multi_constructor, Loader=None):
if (Loader is None):
loader.Loader.add_multi_constructor(tag_prefix, multi_constructor)
loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor)
loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_co... |
def selectCandidateFramesFarthest(files, numFrames, dataSet, frameSpacing=10, startFrame=0):
confThresh = 0.5
confCntThresh = 12
neighborThresh = 1
objSegThresh = 0.01
finalIndList = []
handPklDataList = []
ObjPklDataList = []
ind = startFrame
while (ind < len(files)):
file =... |
class MixedTypeKNeighbors():
def __init__(self, n_neighbors: int=5, n_jobs: int=(- 2)):
self._n_neighbors = n_neighbors
self._n_jobs = n_jobs
def fit(self, candidates: pd.DataFrame, ctypes: Optional[Dict[(str, List[str])]]=None):
self._candidates = candidates
self._ctypes = ctype... |
def get_most_edited_wikipedia_articles(all_wiki_titles_intros):
most_edited_titles = []
most_edited_titles.extend(get_most_edited_wikipedia_titles('2023', '01'))
most_edited_titles.extend(get_most_edited_wikipedia_titles('2023', '02'))
most_edited_titles.extend(get_most_edited_wikipedia_titles('2023', '... |
def _CopyConditionBlobNet(condition_blob):
    """Build a tiny net that copies `condition_blob` into a fresh blob.

    Returns:
        (net, out_blob): the copy net and the blob holding the copy,
        with the copy marked as an external output of the net.
    """
    net = core.Net('copy_condition_blob_net')
    copied = net.Copy(condition_blob)
    net.AddExternalOutput(copied)
    return (net, copied)
def id_index_to_id_(id_list, loader):
    """Map vocabulary indices back to their string ids via the loader's vocab."""
    itos = loader.id_field.vocab.itos
    return [itos[index] for index in id_list]
class LidarNVSPoisson(LidarNVSMeshing):
def _run_poisson(pcd: o3d.geometry.PointCloud, depth: int, min_density: int) -> o3d.geometry.TriangleMesh:
print('Start _run_poisson()')
s_time = time.time()
(mesh, densities) = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=depth... |
def merge_train_valid(train_data, valid_data, train_ixs, valid_ixs):
if ((train_data.shape[0] == train_ixs.shape[0]) and (valid_data.shape[0] == valid_ixs.shape[0])):
data = np.full_like(np.concatenate([train_data, valid_data]), np.nan)
if (min(min(train_ixs), min(valid_ixs)) > 0):
train... |
class DataReader():
def __init__(self, h5_file_name, h5_image_name, shuffle=True, prefetch_num=8):
self.h5_file_name = h5_file_name
self.h5_image = h5_image_name
self.shuffle = shuffle
self.prefetch_num = prefetch_num
self.n_batch = 0
self.n_epoch = 0
self.h5_... |
class OutputSplitter(object):
def __init__(self, nextFile, max_file_size=0, compress=True):
self.nextFile = nextFile
self.compress = compress
self.max_file_size = max_file_size
self.file = self.open(next(self.nextFile))
def reserve(self, size):
if ((self.file.tell() + siz... |
def inverse_softplus(x):
    """Inverse of softplus, log(exp(x) - 1), computed via the clamped-log helper.

    Non-tensor inputs are converted to tensors first.
    """
    x = x if torch.is_tensor(x) else torch.tensor(x)
    return log_clamped(torch.exp(x) - 1.0)
def get_correctors_from_file_hdf5(coefs_filename='coefs.h5', dump_names=None):
if (dump_names == None):
coefs = Coefficients.from_file_hdf5(coefs_filename)
if hasattr(coefs, 'save_names'):
dump_names = coefs.save_names
else:
raise ValueError(' "filenames" coefficient ... |
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, inv=False, flow=False):
for t in self.transforms:
img = t(img, inv, flow)
return img
def randomize_parameters(self):
for t in self.transforms:
t... |
def _get_par_data_and_metadata():
date = datetime.datetime.strptime('2020-01-01', '%Y-%m-%d')
data = pd.DataFrame({'column1': [1.0, 2.0, 1.5, 1.3], 'date': [date, date, date, date], 'column2': ['b', 'a', 'a', 'c'], 'entity': [1, 1, 2, 2], 'context': ['a', 'a', 'b', 'b']})
metadata = SingleTableMetadata()
... |
class _FP16OptimizerMixin(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def build_fp32_params(cls, params):
total_param_size = sum((p.data.numel() for p in params))
fp32_params = params[0].new(0).float().new(total_param_size)
offset = 0
f... |
class RequirementSet(object):
def __init__(self, require_hashes=False):
self.requirements = OrderedDict()
self.require_hashes = require_hashes
self.requirement_aliases = {}
self.unnamed_requirements = []
self.successfully_downloaded = []
self.reqs_to_cleanup = []
... |
class TestErfOp(serial.SerializedTestCase):
(X=hu.tensor(elements=hu.floats(min_value=(- 0.7), max_value=0.7)), **hu.gcs)
(deadline=1000)
def test_erf(self, X, gc, dc):
op = core.CreateOperator('Erf', ['X'], ['Y'])
self.assertReferenceChecks(gc, op, [X], (lambda x: (np.vectorize(math.erf)(X)... |
class HLibComponent(LibComponent):
    """Header-only library component: installs headers, compiles nothing."""

    def __init__(self, name, path, includes2install):
        # Empty source list: this component ships headers only.
        LibComponent.__init__(self, name, path, [], includes2install)

    def mk_makefile(self, out):
        """Nothing to emit for a header-only component."""
        pass
class MREval(BinaryClassifierEval):
def __init__(self, task_path, seed=1111):
logging.debug('***** Transfer task : MR *****\n\n')
pos = self.loadFile(os.path.join(task_path, 'rt-polarity.pos'))
neg = self.loadFile(os.path.join(task_path, 'rt-polarity.neg'))
super(self.__class__, self... |
class Encryption():
__target_folder: str = '/tmp/tmp'
__target_file_exts: list[str] = ['txt']
__encrypted_file_signature: bytes = b'WANAKRY!'
__encrypted_file_extension: str = 'wncry'
__fs_len: int = 8
__master_pri_key: bytes
__master_pub_key: bytes
def setEncFileSig(self, encrypted_file... |
def is_bliss(filename):
try:
corpus_file = open(filename, 'rb')
if filename.endswith('.gz'):
corpus_file = gzip.GzipFile(fileobj=corpus_file)
context = iter(ElementTree.iterparse(corpus_file, events=('start', 'end')))
(_, root) = next(context)
return True
exce... |
def check_reproduce_tree(transition_scheme):
text = '((SBARQ (WHNP (WP Who)) (SQ (VP (VBZ sits) (PP (IN in) (NP (DT this) (NN seat))))) (. ?)))'
trees = tree_reader.read_trees(text)
model = SimpleModel(transition_scheme)
transitions = transition_sequence.build_sequence(trees[0], transition_scheme)
s... |
def create_train_model(model_creator, hparams, scope=None, num_workers=1, jobid=0, extra_args=None):
src_file = ('%s.%s' % (hparams.train_prefix, hparams.src))
tgt_file = ('%s.%s' % (hparams.train_prefix, hparams.tgt))
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
g... |
def neuralOptimiser(ws, ti, xs, ys, min_yaw=(- 30), max_yaw=30, plots=False, plots_ini=False, floris_gain=False, mode='yaw', results=True):
print()
print()
print('In NEURAL Optimiser...')
layout = np.concatenate((xs, ys), axis=0)
if (mode == 'yaw'):
power_ini = (- superposition(np.zeros(xs.s... |
def _handle_src(option, opt_str, value, parser):
value = os.path.abspath(value)
setattr(parser.values, option.dest, value) |
class ModelCombine(BaseModel):
def __init__(self, opt):
super(ModelCombine, self).__init__(opt)
self._opt = opt
self._init_create_networks()
if self._is_train:
self._init_train_vars()
if ((not self._is_train) or (self._opt.load_epoch > 0)):
self.load()... |
def test_align_first():
    """align_first on a single-face photo should leave few all-black pixels."""
    faces = RetinaFace.extract_faces(img_path='tests/dataset/img11.jpg', align_first=True)
    # Count pixels that are exactly (0, 0, 0) across all channels.
    black_mask = np.all(faces[0] == 0, axis=2)
    assert np.sum(black_mask) < THRESHOLD
    logger.info(' Enabled align_first test for single face photo done')
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
cls.add... |
def setup(opt):
if (opt.caption_model in ['fc', 'show_tell']):
print(('Warning: %s model is mostly deprecated; many new features are not supported.' % opt.caption_model))
if (opt.caption_model == 'fc'):
print('Use newfc instead of fc')
if (opt.caption_model == 'fc'):
model = ... |
def match_all(mask, val):
    """Wrapper for the CUDA match_all_sync intrinsic (needs compute >= 7.0)."""
    capability = impl.get_cuda_compute_capability()
    if capability < 70:
        raise AssertionError('match_all intrinsic only available on compute_70 or higher')
    return impl.call_internal('cuda_match_all_sync_i32', mask, val, with_runtime_context=False)
_grad()
def build_pruned_model(model, state_dict):
for i in range(model.encoder.n_layers):
shape_fc1 = state_dict[(('encoder.blocks.' + str(i)) + '.mlp.fc1.weight')].shape
shape_fc2 = state_dict[(('encoder.blocks.' + str(i)) + '.mlp.fc2.weight')].shape
getattr(model.encoder.blocks, str(i)).m... |
def read_cqa_examples(input_file, is_select, is_training):
with open(input_file, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
lines = list(reader)
if (is_training and (lines[0][(- 2)] != 'label')):
raise ValueError('For training, the input file must contain a label column.')
i... |
def _get_solver(M, sparse=False, lstsq=False, sym_pos=True, cholesky=True, permc_spec='MMD_AT_PLUS_A'):
try:
if sparse:
if lstsq:
def solve(r, sym_pos=False):
return sps.linalg.lsqr(M, r)[0]
elif cholesky:
solve = cholmod(M)
... |
def gauss_sum(char_value, finite_field):
from sage.categories.fields import Fields
if (finite_field not in Fields().Finite()):
raise ValueError('second input must be a finite field')
ring = char_value.parent()
q = finite_field.cardinality()
p = finite_field.characteristic()
gen = finite_... |
def _scale_down_image(img, max_img_size):
(org_h, org_w) = img.shape[0:2]
(h, w) = img.shape[0:2]
if (max_img_size[0] < w):
h *= (float(max_img_size[0]) / float(w))
w = max_img_size[0]
if (max_img_size[1] < h):
w *= (float(max_img_size[1]) / float(h))
h = max_img_size[1]
... |
def get_class(module_name, class_name):
try:
m = importlib.import_module(module_name)
except ImportError as e:
log(('%s' % e), LogLevel.ERROR)
return False
try:
c = getattr(m, class_name)
except AttributeError as e:
log(('%s' % e), LogLevel.ERROR)
return F... |
def _check_range_conflicts(subset, a, itersym, b, step):
found = False
if isinstance(step, symbolic.SymExpr):
step = step.approx
for (rb, re, _) in subset.ndrange():
m = rb.match(((a * itersym) + b))
if (m is None):
continue
if ((m[a] >= 1) != True):
c... |
class GatLinear(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.linear = nn.Linear((hidden_size * 2), 1)
def forward(self, Q, K, V, adj):
N = K.size()[1]
Q = Q.unsqueeze(1).expand((- 1), N, (- 1))
X = torch.cat((Q, K), dim=2)
alpha = self.line... |
class BenchmarkJPEG(BenchmarkSR):
    """Benchmark dataset for JPEG restoration; LQ subdir is named by quality."""

    def __init__(self, phase, opt):
        # Capture quality before the base-class init runs.
        # NOTE(review): assumes BenchmarkSR.__init__ calls get_subdir — confirm.
        self.quality = opt.quality
        super().__init__(phase, opt)

    def get_subdir(self):
        """Return (HQ_dir_name, LQ_dir_name)."""
        hq_dir = 'HQ'
        lq_dir = '{}'.format(self.quality)
        return (hq_dir, lq_dir)
def save_model(model, destination):
    """Write every entry of model.state_dict() under 'model/' in `destination`.

    Creates the 'model' group first if it does not exist yet.
    """
    if 'model' not in destination:
        destination.create_group('model')
    for name, value in model.state_dict().items():
        save_params(destination, 'model/' + name, value)
def global_variables():
tess_polygons = [[[7.481, 45.184], [7.481, 45.216], [7.526, 45.216], [7.526, 45.184], [7.481, 45.184]], [[7.481, 45.216], [7.481, 45.247], [7.526, 45.247], [7.526, 45.216], [7.481, 45.216]], [[7.526, 45.184], [7.526, 45.216], [7.571, 45.216], [7.571, 45.184], [7.526, 45.184]], [[7.526, 45.21... |
def test_number():
result = ak.operations.from_json(' [ 1 ,2,3.14, 4, 5]', schema={'type': 'array', 'items': {'type': 'number'}})
assert (result.to_list() == [1, 2, 3.14, 4, 5])
assert (str(result.type) == '5 * float64')
result = ak.operations.from_json((' [ 1 ,2,3.14, 4, 5]' * 2), schema={'type': 'arra... |
def _format(val: Any, output_format: str='standard', split: bool=False, errors: str='coarse') -> Any:
val = str(val)
result: Any = []
if (val in NULL_VALUES):
if split:
return [np.nan, np.nan, np.nan, np.nan]
else:
return [np.nan]
if (not validate_iban(val)):
... |
class MultilingualDatasetManager(object):
def __init__(self, args, lang_pairs, langs, dicts, sampling_method):
super().__init__()
self.args = args
self.seed = args.seed
self.lang_pairs = lang_pairs
self.langs = langs
self.dicts = dicts
self.lang_dict = self.cr... |
def build_scorer(choice, tgt_dict):
    """Instantiate a scorer; 'bleu' gets the target dict's special-token ids."""
    name = choice._name if isinstance(choice, DictConfig) else choice
    if name != 'bleu':
        return _build_scorer(choice)
    from fairseq.scoring import bleu
    config = bleu.BleuConfig(pad=tgt_dict.pad(), eos=tgt_dict.eos(), unk=tgt_dict.unk())
    return bleu.Scorer(config)
class SpectralNormStateDictHook(object):
def __init__(self, fn):
self.fn = fn
def __call__(self, module, state_dict, prefix, local_metadata):
if ('spectral_norm' not in local_metadata):
local_metadata['spectral_norm'] = {}
key = (self.fn.name + '.version')
if (key in ... |
def test_parallel(global_dtype):
centers = (np.array([[1, 1], [(- 1), (- 1)], [1, (- 1)]]) + 10)
(X, _) = make_blobs(n_samples=50, n_features=2, centers=centers, cluster_std=0.4, shuffle=True, random_state=11)
X = X.astype(global_dtype, copy=False)
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = Mean... |
def range_indirection(A: dace.float64[(M, N)], x: dace.int32[M]):
    # DaCe program: initialize A to ones, then perform an indirect running
    # accumulation — row A[x[j]] accumulates the row selected by the previous
    # index x[j-1], for j = 1..M-1 (loop-carried dependency, order matters).
    # NOTE(review): assumes every x[j] is a valid row index into A — confirm
    # at the call site; out-of-range indices are not checked here.
    A[:] = 1.0
    for j in range(1, M):
        A[x[j]] += A[x[(j - 1)]]
def make_user_as(asn: int, exchange: str):
    """Create a user AS whose router joins its own net and the given exchange.

    The AS-local network 'net0' also gets real-world access enabled.
    """
    user_as = base.createAutonomousSystem(asn)
    router = user_as.createRouter('router0')
    user_as.createNetwork('net0')
    real.enableRealWorldAccess(user_as, 'net0')
    # Bridge the local network and the internet-exchange network.
    router.joinNetwork('net0')
    router.joinNetwork('ix{}'.format(exchange))
def set_seed(args):
    """Seed python, numpy and torch RNGs from args.seed (and CUDA when GPUs used)."""
    seed_value = args.seed
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed_value)
class GTestXMLTestCase(gtest_test_utils.TestCase):
def AssertEquivalentNodes(self, expected_node, actual_node):
if (expected_node.nodeType == Node.CDATA_SECTION_NODE):
self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
self.assertEquals(expected_node.nodeValue, actual_n... |
def convert_images_to_uint8(images, drange=[(- 1), 1], nchw_to_nhwc=False, shrink=1):
images = tf.cast(images, tf.float32)
if (shrink > 1):
ksize = [1, 1, shrink, shrink]
images = tf.nn.avg_pool(images, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW')
if nchw_to_nhwc:
... |
class XLNetTokenizerFast(PreTrainedTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
padding_side = 'left'
slow_tokenizer_class = XLNetTokenizer
def __init__(self, vocab_fil... |
def register_Ns3FdMtFfMacScheduler_methods(root_module, cls):
cls.add_constructor([param('ns3::FdMtFfMacScheduler const &', 'arg0')])
cls.add_constructor([])
cls.add_method('DoDispose', 'void', [], is_virtual=True)
cls.add_method('GetFfMacCschedSapProvider', 'ns3::FfMacCschedSapProvider *', [], is_virtu... |
def tokenize_text(text):
    """Tokenize `text` with CoreNLP and return the tokens joined by spaces."""
    annotated = nlp.annotate(text, properties={'annotators': 'tokenize, ssplit', 'outputFormat': 'json'})
    words = [
        _str(token['word'])
        for sent in annotated['sentences']
        for token in sent['tokens']
    ]
    return ' '.join(words)
def get_global_memlet_path_dst(sdfg: SDFG, state: SDFGState, edge: MultiConnectorEdge) -> nd.Node:
dst = state.memlet_path(edge)[(- 1)].dst
if (isinstance(dst, nd.AccessNode) and (not sdfg.arrays[dst.data].transient) and (sdfg.parent is not None)):
psdfg = sdfg.parent_sdfg
pstate = sdfg.parent
... |
class Discriminator(nn.Module):
def __init__(self, channel_in=3, recon_level=3):
super(Discriminator, self).__init__()
self.size = channel_in
self.recon_levl = recon_level
self.conv = nn.ModuleList()
self.conv.append(nn.Sequential(nn.Conv2d(in_channels=3, out_channels=32, ker... |
def to_device(data, device):
    """Move every tensor value in the dict `data` to `device`, in place.

    Non-tensor values are left untouched.

    Args:
        data: dict whose values may be torch tensors.
        device: target device accepted by Tensor.to.

    Returns:
        The same dict, for call chaining.
    """
    # Iterate items() so each value is fetched once (was: keys() + per-key lookup).
    # Rebinding existing keys during iteration is safe — the dict size is unchanged.
    for key, value in data.items():
        if torch.is_tensor(value):
            data[key] = value.to(device)
    return data
def get_Babi_1(args=None):
    """Build the bAbI task-1 dataset with its fixed experiment settings."""
    dataset = Dataset(name='babi_1', path='preprocess/Babi/vec_babi_qa1_single-supporting-fact_.p', args=args)
    # Hyperparameters used for this task.
    dataset.vec.word_dim = 50
    dataset.bsize = 50
    dataset.n_iters = 100
    dataset.hidden_size = 32
    return dataset
@_builder('conceptual_caption_3m_instruct')
class ConceptualCaption3MInstructBuilder(BaseDatasetBuilder):
    """Builder for the Conceptual Captions 3M instruction-tuning dataset."""

    # Fix: the registration call above was a bare expression (missing '@'),
    # so the builder was never registered under its dataset name.
    train_dataset_cls = ImageTextPairInstructDataset

    # Maps config aliases to their YAML definition files.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/conceptual_caption/defaults_3m_instruct.yaml'}
def image_aug(images, image_transform, num_views=2):
    """Apply the global transform `num_views` times, returning batched views.

    Args:
        images: input image(s) accepted by the transform.
        image_transform: nested sequence where [0][0] is the global transform.
        num_views: number of global views to produce (default 2 — the
            original hard-coded count, kept for backward compatibility).

    Returns:
        List of `num_views` tensors, each with a leading batch dim of 1.
    """
    global_transform = image_transform[0][0]
    # NOTE(review): the original also read image_transform[0][1] (a local
    # transform) but never applied it; dropped here — confirm it is unneeded.
    return [global_transform(images).unsqueeze(0) for _ in range(num_views)]
(frozen=True)
class RunSpec():
name: str
scenario_spec: ScenarioSpec
adapter_spec: AdapterSpec
metric_specs: List[MetricSpec]
data_augmenter_spec: DataAugmenterSpec = DataAugmenterSpec()
groups: List[str] = field(default_factory=list)
def __post_init__(self):
object.__setattr__(self,... |
def run_wrap(sentinet, image_paths, reference_img_paths, threshold, candidates, saliency, pattern):
results = []
for image_path in tqdm(image_paths):
(fooled_percentage, confidence) = sentinet.run_sentinet(image_path, threshold, reference_img_paths, candidates, saliency=saliency, pattern=pattern)
... |
def write_setup_requirements(cmd, basename, filename):
    """egg-info writer: emit the distribution's setup_requires to `filename`."""
    buffer = io.StringIO()
    _write_requirements(buffer, cmd.distribution.setup_requires)
    cmd.write_or_delete_file('setup-requirements', filename, buffer.getvalue())
def seed_everything(seed):
import random, os
import numpy as np
import torch
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.backends.cudnn.determinist... |
class CythonParameter(gdb.Parameter):
def __init__(self, name, command_class, parameter_class, default=None):
self.show_doc = self.set_doc = self.__class__.__doc__
super(CythonParameter, self).__init__(name, command_class, parameter_class)
if (default is not None):
self.value = d... |
class FunctionTestRunner(unittest.TestCase):
def test_conv2d_bn_info_collection(self):
BNInfoCollectionTest(self).run_test()
def test_conv2d_2bn_info_collection(self):
Conv2D2BNInfoCollectionTest(self).run_test()
def test_conv2d_bn_chain_info_collection(self):
Conv2DBNChainInfoCollec... |
class BigBirdPreTrainedModel(metaclass=DummyObject):
    """Placeholder class that errors helpfully when torch is unavailable."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises unless the torch backend is importable.
        requires_backends(self, ['torch'])
def TCliqueOverlap_GetMaxCliques(G, MinMaxCliqueSize, MaxCliques):
    """Delegate maximal-clique enumeration to the native _snap binding."""
    result = _snap.TCliqueOverlap_GetMaxCliques(G, MinMaxCliqueSize, MaxCliques)
    return result
def _init_by_key(key: PRNGKey, rng: PRNGKey) -> State:
(rng1, rng2, rng3, rng4) = jax.random.split(rng, num=4)
hand = _key_to_hand(key)
vul_NS = jax.random.choice(rng1, jnp.bool_([False, True]))
vul_EW = jax.random.choice(rng2, jnp.bool_([False, True]))
dealer = jax.random.randint(rng3, (1,), 0, 4, ... |
def master_loop():
logger.info('main loop started')
master_send_task('calculate', None)
ndone = len(get_slaves())
source = MPI.ANY_SOURCE
while (ndone > 0):
data = mpi_comm.recv(source=source, tag=MPI.ANY_TAG, status=mpi_status)
tag = mpi_status.Get_tag()
slave = mpi_status.G... |
class UpConcatHead(BaseSegHead):
def __init__(self, **kwargs):
super(UpConcatHead, self).__init__(**kwargs)
self.linear_fuse = ConvModule(in_channels=sum(self.in_channels), out_channels=self.channels, kernel_size=1, norm_cfg=self.norm_cfg)
def forward(self, x):
x = [F.interpolate(xx, siz... |
def test_sv_cyext_nopa():
    """Cython (no-parallel) SHAP values must match the pure-python reference."""
    cython_vals = acvtree.shap_values_nopa(X, [[]], 5)
    python_vals = acvtree.py_shap_values(X, [[]])
    assert np.allclose(cython_vals, python_vals)
class Visualizer():
def __init__(self, reversed_virtual_map, reversed_original_map, args, total_batches, tp, holdout_size):
self.args = args
self.tp = tp
self.holdout_size = holdout_size
self.previous_class_scores = {}
self.current_class_scores = {}
self.batch_num = t... |
def get_str(output, lower=False):
    """Join output['word'] with single spaces, optionally lowercased."""
    joined = ' '.join(output['word'])
    if lower:
        return joined.lower()
    return joined
def linear_backward_weights(x, w, dy, bias=None):
if (bias is not None):
dbias = dy.sum(axis=(1, 0))
else:
dbias = None
transposed_axes = list(range(dy.ndim))
(transposed_axes[(- 2)], transposed_axes[(- 1)]) = (transposed_axes[(- 1)], transposed_axes[(- 2)])
dw = np.matmul(np.transpo... |
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, dat... |
class functional_datapipe(object):
name: str
def __init__(self, name: str, enable_df_api_tracing=False) -> None:
self.name = name
self.enable_df_api_tracing = enable_df_api_tracing
def __call__(self, cls):
if issubclass(cls, IterDataPipe):
if isinstance(cls, Type):
... |
def collaborator(f: Callable=None, *, num_gpus: float=0) -> Callable:
if (f is None):
return functools.partial(collaborator, num_gpus=num_gpus)
print(f'Collaborator step "{f.__name__}" registered')
f.is_step = True
f.decorators = []
f.name = f.__name__
f.task = True
f.aggregator_step... |
def add_java_example(name, path=None):
    """Create a Java example component and register it under `name`."""
    reg_component(name, JavaExampleComponent(name, path))
class MSVDDataModule(BaseDataModule):
    """Data module wiring the MSVD dataset into the base pipeline."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def dataset_cls(self):
        return MSVDDataset

    def dataset_cls_no_false(self):
        # Same class as dataset_cls: MSVD uses one dataset for both paths.
        return MSVDDataset

    def dataset_name(self):
        return 'msvd'
def _write_ti_bashrc():
path = (get_cache_home() / 'ti.bashrc')
envs = (get_cache_home() / 'ti-env.sh')
_write_env(envs)
with open(path, 'w') as f:
f.write(f'''[ -f /etc/bashrc ] && source /etc/bashrc
[ -f ~/.bashrc ] && source ~/.bashrc
export PS1="\[\e]0;[Taichi Build Environment]\]\[[01;31m... |
def print_thresholded_metric(title, thresholds, data, last_entry_name=None, last_entry_value=None):
line_separator = ('=' * 120)
threshold_line_format = get_threshold_line_format(thresholds, last_entry_name)
items = data
if (last_entry_value is not None):
items = (items + [last_entry_value])
... |
class SHREC(InMemoryDataset):
url = '
class_names = ['alien', 'ants', 'armadillo', 'bird1', 'bird2', 'camel', 'cat', 'centaur', 'dinosaur', 'dino_ske', 'dog1', 'dog2', 'flamingo', 'glasses', 'gorilla', 'hand', 'horse', 'lamp', 'laptop', 'man', 'myScissor', 'octopus', 'pliers', 'rabbit', 'santa', 'shark', 'snake... |
def LF_pseudo_negation_rule_out(span):
left_rgx = "(cannot|does not|doesn't) rule[s]* out"
left = get_left_span(span)
trigger = match_regex(left_rgx, left)
if ((not trigger) or (token_distance(trigger, span) > 5)):
return ABSTAIN
return (NON_NEGATED if re.search("(cannot|does not|doesn't)", ... |
def register_Ns3TcpClassicRecovery_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::TcpClassicRecovery const &', 'recovery')])
cls.add_method('DoRecovery', 'void', [param('ns3::Ptr< ns3::TcpSocketState >', 'tcb'), param('uint32_t', 'lastAckedBytes'), param('uint32_t', 'las... |
def populate_cluster(cluster_ids, tokens_to_remove, token_ids):
if (len(cluster_ids[(- 1)]) == 0):
tokens_to_remove += token_ids
cluster_ids[(- 1)].append(((token_ids[(- 1)] + 1) - len(tokens_to_remove)))
else:
mention_tokens = range(cluster_ids[(- 1)][0], (token_ids[0] - len(tokens_to_r... |
def load_backend(t, lib, generic_functions, mixins=tuple()):
backend_name = 'THNN{}Backend'.format(t)
backend = type(backend_name, (mixins + (THNNBackendBase,)), {})()
for function in generic_functions:
full_fn_name = '{}{}'.format(t, function.name)
fn = getattr(lib, full_fn_name)
ba... |
class MultiList():
class Node():
def __init__(self, numberLists, cargo=None):
self.cargo = cargo
self.next = ([None] * numberLists)
self.prev = ([None] * numberLists)
self.ignore = 0
self.area = ([0.0] * numberLists)
self.volume = ([0.0... |
def skip_imports(lines: List[str], pos: int) -> int:
    """Return the index of the first line at or after `pos` that is neither
    blank/whitespace nor an 'import' statement; len(lines) if none exists."""
    index = pos
    while index < len(lines):
        line = lines[index]
        if line.strip() and not line.lstrip().startswith('import'):
            return index
        index += 1
    return len(lines)
class LiSHT(torch.nn.Module):
    """LiSHT activation: x * tanh(x) (linearly scaled hyperbolic tangent)."""

    def forward(self, input: Tensor) -> Tensor:
        # tanh(x) * x == x * tanh(x); elementwise product is commutative.
        return torch.tanh(input).mul(input)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.