code stringlengths 101 5.91M |
|---|
def verify(info, whole_db, root_path, num_point_features):
obj_path = (root_path / info['path'])
obj_points = np.fromfile(str(obj_path), dtype=np.float32).reshape([(- 1), num_point_features])
mean_origin = obj_points.mean()
(start_idx, end_idx) = info['global_data_offset']
obj_points_new = whole_db[... |
def max(g, self, dim_or_y=None, keepdim=None):
if ((dim_or_y is None) and (keepdim is None)):
return g.op('ReduceMax', self, keepdims_i=0)
if (keepdim is None):
return g.op('Max', self, dim_or_y)
else:
dim = sym_help._get_const(dim_or_y, 'i', 'dim')
keepdim = sym_help._get_co... |
def test_assignment_to_nonexistent_variable():
    """Assigning to an undefined name inside a dace program must raise DaceSyntaxError."""
    # Fix: badprog.to_sdfg() only exists on a dace.program-wrapped function;
    # the decorator was missing, making the call an AttributeError instead
    # of exercising the intended DaceSyntaxError path.
    @dace.program
    def badprog(B: dace.float64):
        A[...] = B

    with pytest.raises(DaceSyntaxError):
        badprog.to_sdfg()
def _get_data_from_buffer(obj):
view = memoryview(obj)
if (view.itemsize != 1):
raise ValueError('cannot unpack from multi-byte object')
return view |
class _Rx_operation(_rot_operation):
    """Rotation layer applying parameterised RX gates to every qubit."""

    def get_circuit(self, var_param_assignment: dict):
        """Build a QuantumCircuit with RX gates bound from var_param_assignment."""
        circuit = QuantumCircuit(self.num_qubits)
        return self.apply_param_vectors(circuit, RXGate, var_param_assignment)
def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, ignore_index=255):
if (pred.dim() != label.dim()):
assert (((pred.dim() == 2) and (label.dim() == 1)) or ((pred.dim() == 4) and (label.dim() == 3))), 'Only pred shape [N, C], label shape [N] or pred shap... |
def create_perfect_overlap_pairs_from_intervals(intervals1: List[Pair], intervals2: List[Pair]) -> List[Tuple[(Pair, Pair)]]:
    """Return every (a, b) pair, a from intervals1 and b from intervals2, where a == b."""
    return [
        (left, right)
        for left in intervals1
        for right in intervals2
        if left == right
    ]
class NonPSDError(LinAlgError):
    """Raised when a matrix expected to be positive semidefinite is not.

    Fix: the original called ``super(LinAlgError, self).__init__``, which
    starts the MRO lookup *after* LinAlgError and thereby skips
    ``LinAlgError.__init__``; a plain ``super()`` delegates correctly.
    """

    def __init__(self):
        err_msg = 'Matrix is not positive semidefinite (PSD).'
        super().__init__(err_msg)
def _check_py_version():
py_version = sys.version_info
if (py_version.major != 3):
raise RuntimeError('Works only with python 3')
if (py_version.minor not in PYTHON_DEPS):
raise RuntimeError(f'Works only with python 3.[{list(PYTHON_DEPS)}]') |
class TestSuiteGenerationAlgorithmFactory(GenerationAlgorithmFactory[tsc.TestSuiteChromosome]):
_strategies: ClassVar[dict[(config.Algorithm, Callable[([], GenerationAlgorithm)])]] = {config.Algorithm.DYNAMOSA: DynaMOSAAlgorithm, config.Algorithm.MIO: MIOAlgorithm, config.Algorithm.MOSA: MOSAAlgorithm, config.Algor... |
class VQVAE_1d(torch.nn.Module):
def __init__(self, fmri_len, num_tokens, num_layers=5, num_resnet_blocks=3, hidden_dim=64):
super().__init__()
self.dVAE = DiscreteVAE(signal_len=fmri_len, num_layers=num_layers, num_tokens=num_tokens, codebook_dim=1024, hidden_dim=hidden_dim, channels=1, num_resnet_... |
def main():
parser = argparse.ArgumentParser(description='Average checkpoints')
parser.add_argument('--checkpoint-dir', required=True, type=str, default='results', help='Checkpoint directory location.')
parser.add_argument('--best-n', required=True, type=int, default=5, help='Num of epochs to average')
... |
class InceptionB(nn.Module):
def __init__(self, input_channels):
super().__init__()
self.branch3x3 = BasicConv2d(input_channels, 384, kernel_size=3, stride=2)
self.branch3x3stack = nn.Sequential(BasicConv2d(input_channels, 64, kernel_size=1), BasicConv2d(64, 96, kernel_size=3, padding=1), Ba... |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--strategy', default='all', help="Distributed or centralized training (options : all for all available gpus or string of gpus numbers separated by commas like '0,1')", type=str)
parser.add_argument('--batch_size', default=2, help='Total bat... |
def parse_args():
    """Parse command-line arguments for the VoteNet checkpoint upgrade tool."""
    parser = argparse.ArgumentParser(
        description='MMDet3D upgrade model version(before v0.6.0) of VoteNet')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='path of the output checkpoint file')
    return parser.parse_args()
def test_body(testdir):
testdir.make_test('\(method="POST")\(max_examples=3, deadline=None)\ndef test_(case):\n assert_int(case.body)\n assert_requests_call(case)\n ', paths={'/users': {'post': {'parameters': [{'name': 'id', 'in': 'body', 'required': True, 'schema': {'type': 'integer'}}], 'responses': ... |
def redirect_edge(state: SDFGState, edge: graph.MultiConnectorEdge[Memlet], new_src: Optional[nodes.Node]=None, new_dst: Optional[nodes.Node]=None, new_src_conn: Optional[str]=None, new_dst_conn: Optional[str]=None, new_data: Optional[str]=None, new_memlet: Optional[Memlet]=None) -> graph.MultiConnectorEdge[Memlet]:
... |
def conv1x1x1(in_planes, out_planes, stride=1):
    """Pointwise (1x1x1) 3D convolution without a bias term."""
    return nn.Conv3d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class miniImageNetContrastive(miniImageNet):
def __init__(self, root: str, mode: str='train') -> None:
super().__init__(root, mode)
self.transform = transforms.Compose([transforms.RandomApply([transforms.ColorJitter(0.8, 0.8, 0.8, 0.2)], p=0.3), transforms.RandomGrayscale(p=0.2), transforms.RandomHo... |
def add_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
return
if isinstance(module, torch.nn.Conv2d):
handle = module.register_forward_hook(conv_flops_counter_hook)
elif (isinstance(module, torch.nn.ReLU)... |
def CalculateMZagreb1(mol):
    """Return log10 of the first modified Zagreb index of *mol*.

    The index is sum over atoms of (1/degree)**2, ignoring zero-degree
    atoms. ``mol`` must expose RDKit-style ``GetAtoms()``/``GetDegree()``.
    Falls back to the module-level MINVALUE when the sum is zero (empty
    molecule) so the log is defined.

    Fix: the original removed zeros with a quadratic
    ``while 0 in deltas: deltas.remove(0)`` loop; a single-pass filter
    is equivalent and O(n).
    """
    degrees = [atom.GetDegree() for atom in mol.GetAtoms()]
    deltas = np.array([d for d in degrees if d != 0], 'd')
    res = ((1.0 / deltas) ** 2).sum() if deltas.size else 0.0
    if res == 0:
        res = MINVALUE  # guard for log10 on degenerate input
    return np.log10(res)
def get_eval_extra_info(model_name, eval_setting_name):
    """Return extra_info of the first completed evaluation matching model and setting.

    Implicitly returns None when no completed match exists in m_repo.
    """
    for evaluation in m_repo.get_evaluations():
        same_model = evaluation.checkpoint.model.name == model_name
        same_setting = evaluation.setting.name == eval_setting_name
        if same_model and same_setting and evaluation.completed:
            return evaluation.extra_info
def filter_candidates(candidates, model, size_limit):
ans = {}
free_cliques = downward_closure(model.cliques)
for cl in candidates:
cond1 = (hypothetical_model_size(model.domain, (model.cliques + [cl])) <= size_limit)
cond2 = (cl in free_cliques)
if (cond1 or cond2):
ans[... |
def layer_norm(model, blob_in, blob_out, dim_in, axis=1, epsilon=0.0001, initial_scale=1.0, initial_bias=0.0):
scale = model.create_param(param_name='{}_scale'.format(blob_out), shape=([dim_in] if isinstance(dim_in, int) else dim_in), initializer=initializers.Initializer('ConstantFill', value=initial_scale), tags=P... |
def glue_convert_examples_to_features(examples, tokenizer, max_length=512, task=None, label_list=None, output_mode=None, pad_on_left=False, pad_token=0, pad_token_segment_id=0, mask_padding_with_zero=True):
is_tf_dataset = False
if (is_tf_available() and isinstance(examples, tf.data.Dataset)):
is_tf_dat... |
def left_actors_callback(msg):
    """Parse msg.data, a bracketed id list like '[1, 2, 3]', into global left_actors.

    Brackets are turned into sentinel commas, the outermost (empty) tokens
    are dropped, and remaining non-empty tokens become ints.
    """
    global left_actors
    tokens = msg.data.replace('[', ',').replace(']', ',').split(',')
    left_actors = [int(tok) for tok in tokens[1:-1] if tok != '']
class MSC(nn.Module):
def __init__(self, scale, pyramids=[0.5, 0.75]):
super(MSC, self).__init__()
self.scale = scale
self.pyramids = pyramids
def forward(self, x):
logits = self.scale(x)
interp = (lambda l: F.interpolate(l, size=logits.shape[2:], mode='bilinear', align_c... |
def register_dataset(datasets_root: Optional[str]=None):
def empty_load_callback():
pass
video_list_fpath = maybe_prepend_base_path(datasets_root, 'chimpnsee/cdna.eva.mpg.de/video_list.txt')
video_base_path = maybe_prepend_base_path(datasets_root, 'chimpnsee/cdna.eva.mpg.de')
DatasetCatalog.regi... |
def add_unique_craters(craters, craters_unique, thresh_longlat2, thresh_rad):
k2d = (180.0 / (np.pi * 1737.4))
(Long, Lat, Rad) = craters_unique.T
for j in range(len(craters)):
(lo, la, r) = craters[j].T
la_m = ((la + Lat) / 2.0)
minr = np.minimum(r, Rad)
dL = ((((Long - lo) ... |
def train_one_epoch(model: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, log_writer=None, args=None):
model.train(True)
metric_logger = misc.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1,... |
.parametrize('backend', ['pydub'])
.parametrize('size', [(50000, 2), (150000, 2), (200000, 1)])
.parametrize('channel_first', [False, True])
.parametrize('audio', audios)
def test_auresize(backend, size, channel_first, audio):
_change_backend(backend)
if channel_first:
audio = audio.transpose((1, 0))
... |
_converter_regitstry('sCONV')
def sCONV_t_converter(reg: sCONV_reg):
opd0 = dict(address=reg.opd0_addr, shape=(reg.res0_n, reg.opd0_c, reg.opd0_h, reg.opd0_w), stride=[reg[f'opd0_{i}_str'] for i in 'nchw'], dtype=(reg.opd0_prec, reg.opd0_sign), layout=reg.opd0_str)
res0 = dict(address=reg.res0_addr, shape=[reg[... |
def get_early_stop_callback(args: Namespace) -> EarlyStopping:
    """Build an EarlyStopping callback on val_acc (mode='max', patience=5).

    NOTE: ``args`` is accepted for interface compatibility but unused here.
    """
    return EarlyStopping(
        monitor='val_acc',
        min_delta=0.0,
        patience=5,
        verbose=True,
        mode='max',
    )
class CosmoAgent():
def __init__(self):
print((cf.bold | cf.purple('Loading COSMO-xl...')))
self.device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
self.tokenizer = AutoTokenizer.from_pretrained('allenai/cosmo-xl')
self.model = AutoModelForSeq2SeqLM.from_pretrain... |
def masked_mae_loss(null_val):
    """Factory returning a loss(preds, labels) closure for masked MAE.

    The closure forwards to masked_mae_tf with the captured null_val.
    """
    def loss(preds, labels):
        return masked_mae_tf(preds=preds, labels=labels, null_val=null_val)
    return loss
def reset_accumulated_memory_stats(device: Union[(Device, int)]=None) -> None:
    """Reset the CUDA accumulated memory statistics for *device*.

    When device is None the currently selected device is used
    (resolved by _get_device_index with optional=True).
    """
    idx = _get_device_index(device, optional=True)
    return torch._C._cuda_resetAccumulatedMemoryStats(idx)
class LibComponent(Component):
def __init__(self, name, path, deps, includes2install):
Component.__init__(self, name, path, deps)
self.includes2install = includes2install
def mk_makefile(self, out):
Component.mk_makefile(self, out)
objs = []
for cppfile in get_cpp_files(s... |
_cache()
def sxs_directory(directory_type, persistent=True):
import warnings
import sys
import os
import atexit
import shutil
import tempfile
from pathlib import Path
if (directory_type not in ['cache', 'config']):
raise ValueError(f"Can only find 'cache' or 'config' directories,... |
class EntityState(object):
    """Physical state of an entity: position and velocity.

    Both fields start unset (None) and are populated later by the environment.
    """

    def __init__(self):
        self.p_pos = None  # position
        self.p_vel = None  # velocity
class CbPVP(PVP):
VERBALIZER = {'contradiction': ['No'], 'entailment': ['Yes'], 'neutral': ['Maybe']}
def get_parts(self, example: InputExample) -> FilledPattern:
text_a = self.shortenable(example.text_a)
text_b = self.shortenable(example.text_b)
self.pattern_id = 1
if (self.patt... |
_module()
class LoadImageAnnotationsFromFile(object):
def __init__(self, dataset='RefCOCOUNC', color_type='color', backend=None, file_client_cfg=dict(backend='disk'), max_token=15, with_bbox=False, with_mask=False):
self.color_type = color_type
self.backend = backend
self.file_client_cfg = f... |
def main(_):
logging.info('Benchmarking model: {}'.format(FLAGS.model_name))
gpus = tf.config.list_physical_devices('GPU')
if gpus:
print('Found {} GPU(s)'.format(len(gpus)))
[tf.config.experimental.set_memory_growth(device, True) for device in gpus]
else:
logging.warning("No GPU... |
class KRTableauxSpin(KRTableauxRectangle):
def _build_module_generators(self):
n = self.cartan_type().classical().rank()
if (self._r == n):
return KRTableauxRectangle._build_module_generators(self)
tableau = []
for i in range(self._s):
tableau.append(([(- n)] ... |
def register_Ns3DsrDsrOptionPad1_methods(root_module, cls):
cls.add_constructor([param('ns3::dsr::DsrOptionPad1 const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetOptionNumber', 'uint8_t', [], is_const=True, is_virtual=True)
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
... |
def entity_linking(e_spans, cutoff=500, threshold=0):
    """Score each entity span against the module-level e_index.

    Returns one candidate-id list per input span, in order.
    """
    return [
        e_index.label_scores(span, top=cutoff, threshold=threshold,
                             verbose=False, scale=0.3, max_degree=100000)
        for span in e_spans
    ]
def get_class_in_module(class_name, module_path):
    """Import *module_path* (path separators become dots) and return its *class_name* attribute."""
    dotted = module_path.replace(os.path.sep, '.')
    return getattr(importlib.import_module(dotted), class_name)
.parametrize('truncation_tol', (True, None, 1e-09))
def test_lvcnr_format(truncation_tol):
import shutil
sxs_id = sxs.sxs_id(shortest_h_com_file)
sxs_lev = sxs.lev_number(shortest_h_com_file)
shortest_lvcnr = f"{sxs_id.replace(':', '_')}_Res{sxs_lev}.h5"
with contextlib.redirect_stdout(None), contex... |
def embed(args):
device = torch.device(('cuda' if args.cuda else 'cpu'))
pprint(args.__dict__)
interface = FileInterface(**args.__dict__)
if args.cache:
out = interface.cache(preprocess, args)
processor = out['processor']
processed_metadata = out['processed_metadata']
else:
... |
def get_labels(ENE_ids, ENE_id_index):
    """Map each record's 'ENE_id' through ENE_id_index, preserving order."""
    return [ENE_id_index[record['ENE_id']] for record in ENE_ids]
class ResNetBlock(nn.Module):
n_hidden: int
strides: Tuple[(int, int)] = (1, 1)
activation: Callable = nn.relu
conv_block_cls: ModuleDef = ConvBlock
skip_cls: ModuleDef = ResNetSkipConnection
def __call__(self, x):
skip_cls = partial(self.skip_cls, conv_block_cls=self.conv_block_cls)
... |
_memoize_get_funcs
def get_lapack_funcs(names, arrays=(), dtype=None, ilp64=False):
if isinstance(ilp64, str):
if (ilp64 == 'preferred'):
ilp64 = HAS_ILP64
else:
raise ValueError("Invalid value for 'ilp64'")
if (not ilp64):
return _get_funcs(names, arrays, dtype, ... |
def factor_prefix(vals, do_it):
    """Format vals, optionally factoring a shared prefix out as 'prefix[a, b]'.

    The prefix is only factored when do_it is truthy and there is more
    than one value; otherwise values are simply comma-joined.
    """
    formatted = [format_value(v) for v in vals]
    prefix = ''
    if do_it and len(formatted) > 1:
        prefix = commonprefix(formatted)
    joined = ', '.join(v[len(prefix):] for v in formatted)
    if prefix:
        return '%s[%s]' % (prefix, joined)
    return joined
def unique_tensor_list(tensors: Iterable[Tensor]) -> List[Tensor]:
    """Deduplicate *tensors* by identity (via RefIdEq keys), keeping first occurrences in order."""
    seen_keys = set()
    unique = []
    for t in tensors:
        key = RefIdEq(t)
        if key in seen_keys:
            continue
        unique.append(t)
        seen_keys.add(key)
    return unique
class VecEnvWrapper(VecEnv):
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
VecEnv.__init__(self, num_envs=venv.num_envs, observation_space=(observation_space or venv.observation_space), action_space=(action_space or venv.action_space))
def step_async(self,... |
class VoxelRCNN(Detector3DTemplate):
def __init__(self, model_cfg, num_class, dataset):
super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
self.module_list = self.build_networks()
def forward(self, batch_dict):
for cur_module in self.module_list:
batc... |
def damp_array_wyckoffs(len_wyckoff):
    """Print a C array initializer 'position_wyckoff' from len_wyckoff[1:].

    Emits a leading 0 entry, then the values ten per printed row.

    Fix: the loop previously iterated the undefined global ``len_wyckoffs``
    (trailing 's') instead of the ``len_wyckoff`` parameter, raising
    NameError at runtime.
    """
    print('static const int position_wyckoff[] =')
    text = (' { %4d,' % 0)
    for (i, x) in enumerate(len_wyckoff[1:]):
        if (i % 10) == 0:  # flush the current row every 10 entries
            print(text)
            text = '  '
        text += (' %4d,' % x)
    print(text + ' };')
def cla1_adv_ll(input, target, class_freq):
    """Mean of the input values selected at each row's target index (dim 1).

    NOTE: ``class_freq`` is accepted for interface compatibility but unused.
    """
    picked = torch.gather(input, 1, target.unsqueeze(1))
    return picked.mean()
class ToArrowOptions(TypedDict):
    """Typed options bag for an Arrow conversion routine.

    NOTE(review): field meanings below are inferred from the names
    (presumably Awkward Array's ``to_arrow`` options) — confirm against
    the consuming converter.
    """
    # Presumably: use 32-bit offsets for list-type layouts.
    list_to32: bool
    # Presumably: use 32-bit offsets for string layouts.
    string_to32: bool
    # Presumably: use 32-bit offsets for bytestring layouts.
    bytestring_to32: bool
    # dtype used to materialize empty arrays, or None to leave them typeless.
    emptyarray_to: (np.dtype | None)
    # Presumably: encode categorical data as Arrow dictionary arrays.
    categorical_as_dictionary: bool
    # Presumably: emit Arrow extension arrays where applicable.
    extensionarray: bool
    # Presumably: precompute null counts during conversion.
    count_nulls: bool
    # Presumably: treat a record as a scalar rather than a length-1 array.
    record_is_scalar: bool
def convert_ids_to_string(tokenizer, input):
    """Convert token ids to tokens via the tokenizer and join them with spaces."""
    tokens = tokenizer.convert_ids_to_tokens(input)
    return ' '.join(tokens)
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for a tf.SparseTensor.

    Each nonzero is kept with probability keep_prob (floor of
    keep_prob + U[0,1) is 1 iff the draw falls below keep_prob), and the
    surviving values are rescaled by 1/keep_prob to preserve expectation.
    """
    keep_mask = tf.cast(
        tf.floor(keep_prob + tf.random_uniform(noise_shape)), dtype=tf.bool)
    retained = tf.sparse_retain(x, keep_mask)
    return retained * (1.0 / keep_prob)
_utils.test()
def test_remove_element_shape_ndarray_arg():
with pytest.raises(ti.TaichiRuntimeError, match='The element_shape argument for ndarray is deprecated in v1.6.0, and it is removed in v1.7.0. Please use vector or matrix data type instead.'):
ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'x', ti.f32, ndim=... |
class SpeakerEmbeddingExtractor(nn.Module):
def __init__(self, input_size: int, output_size: int=1500, backbone: str='XVector', pooling_type: str='TemporalAveragePooling'):
super().__init__()
self._indim = input_size
self._outdim = output_size
if (backbone == 'XVector'):
... |
def MSRAFill(tensor):
    """In-place MSRA (He) normal initialization of *tensor*.

    std = sqrt(2 / fan_out) where fan_out = numel / size of dim 1.
    Returns the same tensor, filled in place.
    """
    numel = reduce(operator.mul, tensor.shape, 1)
    fan_out = numel / tensor.shape[1]
    std = math.sqrt(2 / fan_out)
    return init.normal_(tensor, 0, std)
def create_datasets(tfrecord_path, batch_size, num_readers, config, only_test=False):
batch_size_test = max(1, (batch_size // config.num_segments))
filenames_test = glob((tfrecord_path + '/test-*.tfrecord'))
dataset_test = tf.data.TFRecordDataset(filenames_test)
dataset_test = dataset_test.map(tfrecord_... |
class GPT2LM(MiniconsLM):
    """Thin MiniconsLM wrapper that pins model_type to 'IncrementalLMScorer'."""

    def __init__(self, model_name_or_path, device='cuda', gpu_batch_size=20):
        super().__init__(
            model_name_or_path=model_name_or_path,
            device=device,
            gpu_batch_size=gpu_batch_size,
            model_type='IncrementalLMScorer',
        )
(config_path='.', config_name='config')
def run(cfg: DictConfig) -> None:
mlflow.set_tracking_uri(cfg.params.tracking_uri)
mlflow.set_experiment(cfg.params.experiment_name)
mlflow.start_run(run_name=cfg.params.run_name)
mlflow.log_params(cfg.params)
mlflow.log_param('cwd', os.getcwd())
mlflow.lo... |
class BinaryCnxp(Constant):
    """Binary constant expression over operands x and y.

    ``codes`` is a class-level registry; presumably populated elsewhere
    in the module — confirm against sibling definitions.
    """
    __slots__ = ('x', 'y')
    codes = {}

    def __init__(self, x, y):
        self.x, self.y = x, y

    def type_constraints(self, tcs):
        # The expression is integer-typed and must share a type with both operands.
        tcs.integer(self)
        tcs.eq_types(self, self.x, self.y)
class STSTrainer(Trainer):
def prediction_step(self, model: nn.Module, inputs: Dict[(str, Union[(torch.Tensor, Any)])], prediction_loss_only: bool, ignore_keys: Optional[List[str]]=None) -> Tuple[(Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor])]:
model.eval()
with torch.no_gr... |
def _trn_epoch(config, model, data, epoch, np_rng):
logger = logging.getLogger()
valid_qtn_idxs = np.flatnonzero(data.trn.vectorized.qtn_ans_inds).astype(np.int32)
np_rng.shuffle(valid_qtn_idxs)
num_samples = valid_qtn_idxs.size
batch_sizes = []
losses = []
accs = []
samples_per_sec = []... |
def retrieve_all_test_sessions(conn, project):
ids = []
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute('SELECT distinct b.id FROM results r, bots b \n WHERE r.bot_id = b.id and b.type= %s\n order by b.id ', [project])
... |
def create_loss_counting():
n = 370
nbkg = 340
Nsig = zfit.Parameter('Nsig', 0, (- 100.0), 100)
Nbkg = zfit.Parameter('Nbkg', nbkg, floating=False)
Nobs = zfit.ComposedParameter('Nobs', (lambda a, b: (a + b)), params=[Nsig, Nbkg])
obs = zfit.Space('N', limits=(0, 800))
model = Poisson(obs=ob... |
def _precision_warn(p1, p2, extra=''):
    """Emit a module-logger warning about a lossy pixel-format conversion p1 -> p2."""
    template = 'Lossy conversion from {} to {}. {} Convert image to {} prior to saving to suppress this warning.'
    message = template.format(p1, p2, extra, p2)
    logger.warning(message)
def export_pytorch(preprocessor: Union[('PreTrainedTokenizer', 'FeatureExtractionMixin', 'ProcessorMixin')], model: 'PreTrainedModel', config: OnnxConfig, opset: int, output: Path, tokenizer: 'PreTrainedTokenizer'=None, device: str='cpu') -> Tuple[(List[str], List[str])]:
if (isinstance(preprocessor, PreTrainedToke... |
class TokenEmbedding(nn.Module):
def __init__(self, num_embeddings, embedding_dim, output_dim=None, static=True):
super(TokenEmbedding, self).__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.embeddings = nn.Embedding(num_embeddings, embedding_di... |
def get_summary_bstate(bstate):
domains = [u'taxi', u'restaurant', u'hospital', u'hotel', u'attraction', u'train', u'police']
summary_bstate = []
for domain in domains:
domain_active = False
booking = []
for slot in sorted(bstate[domain]['book'].keys()):
if (slot == 'book... |
def dl_image(url, timeout, fn, quality, crop=False, resize=256):
fetched = 1
try:
response = requests.get(url, timeout=timeout)
open(fn, 'wb').write(response.content)
img = Image.open(fn)
if crop:
img = crop_largest_square(img)
has_alpha = ((img.mode in ('RGBA... |
((not workspace.C.use_mkldnn), 'No MKLDNN support.')
class TestMomentumSGDUpdateOps(hu.HypothesisTestCase):
(n=st.integers(4, 8), nesterov=st.booleans(), **mu.gcs)
def test_MomentumSGDUpdate(self, n, nesterov, gc, dc):
param = np.random.rand(n).astype(np.float32)
grad = np.random.rand(n).astype(... |
class BatchNorm2dNoSync(_BatchNorm):
    """Batch normalization for 4D (N, C, H, W) inputs without cross-device sync."""

    def _check_input_dim(self, input):
        """Raise ValueError unless *input* is 4-dimensional."""
        dims = input.dim()
        if dims == 4:
            return
        raise ValueError('expected 4D input (got {}D input)'.format(dims))
def test_IndexedOptionArray_NumpyArray():
v2a = ak.contents.indexedoptionarray.IndexedOptionArray(ak.index.Index(np.array([2, 2, (- 1), 1, (- 1), 5, 4], np.int64)), ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])))
layout = v2a
generator = ak._connect.cling.togenerator(layout.form... |
def produceImgAndLabel():
root_path = '/home/lmin/data/CamVid/'
stages = ['train', 'val', 'test']
for stage in stages:
seg_txt = open(((root_path + stage) + '.txt'), 'a')
imgpath = glob(os.path.join(root_path, stage, 'images/*.png'))
txtpath = glob(os.path.join(root_path, stage, 'mas... |
def test_ListType():
assert (str(ak.types.listtype.ListType(ak.types.unknowntype.UnknownType())) == 'var * unknown')
assert (str(ak.types.listtype.ListType(ak.types.unknowntype.UnknownType(), parameters={'x': 123})) == '[var * unknown, parameters={"x": 123}]')
assert_overrides_typestr(ak.types.listtype.List... |
_utils.test()
def test_is_not():
    """Using `is not` between Taichi fields should raise TaichiCompilationError.

    NOTE(review): `func` reads as a Taichi kernel but no @ti.kernel decorator
    is visible here (the scrape appears to strip decorators); without it,
    `b is not c` is evaluated as plain Python and no TaichiCompilationError
    would be raised — confirm against the upstream test file.
    """
    b = ti.field(ti.i32, shape=())
    c = ti.field(ti.i32, shape=())
    def func():
        a = (b is not c)
    with pytest.raises(ti.TaichiCompilationError):
        func()
def render_train(opts, batch_size=1, data_loader_kwargs=None, max_items=None, **stats_kwargs):
dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
if (data_loader_kwargs is None):
data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)
options = webdriver.Chrome... |
def str_to_mod(nodes, s):
    """Resolve a 'module on node' spec string to nodes[node][module].

    Returns None when s is None, lacks the ' on ' separator, names an
    unknown node, or names a module the node does not have.
    """
    if s is None:
        return None
    if ' on ' not in s:
        return None
    module_name, node_name = s.split(' on ', 1)
    if node_name in nodes and module_name in nodes[node_name].modules:
        return nodes[node_name][module_name]
    return None
def test_timeit():
s = 0
x = np.zeros((320, 240, 3), dtype=np.uint8)
for i in range(100):
y = foo(x)
s += y.sum()
print(s) |
.parametrize(['nu', 'temperature'], [(.0, 10000.0), (0, 1), (1, 1)])
def test_intensity_black_body(nu, temperature):
func = formal_integral.intensity_black_body
actual = func(nu, temperature)
print(actual, type(actual))
expected = intensity_black_body(nu, temperature)
ntest.assert_almost_equal(actua... |
def preprocess_descriptions(examples):
nlp = spacy.load('en_core_web_sm', disable=['parser'])
sentences = [clean_sentence(example['description']) for example in examples]
parsed_sentences = []
for sentence in tqdm.tqdm(sentences):
parsed_sentences.append(nlp(sentence))
clean_parsed_sentences... |
def download_blob(bucket_name: str, source_blob_name: str, destination_file_name: str):
    """Download a GCS blob to a local file and print a confirmation message."""
    bucket = gcs_bucket(bucket_name)
    blob = bucket.blob(source_blob_name)
    blob.download_to_filename(destination_file_name)
    print(f'Downloaded storage object {source_blob_name!r} from bucket {bucket_name!r} to local file {destination_file_name!r}.')
class Identity(nn.Module):
    """Pass-through module.

    ``out_channel`` and ``affine`` are accepted for interface compatibility
    with sibling candidate ops but are ignored.
    """

    def __init__(self, out_channel, affine=False):
        super().__init__()

    def forward(self, x):
        # Return the input unchanged.
        return x
def test_model_init():
    """UTMOS22Strong should instantiate without raising.

    Fix: the original ended with ``assert True, msg`` — a vacuous assertion
    that can never fail; assert on the constructed instance instead.
    """
    model = UTMOS22Strong()
    assert model is not None, 'UTMOS22Strong is not properly instantiated.'
def ReverseCloseExpression(clean_lines, linenum, pos):
line = clean_lines.elided[linenum]
endchar = line[pos]
if (endchar not in ')}]>'):
return (line, 0, (- 1))
if (endchar == ')'):
startchar = '('
if (endchar == ']'):
startchar = '['
if (endchar == '}'):
startch... |
def test_tasklet_fission_useless_statement():
def test_basic_tf(A: dace.float32, D: dace.float32):
B = dace.define_local_scalar(dace.float32)
C = dace.define_local([1], dace.float32)
with dace.tasklet:
(a << A[0])
(d << D[0])
(b >> B[0])
(c >> ... |
class BertTokenizationTest(CommonTestCases.CommonTokenizerTester):
tokenizer_class = BertTokenizer
def setUp(self):
super(BertTokenizationTest, self).setUp()
vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
self.vocab... |
def require_vision(test_case):
    """Decorator: skip *test_case* via unittest.skip when vision support is unavailable."""
    if is_vision_available():
        return test_case
    return unittest.skip('test requires vision')(test_case)
def rho_select(pad, lengths):
    """Select the last valid timestep of each padded sequence.

    Assumes pad is (B, T, D) and lengths is a (B,) integer tensor — TODO
    confirm against callers. Returns a (B, D) tensor where row b is
    pad[b, lengths[b] - 1, :].
    """
    gather_idx = (lengths - 1).view(-1, 1).expand(pad.size(0), pad.size(2)).unsqueeze(1)
    return pad.gather(1, gather_idx).squeeze(1)
def print_autograd_prof_summary(prof, mode, sortby='cpu_time', topk=15):
valid_sortby = ['cpu_time', 'cuda_time', 'cpu_time_total', 'cuda_time_total', 'count']
if (sortby not in valid_sortby):
warn = 'WARNING: invalid sorting option for autograd profiler results: {}\nExpected `cpu_time`, `cpu_time_total... |
def _cycle_score(mol):
cycle_list = nx.cycle_basis(nx.Graph(rdmolops.GetAdjacencyMatrix(mol)))
if (len(cycle_list) == 0):
cycle_length = 0
else:
cycle_length = max([len(j) for j in cycle_list])
if (cycle_length <= 6):
cycle_length = 0
else:
cycle_length = (cycle_lengt... |
class TrainArgs():
optimizer: OptimizerConfig
trainer: TrainerConfig
max_tune_length: int = 2048
data: str = 'tatsu-lab/alpaca'
data_cache_dir: str = 'cache/'
prompts: Optional[(Dict[(str, str)] | str)] = None
mask_inputs: bool = True
model_name_or_path: str = 'meta-llama/Llama-2-7b-hf'
... |
def MobileNet(input_shape=None, alpha=1.0, depth_multiplier=1, dropout=0.001, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000):
if (K.backend() != 'tensorflow'):
raise RuntimeError('Only TensorFlow backend is currently supported, as other backends do not support depthwise... |
class PorD_reg(atomic_reg):
OP_NAME = 'PorD'
_fields_ = [('cmd_short', ctypes.c_uint64, 1), ('op_code', ctypes.c_uint64, 16), ('cmd_id_dep', ctypes.c_uint64, 23), ('dbg_mode', ctypes.c_uint64, 1), ('tsk_typ', ctypes.c_uint64, 4), ('tsk_eu_typ', ctypes.c_uint64, 5), ('opt_rq', ctypes.c_uint64, 1), ('tsk_opd_num'... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.