code stringlengths 101 5.91M |
|---|
def url_encode_stream(obj, stream=None, charset='utf-8', encode_keys=False, sort=False, key=None, separator=b'&'):
separator = to_native(separator, 'ascii')
gen = _url_encode_impl(obj, charset, encode_keys, sort, key)
if (stream is None):
return gen
for (idx, chunk) in enumerate(gen):
if... |
class PDELU(torch.nn.Module):
__constants__ = ['num_parameters']
num_parameters: int
def __init__(self, num_parameters: int=1, init: float=1.0) -> None:
self.num_parameters = num_parameters
super(PDELU, self).__init__()
self.weight = Parameter(torch.Tensor(num_parameters).fill_(init)... |
def make_model(args, parent=False):
module = import_module(('model.' + args.base.lower()))
if (args.precision.find('fix') >= 0):
precision = int(args.precision[3:])
else:
precision = 12
QuantizeParams.bits_w = precision
QuantizeParams.bits_b = precision
QuantizeFeature.bits_f = p... |
def _requiredSize(shape, dtype):
return math.floor((np.prod(np.asarray(shape, dtype=np.uint64)) * np.dtype(dtype).itemsize)) |
def prepare_stage1_data(data):
split = []
for d in data:
if (d['type'] in ['medium', 'easy']):
table_id = d['table_id']
with open('{}/tables_tok/{}.json'.format(resource_path, table_id), 'r') as f:
table = json.load(f)
headers = [cell[0] for cell in ta... |
def validate_mx_curp(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(curp.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
if (column != ''):
... |
def do_replace(eval_ctx, s, old, new, count=None):
if (count is None):
count = (- 1)
if (not eval_ctx.autoescape):
return text_type(s).replace(text_type(old), text_type(new), count)
if (hasattr(old, '__html__') or (hasattr(new, '__html__') and (not hasattr(s, '__html__')))):
s = esca... |
def test_invalid_operation(testdir, hypothesis_max_examples, is_older_subtests):
testdir.make_test('\nlazy_schema = schemathesis.from_pytest_fixture("simple_schema")\n\_schema.parametrize()\ndef test_(request, case):\n request.config.HYPOTHESIS_CASES += 1\n', paths={'/valid': {'get': {'parameters': [{'type': 'in... |
def delete_error(file_name):
sessions = get_all_agent_sessions(file_name)
non_error_sessions = [sess for sess in sessions if (not sess['error'])]
with open((file_name + '.back'), 'a') as b_f:
for sess in sessions:
json.dump(sess, b_f)
b_f.write('\n')
with open(file_name, ... |
class XCLIPTextConfig(PretrainedConfig):
model_type = 'xclip_text_model'
def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=77, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=... |
# NOTE(review): the leading '.parametrize' looks like a mangled
# '@pytest.mark.parametrize' decorator from extraction -- confirm against
# the original test module.
.parametrize('value', ('/', '\udc9b'))
def test_filter_path_parameters(value):
    """Path parameter values containing '/' or a lone surrogate must be rejected."""
    assert (not is_valid_path({'foo': value}))
class STDCModule(BaseModule):
def __init__(self, in_channels, out_channels, stride, norm_cfg=None, act_cfg=None, num_convs=4, fusion_type='add', init_cfg=None):
super(STDCModule, self).__init__(init_cfg=init_cfg)
assert (num_convs > 1)
assert (fusion_type in ['add', 'cat'])
self.stri... |
def format_pbar_str(i, im_name):
    """Build a fixed-width (33-char, right-justified) progress label '(i) name'.

    Long names are elided from the left with a '...' prefix so the tail of
    the file name stays visible.
    """
    prefix = '(' + str(i) + ') '
    available = 33 - len(prefix)
    if len(im_name) > available:
        shown = '...' + im_name[-(available - 3):]
    else:
        shown = im_name
    return (prefix + shown).rjust(33)
def generate_distances_network_part4():
logging.info('Consolidating graphs...')
graphs_c = {}
layer = 0
while isPickle(('graphs-layer-' + str(layer))):
logging.info('Executing layer {}...'.format(layer))
graphs = restoreVariableFromDisk(('graphs-layer-' + str(layer)))
graphs_c[la... |
def func_3(mol, bits):
AllRingsBond = mol.GetRingInfo().BondRings()
ringSize = []
temp = {3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0}
for ring in AllRingsBond:
nonsingle = False
for bondIdx in ring:
if (mol.GetBondWithIdx(bondIdx).GetBondType().name != 'SINGLE'):
... |
def test_docstring_with_python_OO():
    """Substitution must leave __doc__ as None when the wrapped object has no
    docstring (mirrors running under ``python -OO``, which strips docstrings).
    """
    obj = cls(param_1='xxx', param_2='yyy')
    obj.__doc__ = None
    obj = Substitution(param_1='xxx', param_2='yyy')(obj)
    assert obj.__doc__ is None
def test_regression_bipartite_change_stats(netfilename, outcomefilename, num_tests=DEFAULT_NUM_TESTS):
print('testing bipartite change stats for ', netfilename)
print('for ', num_tests, 'iterations...')
start = time.time()
g = BipartiteGraph(netfilename)
g.printSummary()
outcome_binvar = list(ma... |
def sample_patch(point: ee.Feature, patches_array: ee.Image, scale: float) -> ee.Feature:
    """Sample *patches_array* at *point* and return the first sample with the
    point's properties copied onto it.

    dropNulls=False keeps masked pixels in the sample; tileScale=12 is an
    Earth Engine memory/parallelism trade-off (presumably to avoid
    "computation too large" errors -- confirm against EE docs).
    """
    arrays_samples = patches_array.sample(region=point.geometry(), scale=scale, projection='EPSG:3857', factor=None, numPixels=None, dropNulls=False, tileScale=12)
    return arrays_samples.first().copyProperties(point)
def gnn_iclr_test():
    """Smoke-test the GNN few-shot model: one 5-way 1-shot forward pass in eval mode."""
    N = 5             # number of classes (ways)
    K = 1             # support examples per class (shots)
    hidden_size = 230
    model = gnn_iclr.GNN(N, hidden_size)
    model.eval()
    # assumes (batch, examples, hidden) layout for both tensors -- TODO confirm
    x_support = paddle.randn([1, 5, hidden_size])
    x_query = paddle.randn([1, 80, hidden_size])
    output = model(x_support, x_query, N, K, (N * 16))
    print(output.shape)
    print(output)
def forward_loss(model, criterion, input, target, meter, train=False):
if getattr(FLAGS, 'normalize', False):
assert (getattr(FLAGS, 'ptcv_pretrained', False) or getattr(FLAGS, 'nvidia_pretrained', False) or getattr(FLAGS, 'hawq_pretrained', False))
if getattr(model, 'int_op_only', False):
... |
def get_writer():
    """Return the shared SummaryWriter, lazily creating it on first call.

    Fix: the original created the writer but never returned it, despite the
    ``get_`` name -- callers had to reach for the global instead. Returning
    the writer is backward-compatible (callers ignoring the return value are
    unaffected).

    Note: not thread-safe; two concurrent first calls could each build a writer.
    """
    global writer
    if not writer:
        writer = SummaryWriter('./logs/cnn_mnist', flush_secs=5)
    return writer
class docRowType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, entry=None):
if (entry is None):
self.entry = []
else:
self.entry = entry
def factory(*args_, **kwargs_):
if docRowType.subclass:
return docRowType.subclass... |
def redact_netloc(netloc):
(netloc, (user, password)) = split_auth_from_netloc(netloc)
if (user is None):
return netloc
if (password is None):
user = '****'
password = ''
else:
user = urllib_parse.quote(user)
password = ':****'
return '{user}{password}{netloc}... |
def train_surrogate(model, dataset, sampling_rate=2.0, **kwargs):
(train_x, train_y, test_x, test_y) = (dataset['train_x'], dataset['train_y'], dataset['test_x'], dataset['test_y'])
is_continuous = dataset.get('is_continuous', None)
is_categorical = dataset.get('is_categorical', None)
is_integer = datas... |
def test_set_schema_path(monkeypatch):
    """Calling pyhf.schema(<path>) should update the global pyhf.schema.path."""
    # Re-setting 'schemas' to its current value registers the attribute with
    # monkeypatch, so the global schema state is restored after the test.
    monkeypatch.setattr(pyhf.schema.variables, 'schemas', pyhf.schema.variables.schemas, raising=True)
    new_path = pathlib.Path('a/new/path')
    pyhf.schema(new_path)
    assert (pyhf.schema.path == new_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model_type', default=None, type=str, required=True)
parser.add_argument('--base_model', default=None, type=str, required=True)
parser.add_argument('--lora_model', default='', type=str, help='If None, perform inference on the base mode... |
.core
.usefixtures('pandas_df_for_labelencoder', 'pandas_df_for_labelencoder_modified')
def test_label_encoder_with_null_values_pandas(pandas_df_for_labelencoder, pandas_df_for_labelencoder_modified):
encoder = LabelEncoder([LabelEncodingRule('item1'), LabelEncodingRule('item2')])
encoder.fit(pandas_df_for_labe... |
class ROUGE():
def __init__(self, tokenizer: Tokenizer=None) -> None:
if (tokenizer is None):
self.tokenizer = Tokenizer(word_delimiter=' ')
else:
self.tokenizer = tokenizer
def compute(self, predictions: List[str], references: List[List[str]], rouge_types: Union[(str, Li... |
def test_register_nonprocessor():
    """Registering a class that is not a Processor must raise ProcessorRegisterException."""
    # NOTE(review): '_processor(...)' followed by the class body looks like a
    # mangled decorator application ('@processor("nonprocessor")' decorating
    # NonProcessor) -- confirm against the original source.
    with pytest.raises(ProcessorRegisterException):
        _processor('nonprocessor')
        class NonProcessor():
            pass
class _OutputDuplicator(object):
def __init__(self, output):
assert (output in ['stdout', 'stderr'])
self.output = output
self._fds = []
self._original_output = getattr(sys, output)
setattr(sys, output, self)
def __del__(self):
setattr(sys, self.output, self._orig... |
.parametrize('action_dist, estimated_rewards_by_reg_model, description_1', valid_input_of_create_estimator_inputs)
.parametrize('alpha, n_bootstrap_samples, random_state, err, description_2', invalid_input_of_estimate_intervals)
def test_meta_estimate_intervals_using_invalid_input_data(action_dist, estimated_rewards_by... |
def run_parallel(x):
(target_feature, x, y, features, forest_model, model_weights, descriptor) = x
feat_idx = features.get_loc(target_feature)
p_path = 'data/p_{}_{}.npy'.format(descriptor, feat_idx)
if os.path.exists(p_path):
p_value = np.load(p_path)
print('p-value for {}: {}'.format(t... |
def reproduce_experiments(args):
model = init_model(args.model_name, args.credentials_path)
if (args.generate_tests_for == 'spider'):
(tests_df, databases) = step_0_1_get_spider_data(spider_input_path=args.spider_input_path)
else:
databases = step_0_get_proprietary_data(model_name=args.model... |
class FractionFieldToFunctionField(FunctionFieldVectorSpaceIsomorphism):
    """Isomorphism mapping a fraction field into the corresponding function field."""
    def _call_(self, f):
        # Build the image by handing the fraction-field element to the
        # codomain's element constructor.
        return self.codomain()._element_constructor_(f)
    def section(self):
        """Return the inverse map (function field -> fraction field)."""
        parent = Hom(self.codomain(), self.domain())
        return parent.__make_element_class__(FunctionFieldToFractionField)(parent)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--train_data_file', default=None, type=str, required=True, help='The input training data file (a text file).')
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions... |
class BoolBinopNode(ExprNode):
subexprs = ['operand1', 'operand2']
is_temp = True
operator = None
operand1 = None
operand2 = None
def infer_type(self, env):
type1 = self.operand1.infer_type(env)
type2 = self.operand2.infer_type(env)
return PyrexTypes.independent_spanning_... |
_start_docstrings('Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ', BERT_START_DOCSTRING)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
def __init__(self, config):
super().__i... |
class FailingTask(MockTask):
    """Mock task whose run() always raises ValueError with a preset message."""
    def __init__(self, message: str='', results=None):
        super().__init__(results)
        # Message carried by the ValueError raised from run().
        self.message = message
    def run(self):
        # Record the invocation (self.calls is presumably provided by
        # MockTask -- confirm), then fail unconditionally.
        self.calls.append(())
        raise ValueError(self.message)
_grad()
def evaluate(model, graph, feat, pseudo, labels, train_idx, val_idx, test_idx, metric='acc'):
model.eval()
with th.no_grad():
pred = model(feat, pseudo, graph)
val_loss = cross_entropy(pred[val_idx], labels[val_idx])
test_loss = cross_entropy(pred[test_idx], labels[test_idx])
if (met... |
def get_transform(name='imagenet', input_size=None, scale_size=None, normalize=None, augment=True):
normalize = (normalize or __imagenet_stats)
if (name == 'imagenet'):
scale_size = (scale_size or 256)
input_size = (input_size or 224)
if augment:
return inception_preproccess(... |
class SSPP_LUT(BasicMachine):
def __init__(self, **kwargs):
BasicMachine.__init__(self, **kwargs)
self.optimizers = []
self.net_D = net_D(in_channels=3).to(self.device)
self.optimizer_D = torch.optim.RMSprop(self.net_D.parameters(), lr=self.args.lr)
self.optimizer = torch.opt... |
def decode_states(js_context):
def unpack(values):
return list(zip(*[(value['x'], value['y']) for value in values]))
state_names = [f.name for f in dataclasses.fields(State)]
data = {}
sample_times = None
for (idx, state_name) in enumerate(state_names):
state_data = js_context.eval(f... |
def make_item_catalog(inp: str, output_dir: str=C.ROOT):
if (not pathlib.Path(inp).exists()):
import requests
url = '
print(f'download {url} to {inp}.')
r = requests.get(url)
with open(inp, 'w') as f:
json.dump(r.json(), f, indent=2)
data = json.load(open(inp)... |
def parse_args():
parser = argparse.ArgumentParser('Argument for Self-Supervised Pre-training using Resolution Sequence Prediction (RSP)')
parser.add_argument('--print_freq', type=int, default=10, help='print frequency')
parser.add_argument('--save_freq', type=int, default=10, help='save frequency')
par... |
def display_args_to_z3(params):
    """Write a comma-separated argument list 'a0, a1, ...' to core_py,
    wrapping string-typed parameters in _str_to_bytes(...).
    """
    for idx, param in enumerate(params):
        if idx > 0:
            core_py.write(', ')
        if param_type(param) == STRING:
            core_py.write('_str_to_bytes(a%s)' % idx)
        else:
            core_py.write('a%s' % idx)
class TestOptions(BaseOptions):
def __init__(self):
super(TestOptions, self).__init__()
self.parser.add_argument('--phase', type=str, default='test', help='phase for dataloading')
self.parser.add_argument('--num', type=int, default=5, help='number of outputs per image')
self.parser.a... |
class CNNEvaluation(object):
def __init__(self, gpu_num, epoch_num=50, dataset='cifar10', verbose=True, imgSize=64, batchsize=16, mask='center'):
self.gpu_num = gpu_num
self.epoch_num = epoch_num
self.dataset = dataset
self.verbose = verbose
self.imgSize = imgSize
sel... |
def build_CBLs(inplanes, planes, kernel_sizes, strides, paddings):
layers = []
for i in range(len(planes)):
if (i == 0):
inplanes = inplanes
else:
inplanes = planes[(i - 1)]
outplanes = planes[i]
stride = strides[i]
padding = paddings[i]
ke... |
def _get_redshifts_in_range(redshifts, z_low, z_high, bracket):
redshifts = np.array(redshifts)
redshifts.sort()
if bracket:
if ((z_low < redshifts.min()) or (z_high > redshifts.max())):
raise Exception('No redshifts to bracket range.')
z_low = redshifts[(redshifts <= z_low)][(- ... |
def test_ByteMaskedArray_RecordArray_NumpyArray():
a = ak.contents.bytemaskedarray.ByteMaskedArray(ak.index.Index(np.array([1, 0, 1, 0, 1], dtype=np.int8)), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']), valid_when=True)
assert (a.to... |
def download_cityscapes(path, username, password, overwrite=False):
_CITY_DOWNLOAD_URLS = [('gtFine_trainvaltest.zip', '99f532cb1af174f5fcc4c5bc8feea8c66246ddbc'), ('leftImg8bit_trainvaltest.zip', '2c0b77ce9933cc635adda307fbba5566f5d9d404')]
download_dir = (path / 'downloads')
download_dir.mkdir(parents=Tru... |
def run_inspect(pycharm_dir, src_dir, skip_pycharm_inspect=False):
out_tmp_dir = tempfile.mkdtemp()
fold_start('script.inspect')
if (not skip_pycharm_inspect):
cmd = [('%s/bin/inspect.sh' % pycharm_dir), src_dir, ('%s/PyCharm-inspection-profile.xml' % my_dir), out_tmp_dir, '-v2']
print(('$ %... |
def get_training_model(optimizer=tf.keras.optimizers.Adam()):
resnet50 = tf.keras.applications.ResNet50(weights=None, include_top=False)
model = tf.keras.Sequential([resnet50, GlobalAveragePooling2D(), Dropout(0.2), Dense(5)])
model.compile(optimizer=optimizer, loss=tf.keras.losses.SparseCategoricalCrossent... |
def detection_collate(batch):
(inputs, labels, video_idx, extra_data) = zip(*batch)
(inputs, video_idx) = (default_collate(inputs), default_collate(video_idx))
labels = torch.tensor(np.concatenate(labels, axis=0)).float()
collated_extra_data = {}
for key in extra_data[0].keys():
data = [d[ke... |
def random(dtype=float) -> Union[(float, int)]:
    """Taichi-scope random value of *dtype* (presumably uniform in [0, 1) for
    floats -- confirm against Taichi docs).
    """
    # Normalize the Python type to a Taichi dtype before building the RNG expr.
    dtype = cook_dtype(dtype)
    x = expr.Expr(_ti_core.make_rand_expr(dtype, _ti_core.DebugInfo(impl.get_runtime().get_current_src_info())))
    return impl.expr_init(x)
class SEBrain(sb.Brain):
def compute_forward(self, batch, stage):
batch = batch.to(self.device)
(noisy_wavs, lens) = batch.noisy_sig
noisy_feats = self.compute_feats(noisy_wavs)
mask = self.modules.model(noisy_feats)
predict_spec = torch.mul(mask, noisy_feats)
predict... |
def ToGraphMP(tspec, *args):
    """Dispatch multiprocessing-graph construction by type spec.

    Builds a PNGraphMP when that type is requested; any other spec yields None.
    """
    return ToGraphMP_PNGraphMP(*args) if tspec == PNGraphMP else None
class DatasetCatalog(object):
DATA_DIR = 'datasets'
DATASETS = {'anet_cap_train': {'feature_path': os.path.join(ANET_FEATURES_PATH, 'anet-cap/anet_c3d.hdf5'), 'ann_file_path': os.path.join(ANNOTATIONS_PATH, 'anet-cap/train.json'), 'embeddings_path': os.path.join(EMBEDDINGS_PATH, 'glove.840B.300d.txt')}, 'anet_c... |
def test_call_if():
    """call_if must add 2*i at even indices and 3*i at odd indices, in place."""
    data = np.random.randint(1, 10, size=(10,), dtype=np.int32)
    expected = np.copy(data)
    for idx in range(10):
        expected[idx] += 2 * idx if idx % 2 == 0 else 3 * idx
    # Compile to an SDFG first (mirrors the original test), then run in place.
    sdfg = call_if.to_sdfg()
    call_if(data)
    assert np.array_equal(data, expected)
def rescale_img(img, image_shape, current_scale_transform):
w = image_shape[2]
h = image_shape[1]
desired_h = (h * current_scale_transform)
desired_w = (w * current_scale_transform)
img = torchvision.transforms.Resize([int(desired_h), int(desired_w)])(img)
w_pad = ((w - (w * current_scale_transf... |
class SPPParameter(_message.Message):
    """Generated protocol-buffer message class for SPPParameter.

    Auto-generated (protobuf reflection); do not edit by hand.
    """
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SPPPARAMETER
class COCOEvalCap():
def __init__(self, coco, cocoRes):
self.evalImgs = []
self.eval = {}
self.imgToEval = {}
self.coco = coco
self.cocoRes = cocoRes
self.params = {'image_id': coco.getImgIds()}
def evaluate(self):
imgIds = self.params['image_id']
... |
# NOTE(review): the bare '_model' below is likely a mangled '@register_model'
# decorator from extraction -- confirm against the original source.
_model
def gluon_inception_v3(pretrained=False, **kwargs):
    """Inception-V3 variant built from the 'gluon_inception_v3' config."""
    model = _inception_v3('gluon_inception_v3', pretrained=pretrained, **kwargs)
    return model
def det_QQ(n=300, num_bound=10, den_bound=10, system='sage'):
if (system == 'sage'):
A = random_matrix(QQ, n, n, num_bound=num_bound, den_bound=den_bound)
t = cputime()
d = A.determinant()
return cputime(t)
elif (system == 'magma'):
code = ('\nn := %s;\nA := MatrixAlgebra... |
class InfiniteAugmentedValuation(FinalAugmentedValuation, InfiniteInductiveValuation):
def __init__(self, parent, v, phi, mu):
FinalAugmentedValuation.__init__(self, parent, v, phi, mu)
InfiniteInductiveValuation.__init__(self, parent, phi)
_method
def value_group(self):
return self.... |
_module()
class PConvEncoderDecoder(nn.Module):
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = build_component(encoder)
self.decoder = build_component(decoder)
self.fp16_enabled = False
_fp16()
def forward(self, x, mask_in):
enc_outputs = self... |
def _setup_output_path(output_path: str) -> None:
path = Path(output_path).resolve()
if (not path.exists()):
path.mkdir(parents=True, exist_ok=True) |
def test_UnmaskedArray_NumpyArray():
v1 = json.loads('{"class":"UnmaskedArray","content":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","parameters":{},"form_key":null},"parameters":{},"form_key":null}')
v2 = ak.forms.from_dict(v1).to_dict()
assert (v2 == {'class': 'U... |
class TreeWalker(base.NonRecursiveTreeWalker):
def getNodeDetails(self, node):
if (node.nodeType == Node.DOCUMENT_TYPE_NODE):
return (base.DOCTYPE, node.name, node.publicId, node.systemId)
elif (node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE)):
return (base.TEXT, n... |
class SuperAlgebrasWithBasis(SuperModulesCategory):
def extra_super_categories(self):
return [self.base_category().Graded()]
class ParentMethods():
def graded_algebra(self):
from sage.algebras.associated_graded import AssociatedGradedAlgebra
return AssociatedGradedAlgebra... |
def parse_flow_transition_routes(flow_object, name_to_display_name):
transition = {'intent': {}, 'condition': {}, 'fulfillment': {}, 'flow': [], 'page': []}
for (i, transition_to) in enumerate(flow_object.transition_routes):
target = ''
if transition_to.target_flow:
target_flow = nam... |
.parametrize('ctx, func_name', ctxs)
.parametrize('axis', [0, 1, 2, (- 1), (- 2), (- 3)])
.parametrize('seed', [313])
def test_crelu_forward_backward(seed, axis, ctx, func_name):
from nbla_test_utils import cap_ignore_region, function_tester
rng = np.random.RandomState(seed)
inputs = [cap_ignore_region((rng... |
def simGetInt32Parameter(parameter):
    """Query an int32 simulator parameter through the C library binding.

    NOTE(review): ``ret`` is both checked by _check_return and returned, so
    the binding apparently encodes errors in-band with the value -- confirm
    against the library's calling convention.
    """
    ret = lib.simGetInt32Parameter(parameter)
    _check_return(ret)
    return ret
class MT5ForConditionalGenerationWithLatentSpace(T5ForConditionalGenerationWithLatentSpace):
    """mT5 flavor of the latent-space conditional generation model.

    Reuses the T5 implementation unchanged; only the model type and config
    class differ.
    """
    model_type = 'mt5'
    config_class = MT5Config
    # Regex patterns of checkpoint keys to skip: the encoder's embed_tokens
    # weight is neither saved nor required at load time (presumably because
    # it is shared/tied with other embeddings -- confirm).
    _keys_to_ignore_on_load_missing = ['encoder\\.embed_tokens\\.weight']
    _keys_to_ignore_on_save = ['encoder\\.embed_tokens\\.weight']
class ResNet1d(nn.Sequential):
def __init__(self, block, layers, kernel_size=3, num_classes=2, input_channels=3, inplanes=64, fix_feature_dim=True, kernel_size_stem=None, stride_stem=2, pooling_stem=True, stride=2, lin_ftrs_head=None, ps_head=0.5, bn_final_head=False, bn_head=True, act_head='relu', concat_pooling=T... |
class DistillTrainingArguments(TrainingArguments):
output_dir: Optional[str] = field(default=None, metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'})
per_device_train_batch_size: int = field(default=32, metadata={'help': 'Batch size per GPU/TPU core/CPU for t... |
class AlignmentStats(object):
def __init__(self, data_stream, vctk, configuration, device, model, results_path, experiment_name, alignment_subset):
self._data_stream = data_stream
self._vctk = vctk
self._configuration = configuration
self._device = device
self._model = model
... |
.parametrize('num_inducing_points', [(- 1), 0])
def test_build_svgp_raises_for_invalid_num_inducing_points(num_inducing_points: int) -> None:
(qp, obs) = mock_data()
data = mk_dataset(qp, obs)
search_space = (Box([0.0], [1.0]) ** qp.shape[(- 1)])
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
bui... |
class ImageNet12(object):
def __init__(self, trainFolder, testFolder, num_workers=8, pin_memory=True, size_images=224, scaled_size=256, type_of_data_augmentation='rand_scale', data_config=None):
self.data_config = data_config
self.trainFolder = trainFolder
self.testFolder = testFolder
... |
def benchmark_hnf(nrange, bits=4):
    """Time Hermite normal form of random n x n integer matrices.

    For each n in *nrange*, builds a random ZZ matrix with entries in
    [-2**bits, 2**bits] and prints a ('sage', n, bits, seconds) tuple.
    """
    b = (2 ** bits)
    for n in nrange:
        a = random_matrix(ZZ, n, x=(- b), y=b)
        t = cputime()
        # proof=False skips the certification step for speed.
        (h, _) = hnf(a, proof=False)
        tm = cputime(t)
        print(('%s,' % (('sage', n, bits, tm),)))
def test_regular_string_string_valid():
strings = ak.to_regular([['abc', 'efg']], axis=2)
numbers = ak.to_regular([[['ab'], ['bc', 'de']]], axis=3)
(x, y) = ak.broadcast_arrays(strings, numbers, right_broadcast=False)
assert (x.tolist() == [[['abc'], ['efg', 'efg']]])
assert (y.tolist() == [[['ab'],... |
_utils.test(require=ti.extension.mesh)
def test_nested_mesh_for():
mesh_builder = ti.lang.mesh._TetMesh()
mesh_builder.faces.place({'a': ti.i32, 'b': ti.i32})
model = mesh_builder.build(ti.Mesh.load_meta(model_file_path))
def foo():
for f in model.faces:
for i in range(f.verts.size):... |
def include_paths(cuda: bool=False) -> List[str]:
lib_include = os.path.join(_TORCH_PATH, 'include')
paths = [lib_include, os.path.join(lib_include, 'torch', 'csrc', 'api', 'include'), os.path.join(lib_include, 'TH'), os.path.join(lib_include, 'THC')]
if (cuda and IS_HIP_EXTENSION):
paths.append(os.... |
class problem(Structure):
_names = ['l', 'n', 'y', 'x', 'bias']
_types = [c_int, c_int, POINTER(c_double), POINTER(POINTER(feature_node)), c_double]
_fields_ = genFields(_names, _types)
def __init__(self, y, x, bias=(- 1)):
if (len(y) != len(x)):
raise ValueError('len(y) != len(x)')
... |
('/chat', methods=['POST'])
_user_limiter.limit(None, methods=['POST'])
_limiter.limit(None, methods=['POST'])
def chat():
request_args = req_parser.parse_args()
logger.info('Input arguments received: %s', str(filter_nons(request_args)))
experiment_id = request_args['experiment_id']
new_user_utterance =... |
def create_cnn(width, height, depth, filters=(16, 32, 64), regress=False):
inputShape = (height, width, depth)
chanDim = (- 1)
inputs = Input(shape=inputShape)
for (i, f) in enumerate(filters):
if (i == 0):
x = inputs
x = Conv2D(f, (3, 3), padding='same')(x)
x = Activ... |
class OpenWhiskTestSequenceNodejs(unittest.TestCase, metaclass=TestSequenceMeta, benchmarks=benchmarks_nodejs, deployment_name='openwhisk', triggers=[Trigger.TriggerType.HTTP]):
def get_deployment(self, benchmark_name):
deployment_name = 'gcp'
assert cloud_config
deployment_client = self.cli... |
.ort
.gpu
def test_fast_mb(use_cpp_dispatcher):
with change_default(donnx.ONNXConv, 'cuDNN'), change_default(donnx.ONNXBatchNormalization, 'cuDNN'):
with torch.no_grad():
dace_inputs = torch.rand(8, 32, 224, 224).cuda()
torch_inputs = torch.clone(dace_inputs)
(block_params, g... |
def dist2bbox(distance, anchor_points, box_format='xyxy'):
(lt, rb) = torch.split(distance, 2, (- 1))
x1y1 = (anchor_points - lt)
x2y2 = (anchor_points + rb)
if (box_format == 'xyxy'):
bbox = torch.cat([x1y1, x2y2], (- 1))
elif (box_format == 'xywh'):
c_xy = ((x1y1 + x2y2) / 2)
... |
def convert_to_unicode(text):
def six_ensure_text(s, encoding='utf-8', errors='strict'):
if isinstance(s, six.binary_type):
return s.decode(encoding, errors)
elif isinstance(s, six.text_type):
return s
else:
raise TypeError(("not expecting type '%s'" % typ... |
class BasicBlock(nn.Module):
def __init__(self, in_channels, out_channels, expansion=1, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN')):
super(BasicBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_c... |
class RCToKRTBijectionTypeA2Odd(RCToKRTBijectionTypeA):
def next_state(self, height):
height -= 1
n = self.n
ell = ([None] * (2 * n))
b = None
last_size = 0
for a in range(height, n):
ell[a] = self._find_singular_string(self.cur_partitions[a], last_size)
... |
def dont_suppress_errors(function):
    """Decorator: let exceptions from *function* propagate, but print the
    traceback first so the failure stays visible even if a caller later
    swallows the exception.

    Fix: the original body contained a bare ``(function)`` expression
    statement -- a no-op, almost certainly a mangled ``@wraps(function)``
    decorator. Restored via functools.wraps so the wrapper keeps the wrapped
    function's name/docstring.
    """
    import functools

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except Exception:
            traceback.print_exc()
            raise
    return wrapper
def test_validate_series_lat_long(df_lat_long_column: pd.DataFrame) -> None:
    """validate_lat_long on the messy fixture column must flag exactly the
    malformed rows (indices 6, 10, 11, 12 are expected invalid).
    """
    srs_valid = validate_lat_long(df_lat_long_column['messy_lat_long'])
    srs_check = pd.Series([True, True, True, True, True, True, False, True, True, True, False, False, False], name='messy_lat_long')
    assert srs_check.equals(srs_valid)
def test_populate_and_train_one_v1(save_path):
    """Fetch a small 10x dataset and check that one unsupervised training
    epoch completes without error (smoke test; downloads data)."""
    sp = os.path.join(save_path, '10X')
    # remove_extracted_data=True keeps the test directory small after loading.
    dataset = dataset_10x(dataset_name='cd4_t_helper', remove_extracted_data=True, save_path=sp)
    unsupervised_training_one_epoch(dataset)
class ConfigParser():
def __init__(self, args, options='', timestamp=True):
for opt in options:
args.add_argument(*opt.flags, default=None, type=opt.type)
args = args.parse_args()
self.args = args
if args.device:
os.environ['CUDA_VISIBLE_DEVICES'] = args.devic... |
def mol_ok(mol):
    """Return True when *mol* sanitizes cleanly and its atom count lies in
    (5, target_size), where target_size is drawn from
    N(average_size, size_stdev) -- module globals.

    Fix: the bare ``except:`` was narrowed to ``except Exception`` so that
    KeyboardInterrupt/SystemExit are no longer swallowed; any sanitization
    or attribute failure (including mol=None) still yields False.
    """
    try:
        Chem.SanitizeMol(mol)
        target_size = (size_stdev * np.random.randn()) + average_size
        if (mol.GetNumAtoms() > 5) and (mol.GetNumAtoms() < target_size):
            return True
        else:
            return False
    except Exception:
        return False
class Demo(object):
def __init__(self):
config = flags.FLAGS
config.out_dir = os.path.join(config.out_base_dir, config.model_name, str(config.run_id).zfill(2))
config.max_sent_size = config.sent_size_th
config.max_num_sents = config.num_sents_th
config.max_ques_size = config.... |
def _make_integral_poly(exact_modulus, p, prec):
    """Return *exact_modulus* with its coefficients coerced into ZZ.

    If direct coercion raises TypeError (e.g. non-integral coefficients),
    first reduce modulo p**prec and then lift back to ZZ.
    """
    try:
        return exact_modulus.change_ring(ZZ)
    except TypeError:
        return exact_modulus.change_ring(Zmod((p ** prec))).change_ring(ZZ)
def main():
gui = ti.GUI('SDF Path Tracer', res)
last_t = 0
for i in range(50000):
render()
interval = 10
if (((i % interval) == 0) and (i > 0)):
print(f'{(interval / (time.time() - last_t)):.2f} samples/s')
last_t = time.time()
img = (color_buffer... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.