code stringlengths 101 5.91M |
|---|
def main():
print('Taichi system diagnose:')
print('')
executable = sys.executable
print(f'python: {sys.version}')
print(f'system: {sys.platform}')
print(f'executable: {executable}')
print(f'platform: {platform.platform()}')
print(f"architecture: {' '.join(platform.architecture())}")
... |
def ListOfType(ofType: type) -> ConfigListOfType.__class__:
    """Build and return a ConfigListOfType specialized for *ofType* elements."""
    specialized_cls = ConfigListOfType.buildWith(ofType)
    return specialized_cls
class LearnedRouter(torch.nn.Module):
def __init__(self, args: Arguments):
super().__init__()
self.args = args
self.layer = torch.nn.Linear(args.hidden_size, args.moe_num_experts, bias=False, dtype=common.dtype(args), device=args.device)
args.init_method(self.layer.weight)
def ji... |
def parse_table(raw_table: Dict[(str, Any)]) -> Table:
def get_cell_values(cells: List[dict]) -> List[Any]:
values = []
for cell in cells:
value = (cell['value'] if ('value' in cell) else np.nan)
if (('contamination_level' in cell) and (cell['contamination_level'] == 'strong'... |
class TFLongformerForSequenceClassification():
    """Import-guard stub used when the TensorFlow backend is unavailable.

    Both instantiation and the ``from_pretrained`` entry point delegate to
    ``requires_tf`` (defined elsewhere), which presumably raises an
    informative error when TensorFlow is not installed — TODO confirm.
    """
    def __init__(self, *args, **kwargs):
        requires_tf(self)
    def from_pretrained(self, *args, **kwargs):
        # Guard the factory entry point the same way as the constructor.
        requires_tf(self)
class LieAlgebraMorphism_from_generators(LieAlgebraHomomorphism_im_gens):
def __init__(self, on_generators, domain=None, codomain=None, check=True, base_map=None, category=None):
from sage.categories.lie_algebras import LieAlgebras
cm = get_coercion_model()
if (domain is None):
i... |
_module()
class EncHead(BaseDecodeHead):
def __init__(self, num_codes=32, use_se_loss=True, add_lateral=False, loss_se_decode=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2), **kwargs):
super(EncHead, self).__init__(input_transform='multiple_select', **kwargs)
self.use_se_loss = use... |
class ElectraForQuestionAnswering(metaclass=DummyObject):
    """Import-guard stub standing in for the real class when torch is absent."""
    # Backends that must be installed before the real implementation is usable.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # requires_backends (defined elsewhere) presumably raises when torch
        # is unavailable — TODO confirm against the shared dummy-object helpers.
        requires_backends(self, ['torch'])
def get_long_tail_partition(n_relations, n_machine):
assert (n_relations > 0), 'n_relations must be a positive number.'
assert (n_machine > 0), 'n_machine must be a positive number.'
partition_book = ([0] * n_relations)
part_id = 0
for i in range(n_relations):
partition_book[i] = part_id
... |
def load_lm():
    """Load the pretrained uncased BERT base model together with its tokenizer.

    Returns:
        tuple: (model, tokenizer) loaded from the 'bert-base-uncased' weights.
    """
    weights_name = 'bert-base-uncased'
    tokenizer = BertTokenizer.from_pretrained(weights_name)
    model = BertModel.from_pretrained(weights_name)
    return (model, tokenizer)
def load_short2tb(filename):
short2tb = dict()
with open(filename) as infile:
for line in infile:
line = line.strip()
if (len(line) == 0):
continue
array = line.split()
assert (len(array) == 2)
short2tb[array[0]] = array[1]
... |
def f_saliency_whitebox_ebp(wb, im):
P = torch.zeros((1, wb.net.num_classes()))
P[0][0] = 1.0
img_saliency = wb.ebp(wb.net.preprocess(im.pil()), P)
if (np.max(img_saliency) == 255):
img_saliency = (img_saliency.astype(np.float32) / 255.0)
return np.array(_blend_saliency_map(np.array(im.pil()... |
def predictions(img):
    """Run the module-level `model` on `img` and return the result as NumPy.

    The model output is squeezed to drop the batch dimension, detached and
    moved to CPU, then rescaled.

    Args:
        img: input image in whatever form `preprocess_image` (defined
            elsewhere) accepts.

    Returns:
        The post-processed prediction array.
    """
    x = preprocess_image(img)
    # Removed dead code: `start_time = timeit.default_timer()` was assigned
    # here but never read anywhere in the function.
    output = model(x)
    output = torch.squeeze(output, 0)  # drop the leading batch dimension
    output = output.detach().cpu().numpy()
    # NOTE(review): multiplying by 255 and then by (max / 255) reduces to
    # output * output.max() overall — confirm this normalization is intended.
    output = output.dot(255)
    output *= (output.max() / 255.0)
    return output
def tidy_total(input_name):
global beam_size
import pickle as pk
dev_recs_loss = pk.load(open('dev_recs_loss.pkl', 'rb'))
with open(input_name, 'r') as stream, open('devset.recs.full.txt', 'w') as stream_1:
for (idx, line) in enumerate(stream):
data = line.strip().split('\t')
... |
def build_gaussian_distribution(action: ActionOutput) -> GaussianDistribution:
    """Build a GaussianDistribution from a policy head's ActionOutput."""
    assert (action.logstd is not None)
    std = action.logstd.exp()
    return GaussianDistribution(loc=action.squashed_mu, std=std, raw_loc=action.mu)
class VQAEval():
def __init__(self, vqa, vqaRes, n=2):
self.n = n
self.accuracy = {}
self.evalQA = {}
self.evalQuesType = {}
self.evalAnsType = {}
self.vqa = vqa
self.vqaRes = vqaRes
self.params = {'question_id': vqa.getQuesIds()}
self.contract... |
class FirstResBlockDiscriminator(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(FirstResBlockDiscriminator, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1)
... |
def example():
task = generate_task(task_generator_id='picking')
world_params = dict()
world_params['skip_frame'] = 3
world_params['seed'] = 0
stable_baselines_policy_path = './model_2000000_steps.zip'
model = SAC.load(stable_baselines_policy_path)
def policy_fn(obs):
return model.pr... |
def test_backed_anndata_sparse(adata, save_path):
adata.X = csr_matrix(adata.X)
path = os.path.join(save_path, 'test_data2.h5ad')
adata.write_h5ad(path)
adata = anndata.read_h5ad(path, backed='r+')
adata_manager = generic_setup_adata_manager(adata, batch_key='batch')
bd = AnnTorchDataset(adata_m... |
class HeaderSet(collections_abc.MutableSet):
def __init__(self, headers=None, on_update=None):
self._headers = list((headers or ()))
self._set = set([x.lower() for x in self._headers])
self.on_update = on_update
def add(self, header):
self.update((header,))
def remove(self, h... |
.imputils.lower_constant(ArrayViewType)
def lower_const_Array(context, builder, viewtype, array):
    """Lower a constant Array by delegating to its underlying numba view."""
    numba_view = array._numbaview
    return lower_const_view(context, builder, viewtype, numba_view)
class KitchenEnv(BenchEnv):
def __init__(self, action_repeat=1, use_goal_idx=False, log_per_goal=False, control_mode='end_effector', width=64):
super().__init__(action_repeat, width)
self.use_goal_idx = use_goal_idx
self.log_per_goal = log_per_goal
with self.LOCK:
self._e... |
def basis_complement(B):
F = B.parent().base_ring()
m = B.nrows()
n = B.ncols()
C = MatrixSpace(F, (n - m), n, sparse=True)(0)
k = 0
l = 0
for i in range(m):
for j in range(k, n):
if (B[(i, j)] == 0):
C[(l, j)] = 1
l += 1
else:
... |
def maybe_parse_mpi_env_vars(args):
    """Populate rank/local_rank/world_size from Open MPI env vars.

    Only acts when `args.distributed_backend` is 'mpi'; otherwise `args`
    is left untouched.
    """
    if (args.distributed_backend != 'mpi'):
        return
    env = os.environ
    args.rank = int(env['OMPI_COMM_WORLD_RANK'])
    args.local_rank = int(env['OMPI_COMM_WORLD_LOCAL_RANK'])
    args.world_size = int(env['OMPI_COMM_WORLD_SIZE'])
def load_entry_point(dist, group, name):
    """Resolve distribution `dist` and load its entry point `name` in `group`."""
    distribution = get_distribution(dist)
    return distribution.load_entry_point(group, name)
def run_index_pred_eval(args):
    """Run indexing, prediction, and recall evaluation, timing the first two stages."""
    index_start = time()
    run_index(args)
    pred_start = time()
    run_pred(args)
    pred_end = time()
    evaluate_recall(args)
    index_mins = ((pred_start - index_start) / 60)
    pred_mins = ((pred_end - pred_start) / 60)
    print(('run_index: %.1f mins, run_pred: %.1f mins' % (index_mins, pred_mins)))
def inverse_cdf(u, dstar, dmin, dmax):
    """Map a uniform sample `u` through the inverse CDF via y0 and A (defined elsewhere)."""
    finv = y0(A(u, dstar, dmin, dmax))
    scale = ((3 * dstar) / 2)
    return (scale * (finv - 1))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset', type=str, default='~/t7/ycb_video')
parser.add_argument('-v', '--video', type=str, default='0048')
args = parser.parse_args()
git_repo = Path(git.Repo(search_parent_directories=True).working_tree_dir)
sys.path.... |
def get_concept_id(code, description=None, require_exists=False):
if (description is None):
description = code
if (code not in code_to_concept_id_map):
assert (not require_exists)
code_to_concept_id_map[code] = (extra_code_offset + len(extra_codes))
extra_codes.append((code, desc... |
class DictAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if (nargs is not None):
raise ValueError('nargs not allowed')
super(DictAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_strin... |
class SimpleReplayBuffer(ReplayBuffer):
def __init__(self, max_replay_buffer_size, observation_dim, action_dim, env_info_sizes):
self._observation_dim = observation_dim
self._action_dim = action_dim
self._max_replay_buffer_size = max_replay_buffer_size
self._observations = np.zeros((... |
class RandomSampler(Sampler):
def __init__(self, data_source, replacement=False, num_samples=None):
self.data_source = data_source
self.replacement = replacement
self._num_samples = num_samples
if (not isinstance(self.replacement, bool)):
raise ValueError('replacement sho... |
def get_equal_len_datasets(dataset1, dataset2):
if (len(dataset1) > len(dataset2)):
rand_idxs = np.random.choice(range(len(dataset1)), size=len(dataset2), replace=False)
subsample_dataset(dataset1, rand_idxs)
elif (len(dataset2) > len(dataset1)):
rand_idxs = np.random.choice(range(len(da... |
(scope='function')
def default_backend(backend):
    """Fixture body: install `backend` as pyhf's default and hand it to the test."""
    pyhf.set_backend(*backend, default=True)
    yield backend
_numpy_output(check_dtype=True)
def test_ufunc_accumulate_nested_call(Z: dace.complex64[(10, 10)]):
    """Running-sum (np.add.accumulate) over the element magnitudes of Z."""
    magnitudes = np.absolute(Z)
    return np.add.accumulate(magnitudes)
def test_pydoc():
    """pydoc must be able to document the pybind11_tests extension module."""
    import pydoc
    import pybind11_tests
    assert (pybind11_tests.__name__ == 'pybind11_tests')
    assert (pybind11_tests.__doc__ == 'pybind11 test module')
    assert pydoc.text.docmodule(pybind11_tests)
class MultiAgentEnv(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self, world, reset_callback=None, reward_callback=None, observation_callback=None, info_callback=None, done_callback=None, discrete_action=False, shared_viewer=True, cam_range=1):
self.world = world
se... |
def register_Ns3EpcX2ResourceStatusUpdateHeader_methods(root_module, cls):
cls.add_constructor([param('ns3::EpcX2ResourceStatusUpdateHeader const &', 'arg0')])
cls.add_constructor([])
cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
cls.add_method('Ge... |
def test_integer_data(datadir, mocker):
with open(datadir.joinpath('workspace_integer_data.json'), encoding='utf-8') as spec_file:
spec = json.load(spec_file)
channel_spec = spec['channels'][0]
mocker.patch('pyhf.writexml._ROOT_DATA_FILE')
channel = pyhf.writexml.build_channel(spec, channel_spec... |
class SpecFunctor(Functor, UniqueRepresentation):
def __init__(self, base_ring=None):
from sage.categories.commutative_rings import CommutativeRings
from sage.categories.schemes import Schemes
if (base_ring is None):
domain = CommutativeRings()
codomain = Schemes()
... |
def plot_all_learning_curves_for_third(**kwargs):
for exp in kwargs['exps']:
prefix = ''
exp_attrs = EXP_ATTRS[exp](exp)
for auc_or_final in kwargs['auc_or_final']:
for sp in kwargs['sp_list']:
save_dir = os.path.join('pdf_plots', 'all_third_learning_curves', auc_... |
def _parse_local_version(local):
    """Split a local version segment into a tuple of ints / lowercased strings.

    Returns None when `local` is None (no local segment present).
    """
    if (local is None):
        return None
    parts = _local_version_seperators.split(local)
    return tuple((int(part) if part.isdigit() else part.lower()) for part in parts)
def read_data_json(split_json, interaction_list, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
with open(split_json) as f:
split_data = json.load(f)
print('read_data_json', split_json, len(split_data))
for interaction_data in split_data:
db_id = interaction_data[... |
def list_to_2d_float_array(flst, width, height):
    """Reshape a flat sequence of numbers into a (height, width) float32 array."""
    flat = np.asarray(flst, np.float32)
    return flat.reshape((height, width))
def generate_all_logical_forms_alpha(entity: str, domains: List[str]=None, offline=True):
def r_in_domains(domains0, r0):
for domain in domains0:
if (r0 in domain_dict_relations[domain]):
return True
return False
if offline:
if (entity in in_relations):
... |
class MFCC(torch.nn.Module):
def __init__(self, deltas=True, context=True, requires_grad=False, sample_rate=16000, f_min=0, f_max=None, n_fft=400, n_mels=23, n_mfcc=20, filter_shape='triangular', param_change_factor=1.0, param_rand_factor=0.0, left_frames=5, right_frames=5, win_length=25, hop_length=10):
su... |
def test_cross_module_calls():
import pybind11_cross_module_tests as cm
v1 = m.LocalVec()
v1.append(m.LocalType(1))
v2 = cm.LocalVec()
v2.append(cm.LocalType(2))
assert (m.return_self(v1) is v1)
assert (cm.return_self(v2) is v2)
assert (m.return_self(v2) is v2)
assert (cm.return_self... |
def compute_curl(c, a, work, T, K):
    """Fill `c` with the inverse transform of cross2(work buffer, K, a).

    Uses a scratch array from `work`, applies cross2, then transforms back
    via T.backward (cross2/T are defined elsewhere — presumably a spectral
    curl; verify against caller).
    """
    scratch = work[(a, 0, False)]
    scratch = cross2(scratch, K, a)
    c = T.backward(scratch, c)
    return c
class HumanoidTruncatedObsEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, 'humanoid.xml', 5)
utils.EzPickle.__init__(self)
def _get_obs(self):
data = self.sim.data
return np.concatenate([data.qpos.flat[2:], data.qvel.flat])
d... |
def main():
logging.basicConfig()
logger.setLevel(logging.INFO)
args = parse_args()
voxceleb1 = Path(args.voxceleb1)
assert voxceleb1.is_dir()
save_to = Path(args.save_to)
save_to.mkdir(exist_ok=True, parents=True)
logger.info('Preparing preprocessor')
preprocessor = problem.Preproce... |
def _Workspace_create_net_with_exception_intercept(ws, net, overwrite=False):
    """Create `net` on workspace `ws`, wrapping the call in exception interception."""
    net_name = GetNetName(net)
    serialized_net = StringifyProto(net)
    return CallWithExceptionIntercept(ws._create_net, ws._last_failed_op_net_position, net_name, serialized_net, overwrite)
def get_adversarial_losses_fn(mode):
if (mode == 'gan'):
return get_gan_losses_fn()
elif (mode == 'hinge_v1'):
return get_hinge_v1_losses_fn()
elif (mode == 'hinge_v2'):
return get_hinge_v2_losses_fn()
elif (mode == 'lsgan'):
return get_lsgan_losses_fn()
elif (mode ==... |
class TestGetGPTQConfig(BasePytorchTest):
def __init__(self, unit_test, quantization_method=QuantizationMethod.SYMMETRIC, rounding_type=RoundingType.STE, train_bias=False, quantization_parameters_learning=False):
super().__init__(unit_test)
self.quantization_method = quantization_method
self... |
.parametrize('testcase_seed', [' float_0 = 1.1\n var_0 = module_0.positional_only(float_0)\n', ' float_0 = 1.1\n int_0 = 42\n list_0 = []\n str_0 = "test"\n bytes_0 = b"key"\n str_1 = "value"\n dict_0 = {bytes_0: str_1}\n var_0 = module_0.all_params(float_0, int_0, *list_0, param4=str_0, *... |
def _lazy_init():
global _initialized, _cudart, _original_pid, _queued_calls
if _initialized:
return
if _in_bad_fork:
from sys import version_info
if (version_info < (3, 4)):
msg = "To use CUDA with multiprocessing, you must use Python 3.4+ and the 'spawn' start method"
... |
class TestBufferOptions(CythonTest):
def nonfatal_error(self, error):
self.error = error
self.assertTrue(self.expect_error)
def parse_opts(self, opts, expect_error=False):
assert (opts != '')
s = (u'def f():\n cdef object[%s] x' % opts)
self.expect_error = expect_error
... |
class ConvertMat2UA():
def run(mat_folder, save_folder):
if (not os.path.exists(mat_folder)):
raise FileNotFoundError(('cannot find file ' + mat_folder))
if (not os.path.exists(save_folder)):
os.mkdir(save_folder)
print('create {}'.format(save_folder))
pri... |
def preprocess_image(img: np.ndarray, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) -> torch.Tensor:
    """Turn an image array into a normalized tensor with a leading batch dim.

    Args:
        img: input image as a NumPy array (assumed torchvision-compatible
            HWC layout — TODO confirm against callers).
        mean: per-channel normalization means.
        std: per-channel normalization standard deviations.

    Returns:
        torch.Tensor: normalized tensor of shape (1, C, H, W).
    """
    # Defaults changed from lists to tuples: mutable default arguments are
    # shared across calls and could be mutated; tuples are immutable and
    # behave identically here.
    preprocessing = Compose([ToTensor(), Normalize(mean=mean, std=std)])
    return preprocessing(img.copy()).unsqueeze(0)
def test_range_proof_outside():
group = EcGroup()
x = Secret(value=15)
randomizer = Secret(value=group.order().random())
(g, h) = make_generators(2, group)
lo = 0
hi = 14
com = ((x * g) + (randomizer * h))
with pytest.raises(Exception):
stmt = RangeStmt(com.eval(), g, h, lo, hi, ... |
class TFSpeech2TextForConditionalGeneration(metaclass=DummyObject):
_backends = ['tf']
def __init__(self, *args, **kwargs):
requires_backends(self, ['tf']) |
class ExpandCuTensor(ExpandTransformation):
environments = [environments.cuTensor]
def expansion(node, parent_state, parent_sdfg):
(left_tensor, right_tensor, out_tensor) = node.validate(parent_sdfg, parent_state)
dtype = out_tensor.dtype.base_type
(func, cuda_type, _) = blas_helpers.cub... |
class ProxyRecommender(RecMixin, BaseRecommenderModel):
_charger
def __init__(self, data, config, params, *args, **kwargs):
self._random = np.random
self._params_list = [('_name', 'name', 'name', '', None, None), ('_path', 'path', 'path', '', None, None)]
self.autoset_params()
if... |
class ParseExpression(ParserElement):
def __init__(self, exprs, savelist=False):
super(ParseExpression, self).__init__(savelist)
if isinstance(exprs, _generatorType):
exprs = list(exprs)
if isinstance(exprs, basestring):
self.exprs = [ParserElement._literalStringClass... |
class Batch_generator(data.Dataset):
def __init__(self, nb_answer, img_dir, que_dir, prep_dir, mode='train'):
self.mode = mode
self.img_dir = img_dir
self.nb_answer = nb_answer
self.top_answer = json.load(open(os.path.join(prep_dir, 'ans2idx_1500.json')))
self.word2idx = json... |
def register_Ns3DsssParameterSet_methods(root_module, cls):
cls.add_constructor([param('ns3::DsssParameterSet const &', 'arg0')])
cls.add_constructor([])
cls.add_method('DeserializeInformationField', 'uint8_t', [param('ns3::Buffer::Iterator', 'start'), param('uint8_t', 'length')], is_virtual=True)
cls.a... |
def make_term_args(arg_shapes, arg_kinds, arg_types, ats_mode, domain, material_value=None, poly_space_base=None):
from sfepy.base.base import basestr
from sfepy.discrete import FieldVariable, Material, Variables, Materials
from sfepy.discrete.fem import Field
from sfepy.solvers.ts import TimeStepper
... |
def convert_to_number_type_regex_string(x, number_type):
if (number_type == 'numeral'):
return (str(x) + '(?![A-Za-z0-9\'"])')
if (number_type == 'roman_upper'):
return (write_roman(x) + '(?![A-Za-z0-9\'"])')
elif (number_type == 'roman_lower'):
return (write_roman(x).lower() + '(?![... |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data-path', type=str, required=True, help='Path to data')
parser.add_argument('--train-labels-path', type=str, required=True, help='Path to train labels')
parser.add_argument('--train-path', type=str, help='Path to training data (if p... |
def scale_input_and_detect_single(index, X):
    """Scale one sample, run the module-level autoencoder detector, return (Yhat, error, temp)."""
    scaled = scaler.transform(X)
    frame = pd.DataFrame(index=[index], columns=xset, data=scaled)
    (Yhat, error, temp, _) = autoencoder.detect(frame, theta=theta, window=1, average=True)
    return (Yhat, error, temp)
('Moving nightly files into repo')
def move_nightly_files(spdir, platform):
source_dir = os.path.join(spdir, 'torch')
target_dir = os.path.abspath('torch')
listing = _get_listing(source_dir, target_dir, platform)
if platform.startswith('win'):
_copy_files(listing, source_dir, target_dir)
els... |
def vars_info_vl(var_list):
    """Format each variable as 'name : shape', one per indented line."""
    entries = ['{} : {}'.format(v.name, get_shape(v)) for v in var_list]
    return (' ' + '\n '.join(entries))
class BertForMultiLabelSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
self.classifier = torch.nn.Li... |
def test_vilain_muc():
    """MUC precision/recall/F1 matches each expected triple from the Vilain-95 cases."""
    for (key, response, expected) in VILAIN95:
        scores = _get_muc_prf(key, response)
        assert (scores == expected)
def write_splits(out_directory, snippets, splits):
total_weight = sum((split.weight for split in splits))
divs = []
subtotal = 0.0
for split in splits:
divs.append(int(((len(snippets) * subtotal) / total_weight)))
subtotal = (subtotal + split.weight)
divs.append(len(snippets))
fo... |
def dump_file(json_obj, output_path):
    """Serialize `json_obj` as JSON into the file at `output_path`."""
    with open(output_path, 'w') as out_fh:
        json.dump(json_obj, out_fh)
class TestKernels(unittest.TestCase):
def test_kernels(self):
k = np.array([[6., (- 0.), (- 0.), (- 1.677099)], [(- 0.), 6., 2., 0.], [(- 0.), 2., 1., (- 0.1555163)], [(- 1.677099), 0., (- 0.1555163), 1.]])
k1 = np.array([[4., (- 3.), (- 0.), (- 1.)], [(- 3.), 3., 0.5848329, 1.], [(- 0.), 0.5848329,... |
_function
def _filtered_lrelu_ref(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
assert (isinstance(x, torch.Tensor) and (x.ndim == 4))
(fu_w, fu_h) = _get_filter_size(fu)
(fd_w, fd_h) = _get_filter_size(fd)
if (b is not None):
a... |
class constrained_by_normal(constrained_paramset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.pdf_type = 'normal'
self.auxdata = kwargs.pop('auxdata')
sigmas = kwargs.pop('sigmas', None)
if sigmas:
self.sigmas = sigmas
def width(self):
... |
def sample_data(dump_paths, para=False, doc_sample_ratio=0.2, vec_sample_ratio=0.2, seed=29, max_norm=None, max_norm_cf=1.3, num_dummy_zeros=0, norm_th=999):
vecs = []
random.seed(seed)
np.random.seed(seed)
print('sampling from:')
for dump_path in dump_paths:
print(dump_path)
dumps = [h5... |
def undo_filter_average(filter_unit, scanline, previous, result):
    """Undo the PNG 'Average' filter in place on `result`.

    Each reconstructed byte is the filtered byte from `scanline` plus the
    floor-average of the byte `filter_unit` positions to the left in the
    current row (0 when off the left edge) and the byte directly above in
    `previous`, truncated to 8 bits.
    """
    left_idx = (- filter_unit)
    for idx in range(len(result)):
        left = result[left_idx] if (left_idx >= 0) else 0
        above = previous[idx]
        result[idx] = ((scanline[idx] + ((left + above) >> 1)) & 255)
        left_idx += 1
def list_pretrained_tag_models(tag: str):
    """Return the names of all registered pretrained models containing `tag`.

    Args:
        tag: the pretrained tag to look for in each model's `_PRETRAINED` entry.

    Returns:
        list: matching model names, in registry iteration order.
    """
    # Single pass over items() instead of keys() plus repeated indexing.
    return [name for (name, tags) in _PRETRAINED.items() if (tag in tags)]
def single_prompt_helper(keywords_lst, keywords_dict, fnc, chosen_nums):
counter = 1
chosen_keywords_lst = []
chosen_replacements_lst = []
for i in range(0, len(keywords_lst)):
if (counter <= max(chosen_nums)):
keyword = keywords_lst[i]
keyword_pos = keywords_dict[keyword... |
class ModelDownloader():
def __init__(self, model_env_name='CAFFE2_MODELS'):
self.model_env_name = model_env_name
def _model_dir(self, model):
caffe2_home = os.path.expanduser(os.getenv('CAFFE2_HOME', '~/.caffe2'))
models_dir = os.getenv(self.model_env_name, os.path.join(caffe2_home, 'mo... |
def run_data_preprocessing(args):
vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{args.bert_type}.txt')
tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=args.do_lower_case)
for set_name in ['dev', 'test', 'train']:
new_filename = (path_wikisql + ('%s_tok_processed.pkl' % ... |
class TFMobileViTOutput(tf.keras.layers.Layer):
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int, **kwargs) -> None:
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(hidden_size, name='dense')
self.dropout = tf.keras.layers.Dropout(config.hidd... |
def register_Ns3CallbackImplBase_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
cls.add_method('IsEqual', 'bool', [param('ns3::Pt... |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data-root', '-d', required=True, type=str)
parser.add_argument('--output-manifest-root', '-m', required=True, type=str)
parser.add_argument('--lang', '-l', required=True, type=str)
parser.add_argument('--convert-to-wav', action='s... |
_model
def resnext101_64x4d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
default_cfg = default_cfgs['resnext101_32x4d']
model = ResNet(Bottleneck, [3, 4, 23, 3], cardinality=64, base_width=4, num_classes=num_classes, in_chans=in_chans, **kwargs)
model.default_cfg = default_cfg
if pretraine... |
class ResNetGenerator64(nn.Module):
def __init__(self, z_dim=256, n_label=10, im_size=64, im_chan=3, embed_size=256, nfilter=64, nfilter_max=512, actvn=F.relu, distribution='normal', bottom_width=4):
super(ResNetGenerator64, self).__init__()
self.num_features = num_features = nfilter
self.di... |
def is_dir_url(link):
    """Return True when `link` resolves to an existing local directory."""
    local_path = url_to_path(link.url_without_fragment)
    return os.path.isdir(local_path)
def make_linear_2d(x_coef, y_coef, const):
equation = []
if ((x_coef == 0) and (y_coef == 0)):
return f'0={frac_to_str(const)}'
if (x_coef != 0):
if (x_coef < 0):
equation.append('-')
if (abs(x_coef) != 1):
equation.append(f'{abs(x_coef)}')
equation.ap... |
class ParallelMode(Enum):
    """Closed set of parallelism/distribution modes a run can be in.

    The string values are the wire/serialized names of each mode; semantics
    of each mode are defined by the consuming trainer code (not visible
    here) — descriptions below are inferred from the names, verify against
    the consumer.
    """
    NOT_PARALLEL = 'not_parallel'
    NOT_DISTRIBUTED = 'not_distributed'
    DISTRIBUTED = 'distributed'
    SAGEMAKER_MODEL_PARALLEL = 'sagemaker_model_parallel'
    SAGEMAKER_DATA_PARALLEL = 'sagemaker_data_parallel'
    TPU = 'tpu'
_utils.test()
def test_cross_scope_matrix():
    """A matrix captured from the enclosing Python scope is readable element-wise."""
    mat = ti.Matrix([[1, 2], [3, 4]])
    def flatten() -> ti.types.vector(4, ti.i32):
        return ti.Vector([mat[(0, 0)], mat[(0, 1)], mat[(1, 0)], mat[(1, 1)]])
    assert (flatten() == [1, 2, 3, 4]).all()
def log_mixture_nb(x: torch.Tensor, mu_1: torch.Tensor, mu_2: torch.Tensor, theta_1: torch.Tensor, theta_2: torch.Tensor, pi_logits: torch.Tensor, eps=1e-08):
if (theta_2 is not None):
log_nb_1 = log_nb_positive(x, mu_1, theta_1)
log_nb_2 = log_nb_positive(x, mu_2, theta_2)
else:
theta =... |
class TestTextClsIO(object):
def test_chip_ctc(self):
io = TextClsIO(is_tokenized=False, tokenize_callback=jieba.tokenize, text_key='text', mapping={' ': ''}, encoding='utf-8')
train_data = io.read('data/cblue/CHIP-CTC/CHIP-CTC_train.json')
dev_data = io.read('data/cblue/CHIP-CTC/CHIP-CTC_de... |
_model
def tf_efficientnet_b2(pretrained=False, **kwargs):
    """Build EfficientNet-B2 with the TensorFlow-port defaults.

    Forces TF batch-norm epsilon and 'same' padding before delegating to
    the generic generator.
    """
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    return _gen_efficientnet('tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
class TFFunnelPreTrainedModel(metaclass=DummyObject):
    """Import-guard stub standing in for the real class when TF is absent."""
    # Backends that must be installed before the real implementation is usable.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        # requires_backends (defined elsewhere) presumably raises when
        # TensorFlow is unavailable — TODO confirm.
        requires_backends(self, ['tf'])
class DPRReaderTokenizerFast():
    """Import-guard stub used when the `tokenizers` library is unavailable.

    Both instantiation and the ``from_pretrained`` entry point delegate to
    ``requires_tokenizers`` (defined elsewhere), which presumably raises an
    informative error — TODO confirm.
    """
    def __init__(self, *args, **kwargs):
        requires_tokenizers(self)
    def from_pretrained(self, *args, **kwargs):
        # Guard the factory entry point the same way as the constructor.
        requires_tokenizers(self)
class ExactThompsonSampler(ThompsonSampler[ProbabilisticModel]):
def sample(self, model: ProbabilisticModel, sample_size: int, at: TensorType, select_output: Callable[([TensorType], TensorType)]=select_nth_output) -> TensorType:
tf.debugging.assert_positive(sample_size)
tf.debugging.assert_shapes([(... |
def test_line_visit():
tracer = ExecutionTracer()
tracer.current_thread_identifier = threading.current_thread().ident
tracer.track_line_visit(42)
tracer.track_line_visit(43)
tracer.track_line_visit(42)
assert (tracer.get_trace().covered_line_ids == OrderedSet([42, 43])) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.