code stringlengths 101 5.91M |
|---|
class ModelTemplate(metaclass=ABCMeta):
def __init__(self, token_emb_mat, glove_emb_mat, tds, cds, tl, scope):
self.scope = scope
self.global_step = tf.get_variable('global_step', shape=[], dtype=tf.int32, initializer=tf.constant_initializer(0), trainable=False)
(self.token_emb_mat, self.glo... |
def pll(minimizer, loss, pois) -> float:
with ExitStack() as stack:
for p in pois:
param = p.parameter
stack.enter_context(param.set_value(p.value))
param.floating = False
if any((param_loss.floating for param_loss in loss.get_params())):
minimum = min... |
def translate_double_cpp_exception(code, pos, lhs_type, lhs_code, rhs_code, lhs_exc_val, assign_exc_val, nogil):
(handle_lhs_exc, lhc_check_py_exc) = get_exception_handler(lhs_exc_val)
(handle_assignment_exc, assignment_check_py_exc) = get_exception_handler(assign_exc_val)
code.putln('try {')
code.putln... |
def soft_unicode(s):
    """Deprecated alias for ``markupsafe.soft_unicode``.

    Emits a DeprecationWarning pointing at the caller's frame, then
    delegates to the markupsafe implementation unchanged.
    """
    from markupsafe import soft_unicode as _markupsafe_soft_unicode
    warnings.warn("'jinja2.utils.soft_unicode' will be removed in version 3.0. Use 'markupsafe.soft_unicode' instead.", DeprecationWarning, stacklevel=2)
    return _markupsafe_soft_unicode(s)
def dla46_c(pretrained=None, **kwargs):
    """Build the DLA-46-C network (Bottleneck blocks, expansion 2).

    pretrained: if not None, imagenet weights for 'dla46_c' are loaded.
    NOTE(review): mutates the module-level ``Bottleneck.expansion``
    attribute as a side effect, which affects later Bottleneck uses.
    """
    Bottleneck.expansion = 2
    net = DLA([1, 1, 1, 2, 2, 1], [16, 32, 64, 64, 128, 256], block=Bottleneck, **kwargs)
    if pretrained is not None:
        net.load_pretrained_model(data='imagenet', name='dla46_c', hash='2bfd52c3')
    return net
.parametrize('hidden_size,sparse_feature_num', [((3,), 6)])
def test_FLEN(hidden_size, sparse_feature_num):
model_name = 'FLEN'
sample_size = SAMPLE_SIZE
(x, y, feature_columns) = get_test_data(sample_size, embedding_size=2, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num, use_gr... |
def test_adagrad_elastic_hinge(bin_train_data):
    """AdaGrad with an elastic-net penalty on hinge loss fits the binary
    training data perfectly, and (hinge being non-probabilistic) exposes
    no predict_proba attribute."""
    X, y = bin_train_data
    clf = AdaGradClassifier(alpha=0.5, l1_ratio=0.85, n_iter=10, random_state=0)
    clf.fit(X, y)
    assert not hasattr(clf, 'predict_proba')
    assert clf.score(X, y) == 1.0
def _check_bn_apply(module, flag):
if issubclass(module.__class__, nn.modules.batchnorm._BatchNorm):
flag[0] = True |
def plot_video(pred_json, gt_json, save_dir_i, fig_num=None, template=True):
duration = (gt_json['duration'] - clip_len)
(t_min, t_max) = (0, duration)
x = np.arange(t_min, t_max, clip_len)
if (fig_num is None):
fig_num = round((duration / gap))
(fig, axs) = plt.subplots(nrows=1, ncols=fig_n... |
class LoggingBase():
def __init__(self):
uuid_name = str(uuid.uuid4())[0:4]
if hasattr(self, 'typename'):
self.log_name = f'{self.typename()}-{uuid_name}'
else:
self.log_name = f'{self.__class__.__name__}-{uuid_name}'
self._logging = logging.getLogger(self.log... |
class Ellipse(GraphicPrimitive):
def __init__(self, x, y, r1, r2, angle, options):
self.x = float(x)
self.y = float(y)
self.r1 = float(r1)
self.r2 = float(r2)
if ((self.r1 <= 0) or (self.r2 <= 0)):
raise ValueError('both radii must be positive')
self.angle... |
.parametrize('reducer_class', [Sum, Product])
def test_sum_and_product_for_single_builder(reducer_class: type[(Sum[ProbabilisticModel] | Product[ProbabilisticModel])]) -> None:
(data, models) = ({TAG: empty_dataset([1], [1])}, {TAG: QuadraticMeanAndRBFKernel()})
acq = reducer_class(_Static((lambda x: (x ** 2)))... |
class TFMobileViTTransformer(tf.keras.layers.Layer):
def __init__(self, config: MobileViTConfig, hidden_size: int, num_stages: int, **kwargs) -> None:
super().__init__(**kwargs)
self.layers = []
for i in range(num_stages):
transformer_layer = TFMobileViTTransformerLayer(config, h... |
def xml2txt(file_path):
output = file_path.replace('.xml', '_text.txt')
sent_list = []
with open(file_path, 'rb') as f:
raw = f.read()
root = etree.fromstring(raw)
for sentence in root:
sent = sentence.find('text').text
terms = sentence.find('aspectTerms')
... |
class CategoricalMLPPolicy(StochasticPolicy, LayersPowered, Serializable):
def __init__(self, name, env_spec, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, prob_network=None):
Serializable.quick_init(self, locals())
assert isinstance(env_spec.action_space, Discrete)
with tf.variable... |
def test_parse_endifloop():
p = sqlparse.parse('END IF')[0]
assert (len(p.tokens) == 1)
assert (p.tokens[0].ttype is Keyword)
p = sqlparse.parse('END IF')[0]
assert (len(p.tokens) == 1)
p = sqlparse.parse('END\t\nIF')[0]
assert (len(p.tokens) == 1)
assert (p.tokens[0].ttype is Keyword)... |
class IALLoss(nn.Module):
def __init__(self, device, temperature=0.05, alpha=0.5):
super(IALLoss, self).__init__()
self.temp = 1.0
self.alpha = alpha
self.device = device
self.zoom = 0.1
def forward(self, src_emb, ref_emb, data_dict):
src_emb = F.normalize(src_emb... |
class MLFNBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride, fsm_channels, groups=32):
super(MLFNBlock, self).__init__()
self.groups = groups
mid_channels = (out_channels // 2)
self.fm_conv1 = nn.Conv2d(in_channels, mid_channels, 1, bias=False)
self.fm_bn... |
def train_dev_split(data_dir):
data_dir = os.path.abspath(data_dir)
print(f'data_dir: {data_dir}')
for dataset_dir_name in ['ag_news_csv', 'dbpedia_csv', 'yelp_review_full_csv']:
dataset_dir = os.path.join(data_dir, dataset_dir_name)
print(f'''
dataset_dir: {dataset_dir}''')
input_pa... |
class DirectPlannerDestOneSided(MulticastDirectPlanner):
def plan(self, jobs: List[TransferJob]) -> TopologyPlan:
src_region_tag = jobs[0].src_iface.region_tag()
dst_region_tags = [iface.region_tag() for iface in jobs[0].dst_ifaces]
for job in jobs[1:]:
assert (job.src_iface.regi... |
def replicate_variables_to_devices(meta_graph_def, worker_device, num_replicas_per_worker):
var_op_name_to_original_device_str = {}
for node in meta_graph_def.graph_def.node:
if ('Variable' in node.op):
var_op_name_to_original_device_str[node.name] = node.device
sparse_var_op_names = []
... |
class ReturnnDatasetResetMpSharedEpochCallback():
def __init__(self, dataset: ReturnnDataset, epoch_mp_shared: torch.multiprocessing.Value):
self.dataset = dataset
self.epoch_mp_shared = epoch_mp_shared
def __call__(self):
epoch = self.epoch_mp_shared.value
self.dataset.init_seq_... |
def assert_topology(ref_outputs, outputs):
for (ref_f, f) in zip(iterate_function(ref_outputs, 'left'), iterate_function(outputs, 'right')):
assert (ref_f.name == f.name)
assert (ref_f.arguments == f.arguments)
for (ref_v, v) in zip(ref_f.inputs, f.inputs):
assert (ref_v.d.shape ... |
_test(assert_ii_1=False)
def test_xilinx_decoupled_array_interfaces():
with set_temporary('compiler', 'xilinx', 'decouple_array_interfaces', value=True):
return run_atax(dace.dtypes.DeviceType.FPGA) |
def test_to_jams():
default_clipid = 'a001'
dataset = tut2017se.Dataset(TEST_DATA_HOME)
clip = dataset.clip(default_clipid)
jam = clip.to_jams()
assert jam.validate()
events = jam.search(namespace='segment_open')[0]['data']
assert (len(events) == 3)
assert np.allclose(events[0].time, 1.5... |
def pad(x):
    """Randomly zero-pad an (H, W, C) image by up to 2 px per side, then
    resize it back to 32x32 (values preserved, not rescaled)."""
    limit = 2
    amounts = (np.random.rand(4) * limit).astype(int)
    pad_widths = ((amounts[0], amounts[1]), (amounts[2], amounts[3]), (0, 0))
    padded = np.pad(x, pad_widths, mode='constant', constant_values=0)
    return skimage.transform.resize(padded, (32, 32), preserve_range=True)
def detect_face(face_detector: Any, detector_backend: str, img: np.ndarray, align: bool=True) -> tuple:
obj = detect_faces(face_detector, detector_backend, img, align)
if (len(obj) > 0):
(face, region, confidence) = obj[0]
else:
face = None
region = [0, 0, img.shape[1], img.shape[0]]... |
def tag_visible(element):
    """Return True when *element* is renderable page text: its parent is
    not a non-visible tag (style/script/head/title/meta/[document]) and
    the element is not an HTML comment."""
    hidden_parents = {'style', 'script', 'head', 'title', 'meta', '[document]'}
    if element.parent.name in hidden_parents:
        return False
    return not isinstance(element, Comment)
def parse_textgrid(filename):
    """Parse a Praat TextGrid file and return the intervals of its
    'words' tier, each converted via parse_Interval."""
    grid = textgrid.TextGrid.fromFile(filename)
    word_tier = grid.getList('words')[0]
    return [parse_Interval(interval) for interval in word_tier]
class datasetSampler():
def __init__(self, config, source, target, gridSze, numberOfSamples=None):
self.gtPath = formatDirPath(source)
self.targetPath = formatDirPath(target)
self.numberOfSamples = numberOfSamples
print('number of data samples to be processed', self.numberOfSamples)
... |
def contains_each(V, B):
    """Return True iff every element of *B* is contained in *V*.

    V: any container supporting ``in`` (list, set, dict, ...).
    B: iterable of candidate elements; an empty B vacuously yields True.
    """
    # all() with a generator short-circuits on the first missing
    # element, exactly like the original explicit loop.
    return all(b in V for b in B)
def deprecate_function(new_function, old_name, removal_version=None, new_location=None, future_warn=False, error=False):
new_name = new_function.__name__
if (new_location is not None):
new_name = f'{new_location}.{new_name}'
message = f'{old_name} has been deprecated, please use {new_name}.'
if ... |
class TestSequenceGenerator(TestSequenceGeneratorBase):
def setUp(self):
(self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model) = test_utils.sequence_generator_setup()
self.sample = {'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}}
def test_with_normalization(... |
class McnetKernelCombEnvironment(BidirectionalPredictionEnvironment):
def name(self):
return 'McnetKernelCombEnvironment'
def create_generator(self, shallow, gf_dim, c_dim, ks, num_block, layers, kf_dim, enable_res, rc_loc):
if shallow:
generator = generators.KernelShallowGenerator(g... |
def save_config(cfg, path, args):
    """Write *cfg*'s dump to *path*, but only on the main process, so a
    multi-process run writes the config exactly once."""
    if not args.is_main_process():
        return
    with open(path, 'w') as out:
        out.write(cfg.dump())
class TestPCovFPS(unittest.TestCase):
def setUp(self):
(self.X, self.y) = get_dataset(return_X_y=True)
self.idx = [0, 2, 6, 7, 1, 3, 4]
def test_restart(self):
selector = PCovFPS(n_to_select=1, initialize=self.idx[0])
selector.fit(self.X, y=self.y)
for i in range(2, len(s... |
def gt_label2entity(gt_infos):
gt_entities = []
for gt_info in gt_infos:
line_entities = []
label = gt_info['label']
for (key, value) in label.items():
for (_, places) in value.items():
for place in places:
line_entities.append([key, place[... |
class NearestNDInterpolator(NDInterpolatorBase):
def __init__(self, x, y, rescale=False, tree_options=None):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale, need_contiguous=False, need_values=False)
if (tree_options is None):
tree_options = dict()
self.tree = cKDTree(sel... |
class CifarGatv2Config(CifarConfig):
def __init__(self, num_samples, hidden) -> None:
super().__init__(num_samples=num_samples)
self.hidden = hidden
def model(self, hparams):
return Gatv2CifarNet(self.hidden, num_graph_layers=4, residual=True, readout='mean', dropout=hparams['dropout'])
... |
def _hash_label(label, digest_size=32):
return blake2b(label.encode('ascii'), digest_size=digest_size).hexdigest() |
def teardown_test():
    """Undo the error-on-warnings test setup: restore the default
    warnings filters when the suite promoted warnings to errors."""
    if not _error_on_warnings:
        return
    warnings.resetwarnings()
    warnings.simplefilter('default')
class Snake(nn.Module):
def __init__(self, in_features, a=None, trainable=True):
super(Snake, self).__init__()
self.in_features = (in_features if isinstance(in_features, list) else [in_features])
if (a is not None):
self.a = Parameter((torch.ones(self.in_features) * a))
e... |
class RandomDataset(torch.utils.data.Dataset):
def __init__(self, n_examples: int=64, window_size: int=32, split: str='train', transforms: List=[]):
self.n_examples = n_examples
self.split = split
self.data = [dict(images=torch.rand(window_size, 3, 200, 200), observations=torch.rand(window_s... |
.script
def assorted_types_args_kwargs(tensor_arg: Tensor, str_arg: str, int_arg: int, tensor_kwarg: Tensor=torch.tensor([2, 2]), str_kwarg: str='str_kwarg', int_kwarg: int=2):
    # Exercises mixed positional/keyword arguments of assorted types:
    # returns a 3-tuple (tensor add, string concat, int add).
    # NOTE(review): the tensor default is evaluated once at definition
    # time and shared across calls; the decorator line above appears
    # mangled (likely @torch.jit.script) -- confirm against the original.
    return ((tensor_arg + tensor_kwarg), (str_arg + str_kwarg), (int_arg + int_kwarg))
('conll_span')
class ConllSpanReader(DatasetReader):
def __init__(self, token_indexers: Dict[(str, TokenIndexer)], tokenizer: Tokenizer=None, max_sequence_length: int=512, max_entity_length: int=128, max_mention_length: int=16, iob_scheme: str='iob2', encoding: str='utf-8', use_entity_feature: bool=False, **kwargs)... |
def main(_):
if (not tf.gfile.IsDirectory(FLAGS.output_dir)):
tf.gfile.MakeDirs(FLAGS.output_dir)
cat_nms = (FLAGS.category_names.split(',') if FLAGS.category_names else None)
_convert_dataset('val', FLAGS.dataset_dir, cat_nms=cat_nms)
_convert_dataset('train', FLAGS.dataset_dir, cat_nms=cat_nms... |
_utils.test(arch=ti.cpu)
def test_ternary_op():
    # Verify that a ternary select between vectors of mismatched sizes
    # (vec2 vs vec3) is rejected with a broadcast compilation error.
    # NOTE(review): the decorator above is mangled and an inner
    # @ti.func/@ti.kernel on `select` may also have been stripped by the
    # extraction -- confirm against the original file.
    def select():
        a = ti.math.vec2(1.0, 1.0)
        b = 3
        c = ti.math.vec3(1.0, 1.0, 2.0)
        d = (a if b else c)
    with pytest.raises(ti.TaichiCompilationError, match='Cannot broadcast tensor to tensor'):
        select()
def test_prod_mul_to_prod():
var1 = optplan.Parameter()
var2 = optplan.Parameter()
var3 = optplan.Parameter()
var4 = optplan.Parameter()
prod1 = optplan.Product(functions=[var1, var2])
prod2 = optplan.Product(functions=[var3, var4])
prod3 = (prod1 * prod2)
assert isinstance(prod3, optpla... |
def assert_fp_equal(x, y, err_msg='', nulp=50):
    """Assert *x* and *y* are equal to within *nulp* units in the last
    place, re-raising with *err_msg* appended on mismatch."""
    try:
        assert_array_almost_equal_nulp(x, y, nulp)
    except AssertionError as e:
        message = '%s\n%s' % (e, err_msg)
        raise AssertionError(message)
def test_numpy():
    """symintasklet_numpy writes its scalar `value` into out[0]."""
    result = np.zeros(1).astype(np.float32)
    expected = np.float32(1.5)
    symintasklet_numpy(result, value=expected)
    assert result[0] == expected
class RawBytes(object):
    """Thin wrapper around a sequence of raw byte values.

    Stores the positional arguments as a tuple on ``self.bytes`` and
    reports their count via ``len()``.
    """

    def __init__(self, *values):
        # Parameter renamed from *bytes* so the builtin ``bytes`` type
        # is not shadowed; positional-only, so callers are unaffected.
        self.bytes = values

    def __len__(self):
        return len(self.bytes)
class SplitByNode():
def __init__(self, group=None):
self.rank = (- 1)
self.size = (- 1)
try:
import torch
if ((not torch.distributed.is_available()) or (not torch.distributed.is_initialized())):
return
except Exception as e:
print(... |
def get_uuid(element, state=None):
if isinstance(element, SDFG):
return ids_to_string(element.sdfg_id)
elif isinstance(element, SDFGState):
return ids_to_string(element.parent.sdfg_id, element.parent.node_id(element))
elif isinstance(element, nodes.Node):
return ids_to_string(state.p... |
class write_port():
def __init__(self):
self.latency = 0
def service_writes(self, incoming_requests_arr_np, incoming_cycles_arr_np):
out_cycles_arr_np = (incoming_cycles_arr_np + self.latency)
out_cycles_arr_np = out_cycles_arr_np.reshape((out_cycles_arr_np.shape[0], 1))
return o... |
def run_contour_integral(device_type: dace.dtypes.DeviceType):
(NR, NM, slab_per_bc, num_int_pts) = (50, 150, 2, 32)
(Ham, int_pts, Y) = initialize(NR, NM, slab_per_bc, num_int_pts)
if (device_type in {dace.dtypes.DeviceType.CPU, dace.dtypes.DeviceType.GPU}):
sdfg = dace_contour_integral.to_sdfg()
... |
def _triplet_mate_frontalpose_nonmate_topk_probe_frontalpose():
n_subjects = 9
vggface2 = VGGFace2('/proj/janus6/vggface2', seed=42)
frontalset = [im for im in vggface2.frontalset(n_frontal=(n_subjects + 1))]
subjectid = sorted(list(set([im.category() for im in frontalset])))
matelist = [im for im i... |
class MultiHeadAttention(nn.Module):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, (n_head * d_k))
self.w_ks = nn.Linear(d_model, (n_head * d_k))
... |
class IgrfClassTests(unittest.TestCase):
def setUp(self):
self.IGRF = igrf.IGRF()
self.date1899 = dt.datetime(1899, 3, 17)
self.date2010 = dt.datetime(2010, 1, 1)
self.date2049 = dt.datetime(2049, 6, 21)
def tearDown(self):
del self.IGRF
def test_initRaises(self):
... |
class PartitionTuples_level_size(PartitionTuples):
def __init__(self, level, size):
if (not ((level in NN) and (size in NN))):
raise ValueError('n and level must be non-negative integers')
super().__init__(category=FiniteEnumeratedSets())
self._level = level
self._size = ... |
class FaceTrainer(DefaultTrainer):
def __init__(self, cfg):
TrainerBase.__init__(self)
logger = logging.getLogger('fastreid.partial-fc.trainer')
if (not logger.isEnabledFor(logging.INFO)):
setup_logger()
data_loader = self.build_train_loader(cfg)
cfg = self.auto_s... |
def get_chains(node, inherit_map, path):
    """Recursively walk *inherit_map* upward from *node*, appending each
    visited parent to *path*; returns the accumulated chain (or *path*
    unchanged when node has no parents recorded)."""
    if node not in inherit_map:
        return path
    for parent in inherit_map[node]:
        path = get_chains(parent, inherit_map, path + [parent])
    return path
class LegacyMetadata(object):
def __init__(self, path=None, fileobj=None, mapping=None, scheme='default'):
if ([path, fileobj, mapping].count(None) < 2):
raise TypeError('path, fileobj and mapping are exclusive')
self._fields = {}
self.requires_files = []
self._dependenci... |
def test_ufunc_add_outer_simple4():
    """Compare the program's result against np.add.outer on a 5-D
    random int32 array with symbolic last dimension N=10."""
    N.set(10)
    arr = np.random.randint(1, 10, size=(2, 2, 2, 2, N.get()), dtype=np.int32)
    result = ufunc_add_outer_simple4(arr)
    reference = np.add.outer(arr, N.get())
    assert np.array_equal(reference, result)
def get_act_layer(act_type):
    """Return a Keras activation-layer factory for *act_type*.

    Currently only 'relu6' is supported (ReLU capped at 6.0).

    Raises:
        NotImplementedError: for any other act_type.
    """
    if act_type == 'relu6':
        return partial(tf.keras.layers.ReLU, max_value=6.0)
    # BUG FIX: `raise NotImplemented` raised a TypeError (NotImplemented
    # is a sentinel value, not an exception class); raise the intended
    # exception with a helpful message instead.
    raise NotImplementedError(f'unsupported act_type: {act_type!r}')
class AlignedDataset(BaseDataset):
def __init__(self, opt):
BaseDataset.__init__(self, opt)
self.dir_AB = os.path.join(opt.dataroot, opt.phase)
self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size))
imgs = []
for path_ in self.AB_paths:
cur_path =... |
class CreateGraph(Protocol):
    """Structural type for factories that build a (model, optimizer)
    pair, optionally using *run_dir* (e.g. for checkpoints/logs)."""

    def __call__(self, run_dir: Optional[str]) -> Tuple[(nn.Module, Optimizer)]:
        # Protocol method: implementations return the constructed
        # nn.Module together with its Optimizer.
        pass
def test_population_sample_solution():
    """A population holding a single solution always samples that
    solution and increments its usage counter."""
    pop = MIOPopulation(5)
    sol = MagicMock()
    pop.add_solution(0.1, sol)
    sampled = pop.sample_solution()
    assert sampled == sol
    assert pop.counter == 1
class TestERWR(TfGraphTestCase):
.large
def test_erwr_cartpole(self):
with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
deterministic.set_seed(1)
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy', env_spec=env.spec, hidde... |
class MeanAveragePrecision(ScoreFunction):
    """Macro-averaged mean average precision over 2-D multi-label
    prediction/target matrices."""

    name = 'mAP'

    def _compute(self, predictions: np.ndarray, targets: np.ndarray, **kwargs) -> float:
        # Both inputs must be (n_samples, n_labels) matrices.
        assert predictions.ndim == 2 and targets.ndim == 2
        return average_precision_score(targets, predictions, average='macro')
class TestDiff():
def test_definition(self):
for n in [16, 17, 64, 127, 32]:
x = (((arange(n) * 2) * pi) / n)
assert_array_almost_equal(diff(sin(x)), direct_diff(sin(x)))
assert_array_almost_equal(diff(sin(x), 2), direct_diff(sin(x), 2))
assert_array_almost_eq... |
class Trainer():
def __init__(self, dataloader, hierarchical_transformer, config, i):
super(Trainer, self).__init__()
self.iter = i
self.config = config
self.cpu = torch.device('cpu')
self.multi_gpu = (len(self.config.gpu_idx) > 1)
self.dataloader = dataloader
... |
def parse_content_range_header(value, on_update=None):
if (value is None):
return None
try:
(units, rangedef) = (value or '').strip().split(None, 1)
except ValueError:
return None
if ('/' not in rangedef):
return None
(rng, length) = rangedef.split('/', 1)
if (len... |
def txt_to_json(dir_to_glove):
glove = {}
txt = open(dir_to_glove, 'r')
while True:
line = txt.readline()
if (not line):
break
glove[line.split(' ')[0]] = ([float(i) for i in line.split(' ')[1:(- 1)]] + [float(line.split(' ')[(- 1)][:(- 1)])])
with open('glove.42B.300... |
.slow
def test_solution(output_dir):
import sfepy
from sfepy.base.base import Struct
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.homogenization.homogen_app import HomogenizationApp
import os.path as op
ok = True
(required, other) = get_standard_keywords()
re... |
def SpawnProcessForDataPipeline(multiprocessing_ctx, datapipe):
    """Create (but do not start) a worker process that runs
    DataPipeToQueuesLoop over *datapipe*, together with its request and
    response queues."""
    request_q = multiprocessing_ctx.Queue()
    response_q = multiprocessing_ctx.Queue()
    worker = multiprocessing_ctx.Process(target=DataPipeToQueuesLoop, args=(datapipe, request_q, response_q))
    return (worker, request_q, response_q)
def print_results(article, abstract, decoded_output):
print('')
print('Article -> ', article)
print('ref_sum -> ', abstract)
print('generated ->', decoded_output)
tf.logging.info('ARTICLE: %s', article)
tf.logging.info('REFERENCE SUMMARY: %s', abstract)
tf.logging.info('GENERATED SUMMARY: %... |
class MultiHeadedAttentionWithRelations(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
super(MultiHeadedAttentionWithRelations, self).__init__()
assert ((d_model % h) == 0)
self.d_k = (d_model // h)
self.h = h
self.linears = clones((lambda : nn.Linear(d_model, d_mod... |
def custom_tokenizers(test_case):
    """Decorator: skip *test_case* unless the custom-tokenizer suite was
    enabled via the module-level _run_custom_tokenizers flag."""
    skipper = unittest.skipUnless(_run_custom_tokenizers, 'test of custom tokenizers')
    return skipper(test_case)
class Path():
    """A path in a graph, tracked only by its endpoints, edge count, and
    an active flag. A path is a cycle when both endpoints are the same
    vertex object and it has at least one edge."""

    def __init__(self, v):
        # Degenerate single-vertex path: both endpoints are v.
        self.start = v
        self.end = v
        self.length = 0
        self.active = True

    def is_cycle(self) -> bool:
        return (self.length > 0) and (self.start is self.end)
class AGNEWs(Dataset):
def __init__(self, label_data_path, alphabet_path, l0=1014):
self.label_data_path = label_data_path
self.l0 = l0
self.loadAlphabet(alphabet_path)
self.load(label_data_path)
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
... |
def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta):
def _convert_category_id(segment_info, meta):
if (segment_info['category_id'] in meta['thing_dataset_id_to_contiguous_id']):
segment_info['category_id'] = meta['thing_dataset_id_to_contiguous_id'][segment_info['category_id']]
... |
def loss(pred, ref):
    """L2 reconstruction loss between *pred* and *ref*, normalized by
    3 * IMAGE_SIZE**2, registered in the 'losses' collection; returns
    the sum of all collected losses as 'Total_loss'.

    NOTE(review): tf.sub/tf.div are pre-TF-1.0 names (later renamed to
    tf.subtract/tf.divide) -- this file targets an old TensorFlow.
    """
    square_error = tf.nn.l2_loss(tf.sub(pred, ref))
    # Normalize by pixel count (3 channels * IMAGE_SIZE^2 pixels).
    l2_loss = tf.div(tf.cast(square_error, dtype=tf.float32), ((3 * IMAGE_SIZE) * IMAGE_SIZE), name='L2_Loss')
    tf.add_to_collection('losses', l2_loss)
    return tf.add_n(tf.get_collection('losses'), name='Total_loss')
.register('save_npy')
class SaveNpyOpProp(mx.operator.CustomOpProp):
def __init__(self, save_name='op', save_dir='.'):
super(SaveNpyOpProp, self).__init__(need_top_grad=True)
self._save_name = save_name
self._save_dir = save_dir
def list_arguments(self):
return ['data']
def l... |
def test_gpu_schedule_scalar_autodetect():
def add(a: (dace.float32[(10, 10)] dace.StorageType.GPU_Global), b: (dace.float32[(10, 10)] dace.StorageType.GPU_Global), c: (dace.float32[10] dace.StorageType.CPU_Heap)):
return ((a + (b b)) + c[0])
sdfg = add.to_sdfg()
set_default_schedule_and_storage... |
def normalize_word(word):
    """Strip the leading escape slash from the tokens '/.' and '/?';
    every other word passes through unchanged."""
    return word[1:] if word in ('/.', '/?') else word
def main():
global best_acc
start_epoch = args.start_epoch
if (not os.path.isdir(args.checkpoint)):
mkdir_p(args.checkpoint)
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,... |
class TestOperationController(testing_utils.TestCase):
def setUp(self):
super(TestOperationController, self).setUp()
(self.model_space, _) = testing_utils.get_example_conv1d_space()
self.controller = architect.OperationController(state_space=self.model_space, controller_units=8, kl_threshold... |
def cnn_7layer_bn2(in_ch=3, in_dim=32, width=64, linear_size=512, num_class=10):
model = nn.Sequential(nn.Conv2d(in_ch, width, 3, stride=1, padding=1), nn.BatchNorm2d(width), nn.ReLU(), nn.Conv2d(width, width, 3, stride=1, padding=1), nn.BatchNorm2d(width), nn.ReLU(), nn.Conv2d(width, (2 * width), 3, stride=2, padd... |
def multispeaker_example_cacher(text, n_timesteps, mel_temp, length_scale, spk):
global CURRENTLY_LOADED_MODEL
if (CURRENTLY_LOADED_MODEL != 'matcha_vctk'):
global model, vocoder, denoiser
(model, vocoder, denoiser) = load_model('matcha_vctk', 'hifigan_univ_v1')
CURRENTLY_LOADED_MODEL = ... |
def fix_m0(args):
print('train m0 and fix it till the end')
device = (torch.device(('cuda:' + str(args.device))) if torch.cuda.is_available() else torch.device('cpu'))
dataset = DynRecDataset(name=args.dataset)
pinsage_hyperparam_list = get_pinsage_hyperparam_list(dataset_name=args.dataset)
(edge_in... |
class WordCopyingDecoder(BaseCopyingDecoder):
def __init__(self, delimiter=' ', tokens_feature_name='tokens', length_feature_name='length', source_copy_feature_name='source_copy_indices', prepend_token=None, append_token=None):
super(WordCopyingDecoder, self).__init__(delimiter=delimiter, tokens_feature_nam... |
def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Build a center-crop + tensor + normalize transform, optionally
    preceded by a resize.

    input_size: target crop size.
    scale_size: if given and different from input_size, resize first;
        None means no resize.
    normalize: kwargs for transforms.Normalize (default imagenet stats).
    """
    t_list = [transforms.CenterCrop(input_size), transforms.ToTensor(), transforms.Normalize(**normalize)]
    # BUG FIX: the old guard was only `scale_size != input_size`, so the
    # default scale_size=None produced transforms.Resize(None) and
    # crashed; None now explicitly means "skip the resize".
    if scale_size is not None and scale_size != input_size:
        t_list = [transforms.Resize(scale_size)] + t_list
    return transforms.Compose(t_list)
()
def gcp_check(bucket: str=typer.Argument(..., help='GCP bucket to check access for'), debug: bool=typer.Option(False, '--debug', help='Print debug info')):
hline = ('=' * 80)
rprint(f'''{hline}
[bold]Checking Skyplane configuration...[/bold]
{hline}''')
if debug:
rprint(f'[bright_black]Skyplane c... |
class IntersimExpert(BasePolicy):
def __init__(self, intersim_env, mu=0, *args, **kwargs):
super().__init__(*args, observation_space=gym.spaces.Space(), action_space=gym.spaces.Space(), **kwargs)
self._intersim = intersim_env
self._mu = mu
def forward(self, *args, **kwargs):
rais... |
_tokenizers
class DPRContextEncoderTokenizationTest(BertTokenizationTest):
    """Runs the inherited BERT tokenizer test-suite against the DPR
    context-encoder tokenizer (both slow and rust variants)."""

    # Tokenizer classes under test; the inherited suite instantiates
    # these.
    tokenizer_class = DPRContextEncoderTokenizer
    rust_tokenizer_class = DPRContextEncoderTokenizerFast
    # Enable the rust-tokenizer half of the inherited tests.
    test_rust_tokenizer = True
class TestZipfian():
def test_zipfian_asymptotic(self):
a = 6.5
N =
k = np.arange(1, 21)
assert_allclose(zipfian.pmf(k, a, N), zipf.pmf(k, a))
assert_allclose(zipfian.cdf(k, a, N), zipf.cdf(k, a))
assert_allclose(zipfian.sf(k, a, N), zipf.sf(k, a))
assert_all... |
def file_oriented_report(tests: TestList, tests_type: TestStatusType, interested_folders: List[str], coverage_only: List[str], covered_lines: Dict[(str, Set[int])], uncovered_lines: Dict[(str, Set[int])]) -> None:
with open(os.path.join(SUMMARY_FOLDER_DIR, 'file_summary'), 'w+') as summary_file:
covered_sum... |
def prepro(args):
    """Preprocess the train/dev/test splits into args.target_dir.

    Ensures the target directory exists, then runs prepro_each once per
    split.
    """
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists guard.
    os.makedirs(args.target_dir, exist_ok=True)
    prepro_each(args, 'train', out_name='train')
    prepro_each(args, 'dev', out_name='dev')
    prepro_each(args, 'test', out_name='test')
def get_concept_id(code):
    """Return the concept id for *code*, assigning a fresh id past
    extra_code_offset (and recording the code in extra_codes) on first
    sight."""
    try:
        return code_to_concept_id_map[code]
    except KeyError:
        concept_id = extra_code_offset + len(extra_codes)
        code_to_concept_id_map[code] = concept_id
        extra_codes.append(code)
        return concept_id
def update_override_act_fn(overrides):
    """Merge *overrides* into the module-level _OVERRIDE_FN mapping.

    overrides: dict mapping activation names to override callables.

    Raises:
        TypeError: if *overrides* is not a dict. (Previously an assert,
            which is silently stripped under ``python -O``.)
    """
    if not isinstance(overrides, dict):
        raise TypeError(f'overrides must be a dict, got {type(overrides).__name__}')
    global _OVERRIDE_FN
    _OVERRIDE_FN.update(overrides)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.