code stringlengths 101 5.91M |
|---|
def _index_wav(index, data_path, formt, subset):
eval_long_filenames = _get_long_eval_filenames(data_path)
audio_path = os.path.join(data_path, ((formt + '_') + subset))
wavfiles = glob.glob(os.path.join(audio_path, '*.wav'))
wavfiles.sort()
for wf in wavfiles:
clip_id = '{}/{}'.format(((for... |
class _LPMaker():
def __init__(self, model, model_args, model_kwargs, trace_prob_evaluator, transforms):
self.model = model
self.model_args = model_args
self.model_kwargs = model_kwargs
self.trace_prob_evaluator = trace_prob_evaluator
self.transforms = transforms
self... |
class TFData2VecVisionPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder: any use raises unless the TensorFlow backend is installed."""
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Fail fast with an informative backend error instead of a NameError later.
        requires_backends(self, ['tf'])
def get_title_and_underscore(package_name: str) -> "tuple[str, str]":
    """Build an RST section title and matching underline for a Snorkel package.

    Args:
        package_name: Name of the package (e.g. ``"labeling"``).

    Returns:
        A ``(title, underline)`` pair where the underline is a run of ``-``
        exactly as long as the title.
    """
    # Fix: the original annotated the return type as ``str`` although the
    # function returns a 2-tuple; the annotation is kept as a string literal
    # so no new runtime import is needed.
    title = f'Snorkel {package_name.capitalize()} Package'
    underscore = ('-' * len(title))
    return (title, underscore)
.skip('string broadcasting is broken')
def test_broadcast_string_int():
this = ak.Array(['one', 'two', 'one', 'nine'])
that = ak.contents.NumpyArray(np.array([1, 2, 1, 9], dtype='int32'), parameters={'kind': 'integer'})
(this_next, that_next) = ak.operations.ak_broadcast_arrays.broadcast_arrays(this, that)
... |
def tail_generator(gen_in: Generator[(T, None, None)], out_list: List[T]) -> Generator[(T, None, None)]:
    """Pass items from *gen_in* through unchanged while recording each one in *out_list*."""
    record = out_list.append
    for element in gen_in:
        record(element)
        yield element
def test_edsr():
EDSR(in_channels=3, out_channels=3, mid_channels=8, num_blocks=2, upscale_factor=2)
net = EDSR(in_channels=3, out_channels=3, mid_channels=8, num_blocks=2, upscale_factor=3)
net.init_weights(pretrained=None)
input_shape = (1, 3, 12, 12)
img = _demo_inputs(input_shape)
output = n... |
class ParallelModule(nn.Module):
def __init__(self, input_dims, parallel_modules, post_parallel_module, **kwargs):
super().__init__()
self._input_dims = input_dims
self._parallel_modules = nn.ModuleList(parallel_modules)
print(parallel_modules)
self._post_parallel_module = po... |
class HoistState(transformation.SingleStateTransformation):
nsdfg = transformation.PatternNode(nodes.NestedSDFG)
def expressions(cls):
return [sdutil.node_path_graph(cls.nsdfg)]
def can_be_applied(self, graph: SDFGState, expr_index, sdfg, permissive=False):
nsdfg = self.nsdfg
if (gra... |
def split_entity_prefix(amr, prefix):
while True:
index = None
for (i, lemma) in enumerate(amr.lemmas):
if lemma.lower().startswith((prefix + '-')):
index = i
break
else:
break
pos = amr.pos_tags[index]
ner = amr.ner_tag... |
class EmptyArray(EmptyMeta, Content):
def __init__(self, *, parameters=None, backend=None):
if (not ((parameters is None) or (len(parameters) == 0))):
raise TypeError(f'{type(self).__name__} cannot contain parameters')
if (backend is None):
backend = NumpyBackend.instance()
... |
def bucketized(seconds):
    """Map a duration in seconds onto a log2-scaled bucket index in [0, 11]."""
    bucket_edges = list(range(0, 11))
    log_duration = np.log2(seconds + 1)
    return np.searchsorted(bucket_edges, log_duration)
class TinyImageNetDataset(ShardDataset):
NUM_IMAGES_PER_CLASS = 500
def __init__(self, data_folder: Path, data_type='train', rank=1, worldsize=1):
self.data_type = data_type
self._common_data_folder = data_folder
self._data_folder = os.path.join(data_folder, data_type)
self.label... |
def fit_to_console(obj, initial_indent='', subsequent_indent='', width=None):
import shutil
import textwrap
import pprint
(columns, lines) = shutil.get_terminal_size()
full_width = (width or (columns - 1))
wrapper = textwrap.TextWrapper(width=full_width, initial_indent=initial_indent, subsequent... |
def set_verbose(index):
if isinstance(index, faiss.Index):
index = faiss.downcast_index(index)
elif isinstance(index, faiss.IndexBinary):
index = faiss.downcast_IndexBinary(index)
index.verbose = True
if isinstance(index, faiss.IndexPreTransform):
set_verbose(index.index)
eli... |
class Cumsum(Flow):
def __init__(self, dim=(- 2)):
super().__init__()
self.dim = dim
def forward(self, x):
y = x.cumsum(self.dim)
log_det_jac = torch.zeros_like(x)
return (y, log_det_jac)
.export
def inverse(self, y):
x = diff(y, self.dim)
inv_log_... |
def CreateGemmGroupedOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints, complex_transforms=None, epilogue_functor=EpilogueFunctor.LinearCombination, swizzling_functor=SwizzlingFunctor.Identity8):
if (complex_transforms is None):
complex_transforms = [(ComplexTransform.none, Com... |
def quic_graph_lasso_ebic_manual(X, gamma=0):
print('QuicGraphicalLasso (manual EBIC) with:')
print(' mode: path')
print(' gamma: {}'.format(gamma))
model = QuicGraphicalLasso(lam=1.0, mode='path', init_method='cov', path=np.logspace(np.log10(0.01), np.log10(1.0), num=100, endpoint=True))
model.... |
def clean_lu_tva(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
if (output_format not in {'compact', 'standard'}):
raise ValueError(f'output_format {output_format} is invalid. It needs to b... |
_numpy_output(check_dtype=True)
def test_ufunc_add_ff(A: dace.float32[10], B: dace.float32[10]):
    """Elementwise float32 addition of two length-10 arrays via the NumPy ufunc.

    The decorator above compiles this as a DaCe program and validates the
    output dtype against NumPy's result.
    """
    return np.add(A, B)
def _test_op(dt, taichi_op, np_op):
print('arch={} default_fp={}'.format(ti.lang.impl.current_cfg().arch, ti.lang.impl.current_cfg().default_fp))
n = 4
val = ti.field(dt, shape=n)
def f(i):
return ((i * 0.1) + 0.4)
def fill():
for i in range(n):
val[i] = taichi_op(ti.func... |
def constrained_replay(df, row, eavesdropped_data, attack_intervals, *args):
constraints = args[0]
check_constraints(constraints)
print(len(eavesdropped_data[constraints[0]].loc[row['Replay_Copy']:((row['Replay_Copy'] + (row['End'] - row['Start'])) + 1)]))
print(len(test_data))
try:
test_dat... |
class KeyPairDataset(Dataset):
def __init__(self, dataset: Dataset, key1: str, key2: str):
self.dataset = dataset
self.key1 = key1
self.key2 = key2
def __len__(self):
return len(self.dataset)
def __getitem__(self, i):
return {'text': self.dataset[i][self.key1], 'text_... |
_toolkit()
class GoogleSearch(FunctionToolkit):
name_for_human = 'Google Search'
description_for_human = 'Toolkit for interacting with Google Search.'
name_for_model = 'GoogleSearch'
description_for_model = "A toolkit for interacting with Google Search, providing functionalities such as performing web, ... |
def EuthanizeIfNecessary(timeout_secs=120):
    """Spawn a WatcherThread with the given timeout.

    Presumably the watcher terminates a hung run after *timeout_secs*
    seconds -- confirm in WatcherThread.
    """
    watchdog = WatcherThread(timeout_secs)
    watchdog.start()
def ee_to_aa(euler_angle: Union[(torch.Tensor, numpy.ndarray)], convention: str='xyz') -> Union[(torch.Tensor, numpy.ndarray)]:
if (euler_angle.shape[(- 1)] != 3):
raise ValueError(f'Invalid input euler_angle f{euler_angle.shape}.')
t = Compose([euler_angles_to_matrix, matrix_to_quaternion, quaternion_t... |
class GcnArxivNet(ArxivNet):
    """ArxivNet variant whose graph layers are plain GCN convolutions."""

    def make_graph_layer(self, hidden_dim, layer_idx):
        # layer_idx is intentionally unused: every layer is the same
        # hidden_dim -> hidden_dim GCNConv.
        conv = GCNConv(hidden_dim, hidden_dim)
        return conv
def mux(select, left, right):
    """Elementwise multiplexer: take *left* where *select* is truthy, else *right*.

    Returns a one-element list containing the vectorized result.
    """
    choose = np.vectorize(lambda cond, a, b: (a if cond else b))
    return [choose(select, left, right)]
def convert_to_tensor(value: Union[(Tensor, T, RawTensorTypes)], *, dims: Sequence[Dim]=None, dtype: Optional[str]=None, sparse_dim: Optional[Dim]=None, shape: Sequence[Dim]=None, device: Optional[str]=None, keep_scalar_on_cpu: bool=False, name: Optional[str]=None, _backend: Optional[Type[Backend]]=None) -> Tensor[T]:
... |
def _maybe_get_scalar(value):
    """Return the 0-d tensor constant behind *value* when one exists, else *value* unchanged."""
    const = _maybe_get_const(value, 't')
    is_scalar_tensor = isinstance(const, torch.Tensor) and const.shape == ()
    return const if is_scalar_tensor else value
def count_leading_spaces(s):
    """Return the index of the first non-whitespace character in *s*.

    Empty or all-whitespace strings yield 0.
    """
    match = re.search('\\S', s)
    return match.start() if match else 0
class YZ_CX_EncodingCircuit(EncodingCircuitBase):
def __init__(self, num_qubits: int, num_features: int, num_layers: int=1, c: float=1.0) -> None:
super().__init__(num_qubits, num_features)
self._num_layers = num_layers
self._c = c
def num_parameters(self) -> int:
return ((2 * se... |
def test_mixed_6a(module, name):
    """Verify each conv sub-block of a Mixed_6a module against its reference scope name."""
    test_conv2d(module.branch0, (name + '/Branch_0/Conv2d_1a_3x3'))
    branch1_suffixes = ('/Branch_1/Conv2d_0a_1x1', '/Branch_1/Conv2d_0b_3x3', '/Branch_1/Conv2d_1a_3x3')
    for idx, suffix in enumerate(branch1_suffixes):
        test_conv2d(module.branch1[idx], (name + suffix))
def check_validity(predicted_program_string, sent_ids):
predicted_program_string = predicted_program_string.replace(' ', ' ')
predicted_program_string = predicted_program_string.replace('] (', '] [ (')
predicted_program_string = predicted_program_string.replace('))', ') )')
trees = predicted_program_st... |
class NER(object):
    # Thin wrapper around a spaCy pipeline that extracts named entities.
    # NOTE(review): relies on the Python 2 `unicode` builtin; either this
    # module is py2-only or `unicode` is aliased elsewhere -- confirm.
    def __init__(self):
        # get_spacy() presumably returns a loaded spaCy Language object -- verify.
        self.processor = get_spacy()
    def __call__(self, text):
        # Coerce byte strings to unicode before handing them to spaCy.
        if (not isinstance(text, unicode)):
            text = unicode(text)
        doc = self.processor(text)
        # Doc.ents is the tuple of entity spans found by the pipeline.
        return doc.ents
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='BreakoutNoFrameskip-v4')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--gpu', action='store_true')
args = parser.parse_args()
env = d3rlpy.envs.Atari(gym.make(args.en... |
def sample_chan_commenters(num_samp_commenters, min_subs, comments_fp, all_com_subs_dir, all_com_sel_subs_dir, commenters_samp_fp, commenters_samp_need_sel_fp):
latest_commenter_s = set([])
chan_commenters_d = collections.defaultdict(set)
bad_line_c = 0
for line in open(comments_fp):
try:
... |
class XLMRobertaTokenizer(metaclass=DummyObject):
    """Import-time placeholder: any use raises unless sentencepiece is installed."""
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        # Raise an informative backend error instead of failing later.
        requires_backends(self, ['sentencepiece'])
def _clean_url_path(path, is_local_path):
if is_local_path:
clean_func = _clean_file_url_path
else:
clean_func = _clean_url_path_part
parts = _reserved_chars_re.split(path)
cleaned_parts = []
for (to_clean, reserved) in pairwise(itertools.chain(parts, [''])):
cleaned_parts.ap... |
_method
class PolyhedronRepresentation(SageObject):
INEQUALITY = INEQUALITY
EQUATION = EQUATION
VERTEX = VERTEX
RAY = RAY
LINE = LINE
def __len__(self):
return self._vector.degree()
def __getitem__(self, i):
return self._vector[i]
def __hash__(self):
return hash(t... |
_kl(Pareto, Beta)
_kl(Pareto, ContinuousBernoulli)
_kl(Pareto, Uniform)
def _kl_pareto_infinity(p, q):
    # Shared KL handler for the Pareto-vs-{Beta, ContinuousBernoulli, Uniform}
    # registrations above: no finite divergence exists, so return a tensor of
    # +inf shaped like p.scale (per _infinite_like).
    return _infinite_like(p.scale)
def add_text(img, flip):
    """Stamp a 'FLIP: <value>' label onto the image file at *img*, overwriting it in place."""
    font_path = fm.findfont(fm.FontProperties(family='DejaVu Sans'))
    label_font = ImageFont.truetype(font_path, 72)
    image = Image.open(img)
    canvas = ImageDraw.Draw(image)
    canvas.text((30, 30), f'FLIP: {round(flip, 4)}', (255, 251, 0), font=label_font)
    image.save(img)
class TrainYTVOS(torch.utils.data.Dataset):
def __init__(self, root, split, clip_l, clip_n):
self.root = root
self.split = split
with open(os.path.join(root, 'ImageSets', '{}.txt'.format(split)), 'r') as f:
self.video_list = f.read().splitlines()
self.clip_l = clip_l
... |
def make_model(args, parent=False):
    """Instantiate a binary-weight (BWN) variant of the model class named by args.base."""
    base_module = import_module(('model.' + args.base.lower()))
    base_cls = getattr(base_module, args.base)

    class BWN(base_cls):
        # Identical architecture to the base model, but every conv is binary_conv.
        def __init__(self, args):
            super(BWN, self).__init__(args, conv=binary_conv)

    return BWN(args)
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0, is_train=None):
if ((args is None) or (isinstance(args, (tuple, list)) and (not args))):
raise ValueError('`args` must be specified')
if (not isinstance(args, (tuple, list))):
args = [args... |
def run(csv_path, data_dir, row_start, row_end, screen_width, screen_height, delay=None, connection=None):
data_dir = Path(data_dir).expanduser()
if (row_end is not None):
assert (row_end > row_start), f'Starting row {row_start} must be strictly less than ending row {row_end}!'
assert ((screen_width... |
def p_concat_list2(p):
    # PLY parser action. Deliberately left without a docstring: PLY would
    # interpret one as this rule's grammar -- the production is presumably
    # attached elsewhere (e.g. via __doc__ assignment); confirm.
    # Capture the source span of the 3rd grammar symbol for debug info.
    (startl, endl) = p.linespan(3)
    (startc, endc) = p.lexspan(3)
    di3 = dace.dtypes.DebugInfo(startl, startc, endl, endc)
    # Extend the accumulated row list (p[1]) with a new matrix row built from p[3].
    p[0] = (p[1] + [AST_Matrix_Row(di3, p[3])])
def preprocess_for_eval(image, output_height, output_width, add_image_summaries=True):
if add_image_summaries:
tf.summary.image('image', tf.expand_dims(image, 0))
image = tf.to_float(image)
resized_image = tf.image.resize_image_with_crop_or_pad(image, output_width, output_height)
if add_image_su... |
def Lambda(vs, body):
    """Create a Z3 lambda term binding the constant(s) *vs* in *body*.

    *vs* may be a single constant application or a list of them; returns a
    QuantifierRef wrapping the underlying Z3 lambda AST, in *body*'s context.
    """
    ctx = body.ctx
    # Accept a bare constant by wrapping it into a one-element list.
    if is_app(vs):
        vs = [vs]
    num_vars = len(vs)
    # Build a ctypes array of Z3 ASTs for the bound variables.
    _vs = (Ast * num_vars)()
    for i in range(num_vars):
        _vs[i] = vs[i].as_ast()
    return QuantifierRef(Z3_mk_lambda_const(ctx.ref(), num_vars, _vs, body.as_ast()), ctx)
(scope='module')
def val_loader(item_user_sequential_dataset):
    """Fixture: wrap a Bert4Rec validation dataset in a default single-worker DataLoader."""
    validation_dataset = Bert4RecValidationDataset(item_user_sequential_dataset, item_user_sequential_dataset, item_user_sequential_dataset, max_sequence_length=5)
    loader = torch.utils.data.DataLoader(validation_dataset)
    return loader
class TestLimitedSizeDict(SnipsTest):
def test_should_raise_when_no_size_limit(self):
with self.assertRaises(ValueError) as ctx:
LimitedSizeDict()
self.assertEqual(str(ctx.exception.args[0]), "'size_limit' must be passed as a keyword argument")
def test_should_initialize_with_argumen... |
def read_result(result_path, lexicons, pairs, match_dist_thres, gt_folder, lexicon_type):
results = json.load(open(result_path, 'r'))
results.sort(reverse=True, key=(lambda x: x['score']))
results = [result for result in results if (len(result['rec']) > 0)]
if (not (lexicons is None)):
print('Pr... |
def register_Ns3CallbackImpl__Void_Ns3TcpSocketStateEcnState_t_Ns3TcpSocketStateEcnState_t_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::TcpSocketState::EcnState_t, ns3::TcpSocketState::... |
def compute_hierarchy_graph(scene_graph: nx.DiGraph, precision_thresh=0.75):
G = nx.DiGraph()
G.add_nodes_from(scene_graph.nodes)
for (i, j) in scene_graph.edges:
if (scene_graph.edges[(i, j)]['precision'] > precision_thresh):
G.add_edge(j, i)
hierarchy = {node: node for node in G.no... |
def break_partition_cycles(graph: Graph):
parts = set()
roots = defaultdict(set)
for u in graph.nodes:
parts.add(u.stage_id)
for v in u.out_edges:
if (u.stage_id > v.stage_id):
roots[v.stage_id].add(v)
n_parts = len(parts)
for (idx, group) in roots.items()... |
class UpstreamExpert(UpstreamBase):
def __init__(self, ckpt, **kwargs):
super().__init__(**kwargs)
checkpoint = torch.load(ckpt)
self.cfg = WavLMConfig(checkpoint['cfg'])
self.model = WavLM(self.cfg)
self.model.load_state_dict(checkpoint['model'])
self.model.feature_g... |
def generate_all_label_space_clusterers():
for clusterer in get_networkx_clusterers():
(yield clusterer)
for clusterer in get_matrix_clusterers():
(yield clusterer)
if ((sys.platform != 'win32') and ((sys.platform != 'darwin') and (sys.version_info[0] == 2))):
for (clusterer, _) in g... |
def inception_arg_scope(weight_decay=4e-05, use_batch_norm=True, batch_norm_decay=0.9997, batch_norm_epsilon=0.001, activation_fn=tf.nn.relu, batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
batch_norm_params = {'decay': batch_norm_decay, 'epsilon': batch_norm_epsilon, 'updates_collections': batch_norm_upda... |
def _get_requirements_dict(txtfile):
with open(txtfile, 'r', encoding='utf-8') as snapshot:
snapshot_dict = {}
for line in snapshot:
try:
(k, v) = line.split('==')
snapshot_dict[k] = v
except ValueError:
snapshot_dict[line] = No... |
_util.register_pytree_node_class
class DataPoint():
_name: str
_location: str
_type_: str
data: _Array
def name(self):
return _convert_to_str(self._name)
def location(self):
return _convert_to_str(self._location)
def type_(self):
return _convert_to_str(self._type_)
... |
def main(args):
utils.import_user_module(args)
if (args.buffer_size < 1):
args.buffer_size = 1
if ((args.max_tokens is None) and (args.max_sentences is None)):
args.max_sentences = 1
assert ((not args.sampling) or (args.nbest == args.beam)), '--sampling requires --nbest to be equal to --... |
class DenMune():
def __init__(self, train_data=None, test_data=None, train_truth=None, test_truth=None, file_2d=None, k_nearest=0, rgn_tsne=False, prop_step=0):
if (train_data is None):
raise Exception('No data is provided. At least train data should be provided. Set train_data argmunt properly.... |
class Logger(object):
    """Tee-style writer mirroring everything to stdout and an append-mode log file."""

    def __init__(self, log_file):
        # Keep a handle on the real stdout so callers can swap sys.stdout for us.
        self.terminal = sys.stdout
        self.log = open(log_file, 'a')

    def write(self, message):
        # Echo to the console first, then persist; flush so `tail -f` sees it.
        self.terminal.write(message)
        self.log.write(message)
        self.log.flush()

    def flush(self):
        # No-op: write() already flushes the file; present for file-like duck typing.
        pass
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
model_type = 'resnet'
layer_types = ['basic', 'bottleneck']
def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type='bottleneck', hidden_act='relu', downsample_in_first_stage=False, ... |
.usefixtures('spark', 'schema_string')
()
def dataframe_string(spark, schema_string):
data_string = [(1, ['2', '[PAD]', '[PAD]', '[PAD]', '[PAD]'], [19842]), (1, ['2', '4', '[PAD]', '[PAD]', '[PAD]'], [19842, 19844]), (1, ['2', '4', '3', '[PAD]', '[PAD]'], [19842, 19844, 19843]), (1, ['2', '4', '3', '5', '[PAD]'], ... |
def add_cutmix_training_flags(parser):
    """Register the CutMix command-line options on *parser*."""
    parser.add_argument('--w-cutmix', action='store_true', help='use cutmix training')
    parser.add_argument('--beta', type=float, default=1.0, help='hyperparameter beta')
    parser.add_argument('--cutmix-prob', type=float, default=1.0, help='cutmix probability')
def load_surface_ply_dtu(filename):
with open(filename, 'rb') as fh:
assert (fh.readline() == b'ply\n')
assert (fh.readline() == b'format binary_little_endian 1.0\n')
for _ in range(14):
fh.readline()
nr_elements = int(fh.readline().strip().decode('ascii').split(' ')[(- 1... |
def parse(opt_path, is_train=True):
with open(opt_path, mode='r') as f:
opt = yaml.load(f, Loader=Loader)
gpu_list = ','.join((str(x) for x in opt['gpu_ids']))
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
print(('export CUDA_VISIBLE_DEVICES=' + gpu_list))
opt['is_train'] = is_train
if (... |
class AttentiveStatisticsPooling(nn.Module):
def __init__(self, input_size: int):
super().__init__()
self._indim = input_size
self.sap_linear = nn.Linear(input_size, input_size)
self.attention = nn.Parameter(torch.FloatTensor(input_size, 1))
def input_size(self) -> int:
r... |
class Polynomial_singular_repr():
    # Mixin giving polynomials a representation in the Singular CAS interface.
    # Both methods default to the module-level `singular` session object.
    def _singular_(self, singular=singular):
        # Convert self into an element of the given Singular session.
        return _singular_func(self, singular)
    def _singular_init_func(self, singular=singular):
        # Delegates to the module-level helper of the same name (the local
        # parameter shadows it only inside the signature, not the body's global).
        return _singular_init_func(self, singular)
class _EnvironmentExtender(TypeConstraints):
logger = logger.getChild('_EnvironmentExtender')
def __init__(self, environment, **kws):
self.environment = environment
self.tyvar_reps = ([None] * environment.tyvars)
self.rep_tyvar = {}
super(_EnvironmentExtender, self).__init__(**kw... |
def features_per_rank(args: Arguments) -> int:
    """Number of FFN hidden features owned by each rank under hidden sharding."""
    sharding = hidden_sharding_degree(args)
    return args.ffn_hidden_size // sharding
class ParaphraserTrainer():
def __init__(self, model_name, train_config, mode='train'):
device = 'cpu'
assert (model_name in set(['t5-small', 't5-base', 't5-large']))
if torch.cuda.is_available():
device = torch.device('cuda')
train_data_path = ''
dropout = 0.0
... |
def all_zeros(modules):
    """Return True iff the module's weight (and bias, when present) are all zero.

    Args:
        modules: A module (e.g. ``nn.Linear``/``nn.Conv2d``) exposing a
            ``weight`` tensor and optionally a ``bias`` tensor.

    Returns:
        bool: True when every weight entry is zero and the bias is either
        absent, ``None``, or entirely zero.
    """
    weight_zero = torch.allclose(modules.weight.data, torch.zeros_like(modules.weight.data))
    # Bug fix: modules built with bias=False still *have* a `bias` attribute,
    # set to None, so the original `hasattr` check crashed on `.data`.
    bias = getattr(modules, 'bias', None)
    if bias is not None:
        bias_zero = torch.allclose(bias.data, torch.zeros_like(bias.data))
    else:
        bias_zero = True
    return (weight_zero and bias_zero)
def visualise_point_cloud_registration(src_points, ref_points, gt_transform, est_transform):
src_point_cloud = open3d.make_open3d_point_cloud(src_points)
src_point_cloud.estimate_normals()
src_point_cloud.paint_uniform_color(open3d.get_color('custom_blue'))
ref_point_cloud = open3d.make_open3d_point_clo... |
def register_Ns3DefaultDeleter__Ns3MmWaveChunkProcessor_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::DefaultDeleter< ns3::MmWaveChunkProcessor > const &', 'arg0')])
cls.add_method('Delete', 'void', [param('ns3::MmWaveChunkProcessor *', 'object')], is_static=True)
r... |
def create_optimizer(args, model, filter_bias_and_bn=True):
opt_lower = args.opt.lower()
weight_decay = args.weight_decay
if (weight_decay and filter_bias_and_bn):
skip = {}
if hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
parameters = add_weight_decay... |
def _convert_dict_keys_to_int(dct):
if isinstance(dct, dict):
return {int(k): v for (k, v) in iteritems(dct)}
return dct |
def main(args):
if (args.task_name == 'multiwoz21'):
cfg = Multiwoz21Config()
elif (args.task_name == 'dstc8'):
cfg = Dstc8Config()
else:
raise AssertionError('Task name should be included in [multiwoz21, dstc8].')
dataset_config = DATASET_CONFIG[args.task_name]
annotator = A... |
def require_rjieba(test_case):
    """Decorator: skip *test_case* unless the rjieba package is importable."""
    skip_unless_available = unittest.skipUnless(is_rjieba_available(), 'test requires rjieba')
    return skip_unless_available(test_case)
class Subprocess():
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=stderr, cwd=working_dir, univers... |
def testdata_to_tfrecord():
filename = '../deepsea_filtered.npz'
data = np.load(filename)
x = data['x_test']
y = data['y_test']
with tf.io.TFRecordWriter('./data/testdata.tfrecord') as writer:
for i in tqdm(range(len(y)), desc='Processing Test Data', ascii=True):
example_proto = ... |
class DOMContextEmbedder(Embedder):
def __init__(self, input_dim, output_dim):
super(DOMContextEmbedder, self).__init__()
self._linear = Linear(input_dim, output_dim, bias=True)
self._embed_dim = output_dim
def forward(self, dom_embeds, utt_embeds):
concat = torch.cat([dom_embeds... |
class EmptyArray(Content):
def __init__(self):
pass
def __len__(self):
return 0
def __getitem__(self, where):
if isinstance(where, int):
[][where]
else:
return EmptyArray()
def tostring_part(self, indent, pre, post):
return (((indent + pre)... |
def compute_scores_and_write_to_csv(target_filepattern, prediction_filepattern, output_filename, scorer, aggregator, delimiter='\n'):
target_filenames = _glob(target_filepattern)
prediction_filenames = _glob(prediction_filepattern)
if ((len(target_filenames) < 1) or (len(target_filenames) != len(prediction_... |
class group(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
super(group, self).__init__()
self.conv_a = mfm(in_channels, in_channels, 1, 1, 0)
self.conv = mfm(in_channels, out_channels, kernel_size, stride, padding)
def forward(self, x):
x... |
def train():
output_model_params()
loadFile = True
(ifLoad, data) = load_file(cfg.processed_path, 'processed data', 'pickle')
if ((not ifLoad) or (not loadFile)):
train_data_obj = Dataset(cfg.train_dataset_path, data_type='train')
dev_data_obj = Dataset(cfg.dev_dataset_path, data_type='d... |
def collection_mosaic_day(imcol, region_of_interest, fun_before_mosaic=None):
imlist = imcol.toList(imcol.size())
longitude = region_of_interest.centroid().coordinates().get(0)
hours_add = ee.Number(longitude).multiply((12 / 180.0))
unique_solar_dates = imlist.map((lambda im: ee.Image(im).date().advance... |
def hist(sx):
    """Estimate the empirical distribution of the discretized samples *sx*.

    Returns:
        A ``map`` iterable of relative frequencies, one per distinct sample
        value (kept as ``map`` to preserve the original return type).
    """
    sx = discretize(sx)
    counts = dict()
    for s in sx:
        # Idiom fix: use isinstance rather than `type(s) == list`.
        # Lists are unhashable, so convert them to tuples before counting.
        if isinstance(s, list):
            s = tuple(s)
        counts[s] = (counts.get(s, 0) + 1)
    return map((lambda z: (float(z) / len(sx))), counts.values())
def postprocess(spec: _Spec, preds: Dict[(str, _Array)], sinkhorn_temperature: float, sinkhorn_steps: int, hard: bool) -> Dict[(str, _DataPoint)]:
result = {}
for name in preds.keys():
(_, loc, t) = spec[name]
new_t = t
data = preds[name]
if (t == _Type.SCALAR):
if ha... |
def build_model(hp):
model_url = '
module = hub.KerasLayer(model_url, trainable=True)
model = MyBiTModel(num_classes=37, module=module)
lr = get_lr(hp)
lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(boundaries=SCHEDULE_BOUNDARIES, values=[lr, (lr * 0.1), (lr * 0.001), (lr * 0.000... |
class ProfilerOverheadSamplesTable():
    # Columnar record of profiler-overhead samples: each list below is one
    # column of measurements, and `length` is the shared row count.
    # NOTE(review): presumably decorated as a dataclass/attrs type (the
    # decorator is outside this view) -- confirm, since plain annotations
    # alone do not create instance attributes.
    counters: list
    expiredMarkerCleaning: list
    locking: list
    threads: list
    time: list
    length: int
class SSTPipe(CLSBasePipe):
def __init__(self, subtree=False, train_subtree=True, lower=False, granularity=5, tokenizer='spacy'):
super().__init__(tokenizer=tokenizer, lang='en')
self.subtree = subtree
self.train_tree = train_subtree
self.lower = lower
assert (granularity in ... |
_model_architecture('transformer_lm', 'transformer_lm_gpt3_medium')
def transformer_lm_gpt3_medium(args):
args.decoder_layers = safe_getattr(args, 'decoder_layers', 24)
args.decoder_embed_dim = safe_getattr(args, 'decoder_embed_dim', 1024)
args.decoder_attention_heads = safe_getattr(args, 'decoder_attention... |
def result_to_file(result, file_name):
    """Append the evaluation metrics in *result* to *file_name*, logging each one."""
    with open(file_name, 'a') as writer:
        logger.info('***** Eval results *****')
        for key in sorted(result.keys()):
            value = str(result[key])
            logger.info('  %s = %s', key, value)
            writer.write(('%s = %s\n' % (key, value)))
def multiplication(x: List[T]) -> Union[(T, fenics.Constant)]:
    """Multiply all elements of *x* together.

    Returns ``fenics.Constant(1.0)`` (with a warning) for an empty list,
    otherwise the running product of the elements.
    """
    if (len(x) == 0):
        y = fenics.Constant(1.0)
        _loggers.warning('Empty list handed to multiplication, returning 1.')
    else:
        y = x[0]
        for item in x[1:]:
            # NOTE(review): `*=` may mutate x[0] in place for mutable element
            # types -- confirm that callers do not rely on x[0] afterwards.
            y *= item
    return y
def register_Ns3GtpcModifyBearerRequestMessageBearerContextToBeModified_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::GtpcModifyBearerRequestMessage::BearerContextToBeModified const &', 'arg0')])
cls.add_instance_attribute('epsBearerId', 'uint8_t', is_const=False)
c... |
def ref_clip_by_norm(x, clip_norm, axis):
    """NumPy reference for norm clipping: rescale *x* so its L2 norm along *axis* is at most *clip_norm*."""
    squared_sum = np.sum((x ** 2.0), axis=axis, keepdims=True)
    norm = np.sqrt(squared_sum)
    # Values whose norm is already <= clip_norm are left unchanged (scale == 1).
    return ((clip_norm * x) / np.maximum(norm, clip_norm))
class Test_init_nd_shape_and_axes():
_if_array_api_gpu
_api_compatible
def test_py_0d_defaults(self, xp):
x = xp.asarray(4)
shape = None
axes = None
shape_expected = ()
axes_expected = []
(shape_res, axes_res) = _init_nd_shape_and_axes(x, shape, axes)
... |
def preprocess_knowledge(text, english_stopwords):
    """Lowercase and tokenize *text*, dropping every stopword token."""
    words = nltk.word_tokenize(text.lower())
    return [word for word in words if word not in english_stopwords]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.