code stringlengths 101 5.91M |
|---|
def min_max_scale(data):
    """Normalize `data` into [0, 1] with a global min-max transform.

    Returns:
        (norm_data, min_val, max_val): the scaled array, the global
        minimum of the original data, and the global maximum of the
        min-subtracted data.
    """
    # Global minimum reduced over the first two axes of the input.
    min_val = np.min(np.min(data, axis=0), axis=0)
    shifted = data - min_val
    # Maximum of the shifted data; the epsilon guards against
    # division by zero when the data is constant.
    max_val = np.max(np.max(shifted, axis=0), axis=0)
    norm_data = shifted / (max_val + 1e-07)
    return (norm_data, min_val, max_val)
def insert_table(cursor, table_name: str, column2elements: Dict[(str, List)]) -> None:
column_names = list(column2elements.keys())
num_rows = len(column2elements[column_names[0]])
one_success = False
for row_id in range(num_rows):
row = tuple([column2elements[column_name][row_id] for column_name... |
def mkpath(*paths):
    """Join path components (coerced to str) and resolve to a canonical real path."""
    joined = os.path.join(*map(str, paths))
    return os.path.realpath(joined)
class AutoencoderKL(pl.LightningModule):
def __init__(self, ddconfig, lossconfig, embed_dim, ckpt_path=None, ignore_keys=[], image_key='image', colorize_nlabels=None, monitor=None):
super().__init__()
self.image_key = image_key
self.encoder = Encoder(**ddconfig)
self.decoder = Decode... |
def _findNode(parent, name, debug_name=None, parse=None):
if (debug_name is None):
debug_name = name
result = parent.find(name)
if (result is None):
raise ValueError("missing element '{}'".format(debug_name))
if (parse is not None):
try:
return parse(result.text)
... |
_module()
class TFCommonDecoder(BaseDecoder):
def __init__(self, max_seq_len=64, n_layers=3, n_head=8, d_k=64, d_v=64, d_model=512, d_inner=1024, dropout=0.1, num_classes=37, mask_id=37, **kwargs):
super().__init__()
self.layer_stack = ModuleList([TFCommonDecoderLayer(d_model, d_inner, n_head, d_k, ... |
_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
predict_with_generate: bool = field(default=False, metadata={'help': 'Whether to use generate to calculate gene... |
def _test_shape_indices(model):
for i in range(model.n_clusters):
(m, n) = model.get_shape(i)
(i_ind, j_ind) = model.get_indices(i)
assert (len(i_ind) == m)
assert (len(j_ind) == n) |
def generateLegend(frame, sweeps):
    """Build a legend string of 'key=value, ' segments, one per sweep key.

    Keys absent from `frame` render as 'key=not present, '; present keys
    use the first element of their value sequence. The trailing ', ' of
    the last segment is preserved for backward compatibility.
    """
    parts = []
    for key in sweeps:
        if key not in frame:
            parts.append(key + '=not present, ')
        else:
            parts.append(key + '=' + str(frame[key][0]) + ', ')
    # ''.join avoids the quadratic cost of repeated string concatenation
    return ''.join(parts)
def train(epoch, train_loader, model, opt, args, logger):
model.train()
train_loss = np.zeros(len(train_loader))
train_bpd = np.zeros(len(train_loader))
num_data = 0
beta = min([((epoch * 1.0) / max([args.warmup, 1.0])), args.max_beta])
logger.info('beta = {:5.4f}'.format(beta))
end = time.t... |
def test_totalvi_reordered_mapping_mudata():
adata = synthetic_iid()
protein_adata = synthetic_iid(n_genes=50)
mdata = MuData({'rna': adata, 'protein': protein_adata})
TOTALVI.setup_mudata(mdata, batch_key='batch', modalities={'rna_layer': 'rna', 'batch_key': 'rna', 'protein_layer': 'protein'})
mode... |
def test_resolver_cache(simple_schema, mocker):
schema = schemathesis.from_dict(simple_schema)
spy = mocker.patch('schemathesis.specs.openapi.schemas.InliningResolver', wraps=InliningResolver)
assert ('_resolver' not in schema.__dict__)
assert isinstance(schema.resolver, InliningResolver)
assert (sp... |
class ConcatDataset(Dataset[T_co]):
datasets: List[Dataset[T_co]]
cumulative_sizes: List[int]
def cumsum(sequence):
(r, s) = ([], 0)
for e in sequence:
l = len(e)
r.append((l + s))
s += l
return r
def __init__(self, datasets: Iterable[Dataset])... |
class TestHMASynthesizer():
def test___init__(self):
metadata = get_multi_table_metadata()
metadata.validate = Mock()
instance = HMASynthesizer(metadata)
assert (instance.metadata == metadata)
assert isinstance(instance._table_synthesizers['nesreca'], GaussianCopulaSynthesize... |
def switch_to_t2t_indexing():
    """Switch the module-level special-token ids to the T2T convention."""
    global GO_ID, EOS_ID, UNK_ID
    # T2T convention: EOS=1, GO=2, UNK=3
    GO_ID, EOS_ID, UNK_ID = 2, 1, 3
def get_nlc_train_set(directory):
train_path = os.path.join(directory, 'train')
print((train_path + '.x.txt'))
print((train_path + '.y.txt'))
if (not (gfile.Exists((train_path + '.x.txt')) and gfile.Exists((train_path + '.y.txt')))):
corpus_file = maybe_download(directory, 'nlc-train.tar', _NLC_... |
def generate_config_registration(base_cls: Type[TDynamicConfig], default_factory: Optional[Callable[([], TDynamicConfig)]]=None) -> Tuple[(Callable[([Type[TDynamicConfig]], None)], Callable[([], TDynamicConfig)])]:
CONFIG_LIST: Dict[(str, Type[TDynamicConfig])] = {}
def register_config(cls: Type[TDynamicConfig]... |
def multi_worker_inference(infer_model, ckpt, inference_input_file, inference_output_file, hparams, num_workers, jobid):
assert (num_workers > 1)
final_output_infer = inference_output_file
output_infer = ('%s_%d' % (inference_output_file, jobid))
output_infer_done = ('%s_done_%d' % (inference_output_fil... |
class NestedTabularMLAlgo(TabularMLAlgo, ImportanceEstimator):
def params(self) -> dict:
if (self._ml_algo._params is None):
self._ml_algo._params = copy(self.default_params)
return self._ml_algo._params
def params(self, new_params: dict):
assert isinstance(new_params, dict)
... |
def calc_coherence(qubit, noise_methods=None):
    """Return an array of coherence values for `qubit`, one per noise method.

    When `noise_methods` is None, uses every channel the qubit supports
    plus the 't1_effective' and 't2_effective' estimates. Each method
    name is looked up on the qubit object and called with no arguments.
    """
    if (noise_methods is None):
        noise_methods = (qubit.supported_noise_channels() + ['t1_effective', 't2_effective'])
    def cap_coherence(time):
        # NOTE(review): this maps any positive time to np.inf and keeps
        # non-positive values unchanged — that looks inverted for a
        # "cap"; confirm the intended condition against the callers.
        return (np.inf if (time > .0) else time)
    return np.array([cap_coherence(getattr(qubit, m)()) for m in noise_methods])
class CreoWrapperError(Exception):
    """Error raised by the Creo wrapper layer."""

    def __init__(self, message):
        # zero-argument super() — same behavior as the explicit py2-style form
        super().__init__(message)
_footprint
def opening(image, footprint=None, out=None, *, mode='reflect', cval=0.0):
    """Morphological opening: erosion followed by dilation.

    The footprint is padded (without padding the end) before use and
    mirrored for the dilation step.
    """
    fp = pad_footprint(footprint, pad_end=False)
    intermediate = erosion(image, fp, mode=mode, cval=cval)
    out = dilation(intermediate, mirror_footprint(fp), out=out, mode=mode, cval=cval)
    return out
def test_rainbow_paper_count():
    """The Rainbow paper should contribute exactly 16 + 108 + 108 entries."""
    entries = rldb.find_all({'source-title': 'Rainbow: Combining Improvements in Deep Reinforcement Learning'})
    expected = 0 + 16 + 108 + 108
    assert len(entries) == expected
def test_gcs_singlepart_zero_bytes():
    """A zero-byte single-part transfer against a fresh GCS bucket should succeed."""
    bucket_name = f'test-skyplane-{uuid.uuid4()}'
    ok = interface_test_framework('gcp:us-central1-a', bucket_name, False, test_delete_bucket=True, file_size_mb=0)
    assert ok
def test_built_in_scalars_in_cli(testdir, cli, snapshot_cli):
schema_file = testdir.make_graphql_schema_file('\nscalar Date\nscalar Time\nscalar DateTime\nscalar IP\nscalar IPv4\nscalar IPv6\nscalar BigInt\nscalar Long\nscalar UUID\n\ntype Query {\n getByDate(value: Date!): Int!\n getByTime(value: Time!): Int!\n ... |
class EvalPrediction(NamedTuple):
    """Evaluation output: model predictions paired with ground-truth label ids."""

    # Raw model outputs; a tuple when the model returns multiple arrays.
    predictions: Union[(np.ndarray, Tuple[np.ndarray])]
    # Ground-truth labels aligned with `predictions`.
    label_ids: np.ndarray
class MyDataset(Dataset):
def __init__(self, root_path='datasets/sun360_d1_t30000_v03000'):
self.root_path = root_path
self.num_training = 500
train_json_path = os.path.join(self.root_path, 'train.json')
self.data = json.load(open(train_json_path, 'r'))[:self.num_training]
(s... |
class XmodOnnxConfig(OnnxConfig):
def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
if (self.task == 'multiple-choice'):
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict([('input_ids', d... |
def register_Ns3McStatsCalculator_methods(root_module, cls):
cls.add_constructor([])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('DoDispose', 'void', [], is_virtual=True)
cls.add_method('GetLteOutputFilename', 'std::string', [])
cls.add_method('GetMmWaveOutputFilena... |
def fix_seeds(seed: int = 3407) -> None:
    """Seed torch (CPU and all CUDA devices), NumPy, and `random`.

    Args:
        seed: seed applied to every RNG; default kept for backward
            compatibility.
    """
    torch.manual_seed(seed)
    # seed every visible GPU, not just the current device; silently a
    # no-op when CUDA is unavailable
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
class PTBTokenizer(object):
def __init__(self, language='en'):
self.language = language
self.nonbreaking_prefixes = {}
self.nonbreaking_prefixes_numeric = {}
self.nonbreaking_prefixes['en'] = ' A B C D E F G H I J K L M N O P Q R S T U V W X Y Z \n Adj Adm Adv Asst Bart Bl... |
def test_net(args, ind_range=None):
dataset = build_dataset(cfg.TEST.DATASETS, is_train=False)
logger = TestingLogger(args.cfg_file.split('/')[(- 1)], log_period=int(np.ceil((10 / cfg.TEST.IMS_PER_GPU))))
if (ind_range is not None):
(start_ind, end_ind) = ind_range
else:
start_ind = 0
... |
def serializedATN():
with StringIO() as buf:
buf.write('\x03\x03q')
buf.write('\u09cf\x04\x02\t\x02\x04\x03\t\x03\x04\x04\t\x04\x04\x05\t\x05\x04\x06\t\x06\x04\x07\t\x07')
buf.write('\x04\x08\t\x08\x04\t\t\t\x04\n\t\n\x04\x0b\t\x0b\x04\x0c\t\x0c\x04\r\t\r\x04\x0e')
buf.write('\t\x0e\... |
def filter_data(data, text):
text_tokens = set(text.split(' '))
data = {k: v for (k, v) in data if ((not is_blocked_key(k)) and (not is_empty(v)))}
if ('name' not in data):
assert ('article_title' in data)
data['name'] = data['article_title']
if ('article_title' in data):
data.po... |
def LF_relative(span):
    """Label a span OTHER when a relative-term regex fires in its context.

    Inspects six tokens on each side of `span`; a match of
    `rgx_relatives` on either side yields OTHER, otherwise PATIENT.
    """
    context_left = get_left_span(span, span.sentence, window=6)
    context_right = get_right_span(span, span.sentence, window=6)
    left_hit = match_regex(rgx_relatives, context_left)
    right_hit = match_regex(rgx_relatives, context_right)
    return OTHER if (left_hit or right_hit) else PATIENT
.corpus
def test_vocabulary():
config = dotenv_values()
corpus = LibriSpeech(config['LibriSpeech'])
text_list = corpus.data_dict['train-clean-100']['text_list']
with tempfile.TemporaryDirectory() as directory:
logging.info(directory)
text_file = os.path.join(directory, 'text.txt')
... |
def get_num_layer_for_vit(var_name, num_max_layer):
if (var_name in ('cls_token', 'mask_token', 'pos_embed')):
return 0
elif var_name.startswith('patch_embed'):
return 0
elif var_name.startswith('rel_pos_bias'):
return (num_max_layer - 1)
elif var_name.startswith('blocks'):
... |
class DataAugmentationForDistorted(object):
def __init__(self, args):
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = (IMAGENET_INCEPTION_MEAN if (not imagenet_default_mean_and_std) else IMAGENET_DEFAULT_MEAN)
std = (IMAGENET_INCEPTION_STD if (not imagenet_default_m... |
def write_stamp_file(stamp_file_name: str, stamp_contents: str) -> None:
    """Write `stamp_contents` to `stamp_file_name`, creating parent directories.

    Raises:
        OSError: if directory creation or the write fails (an
            already-existing directory is not an error).
    """
    parent = os.path.dirname(stamp_file_name)
    # dirname is '' for a bare filename; the old unconditional
    # makedirs('') raised FileNotFoundError in that case
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(stamp_file_name, 'w') as stampfile:
        stampfile.write(stamp_contents)
def red(*msg, sep=','):
    """Join the given items with `sep` and wrap the result in ANSI red."""
    text = sep.join(str(item) for item in msg)
    return Fore.RED + text + Style.RESET_ALL
class QuantileEncoder(util.BaseEncoder, util.SupervisedTransformerMixin):
prefit_ordinal = True
encoding_relation = util.EncodingRelation.ONE_TO_ONE
def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', quantile=0.5, m=1.0):
su... |
def benchmark(clf):
print(('_' * 80))
print('Training: ')
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = (time() - t0)
print(('train time: %0.3fs' % train_time))
t0 = time()
pred = clf.predict(X_test)
test_time = (time() - t0)
print(('test time: %0.3fs' % test... |
def get_requests_auth(auth: (RawAuth | None), auth_type: (str | None)) -> ((HTTPDigestAuth | RawAuth) | None):
    """Adapt a raw credentials pair for `requests`, honoring digest auth."""
    from requests.auth import HTTPDigestAuth
    if not auth or auth_type != 'digest':
        # basic auth (or no auth at all) passes through unchanged
        return auth
    return HTTPDigestAuth(*auth)
def theta_by_pari(self, Max, var_str='q', safe_flag=True):
if (hasattr(self, '__theta_vec') and (len(self.__theta_vec) >= Max)):
theta_vec = self.__theta_vec[:Max]
else:
theta_vec = self.representation_number_list(Max)
self.__theta_vec = theta_vec
if (var_str == ''):
if safe_... |
def evaluate(row: int, col: int, val: float, presolver: str, modified_columns: list, modified_rows: list, modified_var_bounds: list, inactive_columns: list, redundant_rows: list, conflict_detected: bool):
global amount_expecting_nones_col
global amount_expecting_nones_rows
if (((amount_expecting_nones_rows ... |
def convert_pil_to_tensor(image: Image) -> Tensor:
    """Convert a PIL image to a tensor while silencing conversion warnings."""
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        tensor = pil_to_tensor(image)
    return tensor
def main():
input_shape = (3, 32, 32)
visualisation_channels = [0, 1, 2]
model = SimpleAE(input_shape, visualisation_channels)
print('Created model (empty)')
model.eval()
device = which_device(model)
batch_size = 4
number_of_batches = math.ceil((255.0 / float(batch_size)))
time_total... |
def build_sem_seg_head(cfg, input_shape):
    """Instantiate the registered head named by cfg.MODEL.SEM_SEG_HEAD.NAME."""
    head_cls = SEM_SEG_HEADS_REGISTRY.get(cfg.MODEL.SEM_SEG_HEAD.NAME)
    return head_cls(cfg, input_shape)
def _load_state_dict(model, model_url, progress):
pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
state_dict = load_state_dict_from_url(model_url, progress=progress)
for key in list(state_dict.keys()):
res = pattern.match(key)... |
class NegativeInfinityType():
def __repr__(self) -> str:
return '-Infinity'
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return True
def __le__(self, other: object) -> bool:
return True
def __eq__(self, other: object) -> ... |
class Inverse(nn.Module):
    """Wraps a flow so that its forward and inverse directions are swapped."""

    def __init__(self, flow):
        super().__init__()
        # the wrapped flow whose directions are exchanged
        self.flow = flow

    def forward(self, x, logpx=None):
        # forward of the wrapper is the inverse of the wrapped flow
        return self.flow.inverse(x, logpx)

    def inverse(self, y, logpy=None):
        return self.flow.forward(y, logpy)
class ConvolutionalComponent(tf.keras.Model):
def __init__(self, channels, kernels, strides, name='ConvolutionalComponent', **kwargs):
super().__init__(name=name, **kwargs)
self.channels = channels
self.kernels = kernels
self.strides = strides
self.num_of_nets = (len(self.cha... |
def _trace_and_get_graph_from_model(model, args, training):
orig_state_dict_keys = _unique_state_dict(model).keys()
with set_training(model, training):
(trace, torch_out) = torch.jit.get_trace_graph(model, args)
if (orig_state_dict_keys != _unique_state_dict(model).keys()):
raise RuntimeErro... |
def forward_pass(log_probs, labels, blank, label_rep=False):
(T, U, _) = log_probs.shape
S = ((T - U) + 2)
alphas = np.zeros((S, U))
for u in range(1, U):
alphas[(0, u)] = (alphas[(0, (u - 1))] + log_probs[((u - 1), (u - 1), labels[(u - 1)])])
for t in range(1, S):
alphas[(t, 0)] = (... |
class TestPhilox(Base):
def setup_class(cls):
cls.bit_generator = Philox
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(join(pwd, './data/philox-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/philox-testset-2.csv'))
cls.seed_error_type = T... |
def register_Ns3MsduAggregator_methods(root_module, cls):
cls.add_constructor([param('ns3::MsduAggregator const &', 'arg0')])
cls.add_constructor([])
cls.add_method('Aggregate', 'void', [param('ns3::Ptr< ns3::Packet const >', 'msdu'), param('ns3::Ptr< ns3::Packet >', 'amsdu'), param('ns3::Mac48Address', 'sr... |
def my_loss(classifier, regression, points, mode):
    """Equal-weight blend of cross-entropy and MSE losses.

    Returns:
        (loss, mse, ce): the combined loss, the MSE of `regression`
        against `points`, and the cross-entropy of `classifier` logits
        against `mode` labels.
    """
    alpha = 0.5
    mse_term = nn.MSELoss()(regression, points)
    ce_term = nn.CrossEntropyLoss()(classifier, mode)
    combined = ce_term * alpha + mse_term * (1 - alpha)
    return (combined, mse_term, ce_term)
def to_one_hot(x, n):
    """One-hot encode a 2-D index tensor `x` into a float tensor of shape (*x.shape, n)."""
    indices = torch.unsqueeze(x, 2)
    result = torch.FloatTensor(*x.size(), n).zero_()
    # write a 1 at each index position along the new last dimension
    result.scatter_(2, indices, 1)
    return result
def main():
parser = argparse.ArgumentParser(description='Parse the config path')
parser.add_argument('-c', '--config', dest='path', help='The path to the config file. e.g. python train.py --config configs/dc_config.json')
config = parser.parse_args()
with open(config.path) as f:
args = json.loa... |
def all_test_env_combinations(n):
    """Yield every single index [i] and ordered pair [i, j] (i < j) for n >= 3 envs."""
    assert n >= 3
    for first in range(n):
        yield [first]
        yield from ([first, second] for second in range(first + 1, n))
def get_phyche_factor_dic(k):
full_path = os.path.realpath(__file__)
if (2 == k):
file_path = ('%s/data/mmc3.data' % os.path.dirname(full_path))
elif (3 == k):
file_path = ('%s/data/mmc4.data' % os.path.dirname(full_path))
else:
sys.stderr.write('The k can just be 2 or 3.')
... |
class Flashes(BaseDataset):
def __init__(self, config, device):
super().__init__(config, device)
root_dir = Path(os.path.expanduser(config['data_path']))
self._paths = {'train': [], 'val': [], 'test': []}
train_dir = Path(root_dir, 'train')
train_sequence_paths = [path for pa... |
def plot_vector_field(fig, ax, vector_field, skip_rate=1):
skip = (slice(None, None, skip_rate), slice(None, None, skip_rate))
(p, dx, dy, x, y, _) = vector_field
im = ax.imshow(np.swapaxes(p, 0, 1), extent=[x.min(), x.max(), y.min(), y.max()], cmap=plt.get_cmap('plasma'), interpolation='nearest', aspect='a... |
def _named_tempfile_func(error_class):
def named_temp_file(*args, **kwargs):
raise error_class()
return named_temp_file |
def copy_exp_dir(log_dir: str):
    """Copy the current working directory's ./src tree into log_dir/src."""
    source = os.path.join(os.getcwd(), 'src')
    destination = os.path.join(log_dir, 'src')
    shutil.copytree(source, destination)
    print(f'Source copied into {destination}')
class ArgumentParser():
_type = type
_parser
def int_list_parser(x):
return [int(a) for a in re.split('[,_ ;]', x) if a]
_parser
def str_list_parser(x):
return x.split(',')
_parser
def int_or_none_parser(x):
return int(x)
_parser
def float_or_none_parser(x):
... |
def unpickle_build(obj, state):
setstate = getattr(obj, '__setstate__', None)
if (setstate is not None):
setstate(state)
return
if (isinstance(state, tuple) and (len(state) == 2)):
(state, slots) = state
else:
slots = None
if (state is not None):
assert isinst... |
class LinearSeq(object):
def __init__(self, user_size, item_size, size, batch_size, learning_rate, learning_rate_decay_factor, user_attributes=None, item_attributes=None, item_ind2logit_ind=None, logit_ind2item_ind=None, n_input_items=0, loss_function='ce', logit_size_test=None, dropout=1.0, use_sep_item=True, n_sa... |
def ncut_cost(cut, D, W):
    """Normalized-cut cost of boolean partition `cut` on graph W with degrees D."""
    cut = np.array(cut)
    boundary = _ncut_cy.cut_cost(cut, W.data, W.indices, W.indptr, num_cols=W.shape[0])
    # association of each side of the partition with the whole graph
    assoc_a = D.data[cut].sum()
    assoc_b = D.data[~cut].sum()
    return boundary / assoc_a + boundary / assoc_b
def groupby_outlet_topics(topicsDF):
mean_topics = [f.mean(('t' + str((i + 1)))) for i in range(num_topics)]
count_sources = [f.sum(col) for col in ['sourcesFemaleCount', 'sourcesMaleCount']]
count_articles_per_outlet = [f.count('outlet').alias('numArticles')]
aggregator = ((count_articles_per_outlet + ... |
def add_big_sample_args(parser):
    """Register the required --shape option (two ints: H W) on `parser`."""
    parser.add_argument(
        '--shape',
        type=int,
        nargs=2,
        required=True,
        help='Shape of latents to generate. Pass as two seperate integers, in the form H W',
    )
def register_dataset(name, **args):
    """Decorator factory: register a dataset under lowercase `name` with extra kwargs."""
    key = name.lower()
    def _register(dataset):
        # record the dataset together with its construction kwargs
        _registered_datasets[key] = (dataset, args)
        return dataset
    return _register
class GCN(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, num_layers, dropout):
super(GCN, self).__init__()
self.convs = torch.nn.ModuleList()
self.convs.append(GCNConv(in_channels, hidden_channels, normalize=False))
for _ in range((num_layers - 2)):
... |
def inception_resnet_v2(inputs, num_classes=1001, is_training=True, dropout_keep_prob=0.8, reuse=None, scope='InceptionResnetV2'):
end_points = {}
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs], reuse=reuse):
with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
... |
class ScalarConjPrior(ConjPrior, ABC):
def __init__(self, sample=None):
super().__init__(sample=sample)
self.dim = 1
def process_time_series(self, x):
(t, x) = super().process_time_series(x)
x = (x.flatten() if (x is not None) else x)
return (t, x)
def get_time_series... |
class ConcatOutputAndAttentionWrapper(RNNCell):
def __init__(self, cell):
super(ConcatOutputAndAttentionWrapper, self).__init__()
self._cell = cell
def state_size(self):
return self._cell.state_size
def output_size(self):
return (self._cell.output_size + self._cell.state_size... |
class TestEnSpell(unittest.TestCase):
def test_correct(self):
self.assertEqual(spell.correct('ths')['target'], 'the')
self.assertEqual(spell.correct('ergo')['target'], 'ergo')
self.assertEqual(spell.correct('this')['target'], 'this')
self.assertEqual(spell.correct('-')['target'], '-'... |
class EncoderForecasterBaseFactory(PredictionBaseFactory):
def __init__(self, batch_size, in_seq_len, out_seq_len, height, width, ctx_num=1, name='encoder_forecaster'):
super(EncoderForecasterBaseFactory, self).__init__(batch_size=batch_size, in_seq_len=in_seq_len, out_seq_len=out_seq_len, height=height, wi... |
class _StandardStemmer(_LanguageSpecificStemmer):
def _r1r2_standard(self, word, vowels):
r1 = ''
r2 = ''
for i in range(1, len(word)):
if ((word[i] not in vowels) and (word[(i - 1)] in vowels)):
r1 = word[(i + 1):]
break
for i in range(1, ... |
def create_dataset(dataset: str, datasets_dir: str, transform: Optional[List[Transform]]=None, target_transform: Optional[List[Transform]]=None, train: bool=True, augmentation: bool=True) -> Dataset:
dataset_dir = os.path.join(datasets_dir, dataset)
if (transform is not None):
raw_transforms = transform... |
def resnext18(baseWidth, cardinality, **unused):
    """Build an 18-layer ResNeXt (BasicBlock, [2, 2, 2, 2], 1000 classes)."""
    return ResNeXt(baseWidth, cardinality, BasicBlock, [2, 2, 2, 2], 1000)
class ChangeItDataset(Dataset):
def __init__(self, pickle_roots, single_class=None, annotation_root=None, file_mode='unannotated', noise_adapt_weight_root=None, noise_adapt_weight_threshold_file=None, deterministic=False):
self.classes = {x: i for (i, x) in enumerate(sorted(set([os.path.basename(fn) for fn ... |
def celu_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, alpha=1.0, axis=1):
dy = grad_inputs[0]
x0 = inputs[0]
(fstart, fstop, fstep) = create_slice(dy.shape, axis, True)
(bstart, bstop, bstep) = create_slice(dy.shape, axis, False)
dy0 = F.slice(dy, fstart, fstop, fstep)
dy1... |
class GradCAM(ExplainerBase):
explanation_type = 'local'
alias = ['gradcam', 'grad-cam']
def __init__(self, model, target_layer, preprocess_function: Callable, tokenizer: Callable, loss_function: Callable, patch_shape: tuple=(24, 24), **kwargs):
super().__init__()
if ((not is_tf_available())... |
def get_path(datafolder, id):
if ('Ses01' in id):
return os.path.join(datafolder, 'Session1/sentences/wav', id[:(- 5)], (id + '.wav'))
if ('Ses02' in id):
return os.path.join(datafolder, 'Session2/sentences/wav', id[:(- 5)], (id + '.wav'))
if ('Ses03' in id):
return os.path.join(data... |
def main():
    """Run tokenizer processing for every treebank named on the command line."""
    args = build_argparse().parse_args()
    paths = default_paths.get_default_paths()
    for treebank in args.treebanks:
        process_treebank(treebank, common.ModelType.TOKENIZER, paths, args.output_dir)
def test_gen_drrg_targets():
target_generator = textdet_targets.DRRGTargets()
assert np.allclose(target_generator.orientation_thr, 2.0)
assert np.allclose(target_generator.resample_step, 8.0)
assert (target_generator.num_min_comps == 9)
assert (target_generator.num_max_comps == 600)
assert np.al... |
class Attention2d(nn.Module):
def __init__(self, dim, out_dim=None, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0):
super().__init__()
out_dim = (dim if (out_dim is None) else out_dim)
self.num_heads = num_heads
head_dim = (out_dim // num_heads)
sel... |
def _observe(state, player_id) -> Array:
    """Return an (8, 8, 2) boolean observation of the board from `player_id`'s view."""
    # flip the board sign when observing as the non-current player
    board = jax.lax.cond(
        (player_id == state.current_player),
        (lambda: state._board.reshape((8, 8))),
        (lambda: (state._board * (-1)).reshape((8, 8))),
    )
    def occupied_by(color):
        return (board * color) > 0
    # channel 0: stones with sign +1, channel 1: stones with sign -1
    return jnp.stack(jax.vmap(occupied_by)(jnp.int32([1, (-1)])), (-1))
def convert_latex_macro_to_mathjax(macro):
left_bracket = macro.find('[')
right_bracket = macro.find('[')
if (left_bracket >= 0):
right_bracket = macro.find(']')
num_args = int(macro[(left_bracket + 1):right_bracket])
else:
num_args = 0
start_name = (macro.find('{') + 1)
... |
class SeqNCAConfig(ModelConfig):
    """Configuration values for the SeqNCA model."""

    # registry/display name of this model config
    name: str = 'SeqNCA'
    # number of convolutional filters — presumably per conv layer; confirm against the model
    conv_filters: int = 64
    # width of the fully-connected layer(s)
    fc_size: int = 64
    # side length of the patch — assumed square; verify against the model's usage
    patch_width: int = 3
class ConvTBC(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding=0):
super(ConvTBC, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _single(kernel_size)
self.padding = _single(padding)
... |
class ChunkStore():
def __init__(self, chunk_dir: PathLike):
self.chunk_dir = Path(chunk_dir)
self.chunk_dir.mkdir(parents=True, exist_ok=True)
self.region_key_upload_id_mappings: Dict[(str, str)] = {}
for chunk_file in self.chunk_dir.glob('*.chunk'):
logger.warning(f'Del... |
def obj_asd(result, reference, voxelspacing=None, connectivity=1):
sds = list()
(labelmap1, labelmap2, _a, _b, mapping) = __distinct_binary_object_correspondences(result, reference, connectivity)
slicers1 = find_objects(labelmap1)
slicers2 = find_objects(labelmap2)
for (lid2, lid1) in list(mapping.i... |
def read_intrinsics_text(path):
cameras = {}
with open(path, 'r') as fid:
while True:
line = fid.readline()
if (not line):
break
line = line.strip()
if ((len(line) > 0) and (line[0] != '#')):
elems = line.split()
... |
def download_one_image(bucket, split, image_id, download_folder):
    """Best-effort download of one image from `bucket` into `download_folder`.

    A failed download (missing object, permissions, etc.) is logged and
    skipped rather than raised, so bulk downloads can continue.
    """
    try:
        bucket.download_file(
            f'{split}/{image_id}.jpg',
            os.path.join(download_folder, f'{image_id}.jpg'),
        )
    except botocore.exceptions.ClientError as exception:
        # previously swallowed silently; surface the failure for debugging
        import logging  # local import: the file's import block is not visible here
        logging.warning('Failed to download %s/%s.jpg: %s', split, image_id, exception)
def _fused_bias_act_ref(x, b, axis, act, alpha, gain):
x = tf.convert_to_tensor(x)
b = (tf.convert_to_tensor(b) if (b is not None) else tf.constant([], dtype=x.dtype))
act_spec = activation_funcs[act]
assert ((b.shape.rank == 1) and ((b.shape[0] == 0) or (b.shape[0] == x.shape[axis])))
assert ((b.sh... |
def read_sample_file():
file = get_param(['sample_file'], '')
if (file == ''):
return ({}, '')
if (type(file) is dict):
SAMPLES = file
input = 'yaml'
else:
delim = get_param(['delim'], '\\s+')
db = pd.read_csv(file, sep=delim, comment='#', dtype=str, keep_default_... |
def calc_local_total_norm_wo_sqrt(parameters, norm_type=2):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter((lambda p: (p.grad is not None)), parameters))
norm_type = float(norm_type)
if (norm_type == inf):
raise NotImplementedError()
else:... |
class JobScheduler():
def __init__(self, job_file: str, scheduler: str='sge', config: dict=None):
assert (scheduler in cluster_resolver.options), f'Invalid scheduler: {scheduler}'
self.scheduler = scheduler
self.file = job_file
path = os.path.realpath(self.file)
self.name = P... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.