| code (string, lengths 101 to 5.91M) |
|---|
def find_text_to_tweet_tokens_mapping(text, tweet_tokens):
current_tok = 0
current_tok_c_pos = 0
n_toks = len(tweet_tokens)
tweet_toks_c_mapping = [list()]
for (c_pos, c) in enumerate(text):
if c.isspace():
continue
if (current_tok_c_pos == len(tweet_tokens[current_tok]))... |
@lru_cache
def split_schema(cache_key: CacheKey) -> tuple[(Schema, Schema)]:
(keywords, non_keywords) = ({}, {})
for (keyword, value) in cache_key.schema.items():
if (keyword in ALL_KEYWORDS):
keywords[keyword] = value
else:
non_keywords[keyword] = value
return (keywords,... |
def _get_w(bg, st, station_dic, end_t, mdl, domain, output_dir, n_days, channel_list):
next_month = (bg + datetime.timedelta(n_days))
nt = station_dic[str(st)]['network']
save_dir = os.path.join(output_dir, st)
save_dir2 = os.path.join((output_dir + 'xml'), st)
while (next_month <= end_t):
i... |
def add_frag_train_args(parser):
parser.add_argument('--debug', default=False, action='store_true')
parser.add_argument('--debug-overfit', default=False, action='store_true')
parser.add_argument('--gpu', default=False, action='store_true')
parser.add_argument('--seed', default=42, action='store', type=i... |
def main():
parser = argparse.ArgumentParser(description='Generates SVO triples from the framenet data.')
parser.add_argument('roles_fpath', help='Path to a CSV file with the parsed framenet roles.')
parser.add_argument('verbs_fpath', help='Path to a CSV file with the parsed framenet verb clusters.')
pa... |
class DeterministicPolicy(nn.Module):
def __init__(self, obs_dim, act_dim, hidden_dim=256, n_hidden=2):
super().__init__()
self.net = mlp([obs_dim, *([hidden_dim] * n_hidden), act_dim], output_activation=nn.Tanh)
def forward(self, obs):
return self.net(obs)
def act(self, obs, determi... |
class SSH(nn.Module):
def __init__(self, in_channel, out_channel):
super(SSH, self).__init__()
assert ((out_channel % 4) == 0)
leaky = 0
if (out_channel <= 64):
leaky = 0.1
self.conv3X3 = conv_bn_no_relu(in_channel, (out_channel // 2), stride=1)
self.conv5... |
def register_Ns3FfMacSchedSapProviderSchedUlNoiseInterferenceReqParameters_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::FfMacSchedSapProvider::SchedUlNoiseInterferenceReqParameters const &', 'arg0')])
cls.add_instance_attribute('m_rip', 'uint16_t', is_const=False)
... |
def test_combine_floordiv_float_tensors():
a_raw = torch.tensor([2.0, 2.0, 2.0])
b_raw = torch.tensor([1.0, 2.0, 3.0])
feature_dim = Dim(3)
a = Tensor(name='a', raw_tensor=a_raw, dims=[feature_dim], dtype='float32')
b = Tensor(name='b', raw_tensor=b_raw, dims=[feature_dim], dtype='float32')
resu... |
class XGLMModel(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch']) |
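This is the dummy-object guard pattern: the class is always importable, but constructing it fails unless the backend is installed. A minimal sketch in that spirit (illustrative only, not the actual transformers helper):

import importlib.util

def requires_backends(obj, backends):
    # Raise a helpful error if any required backend is not importable.
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f'{name} requires the following backends: {missing}')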
class ImageNetC(Downloader):
def __init__(self, corruption=None, severity=0):
base_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'ImageNetC')
if (not os.path.exists(os.path.join(base_dir, '_SUCCESS'))):
base = '
suf = '?download=1'
self.do... |
def test_logistic_ucb_initialize():
with pytest.raises(ValueError):
LogisticUCB(n_actions=2, dim=2, epsilon=(- 0.2))
n_actions = 3
policy = LogisticUCB(n_actions=n_actions, dim=2, epsilon=0.5)
for i in range(n_actions):
assert isinstance(policy.model_list[i], MiniBatchLogisticRegression) |
def parse_table(env: str, system: str, suffix: str) -> None:
private_copy = {'Num. Workers': [], 'FPS': [], 'Env': [], 'System': [], 'Method': []}
sep = f'<!-- {env} - {system} -->'
raw = open('README.md').read().split(sep)[1].strip().splitlines()
worker_num = list(map(int, raw[0].split('|')[2:(- 1)]))
... |
def extract_values(d, subkey=None, verbose=False):
if (subkey is None):
s = set()
for v in d.values():
for x in v.keys():
s.add(x)
if (len(s) == 1):
subkey = next(iter(s))
else:
raise ValueError('please choose subkey from', s)
i... |
def iemocap_for_superb(target_dir: str, cache_dir: str, iemocap: str, test_fold: int, valid_ratio: float=0.2, get_path_only: bool=False):
target_dir = Path(target_dir)
train_path = (target_dir / f'train.csv')
valid_path = (target_dir / f'valid.csv')
test_paths = [(target_dir / f'test.csv')]
if get_p... |
class ConcatAggregator(Aggregator):
def __init__(self, batch_size, dim, dropout=0.0, act=tf.nn.relu, name=None):
super(ConcatAggregator, self).__init__(batch_size, dim, dropout, act, name)
with tf.variable_scope(self.name):
self.weights = tf.get_variable(shape=[(self.dim * 2), self.dim],... |
def register_Ns3DsrHelper_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::DsrHelper const &', 'arg0')])
cls.add_method('Copy', 'ns3::DsrHelper *', [], is_const=True)
cls.add_method('Create', 'ns3::Ptr< ns3::dsr::DsrRouting >', [param('ns3::Ptr< ns3::Node >', 'node')],... |
def get_data_loaders(data_path, task, language, representations, pca_size, batch_size):
dataset_cls = get_data_cls(task)
(trainloader, pca, classes, words) = get_data_loader(dataset_cls, data_path, language, representations, pca_size, 'train', batch_size=batch_size, shuffle=True)
(devloader, _, classes, wor... |
def all_survival_function_estimators():
estimators = set()
for cls in all_survival_estimators():
if hasattr(cls, 'predict_survival_function'):
if issubclass(cls, CoxnetSurvivalAnalysis):
est = cls(fit_baseline_model=True)
else:
est = cls()
... |
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if (args.work_dir is not None):
cfg.work_dir = args.work_dir
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
checkpoint = torch.load(args.checkpoint, map_location='cpu')
state_dict = checkpoint['s... |
class TestNamedTupleAPI(unittest.TestCase):
def test_native_functions_yaml(self):
operators_found = set()
regex = re.compile('^(\\w*)(\\(|\\.)')
file = open(aten_native_yaml, 'r')
for f in yaml.load(file.read()):
f = f['func']
ret = f.split('->')[1].strip()
... |
def construct_from_generators_indices(generators, filtration, base_ring, check):
generators = [list(g) for g in generators]
if (len(generators) == 0):
dim = ZZ(0)
else:
dim = ZZ(len(generators[0]))
ambient = VectorSpace(base_ring, dim)
if (matrix(base_ring, generators).rank() < dim):... |
class MNISTNet(nn.Module):
def __init__(self):
super().__init__()
self.conv_layers = nn.Sequential(nn.Conv2d(1, 10, kernel_size=5), nn.MaxPool2d(2), nn.ReLU(), nn.Conv2d(10, 20, kernel_size=5), nn.Dropout(), nn.MaxPool2d(2), nn.ReLU())
self.fc_layers = nn.Sequential(nn.Linear(320, 50), nn.Re... |
def make_rttm_and_score(prediction_dir: str, score_dir: str, gt_rttm: str, frame_shift: int, thresholds: List[int], medians: List[int], subsampling: int=1, sampling_rate: int=16000):
Path(score_dir).mkdir(exist_ok=True, parents=True)
dscore_dir = (Path(score_dir) / 'dscore')
rttm_dir = (Path(score_dir) / 'r... |
def test_predict_proba_weighting_soft_voting(create_pool_classifiers):
query = np.array([[(- 1), 1]])
expected = np.array([0.5769, 0.4231])
competences = np.array([[0.5, 1.0, 0.2]])
predictions = np.array([[0, 1, 0]])
probabilities = np.array([[[0.5, 0.5], [1.0, 0.0], [0.33, 0.67]]])
pool_classi... |
@dace.library.node
class Allgather(MPINode):
implementations = {'MPI': ExpandAllgatherMPI}
default_implementation = 'MPI'
def __init__(self, name, *args, **kwargs):
super().__init__(name, *args, inputs={'_inbuffer'}, outputs={'_outbuffer'}, **kwargs)
def validate(self, sdfg, state):
(inbuffer, outbuf... |
def test_local_bindings():
import pybind11_cross_module_tests as cm
i1 = m.LocalType(5)
assert (i1.get() == 4)
assert (i1.get3() == 8)
i2 = cm.LocalType(10)
assert (i2.get() == 11)
assert (i2.get2() == 12)
assert (not hasattr(i1, 'get2'))
assert (not hasattr(i2, 'get3'))
assert (... |
class _SumLinearOperator(LinearOperator):
def __init__(self, A, B):
if ((not isinstance(A, LinearOperator)) or (not isinstance(B, LinearOperator))):
raise ValueError('both operands have to be a LinearOperator')
if (A.shape != B.shape):
raise ValueError(f'cannot add {A} and {B... |
def get_openclip_embeddings(model, tokenizer, vocabulary, prompt='a '):
model.eval()
sentences = [(prompt + x) for x in vocabulary]
text = tokenizer(sentences).to(model.token_embedding.weight.device)
with torch.no_grad():
if (len(text) > 10000):
text_features = torch.cat([model.encod... |
class UsageError(ClickException):
exit_code = 2
def __init__(self, message, ctx=None):
ClickException.__init__(self, message)
self.ctx = ctx
self.cmd = (self.ctx.command if self.ctx else None)
def show(self, file=None):
if (file is None):
file = get_text_stderr()
... |
def initialize(module: nn.Module, init_cfg: Union[(Dict, List[dict])]) -> None:
if (not isinstance(init_cfg, (dict, list))):
raise TypeError(f'init_cfg must be a dict or a list of dict, but got {type(init_cfg)}')
if isinstance(init_cfg, dict):
init_cfg = [init_cfg]
for cfg in... |
class Net(nn.Module):
def __init__(self, n, c, n_split=4):
super(Net, self).__init__()
dim_1 = (2 + (((3 * n) * (n - 1)) // 4))
if ((dim_1 % n_split) != 0):
warnings.warn('changed dim_1')
dim_1 -= (dim_1 % n_split)
self.input_layer = SplitLinear(nn.Linear(((n ... |
class TreeRNNCell(RNNCell):
def __init__(self, cell, input_size, reduce_func):
self._cell = cell
self._input_size = input_size
self._reduce_func = reduce_func
def __call__(self, inputs, state, scope=None):
with tf.variable_scope((scope or self.__class__.__name__)):
d ... |
class CapsuleLayer(layers.Layer):
def __init__(self, num_capsule, dim_capsule, routings=3, kernel_initializer='glorot_uniform', **kwargs):
super(CapsuleLayer, self).__init__(**kwargs)
self.num_capsule = num_capsule
self.dim_capsule = dim_capsule
self.routings = routings
self.... |
class SoftQuantizerRegularization():
def __init__(self, total_gradient_steps: int):
self.linear_decay = LinearTempDecay(total_gradient_steps)
self.count_iter = 0
def __call__(self, model: nn.Module, entropy_reg: float):
soft_reg_aux: List[torch.Tensor] = []
b = self.linear_decay(... |
def conduct_experiment_for_multiple_runs(num_input_dimensions, num_train_samples, num_features):
list_of_log10_ws_dist_weight = []
list_of_log10_ws_dist_hybrid = []
for _ in range(num_experiment_runs):
(log10_ws_dist_weight, log10_ws_dist_hybrid) = conduct_experiment(num_input_dimensions=num_input_d... |
class TestPrettyPrinters(test_libcython_in_gdb.DebugTestCase):
def setUp(self):
super(TestPrettyPrinters, self).setUp()
self.break_and_run('b = c = d = 0')
def get_pyobject(self, code):
value = gdb.parse_and_eval(code)
assert (libpython.pointervalue(value) != 0)
return va... |
def _try_to_match_transformation(graph: Union[(SDFG, SDFGState)], collapsed_graph: nx.DiGraph, subgraph: Dict[(int, int)], sdfg: SDFG, xform: Union[(xf.PatternTransformation, Type[xf.PatternTransformation])], expr_idx: int, nxpattern: nx.DiGraph, state_id: int, permissive: bool, options: Dict[(str, Any)]) -> Optional[x... |
class CrystalOfQueerTableaux(CrystalOfWords, QueerSuperCrystalsMixin):
def __init__(self, cartan_type, shape):
from sage.categories.regular_supercrystals import RegularSuperCrystals
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
Parent.__init__(self, category=(Regula... |
def load_checkpoint(model, optimizer, PATH):
data = torch.load(PATH)
model.load_state_dict(data['model_state_dict'])
optimizer.load_state_dict(data['optimizer_state_dict'])
return (data['epoch'], data['loss']) |
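A usage sketch for the loader above; the file name is hypothetical, and the checkpoint must have been saved with the same four keys:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters())
# Matching writer side:
# torch.save({'epoch': e, 'loss': l,
#             'model_state_dict': model.state_dict(),
#             'optimizer_state_dict': optimizer.state_dict()}, 'checkpoint.pt')
epoch, loss = load_checkpoint(model, optimizer, 'checkpoint.pt')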
class TodoistShareTask(VirtualFunctionTool):
name = 'TodoistShareTask'
summary = 'Shares a task with another user.'
parameters: List[ArgParameter] = [{'name': 'task_id', 'type': 'string', 'description': 'The id of the task.', 'required': True}, {'name': 'user_email', 'type': 'string', 'description': 'The em... |
def generate_lookup(layers_to_id: Dict[(Node, str)], tensors_to_id: Dict[(Node, str)]) -> str:
lookup = []
for (field_node, field_id) in chain(layers_to_id.items(), tensors_to_id.items()):
fields = re.findall('\\[[a-zA-Z0-9_]*\\]', field_node.scope)
fields = map((lambda s: s[1:(- 1)]), fields)
... |
def run_pool(poolsize, chunksize):
client = utils.init_client(MONGO_ARGS)
id_collection = client[DB_NAME][READ_COL]
query = utils.prepare_query(FILTERS)
document_ids = id_collection.find(query).distinct('_id')
logger.info(f'Obtained ID list for {len(document_ids)} articles.')
if (DOC_LIMIT > 0):... |
def register_Ns3LteRrcSapPdschConfigCommon_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::LteRrcSap::PdschConfigCommon const &', 'arg0')])
cls.add_instance_attribute('pb', 'int8_t', is_const=False)
cls.add_instance_attribute('referenceSignalPower', 'int8_t', is_const... |
@register_criterion('masked_lm')
class MaskedLmLoss(FairseqCriterion):
def forward(self, model, sample, reduce=True):
masked_tokens = sample['target'].ne(self.padding_idx)
sample_size = masked_tokens.int().sum().item()
if (sample_size == 0):
masked_tokens = None
logits = model(**s... |
def resnet_ddg_110(depth, num_classes=10, num_splits=2):
    # Note: the `depth` argument is accepted but ignored; depth is fixed at 110.
    return resnet_ddg(110, num_classes=num_classes, num_splits=num_splits) |
def device(x):
if isinstance(x, (numpy.ndarray, numpy.generic)):
return 'cpu'
return x.device |
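Note the asymmetry in the helper above: NumPy inputs yield the string 'cpu', while torch tensors yield a torch.device object. A quick check, assuming device is in scope:

import numpy as np
import torch

assert device(np.zeros(3)) == 'cpu'           # plain string for NumPy
assert device(torch.zeros(3)).type == 'cpu'    # torch.device for tensors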
class Critic(object):
def __init__(self, state_dim, action_dim, device, LR, GAMMA):
self.state_dim = state_dim
self.action_dim = action_dim
self.device = device
self.LR = LR
self.GAMMA = GAMMA
self.network = QNetwork(state_dim=self.state_dim, action_dim=self.action_di... |
class FindDefault():
def __init__(self, target: str, instance_column: str):
self.target = target
self.instance_name = instance_column
self.train_ins = random.sample(b.instances, math.ceil((len(b.instances) * 0.75)))
self.test_ins = [x for x in b.instances if (x not in self.train_ins)... |
def generate_dataset(number_of_examples, test=False):
if test:
b = math.pi
else:
b = (0 if random.choice([True, False]) else math.pi)
x = (((torch.rand(number_of_examples, 1) * 4) * math.pi) - (2 * math.pi))
y = torch.sin((x + b))
return (x, y) |
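The generator above draws x uniformly from [-2*pi, 2*pi) and applies a phase shift b: training batches pick b = 0 or pi at random, while test batches always use b = pi. A usage sketch, assuming the function is in scope:

x_train, y_train = generate_dataset(128)          # phase 0 or pi, chosen at random
x_test, y_test = generate_dataset(32, test=True)  # phase fixed to pi
assert x_train.shape == (128, 1) and y_test.shape == (32, 1)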
def ttest(x, y, conf_level=0.95, **kw):
if (len(x) != len(y)):
raise AttributeError('vectors x and y must be of same length')
test = myR.t_test(x, y, conf_level=conf_level, **kw)._sage_()
t = test.get('DATA').get('p.value')
return (t, test) |
def test_id_loss(_id, string):
(x_train, y_train) = load_data(ids[_id:(_id + 1)])
y_predicted = my_model.predict(x_train)
print(string, loss(y_predicted, y_train[(- 1):])) |
class TaskletNode(ScheduleTreeNode):
node: nodes.Tasklet
in_memlets: Dict[(str, Memlet)]
out_memlets: Dict[(str, Memlet)]
def as_string(self, indent: int=0):
in_memlets = ', '.join((f'{v}' for v in self.in_memlets.values()))
out_memlets = ', '.join((f'{v}' for v in self.out_memlets.value... |
class TranslatorRegistry(object):
registry_ = {}
def Register(cls, op_name):
def Wrapper(func):
cls.registry_[op_name] = func
return func
return Wrapper
def TranslateLayer(cls, layer, pretrained_blobs, is_test, **kwargs):
try:
(caffe_ops, params) =... |
def transformer(batch_size):
model = ('Transformer (batch size %d)' % batch_size)
command = 'python3 train.py -data %s/translation/multi30k.atok.low.pt'
command += (' -batch_size %d -proj_share_weight' % batch_size)
working_directory = 'translation'
num_steps_arg = '-step'
return JobTemplate(mod... |
def main():
args = parse_args()
datasets = (DATASET_CONFIGS.keys() if (args.datasets == ['all']) else args.datasets)
for dataset in datasets:
print(f'[{dataset}] Converting ...')
cfg = DATASET_CONFIGS[dataset]
prefix = cfg.pop('prefix', dataset)
input_path = os.path.join(args... |
@cached_function
def Fricke_module(l):
t = PolynomialRing(QQ, 't').gen()
return (Fricke_polynomial(l) / t) |
def accuracy_topk_subselected(logits, targets):
targets = torch.tensor(list(map((lambda x: class_sublist_1_8.index(x)), targets)))
return accuracy_topk(logits, targets) |
@compare_numpy_output()
def test_full_like(A: dace.complex64[(N, M, 2)]):
return np.full_like(A, fill_value=5) |
def jpeg_compression(scale, src, dst, config):
dim = config['n_features_per_level']
n_levels = config['n_levels']
for d in range(dim):
for i in range(n_levels):
src_path = os.path.join(src, f'dim{d}', f'{str(i).zfill(2)}.png')
save_path = os.path.join(dst, f'dim{d}', str(scal... |
def save_config_to_file(cfg, pre='cfg', logger=None):
for (key, val) in cfg.items():
if isinstance(cfg[key], edict):
if (logger is not None):
logger.info(('\n%s.%s = edict()' % (pre, key)))
else:
print(('\n%s.%s = edict()' % (pre, key)))
sa... |
def acc_single(a, b, mask):
ind = (mask == 1)
if (torch.sum(ind) == 0):
return 0
correct = (a[ind] == b[ind]).float()
acc = (torch.sum(correct) / correct.size(0))
return acc |
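A worked example of the masked accuracy above: only positions where mask == 1 count, so the final mismatch is ignored.

import torch

a = torch.tensor([1, 0, 1, 1])
b = torch.tensor([1, 1, 1, 0])
mask = torch.tensor([1, 1, 1, 0])
print(acc_single(a, b, mask))  # tensor(0.6667): 2 of the 3 masked positions agree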
def _rank_not_in_group(group):
if (group == GroupMember.WORLD):
return False
return (group == GroupMember.NON_GROUP_MEMBER) |
def register_Ns3SimpleRefCount__Ns3FlowClassifier_Ns3Empty_Ns3DefaultDeleter__lt__ns3FlowClassifier__gt___methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::SimpleRefCount< ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter< ns3::FlowClassifier > > const &', 'o')])
return |
class BiasCorrectionDepthwiseTest(BaseKerasFeatureNetworkTest):
def __init__(self, unit_test):
super().__init__(unit_test, input_shape=(8, 8, 1), experimental_exporter=True)
def get_quantization_config(self):
return mct.core.QuantizationConfig(weights_bias_correction=True)
def create_network... |
def amenities_is_valid(column_names, data):
boolean_column = column_names[0]
numerical_column = column_names[1]
true_values = (data[boolean_column] & (data[numerical_column] == 0.0))
false_values = (~ data[boolean_column])
return (true_values | false_values) |
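The check above accepts a row when the boolean flag is unset, or when it is set and the numeric column is exactly 0.0. A small illustration with hypothetical column names:

import pandas as pd

data = pd.DataFrame({'no_amenities': [True, True, False],
                     'amenity_count': [0.0, 3.0, 5.0]})
print(amenities_is_valid(['no_amenities', 'amenity_count'], data).tolist())
# [True, False, True]: row 1 claims no amenities yet reports a non-zero count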
class DenseIndexer(object):
def __init__(self, buffer_size: int=50000):
self.buffer_size = buffer_size
self.index_id_to_db_id = []
self.index = None
def index_data(self, vector_files: List[str]):
start_time = time.time()
buffer = []
for (i, item) in enumerate(iter... |
def update_L(Lname):
DL.L = Layout(Lname)
(xmin, xmax, ymin, ymax) = DL.L.ax
zmax = (DL.L.maxheight - 0.1)
tx = copy.copy(DL.a)
rx = copy.copy(DL.b)
(tx_x.min, tx_x.max) = (xmin, xmax)
(tx_y.min, tx_y.max) = (ymin, ymax)
tx_z.max = zmax
tx_x.value = tx[0]
tx_y.value = tx[1]
t... |
def KL_divergence(mu, logvar):
    # Closed-form Gaussian KL term for a VAE, averaged over the batch.
    # Note the sign: as written this is -KL(N(mu, exp(logvar)) || N(0, I)),
    # i.e. the quantity added to the ELBO rather than subtracted from it.
    return ((0.5 * torch.sum(((((- (mu ** 2)) + 1) + logvar) - torch.exp(logvar)))) / mu.shape[0]) |
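This matches the closed form KL(N(mu, exp(logvar)) || N(0, I)) = 0.5 * sum(mu^2 + exp(logvar) - logvar - 1) per sample, up to the sign flip noted above. A quick sanity check, assuming the function is in scope:

import torch

mu, logvar = torch.zeros(4, 8), torch.zeros(4, 8)
assert KL_divergence(mu, logvar).item() == 0.0  # N(0, I) against itself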
def test_varlen_string():
t = ListType(NumpyType('uint8', parameters={'__array__': 'char'}), parameters={'__array__': 'string'})
assert (str(ak.types.from_datashape(str(t), highlevel=False)) == str(t)) |
class OreFunction(AlgebraElement):
def __init__(self, parent, numerator, denominator=None, simplify=True):
AlgebraElement.__init__(self, parent)
ring = parent._ring
numerator = ring(numerator)
if (denominator is None):
denominator = ring.one()
else:
de... |
def set_build_dir(path):
global BUILD_DIR
BUILD_DIR = mk_util.norm_path(path)
mk_dir(BUILD_DIR) |
@pytest.mark.skipif((not _test_internal.have_fenv()), reason='no fenv()')
def test_add_round_up():
np.random.seed(1234)
_test_internal.test_add_round((10 ** 5), 'up') |
class BatchTraceHistory(_History):
def on_batch_end(self, epoch, logs):
self._record_trace()
return super().on_batch_end(epoch, logs) |
class NEMCell(RNNCell):
def __init__(self, num_units, name='NEMCell'):
self._num_units = num_units
self._name = name
def state_size(self):
return self._num_units
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
with tf.v... |
class PretrainedConfig(object):
pretrained_config_archive_map = {}
def __init__(self, **kwargs):
self.finetuning_task = kwargs.pop('finetuning_task', None)
self.num_labels = kwargs.pop('num_labels', 2)
self.output_attentions = kwargs.pop('output_attentions', False)
self.output_hi... |
def test_gamma():
try:
import statsmodels.api as sm
except ImportError:
pytest.xfail("`statsmodels` not found. `Gamma` datafit can't be tested.")
rho = 0.01
(n_samples, n_features) = (100, 10)
(X, y, _) = make_correlated_data(n_samples, n_features, random_state=0)
y[(y <= 0)] = 0... |
def _initialize_backend():
from .._functions.thnn import _all_functions as _thnn_functions
from .._functions.rnn import RNN, RNNTanhCell, RNNReLUCell, GRUCell, LSTMCell
from .._functions.dropout import Dropout, FeatureDropout
backend.register_function('RNN', RNN)
backend.register_function('RNNTanhCe... |
class ButterflyPermutationTest(tf.test.TestCase):
def test(self):
for units in TEST_DIMENSIONS:
if (not (units % 2)):
fp = ButterflyPerm(units=units, frequency=(units // 2))
self.assertAllClose(fp(fp.inverse_matrix), tf.eye(units))
else:
... |
def save_sample_q(model, epoch, arg, num=100, save=True, i=0, video=False):
milestone = str(epoch)
if (i > 0):
milestone += ('-' + str(i))
batches = num_to_groups(num, num)
all_images_list = list(map((lambda n: model.sample(batch_size=n, save_video=video)), batches))
all_images = torch.cat(a... |
class ResultsLog(object):
supported_data_formats = ['csv', 'json']
def __init__(self, path, resume=True, data_format='csv'):
if (data_format not in ResultsLog.supported_data_formats):
raise ValueError(('data_format must of the following: ' + '|'.join(['{}'.format(k) for k in ResultsLog.suppo... |
class _composite_rays_train(Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float32)
def forward(ctx, sigmas, rgbs, deltas, rays, bound):
sigmas = sigmas.contiguous()
rgbs = rgbs.contiguous()
deltas = deltas.contiguous()
rays = rays.contiguous()
M = sigmas.shape[0]
N = rays.sha... |
def update_model(net, optimizer, scheduler, epoch, i_tb, exp_path, exp_name, scores, train_record, log_file=None):
acc1 = scores
snapshot_name = ('all_ep_%d_acc1_%.3f' % ((epoch + 1), acc1))
if (acc1 > train_record['best_acc1']):
train_record['best_acc1'] = acc1
train_record['last_model_name... |
def getpydocsign(a, var):
global lcb_map
if isfunction(var):
if ('result' in var):
af = var['result']
else:
af = var['name']
if (af in var['vars']):
return getpydocsign(af, var['vars'][af])
else:
errmess(('getctype: function %s has ... |
def register_Ns3OlsrIfaceAssocTuple_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_output_stream_operator()
cls.add_constructor([])
cls.add_constructor([param('ns3::olsr::IfaceAssocTuple const &', 'arg0')])
cls.add_instance_attribute('ifaceAddr', 'ns3::Ipv4Address', is_c... |
class PatternAvoider(GenericBacktracker):
def __init__(self, parent, patterns):
GenericBacktracker.__init__(self, [], 1)
self._patterns = patterns
self._parent = parent
def _rec(self, obj, state):
i = state
if (state != self._parent.n):
new_state = (state + 1)... |
class AMPTrainer(SimpleTrainer):
def __init__(self, model, data_loader, optimizer, param_wrapper, grad_scaler=None):
unsupported = 'AMPTrainer does not support single-process multi-device training!'
if isinstance(model, DistributedDataParallel):
assert (not (model.device_ids and (len(mod... |
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('act_name, ctx, func_name', list_ctx_and_func_name(['binary_tanh', 'binary_sigmoid']))
def test_activation_double_backward(act_name, seed, ctx, func_name):
from nbla_test_utils import backward_function_tester
act = getattr(F, act_name)
rng = np.random.RandomState(see... |
class BatchNorm2d(_BNBase):
def __init__(self, in_size: int, name: str=''):
super(BatchNorm2d, self).__init__(in_size, batch_norm=nn.BatchNorm2d, name=name) |
def rev_list(branch, num_commits):
res = subprocess.run(['git', 'rev-list', '--max-count', f'{num_commits}', '--first-parent', branch], stdout=subprocess.PIPE, encoding='utf-8')
res.check_returncode()
return res.stdout.rstrip('\n').split('\n') |
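A usage sketch; it must run inside a git checkout, and 'main' is an assumed branch name:

print(rev_list('main', 3))  # e.g. ['<sha-1>', '<sha-2>', '<sha-3>'], newest first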
def simExtCallScriptFunction(functionNameAtScriptName, scriptHandleOrType, inputInts, inputFloats, inputStrings, inputBuffer):
char_pointers = []
for s in inputStrings:
char_pointers.append(ffi.new('char[]', s.encode('ascii')))
strIn = ffi.new('char *[]', char_pointers)
outInt = ffi.new('int **'... |
class Data2VecAudioConfig(PretrainedConfig):
model_type = 'data2vec-audio'
def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_d... |
class CounterExampleError(Error):
def __init__(self, cause, model, types, src, srcv, tgtv, trans):
self.cause = cause
self.model = model
self.types = types
self.src = src
self.srcv = srcv
self.tgtv = tgtv
self.trans = trans
cause_str = {PRESAFE: 'Precondit... |
def create_loader(datasets):
loader_train = DataLoader(datasets[0], collate_fn=Batch.collate(), batch_size=cfg.train.batch_size, shuffle=True, num_workers=cfg.num_workers, pin_memory=False)
loaders = [loader_train]
for i in range(1, len(datasets)):
loaders.append(DataLoader(datasets[i], collate_fn=B... |
def test_comparison_with_strings():
p = sqlparse.parse("foo = 'bar'")[0]
assert (len(p.tokens) == 1)
assert isinstance(p.tokens[0], sql.Comparison)
assert (p.tokens[0].right.value == "'bar'")
assert (p.tokens[0].right.ttype == T.String.Single) |
def pipeline(image):
image = yellow_dectection(image)
height = image.shape[0]
width = image.shape[1]
gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
cannyed_image = cv2.Canny(gray_image, 100, 200)
lines = cv2.HoughLinesP(cannyed_image, rho=6, theta=(np.pi / 60), threshold=200, lines=np.arra... |
def launch():
raiser = EventRaiser()
raiser_log = pformat(raiser._eventMixin_events, indent=4)
log.debug(('raiser: %s' % raiser_log))
raiser_listeners_ids = {}
(event_class, handler_id) = raiser.addListener(EventName, _handle_EventName, priority=0)
raiser_listeners_ids['_handle_EventName'] = han... |
def test_tags2chunks(BIOES_tags_example):
((tags, cas_tags, _), *_) = BIOES_tags_example
translator = ChunksTagsTranslator(scheme='BIOES')
chunks = translator.tags2chunks(tags)
assert (len(chunks) == 5)
for (chunk_type, chunk_start, chunk_end) in chunks:
assert all(((tag.split('-')[1] == chu... |
class DynamicLSTM(nn.Module):
def __init__(self, input_size, hidden_size, num_layers=1, bias=True, batch_first=True, dropout=0, bidirectional=False, only_use_last_hidden_state=False, rnn_type='LSTM'):
super(DynamicLSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_... |
class UNet(nn.Module):
def __init__(self, n_in_channels, n_out_channels, n_layers, batch_norm=False):
super(UNet, self).__init__()
self.n_in_channels = n_in_channels
self.n_out_channels = n_out_channels
self.n_layers = n_layers
nc = 64
self.batch_norm = batch_norm
... |