code stringlengths 101 5.91M |
|---|
_function
def variable(R, v):
if (v in ZZ):
v = str(v)
tail = re.compile('\\d+$')
matches = []
for g in R.gens():
match = tail.search(str(g))
if ((match is not None) and (match.group() == v)):
matches.append(g)
if (not matches):
... |
def find_upper_bounds(theta_uc, upper_bounds_list):
    """Return the first (theta_b, upper_bounds) entry whose threshold covers theta_uc.

    ``upper_bounds_list`` is scanned in order; the first entry with
    ``theta_uc <= theta_b`` wins. When no threshold is large enough,
    the last entry is returned as a fallback.
    """
    covering = next(
        (entry for entry in upper_bounds_list if theta_uc <= entry[0]),
        None,
    )
    return covering if covering is not None else upper_bounds_list[-1]
def test_chi_to_gauss():
assert_allclose(chi_to_gauss(470, 600, 80, 12), 331., rtol=1e-05)
assert_allclose(chi_to_gauss(700, 600, 80, 12), 586., rtol=1e-05)
assert_allclose(chi_to_gauss(700, 600, 80, 1), 695., rtol=1e-05)
assert_allclose(chi_to_gauss(470, 600, 80, 1), 463., rtol=1e-05)
assert_equal(... |
def assert_type_compatibility(defined_symbols: collections.OrderedDict, types: tuple):
if (None in types):
raise IncompatibleTypeError('`None` was given', types)
vec_types = list(set([t for t in types if isinstance(t, dtypes.vector)]))
ptr_types = list(set([t for t in types if isinstance(t, dtypes.p... |
def download(url):
f = tempfile.TemporaryFile()
parsed = urlparse(url)
name = Path(parsed.path).name
with requests.get(url, stream=True) as r:
r.raise_for_status()
total_size = int(r.headers.get('content-length', 0))
prog = tqdm.tqdm(unit='B', unit_scale=True, unit_divisor=1024, ... |
class KernelPRank(_BasePRank):
def __init__(self, n_iter=10, shuffle=True, random_state=None, kernel='linear', gamma=None, degree=3, coef0=1, kernel_params=None):
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.kernel = kernel
self.gamma = ga... |
def get_params(opt, size):
(w, h) = size
new_h = h
new_w = w
(crop_w, crop_h) = (0, 0)
if (opt.preprocess == 'resize_and_crop'):
new_h = new_w = opt.load_size
crop_h = crop_w = opt.crop_size
elif (opt.preprocess == 'scale_width_and_crop'):
new_w = opt.load_size
ne... |
def download_datasets(root, url):
    """Download the archive at ``url`` into ``root`` and unpack it.

    NOTE(review): the archive is extracted into the module-level
    ``storage_dir`` rather than ``root`` — confirm that asymmetry is
    intentional.
    """
    download_and_extract_archive(extract_root=storage_dir, download_root=root, url=url)
def purify(string):
    """Normalize ``string`` into a lowercase identifier-like form.

    Spaces and hyphens become underscores; each '/' becomes '_or_'.
    """
    replacements = str.maketrans({' ': '_', '-': '_', '/': '_or_'})
    return string.lower().translate(replacements)
class LukeTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = LukeTokenizer
test_rust_tokenizer = False
from_pretrained_kwargs = {'cls_token': '<s>'}
def setUp(self):
super().setUp()
self.special_tokens_map = {'entity_token_1': '<ent>', 'entity_token_2': '<ent2>'}
... |
class MarianTokenizer(PreTrainedTokenizer):
vocab_files_names = vocab_files_names
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['attention_mask'... |
def register_Ns3WifiMacQueueItem_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_constructor([param('ns3::WifiMacQueueItem const &', 'arg0')])
cls.add_constructor([param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::WifiMacHeader const &', 'header')])
cls.add_constructor([param(... |
def train(epoch):
    """Run one optimization epoch over the module-level ``train_loader``.

    Past epoch 30 every parameter group's learning rate is forced to
    1e-3 before the epoch starts (manual step schedule).
    """
    model.train()
    if epoch > 30:
        # Manual LR schedule: drop to 1e-3 once training is past epoch 30.
        for group in optimizer.param_groups:
            group['lr'] = 0.001
    for batch in train_loader:
        optimizer.zero_grad()
        loss = F.nll_loss(model(batch.to(device)), batch.y)
        loss.backward()
        optimizer.step()
@_app.route('/draw', methods=['GET'])
def draw():
    """Render the drawing page, logging the session uuid when one exists.

    Fixes: the route decorator was a bare ``_app.route(...)`` call whose
    returned decorator was discarded, so the view was never registered;
    restored the ``@``. Also renamed the local that shadowed the builtin
    ``id``.
    """
    if 'id' in session:
        session_id = session['id']
        print('uuid: ', session_id)
    return render_template('draw.html', title='Write')
('semantic-role-labeling')
class SemanticRoleLabelerPredictor(Predictor):
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
super().__init__(model, dataset_reader)
self._tokenizer = SpacyWordSplitter(language='en_core_web_sm', pos_tags=True)
def make_srl_string(words: List... |
class CodeGenerator(NodeVisitor):
def __init__(self, environment, name, filename, stream=None, defer_init=False, optimized=True):
if (stream is None):
stream = NativeStringIO()
self.environment = environment
self.name = name
self.filename = filename
self.stream = ... |
def setup_classifiers():
rng = np.random.RandomState(654321)
(X, y) = make_classification(n_classes=2, n_samples=1000, weights=[0.2, 0.8], random_state=rng)
(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.33, random_state=rng)
scalar = StandardScaler()
X_train = scalar.fit_tr... |
def save_texture_to_numpy(tex: texture_type.rw_texture(num_dimensions=2, fmt=Format.rgba8, lod=0), img: ndarray_type.ndarray(dtype=vec3, ndim=2)):
    # Kernel: copy the RGB channels of a 2-D RGBA8 texture into `img`,
    # scaling from [0, 1] floats up to rounded [0, 255] values.
    # NOTE(review): assumes texel values are normalized to [0, 1] — confirm.
    for (i, j) in img:
        # tex.load takes an i32 2-vector of texel coordinates; .rgb drops alpha.
        img[(i, j)] = ops.round((tex.load(vector(2, i32)([i, j])).rgb * 255))
class FedCurvWeightedAverage(WeightedAverage):
def call(self, local_tensors, tensor_db, tensor_name, fl_round, tags):
if (tensor_name.endswith('_u') or tensor_name.endswith('_v') or tensor_name.endswith('_w')):
tensors = [local_tensor.tensor for local_tensor in local_tensors]
agg_res... |
def checkpoint_wrapper(m, offload_to_cpu=False):
    """Rebind ``m.forward`` to an activation-checkpointed version of itself.

    The original forward is preserved on ``m.precheckpoint_forward`` so the
    wrap can be detected (and asserted against being applied twice); ``m``
    is returned to allow chaining.
    """
    assert (not hasattr(m, 'precheckpoint_forward')), 'checkpoint function has already been applied?'
    original_forward = m.forward
    m.precheckpoint_forward = original_forward
    m.forward = functools.partial(_checkpointed_forward, original_forward, offload_to_cpu)
    return m
def generate_forward_method(stage_id: int, graph: Graph, partition_nodes: List[Node], model_outputs: List[Node], partition_fields: Dict[(Node, str)], stage_depth_from_end: int, generate_explicit_del=False, generate_activation_propagation=True, move_tensors=False) -> Tuple[(List[str], Dict[(str, List)])]:
inputs = g... |
class LangAnnotationModel(LightningModule):
def __init__(self):
super().__init__()
self.finished_annotation_train = False
self.dummy_net = Linear(1, 1)
def on_train_batch_start(self, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
if self.finished_annotation_train:
... |
def CremonaDatabase(name=None, mini=None, set_global=None):
if (set_global is not None):
from sage.misc.superseded import deprecation
deprecation(25825, 'the set_global argument for CremonaDatabase is deprecated and ignored')
if (name is None):
if DatabaseCremona().is_present():
... |
class Conv3x3Drop(nn.Module):
def __init__(self, in_feat, out_feat):
super(Conv3x3Drop, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(in_feat, out_feat, kernel_size=3, stride=1, padding=1), nn.Dropout(p=0.2), nn.ReLU())
self.conv2 = nn.Sequential(nn.Conv2d(out_feat, out_feat, kernel_... |
def init_array(A, B):
    """Fill the n*n*n arrays A and B with the same synthetic pattern."""
    n = N.get()
    for i in range(n):
        for j in range(n):
            for k in range(n):
                # Same deterministic seed value written to both arrays.
                scaled = ((i + j) + (n - k)) * 10
                A[i, j, k] = datatype(scaled) / n
                B[i, j, k] = datatype(scaled) / n
def test_almost_equal():
    """A typetracer-backed array must not compare almost_equal to a concrete one."""
    concrete = [True, False, False]
    traced = ak.to_backend(concrete, 'typetracer')
    assert not ak.almost_equal(concrete, traced)
def flip_rotate_image(image):
    """Flip ``image`` top-to-bottom, then rotate it 90 degrees counterclockwise.

    Pure-NumPy replacement for the previous PIL round-trip
    (``Image.FLIP_TOP_BOTTOM`` followed by ``Image.ROTATE_90``): it avoids
    two array<->PIL copies and works for any dtype/channel layout instead of
    only PIL-supported modes. Output values are identical for inputs PIL
    could handle.
    """
    flipped = np.flipud(image)   # FLIP_TOP_BOTTOM == reverse the row axis
    rotated = np.rot90(flipped)  # np.rot90 is 90° CCW, matching PIL ROTATE_90
    # np.rot90 returns a view; copy so the result owns its data, like
    # np.array(pil_img) did.
    return rotated.copy()
def zmove(src, target):
src = tk.uncached_path(src)
target = tk.uncached_path(target)
if (not src.endswith('.gz')):
tmp_path = (src + '.gz')
if os.path.exists(tmp_path):
os.unlink(tmp_path)
sp.check_call(['gzip', src])
src += '.gz'
if (not target.endswith('.gz... |
def matrix_similarity_classes(n, q=None, invertible=False):
if (q is None):
q = ZZ['q'].gen()
basering = q.parent()
if (n == 0):
return basering.one()
if invertible:
tilde = (1 - (~ q))
return sum((((q ** max(la)) * (tilde ** len([x for x in la.to_exp() if (x > 0)]))) for... |
def get_mixture_grad_b(b, a, b0, eta):
def A_func(b):
return mixture.A(a=a, b=(b + b0), eta=eta)
A1 = numerical_1st_derivative(b, A_func, EPSILON)
A2 = numerical_2nd_derivative(b, A_func, EPSILON)
r = mixture.r(a=a, b=(b + b0), eta=eta)
v = mixture.v(a=a, b=(b + b0), eta=eta)
return dict... |
def extract_user_id(x: dict) -> int:
    """Return the user id stored in ``x``, or -1 when the key is missing."""
    try:
        return int(x[C.Keys.USER_ID])
    except KeyError:
        return -1
class TFMPNetPreTrainedModel():
    """Import-time placeholder used when TensorFlow is not installed.

    Both construction and ``from_pretrained`` raise via ``requires_tf``.
    """
    def __init__(self, *args, **kwargs):
        requires_tf(self)
    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
class LRFinder(lr_scheduler._LRScheduler):
def __init__(self, optimizer, lr_min, lr_max, step_size, linear=False):
ratio = (lr_max / lr_min)
self.linear = linear
self.lr_min = lr_min
self.lr_mult = ((ratio / step_size) if linear else (ratio ** (1 / step_size)))
self.iteration... |
def main_wordnet(input_file, output_file, nums_lst):
hypos_prompt_lst = []
hypos_counter = 0
with open(input_file) as in_f:
input_prompts = in_f.readlines()
counter = 0
for prompt in input_prompts:
hypos_lst = single_prompt_wordnet(prompt.strip('\n'), nums_lst)
if ((hypos_lst... |
def layer_norm_and_dropout(input_tensor, dropout_prob, is_training, name=None):
    """Apply layer normalization, then dropout.

    Args:
        input_tensor: tensor to normalize.
        dropout_prob: probability of dropping each unit.
        is_training: dropout is only active while training.
        name: optional name forwarded to the layer-norm op.
    Returns:
        The normalized-and-dropped-out tensor.
    """
    normalized = layer_norm(input_tensor, name)
    return dropout(normalized, dropout_prob, is_training)
def test_parallel_thompson_sampling_raises_for_changing_batch_size() -> None:
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing='ij'), axis=(- 1)), ((- 1), 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
... |
def _read_pretrained_tokens(embeddings_file_uri: str) -> List[str]:
from stog.modules.token_embedders import EmbeddingsTextFile
logger.info('Reading pretrained tokens from: %s', embeddings_file_uri)
tokens: List[str] = []
with EmbeddingsTextFile(embeddings_file_uri) as embeddings_file:
for (line... |
def get_params(argv='1'):
print('SET: {}'.format(argv))
params = dict(quick_test=True, dataset_dir='DCASE2020_SELD_dataset/', feat_label_dir='DCASE2020_SELD_dataset/feat_label_hnet/', model_dir='models/', dcase_dir='results/', mode='dev', dataset='foa', fs=24000, hop_len_s=0.02, label_hop_len_s=0.1, max_audio_l... |
def save_model_to_weights_file(weights_file, model):
logger.info('Saving parameters and momentum to {}'.format(os.path.abspath(weights_file)))
blobs = {}
for param in model.params:
scoped_name = str(param)
unscoped_name = c2_utils.UnscopeName(scoped_name)
if (unscoped_name not in blo... |
def batch_mse(output, target):
    """Sum-reduced MSE between argmax predictions and targets, both scaled to [0, 1].

    Returns:
        (mse, batch_size) so callers can aggregate across batches.
    """
    batch_size = target.shape[0]
    predictions = torch.argmax(output, 1) / 255
    scaled_target = target.long() / 255
    # Both sides must land inside [0, 1] after the /255 scaling.
    assert ((predictions.max() <= 1.0) and (scaled_target.max() <= 1.0))
    total_error = F.mse_loss(predictions, scaled_target, reduction='sum')
    return (total_error, batch_size)
def rewrite_train_hist(working_dir, model_fn, knowledge_fn, data, suffix='new', metric_name_dict={'acc': 0, 'knowledge': 1, 'loss': 2}):
import tensorflow as tf
from ..utils.io import read_history
old_df = read_history([os.path.join(working_dir, 'train_history.csv')], metric_name_dict)
new_fh = open(os.... |
def _random_links(n_synthetic: int, n_attacks: int, n_neighbors: int) -> np.ndarray:
rng = np.random.default_rng()
return np.array([rng.choice(n_synthetic, size=n_neighbors, replace=False) for _ in range(n_attacks)]) |
def get_eval_loaders(data_args, transform_args, task_sequence, batch_size, frontal_lateral, return_info_dict=False):
eval_loaders = []
if data_args.eval_su:
eval_loaders += [get_loader(data_args, transform_args, 'valid', task_sequence, su_frac=1, nih_frac=0, batch_size=batch_size, is_training=False, shu... |
(frozen=True)
class EntryOverlapNgrams():
    # NOTE(review): the `(frozen=True)` line above looks like a stripped
    # `@dataclass(frozen=True)` decorator lost in extraction — confirm
    # against the original source; as written it is not valid syntax.
    # Identifies which (entry, overlap-setting) pair these counts belong to.
    entry_data_overlap_key: EntryDataOverlapKey
    # (ngram, count) pairs for ngrams shared with the reference data.
    overlapping_ngram_counts: List[Tuple[(str, int)]]
def get_params(basedir: str, dirname: str) -> Dict[(str, Any)]:
    """Load the size parameters stored under ``basedir``/``dirname``.

    Reads ``size-params.txt`` inside the directory, whose first line is a
    Python dict literal, and returns it merged with
    ``{'network': <dirname prefix before the first '-'>}``.
    """
    network_name = dirname.split('-')[0]
    params_path = os.path.join(basedir, dirname, 'size-params.txt')
    result = {'network': network_name}
    with open(params_path, 'r') as fh:
        first_line = fh.readlines()[0]
        result.update(ast.literal_eval(first_line))
    return result
class RandomApply(nn.Module):
    """Apply ``fn`` to the input with probability ``p``; otherwise pass it through."""

    def __init__(self, fn, p):
        super().__init__()
        self.fn = fn
        self.p = p

    def forward(self, x):
        # Draw once per call; apply the transform only when the draw falls at or below p.
        should_apply = random.random() <= self.p
        return self.fn(x) if should_apply else x
def demo():
print('SprintDataset demo.')
from argparse import ArgumentParser
from returnn.util.basic import progress_bar_with_time
from returnn.log import log
from returnn.config import Config
from returnn.datasets.basic import init_dataset
arg_parser = ArgumentParser()
arg_parser.add_ar... |
def _set_module_by_path(module, path, value):
path = path.split('.')
for name in path[:(- 1)]:
module = getattr(module, name)
setattr(module, path[(- 1)], value) |
class JieBaTokenizer(object):
def __init__(self):
self.tokenizer = jieba
def word_tokenizer(self, doc):
tokens = self.tokenizer.cut(doc)
tokens = '<split>'.join(tokens).split('<split>')
start = 0
token_spans = []
for token in tokens:
token_spans.append... |
def get_surface_form_orig(format_sql_2, schema):
column_names_surface_form = []
column_names_surface_form_original = []
column_names_original = schema['column_names_original']
table_names_original = schema['table_names_original']
for (i, (table_id, column_name)) in enumerate(column_names_original):
... |
class IndexExpression():
def __init__(self, indexing: t.Union[(int, tuple, t.List[int], t.List[tuple], t.List[list])]=None, axis: t.Union[(int, tuple)]=None) -> None:
self.expression = None
self.set_indexing(indexing, axis)
def set_indexing(self, indexing: t.Union[(int, tuple, slice, t.List[int]... |
def pytest_configure(config):
if config.pluginmanager.hasplugin('xdist'):
config.pluginmanager.register(XDistHooks())
RANDOM_SEED_RANGE = list(range(100))
random_seed_var = environ.get('SKLEARN_TESTS_GLOBAL_RANDOM_SEED')
if (hasattr(config, 'workerinput') and ('random_seeds' in config.workerinpu... |
def query_on_voxel_backward(inputs, min_=[(- 1), (- 1), (- 1)], max_=[1, 1, 1], use_ste=False, boundary_check=False):
grad_output = inputs[0]
query = inputs[1]
feature = inputs[2]
grid_sizes = feature.shape[:(- 1)]
if use_ste:
return (None, None)
gq = grad_query(grad_output, query, featu... |
def gen_module(testcase):
    """Build the module described by ``testcase`` and switch it to eval mode.

    ``testcase['constructor']`` is called with ``testcase['constructor_args']``
    when that key is present and with no arguments otherwise.

    Fixes: the two branches duplicated the construct / train(False) / return
    sequence; collapsed them with ``dict.get`` so the logic exists once.
    """
    args = testcase.get('constructor_args', ())
    module = testcase['constructor'](*args)
    module.train(False)
    return module
class Self_Attn(nn.Module):
def __init__(self, in_channels, spectral_norm):
super(Self_Attn, self).__init__()
self.in_channels = in_channels
if spectral_norm:
self.conv1x1_theta = snconv2d(in_channels=in_channels, out_channels=(in_channels // 8), kernel_size=1, stride=1, padding=... |
def forward_wf_src(model, u, rec_coords, space_order=8, f0=0.015, illum=False, fw=True):
    # Forward modeling using the stored wavefield `u` as the source term.
    # Returns (receiver data, illumination data or None when illum is off).
    # NOTE(review): the source wavefield is built with fw=True regardless of
    # the `fw` argument, while `forward` below does receive fw=fw — confirm
    # the hard-coded True is intentional.
    wsrc = src_wavefield(model, u, fw=True)
    (rec, _, I, _) = forward(model, None, rec_coords, None, space_order=space_order, qwf=wsrc, illum=illum, f0=f0, fw=fw)
    return (rec.data, getattr(I, 'data', None))
def get_batch_dim(array: Union[(NDArray, Sequence[NDArray])]) -> int:
    """Return the size of the leading (batch) axis of ``array``."""
    batch_axis = 0
    return get_axis_size(array, axis=batch_axis)
def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
if (name not in networks_map):
raise ValueError(('Name of network unknown %s' % name))
func = networks_map[name]
(func)
def network_fn(images, **kwargs):
arg_scope = arg_scopes_map[name](weight_decay=weight_decay... |
def modcrop(img, modulo):
    """Crop ``img`` so both dimensions are exact multiples of ``modulo``.

    ``img.size`` is PIL's (width, height); the crop keeps the top-left
    region and trims the remainder from the right and bottom edges.
    """
    (width, height) = img.size
    cropped_w = width - (width % modulo)
    cropped_h = height - (height % modulo)
    return img.crop((0, 0, cropped_w, cropped_h))
def check_nonnegative(input_matrix: Union[(sparse.csr_matrix, np.ndarray)]):
    """Raise ``ValueError`` unless every entry of ``input_matrix`` is nonnegative."""
    if has_nonnegative_entries(input_matrix):
        return
    raise ValueError('Only nonnegative values are expected.')
def filter_homograph_positions(dataset):
    """Keep only dataset entries whose homograph char/phoneme end positions are positive."""
    def _is_positive(value):
        return value > 0
    return dataset.filtered_sorted(
        key_test={
            'homograph_char_end': _is_positive,
            'homograph_phn_end': _is_positive,
        }
    )
def register_Ns3SlotAllocInfo_methods(root_module, cls):
cls.add_constructor([param('ns3::SlotAllocInfo const &', 'arg0')])
cls.add_constructor([])
cls.add_constructor([param('uint8_t', 'slotIdx'), param('ns3::SlotAllocInfo::TddMode', 'tddMode'), param('ns3::SlotAllocInfo::TddSlotType', 'slotType'), param('... |
def train(labeled_loader, unlabeled_loader, model, criteria_x, optimizer, epoch, args, logger, tb_logger):
unlabeled_loader.sampler.set_epoch(epoch)
batch_time = AverageMeter()
data_time = AverageMeter()
loss_x_meter = AverageMeter()
loss_u_meter = AverageMeter()
loss_contrast_meter = AverageMet... |
class NodeInstance(NodeEnumerator):
    """Enumerator yielding exactly one node: the stored node, re-fetched from state."""

    def __init__(self, node: Node):
        self.node = node

    def enumerate(self, state: EnvironmentState, **kwargs):
        # Look the node up again by id so the yielded object reflects `state`.
        current = state.get_node(self.node.id)
        yield current
class RefSgdW(MixinWeightDecayFused, RefSolver):
def __init__(self, lr, momentum, wd):
super().__init__(wd)
self.lr = lr
self.momentum = momentum
self.v = {}
self.init_lr = lr
def _set_state_impl(self, key, param):
self.v[key] = np.zeros_like(param)
def _updat... |
class _BasicNet(nn.Module):
def get_nb_params(self):
return sum([p.numel() for p in self.parameters()]) |
class TFMobileBertForMaskedLM(metaclass=DummyObject):
    """Import-time placeholder: instantiation raises because the 'tf' backend is missing."""
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class Sigmoid_MobileNet(nn.Module):
cfg = [64, (128, 2), 128, (256, 2), 256, (512, 2), 512, 512, 512, 512, 512, (1024, 2), 1024]
def __init__(self, num_classes=10):
super(Sigmoid_MobileNet, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
sel... |
def _strip_string(string):
string = string.replace('\n', '')
string = string.replace('\\!', '')
string = string.replace('\\\\', '\\')
string = string.replace('tfrac', 'frac')
string = string.replace('dfrac', 'frac')
string = string.replace('\\left', '')
string = string.replace('\\right', '')... |
class ToTensor_(object):
def __init__(self):
self.rgb2bgr = transforms.Lambda((lambda x: x[([2, 1, 0], ...)]))
def __call__(self, sample):
img = np.array(sample['image']).astype(np.float32).transpose((2, 0, 1))
mask = np.expand_dims(np.array(sample['label']).astype(np.float32), (- 1)).tr... |
def add_model_training_inputs(model):
logger = logging.getLogger(__name__)
logger.info('Loading dataset: {}'.format(cfg.TRAIN.DATASETS))
roidb = combined_roidb_for_training(cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
logger.info('{:d} roidb entries'.format(len(roidb)))
model_builder.add_training_i... |
def get_wikitext2_raw_train_valid_test_ds(model_name_or_path, tokenizer, block_size=512, overwrite_cache=False, DATA_DIR=DEFAULT_DATA_DIR, split='all'):
wt2_data_path = os.path.join(DATA_DIR, 'wikitext-2-raw')
train_file = os.path.join(wt2_data_path, 'wiki.train.raw')
valid_file = os.path.join(wt2_data_path... |
def test_reduce_mean_dyn_batch_time():
time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
in_dim = Dim(7, name='in')
extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
class _Net(rf.Module):
def __call__(self, x: Tensor) -> Tensor:
... |
class TFMPNetModel(metaclass=DummyObject):
    """Import-time placeholder: instantiation raises because the 'tf' backend is missing."""
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class YosoPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder: instantiation raises because the 'torch' backend is missing."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class LayoutType(enum.Enum):
ColumnMajor = enum_auto()
RowMajor = enum_auto()
ColumnMajorInterleaved2 = enum_auto()
RowMajorInterleaved2 = enum_auto()
ColumnMajorInterleaved32 = enum_auto()
RowMajorInterleaved32 = enum_auto()
ColumnMajorInterleaved64 = enum_auto()
RowMajorInterleaved64 =... |
_converter_regitstry('DMA_gather')
def DMA_gather_converter(context: 'SG2260Context', reg: DMA_gather_reg):
    # Thin converter: delegates DMA_gather lowering to the shared base handler.
    # NOTE(review): the registry call above appears to have lost its '@'
    # (and the name looks like a typo of 'registry') — confirm against the
    # original source before relying on registration happening.
    return dma_gather_base(context, reg)
class CSBSDmat(SpectralMatrix):
def __init__(self, test, trial, scale=1, measure=1, assemble=None, kind=None, fixed_resolution=None):
SpectralMatrix.__init__(self, test, trial, scale=scale, measure=measure, assemble=assemble, kind=kind, fixed_resolution=fixed_resolution)
self._matvec_methods += ['cy... |
class FarMemTest(nodec.AppConfig):
def __init__(self, addr, size):
self.addr = addr
self.size = size
def config_files(self):
m = {'farmem.ko': open('../images/farmem/farmem.ko', 'rb')}
return {**m, **super().config_files()}
def run_cmds(self, node):
return ['mount -t ... |
def get_test_acc(event_file):
val_auc_list = np.zeros(100)
test_auc_list = np.zeros(100)
for e in list(tf.train.summary_iterator(event_file)):
if (len(e.summary.value) == 0):
continue
if (e.summary.value[0].tag == 'data/val_auc'):
val_auc_list[(e.step - 1)] = e.summar... |
def test_invalid_sample_without_replacement_algorithm():
    """An unrecognized method name must raise ValueError."""
    bad_method = 'unknown'
    with pytest.raises(ValueError):
        sample_without_replacement(5, 4, bad_method)
def build_progress_bar(args, iterator, epoch=None, prefix=None, default='tqdm', no_progress_bar='none'):
if (args.log_format is None):
args.log_format = (no_progress_bar if args.no_progress_bar else default)
if ((args.log_format == 'tqdm') and (not sys.stderr.isatty())):
args.log_format = 'simpl... |
class ResNetMid(nn.Module):
def __init__(self, num_classes, loss, block, layers, last_stride=2, fc_dims=None, **kwargs):
self.inplanes = 64
super(ResNetMid, self).__init__()
self.loss = loss
self.feature_dim = (512 * block.expansion)
self.conv1 = nn.Conv2d(3, 64, kernel_size=... |
class HookContext():
    """Context object handed to hooks; wraps the API operation being processed."""
    # The operation this hook run is attached to (None outside an operation).
    operation: (APIOperation | None) = None
    # NOTE(review): the bare `_property(...)` call below looks like a stripped
    # `@_property(...)` decorator marking `endpoint` deprecated in 4.0 — confirm.
    _property(removed_in='4.0', replacement='operation')
    def endpoint(self) -> (APIOperation | None):
        # Backward-compatible alias for `operation`.
        return self.operation
def get_room_graph(id, zone_number):
    """Build a room graph for ``id``: detections are clustered into
    ``zone_number`` zones, then nodes and edges are attached.
    """
    (feature_list, location_list) = get_det(id)
    (cluster_record, center_feature) = cluster_feature(feature_list, zone_number=zone_number)
    graph = nx.Graph()
    graph = add_node(graph, center_feature, location_list, cluster_record)
    graph = add_edge(graph, center_feature)
    return graph
class EpisodeMonitor(gym.ActionWrapper):
def __init__(self, env: gym.Env):
super().__init__(env)
self._reset_stats()
self.total_timesteps = 0
def _reset_stats(self):
self.reward_sum = 0.0
self.episode_length = 0
self.start_time = time.time()
def step(self, act... |
def get_eval_set(eval_on, eval_batch_size=8):
if (eval_on == 'dev'):
eval_examples = processor.get_dev_examples(args.data_dir)
elif (eval_on == 'test'):
eval_examples = processor.get_test_examples(args.data_dir)
else:
raise ValueError('eval on dev or test set only')
eval_features... |
def test_fastscnn_backbone():
with pytest.raises(AssertionError):
FastSCNN(3, (32, 48), 64, (64, 96, 128), (2, 2, 1), global_out_channels=127, higher_in_channels=64, lower_in_channels=128)
model = FastSCNN(in_channels=3, downsample_dw_channels=(4, 6), global_in_channels=8, global_block_channels=(8, 12, ... |
class TestKerasSetLayerToBitwidth(unittest.TestCase):
def test_set_layer_to_bitwidth_weights(self):
(layer, node) = test_setup()
wrapper_layer = KerasTrainableQuantizationWrapper(layer, weights_quantizers={KERNEL: ConfigurableWeightsQuantizer(node_q_cfg=node.candidates_quantization_cfg, float_weight... |
def train_model(db: FeverDocDB, params: Union[(Params, Dict[(str, Any)])], cuda_device: int, serialization_dir: str) -> Model:
prepare_environment(params)
os.makedirs(serialization_dir, exist_ok=True)
sys.stdout = TeeLogger(os.path.join(serialization_dir, 'stdout.log'), sys.stdout)
sys.stderr = TeeLogge... |
class MatrixFeatures(object):
def __init__(self, r):
self.r = r
def __call__(self, data):
index = radius(data.pos, data.pos, self.r)
difference = (data.pos[index[0]] - data.pos[index[1]])
distance = torch.linalg.norm(difference, dim=1)
weight = (self.r - distance)
... |
class TestFetchVerifyColumnNameAndType(unittest.TestCase):
def generate_select(self, table, columns):
return ('SELECT %s FROM %s' % (','.join(columns), table))
((testing.get_driver() in ['mysql', 'hive']), 'skip non mysql/hive tests')
def test_verify_column_name_and_type(self):
conn = testin... |
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='BreakoutNoFrameskip-v4')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--gpu', action='store_true')
args = parser.parse_args()
env = d3rlpy.envs.Atari(gym.make(args.en... |
def read_all_polytopes(file_name):
    """Parse every PALP point collection in ``file_name`` into lattice polytopes.

    Collections are read until ``read_palp_point_collection`` returns None
    (end of file); vertices are not computed eagerly.
    """
    polytopes = []
    with open(file_name) as palp_file:
        # iter() with a None sentinel keeps pulling collections until EOF.
        for collection in iter(lambda: read_palp_point_collection(palp_file), None):
            polytopes.append(LatticePolytope(collection, compute_vertices=False))
    return polytopes
class GNNNodeHead(nn.Module):
def __init__(self, dim_in, dim_out):
super(GNNNodeHead, self).__init__()
self.layer_post_mp = MLP(dim_in, dim_out, num_layers=cfg.gnn.layers_post_mp, bias=True)
def _apply_index(self, batch):
if (batch.node_label_index.shape[0] == batch.node_label.shape[0]):... |
class Checkpointer(object):
def __init__(self, distributed):
self.distributed = distributed
def load(self, checkpoint_path, model, optimizer=None):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
if self.distributed:
model = model.module
if ('model' in ch... |
def read_in_docs(corpus_dir: str, output_dir: str, pickle_dir: str, removal=True):
with open(os.path.join(pickle_dir, 'intro_text_often.pkl'), 'rb') as f:
intro_often = pickle.load(f)
with open(os.path.join(pickle_dir, 'summ_text_often.pkl'), 'rb') as f:
summ_often = pickle.load(f)
dict_para... |
def build_pretrain_args(language, dataset, charlm='default', command_args=None, extra_args=None, model_dir=DEFAULT_MODEL_DIR):
charlm = choose_charlm(language, dataset, charlm, default_charlms, ner_charlms)
charlm_args = build_charlm_args(language, charlm, model_dir=model_dir)
wordvec_args = []
if ((ext... |
def p_sample_t_1to0(model, x, y, y_0_hat, y_T_mean, one_minus_alphas_bar_sqrt):
device = next(model.parameters()).device
t = torch.tensor([0]).to(device)
sqrt_one_minus_alpha_bar_t = extract(one_minus_alphas_bar_sqrt, t, y)
sqrt_alpha_bar_t = (1 - sqrt_one_minus_alpha_bar_t.square()).sqrt()
eps_thet... |
def _dbz_to_integers(name):
    """Decode the DBZ file ``name`` and return its whitespace-separated values as Sage Integers."""
    from sage.rings.integer import Integer
    contents = _dbz_to_string(name)
    return [Integer(token) for token in contents.split()]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.