code stringlengths 101 5.91M |
|---|
class Binding(Printable):
source: str
action: Action
filter: Filter
def __init__(self, source, action=Action.RANDOM, filter=Filter()):
self.source = source
self.action = action
self.filter = filter
def __filterBaseSystemConflict(self, vnode: str, node: Node, emulator: Emulato... |
def hilbert_conductor_inverse(d):
Z = ZZ
d = Z(d)
if (d <= 0):
raise ValueError('d needs to be positive')
if (d == 1):
return (Z((- 1)), Z(1))
if (d == 2):
return (Z((- 1)), Z((- 1)))
if d.is_prime():
if ((d % 4) == 3):
return (Z((- 1)), (- d))
... |
class MissingKeyException(Exception):
    """Raised when a required key is absent from a mapping/configuration.

    Fix: forward the key name to ``Exception.__init__`` so ``args`` is
    populated — the original left ``args`` empty, which degrades ``repr``
    and breaks round-tripping through ``pickle``.
    """

    def __init__(self, keyname):
        # Keep standard Exception machinery (args, pickling) working.
        super().__init__(keyname)
        # Store the offending key so handlers can inspect it directly.
        self.keyname = keyname

    def __str__(self):
        return ('Missing key: ' + self.keyname)
class CLEVR_DataLoader(Dataset):
def __init__(self, subset, data_path, features_path, tokenizer, max_words=30, image_resolution=224):
self.data_path = data_path
self.features_path = features_path
self.default_features_path = os.path.join(self.data_path, 'images')
self.nsc_features_pa... |
class MethodTableSlot(SlotDescriptor):
    """Slot descriptor whose code is the scope's method-table symbol."""

    def slot_code(self, scope):
        # With no Python-callable entries the slot is a NULL pointer,
        # spelled '0' in the generated C source.
        if not scope.pyfunc_entries:
            return '0'
        return scope.method_table_cname
class LogStatus(Status):
def __init__(self, status, console: RichConsole, level: int=logging.INFO, enabled: bool=True, speed: float=1.0, refresh_per_second: float=12.5):
super().__init__(status, console=console, spinner='simpleDots', speed=speed, refresh_per_second=refresh_per_second)
self.status = ... |
def compute_accuracy(tg_model, evalloader, scale=None, device=None):
if (device is None):
device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
tg_model.eval()
correct = 0
total = 0
with torch.no_grad():
for (_, (inputs, targets)) in enumerate(evalloader):
... |
def store_nugget_nodes(gold_nuggets, sys_nuggets, m_mapping):
gold_nodes = []
sys_nodes = []
system_nugget_to_node = {}
gold_nugget_to_node = {}
mapped_system_mentions = set()
tid = 0
for (gold_index, (system_index, _)) in enumerate(m_mapping):
node_id = ('te%d' % tid)
tid +=... |
class Symbol(Serialize):
__slots__ = ('name',)
is_term = NotImplemented
def __init__(self, name):
self.name = name
def __eq__(self, other):
assert isinstance(other, Symbol), other
return ((self.is_term == other.is_term) and (self.name == other.name))
def __ne__(self, other):
... |
def test_numpy_2d():
x = ak.Array(ak.contents.NumpyArray(np.asarray([[1, 2, 3]]), parameters={'attrs': {'not': 'hashable'}, 'name': 'x'}))
y = ak.Array(ak.contents.NumpyArray(np.asarray([[1, 2, 3]]), parameters={'attrs': {'not': 'hashable'}, 'name': 'y'}))
result = (x + y)
assert (ak.parameters(result) ... |
class SageArxivConfig(ArxivConfig):
def model(self, hparams):
return SageArxivNet(hidden_dim=self.hidden, num_graph_layers=NUM_LAYERS, dropout=hparams['dropout'], residual=True)
def pretrained(self, model_dir):
return load_pretrained(self, dataset_name='arxiv', model_name='sage', hidden=self.hid... |
class SegmentationDataset(object):
def __init__(self, root, split, mode, transform, base_size=520, crop_size=480):
super(SegmentationDataset, self).__init__()
self.root = root
self.transform = transform
self.split = split
self.mode = (mode if (mode is not None) else split)
... |
class Model_dense_mse(nn.Module):
def __init__(self, layer_func, input_dim, output_dim, support_num, dropout, logging, features=None):
super(Model_dense_mse, self).__init__()
if FLAGS.trainable_embedding:
self.register_parameter('features', nn.Parameter(torch.from_numpy(features).float()... |
def load_model(base_model, ckpt_name, **kwargs):
    """Load pretrained weights named `ckpt_name` into `base_model.model`.

    Requires `kwargs['save_dir']` — the directory holding the checkpoint.
    """
    checkpoint_path = os.path.join(kwargs['save_dir'], ckpt_name)
    base_model.model.from_pretrained(checkpoint_path)
def _to_param_value(val):
    """Stringify `val` for a parameter payload; booleans become the
    lowercase literals 'true' / 'false'."""
    # bool must be special-cased before the generic path, because
    # str(True) would yield 'True'.
    if not isinstance(val, bool):
        return str(val)
    return 'true' if val else 'false'
class ConvBertConfig(PretrainedConfig):
model_type = 'convbert'
def __init__(self, vocab_size=30522, hidden_size=768, is_encoder_decoder=False, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddi... |
class TestUtils(TestCase):
def test_se3ToXYZQUAT_XYZQUATToSe3(self):
m = pin.SE3.Identity()
m.translation = np.array([1.0, 2.0, 3.0])
m.rotation = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, (- 1.0)], [0.0, 1.0, 0.0]])
self.assertApprox(pin.SE3ToXYZQUAT(m).T, [1.0, 2.0, 3.0, (sqrt(2) / 2),... |
def without_common_entries(items: List[Dict[(str, Any)]]) -> List[Dict[(str, Any)]]:
    """Strip key/value pairs that are identical across every dict.

    Returns a new list of dicts, each keeping only the entries whose value
    differs in at least one input dict.

    Fixes: an empty input now returns [] (the original indexed items[0]
    and raised IndexError). Also hoists items[0] and uses a set for the
    O(1) membership test.

    Note: like the original, assumes every dict has at least the keys of
    items[0]; a dict missing one of those keys raises KeyError.
    """
    if not items:
        return []
    first = items[0]
    common_keys = {key for key in first if all(item[key] == first[key] for item in items)}
    return [
        {key: value for key, value in item.items() if key not in common_keys}
        for item in items
    ]
def get_tau(meta, isotope_string):
    """Return the mean lifetime tau = T1/2 / ln(2) for one isotope.

    `meta` is indexed by isotope name and holds key/value rows; the row
    whose 'key' column equals 'Parent T1/2 value' carries the half-life.
    """
    isotope_rows = meta.loc[isotope_string]
    mask = isotope_rows['key'] == 'Parent T1/2 value'
    raw_half_life = isotope_rows.loc[mask]['value'].values[0]
    # Attach proper units before the half-life -> mean-lifetime conversion.
    half_life = convert_half_life_to_astropy_units(raw_half_life)
    return half_life / np.log(2)
_torch
_staging_test
class TrainerIntegrationWithHubTester(unittest.TestCase):
def setUpClass(cls):
cls._token = login(username=USER, password=PASS)
def tearDownClass(cls):
for model in ['test-trainer', 'test-trainer-epoch', 'test-trainer-step']:
try:
delete_repo(toke... |
def update_kwargs(init_model_path, kwargs):
save_dict = torch.load(init_model_path, map_location=torch.device('cpu'))
config_dict = save_dict['config_dict']
del save_dict
config_dict.pop('gpu_idx')
config_dict['mode'] = 'test'
if ('val_bs' in kwargs):
config_dict['val_bs'] = kwargs['val_... |
def GenerateSM80_SparseTensorOp_16864_TN(manifest, args):
if (not CudaToolkitVersionSatisfies(args.cuda_version, 11, 1)):
return
layouts = [(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor)]
math_inst = MathInstruction([16, 8, 64], DataType.s8, DataType.s8, DataType.s32, OpcodeClass... |
def interactions_spark(interactions_pandas):
    """Convert the pandas interactions frame into a Spark DataFrame."""
    spark = get_spark_session()
    return spark.createDataFrame(interactions_pandas)
class DBPediaProcessor(TextClassProcessor):
    """Dataset processor for DBPedia topic classification (14 classes)."""

    def __init__(self):
        # DBPedia examples carry a title field alongside the body text.
        self.has_title = True

    def get_labels(self):
        # Labels are the string forms of 1..14.
        return list(map(str, range(1, 15)))

    def get_train_size(self):
        return 560000

    def get_dev_size(self):
        return 70000
def test_modules() -> None:
fc = torch.nn.Linear(100, 200)
optim = torch.optim.Adam(fc.parameters())
modules = DummyModules(fc, optim)
checkpointer = modules.create_checkpointer('cpu:0')
assert ('fc' in checkpointer.modules)
assert ('optim' in checkpointer.modules)
assert (checkpointer.modul... |
def _maybe_raise_one_or_more(errors: list[Exception]) -> None:
    """Do nothing for an empty list, re-raise a lone error as-is, or
    aggregate several errors into one MultipleFailures."""
    if not errors:
        return
    if len(errors) == 1:
        raise errors[0]
    combined = '\n\n'.join(str(error) for error in errors)
    raise MultipleFailures(combined, errors)
class DummyQuantizer():
    """No-op quantizer: passes tensors through untouched (fp32)."""

    def __call__(self, tensor, tag='', stat_id=None, override_att=None):
        # Identity — the extra parameters exist only to match the real
        # quantizer's call signature.
        return tensor

    def __repr__(self):
        return 'DummyQuantizer - fp32'
def eval_single_model(dataset, model):
top_1 = tf.keras.metrics.SparseCategoricalAccuracy()
top_5 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5)
model.eval()
model = model.to('cuda')
all_top_1 = []
all_top_5 = []
for (image_batch, label_batch) in dataset.as_numpy_iterator():
w... |
class BOX(Structure):
    """ctypes mirror of the C `box` struct: four float fields x, y, w, h."""
    _fields_ = [
        ('x', c_float),
        ('y', c_float),
        ('w', c_float),
        ('h', c_float),
    ]
def generate_size_member(byte_array):
    """Render the Rust `SIZE` constant line for a named byte-array.

    `byte_array` is the Rust identifier of the array; the emitted line
    derives SIZE from its `.len()`. The leading spaces inside the literal
    are the generated file's indentation.
    """
    s = StringIO()
    s.write(('    const SIZE : usize = %s.len();' % byte_array))
    return s.getvalue()
class WMT14(TranslationDataset):
urls = [(' 'wmt16_en_de.tar.gz')]
name = 'wmt14'
dirname = ''
def splits(cls, exts, fields, root='.data', train='train.tok.clean.bpe.32000', validation='newstest2013.tok.bpe.32000', test='newstest2014.tok.bpe.32000', **kwargs):
return super(WMT14, cls).splits(ext... |
def get_batches(data, batch_size):
    """Split `data` into consecutive slices of length `batch_size`;
    the final batch may be shorter."""
    # Ceiling division gives the batch count without importing math.
    n_batches = (len(data) + batch_size - 1) // batch_size
    return [data[k * batch_size:(k + 1) * batch_size] for k in range(n_batches)]
def main():
argparser = ArgumentParser()
argparser.add_argument('file', help="by Returnn search, in 'py' format")
argparser.add_argument('--out', required=True, help='output filename')
args = argparser.parse_args()
d = eval(open(args.file, 'r').read())
assert isinstance(d, dict)
assert (not ... |
def DeclareSort(name, ctx=None):
    """Create a fresh uninterpreted Z3 sort called `name` in `ctx`
    (falling back to the default context when `ctx` is None)."""
    ctx = _get_ctx(ctx)
    sort_symbol = to_symbol(name, ctx)
    return SortRef(Z3_mk_uninterpreted_sort(ctx.ref(), sort_symbol), ctx)
_decorator(list())
def get_ajax_data(html):
    """Parse a weibo AJAX JSON response and extract its weibo list.

    Fix: drop the `encoding` keyword — json.loads() ignored it since
    Python 3.1, deprecated it in 3.8, and removed it in 3.9, where
    passing it raises TypeError.
    """
    cont = json.loads(html).get('data', '')
    return get_weibo_list(cont)
def temp_require_grad(vs):
    """Generator-based context: enable (and zero) gradients on the
    tensors in `vs` for the duration of the yield, then restore each
    tensor's original requires_grad flag."""
    saved_flags = [v.requires_grad for v in vs]
    require_and_zero_grads(vs)
    yield
    # Restore the pre-context gradient flags one tensor at a time.
    for tensor, flag in zip(vs, saved_flags):
        tensor.requires_grad_(flag)
def register_Ns3ChannelCoordinationListener_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::ChannelCoordinationListener const &', 'arg0')])
cls.add_method('NotifyCchSlotStart', 'void', [param('ns3::Time', 'duration')], is_pure_virtual=True, is_virtual=True)
cls.add_me... |
def sample_return(G, params, max_path_length, discount):
    """Roll out one episode with the given policy parameters and annotate
    the path dict with discounted returns and the undiscounted total."""
    G.policy.set_param_values(params)
    episode = rollout(G.env, G.policy, max_path_length)
    rewards = episode['rewards']
    episode['returns'] = discount_cumsum(rewards, discount)
    episode['undiscounted_return'] = sum(rewards)
    return episode
class QDAlgorithm(abc.ABC, QDAlgorithmLike, Summarisable, Saveable, Copyable, CreatableFromConfig):
name: str
container: Container
dimension: Optional[int]
budget: int
_base_ind_gen: Optional[Generator[(IndividualLike, None, None)]]
_nb_objectives: int
_optimisation_task: Union[(str, Sequenc... |
class Distribution(setuptools.dist.Distribution):
def fetch_build_eggs(self, specifiers):
raise SetupRequirementsError(specifiers)
def patch(cls):
orig = distutils.core.Distribution
distutils.core.Distribution = cls
try:
(yield)
finally:
distutils.... |
def get_token_pos(tok_list, value_label):
find_pos = []
found = False
label_list = [item for item in map(str.strip, re.split('(\\W+)', value_label)) if (len(item) > 0)]
len_label = len(label_list)
for i in range(((len(tok_list) + 1) - len_label)):
if (tok_list[i:(i + len_label)] == label_lis... |
def test_ListOffsetArray_NumpyArray():
ak_array_in = ak.contents.listoffsetarray.ListOffsetArray(ak.index.Index(np.array([1, 4, 4, 6, 7], np.int64)), ak.contents.numpyarray.NumpyArray(np.array([6.6, 1.1, 2.2, 3.3, 4.4, 5.5, 7.7])))
data_frame = ak.to_rdataframe({'x': ak_array_in})
assert (data_frame.GetColu... |
def test_MLA():
    """MLA with dynamic frugal pruning (DFP) should score 0 on this
    fixed fixture."""
    pool, X_dsel, y_dsel, X_test, y_test = setup_classifiers()
    classifier = MLA(pool, DFP=True)
    classifier.fit(X_dsel, y_dsel)
    assert np.isclose(classifier.score(X_test, y_test), 0.)
def test__parse_size():
expected = {'12': .0, '12 b': 12, '12k': 12000.0, ' 12 M ': .0, ' 12 G ': .0, ' 12Tb ': .0, '12 Mib ': (12 * (1024.0 ** 2)), '12Tib': (12 * (1024.0 ** 4))}
for (inp, outp) in sorted(expected.items()):
if (outp is None):
with pytest.raises(ValueError):
... |
class VGG19(nn.Module):
def __init__(self, feature_list=[2, 7, 14], requires_grad=True):
super(VGG19, self).__init__()
self.feature_list = feature_list
vgg19 = torchvision.models.vgg19(pretrained=True)
self.model = torch.nn.Sequential(*list(vgg19.features.children())[:(self.feature_l... |
def str_replace_all(s: str, replacements: T.Dict[(str, str)]) -> str:
    """Apply each (old -> new) substitution to `s` in dict order.

    Replacements are sequential, so a later rule may rewrite text
    produced by an earlier one.
    """
    result = s
    for needle, substitute in replacements.items():
        result = result.replace(needle, substitute)
    return result
def evaluate(args, model_file, retag_pipeline):
if (args['num_generate'] > 0):
kbest = (args['num_generate'] + 1)
else:
kbest = None
with EvaluateParser(kbest=kbest) as evaluator:
foundation_cache = (retag_pipeline[0].foundation_cache if retag_pipeline else FoundationCache())
... |
class Decoder_module(nn.Module):
def __init__(self, inplanes, planes, rate=1):
super(Decoder_module, self).__init__()
self.atrous_convolution = SeparableConv2d_aspp(inplanes, planes, 3, stride=1, dilation=rate, padding=1)
def forward(self, x):
x = self.atrous_convolution(x)
retur... |
def get_labels_from_annotation(annotation_tensor, class_labels):
valid_entries_class_labels = class_labels[:(- 1)]
labels_2d = list(map((lambda x: tf.equal(annotation_tensor, x)), valid_entries_class_labels))
labels_2d_stacked = tf.stack(labels_2d, axis=2)
labels_2d_stacked_float = tf.to_float(labels_2d... |
def densenet121(num_classes, loss='softmax', pretrained=True, **kwargs):
model = DenseNet(num_classes=num_classes, loss=loss, num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), fc_dims=None, dropout_p=None, **kwargs)
if pretrained:
init_pretrained_weights(model, model_urls['densenet121'... |
class FullyObsWrapper(gym.core.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
self.observation_space.spaces['image'] = spaces.Box(low=0, high=255, shape=(self.env.width, self.env.height, 3), dtype='uint8')
def observation(self, obs):
env = self.unwrapped
full... |
class TaskRunner():
def __init__(self, tasks: List):
self.tasks = tasks
self.logger = logging.getLogger('task_runner')
def run(self, *initial_parameters: Tuple[Any]):
if (not self.tasks):
return
self.__run(0, list(initial_parameters))
for task in self.tasks:
... |
_processor('scene_graph_bert_tokenizer')
class SceneGraphBertTokenizer(MaskedTokenProcessor):
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self._probability = config.get('mask_probability', 0)
self._tokenizer.add_tokens(['[OCR]'], special_tokens=True... |
def set_device(batch, device):
    """Recursively move `batch` — a tensor or a nested list/tuple of
    tensors — onto `device`; non-tensor leaves pass through untouched.

    Note: tuples come back as lists, matching the original behavior.
    """
    if isinstance(batch, (tuple, list)):
        return [set_device(element, device) for element in batch]
    if isinstance(batch, torch.Tensor):
        return batch.to(device)
    return batch
def read_images_binary(path_to_model_file):
images = {}
with open(path_to_model_file, 'rb') as fid:
num_reg_images = read_next_bytes(fid, 8, 'Q')[0]
for image_index in range(num_reg_images):
binary_image_properties = read_next_bytes(fid, num_bytes=64, format_char_sequence='idddddddi'... |
.parametrize('sparse_container', (CSC_CONTAINERS + CSR_CONTAINERS))
def test_small_sparse_partial_fit(sparse_container):
X_sparse = sparse_container(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1, batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_compon... |
class Config():
section: str
data_dir: str
save_dir: str
train_data: str
dev_data: str
test_data: str
device: str
bert_model: str
bert_window_size: int
embedding_size: int
sp_embedding_size: int
a_scoring_batch_size: int
hidden_size: int
n_hidden_layers: int
m... |
def generate_pad(node_name, x, out_name, mode, pad_width, constant_val, base_name, func_counter):
func = nnabla_pb2.Function()
func.type = 'Pad'
set_function_name(func, node_name, base_name, func_counter)
func.input.extend([x])
func.output.extend([out_name])
pp = func.pad_param
pp.mode = mod... |
class CTTmat(SpectralMatrix):
def __init__(self, test, trial, scale=1, measure=1, assemble=None, kind=None, fixed_resolution=None):
SpectralMatrix.__init__(self, test, trial, scale=scale, measure=measure, assemble=assemble, kind=kind, fixed_resolution=fixed_resolution)
self._matvec_methods += ['cyth... |
class TestGymEnvironment(unittest.TestCase):
def setUp(self):
self.base_mdp = OvercookedGridworld.from_layout_name('cramped_room')
self.env = OvercookedEnv.from_mdp(self.base_mdp, **DEFAULT_ENV_PARAMS)
self.rnd_agent_pair = AgentPair(FixedPlanAgent([]), FixedPlanAgent([]))
np.random.... |
def default_mini_imagenet_serving_transform(image_size: int, training: bool) -> Callable:
return (transforms.Compose([transforms.RandomResizedCrop(image_size), transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), transforms.RandomHorizontalFlip(), transforms.Normalize(**IMAGENET_NORMALIZATION)]) if... |
def read_data(filename):
with open(filename, 'r', encoding='utf8') as datafile:
res = []
for line in datafile:
line = line.strip().split('\t')
lines = line
length = len(line)
track = 0
for x in lines[0]:
if (x != ' '):
... |
class _GatherFromModelParallelRegion(torch.autograd.Function):
    """Gather the input from the model-parallel region in forward and
    split the gradient back across ranks in backward.

    Fix: declare forward/backward as @staticmethod — PyTorch (>= 1.5)
    rejects legacy autograd Functions whose forward is an instance
    method, and `.apply(...)` (the callers' interface, unchanged here)
    expects static methods taking `ctx` first.
    """

    @staticmethod
    def forward(ctx, input_):
        return _gather(input_)

    @staticmethod
    def backward(ctx, grad_output):
        return _split(grad_output)
.parametrize('has_ins_space', [False, True])
def test_read_write_consistency(has_ins_space):
brat_io = BratIO(tokenize_callback='char', has_ins_space=has_ins_space, ins_space_tokenize_callback=(jieba.cut if has_ins_space else None), parse_attrs=True, parse_relations=True, encoding='utf-8')
src_fn = 'data/HwaMei... |
def set_cfg(cfg):
if (cfg is None):
return cfg
cfg.print = 'both'
cfg.device = 'auto'
cfg.out_dir = 'results'
cfg.cfg_dest = 'config.yaml'
cfg.custom_metrics = []
cfg.seed = 0
cfg.round = 4
cfg.tensorboard_each_run = False
cfg.tensorboard_agg = True
cfg.num_workers = ... |
def sci_sinusoidal_exponential_decay(initial_learning_rate=0.001, final_learning_rate=1e-05, decay_epochs=1000, delay_epochs=0, sine_freq=10, sine_decay_rate=0.5, verify=False, scheduler='sinusoidal_exponential_decay'):
lr0 = initial_learning_rate
lr1 = final_learning_rate
decay_rate = (np.log((lr1 / lr0)) ... |
def Conv3x3BN(in_channels, out_channels, stride=1, groups=1):
    """3x3 convolution (padding 1, optional stride/groups) followed by
    batch normalization; no activation."""
    conv = nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        groups=groups,
    )
    return nn.Sequential(conv, nn.BatchNorm2d(out_channels))
def get_xy_fd(use_neg=False, hash_flag=False):
feature_columns = [SparseFeat('user', 3, embedding_dim=10, use_hash=hash_flag), SparseFeat('gender', 2, embedding_dim=4, use_hash=hash_flag), SparseFeat('item_id', (3 + 1), embedding_dim=8, use_hash=hash_flag), SparseFeat('cate_id', (2 + 1), embedding_dim=4, use_hash=h... |
(name='RoBERTa/Baseline/{task}/{batch}', task=TASKS, batch=[16, 32, 64, 128, 256, 512])
class BenchRoBERTaBaseline(BenchRoBERTa):
    """Baseline RoBERTa benchmark variant; all behavior is inherited
    unchanged from BenchRoBERTa — the parametrize decorator above
    supplies the task/batch grid."""
    pass
_LAYERS.register_module()
class InterpConv(nn.Module):
def __init__(self, in_channels, out_channels, with_cp=False, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), *, conv_cfg=None, conv_first=False, kernel_size=1, stride=1, padding=0, upsample_cfg=dict(scale_factor=2, mode='bilinear', align_corners=False)):
... |
class Trainer():
def __init__(self, config_file, enable_logging):
self.enable_logging = enable_logging
self.config = Trainer.parse_config(config_file)
self.env = gym.make(self.config['env_name'])
self.apply_seed()
self.state_dimension = self.env.observation_space.shape[0]
... |
def _setstate(self, state):
    """Restore pickled state, rebuilding `stoi` as a defaultdict that
    maps unknown tokens to index 0 (defaultdict factories do not
    survive pickling as plain dict state)."""
    self.__dict__.update(state)
    restored = self.stoi
    self.stoi = defaultdict(lambda: 0, restored)
class Dilated(nn.Module):
def __init__(self, dim, mapdim, dilate=3):
super(Dilated, self).__init__()
self.mapdim = mapdim
self._init_layers(dim, mapdim, dilate)
def _init_layers(self, dim, mapdim, dilate):
self.p1_conv = convolution(3, dim, dim)
self.p2_conv = nn.Conv2d(d... |
def cnn_network(convs, fcs, use_lstm, padding, inpt, masks, rnn_state, num_actions, lstm_unit, nenvs, step_size, scope):
out = make_cnn(convs, padding, inpt)
out = layers.flatten(out)
out = make_fcs(fcs, out)
(rnn_out, rnn_state) = make_lstm(lstm_unit, nenvs, step_size, out, masks, rnn_state)
if use... |
def decode_results(model, decoded_output, decoded_offsets):
results = {'output': [], '_meta': {'acoustic_model': {'name': os.path.basename(args.model_path)}, 'language_model': {'name': (os.path.basename(args.lm_path) if args.lm_path else None)}, 'decoder': {'lm': (args.lm_path is not None), 'alpha': (args.alpha if ... |
def get_scenario_spec_tiny():
    """Build a minimal Simple1Scenario spec, sized for fast tests."""
    scenario_args = {
        'num_input_tokens': 5,
        'vocab_size': 20,
        'num_train_instances': 2,
        'num_test_instances': 2,
    }
    return ScenarioSpec(
        class_name='helm.benchmark.scenarios.simple_scenarios.Simple1Scenario',
        args=scenario_args,
    )
def get_text_and_annotations_and_date(in_fp) -> (str, List[str], str):
soup = BeautifulSoup(open(in_fp), 'lxml')
date = soup.findAll('dct')[0].findAll('timex3')[0].attrs['value']
content = soup.findAll('text')[0]
annotations = []
end = 0
for timex in content.findAll('timex3'):
begin = (c... |
def test_patients(tmp_path: pathlib.Path) -> None:
patients = create_patients(tmp_path)
with patients.reader() as reader:
all_patients = list(reader)
assert (sorted((p.patient_id for p in all_patients)) == sorted(range(10, 25)))
for patient in all_patients:
assert (patient.events == dumm... |
class TestEmptySampleOps(TestCase):
def test_emptysample(self):
for i in range(0, 3):
PadEmptyTest = core.CreateOperator('PadEmptySamples', ['lengths', 'features1', 'features2'], ['out_lengths', 'out_features1', 'out_features2'])
workspace.FeedBlob('lengths', np.array(lengths[i], dty... |
class SymforceTestCaseMixin(unittest.TestCase):
LieGroupOpsType = T.Union[(interfaces.LieGroup, sf.Scalar)]
_UPDATE = False
KEEP_PATHS = ['.*/__pycache__/.*', '.*\\.pyc']
def should_update() -> bool:
if ('--update' in sys.argv):
SymforceTestCaseMixin._UPDATE = True
sys.ar... |
def print_autograd_prof_summary(prof, mode, sortby='cpu_time', topk=15):
valid_sortby = ['cpu_time', 'cuda_time', 'cpu_time_total', 'cuda_time_total', 'count']
if (sortby not in valid_sortby):
warn = 'WARNING: invalid sorting option for autograd profiler results: {}\nExpected `cpu_time`, `cpu_time_total... |
def assign_freebase_id_to_results(split, threshold=(- 1.5)):
infile_name = 'outputs/webqsp_{}_elq{}.json'.format(split, threshold)
predictions = load_json(infile_name)
for pred in tqdm(predictions, total=len(predictions)):
fids = []
for wid in pred['wiki_ids']:
if (wid == 'null')... |
def rewind_body(prepared_request):
body_seek = getattr(prepared_request.body, 'seek', None)
if ((body_seek is not None) and isinstance(prepared_request._body_position, integer_types)):
try:
body_seek(prepared_request._body_position)
except (IOError, OSError):
raise Unrewi... |
def query_on_voxel_hash(query, feature, G0=16, growth_factor=1.5, T0=(2 ** 15), L=16, D=2, min_=None, max_=None, boundary_check=False, ctx=None):
    """Query `feature` at the given `query` coordinates through a
    Lanczos voxel-hash lookup.

    `min_`/`max_` bound the coordinate domain and default to the unit
    cube [-1, 1]^3.

    Fix: replace the mutable list default arguments with None sentinels
    (shared mutable defaults persist across calls); the effective
    defaults are unchanged.
    """
    if min_ is None:
        min_ = [(- 1.0), (- 1.0), (- 1.0)]
    if max_ is None:
        max_ = [1.0, 1.0, 1.0]
    func = LanczosQueryOnVoxelHash(ctx, G0, growth_factor, T0, L, D, min_, max_, boundary_check)
    return func(query, feature)
def register_Ns3TimeSeriesAdaptor_methods(root_module, cls):
cls.add_constructor([param('ns3::TimeSeriesAdaptor const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('TraceSinkBoolean', 'void', [param('bool', 'oldData'), param('bool', '... |
def qat():
import torchvision.models.quantization as models
model = models.resnet18(pretrained=True, progress=True, quantize=False)
num_ftrs = model.fc.in_features
model.train()
model.fuse_model()
model_ft = create_combined_model(model)
model_ft[0].qconfig = torch.quantization.default_qat_qc... |
class BagAverage(BagRE):
def __init__(self, sentence_encoder, num_class, rel2id):
super().__init__()
self.sentence_encoder = sentence_encoder
self.num_class = num_class
self.fc = nn.Linear(self.sentence_encoder.hidden_size, num_class)
self.softmax = nn.Softmax((- 1))
... |
def glorot(shape, name=None):
    """Glorot/Xavier uniform initializer: a TF Variable drawn from
    U(-limit, limit) with limit = sqrt(6 / (fan_in + fan_out))."""
    limit = np.sqrt(6.0 / (shape[0] + shape[1]))
    initial = tf.random_uniform(shape, minval=(- limit), maxval=limit, dtype=tf.float32)
    return tf.Variable(initial, name=name)
def clip_transform(n_px):
    """CLIP-style image preprocessing: bicubic resize, center crop to
    `n_px`, force RGB, tensorize, then normalize with the constants
    used at training time."""
    steps = [
        Resize(n_px, interpolation=Image.BICUBIC),
        CenterCrop(n_px),
        to_rgb,
        ToTensor(),
        Normalize((0., 0.4578275, 0.), (0., 0., 0.)),
    ]
    return Compose(steps)
def make_rl_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args, training_args) -> dict:
prompt_dict = utils.jload(data_args.prompt_dict_path)
alpaca_instructions = datasets.load_dataset(data_args.dataset_path, data_args.dataset_name)
train_df = pd.concat([pd.DataFrame(alpaca_instructions[spl... |
def plothist(a, *args, **kw):
ind = kw.get('ind')
if (ind is None):
ret = idlplot.plothist(a, *args, **kw)
else:
weights = kw.get('weights')
if (weights is not None):
if (not np.isscalar(kw['weights'])):
kw['weights'] = kw['weights'][ind]
del kw['i... |
def add_mask2image_binary1(images_path, masks_path, masked_path):
for img_item in os.listdir(images_path):
print(img_item)
img_path = os.path.join(images_path, img_item)
img = cv2.imread(img_path)
mask_path = os.path.join(masks_path, (img_item[:(- 6)] + '.png'))
mask = cv2.im... |
def get_one_hot(feature_mix, mag_s1, mag_s2, db_threshold):
specs = np.asarray([mag_s1, mag_s2])
vals = np.argmax(specs, axis=0)
Y = np.zeros((mag_s1.shape + (2,)))
for i in range(2):
temp = np.zeros(2)
temp[i] = 1
Y[(vals == i)] = temp
m = (np.max(feature_mix) - (db_threshol... |
class TfGraphTestCase():
def setup_method(self):
self.graph = tf.Graph()
for c in self.graph.collections:
self.graph.clear_collection(c)
self.graph_manager = self.graph.as_default()
self.graph_manager.__enter__()
self.sess = tf.compat.v1.Session(graph=self.graph)
... |
def test_agrawal_generator_all_functions(test_path):
for f in range(10):
stream = AGRAWALGenerator(classification_function=f, random_state=1)
test_file = os.path.join(test_path, 'agrawal_stream_{}.npz'.format(f))
data = np.load(test_file)
X_expected = data['X']
y_expected = d... |
(config_path='configs/', config_name='config.yaml')
def main(config: DictConfig):
config = dictconfig_filter_key(config, (lambda k: (not k.startswith('__'))))
from src.train import train
from src.eval import evaluate
from src.utils import utils
utils.extras(config)
if config.get('print_config'):... |
_function_dispatch(_partition_dispatcher)
def rpartition(a, sep):
    """Apply str.rpartition element-wise over array `a`, returning a
    string (or unicode) array of the 3-way splits."""
    partitioned = _vec_string(a, object_, 'rpartition', (sep,))
    return _to_string_or_unicode_array(partitioned)
class welcome_page(tk.Toplevel):
def __init__(self, master, global_wd, *args, **kwargs):
super().__init__(master=master)
self.geometry('600x400+500+100')
self.title('BioNAS - Welcome')
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
self.gri... |
class ChunkSizeTuner():
def __init__(self, max_chunk_size: int=512):
self.max_chunk_size = max_chunk_size
self.cached_chunk_size: Optional[int] = None
self.cached_arg_data: Optional[tuple] = None
def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) ->... |
class EllipticCurveTraces(Benchmark):
def __init__(self, B):
self.B = B
self.repr_str = ('Compute all a_p for the elliptic curve [1,2,3,4,5], for p < %s' % self.B)
def sage(self):
E = EllipticCurve([1, 2, 3, 4, 5])
t = cputime()
E.anlist(self.B, pari_ints=True)
re... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.