code stringlengths 101 5.91M |
|---|
class ZippedNode(Node):
def __init__(self, nodes, children):
super(ZippedNode, self).__init__(children)
self.nodes = nodes
def __repr__(self):
if (len(self.children) == 0):
return ('[%s]' % ','.join((repr(node) for node in self.nodes)))
return ('[%s](%s)' % (','.join(... |
class QDQBertPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder for QDQBert; raises when required backends are absent."""

    # Backends that must be installed before the real model can be used.
    _backends = ['pytorch_quantization', 'torch']

    def __init__(self, *args, **kwargs):
        # Raises an informative error naming the missing backend(s).
        requires_backends(self, ['pytorch_quantization', 'torch'])
def _insert_one_token_to_ordered_list(token_list: List[str], new_token: str):
insertion_idx = bisect.bisect_left(token_list, new_token)
if ((insertion_idx < len(token_list)) and (token_list[insertion_idx] == new_token)):
return
else:
token_list.insert(insertion_idx, new_token) |
def save(model: BaseRecommender, path: Union[(str, Path)], overwrite: bool=False):
if isinstance(path, Path):
path = str(path)
spark = State().session
fs = get_fs(spark)
if (not overwrite):
is_exists = fs.exists(spark._jvm.org.apache.hadoop.fs.Path(path))
if is_exists:
... |
def load_folder(folder, suffix):
    """Return sorted full paths of the files in ``folder`` ending with ``suffix``.

    Args:
        folder: Directory to scan (non-recursive).
        suffix: Filename suffix to keep, e.g. ``'.png'``.

    Returns:
        List of ``folder``-joined paths, in sorted filename order.
    """
    # Comprehension over the manual append loop; sorted() makes the
    # ordering deterministic across platforms.
    return [
        os.path.join(folder, name)
        for name in sorted(os.listdir(folder))
        if name.endswith(suffix)
    ]
def densenet_cifar(nclass):
    """Build the CIFAR-sized DenseNet (growth rate 12) with ``nclass`` outputs."""
    # Block sizes follow the standard DenseNet configuration.
    block_layout = [6, 12, 24, 16]
    return DenseNet(Bottleneck, block_layout, growth_rate=12, num_classes=nclass)
def test_two_return():
    """divmod on an awkward Array must share attrs/behavior with both outputs."""
    source = ak.Array([1, 2, 3], behavior={'foo': 'BAR'}, attrs={'hello': 'world'})
    quotient, remainder = divmod(source, 2)
    # Both results must alias (not copy) the input's metadata, which also
    # implies the two results share it with each other.
    for result in (quotient, remainder):
        assert result.attrs is source.attrs
        assert result.behavior is source.behavior
class _Box2dEnvPoolCorrectnessTest(absltest.TestCase):
def run_space_check(self, env0: gym.Env, env1: Any) -> None:
(obs0, obs1) = (env0.observation_space, env1.observation_space)
np.testing.assert_allclose(obs0.shape, obs1.shape)
(act0, act1) = (env0.action_space, env1.action_space)
... |
def main(argv):
arg_parser = argparse.ArgumentParser(description='Dump raw strings from dataset. Same format as in search.')
arg_parser.add_argument('--config', help="filename to config-file. will use dataset 'eval' from it")
arg_parser.add_argument('--dataset', help='dataset, overwriting config')
arg_p... |
def add_self_loops(adjacency: sparse.csr_matrix) -> sparse.csr_matrix:
(n_row, n_col) = adjacency.shape
if is_square(adjacency):
adjacency = (sparse.diags(np.ones(n_col), format='csr') + adjacency)
else:
tmp = sparse.eye(n_row)
tmp.resize(n_row, n_col)
adjacency += tmp
re... |
def isolating_interval(intv_fn, pol):
    """Refine precision until ``intv_fn`` gives an interval where ``pol`` is monotone.

    NOTE(review): assumes ``prec_seq()`` eventually yields a precision at which
    the derivative's interval image excludes zero -- confirm termination.
    """
    derivative = pol.derivative()
    for precision in prec_seq():
        candidate = intv_fn(precision)
        # Monotone on the interval <=> derivative cannot vanish there.
        if not derivative(candidate).contains_zero():
            return candidate
class PolynomialDecay(LearningRateSchedule):
def __init__(self, initial_rate, final_rate, decay_steps, power=1.0):
self.initial_rate = initial_rate
self.final_rate = final_rate
self.decay_steps = decay_steps
self.power = power
def _create_tensor(self, global_step):
return... |
class Sphere(Benchmark):
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip(([(- 5.12)] * self.N), ([5.12] * self.N)))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun... |
def get_dataset_by_just(d, just):
l = []
for name in just:
l.append(d[name])
if ('all_start_positions' in d):
l.append(d['all_start_positions'])
if ('all_end_positions' in d):
l.append(d['all_end_positions'])
if ('all_example_index' in d):
l.append(d['all_example_inde... |
class TestEnasConvModeler(testing_utils.TestCase):
def setUp(self):
self.session = tf.Session()
self.input_op = [architect.Operation('input', shape=(10, 4), name='input')]
self.output_op = architect.Operation('dense', units=1, activation='sigmoid', name='output')
self.x = np.random.c... |
class VqrgbNet(nn.Module):
def __init__(self):
super(VqrgbNet, self).__init__()
print('VqrgbNet...')
self.ind_pool = IndPool(10000)
num_hiddens = 128
num_residual_hiddens = 64
num_residual_layers = 3
embedding_dim = 64
commitment_cost = 0.25
se... |
def takes(*argkeys):
def decorator(obj):
if isinstance(obj, DynamicItem):
if obj.takes:
raise ValueError("Can't overwrite DynamicItem.takes")
obj.takes = argkeys
return obj
elif inspect.isgeneratorfunction(obj):
return GeneratorDynamicI... |
class Partition4(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/T5Block[16]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[17]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[18]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[19]']
TENSORS = []
def __init__(s... |
def GetSceneGraphOfImage(id=61512):
    """Fetch and parse the scene graph for image ``id``; ``None`` if not found."""
    image = GetImageData(id=id)
    endpoint = '/api/v0/images/' + str(id) + '/graph'
    data = utils.RetrieveData(endpoint)
    # The API reports a missing image through a 'detail' field.
    if 'detail' in data and data['detail'] == 'Not found.':
        return None
    return utils.ParseGraph(data, image)
class SqueezeExcite():
def __init__(self, in_chs, rd_ratio=0.25, rd_channels=None, act_layer=tf.keras.layers.ReLU, gate_layer=tf.sigmoid, force_act_layer=None, rd_round_fn=None, name=None):
name = handle_name(name)
if (rd_channels is None):
rd_round_fn = (rd_round_fn or round)
... |
def register_Ns3FfMacScheduler_methods(root_module, cls):
cls.add_constructor([param('ns3::FfMacScheduler const &', 'arg0')])
cls.add_constructor([])
cls.add_method('DoDispose', 'void', [], is_virtual=True)
cls.add_method('GetFfMacCschedSapProvider', 'ns3::FfMacCschedSapProvider *', [], is_pure_virtual=... |
def handle_test_results(test_results):
expressions = test_results.split(' ')
failed = 0
success = 0
time_spent = (expressions[(- 2)] if ('=' in expressions[(- 1)]) else expressions[(- 1)])
for (i, expression) in enumerate(expressions):
if ('failed' in expression):
failed += int(e... |
class Inceptiontime_exp(Keras_DNN_exp):
def __init__(self, log_dir, data_path, param_dict, config):
super().__init__(log_dir, data_path, param_dict, config)
self.model = self.load_model()
def load_model(self):
checkpoint = super().load_model()
model = InceptionTimeClassifier_(nb_... |
def fetch_audio_start_end(example_id: str) -> Tuple[(float, float)]:
start_str = re.search('start(\\d+\\.\\d+)', example_id)
if (start_str is not None):
start_str = float(start_str.group(1))
end_str = re.search('end(\\d+\\.\\d+)', example_id)
if (end_str is not None):
end_str = float(end... |
def gen_cache_files(ids, skip_file):
configs = get_configs()
config = load_config(configs['path'], prefix=configs['par_path'])
render_height = configs['render_height']
render_width = configs['render_width']
with open(skip_file, 'r') as f:
skip_houses = json.load(f)
for idx in tqdm(ids):
... |
def get_trainable_vars(scope, keys=tuple()):
assert isinstance(keys, (tuple, list))
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
if (len(keys) == 0):
return trainable_vars
else:
regex_pattern = '.*{}.*'.format('.*'.join(keys))
new_trainable_vars = [... |
class ComplexNorm(nn.Module):
def __init__(self, mono: bool=False):
super(ComplexNorm, self).__init__()
self.mono = mono
def forward(self, spec: Tensor) -> Tensor:
spec = torch.abs(torch.view_as_complex(spec))
if self.mono:
spec = torch.mean(spec, 1, keepdim=True)
... |
class MBart50TokenizerFast(PreTrainedTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = MBart50Tokenize... |
class NormalizeArea(object):
def __init__(self):
return
def __call__(self, data):
data.pos = (data.pos - ((torch.max(data.pos, dim=0)[0] + torch.min(data.pos, dim=0)[0]) / 2))
(pos_vh, face_vh) = (data.pos.cpu().numpy(), data.face.cpu().numpy().T)
area = (1 / np.sqrt(vh.surface_a... |
def _get_base_class_names(frame):
(co, lasti) = (frame.f_code, frame.f_lasti)
code = co.co_code
i = 0
extended_arg = 0
extends = []
while (i <= lasti):
c = code[i]
op = ord(c)
i += 1
if (op >= dis.HAVE_ARGUMENT):
oparg = ((ord(code[i]) + (ord(code[(i +... |
class MobilenetV2Test(tf.test.TestCase):
def setUp(self):
tf.reset_default_graph()
def testCreation(self):
spec = dict(mobilenet_v2.V2_DEF)
(_, ep) = mobilenet.mobilenet(tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
num_convs = len(find_ops('Conv2D'))
se... |
def test_ior():
    """In-place ``|=`` on a proxy must track the wrapped value and log usage."""
    value = 2
    copy = proxy = tt.ObjectProxy(value)
    value |= 1
    proxy |= 1
    assert value == proxy
    # The usage trace recorded on the original proxy object must show
    # __ior__ being called with an int argument.
    trace = tt.UsageTraceNode.from_proxy(copy)
    assert int in trace.children['__ior__'].arg_types[0]
def calculate_roc(thresholds, distances, labels, nrof_folds=10):
nrof_pairs = min(len(labels), len(distances))
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
accura... |
def eval_mode2(mode, measurements, label_file):
(run, qrels) = eval_mode(mode, measurements, label_file)
output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/caselaw/dpr/{}/eval'.format(mode[3])
output_file = 'eval_dpr_{}_{}_{}_aggregation_{}.txt'.format(mode[3], mode[0], mode[1], mode[2])
ranking_eva... |
(dace.float64, dace.float64[N], dace.float64[N])
def axpy(A, X, Y):
(_[0:N])
def multiplication(i):
(in_A << A)
(in_X << X[i])
(in_Y << Y[i])
(out >> Y[i])
out = ((in_A * in_X) + in_Y) |
def ufftn(inarray, dim=None):
    """Unitary (ortho-normalized) N-D FFT over the trailing ``dim`` axes.

    When ``dim`` is omitted, every axis of ``inarray`` is transformed.
    """
    if dim is None:
        dim = inarray.ndim
    # Negative axis indices select exactly the last `dim` axes.
    trailing_axes = range(-dim, 0)
    return fft.fftn(inarray, axes=trailing_axes, norm='ortho')
def set_t_exp(value: float, unit: str) -> None:
    """Store the exposure time in the module-level ENV, converted via ``unit``.

    NOTE(review): assumes ``unt.time_list`` maps unit names to scale factors.
    """
    global ENV
    scale = unt.time_list[unit]
    ENV['t_exp'] = value * scale
class PrintTimestepCallback(BaseCallback):
    """Callback that prints the model's timestep count on every environment step."""

    def _on_step(self) -> bool:
        # Flush so the count appears immediately even with buffered stdout.
        print(self.model.num_timesteps, flush=True)
        # stable-baselines3 aborts training when _on_step returns a falsy
        # value; the original implicitly returned None, which stopped
        # training after the very first step.
        return True
def convert(src_path: str, map_location: str='cpu', save_path: Union[(str, None)]=None) -> None:
state_dict = torch.load(src_path, map_location=map_location)
for (k, v) in tqdm(state_dict.items()):
if (not isinstance(v, torch.Tensor)):
raise TypeError('FP16 conversion only works on paths tha... |
class _hash_encode_second_backward(Function):
def forward(ctx, grad, inputs, embeddings, offsets, B, D, C, L, S, H, calc_grad_inputs, dy_dx):
grad_inputs = torch.zeros_like(inputs)
grad_embeddings = torch.zeros_like(embeddings)
ctx.save_for_backward(grad, inputs, embeddings, offsets, dy_dx, ... |
class BorderAlign(nn.Module):
def __init__(self, pool_size):
super(BorderAlign, self).__init__()
self.pool_size = pool_size
def forward(self, input, boxes):
return border_align(input, boxes, self.pool_size)
def __repr__(self):
s = self.__class__.__name__
s += f'(pool_... |
def convert_to_wav(csv_file, target_dir):
wav_dir = os.path.join(target_dir, 'wav/')
txt_dir = os.path.join(target_dir, 'txt/')
os.makedirs(wav_dir, exist_ok=True)
os.makedirs(txt_dir, exist_ok=True)
path_to_data = os.path.dirname(csv_file)
def process(x):
(file_path, text) = x
f... |
.parametrize('seed', [313])
.parametrize('axis', [0, 1, 2, (- 1)])
.parametrize('decay_rate', [0.9])
.parametrize('eps', [1e-05])
.parametrize('output_stat, batch_stat', [[False, False], [False, True]])
.parametrize('no_scale, no_bias', [[False, False], [True, True]])
.parametrize('ctx, func_name', ctxs)
def test_batch... |
.unit
.cartographer
def test_cat_layer_dict_to_str():
min_zoom = 0
max_zoom = 2
name = 'test'
columns = 'a,b,c'
layer_dict = dict(directory=(name + '/{z}/{y}/{x}.png'), name=name, min_zoom=min_zoom, max_zoom=(max_zoom + 5), max_native_zoom=max_zoom, color='red', columns=[f'"{c}"' for c in columns.sp... |
class Win32CPUInfo(CPUInfoBase):
info = None
pkey = 'HARDWARE\\DESCRIPTION\\System\\CentralProcessor'
def __init__(self):
if (self.info is not None):
return
info = []
try:
if (sys.version_info[0] >= 3):
import winreg
else:
... |
class Smooth(nn.Module):
def __init__(self, base_classifier, sigma, n, alpha, mean, std):
super().__init__()
self.base_classifier = base_classifier
self.sigma = sigma
self.n = n
self.alpha = alpha
self.mean = nn.Parameter(torch.tensor(mean).float().view(3, 1, 1))
... |
def minmax_scale(tensor, range_min=0, range_max=1):
    """Rescale each item of a batched tensor to [range_min, range_max].

    Min/max are taken per item over dims (1, 2), so the input is expected to
    be at least 3-D with a leading batch dimension.
    """
    per_item_min = torch.amin(tensor, dim=(1, 2), keepdim=True)
    per_item_max = torch.amax(tensor, dim=(1, 2), keepdim=True)
    # Epsilon guards against division by zero for constant inputs.
    span = (per_item_max - per_item_min) + 1e-06
    normalized = (tensor - per_item_min) / span
    return range_min + (range_max - range_min) * normalized
class GenerateParams(object):
def __init__(self, cfg, options, config, dev_trg, count, mode):
self.cfg = cfg
self.ENABLE = '1'
self.DISABLE = '0'
self.NUM_TEST = 3
self.options = options
self.perf_obj = Perf(cfg, self.options)
if (config is not None):
... |
def indirect_properties(indirect_class, indirect_function, override=False):
def indirection(cls):
inherited_props = {}
for base_cls in cls.__bases__:
if hasattr(base_cls, '__properties__'):
inherited_props.update(base_cls.__properties__)
for (name, prop) in indire... |
class XLNetModel():
    """Placeholder XLNetModel that demands PyTorch on construction or loading."""

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        # Mirrors the real API surface; still just reports the missing backend.
        requires_pytorch(self)
def train(argv=None):
((x_train, y_train), (x_test, y_test)) = load_mnist()
model = build_model()
optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
loss = tf.keras.losses.SparseCategoricalCrossentropy()
metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
model.compile(... |
class LabelChestXrayDataset(ChestXrayDataset):
def __init__(self, root: str, transforms: Optional[Compose]=None) -> None:
super().__init__(root, transforms)
keys = []
for key in self.keys:
if (self.index_dict[key].get('class_label') is not None):
keys.append(key)
... |
def _parse_codestream(fp):
hdr = fp.read(2)
lsiz = struct.unpack('>H', hdr)[0]
siz = (hdr + fp.read((lsiz - 2)))
(lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, _, _, _, _, csiz) = struct.unpack_from('>HHIIIIIIIIH', siz)
ssiz = ([None] * csiz)
xrsiz = ([None] * csiz)
yrsiz = ([None] * csiz)
for i... |
def get_xy_fd(hash_flag=False):
feature_columns = [SparseFeat('user', 3, embedding_dim=10), SparseFeat('gender', 2, embedding_dim=4), SparseFeat('item_id', (3 + 1), embedding_dim=8), SparseFeat('cate_id', (2 + 1), embedding_dim=4), DenseFeat('pay_score', 1)]
feature_columns += [VarLenSparseFeat(SparseFeat('hist... |
def register_types(module):
root_module = module.get_root()
module.add_enum('ReqType', ['DATA', 'UNICAST_POLLING'])
module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_... |
def get_num_exps_for_instances(args):
import numpy as np
import math
if ((args.mode == 'ec2') and (not args.no_gpu)):
max_exps_per_instance = args.max_exps_per_instance
else:
max_exps_per_instance = 1
num_exps_for_instances = (np.ones(int(math.ceil((args.num_seeds / max_exps_per_inst... |
def get_document(instance, tokenizer, segment_len, add_speaker=False):
document_state = DocumentState(instance['scene_id'])
general_counter = 0
clusters = defaultdict(list)
token_counter = 0
for utterance in instance['utterances']:
speaker = tuple(sorted(utterance['speakers']))
if ad... |
class AdversarialLoss(object):
def __init__(self, z_gen=torch.randn, loss='L2'):
self.z_gen = z_gen
self.loss = loss
if (loss == 'L2'):
self.criterion = nn.MSELoss()
elif (loss == 'BCE'):
self.criterion = nn.BCEWithLogitsLoss()
else:
raise ... |
_SAMPLERS.register_module()
class PseudoSampler(BaseSampler):
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
raise NotImplementedError
def _sample_neg(self, **kwargs):
raise NotImplementedError
def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwarg... |
class Decoder(nn.Module):
def __init__(self, nf=32, spn=1):
super(Decoder, self).__init__()
self.layer0 = nn.Conv2d((nf * 8), (nf * 4), 1, 1, 0)
self.layer1 = nn.Upsample(scale_factor=2, mode='bilinear')
self.layer2 = nn.Sequential(nn.Conv2d((nf * 4), (nf * 4), 3, 1, 1), nn.ELU(inpla... |
def test_data_pipeline():
from speechbrain.utils.data_pipeline import DataPipeline
pipeline = DataPipeline(['text'], dynamic_items=[{'func': (lambda x: x.lower()), 'takes': ['text'], 'provides': 'foo'}, {'func': (lambda x: x[::(- 1)]), 'takes': 'foo', 'provides': ['bar']}], output_keys=['text', 'foo', 'bar'])
... |
def test_check_null_weight_with_nonzeros() -> None:
sample_weight = np.ones_like(y_toy)
(sw_out, X_out, y_out) = check_null_weight(sample_weight, X_toy, y_toy)
np.testing.assert_almost_equal(np.array(sw_out), sample_weight)
np.testing.assert_almost_equal(np.array(X_out), X_toy)
np.testing.assert_alm... |
def _shrink_nnp(nnp, pos_start, pos_end):
if ((len(nnp.protobuf.executor) != 1) or (len(nnp.protobuf.network) != 1)):
print('[ERROR] Please make only one network in nnp.')
sys.exit((- 1))
from nnabla.utils import nnabla_pb2
class _nnp():
pass
_nnp.protobuf = nnabla_pb2.NNablaProt... |
def get_activation_fn(name: str) -> ty.Callable[[Tensor], Tensor]:
    """Resolve an activation function by name.

    Recognizes the custom 'reglu'/'geglu' activations and 'sigmoid';
    anything else is looked up on torch.nn.functional.
    """
    # Guard chain instead of the nested ternary; each custom name is only
    # looked up when actually requested.
    if name == 'reglu':
        return reglu
    if name == 'geglu':
        return geglu
    if name == 'sigmoid':
        return torch.sigmoid
    return getattr(F, name)
def test_pytest_parametrize_class_fixture(testdir):
testdir.make_test('\nfrom hypothesis import settings, HealthCheck\n\n\nclass TestAPI:\n\n def pytest_generate_tests(self, metafunc):\n metafunc.parametrize("inner", ("A", "B"))\n\n ()\n def param(self, inner):\n return inner * 2\n\n ()\n ... |
class GimpGradientFile(GradientFile):
def __init__(self, fp):
if (fp.readline()[:13] != b'GIMP Gradient'):
raise SyntaxError('not a GIMP gradient file')
line = fp.readline()
if line.startswith(b'Name: '):
line = fp.readline().strip()
count = int(line)
... |
class HuggingFaceWav2Vec2Pretrain(nn.Module):
def __init__(self, source, save_path, mask_prob=0.65, mask_length=10, normalize_wav=True):
super().__init__()
self.mask_prob = mask_prob
self.mask_length = mask_length
self.normalize_wav = normalize_wav
self.config = Wav2Vec2Confi... |
def make_parallel_dataset(image_roots, classification=False, intersection=False, filter_tuples=None, verbose=None):
image_roots = [os.path.expanduser(d) for d in image_roots]
image_sets = OrderedDict()
for (j, root) in enumerate(image_roots):
for path in walk_image_files(root, verbose=verbose):
... |
def register_Ns3IntToType__4_methods(root_module, cls):
    """Register constructors for the ns3::IntToType<4> template instantiation."""
    cls.add_constructor([])  # default constructor
    copy_signature = param('ns3::IntToType< 4 > const &', 'arg0')
    cls.add_constructor([copy_signature])  # copy constructor
    return
class PlanePartitions_n(PlanePartitions):
def __init__(self, n):
super().__init__(category=FiniteEnumeratedSets())
self._n = n
def _repr_(self) -> str:
return 'Plane partitions of size {}'.format(self._n)
def __contains__(self, x) -> bool:
return (PlanePartitions.__contains__... |
def changeBipartiteDensity(mode, G, A, i):
    """Indicator: 1 when node ``i`` of graph ``G`` is in bipartite mode ``mode``."""
    # bool -> int gives exactly the original 1/0 values.
    return int(G.bipartite_node_mode(i) == mode)
.parametrize(['mu', 'beta', 'expected'], [(0.3, 0.2, 0.94), ((- 0.3), 0, 1.0), (0, 0.8, 1.0)])
def test_get_doppler_factor_partial_relativity(mu, beta, expected):
obtained = frame_transformations.get_doppler_factor_partial_relativity(mu, beta)
assert_almost_equal(obtained, expected) |
class CLIPFeatureExtractionTester(unittest.TestCase):
def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=20, do_center_crop=True, crop_size=18, do_normalize=True, image_mean=[0., 0.4578275, 0.], image_std=[0., 0., 0.]):
self.pa... |
def _get_max_errors(errors, sequences, max_below):
max_errors = [{'max_error': max_below, 'start': (- 1), 'stop': (- 1)}]
for sequence in sequences:
(start, stop) = sequence
sequence_errors = errors[start:(stop + 1)]
max_errors.append({'start': start, 'stop': stop, 'max_error': max(seque... |
def register_Ns3WaveHelper_methods(root_module, cls):
cls.add_constructor([param('ns3::WaveHelper const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AssignStreams', 'int64_t', [param('ns3::NetDeviceContainer', 'c'), param('int64_t', 'stream')])
cls.add_method('CreateMacForChannel', 'void', [par... |
class PyLongObjectPtr(PyObjectPtr):
_typename = 'PyLongObject'
def proxyval(self, visited):
ob_size = long(self.field('ob_size'))
if (ob_size == 0):
return 0
ob_digit = self.field('ob_digit')
if (gdb.lookup_type('digit').sizeof == 2):
SHIFT = 15
el... |
def test_cant_select(with_common_metadata):
    """Row-group selection must be rejected when file scanning is disabled."""
    with pytest.raises(ValueError):
        ak.metadata_from_parquet(
            with_common_metadata, scan_files=False, row_groups=[1]
        )
def get_model(point_cloud, is_training, num_class, bn_decay=None, gripper_feat=None, env_feat=None):
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
l0_xyz = point_cloud
l0_points = None
end_points['l0_xyz'] = l0_xyz
(l1_xyz, l1_poin... |
def plot_VAE(dic, save_fig=False):
(_, axes) = plt.subplots(1, 3)
cmap = 'gray'
axes[0].imshow(dic['x'].reshape(28, 28), cmap=cmap, vmin=(- 1), vmax=1)
axes[1].imshow(dic['y'].reshape(28, 28), cmap=cmap, vmin=np.min(dic['y']), vmax=np.max(dic['y']))
axes[2].imshow(dic['x_pred'].reshape(28, 28), cmap... |
class SVHNPolicy(object):
def __init__(self, fillcolor=(128, 128, 128)):
self.policies = [SubPolicy(0.9, 'shearX', 4, 0.2, 'invert', 3, fillcolor), SubPolicy(0.9, 'shearY', 8, 0.7, 'invert', 5, fillcolor), SubPolicy(0.6, 'equalize', 5, 0.6, 'solarize', 6, fillcolor), SubPolicy(0.9, 'invert', 3, 0.6, 'equali... |
def process_timestamp_column(dataframe: SparkDataFrame, column_name: str, date_format: Optional[str]=None) -> SparkDataFrame:
if (column_name not in dataframe.columns):
raise ValueError(f'Column {column_name} not found')
if isinstance(dataframe.schema[column_name].dataType, st.TimestampType):
re... |
def typename(o):
if isinstance(o, torch.Tensor):
return o.type()
module = ''
class_name = ''
if (hasattr(o, '__module__') and (o.__module__ != 'builtins') and (o.__module__ != '__builtin__') and (o.__module__ is not None)):
module = (o.__module__ + '.')
if hasattr(o, '__qualname__'):... |
class QueryTop(Query):
def __init__(self, opts=None, **kwargs):
Query.__init__(self, opts)
def update_query_state(self, **kwargs):
pass
def get_next_query(self, **kwargs):
ordered_indexes = kwargs.get('ordered_indexes')
queried_items = kwargs.get('queried_items')
item... |
class InventoryManagementSystemSearchItems(VirtualFunctionTool):
name = 'InventoryManagementSystemSearchItems'
summary = 'Search for items in the inventory by keyword or category.'
parameters: List[ArgParameter] = [{'name': 'keyword', 'type': 'string', 'description': 'The keyword to search for in the item n... |
def update_class_from_dict(obj, dict):
    """Recursively copy the mapping's entries onto ``obj``'s attributes.

    When an existing attribute is itself a class, recurse into it with the
    nested mapping; otherwise set the attribute directly.

    NOTE(review): the parameter name shadows the builtin ``dict``; kept to
    preserve keyword-argument compatibility for existing callers.
    """
    for key, val in dict.items():
        current = getattr(obj, key, None)
        if isinstance(current, type):
            # Nested class attribute: apply the nested mapping recursively.
            update_class_from_dict(current, val)
        else:
            setattr(obj, key, val)
def lowpass_filter(n_taps, cutoff, band_half, sr):
    """Windowed-sinc FIR lowpass with ``n_taps`` taps at sample rate ``sr``."""
    window = kaiser_window(n_taps, band_half, sr)
    # Tap indices centered on zero so the filter is symmetric (linear phase).
    centered = torch.arange(n_taps) - (n_taps - 1) / 2
    normalized_cutoff = (2 * cutoff) / sr
    return normalized_cutoff * sinc(normalized_cutoff * centered) * window
class Solver(Z3PPObject):
def __init__(self, solver=None, ctx=None, logFile=None):
assert ((solver is None) or (ctx is not None))
self.ctx = _get_ctx(ctx)
self.backtrack_level =
self.solver = None
if (solver is None):
self.solver = Z3_mk_solver(self.ctx.ref())
... |
def eval_exec_match(db, p_str, g_str, pred, gold):
conn = sqlite3.connect(db)
cursor = conn.cursor()
try:
cursor.execute(p_str)
p_res = cursor.fetchall()
except:
return False
cursor.execute(g_str)
q_res = cursor.fetchall()
def res_map(res, val_units):
rmap = {... |
def _impl(array, highlevel, behavior, attrs):
from awkward._connect.pyarrow import import_pyarrow_compute
pc = import_pyarrow_compute('e')
with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
layout = ctx.unwrap(array)
out = ak._do.recursively_apply(layout, ak.operations.str._get_ufunc_... |
def get_param_space(trial):
trial.suggest_float('learning_rate', 0.0001, 0.001, log=True)
trial.suggest_float('lr_decay_rate', 0.7, 1.0, log=True)
trial.suggest_categorical('weight_decay', [1e-06, 1e-07, 0])
trial.suggest_categorical('batch_size', [16, 32, 64])
trial.suggest_int('pe_embed_k', 0, 20)... |
class branchTests(unittest.TestCase):
def setUp(self):
super(branchTests, self).setUp()
def tearDown(self):
super(branchTests, self).tearDown()
def test_inputs(self):
self.assertRaises(ValueError, qotree.Branch, [1])
def test_creation(self):
br = qotree.Branch([1.2, 3.4, ... |
class Trainer(metaclass=DummyObject):
    """Stub standing in for the real Trainer when torch is unavailable."""

    # Backend required before the real class can be used.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError if torch is not installed.
        requires_backends(self, ['torch'])
class StateProblem(pde_problem.PDEProblem):
def __init__(self, db: database.Database, state_form_handler: _forms.StateFormHandler, initial_guess: Optional[List[fenics.Function]]) -> None:
super().__init__(db)
self.state_form_handler = state_form_handler
self.initial_guess = initial_guess
... |
def _is_tf_symbolic_tensor(x):
    """True when ``x`` is a symbolic (graph-mode) TensorFlow tensor."""
    import tensorflow as tf
    # Newer TF exposes a dedicated predicate; prefer it when available.
    checker = getattr(tf, 'is_symbolic_tensor', None)
    if checker is not None:
        return checker(x)
    # Fallback for older TF: exact-type comparison (kept as == on the class,
    # matching the original's semantics of excluding subclasses).
    return type(x) == tf.Tensor
def test_require_proba():
    """DESMI must accept base classifiers lacking predict_proba (e.g. Perceptron)."""
    features = np.random.randn(5, 5)
    labels = np.array([0, 1, 0, 0, 0])
    base = Perceptron()
    base.fit(features, labels)
    # Constructing the ensemble must not raise despite the missing predict_proba.
    DESMI([base, base, base])
class SubPixelConvolutionalBlock(nn.Module):
def __init__(self, kernel_size=3, n_channels=64, scaling_factor=2):
super(SubPixelConvolutionalBlock, self).__init__()
self.conv = nn.Conv2d(in_channels=n_channels, out_channels=(n_channels * (scaling_factor ** 2)), kernel_size=kernel_size, padding=(kerne... |
def squad_convert_example_to_features(example, max_seq_length, doc_stride, max_query_length, is_training):
features = []
if (is_training and (not example.is_impossible)):
start_position = example.start_position
end_position = example.end_position
actual_text = ' '.join(example.doc_tokens... |
class Timer():
def __init__(self, timeout, callback):
self._timeout = timeout
self._callback = callback
async def _job(self):
(await asyncio.sleep(self._timeout))
self._callback()
def start(self):
self._task = asyncio.ensure_future(self._job())
def cancel(self):
... |
class DetectionLoss(nn.Module):
def __init__(self, alpha, gamma, delta, box_loss_weight, num_classes=90, levels=5):
super(DetectionLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.delta = delta
self.box_loss_weight = box_loss_weight
self.num_classes =... |
class CORSResponseMixin(object):
def access_control_allow_credentials(self):
return ('Access-Control-Allow-Credentials' in self.headers)
_control_allow_credentials.setter
def access_control_allow_credentials(self, value):
if (value is True):
self.headers['Access-Control-Allow-Cre... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.