code stringlengths 101 5.91M |
|---|
class Perm0dbBenchmark(Benchmark):
def __init__(self, nb_features: int=2):
self.nb_features = nb_features
ind_domain = (float((- nb_features)), float(nb_features))
super().__init__(fn=algorithms.partial(illumination_perm0db, nb_features=nb_features), ind_domain=ind_domain, fitness_domain=((0... |
def k2mm_kernel(alpha: dc.float64, beta: dc.float64, A: dc.float64[(NI, NK)], B: dc.float64[(NK, NJ)], C: dc.float64[(NJ, NL)], D: dc.float64[(NI, NL)]):
    """Polybench 2mm kernel: D = alpha * A @ B @ C + beta * D, in place.

    The original line `((((alpha * A) B) C) + (beta * D))` is not valid Python;
    the shapes (NIxNK) @ (NKxNJ) @ (NJxNL) fix the intended chained matrix
    products, matching the standard 2mm definition.
    """
    D[:] = alpha * A @ B @ C + beta * D
def CB_loss(labels, logits, samples_per_cls, no_of_classes, loss_type, beta, gamma):
effective_num = (1.0 - np.power(beta, samples_per_cls))
weights = ((1.0 - beta) / np.array(effective_num))
weights = ((weights / np.sum(weights)) * no_of_classes)
labels_one_hot = F.one_hot(labels, no_of_classes).float(... |
def get_tl_dict_values(detection, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0, validNumPoints=[], validate_cw=True):
confidence = 0.0
transcription = ''
points = []
if (isinstance(detection, dict) == False):
raise Exception('Incorrect format. Object has to be a dictionar... |
def mk_z3consts_ml_internal(api_files, output_dir):
assert os.path.isdir(output_dir)
assert isinstance(api_files, list)
blank_pat = re.compile('^ *$')
comment_pat = re.compile('^ *//.*$')
typedef_pat = re.compile('typedef enum *')
typedef2_pat = re.compile('typedef enum { *')
openbrace_pat =... |
class PickleExplainer():
def __init__(self, sib, in_current_sage=False, default_assumptions=False, pedantic=False):
self.sib = sib
self.in_current_sage = in_current_sage
self.default_assumptions = default_assumptions
self.pedantic = pedantic
self.stopped = False
self.... |
class DynamicalSemigroup_affine(DynamicalSemigroup):
def __classcall_private__(cls, ds_data):
systems = []
if isinstance(ds_data, Collection):
for ds_datum in ds_data:
if isinstance(ds_datum, DynamicalSystem_affine):
systems.append(ds_datum)
... |
class AverageMeter():
def __init__(self, ema=False):
self.ema = ema
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
if isinstance(val, torch.Tensor):
val = val.item()
s... |
class Distance(object):
def __init__(self, name_or_handle: Union[(str, int)]):
raise PyRepError('Currently there is an error in CoppeliaSim with distance objects. As soon as CoppeliaSim resolves this issue, this error will be removed.')
self._handle: int
if isinstance(name_or_handle, int):
... |
def get_cast_dtype(precision: str):
    """Map a precision string to its torch autocast dtype.

    Returns torch.bfloat16 for 'bf16', torch.float16 for 'fp16', and None
    for any other value (full precision).
    """
    dtype_by_precision = {'bf16': torch.bfloat16, 'fp16': torch.float16}
    return dtype_by_precision.get(precision)
def add_pointrend_config(cfg):
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0
cfg.INPUT.COLOR_AUG_SSD = False
cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES = ('p2',)
cfg.MODEL.ROI_MASK_HEAD.FC_DIM = 1024
cfg.MODEL.ROI_MASK_HEAD.NUM_FC = 2
cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION = 7
cfg.MODEL.ROI... |
def eval_model(args, test_dataloader, model, device, single=False):
model.eval()
if (device == 'pytorch'):
model.to('cpu')
device = 'cpu'
elif (device == 'dace'):
model.to('cpu')
dummy_input = next(iter(test_dataloader))
model = DaceModule(model, dummy_inputs=dummy_in... |
def load_state_ckpt(model_path, model):
checkpoint = torch.load(model_path, map_location='cuda:{}'.format(torch.cuda.current_device()))
load_ckpt = match_ckpt_key(model, checkpoint)
model.load_state_dict(load_ckpt, strict=False)
ckpt_keys = set(load_ckpt.keys())
own_keys = set(model.state_dict().key... |
(hash_funcs={torch.nn.parameter.Parameter: (lambda parameter: parameter.data.detach().cpu().numpy())}, allow_output_mutation=True)
def load_model_cache(name, model_type, is_eval, device):
    """Cached wrapper around load_model.

    NOTE(review): the line above this function looks like a stripped
    Streamlit @st.cache(...) decorator (hash_funcs / allow_output_mutation
    are st.cache keywords) — confirm against the upstream source.
    """
    return load_model(name, model_type, is_eval, device)
def aslinearoperator(A):
if isinstance(A, LinearOperator):
return A
elif (isinstance(A, np.ndarray) or isinstance(A, np.matrix)):
if (A.ndim > 2):
raise ValueError('array must have ndim <= 2')
A = np.atleast_2d(np.asarray(A))
return MatrixLinearOperator(A)
elif (i... |
def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, toTensor=True, normalized=True):
transform_list = []
if grayscale:
transform_list.append(transforms.Grayscale(1))
if ('resize' in opt.preprocess):
osize = [opt.load_size, opt.load_size]
transform_list.append(t... |
def getFeature(filename):
frameCnt = frames[filename.split('.')[0]]
file = AudioSegment.from_file(((input_dir + '/') + filename), 'm4a')
filename = filename.split('.')[0]
file.export((filename + '.wav'), format='wav')
featureVec = tf.Variable([[0 for i in range(128)]], dtype='float32')
(audio, s... |
def _pickle_RegularSequenceRing(k, coefficients, category):
    """Unpickling helper: rebuild a RegularSequenceRing from its constructor data."""
    return RegularSequenceRing(k, coefficients, category=category)
def get_text_mask(for_image, sz=20):
font_fname = '/usr/share/fonts/truetype/freefont/FreeSansBold.ttf'
font_size = sz
font = ImageFont.truetype(font_fname, font_size)
img_mask = Image.fromarray(((np.array(for_image) * 0) + 255))
draw = ImageDraw.Draw(img_mask)
draw.text((128, 128), 'hello world... |
def l2_params_all(model, shaps, grads):
    """L2 norm of all non-1D (weight-matrix) parameters of model.

    1D parameters (biases, norm scales) are excluded. `shaps` and `grads`
    are accepted for signature compatibility but are unused here.
    """
    weight_vectors = [p.view(-1) for p in model.parameters() if len(p.shape) != 1]
    stacked = torch.cat(weight_vectors)
    return torch.norm(stacked)
def get_optimizer_opts(parser):
group = parser.add_argument_group('Optimizer options')
group.add_argument('--optim', default='sgd', type=str, choices=supported_optimziers, help='Optimizer')
group.add_argument('--adam-beta1', default=0.9, type=float, help='Beta1 for ADAM')
group.add_argument('--adam-beta... |
class FC(nn.Module):
def __init__(self, in_size, out_size, dropout_r=0.0, use_relu=True):
super(FC, self).__init__()
self.dropout_r = dropout_r
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
... |
def setup_for_distributed_mode(model: nn.Module, optimizer: torch.optim.Optimizer, device: object, n_gpu: int=1, local_rank: int=(- 1), fp16: bool=False, fp16_opt_level: str='O1') -> (nn.Module, torch.optim.Optimizer):
model.to(device)
if fp16:
try:
import apex
from apex import a... |
class CSVBatchLogger():
def __init__(self, csv_path, n_groups, mode='w'):
columns = ['epoch', 'batch']
for idx in range(n_groups):
columns.append(f'avg_loss_group:{idx}')
columns.append(f'exp_avg_loss_group:{idx}')
columns.append(f'avg_acc_group:{idx}')
... |
def find(tokens, tag):
    """Return the index of the first element equal to tag.

    Asserts (AssertionError) if tag is absent, preserving the original
    fail-loudly contract.
    """
    position = 0
    for token in tokens:
        if token == tag:
            return position
        position += 1
    assert False
def weights_init_normal(m):
classname = m.__class__.__name__
if (classname.find('Conv') != (- 1)):
init.normal(m.weight.data, 0.0, 0.02)
elif (classname.find('Linear') != (- 1)):
init.normal(m.weight.data, 0.0, 0.02)
elif (classname.find('BatchNorm2d') != (- 1)):
init.normal(m.we... |
_utils.test()
def test_matrix_and_func():
    # Exercise a taichi vector type used both as a constructor and as a
    # function annotation.
    # NOTE(review): the @test_utils.test() decorator (line above) and likely
    # @ti.func / @ti.kernel decorators on the inner defs appear stripped from
    # this dump — confirm against the upstream source.
    vec4d = ti.types.vector(4, float)
    v = vec4d(1, 2, 3, 4)
    def length(w: vec4d):
        return w.norm()
    def test() -> ti.f32:
        return length(v)
    # norm of (1, 2, 3, 4) is sqrt(30) ~= 5.477226
    approx(test(), 5.477226)
def computeJGA(greedy, answer, example_ids, tasks):
assert (len(tasks) == 1)
dataset_class = getattr(dialogues, tasks[0].dataset_name)
dataset = dataset_class()
cur_dial_id = None
full_answer = []
full_greedy = []
assert (len(example_ids) == len(greedy) == len(answer))
for (id_, g, a) in... |
def brightness_down_mapping(level, src_img):
    """Darken src_img by a level-dependent factor with small random jitter.

    level == 1 maps to a factor of 0.5; any other level is used directly.
    The enhancement factor is 1 / (1 + 0.4 * factor + jitter), so larger
    levels produce darker images.
    """
    factor = 0.5 if level == 1 else level
    jitter = np.random.uniform(-0.01, 0.01)
    noisy_factor = 1 / (1 + factor * 0.4 + jitter)
    return ImageEnhance.Brightness(src_img).enhance(noisy_factor)
class Task_Head(nn.Module):
def __init__(self, args, logger):
super(Task_Head, self).__init__()
self.args = args
self.logger = logger
self.cls_embed_layer = nn.Embedding(1, args.model_step_forecasting_segment_hidden_dim)
if (args.model_step_forecasting_time_pos_embed_type == ... |
def test_power_two_range_stmt_interactive():
group_pair = BilinearGroupPair()
group = group_pair.G1
value = Secret(value=Bn(10))
randomizer = Secret(value=group.order().random())
(g, h) = make_generators(2, group)
limit = 20
com = ((value * g) + (randomizer * h))
p1 = PowerTwoRangeStmt(c... |
def download_translations(path: str):
repo = '
if (not os.path.isdir(path)):
logger.info(f'Translation file not found. Downloading from {repo}.')
subprocess.run(['git', 'clone', repo])
subprocess.run(['mv', 'fisher-callhome-corpus', f'{path}']) |
def module_init():
    """Create the root binding Module for ns3 nix-vector routing."""
    return Module('ns.nix_vector_routing', cpp_namespace='::ns3')
def best_known_covering_design_www(v, k, t, verbose=False):
v = int(v)
k = int(k)
t = int(t)
param = ('?v=%s&k=%s&t=%s' % (v, k, t))
url = (' + param)
if verbose:
print(('Looking up the bounds at %s' % url))
f = urlopen(url, context=default_context())
try:
s = bytes_to_st... |
.parametrize('media_type, expected', (('application/json', {'application/json'}), ('application/problem+json', {'application/problem+json'}), ('application/*', {'application/json', 'application/octet-stream', 'application/x-www-form-urlencoded', 'application/x-yaml', 'application/xml'}), ('*/form-data', {'multipart/for... |
def fibonacci(v):
    """Return the v-th Fibonacci number (F(0) = 0, F(1) = 1).

    Rewritten iteratively: the original double recursion was O(phi^n) time
    and recursed without bound (RecursionError) for negative input.

    Raises:
        ValueError: if v is negative.
    """
    if v < 0:
        raise ValueError('fibonacci is undefined for negative indices')
    previous, current = 0, 1
    for _ in range(v):
        previous, current = current, previous + current
    return previous
class DefaultLiteralArgNode(ExprNode):
subexprs = []
is_literal = True
is_temp = False
def __init__(self, pos, arg):
super(DefaultLiteralArgNode, self).__init__(pos)
self.arg = arg
self.type = self.arg.type
self.evaluated = False
def analyse_types(self, env):
... |
def _load_checkpoint_for_ema(model_ema, checkpoint):
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file) |
class ReformerTokenizerFast(PreTrainedTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['attention_mask']
slow_tokenizer_class = ReformerTokenizer
def _... |
def quant_score(score):
for element in score.flat:
onset = (np.ceil((element.offset / 0.25)) * 0.25)
if (isinstance(element, note.Note) or isinstance(element, note.Rest) or isinstance(element, chord.Chord)):
offset = (np.ceil(((element.offset + element.quarterLength) / 0.25)) * 0.25)
... |
_kl(Gamma, Beta)
_kl(Gamma, Pareto)
_kl(Gamma, Uniform)
def _kl_gamma_infinity(p, q):
    """KL(p || q) for distribution pairs where the divergence is infinite everywhere."""
    concentration = p.concentration
    return _infinite_like(concentration)
def get_derangements(views, deranged_classes_ratio=0.5, shuffle_true_ids=True, class_datapoints_threshold=None, shuffle_datapoints=True, shuffle_each_cluster=False):
(all_features, keys, dataset_size, subset_size, num_matched_classes, nclasses) = match_classes_with_shuffle(views, deranged_classes_ratio, class_datap... |
def __add_file_handler(logger, file_name):
    """Attach an append-mode FileHandler to logger, reusing the shared 'file' formatter."""
    handler = logging.FileHandler(file_name, mode='a')
    handler.setFormatter(__COLLECT_HANDLERS['file'].formatter)
    logger.addHandler(handler)
def format_model_inputs(sample):
    """Extract (input, output) text from a sample dict.

    Uses sample['prompt_all'] when non-empty; otherwise joins
    sample['prompt_task'] and sample['prompt_context'] with a blank line.
    """
    model_input = sample['prompt_all']
    if model_input == '':
        model_input = sample['prompt_task'] + '\n\n' + sample['prompt_context']
    return model_input, sample['output']
def split_disjunctions(task):
for proxy in tuple(all_conditions(task)):
if isinstance(proxy.condition, pddl.Disjunction):
for part in proxy.condition.parts:
new_proxy = proxy.clone_owner()
new_proxy.set(part)
new_proxy.register_owner(task)
... |
def register_Ns3ErrorChannel_methods(root_module, cls):
cls.add_constructor([param('ns3::ErrorChannel const &', 'arg0')])
cls.add_constructor([])
cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::SimpleNetDevice >', 'device')], is_virtual=True)
cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >'... |
def fix_png_file(filename, folder):
    # Run pngfix on a PNG inside `folder`, then overwrite the original with
    # the fixed output.
    # NOTE(review): the literal "(unknown)" inside both f-strings looks like
    # a mangled {filename} placeholder — as written, the `filename` parameter
    # is never used; confirm against the upstream source.
    # NOTE(review): shell=True with interpolated paths is shell-injection
    # prone; prefer subprocess.call([...], shell=False) with an argument list.
    subprocess.call(f'pngfix --quiet --strip=color --prefix=fixed_ "(unknown)"', cwd=f'{folder}', shell=True)
    subprocess.call(f'mv "fixed_(unknown)" "(unknown)"', cwd=f'{folder}', shell=True)
class Trainer():
def __init__(self, args, loader, my_model, my_loss, ckp):
self.args = args
self.noise_g = args.noise_g
self.ckp = ckp
self.loader_train = loader.loader_train
self.loader_test = loader.loader_test
self.model = my_model
self.loss = my_loss
... |
_module()
class HVUDataset(BaseDataset):
def __init__(self, ann_file, pipeline, tag_categories, tag_category_nums, filename_tmpl=None, **kwargs):
assert (len(tag_categories) == len(tag_category_nums))
self.tag_categories = tag_categories
self.tag_category_nums = tag_category_nums
sel... |
def rgb_to_label(mask):
    """Convert an RGB mask (H, W, 3) to a binary uint8 label map.

    Black pixels [0, 0, 0] map to 1, white pixels [255, 255, 255] map to 0;
    all other colors stay 0 (the array's initial value).
    """
    height, width = mask.shape[0], mask.shape[1]
    label = np.zeros(shape=(height, width), dtype=np.uint8)
    black = np.all(mask == [0, 0, 0], axis=-1)
    white = np.all(mask == [255, 255, 255], axis=-1)
    label[black] = 1
    label[white] = 0
    return label
def qepcad_console(memcells=None):
    """Launch an interactive qepcad session; only usable from a real terminal."""
    from sage.repl.rich_output.display_manager import get_display_manager
    display_manager = get_display_manager()
    if not display_manager.is_in_terminal():
        raise RuntimeError('Can use the console only in the terminal. Try %%qepcad magics instead.')
    os.system(_qepcad_cmd(memcells))
('/predict', methods=['POST'])
def predict():
    """POST endpoint: run the model on the request JSON 'text' field.

    Returns {'result': <prediction>} on success, or {'result': 'Model Failed'}
    on any error (best-effort API boundary; the error is printed server-side).
    """
    text = request.json['text']
    try:
        prediction = model.predict(text)
        return jsonify({'result': prediction})
    except Exception as e:
        print(e)
        return jsonify({'result': 'Model Failed'})
class LayerNormBench(NormalizationBench):
    """Benchmark wrapper around layer normalization."""

    def forward(self):
        # self.layer_norm / self.data / self.H / self.W are presumably set up
        # by the NormalizationBench base class — confirm upstream.
        y = self.layer_norm(self.data, [self.H, self.W])
        return y

    @staticmethod
    def module():
        """Benchmark identifier string."""
        # @staticmethod added: without it, calling module() on an instance
        # raised TypeError (self passed to a zero-parameter function).
        # Class-level calls (LayerNormBench.module()) keep working unchanged.
        return 'layernorm'
class TextSummarizationTool(PipelineTool):
default_checkpoint = 'philschmid/bart-large-cnn-samsum'
description = 'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, and returns a summary of the text.'
name = 'summarizer'
pre_processor_class = AutoT... |
def test_list_errors():
with pytest.raises(ValueError):
a = ak.highlevel.ArrayBuilder()
a.end_list()
with pytest.raises(ValueError):
a = ak.highlevel.ArrayBuilder()
a.real(3.14)
a.end_list()
with pytest.raises(ValueError):
a = ak.highlevel.ArrayBuilder()
... |
def clone_model(model, memo=None):
memo = ({} if (memo is None) else memo)
cloned = model.__new__(type(model))
cloned.__dict__ = model.__dict__.copy()
cloned._parameters = _clone_ordered_dict(model._parameters, memo)
cloned._buffers = _clone_ordered_dict(model._buffers, memo)
cloned._sub_layers ... |
def group_text_reports(groupframe):
    """Concatenate a report group's TEXT entries, ordered by description then date.

    Returns a pandas Series with a single 'TEXT' field holding the
    space-joined, stripped text.
    """
    ordered = groupframe.sort_values(by=['DESCRIPTION', 'CHARTDATE'])
    merged_text = ' '.join(ordered['TEXT']).strip()
    return pd.Series({'TEXT': merged_text})
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice'])
regis... |
def validate_nl_onderwijsnummer(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(onderwijsnummer.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
i... |
def get_collate(data_sources):
    """Build a collate_fn that packs a list of samples into a Batch keyed by data source."""
    def collate_fn(samples):
        per_source = {source: source.to_torch([item[source] for item in samples]) for source in data_sources}
        return Batch(len(samples), per_source)
    return collate_fn
def read_points3D_binary(path_to_model_file):
with open(path_to_model_file, 'rb') as fid:
num_points = read_next_bytes(fid, 8, 'Q')[0]
xyzs = np.empty((num_points, 3))
rgbs = np.empty((num_points, 3))
errors = np.empty((num_points, 1))
for p_id in range(num_points):
... |
def create_aa(aa_layer, channels, stride=2, enable=True):
    """Instantiate an anti-aliasing layer, or nn.Identity when disabled.

    AvgPool2d-style layers take only the stride (as kernel size); other
    layers are built with explicit channels and stride keywords.
    """
    if not aa_layer or not enable:
        return nn.Identity()
    if issubclass(aa_layer, nn.AvgPool2d):
        return aa_layer(stride)
    return aa_layer(channels=channels, stride=stride)
def test_nested_IndexedArray_NumpyArray():
v2a = ak.contents.ListOffsetArray(ak.index.Index64(np.array([0, 1, 8], dtype=np.int64)), ak.contents.indexedarray.IndexedArray(ak.index.Index(np.array([999, 2, 2, 0, 1, 4, 5, 4], np.int64)), ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5]))))
... |
def brevity_penalty(closest_ref_len, hyp_len):
    """BLEU brevity penalty for a hypothesis of length hyp_len.

    Returns 1 when the hypothesis is longer than the closest reference,
    0 for an empty hypothesis (avoiding division by zero), and
    exp(1 - ref/hyp) otherwise.
    """
    if hyp_len > closest_ref_len:
        return 1
    if hyp_len == 0:
        return 0
    return math.exp(1 - closest_ref_len / hyp_len)
def test_tuple_elements_enumerate():
    """Check that enumerate over a list literal is unrolled correctly by dace."""
    def tounroll(A: dace.float64[3]):
        for idx, increment in enumerate([1, 2, 3]):
            A[idx] += increment
    data = np.zeros([3])
    tounroll(data)
    assert np.allclose(data, np.array([1, 2, 3]))
()
('--data_path')
('--out_path')
('--r_in', default=0.02)
('--r_out', default=0.02)
('--mintime', default=42)
def main(data_path, out_path, r_in, r_out, mintime):
crawl_src = list()
s_sitenum = 0
e_sitenum = 100
instnum = 90
max_time = 0
if (not os.path.exists(out_path)):
os.makedirs(ou... |
def get_prior_grad_FG(prior, tx_hat):
    """Numerical gradient of the prior's log-partition plus its forward second moment.

    Returns a dict with 'grad_tx_hat_A' (finite-difference gradient at tx_hat)
    and 'tx' (the prior's forward second moment at tx_hat).
    """
    def log_partition(t):
        return prior.prior_log_partition_FG(t)
    grad_tx_hat_A = numerical_1st_derivative(tx_hat, log_partition, EPSILON)
    tx = prior.forward_second_moment_FG(tx_hat)
    return {'grad_tx_hat_A': grad_tx_hat_A, 'tx': tx}
class TensorWithIndices(SageObject):
def _parse_indices(indices, tensor_type=None, allow_contraction=True, allow_symmetries=True):
indices = indices.replace('{', '').replace('}', '')
allowed_pattern = (((((('(\\(' + _alph_or_dot_pattern) + '{2,}\\)|\\[') + _alph_or_dot_pattern) + '{2,}\\]|') + _alph... |
def derivative(signal, index=1):
d1 = np.array(([0] + [(b - a) for (a, b) in zip(signal, signal[1:])]))
if (index == 1):
return d1
elif (index == 2):
return np.array(([0] + [(b - a) for (a, b) in zip(d1, d1[1:])]))
else:
raise ValueError('Only support first or second derivatives'... |
class QuestionAskingAndAnswerCheckingSkill():
def __init__(self, qas, user):
self._user = user
self._factoid_qas = qas
self._question_asked = False
self._last_factoid_qas = {}
self._is_first_incorrect = True
def ask_question(self):
if (len(self._factoid_qas) == 0)... |
def test_getitem_list_slice():
    """Slicing a proxied list yields a proxy and records slice as the arg type."""
    proxy = tt.ObjectProxy(['a', 'b'])
    tail = proxy[1:]
    assert tail == ['b']
    assert isinstance(tail, tt.ObjectProxy)
    trace = tt.UsageTraceNode.from_proxy(proxy)
    assert slice in trace.children['__getitem__'].arg_types[0]
def create_app_logger(filename):
logger = logging.getLogger(filename)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
os.makedirs('logs', exist_ok=True)
rotateHandler = RotatingFileHandler(('logs/' + 'g-tracker-admin-api.log'), mode='a', maxB... |
class ResidualConvUnit_custom(nn.Module):
def __init__(self, features, activation, bn):
super().__init__()
self.bn = bn
self.groups = 1
self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=(not self.bn), groups=self.groups)
self.conv2 = nn.Conv2... |
def get_validation_transform():
    """Validation-time transform pipeline: normalization only, no augmentation."""
    return albu.Compose([albu.Normalize()])
_properties
class WarpTiling(xf.SingleStateTransformation):
warp_size = properties.Property(dtype=int, default=32, desc='Hardware warp size')
replicate_maps = properties.Property(dtype=bool, default=True, desc='Replicate tiled maps that lead to multiple other tiled maps')
mapentry = xf.PatternNode(nodes.Map... |
class TTableRow(object):
thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
_snap.TTableRow_swiginit(self, _snap.new_TTableRow())
def AddInt(self, Val):
return _snap.TTableRow_AddInt(self, ... |
class ConvVAE(PyTorchModule):
def __init__(self, representation_size, init_w=0.001, input_channels=1, imsize=84, added_fc_size=0, hidden_init=ptu.fanin_init, output_activation=identity, min_variance=0.0001, use_min_variance=True, state_size=0, action_dim=None, large_arch=False, n_imp=1, gaussian_decoder=True, use_s... |
.parametrize('axis', (0, 1))
.parametrize('family', ('chebyshev',))
def test_biharmonic2D(family, axis):
la = cla
N = (16, 16)
SD = FunctionSpace(N[axis], family=family, bc=(0, 0, 0, 0))
K1 = FunctionSpace(N[((axis + 1) % 2)], family='F', dtype='d')
subcomms = mpi4py_fft.pencil.Subcomm(MPI.COMM_WORL... |
def main(args):
languages = set()
for language_directory in os.listdir(DATADIR):
if ('_' in language_directory):
(src, tgt) = language_directory.split('_')
languages.add(LanguagePair(src=src, tgt=tgt))
data = existing_data()
train_languages = sorted(languages)
for lan... |
class NoiseInjection(object):
def __init__(self, path=None, noise_levels=(0, 0.5)):
self.paths = ((path is not None) and librosa.util.find_files(path))
self.noise_levels = noise_levels
def inject_noise(self, data):
noise_path = np.random.choice(self.paths)
noise_level = np.random... |
class TestOverleaf(TemporaryShowyourworkRepository, ShowyourworkRepositoryActions):
local_build_only = True
overleaf_id = '6409f16f438b5fb7c4dfa837'
auth_retries = 1
auth_sleep = 60
def startup(self):
for _n in range(self.auth_retries):
try:
overleaf.wipe_remote(s... |
def _absolute_dims(rank, dims):
return tuple([((rank + dim) if (dim < 0) else dim) for dim in dims]) |
class TestMobilesAndConfigurationPaths(TestCore):
def setUp(self):
self.pyrep = PyRep()
self.pyrep.launch(path.join(ASSET_DIR, 'test_scene_mobiles.ttt'), headless=True)
self.pyrep.step()
self.pyrep.start()
def test_get_mobile(self):
for (mobile_name, mobile_type) in MOBIL... |
def relocate_legend(fig: Figure, loc: str) -> Figure:
remains = []
targets = []
for layout in fig.center:
if isinstance(layout, Legend):
targets.append(layout)
else:
remains.append(layout)
fig.center = remains
for layout in targets:
fig.add_layout(layo... |
class IsotropicGaussian(nn.Module):
def __init__(self, net, sigma=1.0, sigma_trainable=False, error_normalize=True, deterministic=False):
super().__init__()
self.net = net
self.sigma_trainable = sigma_trainable
self.error_normalize = error_normalize
self.deterministic = deter... |
def _find_ruff() -> Path:
global _ruff_path
if (_ruff_path is not None):
return _ruff_path
try:
ruff = find_ruff_bin()
except FileNotFoundError as ex:
ruff = shutil.which('ruff')
if (ruff is None):
raise FileNotFoundError('Could not find ruff') from ex
_ru... |
class _ExtractModuleReferences(ast.NodeVisitor):
def run(cls, src: str, package: str) -> List[Tuple[(str, Optional[str])]]:
visitor = cls(package)
tree = ast.parse(src)
visitor.visit(tree)
return list(visitor.references.keys())
def __init__(self, package):
super().__init_... |
def blobs_potential(r_vectors, *args, **kwargs):
number_of_blobs = np.int32(len(r_vectors))
(threads_per_block, num_blocks) = set_number_of_threads_and_blocks(number_of_blobs)
periodic_length = kwargs.get('periodic_length')
debye_length_wall = kwargs.get('debye_length_wall')
eps_wall = kwargs.get('r... |
def preprocess(img_paths: list):
(images, sizes, scales) = ([], [], [])
resizer = Resizer(cfg.MODEL.IMAGE_SIZE)
to_numpy = ImageToNumpy()
normalizer = Normalizer()
to_tensor = NumpyToTensor()
for img_path in img_paths:
pil_img = Image.open(img_path).convert('RGB')
sizes.append(pi... |
def ulabel(label):
if (not isinstance(label, (bytes, bytearray))):
try:
label = label.encode('ascii')
except UnicodeEncodeError:
check_label(label)
return label
label = label.lower()
if label.startswith(_alabel_prefix):
label = label[len(_alabel_pr... |
def and_w_spk(w, spk):
synapse = 0
if (w == 1):
if (spk == 1):
synapse += 1
if (spk == 3):
synapse += 1
if (w == 2):
if (spk == 2):
synapse += 1
if (spk == 3):
synapse += 1
if (w == 3):
if (spk == 1):
syn... |
def bTree(allnodes, path, verbose=False):
allnodes = correctThiago(allnodes)
misplaced_children = findMisplacedChildren(allnodes)
misplaced_children.extend(findMisplacedChildren(allnodes))
misplaced_children.extend(findMisplacedChildren(allnodes))
parents = findLonelyParent(allnodes)
if verbose:... |
def connected_components(preds, args):
preds = torch.stack(preds)
preds = nn.Softmax(dim=1)(preds)
class_pred = torch.argmax(preds, dim=1).cpu().numpy()
resolution = int(args.resolution)
img = np.zeros((resolution, resolution)).astype(np.uint8)
img[(np.arange(resolution).repeat(resolution), np.t... |
class CanineForQuestionAnswering(metaclass=DummyObject):
    # Placeholder class emitted when torch is unavailable: instantiation
    # routes through requires_backends, which raises an informative
    # ImportError naming the missing backend.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def cython_aliases(required_modules=None, optional_modules=None):
import pkgconfig
import itertools
if (required_modules is None):
required_modules = default_required_modules
if (optional_modules is None):
optional_modules = default_optional_modules
aliases = {}
for (lib, require... |
class ExperimentVisual():
def __init__(self, df: pd.DataFrame, out_fullfn: Optional[str]=None):
self.df = df
self.out_fullfn = out_fullfn
def _adapt_agg(self, var: Optional[str]='val_rmse', only_mean: Optional[bool]=False) -> pd.DataFrame:
df1 = self.df
(a, b, c) = ([], [], [])
... |
def _key_is_deprecated(full_key):
    """Return True iff full_key is registered in the deprecated-keys set."""
    # Direct membership test replaces the verbose if/return-True/return-False.
    # NOTE: _DEPCRECATED_KEYS spelling matches the module-level constant.
    return full_key in _DEPCRECATED_KEYS
class DeconvolutionalDecoder(nn.Module):
def __init__(self, in_channels, out_channels, num_hiddens, num_residual_layers, num_residual_hiddens, use_kaiming_normal, use_jitter, jitter_probability, use_speaker_conditioning, device, verbose=False):
super(DeconvolutionalDecoder, self).__init__()
self._us... |
def get_layer_uid(layer_name=''):
    """Return a unique, monotonically increasing id for layer_name, starting at 1.

    Counters are kept in the module-level _LAYER_UIDS registry.
    """
    if layer_name in _LAYER_UIDS:
        _LAYER_UIDS[layer_name] += 1
        return _LAYER_UIDS[layer_name]
    _LAYER_UIDS[layer_name] = 1
    return 1
def get_sizes(t):
    """Shape of tensor t as a list: dynamic tf.shape entries for all leading
    dimensions, and the statically known size for the last dimension."""
    static_shape = t.get_shape().as_list()
    shape = [tf.shape(t)[i] for i in six.moves.range(len(static_shape[:-1]))]
    shape.append(static_shape[-1])
    return shape
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.