code stringlengths 101 5.91M |
|---|
def test_top_selector_find_top_k_binary_values_not_2D():
ts = TopSelector()
tst = TopSelectorTorch()
with pytest.raises(ValueError):
ts.find_top_k_binary(np.empty((2, 3, 3)), k)
with pytest.raises(ValueError):
ts.find_top_k_binary(np.empty(3), k)
with pytest.raises(ValueError):
... |
class Affine(Module):
def __init__(self, n_inmaps, n_outmaps, base_axis=1, w_init=None, b_init=None, fix_parameters=False, rng=None, with_bias=True):
if (not hasattr(n_outmaps, '__iter__')):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
... |
def selectCandidateFrames(files, numFrames, dataSet, frameSpacing=10, startFrame=0):
confThresh = 0.5
confCntThresh = 12
neighborThresh = 10
objSegThresh = 0.01
finalIndList = []
finalPklDataList = []
ind = startFrame
while (ind < len(files)):
file = files[ind]
resultsDir... |
def brier_score(y_hat: Tensor, y: Tensor) -> Tensor:
    """Mean per-sample L2 norm of (predicted probs - one-hot target).

    Args:
        y_hat: (batch, num_classes) prediction tensor.
        y: (batch,) integer class labels.
    Returns:
        Scalar CPU tensor; NaN for an empty batch.
    """
    n = y_hat.size(0)
    if n == 0:
        # No samples: the mean is undefined, report NaN.
        return torch.as_tensor(float('nan'))
    residual = y_hat.clone()
    # Subtract 1 at each sample's true class, turning every row into
    # (probabilities - onehot(label)).
    residual[torch.arange(n), y] -= 1
    # Per-row Euclidean norm, averaged over the batch, detached on CPU.
    return residual.norm(dim=-1, p=2).mean().detach().cpu()
def get_geom_centroid(geom, return_lat_lng=False):
    """Return the centroid of *geom* as a 2-element list.

    ``geom.centroid.xy`` is expected to be a (xs, ys) pair of coordinate
    sequences (shapely-style).  Default order is [lng, lat] (i.e. [x, y]);
    pass ``return_lat_lng=True`` for [lat, lng].
    """
    xs, ys = geom.centroid.xy
    # pop() mirrors the original behavior: the last coordinate is consumed
    # from each sequence.
    lng = xs.pop()
    lat = ys.pop()
    return [lat, lng] if return_lat_lng else [lng, lat]
_utils.test(arch=get_host_arch_list())
def test_oop_memory_leak():
_oriented
class X():
def __init__(self):
self.py_l = ([0] * 5242880)
def run(self):
for i in range(1):
pass
def get_process_memory():
process = psutil.Process(os.getpid())
... |
def search_for_sp(args, document_sents, summary_sents, oracle_sent_indices, compression_model, compression_tokenizer, fusion_model, fusion_tokenizer, paraphrase_model, paraphrase_tokenizer):
top_programs = [[] for _ in range(args.best_programs)]
top_program_strings_with_intermediates = ([''] * args.best_program... |
def print_stats(name):
print(('%s:' % name))
filename = ('%s/%s.txt' % (DestDir, name))
assert os.path.isfile(filename)
data = eval(open(filename).read())
assert isinstance(data, list)
print(' num seqs:', len(data))
total_duration = 0.0
total_num_chars = 0
for seq in data:
t... |
def auto_find_start_epoch(args):
output_dir = Path(args.output_dir)
if (args.auto_resume and (len(args.resume) == 0)):
import glob
all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
latest_ckpt = (- 1)
for ckpt in all_checkpoints:
t = ckpt.split... |
class ConvMLPStage(nn.Module):
def __init__(self, num_blocks, embed_dims, mlp_ratio=1, drop_path_rate=0.1, downsample=True):
super(ConvMLPStage, self).__init__()
self.blocks = nn.ModuleList()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_blocks)]
for i in range(num_b... |
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--trained-model-dir-path', required=True, type=str, help='The directory that contains the pre-trained model')
parser.add_argument('--output-dir-path', required=True, type=str, help='The directory that would contain the conver... |
.experimental
def test_map_metric_value_by_user():
    """Spot-check MAP._get_metric_value_by_user: a full miss scores 0, a
    single relevant item at rank 1 out of k=2 scores 0.5."""
    miss = MAP._get_metric_value_by_user(2, (), (2, 3))
    assert miss == 0
    partial = MAP._get_metric_value_by_user(2, (1, 2), (1,))
    assert partial == 0.5
def classify(images, model, adversarial_attack):
    """Run a TensorFlow *model* on a torch batch and return torch logits on GPU.

    ``images`` is an NCHW torch tensor; TF expects NHWC, hence the transpose.
    ``adversarial_attack`` is accepted but not used in this path (kept for
    interface compatibility with callers).
    """
    # torch NCHW -> numpy NHWC -> TF tensor.
    nhwc = images.cpu().numpy().transpose(0, 2, 3, 1)
    tf_inputs = TFHider.tf.convert_to_tensor(nhwc)
    tf_outputs = model(tf_inputs)
    # Back to torch on the default CUDA device.
    return torch.from_numpy(tf_outputs.numpy()).cuda()
def squeezenet1_1(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> SqueezeNet:
    """Build a SqueezeNet 1.1 model by delegating to the shared ``_squeezenet`` factory.

    Args:
        pretrained: forwarded to the factory; presumably triggers loading of
            pretrained weights — confirm in ``_squeezenet``.
        progress: forwarded to the factory (conventionally a progress-bar flag).
        **kwargs: extra keyword arguments forwarded verbatim.
    """
    return _squeezenet('1_1', pretrained, progress, **kwargs)
def test_clone():
    """get_trace() must return whatever the underlying trace's clone() yields,
    and must call clone() exactly once."""
    observer = FooObserver()
    with mock.patch.object(observer._assertion_local_state, 'trace') as trace_mock:
        sentinel = object()
        trace_mock.clone.return_value = sentinel
        result = observer.get_trace()
        trace_mock.clone.assert_called_once()
        assert result == sentinel
_utils.test()
def test_advanced_unused_store_elimination_if():
val = ti.field(ti.i32)
ti.root.place(val)
def func():
a = 1
if val[None]:
a = 2
if val[None]:
a = 3
else:
a = 4
val[None] = a
else:
... |
def test_translation_factors_dataset():
source_text_per_factor = [dummy_source_text_factor_0, dummy_source_text_factor_1]
target_text_per_factor = [dummy_target_text_factor_0, dummy_target_text_factor_1, dummy_target_text_factor_2]
source_vocabulary_names = ['source.vocab.pkl', 'source_factor1.vocab.pkl']
... |
def parse_args():
parser = argparse.ArgumentParser(description='script to compute all statistics')
parser.add_argument('--data-path', help='Path to ground truth data', type=str)
parser.add_argument('--output-path', help='Path to output data', type=str)
parser.add_argument('--debug', default=0, help='Deb... |
_module()
class ClsHead(nn.Module):
def __init__(self, num_classes: int, in_channels: int, mlps: List[int]=[256], norm_args: dict=None, act_args: dict={'act': 'relu'}, dropout: float=0.5, global_feat: str=None, point_dim: int=2, **kwargs):
super().__init__()
if kwargs:
logging.warning(f'... |
_utils.test(require=ti.extension.quant_basic, arch=[ti.cpu, ti.cuda, ti.vulkan], exclude=[vk_on_mac, cuda_on_windows], debug=True)
def test_quant_store_no_fusion(capfd):
x = ti.field(dtype=ti.types.quant.int(16, True))
y = ti.field(dtype=ti.types.quant.int(16, True))
v = ti.BitpackedFields(max_num_bits=32)
... |
def extract_cnn_feature(model, inputs, modules=None, return_mask=False):
model.eval()
inputs = to_torch(inputs)
inputs = Variable(inputs, volatile=True)
if (modules is None):
tmp = model(inputs)
outputs = tmp[0]
outputs = outputs.data.cpu()
if return_mask:
mas... |
def _calc_same_pad(i: int, k: int, s: int, d: int):
return max(((((((- (i // (- s))) - 1) * s) + ((k - 1) * d)) + 1) - i), 0) |
class IdentitySet(collections.abc.MutableSet):
    def __init__(self, iterable=()):
        # Key each element by its id() so membership is identity-based;
        # the dict value keeps the object alive and retrievable.
        self.map = {id(x): x for x in iterable}
    def __contains__(self, value):
        # Identity membership: `x in s` is true iff this exact object was added,
        # regardless of __eq__.
        return (id(value) in self.map)
def __iter__(self):
return self.map.values()
    def __len__(self):
        # Number of distinct-by-identity elements stored.
        return len(self.map)
d... |
def calc_scores(scene_ids, obj_ids, matches, n_top, do_print=True, dataset=''):
insts = {i: {j: defaultdict((lambda : 0)) for j in scene_ids} for i in obj_ids}
for m in matches:
if m['valid']:
insts[m['obj_id']][m['scene_id']][m['im_id']] += 1
tars = 0
obj_tars = {i: 0 for i in obj_i... |
def main(args):
utils.import_user_module(args)
if (args.buffer_size < 1):
args.buffer_size = 1
if ((args.max_tokens is None) and (args.max_sentences is None)):
args.max_sentences = 1
assert ((not args.sampling) or (args.nbest == args.beam)), '--sampling requires --nbest to be equal to --... |
def use_spectral_norm(module, use_sn=False):
    """Optionally wrap *module* with spectral normalization.

    Returns ``spectral_norm(module)`` when ``use_sn`` is truthy, otherwise the
    module unchanged.
    """
    return spectral_norm(module) if use_sn else module
def save_checkpoint(work_dir, interval, model, model_ema, optimizer, scheduler, checkpoint):
epoch = (checkpoint['epoch'] + 1)
logger = get_root_logger()
use_fp16 = checkpoint.pop('use_fp16', False)
if use_fp16:
checkpoint.update({'amp': apex.amp.state_dict()})
checkpoint.update({'state_dict... |
class OperationKind(enum.Enum):
    """Enumeration of the operation categories handled here: GEMM plus 2-D and
    3-D convolution.  Values come from the project's ``enum_auto`` helper."""
    Gemm = enum_auto()
    Conv2d = enum_auto()
    Conv3d = enum_auto()
class FiniteWordPath_hexagonal_grid_callable(WordDatatype_callable, FiniteWordPath_hexagonal_grid, FiniteWord_class):
    """Finite word path on the hexagonal grid whose data is backed by a
    callable datatype.  Pure mix-in combination: all behavior comes from the
    three bases, so no body is needed."""
    pass
class AlbertConfig(PretrainedConfig):
model_type = 'albert'
def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act='gelu_new', hidden_dropout_prob=0, attention_probs_drop... |
def register_Ns3Dot11sIeBeaconTimingUnit_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_constructor([param('ns3::dot11s::IeBeaconTimingUnit const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetAid', 'uint8_t', [], is_const=True)
cls.add_method('GetBeaconInterva... |
class RemoteLock(object):
    """Cooperative lock implemented as MPI messages to the master rank.

    ``acquire``/``release`` send non-blocking LOCK/UNLOCK notifications to
    ``mpi_master``; ``self.locked`` only tracks this process's own view.
    NOTE(review): the ``isend`` request handles are never waited on — assumes
    the MPI runtime completes them; confirm against the rest of the mpi_comm
    usage.
    """
    def __init__(self):
        # Local bookkeeping only; no message is sent at construction time.
        self.locked = False
    def acquire(self):
        # Fire-and-forget lock request to the master process.
        mpi_comm.isend(None, dest=mpi_master, tag=tags.LOCK)
        self.locked = True
    def release(self):
        # Matching fire-and-forget unlock notification.
        mpi_comm.isend(None, dest=mpi_master, tag=tags.UNLOCK)
        self.locked = False
class MultigridSchedule(object):
def init_multigrid(self, cfg):
self.schedule = None
cfg.MULTIGRID.DEFAULT_B = cfg.TRAIN.BATCH_SIZE
cfg.MULTIGRID.DEFAULT_T = cfg.DATA.NUM_FRAMES
cfg.MULTIGRID.DEFAULT_S = cfg.DATA.TRAIN_CROP_SIZE
if cfg.MULTIGRID.LONG_CYCLE:
self.s... |
class Attention(torch.nn.Module):
def __init__(self, pointer):
super().__init__()
self.pointer = pointer
self.softmax = torch.nn.Softmax(dim=(- 1))
def forward(self, query, values, attn_mask=None):
attn_logits = self.pointer(query, values, attn_mask)
attn = self.softmax(a... |
class SplinterForQuestionAnswering(metaclass=DummyObject):
    """Placeholder emitted when the torch backend is unavailable.

    Instantiation calls ``requires_backends``, which raises an informative
    error instead of an opaque ImportError at import time.
    """
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def ml_minus_type(ts):
if ((ts == 'Z3_ast') or (ts == 'Z3_sort') or (ts == 'Z3_func_decl') or (ts == 'Z3_app') or (ts == 'Z3_pattern')):
return 'Z3_ast'
if ((ts == 'Z3_ast_plus') or (ts == 'Z3_sort_plus') or (ts == 'Z3_func_decl_plus') or (ts == 'Z3_app_plus') or (ts == 'Z3_pattern_plus')):
retu... |
def get_rx_feature_vector(taken_times, RX2id, size):
    """Build a binary feature vector marking which receivers were observed.

    Args:
        taken_times: iterable of receiver keys that were seen.
        RX2id: mapping from receiver key to its index in the feature vector.
        size: length of the output vector.
    Returns:
        List of 0/1 ints of length *size* with 1 at each mapped receiver index.

    Fixes vs. original: no shadowing of the builtin ``id``, and a single dict
    lookup per receiver instead of ``in`` followed by ``.get``.
    """
    feature_vector = [0] * size
    for rx in taken_times:
        idx = RX2id.get(rx)
        if idx is not None:
            feature_vector[idx] = 1
    return feature_vector
(nopython=True)
def bound_points_jit(points, upper_bound, lower_bound):
N = points.shape[0]
ndim = points.shape[1]
keep_indices = np.zeros((N,), dtype=np.int32)
success = 0
for i in range(N):
success = 1
for j in range((ndim - 1)):
if ((points[(i, j)] < lower_bound[j]) or... |
def relational_create_graph_features(is_directed=False, edge_weights=False):
r1 = {'label': 'r1'}
r2 = {'label': 'r2'}
features = np.array([[1, 1], [1, 0], [0, 1]])
nodes = pd.DataFrame(features, index=['a', 'b', 'c'])
edges = {'r1': pd.DataFrame([('a', 'b'), ('b', 'c')], columns=['source', 'target'... |
class ASR(sb.Brain):
def compute_forward(self, batch, stage):
batch = batch.to(self.device)
(wavs, wav_lens) = batch.sig
(wavs, wav_lens) = (wavs.to(self.device), wav_lens.to(self.device))
feats = self.modules.weighted_ssl_model(wavs)
y = self.modules.enc(feats)
p_tok... |
def TCliqueOverlap_GetOverlapCliques(*args):
    """Thin SWIG wrapper: forward all arguments verbatim to the native
    ``_snap`` implementation and return its result unchanged."""
    return _snap.TCliqueOverlap_GetOverlapCliques(*args)
def add_bias(tensor, init=None, name=None):
    """Add a learnable bias along the last dimension of *tensor* (TF1 API).

    Args:
        tensor: a TF1-style tensor; its static last dimension sizes the bias.
        init: optional initial value for the bias variable; defaults to zeros.
        name: optional name scope; falls back to ``tensor.op.name`` via the
            3-argument ``tf.name_scope(name, default_name, values)`` form.
    Returns:
        ``tf.nn.bias_add(tensor, b)`` where ``b`` is a fresh ``tf.Variable``.
    """
    if (init is None):
        # TF1 idiom: get_shape()[...].value yields a plain Python int.
        init = tf.zeros([tensor.get_shape()[(- 1)].value])
    with tf.name_scope(name, tensor.op.name, [tensor]):
        b = tf.Variable(init, name='b')
        return tf.nn.bias_add(tensor, b)
def imshow_collection(ic, *args, **kwargs):
import matplotlib.pyplot as plt
if (len(ic) < 1):
raise ValueError('Number of images to plot must be greater than 0')
num_images = len(ic)
k = ((num_images * 12) ** 0.5)
r1 = max(1, floor((k / 4)))
r2 = ceil((k / 4))
c1 = ceil((num_images /... |
class Combiner():
def __init__(self, cfg, device):
self.cfg = cfg
self.type = cfg.TRAIN.COMBINER.TYPE
self.device = device
self.epoch_number = cfg.TRAIN.N_EPOCH
self.func = torch.nn.Softmax(dim=1)
self.initilize_all_parameters()
def initilize_all_parameters(self):... |
class DataGenerator(Dataset):
def __init__(self, img_dir, split_file, transform):
self.img_name_list = []
self.transform = transform
with open(split_file, 'r') as split_name:
img_and_label_list = split_name.readlines()
for index in img_and_label_list:
img_path... |
class Fpr(Critic):
def __init__(self, recall_level=0.95):
super().__init__()
self.recall_level = recall_level
def get_name(self):
return (('FPR(' + str((self.recall_level * 100))) + ')')
def stable_cumsum(self, arr, rtol=1e-05, atol=1e-08):
out = np.cumsum(arr, dtype=np.float... |
def tree_to_token_index(root_node):
if (((len(root_node.children) == 0) or (root_node.type == 'string')) and (root_node.type != 'comment')):
return [(root_node.start_point, root_node.end_point)]
else:
code_tokens = []
for child in root_node.children:
code_tokens += tree_to_to... |
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='BreakoutNoFrameskip-v4')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--gpu', action='store_true')
args = parser.parse_args()
env = d3rlpy.envs.Atari(gym.make(args.en... |
def get_offsets_for_rewrites(func: LeanFunctionInfo, rewrites: List[str], rewrite_types: Dict[(str, Tuple[(CairoType, str)])]) -> List[str]:
rw_simp_offsets = []
for rw in rewrites:
if (not (rw in rewrite_types)):
continue
(rw_type, _) = rewrite_types[rw]
rw_offsets = func.st... |
.parametrize('function_name', get_all_functions_names())
def test_function_docstring(function_name, request):
res = numpydoc_validation.validate(function_name)
res['errors'] = list(filter_errors(res['errors'], method='function'))
if res['errors']:
msg = repr_errors(res, method=f'Tested function: {fu... |
def main(addr):
ssl_cert_off = {'cert_reqs': CERT_NONE}
ws = WebSocket(sslopt=ssl_cert_off)
try:
ws.connect('wss://{address}/ControlApi/socket.io/?EIO=3&transport=websocket'.format(address=addr))
except ws_exception.WebSocketBadStatusException:
return
except Exception:
return... |
class Discriminator(Component):
def __init__(self, tensor_in, config, condition=None, name='Discriminator', reuse=None):
super().__init__(tensor_in, condition)
with tf.variable_scope(name, reuse=reuse) as scope:
self.scope = scope
(self.tensor_out, self.nets) = self.build(con... |
class ExampleModel(nn.Module):
def __init__(self):
super(ExampleModel, self).__init__()
self.test_cfg = None
self.conv = nn.Conv2d(3, 3, 3)
def forward(self, img, img_metas, test_mode=False, **kwargs):
return img
def train_step(self, data_batch, optimizer):
loss = sel... |
.spark
def test_it_works(log):
model = ALSWrap()
dataset = create_dataset(log)
assert (model._params_tried() is False)
res = model.optimize(dataset, dataset, k=2, budget=1)
assert isinstance(res['rank'], int)
assert (model._params_tried() is True)
model.optimize(dataset, dataset, k=2, budget... |
class TFVisionEncoderDecoderModel(metaclass=DummyObject):
    """Placeholder emitted when the TensorFlow backend is unavailable.

    Instantiation calls ``requires_backends``, which raises an informative
    error instead of an opaque ImportError at import time.
    """
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def Select(a, *args):
    """Return the Z3 array-select expression ``a[args]``.

    Args:
        a: a Z3 array expression (checked via ``is_array_sort`` when debug
            assertions are enabled).
        *args: index expression(s), normalized by ``_get_args``.
    """
    args = _get_args(args)
    if z3_debug():
        _z3_assert(is_array_sort(a), 'First argument must be a Z3 array expression')
    return a[args]
.parametrize('device', ['cpu', 'cuda'])
def test_compatibility(device, f_min=100, sr=16000, T=1000):
if ((device == 'cuda') and (not torch.cuda.is_available())):
return
x = diffsptk.sin((T - 1), period=(sr / f_min)).to(device)
cqt = diffsptk.ConstantQTransform(T, sr, f_min=f_min).to(device)
X = ... |
class Rainbow():
buffer: Union[(UniformReplayBuffer, PrioritizedReplayBuffer)]
def __init__(self, env, args: SimpleNamespace) -> None:
self.env = env
self.save_dir = args.save_dir
self.use_amp = args.use_amp
net = networks.get_model(args.network_arch, args.spectral_norm)
... |
def __relay_to_torrc_default_include(relay):
if ('exitguard' in relay['nickname']):
return TORRC_RELAY_EXITGUARD_FILENAME
elif ('exit' in relay['nickname']):
return TORRC_RELAY_EXITONLY_FILENAME
elif ('guard' in relay['nickname']):
return TORRC_RELAY_GUARDONLY_FILENAME
else:
... |
def test_constructor_statement_accept(test_case_mock, variable_reference_mock, field_mock):
    """accept() on a field statement must dispatch to visit_field_statement
    exactly once, passing the statement itself."""
    field_statement = stmt.FieldStatement(test_case_mock, field_mock, variable_reference_mock)
    mock_visitor = MagicMock(stmt.StatementVisitor)
    field_statement.accept(mock_visitor)
    mock_visitor.visit_field_statement.assert_called_once_with(field_statement)
.lower_builtin('datetime', ArrayBuilderType, numba.types.NPDatetime)
def lower_datetime(context, builder, sig, args):
(arraybuildertype, xtype) = sig.args
(arraybuilderval, xval) = args
proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
unit = globalstring(context, builder, f'date... |
def test_points2boundary():
points = np.array([[1, 2]])
text_repr_type = 'quad'
text_score = None
with pytest.raises(AssertionError):
mask_utils.points2boundary([], text_repr_type, text_score)
with pytest.raises(AssertionError):
mask_utils.points2boundary(points, '', text_score)
... |
class MotherCell(Cell):
def __init__(self, gameServer, owner, position, radius):
Cell.__init__(self, gameServer, owner, position, radius)
self.cellType = 2
self.isSpiked = True
self.isMotherCell = True
self.color = Color(206, 99, 99)
self.motherCellMinRadius = 149
... |
def _chunk_iterator(itr, chunk_size):
chunk = []
for x in itr:
chunk.append(x)
if (len(chunk) == chunk_size):
(yield chunk)
chunk = []
if (len(chunk) > 0):
(yield chunk) |
class NLLLoss(LossBase):
def __init__(self, pred=None, target=None, ignore_idx=(- 100), reduction='mean'):
super(NLLLoss, self).__init__()
self._init_param_map(pred=pred, target=target)
assert (reduction in ('mean', 'sum', 'none'))
self.reduction = reduction
self.ignore_idx =... |
def test_legacy_cast():
    """bdtrc with a NaN first argument must propagate NaN through the legacy
    float-to-int cast path.

    The RuntimeWarning about truncating the float to an integer is expected,
    so it is filtered out for the duration of the call.
    """
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, 'floating point number truncated to an integer')
        res = sc.bdtrc(np.nan, 1, 0.5)
        assert_(np.isnan(res))
def get_detectron_ops_lib():
prefixes = ([_CMAKE_INSTALL_PREFIX, sys.prefix, sys.exec_prefix] + sys.path)
subdirs = ['lib', 'torch/lib']
for prefix in prefixes:
for subdir in subdirs:
ops_path = os.path.join(prefix, subdir, _DETECTRON_OPS_LIB)
if os.path.exists(ops_path):
... |
def sampling(mean, log_var):
    """Reparameterization trick: draw from N(mean, exp(log_var)) differentiably.

    Args:
        mean: tensor of means.
        log_var: tensor of log-variances, same shape as *mean*.
    Returns:
        ``mean + exp(log_var / 2) * eps`` with ``eps ~ N(0, I)``.
    """
    std = torch.exp(log_var / 2)
    noise = torch.randn_like(mean)
    return mean + std * noise
class VanillaBackprop():
def __init__(self, model):
self.model = model
self.gradients = None
self.model.eval()
self.hook_layers()
def hook_layers(self):
def hook_function(module, grad_in, grad_out):
self.gradients = grad_in[0]
self.grad_out = grad_... |
def add_user_location_interaction_into_graph(positive_rating):
Graph = nx.DiGraph()
for pair in positive_rating:
user = pair[0]
location = pair[1]
user_node = ('u' + user)
location_node = ('i' + location)
Graph.add_node(user_node)
Graph.add_node(location_node)
... |
def publishResult(ctxObj):
global brokerURL
if (brokerURL.endswith('/ngsi10') == True):
brokerURL = brokerURL.rsplit('/', 1)[0]
if (brokerURL == ''):
return
ctxObj['id'] = ('urn:ngsi-ld:Device.' + outputs[0]['id'])
ctxObj['type'] = outputs[0]['type']
print(' published result ')
... |
def attributes_to_tokens(attributes, staff=None):
tokens = []
divisions = None
for child in attributes.contents:
type_ = child.name
if (type_ == 'divisions'):
divisions = int(child.text)
elif (type_ in ('clef', 'key', 'time')):
if (staff is not None):
... |
def get_target_kpi(model, weights_compression, representative_data_gen, core_config, tpc):
kpi_data = mct.core.pytorch_kpi_data_experimental(model, representative_data_gen, core_config=core_config, target_platform_capabilities=tpc)
weights_kpi = ((BYTES_TO_FP32 * kpi_data.weights_memory) / weights_compression)
... |
def test_get_parents(digraph_2d):
    """_get_parents() on the 2-D digraph fixture must return nodes a, b, d, e."""
    expected = np.array(['a', 'b', 'd', 'e'])
    parents = digraph_2d._get_parents()
    assert_array_equal(expected, parents)
def mape(ref_data, test_data):
if (ref_data.size != test_data.size):
Warning('The data shape does not match!')
ref_d_vec = ref_data.flatten()
test_d_vec = test_data.flatten()
diff = (ref_d_vec - test_d_vec)
use_abs_ind = (np.abs(ref_d_vec) < 1)
mape_abs = np.abs(diff[use_abs_ind])
ma... |
def calc_FID(gen, batchsize=100, stat_file=('%s/cifar-10-fid.npz' % os.path.dirname(__file__)), dst=None, path=None, n_ims=5000):
'Frechet Inception Distance proposed by
.make_extension()
def evaluation(trainer=None):
model = load_inception_model(path)
stat = np.load(stat_file)
ims ... |
_module()
class GLEncoderDecoder(nn.Module):
def __init__(self, encoder=dict(type='GLEncoder'), decoder=dict(type='GLDecoder'), dilation_neck=dict(type='GLDilationNeck')):
super().__init__()
self.encoder = build_component(encoder)
self.decoder = build_component(decoder)
self.dilation... |
def compute_structure_indicator(mat_file, low_bound_sec=0, upp_bound_sec=128, sample_rate=2):
assert ((low_bound_sec > 0) and (upp_bound_sec > 0)), '`low_bound_sec` and `upp_bound_sec` should be positive, got: low_bound_sec={}, upp_bound_sec={}.'.format(low_bound_sec, upp_bound_sec)
low_bound_ts = (int((low_bou... |
class BaseEstimator():
def _get_param_names(cls):
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if (init is object.__init__):
return []
init_signature = signature(init)
parameters = [p for p in init_signature.parameters.values() if ((p.name != 'self') ... |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model_type', default=None, type=str, required=True, help=('Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys())))
parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Check path to pre-t... |
def filter_clusters(clusters, min_size=None, max_size=None):
    """Keep only clusters whose length lies within the given bounds.

    Args:
        clusters: iterable of sized cluster containers.
        min_size: inclusive lower bound on cluster length, or None for no bound.
        max_size: inclusive upper bound on cluster length, or None for no bound.
    Returns:
        The (possibly unfiltered) input when both bounds are None, otherwise a
        new filtered list.
    """
    result = clusters
    if min_size is not None:
        result = [cluster for cluster in result if len(cluster) >= min_size]
    if max_size is not None:
        result = [cluster for cluster in result if len(cluster) <= max_size]
    return result
def bbox_map_eval(det_result, annotation):
if isinstance(det_result, tuple):
bbox_det_result = [det_result[0]]
else:
bbox_det_result = [det_result]
iou_thrs = np.linspace(0.5, 0.95, (int(np.round(((0.95 - 0.5) / 0.05))) + 1), endpoint=True)
mean_aps = []
for thr in iou_thrs:
... |
('This function is now deprecated and will be removed in future releases of Pinocchio. Please change for the new function computeKineticEnergy.')
def kineticEnergy(model, data, q, v, update_kinematics=True):
if update_kinematics:
return pin.computeKineticEnergy(model, data, q, v)
else:
return pi... |
class RecordingGUI(QWidget):
def __init__(self, names, contactdb_recording_cb, hand_pose_recording_cb, *args, **kwargs):
super(RecordingGUI, self).__init__(*args, **kwargs)
ncols = 5
self.contactdb_recording_cb = contactdb_recording_cb
self.hand_pose_recording_cb = hand_pose_recordin... |
_staging_test
class ConfigPushToHubTester(unittest.TestCase):
def setUpClass(cls):
cls._token = login(username=USER, password=PASS)
def tearDownClass(cls):
try:
delete_repo(token=cls._token, name='test-config')
except HTTPError:
pass
try:
delet... |
def test_experiment_with_not_callable_task():
    """A non-callable task (here the int 1) must be rejected with ValueError."""
    pytest.raises(ValueError, run_experiment, 1)
def _ensure_exprdict(r):
if isinstance(r, int):
return {'typespec': 'integer'}
if isinstance(r, float):
return {'typespec': 'real'}
if isinstance(r, complex):
return {'typespec': 'complex'}
if isinstance(r, dict):
return r
raise AssertionError(repr(r)) |
def get_flag(var, fallback, expected=True, warn=True):
    """Report whether config variable *var* equals *expected*.

    When the variable is unset, optionally warn that the ABI tag may be
    incorrect, then defer to *fallback()* for a best-guess value.
    """
    val = get_config_var(var)
    if val is not None:
        return val == expected
    # Variable unset: optionally warn, then use the fallback heuristic.
    if warn:
        warnings.warn("Config variable '{0}' is unset, Python ABI tag may be incorrect".format(var), RuntimeWarning, 2)
    return fallback()
def main():
args = parse_arguments()
assert (args.n_jobs >= 1), '`n_jobs` must be a positive integer.'
args.output_dir.mkdir(exist_ok=True)
(args.output_dir / 'samples').mkdir(exist_ok=True)
for subdir in ('json', 'mid', 'png'):
((args.output_dir / 'samples') / subdir).mkdir(exist_ok=True)
... |
def save_checkpoint(args, trainer, epoch_itr, val_loss):
if (args.no_save or (not distributed_utils.is_master(args))):
return
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
checkpoint_conds = collections.OrderedDict()
checkpoint_conds[... |
def _batched(item_iter, size):
batch = []
for item in item_iter:
batch.append(item)
if (len(batch) == size):
(yield batch)
batch = [] |
def merge_cfg_from_file(cfg_filename, global_config):
    """Load a config file and merge its contents into *global_config* in place.

    The file is parsed by the project's ``load_cfg`` helper, wrapped in an
    ``AttrDict``, and merged via ``_merge_a_into_b``.
    """
    with open(cfg_filename, 'r') as cfg_file:
        file_cfg = AttrDict(load_cfg(cfg_file))
    _merge_a_into_b(file_cfg, global_config)
class AbstractSingleCrystalElement(Element):
def __lt__(self, other):
return False
def __hash__(self):
return hash(self.parent())
def __eq__(self, other):
if isinstance(other, AbstractSingleCrystalElement):
return (self.parent() is other.parent())
return False
... |
class GumbelVQ(VQModel):
def __init__(self, ddconfig, lossconfig, n_embed, embed_dim, temperature_scheduler_config, ckpt_path=None, ignore_keys=[], image_key='image', colorize_nlabels=None, monitor=None, kl_weight=1e-08, remap=None):
z_channels = ddconfig['z_channels']
super().__init__(ddconfig, los... |
class FinalFiniteAugmentedValuation(FiniteAugmentedValuation, FinalAugmentedValuation):
    """Augmented valuation that is simultaneously finite and final, combining
    both parent behaviors."""
    def __init__(self, parent, v, phi, mu):
        # Each base initializer is invoked explicitly with the same arguments,
        # bypassing the MRO-based super() chain.  NOTE(review): assumes neither
        # base __init__ cooperatively chains to the other — confirm in their
        # definitions.
        FiniteAugmentedValuation.__init__(self, parent, v, phi, mu)
        FinalAugmentedValuation.__init__(self, parent, v, phi, mu)
class CreateZeros(object):
def test_zeros0D(self):
h = np.zeros((), dtype=self._descr)
assert_((normalize_descr(self._descr) == h.dtype.descr))
assert_((h.dtype.fields['x'][0].name[:4] == 'void'))
assert_((h.dtype.fields['x'][0].char == 'V'))
assert_((h.dtype.fields['x'][0].t... |
class EgcZincNet(ZincNet):
def __init__(self, hidden_dim, num_graph_layers, in_feat_drop, residual, readout='mean', activation=nn.ReLU, heads=8, bases=4, softmax=False, sigmoid=False, hardtanh=False, aggrs=None):
assert (aggrs is not None)
self.heads = heads
self.bases = bases
self.s... |
def generate_prompt(category_name: str):
return f'''Q: What are useful visual features for distinguishing a lemur in a photo?
A: There are several useful visual features to tell there is a lemur in a photo:
- four-limbed primate
- black, grey, white, brown, or red-brown
- wet and hairless nose with curved nostrils
... |
def support_false_positive_count(m, m_hat):
    """Count support entries present in *m_hat* but absent from *m*.

    ``_nonzero_intersection`` is expected to return ``(nnz(m), nnz(m_hat),
    nnz of the shared support)``.  The division by 2 suggests symmetric
    matrices counted once per off-diagonal pair — TODO confirm against
    _nonzero_intersection.
    """
    _, m_hat_nnz, overlap_nnz = _nonzero_intersection(m, m_hat)
    return int((m_hat_nnz - overlap_nnz) / 2.0)
class DataCollatorWithPadding():
    """Placeholder emitted when PyTorch is unavailable.

    Instantiation calls ``requires_pytorch``, which raises an informative
    error instead of an opaque ImportError at import time.
    """
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.