code stringlengths 101 5.91M |
|---|
def main():
args = parser.parse_args()
num_classes = 1000
model = model_factory.create_model(args.model, num_classes=num_classes, pretrained=args.pretrained, test_time_pool=args.test_time_pool)
if (args.restore_checkpoint and os.path.isfile(args.restore_checkpoint)):
print("=> loading checkpoint... |
('/direct')
def direct():
    """Flask view: compile a caller-supplied regex and render it.

    SECURITY NOTE(review): `pattern` comes straight from the query string.
    re.compile on untrusted input can raise re.error, and a crafted pattern
    enables ReDoS once it is matched — validate or sandbox before use.
    """
    raw_pattern = request.args.get('pattern')
    compiled = re.compile(raw_pattern)
    return render_template('direct.html', pattern=compiled)
def array_processing_vis(t, clip_max=2000):
    """Map an array to uint8 [0, 255] for visualization.

    Values are clipped above at ``clip_max`` (the lower clip bound is the
    array's own nanmin, so it is a no-op), shifted by the nanmin of the
    clipped array, divided by its nanmax, NaNs replaced by 0, then scaled
    to byte range.
    """
    clipped = np.clip(t, np.nanmin(t), clip_max)
    lo = np.nanmin(clipped)
    hi = np.nanmax(clipped)
    normalized = np.nan_to_num((clipped - lo) / hi)
    return (normalized * 255).astype(np.uint8)
def test_forms():
form = ak.forms.NumpyForm('float64')
assert (form == form)
assert (pickle.loads(pickle.dumps(form, (- 1))) == form)
assert (ak.forms.from_json(form.to_json()) == form)
assert (form.inner_shape == ())
assert (form.itemsize == 8)
assert (form.primitive == 'float64')
asser... |
(scope='session')
def warning_calls():
base = Path(scipy.__file__).parent
bad_filters = []
bad_stacklevels = []
for path in base.rglob('*.py'):
with tokenize.open(str(path)) as file:
tree = ast.parse(file.read(), filename=str(path))
finder = FindFuncs(path.relative_to(bas... |
def normalize_dataset(env_name, dataset):
    """Rescale dataset rewards for training.

    Antmaze tasks get a constant -1 shift of the rewards; every other task
    divides rewards by a normalization factor computed from the dataset.
    """
    if 'antmaze' in env_name:
        # Sparse-reward maze tasks: shift rewards down by one.
        return dataset.copy({'rewards': dataset['rewards'] - 1.0})
    factor = get_normalization(dataset)
    return dataset.copy({'rewards': dataset['rewards'] / factor})
(nopython=True)
def _rouge_submodular(candidate_indices, rouge_vals):
best = (- 1)
best_val = (- numpy.inf)
for i in candidate_indices:
my_sum = rouge_vals[i]
if (my_sum > best_val):
best = i
best_val = my_sum
return (best, best_val) |
def noise_like(shape, device, repeat=False):
    """Sample standard Gaussian noise of `shape` on `device`.

    With repeat=True a single (1, *shape[1:]) draw is tiled along the first
    dimension, so every batch element shares identical noise.
    """
    if repeat:
        single = torch.randn((1, *shape[1:]), device=device)
        tile = (shape[0],) + (1,) * (len(shape) - 1)
        return single.repeat(*tile)
    return torch.randn(shape, device=device)
_processor('bert_tokenizer')
class BertTokenizer(MaskedTokenProcessor):
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self._probability = config.get('mask_probability', 0)
def __call__(self, item: Dict[(str, Any)]):
if ('text' in item):
... |
def antecedent_is_object(anaphor, antecedent):
return ((anaphor.attributes['type'] == 'PRO') and (anaphor.attributes['citation_form'] in ['he', 'she', 'it', 'they']) and ((antecedent.attributes['type'] != 'PRO') or (antecedent.attributes['citation_form'] in ['he', 'she', 'it', 'they'])) and (antecedent.attributes['... |
def build_vgg19(input, model_filepath, reuse=False):
with tf.variable_scope('vgg', reuse=reuse):
net = {}
input = tf.cast(input, tf.float32)
import scipy.io as sio
vgg_rawnet = sio.loadmat(model_filepath)
vgg_layers = vgg_rawnet['layers'][0]
imagenet_mean = tf.constan... |
def run_HF_check(recipe_folder='tests/recipes', field='HF_repo', output_folder='tests/tmp'):
HF_repos = repo_list(recipe_folder, field)
os.makedirs(output_folder, exist_ok=True)
os.chdir(output_folder)
check = True
for (i, repo) in enumerate(HF_repos):
print(('(%i/%i) Checking %s...' % ((i +... |
def top_sources_male(args: Dict[(str, Any)]) -> List[object]:
query = [{'$match': {'body': {'$ne': ''}, 'quotesUpdated': {'$exists': True}, 'outlet': {'$in': args['outlets']}, 'publishedAt': {'$gte': args['begin_date'], '$lt': (args['end_date'] + timedelta(days=1))}}}, {'$project': {'outlet': 1.0, 'sourcesMale': 1.... |
def instances2dict_with_polygons(imageFileList, verbose=False):
imgCount = 0
instanceDict = {}
if (not isinstance(imageFileList, list)):
imageFileList = [imageFileList]
if verbose:
print('Processing {} images...'.format(len(imageFileList)))
for imageFileName in imageFileList:
... |
class ExplanationError():
def __init__(self, masker, model, *model_args, batch_size=500, num_permutations=10, link=links.identity, linearize_link=True, seed=38923):
self.masker = masker
self.model = model
self.model_args = model_args
self.num_permutations = num_permutations
s... |
def scale_boxes(boxes: np.ndarray, h_image: int, w_image: int, h_model: int, w_model: int, preserve_aspect_ratio: bool) -> np.ndarray:
(deltaH, deltaW) = (0, 0)
(H, W) = (h_model, w_model)
(scale_H, scale_W) = ((h_image / H), (w_image / W))
if preserve_aspect_ratio:
scale_H = scale_W = max((h_im... |
def xavier_normal_param_init_fn_(module: nn.Module, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[(int, float, str, bool)]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[(Tuple[(float, float)], float)]]=None, init_gain: float=0, verbose: int=0, **kwargs):
xa... |
def pointnet_sa_module(xyz, points, npoint, radius, nsample, mlp, mlp2, group_all, is_training, bn_decay, scope, bn=True, pooling='max', knn=False, use_xyz=True, use_nchw=False, bn2=False):
data_format = ('NCHW' if use_nchw else 'NHWC')
with tf.variable_scope(scope) as sc:
if group_all:
nsam... |
class ZFilter():
def __init__(self, shape, demean=True, destd=True, clip=10.0):
self.demean = demean
self.destd = destd
self.clip = clip
self.rs = RunningStat(shape)
self.fix = False
def __call__(self, x, update=True):
if (update and (not self.fix)):
s... |
class ThroughputSolverRON(ThroughputSolver):
def solve(self, p: ThroughputProblem) -> ThroughputSolution:
regions = self.get_regions()
best_throughput = self.get_path_throughput(p.src, p.dst)
best_path = [p.src, p.dst]
for inter in regions:
if ((inter == p.src) or (inter ... |
class AntMazeEnv(MazeEnv):
    # Maze environment driven by the quadruped Ant robot model.
    MODEL_CLASS = AntEnv  # robot class plugged into the maze scaffolding
    # Index of the orientation entry in the observation vector
    # (presumably the ant's yaw — confirm against AntEnv's obs layout).
    ORI_IND = 6
    MAZE_HEIGHT = 2  # wall height used by the maze builder
    MAZE_SIZE_SCALING = 3.0  # world units per maze grid cell
def parse_config_file(file_dir):
    """Load a YAML configuration file and return the parsed contents."""
    with open(file_dir, 'r') as handle:
        return yaml.safe_load(handle)
class Residual_Block(Module):
def __init__(self, input_nc, output_nc):
super(Residual_Block, self).__init__()
activation = nn.ReLU(True)
self.left = nn.Sequential(*[spectral_norm(nn.Conv2d(input_nc, output_nc, 1, 1, padding=0, bias=False)), nn.InstanceNorm2d(output_nc, affine=True), activati... |
class FcmpInst(FastMathInst):
__slots__ = ('pred', 'x', 'y', 'ty', 'flags', 'name')
    def __init__(self, pred, arg1, arg2, ty=None, flags=(), name=''):
        """Floating-point compare instruction.

        pred: comparison predicate; arg1/arg2: the two operands (stored as
        self.x / self.y); ty: optional result type; flags: fast-math flags;
        name: value name for the instruction.
        """
        self.pred = pred
        self.ty = ty
        self.flags = flags
        self.x = arg1
        self.y = arg2
        self.name = name
def args(self):
... |
def tia_stretch(src, segment=4, scale=1):
(img_h, img_w) = src.shape[:2]
cut = (img_w // segment)
thresh = (((cut * 4) // 5) + 1)
half_thresh = (thresh * scale)
(mean, std) = cv2.meanStdDev(src)
src = cv2.copyMakeBorder(src, 0, 0, int((half_thresh * 0.25)), 0, cv2.BORDER_CONSTANT, value=np.mean(... |
def train(model, trainloader, valloader, n_epochs, optimizer=None, lr=0.001, scheduler=None, criterion=nn.CrossEntropyLoss(), device=torch.device(('cuda' if torch.cuda.is_available() else 'cpu')), save_path='trained_model.pt'):
model.to(device)
criterion.to(device)
if (optimizer == None):
optimizer ... |
def main(args, override_args=None):
utils.import_user_module(args)
assert ((args.max_tokens is not None) or (args.max_sentences is not None)), 'Must specify batch size either with --max-tokens or --max-sentences'
use_fp16 = args.fp16
use_cuda = (torch.cuda.is_available() and (not args.cpu))
if (over... |
def ConsonniTodeschiniII_calc(TP, FP, FN, TN):
    """Consonni & Todeschini similarity II.

    CT2 = (ln(1 + n) - ln(1 + FP + FN)) / ln(1 + n), with n = TP+FP+FN+TN.
    Returns the string 'None' when the value is undefined (e.g. n == 0
    makes the denominator zero), matching the module's error convention.
    """
    try:
        n = TP + FP + FN + TN
        log_n = math.log(1 + n)
        return (log_n - math.log(1 + FP + FN)) / log_n
    except Exception:
        return 'None'
def _build_tensor(size, value=None, dtype=torch.float):
if (value is None):
value = size
return torch.empty(size, size, size, dtype=dtype).fill_(value) |
class TestScriptModuleFromString(TestScriptModule):
def _createFeedModule(self):
workspace.RunOperatorOnce(core.CreateOperator('ScriptModuleLoad', [], ['m'], serialized_binary=self._get_modules_bytes(MyModule())))
def _get_modules_bytes(self, the_module):
import io
buffer = io.BytesIO()
... |
class ResourceContainer(ResourceBase):
    # Resource that can hold child resources (see `resources` below).
    is_container = True
    # NOTE(review): `_property` looks like a mangled `@property` decorator
    # from the original source — confirm before relying on attribute access.
    _property
    def resources(self):
        # Delegate child lookup to the finder that owns this resource.
        return self.finder.get_resources(self)
def test_batchdistributedsampler_indices(batch_size: int=128, n_batches: int=3, num_replicas: int=2):
adata = scvi.data.synthetic_iid(batch_size=batch_size, n_batches=n_batches)
manager = generic_setup_adata_manager(adata)
dataset = manager.create_torch_dataset()
samplers = [BatchDistributedSampler(data... |
def main(config):
device = torch.device(('cuda' if config.is_gpu else 'cpu'))
print(('using ' + str(device)))
model = UGCVQA_FR_model.ResNet50()
model = torch.nn.DataParallel(model)
model.load_state_dict(torch.load('ckpts/UGCVQA_FR_model.pth', map_location=device))
if (config.method_name == 'sin... |
def is_pruned(layer):
    """Return True when `layer` exposes a `mask` attribute (i.e. was pruned).

    hasattr performs the same attribute access as the original try/except
    and swallows exactly AttributeError, so behavior is unchanged.
    """
    return hasattr(layer, 'mask')
def read_matroska_number(f, unmodified=False, signed=False):
if (unmodified and signed):
raise Exception('Contradictary arguments')
first_byte = f.read(1)
if (first_byte == ''):
raise StopIteration
r = ord(first_byte)
(n, r2) = get_major_bit_number(r)
if (not unmodified):
... |
_spec_function('relational_understanding')
def get_relational_understanding_spec(run_human_eval: bool=False) -> RunSpec:
scenario_spec = ScenarioSpec(class_name='helm.benchmark.scenarios.image_generation.relational_understanding_scenario.RelationalUnderstandingScenario', args={})
adapter_spec = get_image_genera... |
def get_manager_distributed(train_data, val_data, controller, model_space, wd, data_description, verbose=0, devices=None, train_data_kwargs=None, validate_data_kwargs=None, **kwargs):
reward_fn = LossAucReward(method='auc')
input_node = State('input', shape=(1000, 4), name='input', dtype='float32')
output_n... |
def test_f1_macro_2d_np_array():
    # Regression test: macro-averaged F1 over two ragged multi-label rows
    # supplied as 2-D numpy arrays. The expected value 0.4285714 (= 3/7)
    # was presumably derived from the project's f1() implementation on
    # these inputs — verify against its documentation if it changes.
    y_true = np.array([[1, 2, 3, 4], [1, 2, 5, 6]])
    y_pred = np.array([[1, 5, 6], [1, 2, 3]])
    assert (0.4285714 == approx(f1(y_true, y_pred, 'macro')))
def captured_output(stream_name):
    """Temporarily replace sys.<stream_name> with a StreamWrapper.

    Generator body (presumably wrapped by @contextlib.contextmanager at the
    definition site — decorator not visible here): yields the wrapped stream
    and always restores the original on exit.
    """
    original = getattr(sys, stream_name)
    setattr(sys, stream_name, StreamWrapper.from_stream(original))
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, original)
def generate_configs(**configs):
assert ('sample_func' in configs), 'Missing sample_func to generat configs'
result = []
for (key, values) in configs.items():
if (key == 'sample_func'):
continue
tmp_result = []
for value in values:
tmp_result.append({key: valu... |
def test_schema_changeable(datadir, monkeypatch, self_restoring_schema_globals):
monkeypatch.setattr(pyhf.schema.variables, 'schemas', pyhf.schema.variables.schemas, raising=True)
(old_path, old_cache) = self_restoring_schema_globals
new_path = (datadir / 'customschema')
with pytest.raises(pyhf.exceptio... |
def stochastic_forcing_eig(mobility, factor=1.0, z=None):
(eig_values, eig_vectors) = np.linalg.eigh(mobility)
eig_values_sqrt = np.array([(np.sqrt(x) if (x > 0) else 0) for x in eig_values])
if (z is None):
eig_values_sqrt *= np.random.normal(0.0, 1.0, len(mobility))
else:
eig_values_sq... |
class ELogS():
def __init__(self):
self.aromatic_query = Chem.MolFromSmarts('a')
self.Descriptor = namedtuple('Descriptor', 'mw logp rotors ap')
def calc_ap(self, mol):
matches = mol.GetSubstructMatches(self.aromatic_query)
return (len(matches) / mol.GetNumAtoms())
def calc_e... |
('ReLU')
def TranslateRelu(layer, pretrained_blobs, is_test, **kwargs):
    """Translate a ReLU layer into a 'Relu' op with no parameter blobs."""
    op = BaseTranslate(layer, 'Relu')
    return (op, [])
class AlbertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ALBERT-style models."""

    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Each model input shares the same dynamic axes:
        # axis 0 = batch, axis 1 = sequence length.
        names = ('input_ids', 'attention_mask', 'token_type_ids')
        return OrderedDict((name, {0: 'batch', 1: 'sequence'}) for name in names)
def construct_graph(base_path, indexified_files):
(ent_in, ent_out) = (defaultdict((lambda : defaultdict(set))), defaultdict((lambda : defaultdict(set))))
for indexified_p in indexified_files:
with open(osp.join(base_path, indexified_p)) as f:
for (i, line) in enumerate(f):
i... |
def get_links(response: GenericResponse, operation: APIOperation, field: str) -> Sequence[Link]:
responses = operation.definition.resolved['responses']
if (str(response.status_code) in responses):
response_definition = responses[str(response.status_code)]
elif (response.status_code in responses):
... |
def test_patchset_verify(datadir):
with open(datadir.joinpath('example_patchset.json'), encoding='utf-8') as patch_file:
patchset = pyhf.PatchSet(json.load(patch_file))
with open(datadir.joinpath('example_bkgonly.json'), encoding='utf-8') as ws_file:
ws = pyhf.Workspace(json.load(ws_file))
a... |
def numba_test_func(x):
    """Return the first two entries of a length-3 vector as a float array.

    Accepts shape (3,) or (3, 1); raises IndexError for anything else.
    """
    shape_ok = (x.shape == (3, 1)) or (x.shape == (3,))
    if not shape_ok:
        raise IndexError('x is expected to have shape (3, 1) or (3,)')
    column = x.reshape((3, 1))
    out = numpy.zeros(2)
    out[0] = column[0, 0]
    out[1] = column[1, 0]
    return out
def conv1d(ni: int, no: int, ks: int=1, stride: int=1, padding: int=0, bias: bool=False):
    """Build a spectral-normalized 1-D convolution layer.

    Weights are initialized with Kaiming-normal; when `bias` is True the
    bias tensor is zeroed before spectral normalization is applied.
    """
    layer = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias)
    nn.init.kaiming_normal_(layer.weight)
    if bias:
        layer.bias.data.zero_()
    return spectral_norm(layer)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config_path', type=str, default='configs/base.yml')
parser.add_argument('--gpu', '-g', type=int, default=0)
parser.add_argument('--results_dir', type=str, default='./results/gans')
parser.add_argument('--snapshot', type=str, defau... |
def Rotate2D(pts, cnt, ang=(np.pi / 4)):
    """Rotate point(s) `pts` about center `cnt` by `ang` radians.

    Right-multiplies by [[cos, sin], [-sin, cos]], which maps the unit
    vector (1, 0) to (cos ang, sin ang) relative to `cnt`.
    """
    rot = np.array([[np.cos(ang), np.sin(ang)],
                    [-np.sin(ang), np.cos(ang)]])
    return np.dot(pts - cnt, rot) + cnt
class roi_Xconv1fc_head(nn.Module):
def __init__(self, dim_in, roi_xform_func, spatial_scale):
super().__init__()
self.dim_in = dim_in
self.roi_xform = roi_xform_func
self.spatial_scale = spatial_scale
hidden_dim = cfg.FAST_RCNN.CONV_HEAD_DIM
module_list = []
... |
class ConstTree(object):
    """Binary tree node with optional left/right children."""

    def __init__(self):
        self.left = None
        self.right = None

    def size(self):
        """Return the number of nodes in the subtree rooted at this node.

        Bug fix: the original did ``self.size = 1``, replacing the bound
        method with an int, so any second call to ``size()`` raised
        TypeError ('int' object is not callable). The running count now
        lives in a local variable, keeping the method callable and the
        computation side-effect free.
        """
        count = 1
        if self.left is not None:
            count += self.left.size()
        if self.right is not None:
            count += self.right.size()
        return count
def load_object_placing(file_name='../../resources/object_script_placing.json'):
    """Load the object-placing JSON resource.

    `file_name` is resolved relative to this module's directory; an
    absolute path is used as-is (os.path.join then discards the left part).
    """
    module_dir = os.path.dirname(os.path.abspath(__file__))
    full_path = os.path.join(module_dir, file_name)
    with open(full_path, 'r') as handle:
        return json.load(handle)
def test_L3EthStarAttackDoubleAp():
topo = L3EthStar(add_attacker=True)
net = Mininet(topo=topo, link=TCLink, controller=None, listenPort=OF_MISC['switch_debug_port'])
net.addController('c0', controller=RemoteController, ip='127.0.0.1', port=OF_MISC['controller_port'])
net.start()
(plc1, attacker, h... |
class MSBlending(nn.Module):
def __init__(self, n_pyramids: int=3, n_feats: int=16, kernel_size: int=3, depth: int=6) -> None:
super().__init__()
self.ms_feature = ContentExtractor(n_pyramids=n_pyramids, n_feats=64, kernel_size=kernel_size, depth=depth)
n_feats_ex = (n_pyramids * n_feats)
... |
def log_bernoulli(x, mean, average=False, reduce=True, dim=None):
probs = torch.clamp(mean, min=MIN_EPSILON, max=MAX_EPSILON)
log_bern = ((x * torch.log(probs)) + ((1.0 - x) * torch.log((1.0 - probs))))
if reduce:
if average:
return torch.mean(log_bern, dim)
else:
ret... |
def preprocess(visual, audio):
(data, fps) = audio
if torch.is_tensor(data):
data = data.numpy()
if (data.shape[0] == 0):
print('To short a video (< 1 min). Skipping the video.')
preprocessed = None
else:
try:
preprocessed = _preprocess(data, fps)
exce... |
class MgpstrA3Module(nn.Module):
def __init__(self, config: MgpstrConfig):
super().__init__()
self.token_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.tokenLearner = nn.Sequential(nn.Conv2d(config.hidden_size, config.hidden_size, kernel_size=(1, 1), stride=1, groups... |
def test_non_existing_attribute():
    # Accessing an attribute that the proxied object (42) does not have must
    # raise AttributeError, and the failed access should still be recorded
    # in the proxy's usage-trace children.
    proxy = tt.ObjectProxy(42)
    with pytest.raises(AttributeError):
        proxy.foo()
    assert ('foo' in tt.UsageTraceNode.from_proxy(proxy).children)
def get_transforms(transform_variant, out_size, easy=False):
assert (transform_variant == 'distortions')
if (transform_variant == 'default'):
transform = A.Compose([A.RandomScale(scale_limit=0.2), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.RandomCrop(height=out_size, width=out_size), A.Ho... |
def infer_beam_search_lm(files, asr_model, beam_search_lm):
hyps = []
logits = torch.tensor(asr_model.transcribe(files, batch_size=20, logprobs=True))
log_probs_length = torch.tensor([logit.shape[0] for logit in logits])
logits_tensor = torch.nn.utils.rnn.pad_sequence(logits, batch_first=True)
for j... |
def cf(filename):
    """Resolve `filename` through the external `cf` tool, memoized in _cf_cache.

    The resolved local path is asserted to exist before being cached and
    returned.
    """
    try:
        return _cf_cache[filename]
    except KeyError:
        pass
    resolved = check_output(['cf', filename]).strip().decode('utf8')
    assert os.path.exists(resolved)
    _cf_cache[filename] = resolved
    return resolved
def test_is_better():
    # A candidate pair with a higher fitness value (1.0) must be judged
    # better than the current pair with the lower value (0.9).
    better = MIOPopulationPair(1.0, MagicMock())
    worse = MIOPopulationPair(0.9, MagicMock())
    assert (MIOPopulation._is_pair_better_than_current(worse, better) is True)
def resnet_v1_200(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, reuse=None, scope='resnet_v1_200'):
blocks = [resnet_utils.Block('block1', bottleneck, (([(256, 64, 1)] * 2) + [(256, 64, 2)])), resnet_utils.Block('block2', bottleneck, (([(512, 128, 1)] * 23) + [(512, 128, 2)])), r... |
def test_with_bert_finetune(pretrain_file, tmp_path):
trainer = run_training(pretrain_file, tmp_path, '--bert_model', 'hf-internal-testing/tiny-bert', '--bert_finetune')
model_file = os.path.join(trainer.args['save_dir'], trainer.args['save_name'])
assert model_file_has_bert(model_file)
foo_save_filenam... |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--macro_eval', action='store_true')
args = add_args(parser)
logger.info(args)
set_dist(args)
set_seed(args)
(config, model, tokenizer) = build_or_load_gen_model(args)
model.to(args.device)
if (args.n_gpu > 1):
... |
('model_name', models.__all__)
('output_model', output_modules.__all__)
def test_forward_output_modules(model_name, output_model):
(z, pos, batch) = create_example_batch()
args = load_example_args(model_name, remove_prior=True, output_model=output_model)
model = create_model(args)
model(z, pos, batch=ba... |
def check_matmul(x, y):
assert_tensor(x, f'left hand side is not a matrix: {type(x)}')
assert_tensor(y, f'right hand side is not a matrix: {type(y)}')
x_shape = x.get_shape()
y_shape = y.get_shape()
if (len(x_shape) == 1):
if (len(y_shape) == 1):
return (True, None)
if (x... |
def convert_roberta_checkpoint_to_tf(roberta_checkpoint_path, ckpt_dir, model_name):
roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
roberta.eval()
config = BertConfig(vocab_size_or_config_json_file=50265, hidden_size=roberta.args.encoder_embed_dim, num_hidden_layers=roberta.args.enco... |
def generate_module_header(module):
if module.is_built_in:
return
print(f"processing module '{module.name}'")
assert re.match('taichi/\\w+.h', module.name)
module_name = module.name[len('taichi/'):(- len('.h'))]
path = f'c_api/unity/{module_name}.cs'
with open(path, 'w') as f:
f.... |
class ParallelGatedMlp(nn.Module):
def __init__(self, in_features, process_group, hidden_features=None, out_features=None, activation=F.sigmoid, bias1=True, bias2=True, multiple_of=256, sequence_parallel=True, device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
super().__in... |
def update_moving_avg(avg_so_far, new_val, n):
    """Return the running mean after folding in the n-th observation.

    Computes avg_so_far * (n - 1)/n + new_val/n (incremental mean update).
    """
    denom = float(n)
    return (avg_so_far * (n - 1)) / denom + new_val / denom
class _DistributedDataParallelC10d(Module):
def __init__(self, module, process_group, device_ids=None, output_device=None, dim=0, broadcast_buffers=True, bucket_cap_mb=25):
super(_DistributedDataParallelC10d, self).__init__()
if (device_ids is None):
device_ids = list(range(torch.cuda.de... |
class HyperParameterStudy():
def __init__(self, rel_threshold=1):
self.trials = {}
self.ks = set()
self.rel_threshold = rel_threshold
def add_trials(self, obj):
self.ks.update(set(obj.results[0]['test_results'].keys()))
name = obj.results[0]['params']['name'].split('_')[0... |
class ReformerModel():
    # Dummy placeholder used when PyTorch is not installed: every entry
    # point just calls requires_pytorch, which presumably raises an
    # informative import error — confirm requires_pytorch's behavior.
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): on real models from_pretrained is a classmethod;
        # here it stays an instance method since the stub only ever raises.
        requires_pytorch(self)
def register_Ns3MmWaveMacSchedSapProviderSchedDlCqiInfoReqParameters_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::MmWaveMacSchedSapProvider::SchedDlCqiInfoReqParameters const &', 'arg0')])
cls.add_instance_attribute('m_cqiList', 'std::vector< ns3::DlCqiInfo >', is_cons... |
class VariableTimeStepper(TimeStepper):
def from_conf(conf):
return VariableTimeStepper(conf.t0, conf.t1, dt=conf.dt, n_step=conf.n_step, is_quasistatic=conf.quasistatic)
def set_from_data(self, t0, t1, dt=None, n_step=None, step=None):
(self.t0, self.t1) = (t0, t1)
self.dtime = (self.t1... |
.skipif(IS_PYPY, reason='Test not meaningful on PyPy')
def test_assert_deallocated_circular2():
    # An object holding a reference to itself forms a reference cycle, so it
    # is not freed by reference counting alone; assert_deallocated must
    # detect the surviving instance and raise ReferenceError.
    class C(object):
        def __init__(self):
            self._circular = self
    with pytest.raises(ReferenceError):
        with assert_deallocated(C):
            pass
def vgg16_mura_model():
    """Fetch (or reuse cached) VGG16-MURA weights and load the Keras model."""
    weights_file = get_file('tf_keras_vgg16_mura_model.h5',
                            WEIGHTS_PATH_VGG16_MURA,
                            cache_subdir='models')
    return load_model(weights_file)
def load_config(config_path):
    """Import `config_path` as a module and return the result of its config()."""
    cfg_module = importlib.import_module(config_path)
    return cfg_module.config()
def build_combined_dataset(base_output_path, short_name):
convert_ontonotes_file(os.path.join(base_output_path, 'en_ontonotes.train.json'), short_name)
convert_ontonotes_file(os.path.join(base_output_path, 'en_ontonotes.dev.json'), short_name)
convert_ontonotes_file(os.path.join(base_output_path, 'en_ontono... |
def register_Ns3UdpSocket_methods(root_module, cls):
cls.add_constructor([param('ns3::UdpSocket const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('MulticastJoinGroup', 'int', [param('uint32_t', 'interface'), param('ns3::Address cons... |
def register_Ns3ConstantRateWifiManager_methods(root_module, cls):
cls.add_constructor([param('ns3::ConstantRateWifiManager const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('DoCreateStation', 'ns3::WifiRemoteStation *', [], is_cons... |
class SawyerDrawerCloseV2Policy(Policy):
_fully_parsed
def _parse_obs(obs):
return {'hand_pos': obs[:3], 'drwr_pos': obs[3:6], 'unused_info': obs[6:]}
def get_action(self, obs):
o_d = self._parse_obs(obs)
action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
action['... |
def main(arguments):
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', help='Path of the file containing the parameters of the experiment', type=str, default='cfg/a.json')
args = parser.parse_args(arguments)
cfg_file =... |
class HyperplaneArrangements(Parent, UniqueRepresentation):
Element = HyperplaneArrangementElement
def __init__(self, base_ring, names=tuple()):
from sage.categories.sets_cat import Sets
from sage.rings.ring import _Fields
if (base_ring not in _Fields):
raise ValueError('base... |
def main():
lfw_dataroot = args.lfw
model_path = args.model_path
far_target = args.far_target
batch_size = args.batch_size
flag_gpu_available = torch.cuda.is_available()
if flag_gpu_available:
device = torch.device('cuda')
print('Using GPU')
else:
device = torch.devic... |
def get_next_sentence(file):
sent = ''
while True:
line = file.readline().strip()
if (line == ''):
break
sent = (' ' + line)
sent = sent.strip()
sentence_return = Sentence(sent)
assert file.readline().strip().startswith('Tokens'), 'parsing error tokens'
to... |
class D3NetOpenVinoWrapper(object):
def __init__(self, args, source):
if (not openvino_enabled):
raise ValueError('Failed to import openvino! Please make sure you have installed openvino.')
weight = os.path.join(args.model_dir, (source + '.onnx'))
if (not os.path.exists(weight)):... |
class Coco2017Cfg(CocoCfg):
variant: str = '2017'
splits: Dict[(str, dict)] = field(default_factory=(lambda : dict(train=dict(ann_filename='annotations/instances_train2017.json', img_dir='train2017', has_labels=True), val=dict(ann_filename='annotations/instances_val2017.json', img_dir='val2017', has_labels=True... |
def gen_session_list_dsin(uid, t):
t.sort_values('time_stamp', inplace=True, ascending=True)
last_time =
session_list = []
session = []
for row in t.iterrows():
time_stamp = row[1]['time_stamp']
delta = (time_stamp - last_time)
cate_id = row[1]['cate']
brand = row[1]... |
class Timer():
def __init__(self):
self.reset()
def reset(self):
self._start = perf_counter()
self._paused: Optional[float] = None
self._total_paused = 0
self._count_start = 1
def pause(self):
if (self._paused is not None):
raise ValueError('Trying... |
class TAAConv2d(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, padding, num_past_frames, dk, dv, Nh, width, height, attention_input_mode, relative=True):
super(TAAConv2d, self).__init__()
self.in_channels = input_channels
self.out_channels = output_channels
... |
def binary_round(x):
    # Straight-through estimator for rounding: the forward pass is a plain
    # tf.round, but the gradient override maps 'Round' -> 'Identity', so
    # gradients flow through unchanged instead of being zero everywhere.
    g = tf.get_default_graph()
    with ops.name_scope('BinaryRound') as name:
        with g.gradient_override_map({'Round': 'Identity'}):
            return tf.round(x, name=name)
def skyline_input_provider(batch_size=16):
    """Return a CUDA (images, labels) batch for profiling.

    Images are random (batch, 3, 224, 224) floats; labels are random class
    indices in [0, 1000). Requires a CUDA device.
    """
    images = torch.randn((batch_size, 3, 224, 224)).cuda()
    labels = torch.randint(low=0, high=1000, size=(batch_size,)).cuda()
    return (images, labels)
def mlp(inputs, layer_sizes, nonlinearity=tf.nn.relu, output_nonlinearity=tf.nn.tanh, W_initializer=None, b_initializer=None):
if (type(inputs) is tf.Tensor):
inputs = [inputs]
squeeze_output = False
if (layer_sizes[(- 1)] is None):
squeeze_output = True
layer_sizes = list(layer_size... |
def check_clusterings(labels_true, labels_pred):
labels_true = check_array(labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None)
labels_pred = check_array(labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None)
type_label = type_of_target(labels_true)
type_pred = type_of_target(labels_... |
class ArchSearchConfig():
def __init__(self, arch_init_type, arch_init_ratio, arch_opt_type, arch_lr, arch_opt_param, arch_weight_decay, target_hardware, ref_value):
self.arch_init_type = arch_init_type
self.arch_init_ratio = arch_init_ratio
self.opt_type = arch_opt_type
self.lr = ar... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.