code stringlengths 101 5.91M |
|---|
def match_sentences(con_tree_map, con_vit_ngrams, dep_sentences, split_name, debug_sentence=None):
con_to_dep_matches = {}
dep_ngram_map = build_ngrams(dep_sentences, DEP_PROCESS_FUNC, DEP_ID_FUNC)
unmatched = 0
bad_match = 0
for sentence in dep_sentences:
sentence_ngrams = extract_ngrams(se... |
class ComputeNormForBlobs(NetModifier):
def __init__(self, blobs, logging_frequency, p=2, compute_averaged_norm=False, row_index=None):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._p = p
self._compute_averaged_norm = compute_averaged_norm
self._field_... |
def _validate_output_list_for_rank(my_rank, dst, gather_list):
if (dst == my_rank):
if (not gather_list):
raise ValueError('Argument ``gather_list`` must be specified on destination rank.')
elif gather_list:
raise ValueError('Argument ``gather_list`` must NOT be specified on non-dest... |
class SurfaceDiceOverlap(DistanceMetric):
def __init__(self, tolerance: float=1, metric: str='SURFDICE'):
super().__init__(metric)
self.tolerance = tolerance
def calculate(self):
if (self.distances.surfel_areas_pred is None):
warnings.warn('Unable to compute surface Dice coef... |
def p_with_gil(s):
    """Parse an optional 'with gil' clause; return 1 if present, else 0."""
    if s.sy != 'with':
        return 0
    # Consume 'with' and require the following keyword to be 'gil'.
    s.next()
    s.expect_keyword('gil')
    return 1
def simple_accuracy(preds, labels):
    """Fraction of predictions equal to labels (deprecated entry point)."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_sklearn(simple_accuracy)
    correct = (preds == labels)
    return correct.mean()
def job_mmd_opt(p, data_source, tr, te, r):
data = (tr + te)
X = data.data()
with util.ContextTimer() as t:
pds = p.get_datasource()
datY = pds.sample(data.sample_size(), seed=(r + 294))
Y = datY.data()
XY = np.vstack((X, Y))
med = util.meddistance(XY, subsample=1000)... |
class NominalConversor():
def __init__(self, values):
self.values = set(values)
self.zero_value = values[0]
def __call__(self, value):
if (value not in self.values):
if (value == 0):
return self.zero_value
raise BadNominalValue(value)
retur... |
class Notation(AstNode):
NOTATION_SPECS = {'#djinni': NotationSpec(allowed={'enum'}, properties=[NotationSpecProperty(name='idl_name', type='string')]), '#protobuf': NotationSpec(allowed={'enum', 'struct'}, properties=[NotationSpecProperty(name='typename', type='string'), NotationSpecProperty(name='filename', type=... |
def test_meta_estimated_rewards_by_reg_model_inputs(synthetic_bandit_feedback: BanditFeedback) -> None:
kdr = KernelizedDoublyRobust(kernel='cosine', bandwidth=0.1)
ope_ = ContinuousOffPolicyEvaluation(bandit_feedback=synthetic_bandit_feedback, ope_estimators=[kdr])
action_by_evaluation_policy = np.zeros((s... |
('/benchmark_output/<filename:path>')
def serve_benchmark_output(filename):
    """Serve a benchmark output file with client-side caching disabled."""
    resp = static_file(filename, root=app.config['helm.outputpath'])
    # Force clients to re-fetch the file on every request.
    for header, value in (('Cache-Control', 'no-cache, no-store, must-revalidate'),
                          ('Expires', '0')):
        resp.set_header(header, value)
    return resp
.parametrize('seed', [412])
.parametrize('batch_size', [2, 16])
.parametrize('grid_size', [2, 8])
.parametrize('feature_size', [4])
.parametrize('m, M', [((- 1), 1)])
def test_query_on_triline_forward_backward(seed, batch_size, grid_size, feature_size, m, M):
nn.clear_parameters()
ctx = get_extension_context('c... |
class MovieLens20M(DatasetLoader):
    """Loader for the MovieLens-20M ratings CSV (user/item/time columns)."""

    def __init__(self, data_dir):
        # The ratings live in a single CSV inside the dataset directory.
        self.fpath = os.path.join(data_dir, 'ratings.csv')

    def load(self):
        """Read the ratings file, keeping only user, item and time."""
        columns = ['user', 'item', 'rate', 'time']
        return pd.read_csv(self.fpath, sep=',', names=columns,
                           usecols=['user', 'item', 'time'], skiprows=1)
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
logger.info(f'Loading model based on config from {config_path}...')
config = BertConfig.from_json_file(config_path)
model = BertModel(config)
logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...')... |
def register_Ns3TimeProbe_methods(root_module, cls):
cls.add_constructor([param('ns3::TimeProbe const &', 'arg0')])
cls.add_constructor([])
cls.add_method('ConnectByObject', 'bool', [param('std::string', 'traceSource'), param('ns3::Ptr< ns3::Object >', 'obj')], is_virtual=True)
cls.add_method('ConnectBy... |
class ImageFolder(DatasetFolder):
def __init__(self, root: str, transform: Optional[Callable]=None, target_transform: Optional[Callable]=None, loader: Callable[([str], Any)]=default_loader, is_valid_file: Optional[Callable[([str], bool)]]=None, class_num=10):
super(ImageFolder, self).__init__(root, loader, ... |
def proxify(log_arg_types=False, no_wrap_return=False):
def wrap(function):
def wrapped(*args, **kwargs):
self = args[0]
knowledge = UsageTraceNode.from_proxy(self)
nested_knowledge = knowledge.children[function.__name__]
if (len(args) > 1):
if... |
def get_inverse_square_root_decay(optimizer, num_warmup_steps=0, last_epoch=(- 1)):
def lr_lambda(current_step):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
elif (num_warmup_steps > 0):
return ((num_warmup_steps / curre... |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='datasets/FAS', help='YOUR_Data_Dir')
parser.add_argument('--result_path', type=str, default='./results', help='root result directory')
parser.add_argument('--protocol', type=str, default='O_C_I_to_M... |
def load_qas_(path):
    """Load reference QA triples (qid, question, answers) from a JSONL file."""
    print_message('#> Loading the reference QAs from', path)
    with open(path) as f:
        triples = [(qa['qid'], qa['question'], qa['answers'])
                   for qa in map(ujson.loads, f)]
    return triples
def test_get_info_outlet_name_mapping_in_list():
with TestClient(app) as client:
begin = '2021-09-29'
end = '2021-09-30'
response = client.get(f'/{PREFIX}/info_by_date?begin={begin}&end={end}')
outlet_list = [item.get('_id') for item in response.json().get('sources')]
for out... |
def _g_div_gp(r, n, p, x, y, w):
t1 = ((r + 1) ** n)
t2 = ((r + 1) ** (n - 1))
return (((y + (t1 * x)) + (((p * (t1 - 1)) * ((r * w) + 1)) / r)) / (((((n * t2) * x) - (((p * (t1 - 1)) * ((r * w) + 1)) / (r ** 2))) + ((((n * p) * t2) * ((r * w) + 1)) / r)) + (((p * (t1 - 1)) * w) / r))) |
class BatchNorm2d(_BatchNorm):
    """Batch normalization for NCHW inputs (statistics over N, H, W)."""

    def __init__(self, num_features: int, momentum: float = 0.9, eps: float = 1e-05):
        # Reduce over every axis except the channel axis (dim 1).
        reduce_axes = (0, 2, 3)
        super().__init__(num_features, reduce_axes, momentum, eps)
class Module(Node):
def __init__(self, name: str, attrs: Attributes, funcs: List[Func], sub_modules: List['Module']) -> None:
super().__init__()
self.name = name
self.funcs = funcs
self.attrs = attrs
self.sub_module = sub_modules
for sub in sub_modules:
se... |
.skip(reason='need credential')
class TestCloudWatch(unittest.TestCase):
def setUp(self):
print('test cloud watch...')
log_name = 'chunkflow-test'
self.cloud_watch = CloudWatch(log_name)
def test_put_metric_data(self):
log = {'compute_device': 'X86-64', 'timer': {'cutout': 24, 'i... |
def _load_image_morethan_2_29(buffer, size):
MAX_PIXELS_PER_LOAD = ((1 << 29) - 1)
PIXELS_PER_LOAD = (1 << 26)
def do_load(buf, size):
rawmode = (((sys.byteorder == 'little') and 'BGRA') or 'ARGB')
buf = PIL.Image.frombuffer('RGBA', size, buf, 'raw', rawmode, 0, 1)
buf = (getattr(buf... |
class Token():
value: str
type_: TokenType
def variable(cls, value: str) -> 'Token':
return cls(value, TokenType.VARIABLE)
def string(cls, value: str) -> 'Token':
return cls(value, TokenType.STRING)
def pointer(cls, value: str) -> 'Token':
return cls(value, TokenType.POINTER)... |
def getConvection(convection):
if (convection == 'Standard'):
def Conv(rhs, u_hat, work, Tp, VTp, K, u_dealias):
u_dealias = VTp.backward(u_hat, u_dealias)
rhs = standard_convection(rhs, u_dealias, u_hat, work, Tp, K)
rhs[:] *= (- 1)
return rhs
elif (conve... |
class ModularIVAE(nn.Module):
def __init__(self, latent_dim, data_dim, aux_dim, prior=None, decoder=None, encoder=None, n_layers=3, hidden_dim=50, activation='lrelu', slope=0.1, device='cpu', anneal=False):
super().__init__()
self.data_dim = data_dim
self.latent_dim = latent_dim
self... |
.overload_attribute(BitMaskedType, '_cast')
def BitMaskedType_cast(builder):
def get_cast(builder):
if builder._lsb_order:
return np.array([np.uint8((1 << 0)), np.uint8((1 << 1)), np.uint8((1 << 2)), np.uint8((1 << 3)), np.uint8((1 << 4)), np.uint8((1 << 5)), np.uint8((1 << 6)), np.uint8((1 << 7... |
def ref_all_reduce(x_data_list, size, division):
    """Reference result of an all-reduce over ``size`` participants.

    Each array is multiplied by ``sum(range(size)) + size`` (the triangular
    number ``size * (size + 1) // 2``), optionally divided by ``size``.

    Args:
        x_data_list: list of numpy arrays to reduce.
        size: number of participants; the closed form also handles 0,
            where the old ``reduce`` over an empty range raised TypeError.
        division: if True, divide each reduced array by ``size`` (mean).

    Returns:
        List of reduced arrays, in the same order as ``x_data_list``.
    """
    # Closed form of reduce(add, np.arange(size)) + size.
    f = size * (size + 1) // 2
    results = []
    for x_data in x_data_list:
        result = x_data * f
        if division:
            result /= size
        results.append(result)
    return results
class BioGptPreTrainedModel(metaclass=DummyObject):
    """Placeholder emitted when torch is not installed.

    Any attempt to construct it raises an informative error via
    ``requires_backends`` instead of a confusing ImportError later.
    """
    # Backends that must be present before the real class can be used.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises immediately if torch is missing.
        requires_backends(self, ['torch'])
def _pad_sent(text_wd, sent_max_len):
    """Pad or truncate each sentence (list of word tokens) to a fixed length.

    Sentences shorter than ``sent_max_len`` are right-padded with WORD_PAD;
    longer ones are truncated. Fix: the padding branch previously used
    ``extend`` and mutated the caller's sentence lists in place (while the
    truncation branch did not) — both branches now build new lists.

    Args:
        text_wd: list of sentences, each a list of word tokens.
        sent_max_len: target length for every sentence.

    Returns:
        A new list of sentences, each exactly ``sent_max_len`` tokens long.
    """
    pad_text_wd = []
    for sent_wd in text_wd:
        if len(sent_wd) < sent_max_len:
            pad_num = sent_max_len - len(sent_wd)
            # Concatenate into a fresh list so the input is left untouched.
            pad_text_wd.append(sent_wd + [WORD_PAD] * pad_num)
        else:
            pad_text_wd.append(sent_wd[:sent_max_len])
    return pad_text_wd
def direction_performance(pred, label):
pred = pred.cpu().detach().numpy()
label = label.cpu().detach().numpy()
pred = pred.tolist()
label = label.tolist()
angle = math.fabs(angle_difference(pred, label))
start = math.fabs(startpoint_difference(pred, label))
end = math.fabs(endpoint_differen... |
def _wrap_traced_layers(module: nn.Module, depth=1000, basic_blocks=(), allow_ModuleList_ModuleDict=True):
layers_dict = dict()
layers_to_patch = dict()
patched_layers_to_scope = dict()
for (sub_layer, scope, parent, terminal) in traverse_model(module, depth=depth, basic_blocks=basic_blocks, full=True):... |
class MyModule(torch.jit.ScriptModule):
def __init__(self):
super(MyModule, self).__init__()
self.mult = torch.nn.Parameter(torch.tensor([[1, 2, 3, 4, 5.0]]))
.script_method
def forward(self, x):
return self.mult.mm(x)
.script_method
def multi_input(self, x, y, z=2):
... |
def test_ByteMaskedArray_NumpyArray():
v1 = json.loads('{"class":"ByteMaskedArray","mask":"i8","content":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","parameters":{},"form_key":null},"valid_when":true,"parameters":{},"form_key":null}')
v2 = ak.forms.from_dict(v1).to_dic... |
class SummaryEncoder(BaseEstimator, util.TransformerWithTargetMixin):
encoding_relation = util.EncodingRelation.ONE_TO_M
def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True, handle_missing='value', handle_unknown='value', quantiles=(0.25, 0.75), m=1.0):
self.return_df = return_... |
def copy_source(file, output_dir):
    """Copy ``file`` into ``output_dir`` under its basename via tf.io.gfile."""
    dst_path = os.path.join(output_dir, os.path.basename(file))
    # Destination is opened first, matching the original acquisition order.
    with tf.io.gfile.GFile(dst_path, mode='wb') as dst, \
            tf.io.gfile.GFile(file, mode='rb') as src:
        shutil.copyfileobj(src, dst)
class SMPLJoint(enum.Enum):
ROOT = 0
PELVIS = 0
SPINE = 0
LHIP = 1
RHIP = 2
SPINE1 = 3
LKNEE = 4
RKNEE = 5
SPINE2 = 6
LANKLE = 7
RANKLE = 8
SPINE3 = 9
LFOOT = 10
RFOOT = 11
NECK = 12
LCLAVICLE = 13
RCLAVICLE = 14
HEAD = 15
LSHOULDER = 16
RS... |
class DMA_masked_select_reg(atomic_reg):
OP_NAME = 'DMA_masked_select'
_fields_ = [('intr_en', ctypes.c_uint64, 1), ('stride_enable', ctypes.c_uint64, 1), ('nchw_copy', ctypes.c_uint64, 1), ('cmd_short', ctypes.c_uint64, 1), ('reserved', ctypes.c_uint64, 1), ('reserved', ctypes.c_uint64, 4), ('reserved', ctypes... |
def mlp_network(fcs, use_lstm, inpt, masks, rnn_state, num_actions, lstm_unit, nenvs, step_size, scope):
(policy_rnn_state, value_rnn_state) = tf.split(rnn_state, 2, axis=(- 1))
inpt = layers.flatten(inpt)
input_dim = (inpt.get_shape().as_list()[1] + 1)
def initializer(scale):
return tf.random_n... |
class RunningMeanStd(object):
def __init__(self, epsilon=0.01, shape=()):
self._sum = tf.compat.v1.get_variable(dtype=tf.float64, shape=shape, initializer=tf.compat.v1.constant_initializer(0.0), name='runningsum', trainable=False)
self._sumsq = tf.compat.v1.get_variable(dtype=tf.float64, shape=shape... |
def resnetish34(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNetish:
    """Build the 34-layer ResNet-ish model (BasicBlock, layout 3-4-6-3)."""
    block_layout = [3, 4, 6, 3]
    return _resnetish('resnetish34', BasicBlock, block_layout,
                      pretrained, progress, **kwargs)
class CleansedLines(object):
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
... |
class CamembertModel():
    """Dummy CamembertModel used when PyTorch is not installed.

    Every entry point immediately raises via ``requires_pytorch`` so users
    get an actionable install message instead of an AttributeError.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): defined as an instance method here, although the real
        # model class exposes from_pretrained as a classmethod — confirm intended.
        requires_pytorch(self)
def merge_named_payload(name_to_merge_op):
    """Build a merger that combines two payload dicts field-by-field.

    ``name_to_merge_op`` maps each field name to a binary op applied to the
    corresponding entries of the two payloads; the merger returns a new dict
    containing exactly those fields.
    """
    def merge(payload_a, payload_b):
        return {name: op(payload_a[name], payload_b[name])
                for name, op in name_to_merge_op.items()}
    return merge
def hist2d_plot(dataset, data_range, title, axis_name, file_path):
matplotlib.use('pdf')
import seaborn as sns
sns.set_theme(style='white', palette='viridis', font_scale=1.5)
plt.figure(figsize=(10, 8))
nbins = 1024
edges = np.linspace(data_range[0], data_range[1], (nbins + 1))
bin_area = np... |
def gather_last(batch_hidden_states, batch_lengths, bidirectional=True):
(seq_len, batch_size, hidden_x_dirs) = batch_hidden_states.size()
if bidirectional:
assert ((hidden_x_dirs % 2) == 0)
single_dir_hidden = int((hidden_x_dirs / 2))
else:
single_dir_hidden = int(hidden_x_dirs)
... |
def inverse_jacobi_f(kind, x, m):
from mpmath import mp as ctx
prec = ctx.prec
try:
x = ctx.convert(x)
m = ctx.convert(m)
if ((not isinstance(x, ctx.mpf)) or (not isinstance(x, ctx.mpf))):
raise ValueError('arguments must be real')
if (kind == 'sn'):
i... |
def random_feature(df: pd.DataFrame):
    """Append a standard-normal 'random_feature' column to ``df`` in place.

    Returns the (mutated) frame together with a constant validation flag of 1.
    """
    df['random_feature'] = np.random.normal(0, 1, size=len(df))
    return (df, 1)
class _BModelContext():
    """Context holder that temporarily keeps a reference to a BModel net.

    The instance is called with a net to bind it and returns itself,
    presumably so it can be used directly in a ``with`` statement; the
    reference is dropped on exit.
    """

    def __call__(self, bmodel_net: 'BModel'):
        # Bind the net and return self for use as a context manager.
        self.bmodel_net = bmodel_net
        return self

    def __enter__(self):
        # Nothing extra to set up; binding already happened in __call__.
        pass

    def __exit__(self, *exc_info):
        # Drop the reference so the net can be garbage collected.
        self.bmodel_net = None
class RobertaConfig(BertConfig):
    """Configuration for RoBERTa models.

    Inherits everything from BertConfig; only the model-type tag and the
    pretrained-config archive map differ.
    """
    # Mapping from shortcut names to hosted config files (defined elsewhere).
    pretrained_config_archive_map = ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'roberta'
def load_checkpoint(fpath):
    """Load and return a torch checkpoint from ``fpath``.

    Raises:
        ValueError: if ``fpath`` does not name an existing file.
    """
    # Guard clause: fail fast on a missing file.
    if not osp.isfile(fpath):
        raise ValueError("=> No checkpoint found at '{}'".format(fpath))
    checkpoint = torch.load(fpath)
    print("=> Loaded checkpoint '{}'".format(fpath))
    return checkpoint
def get_toolkit_names(full, subset=None):
    """Filter ``full`` down to the names contained in ``subset``.

    Args:
        full: ordered list of all toolkit names.
        subset: optional collection of names to keep; ``None`` keeps all.

    Returns:
        ``full`` itself when ``subset`` is None, otherwise a new list
        preserving the order of ``full``.
    """
    if subset is None:
        return full
    # Set membership: O(1) per name instead of O(len(subset)).
    wanted = set(subset)
    return [name for name in full if name in wanted]
class ActionOnFqf(Action):
def __init__(self, orthogonal_grp, fqf, on_subquotient=False, is_left=False):
import operator
self._on_subquotient = on_subquotient
if is_left:
raise ValueError('the action is from the right')
Action.__init__(self, orthogonal_grp, fqf, is_left, ... |
def test_simple_sdfg_map():
    """Nesting a map subgraph into a nested SDFG keeps the SDFG valid."""
    sdfg, state, tasklet, map_entry, map_exit = create_sdfg()
    subgraph = SubgraphView(state, [map_entry, tasklet, map_exit])
    nest_state_subgraph(sdfg, state, subgraph)
    sdfg.validate()
class WebVideoCaptionDataset(BaseDataset):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
def _get_video(self, index):
max_retries = 3
for _ in range(max_retries):
ann = self.annotat... |
def clip_grad_value(params, clip_value=10):
    """Clamp gradients of trainable params to [-clip_value, clip_value]."""
    trainable = (p for p in params if p.requires_grad)
    clip_grad.clip_grad_value_(trainable, clip_value=clip_value)
.benchmark(group='generator')
def test_benchmark_setup_generator_large(benchmark):
n_feat = 1024
n_edges = 100
batch_size = 10
num_samples = [20, 10]
G = example_Graph_2(n_feat, 5000, 20000)
nodes = list(G.nodes())
edges_to_sample = np.reshape(random.choices(nodes, k=(2 * n_edges)), (n_edges... |
def _print_indented_docs(lines, prefix: str, include_stringtags, out):
num_empty_lines = 0
for i in range(len(lines)):
if (len(lines[i].strip()) != 0):
break
num_empty_lines += 1
lines = lines[num_empty_lines:]
num_empty_lines = 0
for i in range((len(lines) - 1), 0, (- 1)... |
class Partition4(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[16]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[17]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[18]']
TENSORS = []
def __init__(self, ... |
def convert_fc_shapes(arg):
    """Parse a shape-spec string into a tuple of ints.

    Empty/whitespace input yields ``()``, a bare int yields a 1-tuple, and
    any other literal (tuple, list, ...) is converted with ``tuple()``.
    """
    text = arg.strip()
    if not text:
        return ()
    value = ast.literal_eval(text)
    if isinstance(value, tuple):
        return value
    if isinstance(value, int):
        return (value,)
    return tuple(value)
class TestGarageEnv():
def test_wraps_env_spec(self):
garage_env = GarageEnv(env_name='Pendulum-v0')
assert isinstance(garage_env.spec, EnvSpec)
def test_closes_box2d(self):
garage_env = GarageEnv(env_name='CarRacing-v0')
garage_env.render()
assert (garage_env.env.viewer ... |
def get_test_set(opt, spatial_transform, temporal_transform):
    """Build the test dataset for the configured dataset name.

    Only 'VideoDecaptionData' is handled; any other value returns None
    (matching the original implicit fall-through).
    """
    if opt.dataset != 'VideoDecaptionData':
        return None
    return VideoDecaptionData(opt.video_path, 'testing', 0,
                              spatial_transform=spatial_transform,
                              temporal_transform=temporal_transform,
                              sample_duration=opt.sample_duration, opt=opt)
def test_offsets_to_raveled_neighbors_explicit_0():
image_shape = (100, 200, 3)
footprint = np.ones((3, 3, 3), dtype=bool)
center = (1, 1, 1)
offsets = _util._offsets_to_raveled_neighbors(image_shape, footprint, center)
desired = np.array([(- 600), (- 3), (- 1), 1, 3, 600, (- 603), (- 601), (- 599),... |
_start_docstrings('The bare VAN model outputting raw features without any specific head on top. Note, VAN does not have an embedding layer.', VAN_START_DOCSTRING)
class VanModel(VanPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.encoder = VanE... |
def set_template(args):
if (args.template == 'D2NET'):
args.task = 'VideoDeblur'
args.model = 'D2NET'
args.n_sequence = 3
args.n_frames_per_video = 200
args.n_feat = 32
args.n_resblock = 3
args.size_must_mode = 4
args.loss = '1*L1+2*HEM'
args.l... |
def symmetrized_coordinate_sums(dim, n):
from sage.structure.formal_sum import FormalSum
coordinates = [list(range(dim)) for i in range(n)]
table = defaultdict(list)
for i in product(*coordinates):
sort_i = tuple(sorted(i))
table[sort_i].append([1, tuple(i)])
return tuple(sorted((For... |
def get_extra_layer_scopes(last_layers_contain_logits_only=False):
    """Variable scopes of the extra layers on top of the backbone.

    With ``last_layers_contain_logits_only`` set, only the logits scope is
    returned; otherwise all decoder/ASPP/projection scopes are included.
    """
    extra_scopes = [LOGITS_SCOPE_NAME]
    if not last_layers_contain_logits_only:
        extra_scopes += [IMAGE_POOLING_SCOPE, ASPP_SCOPE,
                         CONCAT_PROJECTION_SCOPE, DECODER_SCOPE,
                         META_ARCHITECTURE_SCOPE]
    return extra_scopes
class ReshapeWrapper(RNNCell):
def __init__(self, cell, shape='flatten', apply_to='output'):
self._cell = cell
self._shape = shape
self._apply_to = apply_to
def state_size(self):
return self._cell.state_size
def output_size(self):
return self._cell.output_size
def... |
def remote_exec(model, execution_context):
pynq_ip = model.get_metadata_prop('pynq_ip')
pynq_port = int(model.get_metadata_prop('pynq_port'))
pynq_username = model.get_metadata_prop('pynq_username')
pynq_password = model.get_metadata_prop('pynq_password')
pynq_target_dir = model.get_metadata_prop('p... |
class HeckeOperator(HeckeAlgebraElement):
def __init__(self, parent, n):
HeckeAlgebraElement.__init__(self, parent)
if (not isinstance(n, (int, Integer))):
raise TypeError('n must be an int')
self.__n = int(n)
def _richcmp_(self, other, op):
if (not isinstance(other, ... |
class PostgresDemoDatabase(DemoDatabase):
def __init__(self, dbname: str, host: str, port: str, user: str, password: str) -> None:
self.dbname = dbname
self.host = host
self.port = port
self.user = user
self.password = password
self.conn: Optional[psycopg2.extensions.... |
def pile_transform(tokenizer, max_length, seed=None):
def transform(batch):
examples = tokenizer(batch['text'])
examples = {k: list(chain(*examples[k])) for k in examples.keys() if (k != 'attention_mask')}
total_length = len(examples[list(examples.keys())[0]])
if (total_length >= max... |
def get_run_time(opt_out: dict):
    """Cumulative run time after each optimizer step.

    Args:
        opt_out: mapping with a 'run_time' sequence of per-step durations.
            (Previously annotated as ``list``, but the body indexes by key.)

    Returns:
        List of running totals, same length as ``opt_out['run_time']``.
    """
    from itertools import accumulate
    return list(accumulate(opt_out['run_time']))
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('x_shape, s_shape', [((2, 4, 8, 8), (1, 1, 1, 1)), ((2, 4, 8, 8), (1, 4, 1, 1)), ((2, 8, 8, 4), (1, 1, 1, 4))])
.parametrize('round_mode', ['HALF_AWAY_FROM_ZERO', 'HALF_TO_EVEN'])
.parametrize('narrow_range', [False, True])
.parametrize('dtyp... |
def make_cuda_ext(name, module, sources, sources_cuda=None):
if (sources_cuda is None):
sources_cuda = []
define_macros = []
extra_compile_args = {'cxx': []}
if (torch.cuda.is_available() or (os.getenv('FORCE_CUDA', '0') == '1')):
define_macros += [('WITH_CUDA', None)]
extension ... |
class MeanVarNormalize(Rescale):
def __init__(self, bias=None, scale=None, normalize_bias=True, normalize_scale=True):
super().__init__(bias, scale, normalize_bias, normalize_scale)
def train(self, time_series: TimeSeries):
(bias, scale) = ({}, {})
for (name, var) in time_series.items():... |
.environment
class cuDNN():
cmake_minimum_version = None
cmake_packages = []
cmake_variables = {}
cmake_compile_flags = []
cmake_link_flags = []
cmake_files = []
state_fields = ['daceml::cudnn::CudnnHandle *cudnn_handle;']
dependencies = [CUDA]
headers = {'cuda': ['../include/daceml_... |
class Dataset():
    """Abstract dataset interface; subclasses supply examples and sizes."""

    def get_examples(self, split):
        """Return the examples for ``split``; must be overridden."""
        raise NotImplementedError

    def get_size(self, split):
        """Return the number of examples in ``split``; must be overridden."""
        raise NotImplementedError
def main(args):
cfg = setup(args)
if args.eval_only:
cfg.defrost()
cfg.MODEL.BACKBONE.PRETRAIN = False
model = Trainer.build_model(cfg)
Checkpointer(model).load(cfg.MODEL.WEIGHTS)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.res... |
def _nan_mask(a, out=None):
if (a.dtype.kind not in 'fc'):
return True
y = np.isnan(a, out=out)
y = np.invert(y, out=y)
return y |
def HMC(experiment: Experiment, nsteps: int=10, beta: float=1.0, nlog: int=1, nprint: int=1, x: Optional[torch.Tensor]=None, eps: Optional[float]=None, nleapfrog: Optional[int]=None) -> tuple[(torch.Tensor, BaseHistory)]:
history_hmc = BaseHistory()
if (x is None):
state = experiment.trainer.dynamics.ra... |
class _NLICls(nn.Module):
def __init__(self, in_dim, num_cls, hid_dim, dropout=0.1):
super(_NLICls, self).__init__()
self.fc = nn.Sequential(nn.Dropout(dropout), nn.Linear((in_dim * 4), hid_dim), nn.LeakyReLU(), nn.Dropout(dropout), nn.Linear(hid_dim, num_cls))
def forward(self, x1, x2):
... |
def sample_noise(batch_size):
if (args.mixing and (random.random() < 0.9)):
(gen_in11, gen_in12, gen_in21, gen_in22) = torch.randn(4, batch_size, code_size, device='cuda').chunk(4, 0)
gen_in1 = [gen_in11.squeeze(0), gen_in12.squeeze(0)]
gen_in2 = [gen_in21.squeeze(0), gen_in22.squeeze(0)]
... |
_level_function()
def std(x, weight=None, ddof=0, axis=None, *, keepdims=False, mask_identity=False, highlevel=True, behavior=None, attrs=None):
    # NOTE(review): generator-style dispatch wrapper — the `yield` hands
    # (x, weight) to a decorator/driver (presumably `_level_function` above)
    # before the real computation runs; confirm against that decorator.
    (yield (x, weight))
    # The generator's return value (StopIteration payload) carries the
    # actual result computed by _impl.
    return _impl(x, weight, ddof, axis, keepdims, mask_identity, highlevel, behavior, attrs)
def get_prefix_samples(root, folder_to_idx, extensions, shuffle=False):
samples = []
root = os.path.expanduser(root)
for folder_name in sorted(os.listdir(root)):
_dir = os.path.join(root, folder_name)
if (not os.path.isdir(_dir)):
continue
for (_, _, fns) in sorted(os.wal... |
def random_boxes(mean_box, stdev, N):
    """Sample N boxes around ``mean_box`` with Gaussian jitter ``stdev``.

    Returns an (N, 4) float32 array.
    """
    noise = np.random.randn(N, 4) * stdev
    return (noise + mean_box).astype(dtype=np.float32)
class DecoderModel(PreTrainedModel):
def __init__(self, config, decoder_word_embeddings_weight, decoder_position_embeddings_weight):
super(DecoderModel, self).__init__(config)
self.config = config
self.max_target_length = config.max_target_embeddings
self.embeddings = DecoderEmbeddin... |
class Ensembler(BaseEstimator, ClassifierMixin):
def __init__(self, base_model):
self.base_model = base_model
self.lr = LogisticRegression(random_state=0, C=1.0, solver='lbfgs', multi_class='multinomial')
def fit(self, X, X_val, X_tst, verbose, **params):
self.X_val = X_val
C = p... |
def register_Ns3LteEnbCphySapProvider_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::LteEnbCphySapProvider const &', 'arg0')])
cls.add_method('AddUe', 'void', [param('uint16_t', 'rnti')], is_pure_virtual=True, is_virtual=True)
cls.add_method('GetReferenceSignalPower'... |
class AttentionLayer(layers.Layer):
def __init__(self, input_dim: int, num_nodes: int, attention_size: int, v_type: str='relu', bias: bool=True, **kwargs: Optional) -> None:
super().__init__(**kwargs)
self.w_omega = tf.Variable(tf.random.uniform([(num_nodes * input_dim), attention_size]))
se... |
def split_time(g, train_year=2016, val_year=2017):
np.random.seed(42)
year = list(np.array(g.ndata['year']))
indices = np.arange(g.num_nodes())
print(f'train year: {train_year}')
valid_indices = [i for i in indices if (g.ndata['label'][i] != (- 1))]
train_ids = [i for i in valid_indices if (year... |
class SignatureVisualizer():
def __init__(self, path_to_template, model_type, models_path):
self.path_to_template = path_to_template
if (model_type == 'GHUM'):
from util.ghum_util import GHUMHelper
ghum_helper = GHUMHelper(models_path)
self.mesh_template = ghum_he... |
def test_case52():
url = (brokerIp + '/ngsi-ld/v1/entities/')
headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
r = requests.post(url, data=json.dumps(ld_data.subdata43), headers=headers)
print(r.content)
print(r.s... |
class OnnxExportTestCaseV2(TestCase):
def _onnx_export(self, test_name, name, model_name, feature, onnx_config_class_constructor, device='cpu', framework='pt'):
from transformers.onnx import export
model_class = FeaturesManager.get_model_class_for_feature(feature, framework=framework)
config... |
def test_changed_only():
lr = LogisticRegression(C=99)
expected = 'LogisticRegression(C=99)'
assert (lr.__repr__() == expected)
lr = LogisticRegression(C=99, class_weight=0.4, fit_intercept=False, tol=1234, verbose=True)
expected = '\nLogisticRegression(C=99, class_weight=0.4, fit_intercept=False, t... |
def main():
args = parse_args()
if (len(args.shape) == 1):
input_shape = (3, args.shape[0], args.shape[0])
elif (len(args.shape) == 2):
input_shape = ((3,) + tuple(args.shape))
else:
raise ValueError('invalid input shape')
cfg = Config.fromfile(args.config)
cfg.model.pret... |
def save_video(video_array, video_save_path):
    """Write frames to an mp4 file at 10 fps.

    Args:
        video_array: iterable of frames; the writer is opened at 640x360,
            so frames are assumed to match that size — TODO confirm callers.
        video_save_path: destination .mp4 path.
    """
    import cv2
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    output_movie = cv2.VideoWriter(video_save_path, fourcc, 10, (640, 360))
    try:
        for frame in video_array:
            output_movie.write(frame)
    finally:
        # Bug fix: previously called `out.release()` on an undefined name,
        # raising NameError and leaking the writer handle.
        output_movie.release()
        cv2.destroyAllWindows()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.