code stringlengths 101 5.91M |
|---|
def project_masks_on_boxes(segmentation_masks, proposals, resolution):
masks = []
(h, w) = resolution
device = proposals.bbox.device
proposals = proposals.convert('xyxy')
assert (segmentation_masks.size == proposals.size), '{}, {}'.format(segmentation_masks, proposals)
proposals = proposals.bbox... |
class MetricsTop():
def __init__(self, train_mode):
if (train_mode == 'regression'):
self.metrics_dict = {'MOSI': self.__eval_mosi_regression, 'MOSEI': self.__eval_mosei_regression, 'SIMS': self.__eval_sims_regression}
else:
self.metrics_dict = {'MOSI': self.__eval_mosi_class... |
_utils.test()
def test_static_grouped_ndrange():
val = ti.field(ti.i32)
n = 4
m = 8
ti.root.dense(ti.ij, (n, m)).place(val)
x0 = 2
y0 = 3
x1 = 1
y1 = 6
def test():
for I in ti.static(ti.grouped(ti.ndrange((x0, y0), (x1, y1)))):
val[I] = (I[0] + (I[1] * 2))
tes... |
class ResNetLW(nn.Module):
def __init__(self, block, layers, num_classes=21):
self.inplanes = 64
super(ResNetLW, self).__init__()
self.do = nn.Dropout(p=0.5)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
fo... |
def _setup_r_to_sage_converter():
from rpy2.rinterface import SexpVector, ListSexpVector, FloatSexpVector
from rpy2.robjects.conversion import Converter
cv = Converter('r to sage converter')
try:
rpy2py = cv.rpy2py
except AttributeError:
rpy2py = cv.ri2py
rpy2py.register(object, ... |
def main(args, config):
utils.init_distributed_mode(args)
device = torch.device(args.device)
seed = (args.seed + utils.get_rank())
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.benchmark = True
start_epoch = 0
max_epoch = config['schedular']['epochs']
warmu... |
def test(data, test_mask, neg_sampler, split_mode):
num_batches = math.ceil((len(data['sources'][test_mask]) / BATCH_SIZE))
perf_list = []
for batch_idx in tqdm(range(num_batches)):
start_idx = (batch_idx * BATCH_SIZE)
end_idx = min((start_idx + BATCH_SIZE), len(data['sources'][test_mask]))
... |
def getrgb(color):
color = color.lower()
rgb = colormap.get(color, None)
if rgb:
if isinstance(rgb, tuple):
return rgb
colormap[color] = rgb = getrgb(rgb)
return rgb
if re.match('#[a-f0-9]{3}$', color):
return (int((color[1] * 2), 16), int((color[2] * 2), 16),... |
def run_forward(unit_test_class, test_params):
device = test_params.device
inputs = set_python_tensors_requires_grad(move_python_tensors_to_device([arg_value for (_, arg_value) in test_params.arg_dict['input']], device))
inputs += move_python_tensors_to_device([arg_value for (_, arg_value) in test_params.ar... |
_task('multilingual_masked_lm')
class MultiLingualMaskedLMTask(FairseqTask):
def add_args(parser):
parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner')
parser.add_argument('--sample-br... |
def im_detect_bbox(model, images, target_scale, target_max_size, device):
transform = TT.Compose([T.Resize(target_scale, target_max_size), TT.ToTensor(), T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=cfg.INPUT.TO_BGR255)])
images = [transform(image) for image in images]
images = ... |
def threshold_otsu(image=None, nbins=256, *, hist=None):
if ((image is not None) and (image.ndim > 2) and (image.shape[(- 1)] in (3, 4))):
warn(f'threshold_otsu is expected to work correctly only for grayscale images; image shape {image.shape} looks like that of an RGB image.')
if (image is not None):
... |
def get_quantile_interval(data, nbins):
    """Return the quantile cut points of *data* at nbins evenly spaced probabilities."""
    probs = get_uniform_interval(0, 1, nbins)
    return list(data.quantile(probs))
def get_trainer_cls(args) -> Type[PipelineSupportedTrainerType]:
    """Look up the trainer class registered under ``args.trainer['type']``.

    The original guarded the lookup with a bare ``assert``, which is stripped
    under ``python -O`` and would then silently return ``None``; an explicit
    raise keeps the check unconditional and names the bad value.

    :raises ValueError: if the requested trainer type is not registered.
    """
    trainer_type = args.trainer['type']
    trainer_cls = AVAILABLE_TRAINERS.get(trainer_type)
    if trainer_cls is None:
        raise ValueError(
            f'Unknown trainer type {trainer_type!r}; '
            f'available: {sorted(AVAILABLE_TRAINERS)}'
        )
    return trainer_cls
# NOTE(review): the bare call below looks like a mangled decorator
# (presumably `@test_utils.test(...)` with the `@` stripped by extraction),
# and `func` likely carried `@ti.kernel` — confirm against the original source.
_utils.test(require=ti.extension.assertion, debug=True, gdb_trigger=False)
def test_not_out_of_bound():
    # Writing to the last valid index of an (8, 16) field must not trip
    # Taichi's out-of-bound assertion.
    x = ti.field(ti.i32, shape=(8, 16))
    def func():
        x[(7, 15)] = 1
    func()
def full_eval(args=None):
if (args is None):
args = command_parser.parse_arguments()
create_shared_model = model_class(args.model)
init_agent = agent_class(args.agent_type)
args.phase = 'eval'
args.episode_type = 'TestValEpisode'
args.test_or_val = 'val'
start_time = time.time()
... |
class InternalMethodSlot(SlotDescriptor):
    """Slot descriptor for an internal type method whose C name is mangled."""

    def __init__(self, slot_name, **kargs):
        """Forward the slot name and any options to the base descriptor."""
        SlotDescriptor.__init__(self, slot_name, **kargs)

    def slot_code(self, scope):
        """Return the scope-mangled internal name used as this slot's code."""
        internal_name = self.slot_name
        return scope.mangle_internal(internal_name)
def ReflectedLightBarycentricCorrection(SolSystemTarget, JDUTC, loc, zmeas=0, HorizonsID_type='smallbody', ephemeris='de430', leap_dir=os.path.join(os.path.dirname(__file__), 'data'), leap_update=True, predictive=False):
(JDTDB, JDTT, warning, error) = utc_tdb.JDUTC_to_JDTDB(JDUTC)
try:
TargetObj1 = Hor... |
def test_get_workspace_model_overridepoi(workspace_factory):
    """A model built with an explicit poi_name must report that POI back."""
    workspace = workspace_factory()
    model = workspace.model(poi_name='lumi')
    assert model.config.poi_name == 'lumi'
def build_desc_graph(desc, file=None):
try:
if str(desc).endswith('.'):
desc = desc[0:(len(desc) - 1)]
desc = ' '.join(desc.split())
doc = NLP(desc)
g_features = []
dep_tree = defaultdict(list)
boundary_nodes = []
for sent in doc.sents:
... |
def test_emanet_head():
head = EMAHead(in_channels=4, ema_channels=3, channels=2, num_stages=3, num_bases=2, num_classes=19)
for param in head.ema_mid_conv.parameters():
assert (not param.requires_grad)
assert hasattr(head, 'ema_module')
inputs = [torch.randn(1, 4, 23, 23)]
if torch.cuda.is_... |
def capture_utterances(dialogue):
    """Extract the utterance text that follows each 'Speaker:' prefix.

    A colon at end-of-line is first rejoined with the next line so an
    utterance stays on the same line as its speaker tag; each captured
    span is whitespace-stripped.
    """
    normalized = dialogue.replace(':\n', ': ')
    # leading newline guarantees the first line is scanned like the rest
    captured = re.findall('(?<=:)(.*)', ('\n' + normalized))
    return [span.strip() for span in captured]
class SymplecticMatrixGroup_gap(SymplecticMatrixGroup_generic, NamedMatrixGroup_gap, FinitelyGeneratedMatrixGroup_gap):
    # GAP-backed symplectic matrix group.
    # NOTE(review): the bare `_method` below looks like a mangled decorator
    # (presumably `@cached_method` with the `@cached` part lost during
    # extraction) — confirm against the original Sage source.
    _method
    def invariant_form(self):
        """Return the invariant bilinear form as an immutable Sage matrix,
        obtained from GAP's InvariantBilinearForm record."""
        m = self.gap().InvariantBilinearForm()['matrix'].matrix()
        m.set_immutable()
        return m
class sage_build_ext_minimal(build_ext):
def initialize_options(self):
build_ext.initialize_options(self)
self.parallel = self.get_default_number_build_jobs()
def get_default_number_build_jobs() -> int:
try:
cpu_count = len(os.sched_getaffinity(0))
except AttributeErr... |
def compute_score_with_logits(logits, labels):
    """Score predictions against soft labels.

    Takes the argmax of ``logits`` along dim 1, one-hot encodes it, and
    masks with ``labels``. Returns ``(scores, predicted_indices)``.
    """
    predicted = torch.max(logits, 1)[1].data
    one_hot = torch.zeros(*labels.size()).to(predicted.device)
    one_hot.scatter_(1, predicted.view((- 1), 1), 1)
    return ((one_hot * labels), predicted)
def read_annotation(annotation, base_index, stopwords, tokens, entities, postags, corefs, num_sen):
sentences = annotation['sentences']
for (i, sentence) in enumerate(sentences):
for entity in sentence['entitymentions']:
head_idx = (base_index[(i + num_sen)] + entity['tokenBegin'])
... |
def pytest_addoption(parser):
parser.addoption('--nnabla-ext', type=str, default='cpu', help='Extension path, e.g. "cpu", "cuda", "cudnn".')
parser.addoption('--nnabla-ext-type-config', type=str, default='float', help='Extension type-config, e.g. "float", "half".')
parser.addoption('--nnabla-ext-device-id',... |
def recall_batch(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Batch recall via the Keras backend: TP / (positives + epsilon)."""
    tp = K.sum(K.round((y_true * y_pred)))
    positives = K.sum(y_true)
    # epsilon guards against division by zero when there are no positives
    return (tp / (positives + K.epsilon()))
class FieldsBuilder():
def __init__(self):
self.ptr = _snode_registry.create_root(impl.get_runtime().prog)
self.root = snode.SNode(self.ptr)
self.finalized = False
self.empty = True
impl.get_runtime().initialize_fields_builder(self)
def _finalized_roots(cls):
root... |
def read_rationales(path):
    """Load one JSON object per line from *path*, transparently gunzipping
    when the filename ends in '.gz'. Returns the objects as a list."""
    opener = (gzip.open if path.endswith('.gz') else open)
    with opener(path) as handle:
        return [json.loads(line) for line in handle]
def register_Ns3LteHexGridEnbTopologyHelper_methods(root_module, cls):
cls.add_constructor([param('ns3::LteHexGridEnbTopologyHelper const &', 'arg0')])
cls.add_constructor([])
cls.add_method('DoDispose', 'void', [], is_virtual=True)
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.... |
class PieriFactors_type_B_affine(PieriFactors_affine_type):
def __init__(self, W):
Parent.__init__(self, category=FiniteEnumeratedSets())
self.W = W
_method
def maximal_elements_combinatorial(self):
n = self.W.n
rho = (self.W.from_reduced_word(range(2, (n - 1))) * self.W.from... |
def _collect_contrastive_inputs(feat, num_sample, dummy_inputs, selected_negative):
input_ids = []
token_type_ids = []
sample_mask = []
input_ids.append(feat.gt_input_ids)
token_type_ids.append(feat.gt_token_type_ids)
for idx in selected_negative:
input_ids.append(feat.candidate_input_id... |
class Smaller(AttributeFilter):
    """Attribute filter keeping entries strictly less than *value*."""

    def __init__(self, attr: str, value: Any):
        """Configure the base filter with the `<` (operator.lt) comparison."""
        super().__init__(attr=attr, value=value, op=operator.lt)

    def op_as_str(self):
        """Human-readable symbol for this filter's operator."""
        return '<'
.expansion
class ExpandStencilCPU(dace.library.ExpandTransformation):
environments = []
def expansion(node, parent_state, parent_sdfg):
sdfg = dace.SDFG((node.label + '_outer'))
state = sdfg.add_state((node.label + '_outer'))
(inputs, outputs, shape, field_to_data, field_to_desc, _, vect... |
('word_emb', 'glove')
class GloVe(Embedder):
def __init__(self, kind, lemmatize=False):
cache = os.path.join(os.environ.get('CACHE_DIR', os.getcwd()), '.vector_cache')
self.glove = torchtext.vocab.GloVe(name=kind, cache=cache)
self.dim = self.glove.dim
self.vectors = self.glove.vecto... |
def evaluation(args, models):
feature_extractor = create_feature_extractor(**args)
dataset = ImageLabelDataset(data_dir=args['testing_path'], resolution=args['image_size'], num_images=args['testing_number'], transform=make_transform(args['model_type'], args['image_size']))
if (('share_noise' in args) and ar... |
.parametrize('b0,b1', (some_cbases2 + some_lbases2))
def test_stencil(b0, b1):
N = 14
b0 = b0(N)
b1 = b1(N)
u = shenfun.TrialFunction(b1)
v = shenfun.TestFunction(b0)
B0 = inner(v, u, kind='vandermonde')
B1 = inner(v, u, kind='stencil')
C = (B0 - B1)
C.incorporate_scale()
assert ... |
def getConvection(convection):
if (convection in ('Standard', 'Divergence', 'Skewed')):
raise NotImplementedError
elif (convection == 'Vortex'):
def Conv(rhs, u_hat, work, Tp, VTp, K, u_dealias):
curl_dealias = work[(u_dealias[0], 0, False)]
curl_hat = work[(rhs[0], 0, Fa... |
def load_path(args, out_path, model_classes):
not_loaded = True
if out_path.is_file():
clusterings = torch.load(str(out_path))
clusterings = _load_clusterings(args, clusterings)
if (len((set(model_classes.keys()) - set(clusterings.keys()))) == 0):
print('loading from clusteri... |
def create_metadata_speechbrain_file(data_folder):
import pandas as pd
urban_sound_8k_metadata_csv_path = os.path.join(os.path.abspath(data_folder), 'metadata/UrbanSound8K.csv')
if (not os.path.exists(urban_sound_8k_metadata_csv_path)):
return None
urbansound_metadata_df = pd.read_csv(urban_soun... |
class FacesHQValidation(Dataset):
def __init__(self, size, keys=None, crop_size=None, coord=False):
d1 = CelebAHQValidation(size=size, keys=keys)
d2 = FFHQValidation(size=size, keys=keys)
self.data = ConcatDatasetWithIndex([d1, d2])
self.coord = coord
if (crop_size is not Non... |
def test_copy_with_new_structure_lattn(pretrain_file):
check_structure_test(pretrain_file, ['--pattn_num_layers', '1', '--lattn_d_proj', '0', '--hidden_size', '20', '--delta_embedding_dim', '10', '--pattn_d_model', '20', '--pattn_num_heads', '2'], ['--pattn_num_layers', '1', '--lattn_d_proj', '32', '--hidden_size',... |
def _get_attr_docstring(attr: ONNXAttribute) -> str:
param_doc = ':param {}: {}'.format(attr.name, attr.description)
if (attr.attribute_type is ONNXAttributeType.Unsupported):
return ''
if (attr.attribute_type is ONNXAttributeType.Tensor):
type_string = 'numpy.ndarray'
else:
type... |
def flatten_params(params):
    """Flatten a nested (frozen) parameter tree into {'a/b/c': leaf} form."""
    flat = traverse_util.flatten_dict(unfreeze(params))
    return {'/'.join(path): leaf for (path, leaf) in flat.items()}
class BlenderbotSmallForCausalLM():
    # Placeholder stub: instantiation only runs the PyTorch-availability
    # check, which raises a helpful error when torch is not installed.
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
class Normalize(object):
def __init__(self, bands_mean, bands_std):
self.bands_s1_mean = bands_mean['s1_mean']
self.bands_s1_std = bands_std['s1_std']
self.bands_s2_mean = bands_mean['s2_mean']
self.bands_s2_std = bands_std['s2_std']
self.bands_RGB_mean = bands_mean['s2_mean'... |
def test_retry_with_clean_cache(tmpdir):
data_id = 61
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
location = _get_local_path(openml_path, cache_directory)
os.makedirs(os.path.dirname(location))
with open(location, 'w'... |
def copy_func(tsk):
    """Waf task body: copy the task's single input file to its output path.

    Returns 0 on success and 1 on failure, per the waf task exit-code
    convention. The original bound ``tsk.env`` to an unused local; that
    dead code is removed.
    """
    infile = tsk.inputs[0].abspath()
    outfile = tsk.outputs[0].abspath()
    try:
        # copy2 preserves metadata (e.g. mtime), which build systems use
        # for up-to-date checks
        shutil.copy2(infile, outfile)
    except EnvironmentError:
        return 1
    # apply an explicit file mode only when the task requests one
    if tsk.chmod:
        os.chmod(outfile, tsk.chmod)
    return 0
def test_forward(unit_test_class, test_params):
functional_variant_name = test_params.functional_variant_name
cpp_tmp_folder = test_params.cpp_tmp_folder
try_remove_folder(cpp_tmp_folder)
os.mkdir(cpp_tmp_folder)
python_output = run_forward(unit_test_class, test_params)
arg_dict_file_path = comp... |
class ConjugacyClassGAP(ConjugacyClass):
def __init__(self, group, element):
try:
self._gap_group = group.gap()
self._gap_representative = element.gap()
except (AttributeError, TypeError):
try:
self._gap_group = group._gap_()
self._... |
def batch_sample_from_distribution(X, distribution_args):
    """Not yet implemented.

    Raises:
        NotImplementedError: always. The original raised the
        ``NotImplemented`` constant, which is not an exception class —
        ``raise NotImplemented(...)`` itself fails with a ``TypeError``
        instead of the intended signal.
    """
    raise NotImplementedError('Sampling from distribution in batch is not implemented.')
def common_sign2map(a, var):
ret = {'varname': a, 'ctype': getctype(var)}
if isstringarray(var):
ret['ctype'] = 'char'
if (ret['ctype'] in c2capi_map):
ret['atype'] = c2capi_map[ret['ctype']]
if (ret['ctype'] in cformat_map):
ret['showvalueformat'] = ('%s' % cformat_map[ret['ctyp... |
def test_default_pickler():
    """Pickling a complex array must round-trip to the expected ListOffsetArray form."""
    expected = ak.forms.from_dict({'class': 'ListOffsetArray', 'offsets': 'i64', 'content': 'int64'})
    assert (_pickle_complex_array_and_return_form_impl() == expected)
def load_weights(model, optimizer):
if hyp.total_init:
print('TOTAL INIT')
print(hyp.total_init)
start_iter = load(hyp.total_init, model, optimizer)
if start_iter:
print(('loaded full model. resuming from iter %08d' % start_iter))
else:
print('could no... |
def fusion_re_re(**kwargs):
    """Build a fusion model on top of a pretrained SqueezeNet 1.1 backbone."""
    backbone = squeezenet1_1(pretrained=True)
    return CreateNetFusion_re4(backbone, stack=True)
class QUESST14Dataset(Dataset):
def __init__(self, split, **kwargs):
assert (split in ['dev', 'eval'])
dataset_root = Path(kwargs['quesst2014_root'])
doc_paths = get_audio_paths(dataset_root, 'language_key_utterances.lst')
query_paths = get_audio_paths(dataset_root, f'language_key_{s... |
class Bottleneck(nn.Module):
    # NOTE(review): `nf4` and `nf3` are not defined anywhere in this view and
    # the local `nf` is unused — this looks like a mangled excerpt of code
    # that originally computed e.g. `nf4 = nf * 4`; confirm against the
    # original source before relying on it.
    def __init__(self):
        nf = 8
        super().__init__()
        # single conv block built by the project-level make_conv helper
        self.block0 = nn.Sequential(make_conv((8 + nf4), nf3, 2))
    def forward(self, x):
        # straight pass through the conv block
        x = self.block0(x)
        return x
.experimental
def test_all_to_numeric_threshold(item_features):
processor = ToNumericFeatureTransformer(threshold=1)
processor.fit(item_features.filter((sf.col('class') != 'dog')))
transformed = processor.transform(item_features)
assert (('iq' in transformed.columns) and ('color' not in transformed.colu... |
class CommonTestCases():
class CommonTokenizerTester(unittest.TestCase):
tokenizer_class = None
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def get_tokenizer(self, **kwargs):
raise NotImp... |
def logger_fn(exp_name: str, label: str, save_data: bool=False, use_tb: bool=True, use_wb: bool=True, config: Optional[dict]=None, time_delta: float=15.0) -> Logger:
tb_path = os.path.join('./tblogs', exp_name)
return make_sail_logger(exp_name=exp_name, label=label, save_data=save_data, save_dir='./logs', use_t... |
def _loadarff(ofile):
try:
(rel, attr) = read_header(ofile)
except ValueError as e:
msg = ('Error while parsing header, error was: ' + str(e))
raise ParseArffError(msg) from e
hasstr = False
for a in attr:
if isinstance(a, StringAttribute):
hasstr = True
m... |
class NMFBrain(sb.core.Brain):
def compute_forward(self, batch, stage=sb.Stage.TRAIN):
batch = batch.to(self.device)
(wavs, lens) = batch.sig
X_stft = self.hparams.compute_stft(wavs)
X_stft_power = self.hparams.compute_stft_mag(X_stft)
X_stft_tf = torch.log1p(X_stft_power)
... |
def register_Ns3BuildingsObstaclePropagationLossModel_methods(root_module, cls):
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_constructor([])
cls.add_method('GetLoss', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=T... |
def test_solve_generalized_discrete_are():
mat = _load_data('gendare__data.npz')
cases = [(np.array([[0.276923, 0.8234578, 0.950222], [0., 0.6948286, 0.], [0., 0.3170995, 0.4387444]]), np.array([[0.3815585, 0.1868726], [0.7655168, 0.4897644], [0.7951999, 0.4455862]]), np.eye(3), np.eye(2), np.array([[0.646313, ... |
class Lambda(nn.Module):
    """Wrap an arbitrary callable as an nn.Module so it composes in Sequential."""

    def __init__(self, f):
        """Store the callable to apply in forward()."""
        super(Lambda, self).__init__()
        self.f = f

    def forward(self, x):
        # delegate straight to the wrapped callable
        return self.f(x)
def fpPlusInfinity(s):
    """Return the floating-point +oo literal of FP sort *s* (Z3).

    :param s: an FPSortRef naming the target floating-point sort.
    """
    _z3_assert(isinstance(s, FPSortRef), 'sort mismatch')
    # final False is Z3_mk_fpa_inf's `negative` flag: False selects +oo
    return FPNumRef(Z3_mk_fpa_inf(s.ctx_ref(), s.ast, False), s.ctx)
_module()
class DetectoRS_ResNet(ResNet):
arch_settings = {50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}
def __init__(self, sac=None, stage_with_sac=(False, False, False, False), rfp_inplanes=None, output_img=False, pretrained=None, **kwargs):
self.s... |
class ResDisOptimizedBlock(torch.nn.Module):
def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=torch.nn.functional.relu):
super().__init__()
self.activation = activation
self.c1 = torch.nn.Conv2d(in_channels, out_channels, ksize, padding=pad)
torch.nn.init.xavi... |
def render_token_classification(tokens, options, labels):
    """Render a line-labelling prompt plus its gold 'token:label' targets.

    The local names `inputs` and `targets` are kept because `dict_of`
    builds its keys from argument names.
    """
    prefix = f'''With no explanation, label each line with {render_options(options)} preceded by ":".
'''
    body = '\n'.join(tokens)
    inputs = (prefix + body)
    targets = '\n'.join(':'.join(pair) for pair in zip(tokens, labels))
    return dict_of(inputs, targets)
class MT5Model(T5Model):
model_type = 'mt5'
config_class = MT5Config
_keys_to_ignore_on_load_missing = ['encoder\\.embed_tokens\\.weight', 'decoder\\.embed_tokens\\.weight', 'decoder\\.block\\.0\\.layer\\.1\\.EncDecAttention\\.relative_attention_bias\\.weight']
_keys_to_ignore_on_save = ['encoder\\.embe... |
def same_shape(shape1, shape2):
    """Return True iff the two shape sequences have equal length and are
    equal elementwise.

    Replaces the manual ``range(len(...))`` index loop with the idiomatic
    ``zip``/``all`` form; behavior is unchanged.
    """
    if len(shape1) != len(shape2):
        return False
    return all(a == b for a, b in zip(shape1, shape2))
class NativeCodeGenerator(CodeGenerator):
def _default_finalize(value):
return value
def _output_const_repr(self, group):
return repr(u''.join([text_type(v) for v in group]))
def _output_child_to_const(self, node, frame, finalize):
const = node.as_const(frame.eval_ctx)
if (no... |
def mapLabels(tree, mappingDict):
if (mappingDict == None):
return
for st in tree.subtrees():
label = st.label()
if (not (label.lower() == 'edu')):
(relation, nuc) = getRelation(label)
if (not (relation in mappingDict)):
sys.exit(((('Unknow label: ... |
def add_args(parser):
parser.add_argument('-o', metavar='filename', action='store', dest='output_filename', default='out.vtk', help=helps['filename'])
parser.add_argument('-f', '--format', metavar='format', action='store', type=str, dest='format', default=None, help=helps['format'])
parser.add_argument('-a'... |
.parametrize('observation_shape', [(100,), ((100,), (200,))])
.parametrize('batch_size', [32])
def test_value_function(observation_shape: Shape, batch_size: int) -> None:
encoder = DummyEncoder(observation_shape)
v_func = ValueFunction(encoder, encoder.get_feature_size())
x = create_torch_observations(obser... |
def maybe_check_py_error(code, check_py_exception, pos, nogil):
    """Emit a Python error-state check after a call, when requested.

    Under nogil the generated C must reacquire the GIL before probing the
    error indicator, hence the dedicated __Pyx_ErrOccurredWithGIL helper.
    """
    if not check_py_exception:
        return
    probe = '__Pyx_ErrOccurredWithGIL()' if nogil else 'PyErr_Occurred()'
    code.putln(code.error_goto_if(probe, pos))
def reference_game_train(gen_func):
    """Return a factory that builds a reference game over the training set."""
    def generate_refgame_train(listener=False):
        # training instances are fetched lazily, at call time
        instances = get_training_instances(listener=listener)
        return reference_game(instances, gen_func, listener=listener)
    return generate_refgame_train
class SphericalBasisLayer(torch.nn.Module):
def __init__(self, num_spherical, num_radial, cutoff=5.0, envelope_exponent=5):
super(SphericalBasisLayer, self).__init__()
assert (num_radial <= 64)
self.num_spherical = num_spherical
self.num_radial = num_radial
self.cutoff = cuto... |
_utils.test(arch=ti.cpu)
def test_primitives():
x = ti.field(dtype=ti.i16)
y = ti.field(dtype=ti.f32)
z = ti.field(dtype=ti.f64)
p = ti.field(dtype=ti.f32)
q = ti.field(dtype=ti.f32)
r = ti.field(dtype=ti.f64)
n1 = ti.root.dense(ti.i, 32)
n1.place(x)
n2 = ti.root.dense(ti.i, 32)
... |
def bar():
    # Exercise the Score-P instrumenter toggle: the first foo() call is
    # recorded, the second is suppressed.
    with scorep.instrumenter.enable():
        foo()
    with scorep.instrumenter.disable():
        foo()
def trace_module(mod, inputs, optimize=None, check_trace=True, check_inputs=None, check_tolerance=1e-05, strict=True, _force_outplace=False, _module_class=None, _compilation_unit=_python_cu):
if (not _enabled):
return mod
if (optimize is not None):
warnings.warn('`optimize` is deprecated and has... |
class WeightedMinFill(BaseEliminationOrder):
    """Elimination ordering scored by the weighted min-fill heuristic."""

    def cost(self, node):
        """Sum, over all pairs of *node*'s neighbors in the moralized graph,
        of the product of the pair's cardinalities in the Bayesian model."""
        neighbor_pairs = combinations(self.moralized_model.neighbors(node), 2)
        card = self.bayesian_model.get_cardinality
        return sum(card(u) * card(v) for (u, v) in neighbor_pairs)
class EfficientNet(nn.Module):
def __init__(self, block_args, num_classes=10, num_features=1280, in_chans=3, stem_size=32, channel_multiplier=1.0, channel_divisor=8, channel_min=None, output_stride=32, pad_type='', fix_stem=False, act_layer=nn.ReLU, drop_rate=0.0, drop_path_rate=0.0, se_kwargs=None, norm_layer=nn.B... |
class InpaintingModel(BaseModel):
def __init__(self, config):
super(InpaintingModel, self).__init__('InpaintingModel', config)
generator = InpaintGenerator()
discriminator = Discriminator(in_channels=3, use_sigmoid=(config.GAN_LOSS != 'hinge'))
if (len(config.GPU) > 1):
g... |
class TubeMaskingGenerator():
def __init__(self, input_size, mask_ratio):
(self.frames, self.height, self.width) = input_size
self.num_patches_per_frame = (self.height * self.width)
self.total_patches = (self.frames * self.num_patches_per_frame)
self.num_masks_per_frame = int((mask_r... |
def soft_augment(candidate_data=None, num_mixup=None, hyper_alpha=8, score_limit_upper=500, score_limit_low=0):
global GUID_COUNT
print('Implementing soft mixup augmentation, which may take hundreds of seconds')
time_start = time.time()
new_sample_count = 0
mixup_data = []
mixup_label = []
c... |
class DisorderLabelingFunctions(object):
def __init__(self, data_root):
self.data_root = data_root
self.class_map = self.load_class_map()
def load_class_map(self):
sem_types = list(itertools.chain.from_iterable(load_sem_groups(f'{self.data_root}/SemGroups.txt', groupby='GUI').values()))
... |
def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format: bool):
if (not is_torch_available()):
raise Exception('Cannot convert because PyTorch is not installed. Please install torch first.')
import torch
from torch.onnx import export
print(f'Using framework PyTorch: {tor... |
.parametrize('observation_shape', [(100,)])
.parametrize('batch_size', [32])
.parametrize('eps', [32])
def test_standard_observation_scaler_with_transition_picker(observation_shape: Sequence[int], batch_size: int, eps: float) -> None:
shape = (batch_size, *observation_shape)
observations = np.random.random(shap... |
def split_multi_answer(ans, sep=';', close=True):
answers = ans.strip().split(sep)
split_answers = []
for a in answers:
a = a.strip()
if len(a):
if close:
if (a[(- 1)] != '.'):
split_answers.append((a + '.'))
else:
... |
# NOTE(review): the bare `_model` below looks like a mangled decorator
# (presumably `@register_model` with the `@register` part lost during
# extraction) — confirm against the original source.
_model
def tf_mixnet_s(pretrained=False, **kwargs):
    """MixNet-Small with TF-ported weights: forces the TF batch-norm epsilon
    and TF 'same' padding so the ported weights behave as trained."""
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_mixnet_s('tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs)
    return model
_model_architecture('transformer_lm', 'transformer_lm_gpt2_small')
def transformer_lm_gpt2_small(args):
args.decoder_embed_dim = safe_getattr(args, 'decoder_embed_dim', 1024)
args.decoder_ffn_embed_dim = safe_getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_layers = safe_getattr(args, 'decoder_laye... |
def preprocess_for_train(image, output_height, output_width, padding=_PADDING):
tf.image_summary('image', tf.expand_dims(image, 0))
image = tf.to_float(image)
if (padding > 0):
image = tf.pad(image, [[padding, padding], [padding, padding], [0, 0]])
distorted_image = tf.random_crop(image, [output... |
class Hidden2Gaussian(nn.Module):
def __init__(self, input_size, output_size, is_lstm=False, has_bias=True):
super(Hidden2Gaussian, self).__init__()
if is_lstm:
self.mu_h = nn.Linear(input_size, output_size, bias=has_bias)
self.logvar_h = nn.Linear(input_size, output_size, bi... |
class Net(object):
_net_names_used = set()
operator_registry_ = {}
def current_prefix():
from caffe2.python.net_builder import NetBuilder
builder = NetBuilder.current(required=False)
return (builder.name if builder else '')
def _get_next_net_name(basename):
name = basenam... |
class PieriFactors_type_D_affine(PieriFactors_affine_type):
def __init__(self, W):
Parent.__init__(self, category=FiniteEnumeratedSets())
self.W = W
_method
def maximal_elements_combinatorial(self):
n = self.W.n
rho = (self.W.from_reduced_word(range(2, n)) * self.W.from_reduc... |
class RecurrentCategorical(Distribution):
def __init__(self, dim):
self._cat = Categorical(dim)
self._dim = dim
def dim(self):
return self._dim
def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
old_prob_var = old_dist_info_vars['prob']
new_prob_var = new_dist_... |
def annotate_example(example, table):
ann = {'table_id': example['table_id']}
ann['question'] = annotate(example['question'])
ann['table'] = {'header': [annotate(h) for h in table['header']]}
ann['query'] = sql = copy.deepcopy(example['sql'])
for c in ann['query']['conds']:
c[(- 1)] = annota... |
def echelon_QQ(n=100, min=0, max=9, system='sage'):
if (system == 'sage'):
A = random_matrix(ZZ, n, (2 * n), x=min, y=(max + 1)).change_ring(QQ)
t = cputime()
v = A.echelon_form()
return cputime(t)
elif (system == 'magma'):
code = ('\nn := %s;\nA := RMatrixSpace(RationalF... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.