code stringlengths 101 5.91M |
|---|
class BaseDetector(nn.Module, metaclass=ABCMeta):
def __init__(self):
super(BaseDetector, self).__init__()
self.fp16_enabled = False
def with_neck(self):
return (hasattr(self, 'neck') and (self.neck is not None))
def with_shared_head(self):
return (hasattr(self, 'roi_head') a... |
def tokenize(sent, vocab, depth=FLAGS.num_layers):
align = pow(2, (depth - 1))
token_ids = nlc_data.sentence_to_token_ids(sent, vocab, get_tokenizer(FLAGS))
ones = ([1] * len(token_ids))
pad = ((align - len(token_ids)) % align)
token_ids += ([nlc_data.PAD_ID] * pad)
ones += ([0] * pad)
sourc... |
class Evaluator(object):
def __init__(self, logger=None, **kwargs):
self.logger = logger
self.kwargs = kwargs
if ('operating_points_path' in kwargs):
self.rad_perf = pd.read_csv(kwargs['operating_points_path'])
else:
self.rad_perf = None
self.set_eval_... |
def augment(image_current, image_next):
brightness = np.random.uniform(0.5, 1.5)
img1 = ImageEnhance.Brightness(image_current).enhance(brightness)
img2 = ImageEnhance.Brightness(image_next).enhance(brightness)
color = np.random.uniform(0.5, 1.5)
img1 = ImageEnhance.Brightness(img1).enhance(color)
... |
def write_tfrecords(data, num_shards, output_dir, split_name, resize_max_side=0, check_bad_images=False):
if resize_max_side:
logging.warning('Resize max side images to {}'.format(resize_max_side))
tfrecord_writer = TFrecordWriter(n_samples=len(data), n_shards=num_shards, output_dir=output_dir, prefix=s... |
class ReductionData(SageObject):
def __init__(self, pari_result, P, Q, Pmin, Qmin, minimal_disc, local_data, conductor):
self.pari_result = pari_result
self.P = P
self.Q = Q
self.Pmin = Pmin
self.Qmin = Qmin
self.minimal_disc = minimal_disc
self.local_data = l... |
class BenchmarkSuites():
def __init__(self):
self._suites = []
for suite in benchmark_suites:
self._suites.append(suite())
def run(self):
for suite in self._suites:
suite.run()
def save(self, benchmark_dir='./'):
for suite in self._suites:
... |
class GPT2Tokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, merges_file, errors='replace', unk_token='<|endoftext|>', bos_token='<|end... |
def pointing(gen_mapping, gt_mapping, type_ids=None):
pointings = []
count = 0
if type_ids:
type_ids = set([str(int(id.split('.')[0])) for id in type_ids])
for (id, (gen_before, gen_after)) in gen_mapping.items():
if (type_ids and (id not in type_ids)):
continue
(gt_b... |
def register_Ns3LteFrNoOpAlgorithm_methods(root_module, cls):
cls.add_constructor([param('ns3::LteFrNoOpAlgorithm const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetLteFfrRrcSapProvider', 'ns3::LteFfrRrcSapProvider *', [], is_virtual=True)
cls.add_method('GetLteFfrSapProvider', 'ns3::LteFfrS... |
def get_iterator(dataset, vocab_table, sos, eos, batch_size=8, num_parallel_calls=32, random_seed=42):
output_buffer_size = (batch_size * 1000)
sos_id = tf.cast(vocab_table.lookup(tf.constant(sos)), tf.int32)
eos_id = tf.cast(vocab_table.lookup(tf.constant(eos)), tf.int32)
dataset = dataset.shuffle(outp... |
def is_short_form(text, min_length=2):
accept_rgx = '[0-9A-Z-]{2,8}[s]*'
reject_rgx = '([0-9]+/[0-9]+|[0-9]+[-][0-7]+)'
keep = (re.search(accept_rgx, text) is not None)
keep &= (re.search(reject_rgx, text) is None)
keep &= (not text.strip('-').isdigit())
keep &= (',' not in text)
keep &= (le... |
def train_size_if_remove_in_otherset(data_sizes, mess_up_train):
counts_in_other = count_train_in_other_set(mess_up_train)
remain_sizes = []
for (direction, count) in counts_in_other.items():
remain_sizes.append((direction, (data_sizes[direction] - count), data_sizes[direction], count, ((100 * count... |
class ErnieForPreTraining(metaclass=DummyObject):
    """Import-guard stub standing in for the real ErnieForPreTraining class."""
    # Backends that must be installed before the real class is usable.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Presumably raises a helpful error when torch is unavailable
        # (matches the transformers dummy-object pattern) — confirm against
        # the definition of requires_backends elsewhere in the project.
        requires_backends(self, ['torch'])
def inner_loop(model, optim, imgs, poses, hwf, bound, num_samples, raybatch_size, inner_steps):
pixels = imgs.reshape((- 1), 3)
(rays_o, rays_d) = get_rays_shapenet(hwf, poses)
(rays_o, rays_d) = (rays_o.reshape((- 1), 3), rays_d.reshape((- 1), 3))
num_rays = rays_d.shape[0]
for step in range(inner_... |
class SLU(sb.Brain):
def compute_forward(self, batch, stage):
batch = batch.to(self.device)
(wavs, wav_lens) = batch.sig
(tokens_bos, tokens_bos_lens) = batch.tokens_bos
if (stage == sb.Stage.TRAIN):
if hasattr(self.hparams, 'env_corrupt'):
wavs_noise = se... |
def get_dataset_name(mode):
    """Map a dataset mode string to its dataset class name.

    Args:
        mode: Dataset identifier; one of 'ade20k' or 'cityscapes'.

    Returns:
        The dataset class name for the given mode.

    Raises:
        ValueError: If ``mode`` is not a known dataset regime.
    """
    if (mode == 'ade20k'):
        return 'Ade20kDataset'
    if (mode == 'cityscapes'):
        return 'CityscapesDataset'
    # Bug fix: the original constructed the ValueError without raising it,
    # so unknown modes silently returned None.
    raise ValueError(('There is no such dataset regime as %s' % mode))
class RedisCache(BaseCache):
def __init__(self, conn):
self.conn = conn
def get(self, key):
return self.conn.get(key)
def set(self, key, value, expires=None):
if (not expires):
self.conn.set(key, value)
else:
expires = (expires - datetime.utcnow())
... |
def test_IndexedOptionArray():
content = ak.highlevel.Array([1.1, 2.2, 3.3, 4.4, 5.5]).layout
index = ak.index.Index64(np.array([4, 2, (- 1), (- 1), 1, 0, 1]))
array = ak.highlevel.Array(ak.contents.IndexedOptionArray(index, content))
assert (array.to_list() == [5.5, 3.3, None, None, 2.2, 1.1, 2.2])
... |
def test_deprecated_pickleable():
    """Pickling the deprecated window object must round-trip to the same object."""
    restored = pickle.loads(pickle.dumps(dep_hann))
    assert_(restored is dep_hann)
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, 'src')
main_file = glob.glob(os.path.join(extensions_dir, '*.cpp'))
source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp'))
source_cuda = glob.glob(os.path.join(extensions... |
class StorageTypeAssignable(StorageType):
    """Storage type represented in C by a plain, assignable value.

    The declaration type and the local type are both the wrapped C type
    name, and values can be duplicated with a simple assignment.
    """

    def __init__(self, ty):
        StorageType.__init__(self)
        # Underlying C type name used for both declaration and locals.
        self.type = ty

    def cheap_copies(self):
        # Plain values copy by assignment — no deep copy needed.
        return True

    def c_decl_type(self):
        return self.type

    def c_local_type(self):
        return self.type
def test_conventions():
data_names = list(KEYPOINTS_FACTORY.keys())
f = 10
n_person = 3
with pytest.raises(KeyError):
(keypoints_dst, mask) = convert_kps(np.zeros((f, 17, 3)), '1', '2')
with pytest.raises(AssertionError):
(keypoints_dst, mask) = convert_kps(np.zeros((17, 3)), 'coco',... |
def calc_stats(df, col):
stats = df.groupby(['method'])[col].agg(['mean', 'count', 'std'])
ci95_hi = []
ci95_lo = []
for i in stats.index:
(m, c, s) = stats.loc[i]
ci95_hi.append((m + ((1.96 * s) / math.sqrt(c))))
ci95_lo.append((m - ((1.96 * s) / math.sqrt(c))))
stats['ci95_... |
def validate(val_loader, model, criterion, epoch, writer):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
world_size = args.world_size
rank = args.rank
sync_bn_stat(model, world_size)
with torch.no_grad():
end = ti... |
def encode_mock(segment, x2, x3, x4, x5, x6, x7, glosses):
    """Mock encoder: known glosses pass through whole, unknowns split in half.

    Returns a 1-tuple ``(segment,)`` when *segment* is in *glosses*,
    otherwise a 2-tuple of the segment's first and second halves.
    The ``x2``..``x7`` parameters are accepted but unused here.
    """
    if segment not in glosses:
        midpoint = len(segment) // 2
        return (segment[:midpoint], segment[midpoint:])
    return (segment,)
def ones(*sizes, torch_device=None, **kwargs):
    """torch.ones wrapper that falls back to the module-level default device."""
    target = device if torch_device is None else torch_device
    return torch.ones(*sizes, **kwargs, device=target)
class ErnieMPreTrainedModel(metaclass=DummyObject):
    """Import-guard stub standing in for the real ErnieMPreTrainedModel class."""
    # Backends that must be installed before the real class is usable.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Presumably raises a helpful error when torch is unavailable
        # (matches the transformers dummy-object pattern) — confirm against
        # the definition of requires_backends elsewhere in the project.
        requires_backends(self, ['torch'])
def _check_output_format(output_format: str) -> ParsedTargetFormat:
result = ParsedTargetFormat()
target_tokens = split(output_format, JUMP)
remain_tokens = deepcopy(target_tokens)
(result, remain_tokens) = _figure_output_format_timezone(result, target_tokens, remain_tokens)
(result, remain_tokens) ... |
def test_logsumexp_shape():
    """Reduced axes must disappear from logsumexp output, for int and tuple axes."""
    a = np.ones((1, 2, 3, 4))
    weights = np.ones_like(a)
    single_axis = logsumexp(a, axis=2, b=weights)
    assert_equal(single_axis.shape, (1, 2, 4))
    multi_axis = logsumexp(a, axis=(1, 3), b=weights)
    assert_equal(multi_axis.shape, (1, 3))
def _get_version_and_resources(item):
assert ('version' in item), "'version' key should be present in zoo config {}".format(item._get_full_key(''))
assert ('resources' in item), "'resources' key should be present in zoo config {}".format(item._get_full_key(''))
return (item.version, item.resources) |
def load_model_ensemble(filenames, arg_overrides: Optional[Dict[(str, Any)]]=None, task=None, strict=True, suffix='', num_shards=1, state=None):
assert (not (strict and (num_shards > 1))), 'Cannot load state dict with strict=True and checkpoint shards > 1'
(ensemble, args, _task) = load_model_ensemble_and_task(... |
class PolynomialQuotientRingFactory(UniqueFactory):
def create_key(self, ring, polynomial, names=None):
if (not isinstance(ring, PolynomialRing_commutative)):
raise TypeError('ring must be a polynomial ring')
if (not isinstance(polynomial, polynomial_element.Polynomial)):
rai... |
class DifferentialPrecisionGeneric(SageObject):
def __init__(self, p, label):
self._p = p
self._label = label
self._elements = []
self._matrix = {}
self._collected_references = []
self._marked_for_deletion = []
self._approx_zero = pRational(p, ZZ(0))
s... |
def make_gcn_model():
    """Build the two-layer ReLU GCN used by these experiments (full-batch generator)."""
    return GCN(
        layer_sizes=[16, 16],
        activations=['relu', 'relu'],
        generator=fullbatch_generator,
        dropout=0.4,
    )
class OperationErrorContext(ErrorContext):
_width = (80 - 8)
def any_backend_is_delayed(self, iterable: Iterable, *, depth: int=1, depth_limit: int=2) -> bool:
from awkward._backends.dispatch import backend_of_obj
for obj in iterable:
backend = backend_of_obj(obj, default=None)
... |
def colored(msg, color=None, style=None):
colors = {'red': colorama.Fore.RED, 'green': colorama.Fore.GREEN, 'cyan': colorama.Fore.CYAN, 'yellow': colorama.Fore.YELLOW, 'magenta': colorama.Fore.MAGENTA, None: ''}
styles = {'bright': colorama.Style.BRIGHT, 'dim': colorama.Style.DIM, None: ''}
pre = (colors[co... |
class CartanType(cartan_type.CartanType_decorator, cartan_type.CartanType_crystallographic):
def __init__(self, type):
if (not type.is_crystallographic()):
raise NotImplementedError('only implemented for crystallographic Cartan types')
cartan_type.CartanType_decorator.__init__(self, type... |
def data_iterator_mnist(batch_size, train=True, rng=None, shuffle=True, with_memory_cache=False, with_file_cache=False):
    """Create a batched iterator over the MNIST data source.

    Forwards the shuffle/rng options to the source and the caching flags to
    the generic ``data_iterator`` factory.
    """
    source = MnistDataSource(train=train, shuffle=shuffle, rng=rng)
    return data_iterator(source, batch_size, rng, with_memory_cache, with_file_cache)
def watershed_ift(input, markers, structure=None, output=None):
input = numpy.asarray(input)
if (input.dtype.type not in [numpy.uint8, numpy.uint16]):
raise TypeError('only 8 and 16 unsigned inputs are supported')
if (structure is None):
structure = morphology.generate_binary_structure(input... |
def Linear(in_features, out_features, dropout=0.0):
    """nn.Linear with dropout-compensated normal weight init and zero bias.

    Weights are drawn from N(0, sqrt((1 - dropout) / in_features)); the bias
    starts at zero.
    """
    layer = nn.Linear(in_features, out_features)
    std = math.sqrt((1 - dropout) / in_features)
    layer.weight.data.normal_(mean=0, std=std)
    layer.bias.data.zero_()
    return layer
def write_pkl(content, path):
    """Serialize *content* to *path* with pickle.

    Falls back to protocol 4 when the default protocol overflows (objects
    larger than 4 GiB need protocol >= 4).

    Fix: the original retried ``pickle.dump`` on the same handle without
    rewinding, so the retry was appended after the partial first write,
    leaving a corrupt file. Rewind and truncate before retrying.
    """
    with open(path, 'wb') as f:
        print(('Pickle is written on %s' % path))
        try:
            pickle.dump(content, f)
        except OverflowError:
            # Discard the partial write before re-dumping with protocol 4.
            f.seek(0)
            f.truncate()
            pickle.dump(content, f, protocol=4)
def test_arraytype_2():
    """A stringified ArrayType carrying a custom parameter must round-trip through the parser."""
    tagged = ak.with_parameter(ak.Array([[1, 2, 3], [], [4, 5]]), 'wonky', 'string')
    text = str(tagged.type)
    parsedtype = ak.types.from_datashape(text, highlevel=True)
    assert isinstance(parsedtype, ak.types.ArrayType)
    assert str(parsedtype) == text
def test_display_statistic(capsys, swagger_20, execution_context, operation, response):
success = models.Check('not_a_server_error', models.Status.success, response, 0, models.Case(operation))
failure = models.Check('not_a_server_error', models.Status.failure, response, 0, models.Case(operation))
single_tes... |
def _vggish_from_torch_hub(urls, *args, **kwargs):
    """Instantiate the VGGish upstream expert with checkpoints fetched from the given URLs."""
    ckpt = {
        'vggish': _load_state_dict_from_url(urls['vggish']),
        'pca': _load_state_dict_from_url(urls['pca']),
    }
    kwargs['ckpt'] = ckpt
    return _UpstreamExpert(*args, **kwargs)
def tolookup(layout, positions):
if isinstance(layout, ak.contents.EmptyArray):
return tolookup(layout.to_NumpyArray(np.dtype(np.float64)), positions)
elif isinstance(layout, ak.contents.NumpyArray):
if (len(layout.shape) == 1):
return NumpyLookup.tolookup(layout, positions)
... |
class ForceDictObservation(EnvironmentWrapper):
    """Wrapper forwarding reset/step with ``project=False`` so raw (dict) observations pass through."""

    def __init__(self, env):
        super().__init__(env)
        self.env = env
        # Exposed for callers; not consumed inside this class.
        self.time_limit = 300

    def reset(self):
        observation = self.env.reset(project=False)
        return observation

    def step(self, action):
        transition = self.env.step(action, project=False)
        return transition
def get_layer(x, state, with_bn=False):
if (state.Layer_type == 'dense'):
if (with_bn is True):
actv_fn = state.Layer_attributes.pop('activation', 'linear')
x = Dense(**state.Layer_attributes)(x)
x = BatchNormalization()(x)
x = Activation(actv_fn)(x)
... |
.parametrize('device', ['cpu', 'cuda'])
.parametrize('unit', [0, 1, 2])
def test_compatibility(device, unit, L=5, B=2):
entropy = diffsptk.Entropy(unit)
U.check_compatibility(device, entropy, [], f'nrand -l {(B * L)} -d 0.5 | sopr -ABS', f'entropy -l {L} -o {unit} -f', [], dx=L)
U.check_differentiable(devic... |
def list_plot3d_array_of_arrays(v, interpolation_type, **kwds):
    """Plot a list of equal-length rows by packing it into an RDF matrix and delegating to list_plot3d."""
    rows, cols = len(v), len(v[0])
    m = matrix(RDF, rows, cols, v)
    graphic = list_plot3d(m, interpolation_type, **kwds)
    graphic._set_extra_kwds(kwds)
    return graphic
_module()
class GFL(SingleStageDetector):
'Implementation of `GFL <
def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) |
class TUndirNet(object):
thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_snap.TUndirNet_swiginit(self, _snap.new_TUndirNet(*args))
def Save(self, SOut):
return _snap.TUndirNet_Sa... |
_connect.numpy.implements('nanargmax')
def _nep_18_impl_nanargmax(a, axis=None, out=UNSUPPORTED, *, keepdims=False):
    """NEP-18 dispatch target for ``nanargmax``; ``out`` is accepted but unsupported."""
    result = nanargmax(a, axis=axis, keepdims=keepdims)
    return result
def jsonify(obj, outFile):
    """Serialize *obj* to *outFile* as pretty-printed, key-sorted UTF-8 JSON.

    Fix: the original handed an unclosed ``codecs.open`` handle to
    ``json.dump``, relying on garbage collection to flush and close the
    file; a context manager guarantees the data is written and the handle
    is released.
    """
    with open(outFile, 'w', encoding='utf-8') as f:
        json.dump(obj, f, separators=(',', ':'), indent=4, sort_keys=True)
def _get_triplet_mask(labels):
indices_equal = torch.eye(labels.size(0)).byte()
print(indices_equal)
if labels.is_cuda:
indices_equal = indices_equal.cuda()
indices_not_equal = (~ indices_equal)
i_not_equal_j = torch.unsqueeze(indices_not_equal, 2)
i_not_equal_k = torch.unsqueeze(indices... |
_model('xm_transformer')
class XMTransformerModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
def add_args(cls, parser):
Wav2VecEncoderWithAdaptor.add_args(parser)
add_decoder_args(parser)
def build_encoder(cls, args):
... |
def test_paramset_constrained_custom_factors():
pset = paramsets.constrained_by_poisson(name='foo', is_scalar=False, n_parameters=5, inits=[0, 1, 2, 3, 4], bounds=[((- 1), 1), ((- 2), 2), ((- 3), 3), ((- 4), 4)], fixed=False, auxdata=[0, 0, 0, 0, 0], factors=[100, 400, 900, 1600, 2500])
assert (pset.suggested_i... |
def centropyd(x, y, base=2):
    """Discrete conditional entropy H(X|Y) = H(X,Y) - H(Y)."""
    (x, y) = flatten(*to_np_array(x, y))
    joint = entropyd(zip(x, y), base)
    marginal = entropyd(y, base)
    return joint - marginal
class BotConfig(FixedKeyConfigDictionary):
    """Fixed-key configuration for a Converse bot, loaded from a task YAML file."""

    # Keys that may be absent from the YAML, with their defaults.
    _OPTIONAL_ATTRIBUTES = {'text_bot': True, 'bot_name': 'your Converse bot'}

    def __init__(self, taskYamlFile: str):
        # Pre-declare attributes; presumably populated by the parent
        # constructor from the loaded dictionary — confirm in the base class.
        self.bot_name = None
        self.text_bot = None
        super().__init__(load_bot_config(taskYamlFile))
.parametrize('seed', [313])
.parametrize('axis', [1, 3])
.parametrize('decay_rate', [0.9])
.parametrize('eps', [1e-05])
.parametrize('nonlinearity', ['relu'])
.parametrize('output_stat, batch_stat', [[False, True]])
.parametrize('add', [True, False])
.parametrize('ctx, func_name', ctxs)
.parametrize('no_scale, no_bias'... |
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
if (os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and (not training_a... |
def remove_color_codes(s: str) -> str:
    """Strip ANSI escape sequences (colors, cursor movement) from *s*.

    Fix: both character classes in the pattern had lost their ``@``
    bounds (apparent transcription mangling: ``[-Z\\\\-_]`` and ``[-~]``),
    so common sequences such as ``\\x1b[31m`` were no longer matched.
    This restores the canonical ECMA-48 escape/CSI pattern
    ``\\x1B(?:[@-Z\\\\-_]|\\[[0-?]*[ -/]*[@-~])``.
    """
    ansi_escape = re.compile('\\x1B(?:[@-Z\\\\-_]|\\[[0-?]*[ -/]*[@-~])')
    return ansi_escape.sub('', s)
class VAE(nn.Module):
def __init__(self, state_dim, action_dim, latent_dim, max_action, device):
super(VAE, self).__init__()
self.e1 = nn.Linear((state_dim + action_dim), 750)
self.e2 = nn.Linear(750, 750)
self.mean = nn.Linear(750, latent_dim)
self.log_std = nn.Linear(750, l... |
class ValueNetwork(nn.Module):
def __init__(self, state_shape, action_shape, hidden_size, v_min, v_max, num_atoms, device='cuda'):
super().__init__()
self.linear1 = nn.Linear((state_shape[0] + action_shape[0]), hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.line... |
class TestChannelBackpropStats(serial.SerializedTestCase):
(size=st.integers(7, 10), inputChannels=st.integers(1, 10), batchSize=st.integers(1, 3), **hu.gcs)
(deadline=10000)
def testChannelBackpropStats(self, size, inputChannels, batchSize, gc, dc):
op = core.CreateOperator('ChannelBackpropStats', ... |
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = PerceiverTokenizer
test_rust_tokenizer = False
def setUp(self):
super().setUp()
tokenizer = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname)
_property
def perceiver_toke... |
def SaveMatlabSparseMtx_PNGraph(Graph, OutFNm):
    """Thin wrapper delegating to the SWIG-generated _snap binding of the same name."""
    result = _snap.SaveMatlabSparseMtx_PNGraph(Graph, OutFNm)
    return result
class SummarizationDataProcessingTest(unittest.TestCase):
def setUp(self):
self.block_size = 10
def test_fit_to_block_sequence_too_small(self):
sequence = [1, 2, 3, 4]
expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(sequence, self.block_size, 0),... |
def grid_search_gradient_boosting(config, num_files):
interval_multiplier = prediction_interval_multiplier[str(config['exp_params']['prediction_interval'])]
np.random.seed(config['logging_params']['manual_seed'])
saved_folder = os.path.join(config['logging_params']['save_dir'], config['logging_params']['nam... |
class NonLocalAttention(nn.Module):
def __init__(self, in_channels=256, inter_channels=None, bn_layer=True):
super(NonLocalAttention, self).__init__()
self.in_channels = in_channels
self.inter_channels = inter_channels
if (self.inter_channels is None):
self.inter_channels... |
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('inshape, kernel, pad, stride, dilation, divisor', [((2, 2, 10, 10), (3, 2), (3, 0), (1, 2), (2, 1), 1), ((2, 3, 10, 10), (3, 2), (3, 0), (1, 2), (2, 1), 3), ((2, 4, 10, 10), (3, 2), (0, 0), (1, 1), (1, 1), 1), ((2, 6, 10, 10), (3, 2), (0, 0)... |
class PyPrint(gdb.Command):
def __init__(self):
gdb.Command.__init__(self, 'py-print', gdb.COMMAND_DATA, gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
name = str(args)
frame = Frame.get_selected_python_frame()
if (not frame):
print('Unable to locate python fram... |
def is_fpga_array(array: dt.Data):
    """True iff *array* is a ``dt.Array`` whose storage is one of the FPGA storage types."""
    if not isinstance(array, dt.Array):
        return False
    return array.storage in _FPGA_STORAGE_TYPES
class FDTD(Element):
def __init__(self, **kwargs):
Element.__init__(self, 'FDTD')
d = {}
for k in kwargs:
if (type(kwargs[k]) != str):
d[k] = str(kwargs[k])
else:
d[k] = kwargs[k]
self.attrib.update(d)
def __repr__(self):
... |
def module_inputs_torch_nn_CELU(module_info, device, dtype, requires_grad, **kwargs):
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
return [ModuleInput(constructor_input=FunctionInput(alpha=2.0), forward_input=FunctionInput(make_input(shape=(3, 2, 5))), reference_fn=... |
def _neutralize(word):
    # Collapse special token classes to canonical placeholders.
    # NOTE(review): both prefixes below are the EMPTY string, so the first
    # branch always matches and the function unconditionally returns
    # 'number'. The original prefix markers (likely number/statement
    # sentinel tokens) appear to have been lost in transcription — restore
    # them before relying on this function.
    if word.startswith(''):
        return 'number'
    if word.startswith(''):
        return 'statement'
    return word
def threshold(image, footprint, out=None, mask=None, shift_x=False, shift_y=False, shift_z=False):
np_image = np.asanyarray(image)
if (np_image.ndim == 2):
return _apply_scalar_per_pixel(generic_cy._threshold, image, footprint, out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
elif (np_image.ndi... |
def random_rotate_image(image):
    """Rotate *image* by a random angle drawn from N(0, 8) degrees, bicubic resampling."""
    angle = np.random.normal(0.0, 8.0)
    rotated = misc.imrotate(image, angle, 'bicubic')
    return rotated
def _topy(arr):
    """Convert a symbolic expression — or a list of them — to C++ expression string(s)."""
    def _convert(expr):
        return cppunparse.pyexpr2cpp(symbolic.symstr(expr, cpp_mode=True))
    if isinstance(arr, list):
        return [_convert(element) for element in arr]
    return _convert(arr)
class TransformerModelBase(FairseqEncoderDecoderModel):
def __init__(self, cfg, encoder, decoder):
super().__init__(encoder, decoder)
self.cfg = cfg
self.supports_align_args = True
def add_args(cls, parser):
gen_parser_from_dataclass(parser, TransformerConfig(), delete_default=Fa... |
_iterator
class LimitedStream(io.IOBase):
def __init__(self, stream, limit):
self._read = stream.read
self._readline = stream.readline
self._pos = 0
self.limit = limit
def __iter__(self):
return self
def is_exhausted(self):
return (self._pos >= self.limit)
... |
def _get_pronunciation(s):
    """Split *s* on spaces; return the normalized pronunciation, or None if any symbol is invalid."""
    parts = s.strip().split(' ')
    if all(part in _valid_symbol_set for part in parts):
        return ' '.join(parts)
    return None
def _serialize_openapi3(definitions: DefinitionList) -> Generator[((Callable | None), None, None)]:
for definition in definitions:
name = definition['name']
if ('content' in definition):
options = iter(definition['content'].keys())
media_type = next(options, None)
... |
class _SequenceProcessor():
def __init__(self, tensor_schema: TensorSchema, query_id_column: str, item_id_column: str, grouped_interactions: PandasDataFrame, query_features: Optional[PandasDataFrame]=None, item_features: Optional[PandasDataFrame]=None) -> None:
self._tensor_schema = tensor_schema
se... |
def _griffin_lim(S):
    """Reconstruct a waveform from magnitude spectrogram *S* via Griffin-Lim.

    Starts from uniformly random phases and, for ``hparams.griffin_lim_iters``
    iterations, replaces them with the phase of the STFT of the current
    estimate.

    Fix: ``np.complex`` (an alias of the builtin) was deprecated in NumPy
    1.20 and removed in 1.24; use ``np.complex128`` — the dtype the alias
    resolved to — instead.
    """
    angles = np.exp((2j * np.pi) * np.random.rand(*S.shape))
    S_complex = np.abs(S).astype(np.complex128)
    y = _istft(S_complex * angles)
    for _ in range(hparams.griffin_lim_iters):
        angles = np.exp(1j * np.angle(_stft(y)))
        y = _istft(S_complex * angles)
    return y
class A000225(SloaneSequence):
    """OEIS A000225: a(n) = 2^n - 1 (the Mersenne numbers)."""

    def __init__(self):
        SloaneSequence.__init__(self, offset=0)

    def _repr_(self):
        return '2^n - 1.'

    def _eval(self, n):
        value = (2 ** n) - 1
        return ZZ(value)
('simple_gradient')
class GradientSaliency(SaliencyScorer):
def __init__(self, model):
self._embedding_layer = {}
super().__init__(model)
self.init_from_model()
def init_from_model(self):
logging.info('Initialising from Model .... ')
model = self._model['model']
_... |
def parse_win_mp_grid(f):
    """Extract the Monkhorst-Pack grid from a .win file handle.

    Returns the list of ints following ``mp_grid :`` on the first matching
    line, or None if no line mentions ``mp_grid``.
    """
    for raw in f.readlines():
        if 'mp_grid' not in raw:
            continue
        value = raw.split(':')[1]
        tokens = (piece.strip() for piece in value.strip().split(' '))
        return [int(tok) for tok in tokens if tok]
class NLayerDiscriminatorAsGen(NLayerDiscriminator):
    """Adapter exposing only the first element of the parent discriminator's forward output."""

    def forward(self, x):
        outputs = super().forward(x)
        return outputs[0]
def overwrite_variables(variables_to_copy, variables_to_overwrite):
sess = tf.get_default_session()
restores = []
assert (len(variables_to_copy) == len(variables_to_overwrite)), 'number of variables loaded mismatches len(variables)'
for (d, v) in zip(variables_to_copy, variables_to_overwrite):
r... |
def load(model_id: str, device: torch.device='cpu', freeze: bool=True, cache: str=DEFAULT_CACHE) -> Tuple[(nn.Module, Callable[([torch.Tensor], torch.Tensor)])]:
assert (model_id in MODEL_REGISTRY), f'Model ID `{model_id}` not valid, try one of {list(MODEL_REGISTRY.keys())}'
model_cache = (Path(cache) / model_... |
class HourglassNet(nn.Module):
def __init__(self, block, num_classes, num_stacks, num_blocks, depth=4):
super(HourglassNet, self).__init__()
bias = True
num_feats = 256
self.num_stacks = num_stacks
self.pre = nn.Sequential(nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, ... |
_inherit(core.Dataset)
class Dataset(core.Dataset):
def __init__(self, data_home=None):
super().__init__(data_home, name='dcase23_task2', clip_class=Clip, bibtex=BIBTEX, remotes=REMOTES, license_info=LICENSE_INFO)
_docs(load_audio)
def load_audio(self, *args, **kwargs):
return load_audio(*ar... |
def unroll_grid(input_dict: dict[(str, list)]) -> list[dict]:
    """Expand a parameter grid into the list of all key->value combinations (Cartesian product)."""
    keys = list(input_dict.keys())
    combos = product(*input_dict.values())
    return [dict(zip(keys, combo)) for combo in combos]
def batchnorm(inputs):
    """Channel-last (axis=3) batch normalization in training mode with N(1.0, 0.02) gamma init."""
    gamma_init = tf.random_normal_initializer(1.0, 0.02)
    return tf.layers.batch_normalization(
        inputs,
        axis=3,
        epsilon=1e-05,
        momentum=0.1,
        training=True,
        gamma_initializer=gamma_init,
    )
class CUDUDrp1mat(SpectralMatrix):
def assemble(self, method):
(test, trial) = (self.testfunction, self.trialfunction)
assert isinstance(test[0], UD)
assert isinstance(trial[0], UD)
k = np.arange((test[0].N - 1))
d = {0: ((((- 2) * (k + 1)) / ((2 * k) + 1)) + ((2 * (k + 1)) /... |
def max_size_tests():
dataset1 = ReplayPool(observation_shape=(4, 3), action_dim=1, max_steps=10, concat_observations=True, concat_length=4, rng=np.random.RandomState(42))
dataset2 = ReplayPool(observation_shape=(4, 3), action_dim=1, max_steps=1000, concat_observations=True, concat_length=4, rng=np.random.Rando... |
def get_panoptic_num_instances_per_class(nusc: NuScenes, sort_by: str='count_desc') -> Dict[(str, int)]:
sequence_wise_instances_per_class = dict()
for instance in nusc.instance:
instance_class = nusc.get('category', instance['category_token'])['name']
if (instance_class not in sequence_wise_ins... |
def make_data_loader(cfg):
train_transforms = build_transforms(cfg, is_train=True)
val_transforms = build_transforms(cfg, is_train=False)
num_workers = cfg.DATALOADER.NUM_WORKERS
if (len(cfg.DATASETS.NAMES) == 1):
dataset = init_dataset(cfg.DATASETS.NAMES, root=cfg.DATASETS.ROOT_DIR)
else:
... |
class MotionImitationEvaluator(Evaluator, ABC):
def __init__(self, dataset, data_dir):
super().__init__(dataset, data_dir)
self.paired_metrics_runner = None
self.unpaired_metrics_runner = None
def reset_dataset(self, dataset, data_dir):
super().__init__(dataset, data_dir)
def... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.