code stringlengths 101 5.91M |
|---|
def plot_mysql_db(sql_engine: Engine):
db_name = sql_engine.url.database
version_sql = pd.read_sql('SELECT version();', sql_engine)
table_sql = pd.read_sql(("SELECT table_schema AS schemaname, table_name AS table_name, table_rows AS row_count FROM INFORMATION_SCHEMA.tables\n WHERE table_schema not in ('m... |
def add_checkpoint_args(parser):
group = parser.add_argument_group('Checkpointing')
group.add_argument('--save-dir', metavar='DIR', default='checkpoints', help='path to save checkpoints')
group.add_argument('--restore-file', default='checkpoint_last.pt', help='filename from which to load checkpoint (default... |
class TestAbs(test_util.TestCase):
def setUp(self):
self.test_configs = [(1, 1), (2, 3), (2, 3, 4), (2, 3, 4, 5)]
def testAbs(self):
for input_size in self.test_configs:
op = core.CreateOperator('Abs', ['X'], ['Y'])
X = np.random.rand(*input_size).astype(np.float32)
... |
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride, dilation)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
... |
class BiLSTM(nn.Module):
def __init__(self, rnn_layers, dropout, num_classes, text_hidden_dims, text_embed_size):
super(BiLSTM, self).__init__()
self.text_embed_size = text_embed_size
self.text_hidden_dims = text_hidden_dims
self.rnn_layers = rnn_layers
self.dropout = dropout... |
class Res2Net(nn.Module):
def __init__(self, block, layers, baseWidth=26, scale=4, num_classes=1000):
self.inplanes = 64
super(Res2Net, self).__init__()
self.baseWidth = baseWidth
self.scale = scale
self.conv1 = nn.Sequential(nn.Conv2d(3, 32, 3, 1, padding=(1 + 34), bias=Fals... |
class DataType(Enum):
    """Supported numeric element types, keyed by stable integer codes.

    ``UNKNOWN`` uses -1 as an explicit out-of-band marker.
    """

    FP32 = 0
    FP16 = 1
    INT8 = 2
    UINT8 = 3
    INT16 = 4
    UINT16 = 5
    INT32 = 6
    UINT32 = 7
    BF16 = 8
    UNKNOWN = -1
class ThermalPhiSahaLTE(PhiSahaLTE):
outputs = ('thermal_phi_lte',)
latex_name = ('\\Phi^{*}(T_\\mathrm{e})',)
latex_formula = ('\\dfrac{2Z_{i,j+1}}{Z_{i,j}}\\big( \\\n \\dfrac{2\\pi m_{e}/\\beta_{\\textrm{electron}}}{h^2} \\\n \\big)^{3/2}e^{\\dfrac{-\\chi_{i,j}}{kT_... |
def build_dataset(cfg, default_args=None):
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif (cfg['type'] == 'RepeatDataset'):
dataset = RepeatDataset(build_dataset(cfg['dataset'], default_args), cfg['times'])
else:
dataset... |
class LongT5PreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable.

    NOTE(review): this looks like transformers' auto-generated dummy-object
    boilerplate; instantiation is expected to raise via requires_backends.
    """
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Fails fast with an informative error if torch is not installed.
        requires_backends(self, ['torch'])
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
def _parse_labels(self, labels):
if isinstance(labels, str):
labels = [label.strip() for label in labels.split(',')]
return labels
def __call__(self, sequences, labels, hypothesis_template):
if ((len(labels) == 0) ... |
class GenIndividuals(CreatableFromConfig):
    """Endless supplier of fresh ``Individual`` instances.

    Usable as an iterator (``next(gen)`` never raises StopIteration) and,
    through ``__call__``, as a factory for an infinite generator.
    """

    def __init__(self, *args, **kwargs):
        # Config-driven construction may pass arguments; none are needed.
        pass

    def __iter__(self):
        return self

    def __next__(self):
        # Every draw is a brand-new individual.
        return Individual()

    def __call__(self):
        while True:
            yield self.__next__()
class SchemeMorphism_polynomial_projective_subscheme_field(SchemeMorphism_polynomial_projective_space_field):
def __call__(self, x):
try:
reprs = self.representatives()
except NotImplementedError:
try:
return super().__call__(x)
except ValueError:
... |
def test_ByteMaskedArray_NumpyArray():
v2a = ak.contents.bytemaskedarray.ByteMaskedArray(ak.index.Index(np.array([1, 0, 1, 0, 1], np.int8)), ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])), valid_when=True)
resultv2 = v2a[np.array([0, 1, 2], np.int64)]
assert (to_list(resultv2) =... |
def train_input_fn():
    """Build the training input_fn from the enclosing scope's config.

    Closes over ``vocab``, ``data_config``, ``train_filenames``,
    ``hparams`` and ``embedding_files`` defined outside this function.
    """
    return train_utils.get_input_fn(
        vocab,
        data_config,
        train_filenames,
        hparams.batch_size,
        num_epochs=hparams.num_train_epochs,
        shuffle=True,
        embedding_files=embedding_files,
        shuffle_buffer_multiplier=hparams.shuffle_buffer_multiplier,
    )
class QuantumGroupRepresentation(CombinatorialFreeModule):
def __classcall__(cls, R, C, q=None):
if (q is None):
q = R.gen()
return super().__classcall__(cls, R, C, q)
def __init__(self, R, C, q):
self._q = q
self._d = C.cartan_type().symmetrizer()
cat = Quant... |
class CifLSTMCell(BaseCell):
def __call__(self, inputs, state, scope=None):
if self.recur_diag_bilin:
(inputs1, inputs2) = tf.split(1, 2, inputs)
inputs = tf.concat(1, [(inputs1 * inputs2), inputs1, inputs2])
with tf.variable_scope((scope or type(self).__name__)):
... |
class Softmax(nn.Module):
def __init__(self, **options):
super(Softmax, self).__init__()
self.temp = options['temp']
self.label_smoothing = options['label_smoothing']
def forward(self, x, y, labels=None):
logits = y
if (labels is None):
return (logits, 0)
... |
def save_obj(obj, name):
    """Pickle ``obj`` to ``results/<name>.pkl`` using the highest protocol.

    Assumes the ``results/`` directory already exists.
    """
    path = 'results/' + name + '.pkl'
    with open(path, 'wb') as handle:
        pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
class Othello(core.Env):
def __init__(self):
super().__init__()
def _init(self, key: PRNGKey) -> State:
return _init(key)
def _step(self, state: core.State, action: Array, key) -> State:
del key
assert isinstance(state, State)
return _step(state, action)
def _obse... |
def _encode_idna(domain):
if (not isinstance(domain, text_type)):
domain.decode('ascii')
return domain
try:
return domain.encode('ascii')
except UnicodeError:
pass
parts = domain.split('.')
for (idx, part) in enumerate(parts):
parts[idx] = part.encode('idna')
... |
def _get_custom_platforms(arch):
(arch_prefix, arch_sep, arch_suffix) = arch.partition('_')
if arch.startswith('macosx'):
arches = _mac_platforms(arch)
elif (arch_prefix in ['manylinux2014', 'manylinux2010']):
arches = _custom_manylinux_platforms(arch)
else:
arches = [arch]
r... |
def batch_norm(x, is_training=True, scope='batch_norm'):
    """Batch-normalize ``x`` via tf.contrib with fixed decay and epsilon."""
    bn_options = dict(
        decay=0.9,
        epsilon=1e-05,
        center=True,
        scale=True,
        updates_collections=None,  # update moving stats in-place
        is_training=is_training,
        scope=scope,
    )
    return tf_contrib.layers.batch_norm(x, **bn_options)
def concat_all_gather(input):
bs_int = input.shape[0]
size_list = comm.all_gather(bs_int)
max_size = max(size_list)
max_shape = ((max_size,) + input.shape[1:])
padded_input = input.new_zeros(max_shape)
padded_input[:bs_int] = input
all_inputs = differentiable_all_gather(padded_input)
inp... |
class CacheController(object):
def __init__(self, cache=None, cache_etags=True, serializer=None, status_codes=None):
self.cache = (cache or DictCache())
self.cache_etags = cache_etags
self.serializer = (serializer or Serializer())
self.cacheable_status_codes = (status_codes or (200, ... |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('experiment_group')
parser.add_argument('--eval_name', default='*', required=False)
args = parser.parse_args()
experiment_group = args.experiment_group
eval_name = args.eval_name
print(eval_name)
workspace_path = os.envir... |
.xfail
def test_fetch():
try:
datasets1 = fetch(shuffle=True, random_state=42)
except IOError:
raise SkipTest('Zenodo dataset can not be loaded.')
datasets2 = fetch(shuffle=True, random_state=37)
for k in DATASET_SHAPE.keys():
(X1, X2) = (datasets1[k].data, datasets2[k].data)
... |
class NNrefine(nn.Module):
def __init__(self):
super(NNrefine, self).__init__()
self.linear0 = nn.Sequential(nn.ReLU(inplace=True), nn.Conv2d(256, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)))
self.linear1 = nn.Sequential(nn.ReLU(inplace=True), nn.Conv2d(128, 64, kernel_size=(3, 3)... |
def left_pain_multiple_anatomy(c):
    """Return True when an anatomy mention occurs in the candidate's
    5-token left window AND the pain span ends before the anatomy span
    starts (i.e. pain precedes anatomy in text order).
    """
    left_window = get_left_tokens(c, 5)
    has_anatomy = list_contains_anatomy_mention(left_window)
    # `bool(...)` replaces the redundant `True if b else False`; `and`
    # short-circuits the side-effect-free span comparison.
    return bool(has_anatomy and c.pain.char_end < c.anatomy.char_start)
def create_model(env, agent_name, use_pretrained_weights=False, **kwargs):
(model, _) = learn(network=kwargs['NETWORK_TYPE'], env=env, total_timesteps=1, save_interval=0, nsteps=kwargs['BATCH_SIZE'], nminibatches=kwargs['MINIBATCHES'], noptepochs=kwargs['STEPS_PER_UPDATE'], scope=agent_name, network_kwargs=kwargs)
... |
def init_normc_(weight, gain=1.0):
    """In-place init: draw N(0, 1) entries, then rescale each row of
    ``weight`` so its Euclidean norm (over dim 1) equals ``gain``.
    """
    weight.normal_(0, 1)
    row_norm = weight.pow(2).sum(1, keepdim=True).sqrt()
    weight *= gain / row_norm
def tti_kernel(model, u1, u2, fw=True, q=None):
(m, damp, irho) = (model.m, model.damp, model.irho)
wmr = (irho * m)
q = (q or (0, 0))
(u1_n, u2_n) = ((u1.forward, u2.forward) if fw else (u1.backward, u2.backward))
(udt1, udt2) = ((u1.dt, u2.dt) if fw else (u1.dt.T, u2.dt.T))
(H0, H1) = sa_tti(u... |
def test_minmaximum_filter1d():
in_ = numpy.arange(10)
out = ndimage.minimum_filter1d(in_, 1)
assert_equal(in_, out)
out = ndimage.maximum_filter1d(in_, 1)
assert_equal(in_, out)
out = ndimage.minimum_filter1d(in_, 5, mode='reflect')
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
out ... |
class EngineBase():
def __init__(self, config: Optional[Config]=None):
if (config is None):
config = get_global_config(auto_create=True)
self.config = config
self.epoch = 0
self.global_train_step = None
self.pretrain = None
self.model_filename = None
... |
class TextRole(ColumnRole):
    """Column role marking a free-text feature column."""

    _name = 'Text'

    def __init__(self, dtype: Dtype = str, force_input: bool = True):
        # Text columns default to plain ``str`` and are always fed to the model.
        self.dtype = dtype
        self.force_input = force_input
class CopaProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, 'train.jsonl'), 'train')
def get_dev_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, 'val.jsonl'), 'dev')
def get_test_examples(self, ... |
.parametrize('X, y', [(X, y), (sparse.csr_matrix(X), y), (sparse.csc_matrix(X), y)])
def test_function_sampler_func_kwargs(X, y):
def func(X, y, sampling_strategy, random_state):
rus = RandomUnderSampler(sampling_strategy=sampling_strategy, random_state=random_state)
return rus.fit_resample(X, y)
... |
class Accuracy(ConfusionMatrixMetric):
def __init__(self, metric: str='ACURCY'):
super().__init__(metric)
def calculate(self):
sum_ = (((self.confusion_matrix.tp + self.confusion_matrix.tn) + self.confusion_matrix.fp) + self.confusion_matrix.fn)
if (sum_ != 0):
return ((self.... |
class NoisySGD(NoisyMechanism):
def __init__(self, noise_scale: float, dataset_size: int, batch_size: int, epochs: int, max_grad_norm: float):
super().__init__(noise_scale)
self.name = 'NoisySGD'
self.params = {'noise_scale': noise_scale, 'dataset_size': dataset_size, 'batch_size': batch_siz... |
def get_data_loaders(cfg, args):
tr_dataset = Dummy(cfg.train)
train_loader = data.DataLoader(dataset=tr_dataset, batch_size=1, shuffle=False, num_workers=0, drop_last=False)
te_dataset = Dummy(cfg.val)
test_loader = data.DataLoader(dataset=te_dataset, batch_size=1, shuffle=False, num_workers=0, drop_la... |
def create_peeling_paint_metal_node_group(node_tree: bpy.types.NodeTree) -> bpy.types.Node:
peeling_paint_metal_node_group: bpy.types.NodeGroup
if ('Peeling Paint Metal' in bpy.data.node_groups):
peeling_paint_metal_node_group = bpy.data.node_groups['Peeling Paint Metal']
else:
peeling_paint... |
class Function_Fresnel_cos(BuiltinFunction):
def __init__(self):
BuiltinFunction.__init__(self, 'fresnel_cos', nargs=1, latex_name='\\operatorname{C}', conversions=dict(maxima='fresnel_c', sympy='fresnelc', mathematica='FresnelC', maple='FresnelC', fricas='fresnelC'))
def _eval_(self, x):
if isi... |
def create_wave(amplitude: ti.f32, x: ti.f32, y: ti.f32):
    """Add a Gaussian bump of the given amplitude, centered at (x, y),
    to the interior cells of the global ``height`` field.
    """
    for (r, c) in ti.ndrange((1, shape[0] - 1), (1, shape[1] - 1)):
        dist_sq = (r - x) ** 2 + (c - y) ** 2
        # Plain read-modify-write kept on purpose: in a taichi kernel,
        # `+=` lowers to an atomic add, which the original did not use.
        height[r, c] = height[r, c] + amplitude * ti.exp(-0.02 * dist_sq)
def test_point_f1_score(expected, observed):
    """The fixture pair should yield a point F1 of exactly 1/4."""
    assert point_f1_score(expected, observed) == 0.25
def compute_isogeny_kernel_polynomial(E1, E2, ell, algorithm=None):
if (algorithm == 'starks'):
from sage.misc.superseded import deprecation
deprecation(34871, 'The "starks" algorithm is being renamed to "stark".')
algorithm = 'stark'
if (algorithm is None):
char = E1.base_ring()... |
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
def test_tan_forward_backward(seed, ctx, func_name):
from nbla_test_utils import function_tester
rng = np.random.RandomState(seed)
inputs = [(np.clip(rng.randn(2, 3, 4).astype(np.float32), ((- np.pi) / 2), (np.pi / 2)) * 0.1)]
function_tes... |
_model
def convformer_s36_384(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['convformer_s36_384']
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(url=mo... |
.operations('create_user', 'get_user', 'update_user')
def test_add_link_nothing_is_provided(schema_url):
schema = schemathesis.from_uri(schema_url)
with pytest.raises(ValueError, match='You need to provide `parameters` or `request_body`.'):
schema.add_link(source=schema['/users/']['POST'], target='#/pat... |
.parametrize('BinarySearchTree', KD_TREE_CLASSES)
def test_array_object_type(BinarySearchTree):
    """Ragged object arrays must be rejected by the tree constructor."""
    ragged = np.array([(1, 2, 3), (2, 5), (5, 5, 1, 2)], dtype=object)
    expected_msg = 'setting an array element with a sequence'
    with pytest.raises(ValueError, match=expected_msg):
        BinarySearchTree(ragged)
class RobotPose():
def __init__(self):
self.x = 0.0
self.y = 0.0
self.rot = 0.0
def setPose(self, x, y, rot):
self.x = x
self.y = y
self.rot = rot
return (x, y, rot)
def convert2grid(self, scale=0.2):
(x, y) = (round((self.x / scale)), (- round... |
def train_step():
model.train()
model.zero_grad()
(data, label, op) = rules(args.batch_size, args.seq_len, args.gt_rules, 2, args.search_version, args.data_seed)
data = torch.Tensor(data).to(device)
label = torch.Tensor(label).to(device)
op = torch.Tensor(op).to(device)
(out, score) = model(... |
_model
def edgevit_s(pretrained=True, **kwargs):
    """Build the EdgeViT-S configuration of ``EdgeVit``.

    NOTE(review): ``pretrained`` is accepted but never used here — confirm
    whether weight loading was intended.
    """
    arch = dict(
        depth=[1, 2, 5, 3],
        embed_dim=[48, 96, 240, 384],
        head_dim=48,
        mlp_ratio=[4] * 4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-06),
        sr_ratios=[4, 2, 2, 1],
    )
    # Double splat (not dict.update) so a duplicate key in kwargs raises
    # TypeError exactly as the original keyword call did.
    model = EdgeVit(**arch, **kwargs)
    model.default_cfg = _cfg()
    return model
def split_train_file(treebank, train_input_conllu, train_output_conllu, dev_output_conllu):
random.seed(1234)
sents = read_sentences_from_conllu(train_input_conllu)
random.shuffle(sents)
n_dev = int((len(sents) * XV_RATIO))
assert (n_dev >= 1), 'Dev sentence number less than one.'
n_train = (len... |
def make_eval_env(all_args, run_dir):
def get_env_fn(rank):
def init_env():
if (all_args.env_name == 'Overcooked'):
if (all_args.overcooked_version == 'old'):
env = Overcooked(all_args, run_dir)
else:
env = Overcooked_new(al... |
def test_capture_hypothesis_output():
    """Reports made inside the context manager accumulate in call order."""
    message = 'Some text'
    with utils.capture_hypothesis_output() as captured:
        for _ in range(2):
            report(message)
    assert captured == [message, message]
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super(_PointnetSAModuleBase, self).__init__()
self.npoint = None
self.groupers = None
self.mlps = None
def forward(self, xyz: torch.Tensor, features: Optional[torch.Tensor]) -> Tuple[(torch.Tensor, torch.Tensor)]:
... |
def get_default_endpoints():
    """Load and return the default tool-endpoint mapping.

    Fetches ``default_endpoints.json`` from the
    ``huggingface-tools/default-endpoints`` dataset via ``cached_file``.
    """
    endpoints_path = cached_file(
        'huggingface-tools/default-endpoints',
        'default_endpoints.json',
        repo_type='dataset',
    )
    with open(endpoints_path, encoding='utf-8') as handle:
        return json.load(handle)
def knn_gather_by_indexing(som_node, som_node_knn_I):
B = som_node.size()[0]
C = som_node.size()[1]
N = som_node.size()[2]
K = som_node_knn_I.size()[2]
som_node_knn_I = som_node_knn_I.unsqueeze(1).expand(B, C, N, K).contiguous().view(B, C, (N * K))
som_node_neighbors = torch.gather(som_node, dim... |
class Document(object):
def __init__(self, identifier, sentences, coref):
self.identifier = identifier
self.in_sentence_ids = []
self.sentence_spans = []
self.tokens = []
self.pos = []
self.ner = []
self.parse = []
self.dep = []
self.speakers =... |
class UnexpectedToken(ParseError, UnexpectedInput):
def __init__(self, token, expected, considered_rules=None, state=None, interactive_parser=None, terminals_by_name=None, token_history=None):
super().__init__()
self.line = getattr(token, 'line', '?')
self.column = getattr(token, 'column', '... |
class TestIterationBasedBatchSampler(unittest.TestCase):
def test_number_of_iters_and_elements(self):
for batch_size in [2, 3, 4]:
for num_iterations in [4, 10, 20]:
for drop_last in [False, True]:
dataset = [i for i in range(10)]
sampler =... |
class MobileNetV2PreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable.

    NOTE(review): this looks like transformers' auto-generated dummy-object
    boilerplate; instantiation is expected to raise via requires_backends.
    """
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Fails fast with an informative error if torch is not installed.
        requires_backends(self, ['torch'])
def findSVOs(parsed_sent, sent, is_gold):
global matched_events, matched_events_same_ix
global matched_args, matched_args_same_ix
verbs = [tok for tok in parsed_sent if ((tok.pos_ == 'VERB') and (tok.dep_ != 'aux'))]
for v in verbs:
(subs, pass_subs) = getAllSubs(v)
(v, objs) = getAllObj... |
def transform_targets(targets):
ret = []
for target in targets:
if (target == 'Atheism'):
ret.append('#atheism')
elif (target == 'Climate Change is a Real Concern'):
ret.append('#climatechange')
elif (target == 'Feminist Movement'):
ret.append('#femini... |
.parametrize('action_dist, estimated_rewards_by_reg_model, description', valid_input_of_create_estimator_inputs)
def test_meta_create_estimator_inputs_using_valid_input_data(action_dist, estimated_rewards_by_reg_model, description: str, synthetic_multi_bandit_feedback: BanditFeedback) -> None:
ope_ = MultiLoggersOf... |
def create_transformations(obj: optplan.Function, monitors: List[optplan.Monitor], sim_space: optplan.SimulationSpaceBase, cont_iters: int, num_stages: int=3, min_feature: float=100) -> List[optplan.Transformation]:
trans_list = []
param = optplan.CubicParametrization(undersample=((3.5 * min_feature) / GRID_SPA... |
def get_snorkel_label(train_dialogs, eval_dialogs, test_dialogs):
func_dialogs = filter_function_dialog(train_dialogs)
train_data = pd.DataFrame(func_dialogs, columns=['text'])
lfs = [lf_why_keyword, lf_what_keyword, lf_where_keyword, lf_when_keyword, lf_confirm_keyword]
applier = PandasLFApplier(lfs)
... |
.parametrize('is_spark, sort_col', [pytest.param(False, None, marks=pytest.mark.core), pytest.param(False, 'timestamp', marks=pytest.mark.core), pytest.param(True, None, marks=pytest.mark.spark), pytest.param(True, 'timestamp', marks=pytest.mark.spark)])
def test_groupby_sequences_pandas(pandas_interactions, is_spark, ... |
class AlignmentModel(nn.Module):
def __init__(self, phrase_embedder, token_embedder, max_words, node_filter, top_k=5, dropout=0.3, ablate_text=False, ablate_attrs=False, use_neighbors=False, use_tags=False, neighbor_rels=['above', 'left'], max_neighbors=1):
super(AlignmentModel, self).__init__()
sel... |
def resnest50(pretrained=False, root='~/.encoding/models', **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], radix=2, groups=1, bottleneck_width=64, deep_stem=True, stem_width=32, avg_down=True, avd=True, avd_first=False, **kwargs)
if pretrained:
model.load_state_dict(torch.hub.load_state_dict_from_ur... |
_dispatch
def irfft2(x, s=None, axes=((- 2), (- 1)), norm=None, overwrite_x=False, workers=None):
    """Dispatch stub for the 2-D inverse real FFT.

    Performs no computation: it only declares which argument is
    dispatchable. NOTE(review): the bare ``_dispatch`` line above appears
    to be a stripped uarray-multimethod decorator (scipy.fft style) that
    routes the call to the active backend — confirm against the original.
    """
    return (Dispatchable(x, np.ndarray),)
def load_yaml(stream: (((str | bytes) | TextIO) | BinaryIO)) -> Any:
    """Parse YAML from a string, bytes object, or open file-like stream
    using the project's custom loader.
    """
    import yaml  # imported lazily so PyYAML is only required when used
    loader = get_yaml_loader()
    return yaml.load(stream, loader)
class MapillaryVistas(Dataset):
CLASSES = ['Bird', 'Ground Animal', 'Curb', 'Fence', 'Guard Rail', 'Barrier', 'Wall', 'Bike Lane', 'Crosswalk - Plain', 'Curb Cut', 'Parking', 'Pedestrian Area', 'Rail Track', 'Road', 'Service Lane', 'Sidewalk', 'Bridge', 'Building', 'Tunnel', 'Person', 'Bicyclist', 'Motorcyclist', '... |
def lev_dist(first, second):
if (len(first) > len(second)):
(first, second) = (second, first)
if (len(second) == 0):
return len(first)
first_length = (len(first) + 1)
second_length = (len(second) + 1)
distance_matrix = [([0] * second_length) for x in range(first_length)]
for i in... |
def __get_default(parameter: str, default):
    """Return ``parameter``'s value from the module-level ``__default_config``
    mapping, or ``default`` when no config is loaded or the key is absent.
    """
    config = __default_config
    if config is not None and parameter in config:
        return config[parameter]
    return default
def FolkmanGraph():
    """Return the Folkman graph, built as the LCF graph
    LCF(20, [5, -7, -7, 5], 5).
    """
    from sage.graphs.generators.families import LCFGraph
    graph = LCFGraph(20, [5, -7, -7, 5], 5)
    graph.name('Folkman Graph')
    return graph
def to_music21(music: 'Music') -> Score:
score = Score()
if music.metadata:
score.append(to_music21_metadata(music.metadata))
for track in music.tracks:
part = Part()
part.partName = track.name
for tempo in music.tempos:
part.append(to_music21_metronome(tempo))
... |
(Output('data-explanation-state', 'data'), [Input('select-num-figures-data', 'value'), Input('select-plots-data', 'value')], [State('data-explanation-state', 'data')])
def change_parameters(num_figures, plots, data):
params = (json.loads(data) if (data is not None) else {})
ctx = dash.callback_context
if ct... |
def _set_initial_values(result, type_and_name, d):
result.names.append(type_and_name[1])
vtype = ''
dim = 0
if (not type_and_name[0]):
if (len(d.shape) == 2):
vtype = '.csv'
dim = 1
elif (len(d.shape) == 3):
vtype = '.png'
dim = (1 if ((d.s... |
class EvaluationAGCode(AGCode):
_registered_encoders = {}
_registered_decoders = {}
def __init__(self, pls, G):
if issubclass(type(G), FunctionFieldPlace):
G = G.divisor()
F = G.parent().function_field()
K = F.constant_base_field()
n = len(pls)
if any(((p.... |
class Features(FeaturesLike, Sequence[Any]):
_values: FeaturesValuesLike
def __init__(self, values: FeaturesValuesLike=(), *args, **kwargs) -> None:
self._values = values
def values(self) -> FeaturesValuesLike:
return self._values
def values(self, values: FeaturesValuesLike) -> None:
... |
class PostRuleFactory(object):
    """Factory resolving post-rule classes by name and instantiating them.

    NOTE(review): both methods take ``cls`` first but carry no visible
    ``@classmethod`` decorator — likely stripped during extraction;
    confirm against the original source before calling on the class.
    """
    def get_post_rule_class(cls, name: str) -> Type[PostRuleBase]:
        # Resolve the rule class dynamically from its (aliased) name.
        return dynamic_import(name, import_alias)
    def create(cls, name: str, **kwargs) -> PostRuleBase:
        # Look up the class, then construct it from the keyword arguments.
        post_rule_class = cls.get_post_rule_class(name)
        return post_rule_class.from_dict(kwargs)
class RestructuredTextTableRenderer(object):
def __init__(self, table):
self.validator = TableValidator(table)
self.table = table
self.padding = 1
self.widths = self._calculate_widths()
self._adjust_widths()
def get_headers(self):
return self.table.headers
def... |
class CamembertTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['attention_mask']
def __init__(self, vocab_file, bos_token='<s>', eos_token... |
class FeverDocDB(DocDB):
def __init__(self, path=None):
super().__init__(path)
def get_doc_lines(self, doc_id):
cursor = self.connection.cursor()
cursor.execute('SELECT lines FROM documents WHERE id = ?', (utils.normalize(doc_id),))
result = cursor.fetchone()
cursor.close... |
def CasingMagDipoleDeriv_z(z):
    """Evaluate the casing Hertz magnetic-dipole field at height ``z`` and
    the (diagonal) derivative of that field with respect to z.

    Closes over module-level ``xobs``/``yobs`` and the casing parameters.
    Returns ``(field, derivative)``.
    """
    observation = np.vstack([xobs, yobs, z]).T
    field = Casing._getCasingHertzMagDipole(
        srcloc, observation, freq, sigma, a, b, mu)
    deriv = utils.sdiag(Casing._getCasingHertzMagDipoleDeriv_z(
        srcloc, observation, freq, sigma, a, b, mu))
    return (field, deriv)
class CoNLLUVocab():
    """Read-only accessors over vocabulary metadata for one CoNLL-U field.

    NOTE(review): the accessors look like stripped ``@property``
    definitions — confirm against the original source.
    """

    _field = None
    _n_splits = None
    _conllu_idx = None

    def field(self):
        return self._field

    def conllu_idx(self):
        return self._conllu_idx

    def n_splits(self):
        return self._n_splits
.parametrize('csr_container', CSC_CONTAINERS)
def test_assert_allclose_dense_sparse(csr_container):
x = np.arange(9).reshape(3, 3)
msg = 'Not equal to tolerance '
y = csr_container(x)
for X in [x, y]:
with pytest.raises(AssertionError, match=msg):
assert_allclose_dense_sparse(X, (X *... |
class DDPG(QLearningAlgoBase[(DDPGImpl, DDPGConfig)]):
def inner_create_impl(self, observation_shape: Shape, action_size: int) -> None:
policy = create_deterministic_policy(observation_shape, action_size, self._config.actor_encoder_factory, device=self._device)
targ_policy = create_deterministic_pol... |
def base_case_to_qa_file(dict_paragraphs: dict, out_file: str, separate=True):
with open(out_file, 'wt') as tsv_file:
writer = csv.writer(tsv_file, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for (key, values) in dict_paragraphs.items():
i = 0
if (not separate):... |
def pytest_addoption(parser):
    """Register the ``--slow`` flag that opts in to slow tests."""
    parser.addoption('--slow', action='store_true', help='run slow tests')
class ConvNet2FC(nn.Module):
def __init__(self, in_chan=1, out_chan=64, nh=8, nh_mlp=512, out_activation='linear', use_spectral_norm=False):
super(ConvNet2FC, self).__init__()
self.conv1 = nn.Conv2d(in_chan, (nh * 4), kernel_size=3, bias=True)
self.conv2 = nn.Conv2d((nh * 4), (nh * 8), kerne... |
class GradientPTQLearnRateZeroConvGroupDilationTest(GradientPTQLearnRateZeroTest):
    """Zero-learn-rate GPTQ test variant using a (2, 2)-dilated conv."""

    def create_networks(self):
        # Drop the batch dimension from the first declared input shape.
        input_shape = self.get_input_shapes()[0][1:]
        return build_model(input_shape, group=1, dilation_rate=(2, 2))
class Net(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, num_layers):
super().__init__()
self.convs = torch.nn.ModuleList()
for _ in range(num_layers):
mlp = MLP([in_channels, hidden_channels, hidden_channels])
self.convs.append(GINCo... |
def remap_module(module_type, k, v):
if (module_type == 'ConvBnAct'):
k = k.replace('bn1.', 'bn.')
elif (module_type == 'InvertedResidual'):
k = k.replace('conv_pw.', 'conv_exp.')
k = k.replace('bn1.', 'bn_exp.')
k = k.replace('bn2.', 'bn_dw.')
k = k.replace('bn3.', 'bn_p... |
def maybe_download(model_name, model_url, model_dir=None, map_location=None):
import os
import sys
from six.moves import urllib
if (model_dir is None):
torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))
model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, '... |
_utils.test(arch=get_host_arch_list())
def test_unpack_from_shape():
a = ti.field(ti.f32, ())
b = ti.field(ti.f32, ())
c = ti.field(ti.f32, ())
d = ti.field(ti.f32, (2, 3, 4))
def func():
(a[None], b[None], c[None]) = d.shape
func()
assert (a[None] == 2)
assert (b[None] == 3)
... |
class FullBatchNodeGenerator(FullBatchGenerator):
    """Full-batch generator specialized for node-level tasks."""

    multiplicity = 1

    def flow(self, node_ids, targets=None, use_ilocs=False):
        # Node-level flows need no extra processing; delegate as-is.
        return super().flow(node_ids, targets, use_ilocs)

    def default_corrupt_input_index_groups(self):
        # Corrupt only the node-feature input (model input index 0).
        return [[0]]
def createEmbedMatrix(srcDicts):
print('Creating Embed matrix ...')
src_embed = torch.FloatTensor(torch.randn(srcDicts.size(), 300))
found = 0
f = codecs.open(opt.src_embedding, 'rb', 'utf-8')
for line in f:
splitLine = line.split(' ')
word = splitLine[0]
embedding = np.array... |
def get_prior_BO_limit(prior, mx_hat, tx0_hat):
ax = (mx_hat + tx0_hat)
A_BO = prior.compute_potential_BO(ax=ax, tx0_hat=tx0_hat)
vx_BO = prior.compute_forward_v_BO(ax=ax, tx0_hat=tx0_hat)
tau_x = prior.forward_second_moment_FG(tx_hat=tx0_hat)
mx_BO = (tau_x - vx_BO)
A_RS = prior.compute_potenti... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.