| code |
|---|
def months_in_prison(popu):
return np.array([person['months_in_prison'] for person in popu.values()]) |
def isunsigned_long_longarray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical'] and get_kind(var) == '-8' |
class KRTToRCBijectionTypeC(KRTToRCBijectionTypeA):
def next_state(self, val):
n = self.n
        tableau_height = len(self.cur_path[0]) - 1
        if val > 0:
            KRTToRCBijectionTypeA.next_state(self, val)
            return
        pos_val = -val
        case_S = [None] * n
if (... |
def test_move_and_copy_casts():
cstats = m.move_and_copy_cstats()
    c_m, c_mc, c_c = cstats['MoveOnlyInt'], cstats['MoveOrCopyInt'], cstats['CopyOnlyInt']
    assert m.move_and_copy_casts(3) == 18
    assert c_m.copy_assignments + c_m.copy_constructions == 0
    assert c_m.move_assignments == 2
as... |
@unittest.skipIf(not have_sympy, 'SymPy not installed')
def test_conv7b():
x = sympy.Symbol('x')
y = sympy.Symbol('y')
    assert sympify(sympy.sin(x / 3)) == sin(Symbol('x') / 3)
    assert sympify(sympy.sin(x / 3)) != cos(Symbol('x') / 3)
    assert sympify(sympy.cos(x / 3)) == cos(Symbol('x') / 3)
as... |
def test_export_digraph(digraph_2d):
ground_truth = b'"a","b",{}\n"b","c",{}\n"d","e",{}\n"e","f",{}\n'
digraph_2d._export_digraph()
digraph_2d.edge_list.seek(0)
    assert digraph_2d.edge_list.read() == ground_truth |
class TensorboardOutputFormat(KVWriter):
def __init__(self, dirname):
self._writer = SummaryWriter(dirname)
self.step = 0
def writekvs(self, kvs):
        for k, v in kvs.items():
self._writer.add_scalar(k, v, self.step)
self.step += 1
def close(self):
self._wri... |
@pytest.mark.experimental
def test_inverse_transform(log):
indexer = JoinBasedIndexerEstimator().fit(log)
indexed_df = indexer.transform(log)
df_with_primary_indexes = indexer.inverse_transform(indexed_df)
    assert indexed_df.count() == df_with_primary_indexes.count()
expected_unique_user_ids = log.select('user... |
def propagate_memlets_sdfg(sdfg):
reset_state_annotations(sdfg)
for state in sdfg.nodes():
propagate_memlets_state(sdfg, state)
propagate_states(sdfg) |
def test_imageio_as_gray():
img = imread(fetch('data/color.png'), as_gray=True)
    assert img.ndim == 2
    assert img.dtype == np.float64
    img = imread(fetch('data/camera.png'), as_gray=True)
    assert np.core.numerictypes.sctype2char(img.dtype) in np.typecodes['AllInteger'] |
def _dataset_info(txt_labels, num_classes=10000):
with open(txt_labels, 'r') as f:
images_list = f.readlines()
file_names = []
labels = []
for row in images_list:
row = row.split(' ')
        if int(row[1]) >= num_classes:
continue
file_names.append(row[0])
... |
def test_basic():
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
    e = x + y + z
    assert e.subs({x: y, z: y}) == 3 * y |
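The test above hands `subs` a dict so several symbols are replaced in one call. A self-contained version of the same check, runnable with only SymPy installed:

```python
# Both x and z are mapped to y, so the sum collapses to y + y + y == 3*y.
from sympy import symbols

x, y, z = symbols('x y z')
e = x + y + z
assert e.subs({x: y, z: y}) == 3 * y
```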
def named_buffers(partition, prefix='', recurse=True):
params = nn.Module.named_buffers(partition, prefix=prefix, recurse=recurse)
lookup = partition.lookup
    for k, v in params:
        if k in lookup:
            yield lookup[k], v
        else:
            assert '.' in k
split_idx = ... |
class DNetV3(nn.Module):
def __init__(self, arch, op_names=None, num_classes=1000, **kwargs):
super(DNetV3, self).__init__()
        if op_names is None:
op_names = ['conv3', 'conv1', 'conv3_grp2', 'conv3_grp4', 'conv3_base1', 'conv3_base32', 'conv3_sep']
(block_str, num_channel, macro... |
def create_window(window_size, channel=1):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
return window |
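`create_window` builds the separable Gaussian window used for SSIM-style comparisons: the outer product of a 1-D Gaussian with itself, broadcast once per channel. A minimal, self-contained sketch of the same construction, where `gaussian` is an assumed helper (not shown in the snippet) returning a normalized 1-D Gaussian:

```python
import torch

def gaussian(window_size: int, sigma: float) -> torch.Tensor:
    # Assumed helper: normalized 1-D Gaussian centered on the window.
    coords = torch.arange(window_size, dtype=torch.float32) - window_size // 2
    g = torch.exp(-coords ** 2 / (2 * sigma ** 2))
    return g / g.sum()

_1d = gaussian(11, 1.5).unsqueeze(1)             # shape (11, 1)
_2d = _1d.mm(_1d.t()).unsqueeze(0).unsqueeze(0)  # shape (1, 1, 11, 11)
window = _2d.expand(3, 1, 11, 11).contiguous()   # one kernel per channel
assert torch.isclose(window[0, 0].sum(), torch.tensor(1.0))
```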
class Ensemble(nn.Module):
def __init__(self, models):
super().__init__()
self.models = nn.ModuleList()
for m in models:
self.models.append(m)
self.ensemble_size = len(models)
self.input_size = self.models[0].input_size
self.output_size = self.models[0].ou... |
def test_options():
    options = Options(Real, {-0.5, 0.5, np.inf}, deprecated={-0.5})
    assert options.is_satisfied_by(-0.5)
    assert options.is_satisfied_by(np.inf)
    assert not options.is_satisfied_by(1.23)
    assert '-0.5 (deprecated)' in str(options) |
def convert(expr, target):
base_target = target
z = {}
tz = {}
    for x in expr.variables():
        if is_unit(x):
            if unit_to_type[str(x)] == 'temperature':
                return convert_temperature(expr, target)
            else:
                z[x] = base_units(x)
expr = expr.subs(z)... |
def test_crop_and_pad():
dataset = [{'tokens': [1, 2, 4, 3, 6, 2], 'transitions': [0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1]}, {'tokens': [6, 1], 'transitions': [0, 0, 1]}, {'tokens': [6, 1, 2, 3, 5, 1], 'transitions': [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]}]
length = 5
expected = [{'tokens': [6, 2, 0, 0, 0], 'transition... |
def to_lean_paren_description_aux(expr: Expression, local_vars: Dict[int, str]={}, context: Optional[LeanDescContext]=None) -> Tuple[str, int]:
    if isinstance(expr, (ExprOperator, ExprAddressOf, ExprSubscript)):
(result, div_var_startnum) = to_lean_description_aux(... |
class AvgPool(nn.Module):
def __init__(self, stride=None, padding=0):
super(AvgPool, self).__init__()
self.stride = stride
self.padding = padding
def forward(self, x):
kernel_size = x.size(2)
pooling = nn.AvgPool1d(kernel_size=kernel_size, stride=self.stride, padding=self... |
def getDBConnection():
try:
db = mysql.connector.connect(host='localhost', user='root', passwd='root', database='dati')
return db
except Exception as e:
print(e)
return None |
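A short usage sketch for the connection helper; the `users` table is hypothetical, while the hard-coded credentials come from the snippet itself:

```python
db = getDBConnection()
if db is not None:
    cursor = db.cursor()
    cursor.execute('SELECT COUNT(*) FROM users')  # hypothetical table
    (count,) = cursor.fetchone()
    cursor.close()
    db.close()
```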
class VoiceBase(AbstractSingleton):
def __init__(self):
self._url = None
self._headers = None
self._api_key = None
self._voices = []
self._mutex = Lock()
self._setup()
def say(self, text: str, voice_index: int=0) -> bool:
        text = re.sub(r'\bhttps?://\S+', '', text)  # assumed intent: strip raw URLs before the text is spoken
... |
def auto_lambdify_delay_1(optimizer_class, simplify=False, allow_no_coeff=False):
    _, preds, gaps = run_sim(1, optimizer_class, simplify=simplify)
gap = gaps[0]
pred = preds[0]
fs_gap = list(gap.free_symbols)
fs_pred = list(pred.free_symbols)
f = lambdify(fs_gap, gap, modules=['math'])
dict... |
class GroupResBlock(nn.Module):
def __init__(self, in_channels, out_channels, mid_channels, groups, res_scale=1.0):
super().__init__()
self.res = nn.Sequential(nn.Conv2d(in_channels, mid_channels, 3, 1, 1, groups=groups), nn.LeakyReLU(negative_slope=0.2, inplace=True), nn.Conv2d(mid_channels, out_ch... |
class EqtRestructureAndLoad(object):
def find_module(self, fullname, path=None):
if hasattr(path, '_path'):
path = path._path
        if not path or not path[0].startswith(__path__[0]):
return None
for key in _import_map.keys():
if fullname.startswith(key):
... |
def test_init_roll():
a = _roll_init_dice(rng)
    assert len(a) == 2
    assert a[0] != a[1] |
def _create_tensor_dicts(input_queue: Queue, output_queue: Queue, iterator: DataIterator, shuffle: bool, index: int) -> None:
def instances() -> Iterator[Instance]:
instance = input_queue.get()
        while instance is not None:
            yield instance
            instance = input_queue.get()
fo... |
@dataclass
class BenchmarkResult:
    name: str
    wall_time: int
    cuda_memory_usage: int
    @classmethod
    def from_json(cls, inp: bytes) -> 'BenchmarkResult':
return cls(**loads(inp))
def to_json(self) -> bytes:
obj = asdict(self)
res = dumps(obj, ensure_ascii=False, indent=None)
return res |
def test_nested_BitMaskedArray_NumpyArray():
v2a = ak.contents.ListOffsetArray(ak.index.Index64(np.array([0, 1, 14], dtype=np.int64)), ak.contents.bitmaskedarray.BitMaskedArray(ak.index.Index(np.packbits(np.array([0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1], np.uint8))), ak.contents.numpyarray.NumpyArray(np.array([99... |
def test_anchor_generator_with_tuples():
from mmdet.core.anchor import build_anchor_generator
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
anchor_generator_cfg = dict(type='SSDAnchorGenerator', scale_major=False, input_size=300, basesize_ratio_range=(0.15, 0.9), str... |
class FiniteExtensionFromLimitValuation(FiniteExtensionFromInfiniteValuation):
def __init__(self, parent, approximant, G, approximants):
self._approximants = approximants
from .limit_valuation import LimitValuation
limit = LimitValuation(approximant, G)
FiniteExtensionFromInfiniteVal... |
def dict2json(obj, filename=None, *args, **kwargs):
    if filename is not None:
        before_save(filename)
        with open(filename, 'w') as f:
            return json.dump(obj, f, *args, **kwargs)
return json.dumps(obj, **kwargs) |
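A quick usage sketch of the two paths through `dict2json`: with a filename it writes to disk (after the snippet's `before_save` hook runs), without one it returns the serialized string:

```python
payload = {'run': 3, 'metrics': {'loss': 0.12}}
as_text = dict2json(payload, indent=2)      # no filename: returns a JSON string
dict2json(payload, 'run3.json', indent=2)   # filename: writes run3.json instead
```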
def encoder_net():
inputs = Input((IMG_SHAPE, IMG_SHAPE, 3))
normalization_layer = UnitNormLayer()
encoder = tf.keras.applications.ResNet50(weights=None, include_top=False)
encoder.trainable = True
embeddings = encoder(inputs, training=True)
embeddings = GlobalAveragePooling2D()(embeddings)
... |
class TeladocViewReviews(VirtualFunctionTool):
name = 'TeladocViewReviews'
summary = "View reviews for a doctor by providing the doctor's unique identifier."
parameters: List[ArgParameter] = [{'name': 'doctor_id', 'type': 'string', 'description': 'The unique identifier of the chosen doctor.', 'required': Tr... |
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('dump_dir')
parser.add_argument('stage')
parser.add_argument('--dump_paths', default=None, help='Relative to `dump_dir/phrase`. If specified, creates subindex dir and save there with same name')
parser.add_argument('--subindex_na... |
class AddPXG(object):
def __init__(self, op, colocate_gradients_with_ops=False, gate_gradients=False):
        assert op.node_def.op == 'Add'
self.op = op
self.colocate_gradients_with_ops = colocate_gradients_with_ops
self.gate_gradients = gate_gradients
def __call__(self, x, z_grads):... |
def init_plots(title: Optional[str]=None, ylabels: Optional[Sequence[str]]=None, keys: Optional[Sequence[str]]=None, xlabel: str='Step', **kwargs):
set_plot_style()
plots = {}
    if plt.isinteractive():
        if keys is not None and len(keys) > 0:
for key in keys:
plots[key] = i... |
def changeContagion_GENCOMP(G, A, i):
delta = 0
    delta += sum(A[u] == 1 for u in G.outIterator(i))
    delta += sum(A[u] == 1 for u in G.inIterator(i))
return delta |
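`changeContagion_GENCOMP` counts how many of node `i`'s neighbors, over both outgoing and incoming edges, are in the active state. An equivalent count sketched with `networkx` as a stand-in for the snippet's graph API (`outIterator`/`inIterator` correspond to `successors`/`predecessors` on a `DiGraph`):

```python
import networkx as nx

G = nx.DiGraph([(0, 1), (1, 2), (2, 1)])
A = {0: 1, 1: 0, 2: 1}  # node -> active flag

def change_contagion(G, A, i):
    out_active = sum(A[u] == 1 for u in G.successors(i))
    in_active = sum(A[u] == 1 for u in G.predecessors(i))
    return out_active + in_active

assert change_contagion(G, A, 1) == 3  # active out-neighbor 2 plus in-neighbors 0 and 2
```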
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
def test_add2_double_backward(seed, ctx, func_name):
from nbla_test_utils import backward_function_tester
rng = np.random.RandomState(seed)
inputs = [rng.randn(2, 3).astype(np.float32), rng.randn(2, 3).astype(np.float32)]
backward_function... |
def show_move(node, edge_weights, file_name):
dot = build_dot(node, edge_weights)
dot.format = 'pdf'
if os.path.exists(f'./{file_name}.pdf'):
os.remove(f'./{file_name}.pdf')
dot.render(file_name, directory='.', cleanup=True) |
def main():
parser = get_parser()
args = parser.parse_args()
source_path = osp.join(args.source, args.split)
print(f'data path: {source_path}')
    features = np.load(source_path + '.npy', mmap_mode='r')
os.makedirs(args.save_dir, exist_ok=True)
save_path = osp.join(args.save_dir, args.split)
... |
def test_local_batched_data_loading_model_axis_1():
devices = jax.devices()
model_axis_size = 1
    mesh = Mesh(np.array(devices).reshape(-1, model_axis_size), (ResourceAxis.DATA, ResourceAxis.MODEL))
with mesh, haliax.axis_mapping({'batch': ResourceAxis.DATA}):
seq_len = 128
cache = _sma... |
class Test_Flag(TestCase):
def test_flag__iter__(self):
flagobj = flags.Flags()
flagobj.addFlag('Calculated Temperature')
flagobj.addFlag('Estimated Mass')
        self.assertIn('Calculated Temperature', flagobj)
        self.assertIn('Estimated Mass', flagobj)
self.asser... |
def test_desknn():
(pool_classifiers, X_dsel, y_dsel, X_test, y_test) = setup_classifiers()
desknn = DESKNN(pool_classifiers, DFP=True)
desknn.fit(X_dsel, y_dsel)
assert np.isclose(desknn.score(X_test, y_test), 0.) |
def evaluate_df_coefficient_dict(coeff_dict, alpha):
tot = 0
    for key, val in coeff_dict.items():
        if key[0] == 'indirect':
            pwer = key[1]
            rt_alpha_inv = 1 / np.sqrt(alpha)
            tot += val * (rt_alpha_inv ** pwer)
        else:
            p, arg = key
t... |
def lea(clusters, mention_to_gold):
    num, dem = 0, 0
    for c in clusters:
        if len(c) == 1:
            continue
        common_links = 0
        all_links = len(c) * (len(c) - 1) / 2.0
        for i, m in enumerate(c):
            if m in mention_to_gold:
                for m2 in c[i + 1:]:... |
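The `lea` function is cut off mid-loop above. For context, here is a self-contained sketch of the LEA coreference metric it implements (Moosavi and Strube, 2016): each cluster contributes its size times the fraction of its links that the other side resolves, with singletons skipped. This completes the visible logic under that reading, not necessarily the repository's exact code:

```python
def lea_sketch(clusters, mention_to_gold):
    num, dem = 0.0, 0.0
    for c in clusters:
        if len(c) == 1:  # singletons carry no links
            continue
        all_links = len(c) * (len(c) - 1) / 2.0
        common_links = 0
        for i, m in enumerate(c):
            if m in mention_to_gold:
                for m2 in c[i + 1:]:
                    if m2 in mention_to_gold and mention_to_gold[m] == mention_to_gold[m2]:
                        common_links += 1
        num += len(c) * common_links / all_links
        dem += len(c)
    return num, dem
```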
def register_command(subparsers):
parser = subparsers.add_parser('prediction-models', help="Evaluate Skyline's prediction accuracy.")
parser.add_argument('entry_point', help='The entry point file in this project that contains the Skyline provider functions.')
parser.add_argument('-b', '--batch-sizes', help=... |
def _worker_terminate_task(g, scope=None):
g = _get_scoped_g(g, scope)
if getattr(g, 'env', None):
g.env.close()
g.env = None
if getattr(g, 'policy', None):
g.policy.terminate()
g.policy = None |
class XmodModel(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch']) |
class DCSRCH():
def __init__(self, phi, derphi, ftol, gtol, xtol, stpmin, stpmax):
self.stage = None
self.ginit = None
self.gtest = None
self.gx = None
self.gy = None
self.finit = None
self.fx = None
self.fy = None
self.stx = None
self.... |
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('paths', nargs='+')
parser.add_argument('-o', '--out', default='ensemble.json')
parser.add_argument('--data_path', default='data/squad/data_test.json')
parser.add_argument('--shared_path', default='data/squad/shared_test.json')
... |
class ExtendedF1(BaseMetric):
def __init__(self, recommendations, config, params, eval_objects, additional_data):
super().__init__(recommendations, config, params, eval_objects, additional_data)
self._beta = 1
self._squared_beta = (self._beta ** 2)
parse_metric_func = importlib.impor... |
def gradient_based_attack_wrt_w(model, input, trg, num_classes):
target = keras.utils.to_categorical(trg, num_classes)
loss = K.categorical_crossentropy(target, model.output)
grads = K.gradients(loss, model.trainable_weights)
fn = K.function([model.input], grads)
g = fn([input])
weight_grad = li... |
class Pipeline():
    def __init__(self, model_cfg_dict, optimizer_cfg_dict=None, local_rank=-1, training=False, resume=False):
self.model_cfg_dict = model_cfg_dict
self.optimizer_cfg_dict = optimizer_cfg_dict
self.epe = EPE()
self.ter = Ternary()
self.laploss = LapLoss()
... |
def main_debug():
parser = argparse.ArgumentParser(description='Debug snippets')
    parser.add_argument('mode', choices=['training', 'inference'], help="'training' to debug training and 'inference' to debug inference")
parser.add_argument('path', help='Path to the dataset or trained assistant')
parser.add_... |
class ExtendedTimeStepWrapper(dm_env.Environment):
def __init__(self, env):
self._env = env
def reset(self):
time_step = self._env.reset()
return self._augment_time_step(time_step)
def step(self, action):
time_step = self._env.step(action)
return self._augment_time_st... |
def assert_not_exists(name):
    raised = False
    try:
        dace.library.get_library(name)
    except:
        raised = True
    if not raised:
        raise RuntimeError('Library ' + name + ' exists.') |
class ImageEncoderTypes(Enum):
default = 'default'
identity = 'identity'
torchvision_resnet = 'torchvision_resnet'
resnet152 = 'resnet152'
detectron2_resnet = 'detectron2_resnet' |
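One property worth noting with `Enum` classes like this: members can be looked up by value as well as by attribute, which is how config strings typically map onto encoder types. A tiny self-contained sketch (abridged copy of the enum above):

```python
from enum import Enum

class ImageEncoderTypes(Enum):
    resnet152 = 'resnet152'

assert ImageEncoderTypes('resnet152') is ImageEncoderTypes.resnet152
assert ImageEncoderTypes.resnet152.value == 'resnet152'
```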
def dataio_prep(hparams):
datasets = {}
datasets['train'] = sb.dataio.dataset.DynamicItemDataset.from_json(json_path=hparams['train_annotation'], replacements={'data_root': hparams['data_folder']}, dynamic_items=[audio_pipeline_train], output_keys=['id', 'noisy_sig'])
datasets['valid'] = sb.dataio.dataset.D... |
class GridMapEnv(object):
END_POINT_MODE_BLOCK = 1
END_POINT_MODE_RADIUS = 2
def __init__(self, name='DefaultGridMapEnv', gridMap=None, workingDir='./'):
self.name = name
self.map = gridMap
self.workingDir = workingDir
self.renderDir = os.path.join(self.workingDir, 'Render')
... |
def setup_dataloaders(cfg, tokenizer):
LOGGER.info('Init. train_loader and val_loader...')
train_loaders = {}
for db in cfg.train_datasets:
train_loaders[db.name] = mk_captions_pretrain_dataloader(dataset_name=db.name, anno_path=db.ann, video_dir=db.img, txt_dir=db.txt, cfg=cfg, tokenizer=tokenizer,... |
def test_type_tracing_max_depth():
proxy = tt.ObjectProxy(MagicMock())
for i in range(tt._MAX_PROXY_NESTING):
proxy = proxy['foo']
assert isinstance(proxy, tt.ObjectProxy) |
def evaluate(embeddings, actual_issame, nrof_folds=10, pca=0):
thresholds = np.arange(0, 4, 0.01)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
(tpr, fpr, accuracy, best_thresholds) = calculate_roc(thresholds, embeddings1, embeddings2, np.asarray(actual_issame), nrof_folds=nrof_folds, pc... |
class DataLoaderWithPrefetch(DataLoader):
def __init__(self, *args, prefetch_size=None, **kwargs):
super().__init__(*args, **kwargs)
        self.prefetch_size = prefetch_size if prefetch_size is not None else 2 * kwargs.get('num_workers', 0)
def __iter__(self):
if (self.num_workers == 0):... |
class FootSteps(object):
def __init__(self, right, left):
self.right = [right]
self.left = [left]
self.time = [0.0]
self.flying_foot = []
def add_phase(self, duration, foot, position=None):
        assert foot in ('left', 'right', 'none')
self.ti... |
class AsrDataset(FairseqDataset):
def __init__(self, aud_paths, aud_durations_ms, tgt, tgt_dict, ids, speakers, num_mel_bins=80, frame_length=25.0, frame_shift=10.0):
        assert frame_length > 0
        assert frame_shift > 0
        assert all(x > frame_length for x in aud_durations_ms)
self.fr... |
@add_arg_scope
def stack_blocks_dense(net, blocks, output_stride=None, outputs_collections=None):
current_stride = 1
rate = 1
for block in blocks:
with variable_scope.variable_scope(block.scope, 'block', [net]) as sc:
            for i, unit in enumerate(block.args):
if ((output_stride... |
@test_utils.test()
def test_atomic_max_f32():
    @ti.kernel
    def max_kernel() -> ti.f32:
        x = -1000.0
        for i in range(1, 20):
            ti.atomic_max(x, -ti.f32(i))
        return x
    assert max_kernel() == -1.0 |
@test_utils.test(debug=True)
def test_adjoint_checkbit_lazy_grad():
    x = ti.field(float, shape=())
    ti.root.lazy_grad()
    @ti.kernel
    def test():
        x[None] = 1
    with ti.ad.Tape(loss=x, validation=True):
        test()
    assert x.snode.ptr.has_adjoint_checkbit() |
def render(renderer: Union[nn.Module, dict], meshes: Union[Meshes, None]=None, output_path: Optional[str]=None, resolution: Union[Iterable[int], int]=None, device: Union[str, torch.device]='cpu', cameras: Union[MMCamerasBase, CamerasBase, dict, None]=None, lights: Union[MMLights, dict, None]=None, batch_siz... |
def test_homogeneous_graph_schema(example_graph_schema):
gs = example_graph_schema(bb=0)
    assert gs.node_index('A') == 0
    assert gs.node_index('B') == 1
    assert gs.edge_index(EdgeType('A', 'a0', 'A')) == 0
    assert gs.edge_index(EdgeType('A', 'ab0', 'B')) == 1
    assert (gs.edge_index(EdgeType('... |
@app.route('/user')
def retrieve_info():
user_id = request.args['user_id']
    url = BASE_URL + user_id  # BASE_URL is a hypothetical placeholder for the truncated endpoint
response = requests.get(url)
return response.text |
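A self-contained sketch of the pattern the handler uses, reading a query parameter and proxying a request onward; `app` and `BASE_URL` here are hypothetical stand-ins, not from the source snippet:

```python
from flask import Flask, request
import requests

app = Flask(__name__)
BASE_URL = 'https://example.com/users/'  # hypothetical upstream endpoint

@app.route('/user')
def user_proxy():
    # Forward the caller's user_id to the upstream service and relay the body.
    user_id = request.args['user_id']
    return requests.get(BASE_URL + user_id).text
```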
def validate_graph(csgraph, directed, dtype=DTYPE, csr_output=True, dense_output=True, copy_if_dense=False, copy_if_sparse=False, null_value_in=0, null_value_out=np.inf, infinity_null=True, nan_null=True):
    if not (csr_output or dense_output):
raise ValueError('Internal: dense or csr output must be true')
... |
class PredicateCollector(Visitor_Recursive):
_pred_spec: PredicateSpec
def __init__(self):
self._pred_spec = PredicateSpec()
def _process_arg(self, tree):
arg_kind = str(tree.data)
        if arg_kind == 'pred_var':
            return str(tree.children[0])
        elif (arg_kind == 'pred_s... |
def main_loop():
num_steps = 0
for i_episode in count():
state = env.reset()
state = running_state(state)
reward_episode = 0
for t in range(10000):
state_var = tensor(state).unsqueeze(0).to(dtype)
action = policy_net(state_var)[0][0].detach().numpy()
... |
@registry.register_task_model('fluorescence', 'resnet')
@registry.register_task_model('stability', 'resnet')
class ProteinResNetForValuePrediction(ProteinResNetAbstractModel):
def __init__(self, config):
super().__init__(config)
self.resnet = ProteinResNetModel(config)
self.predict = ValuePredictionHead(config.hidden_size)
... |
def get_tiny_model_names_from_repo():
model_names = set(get_all_model_names())
with open('tests/utils/tiny_model_summary.json') as fp:
tiny_model_info = json.load(fp)
tiny_models_names = set()
for model_base_name in tiny_model_info:
tiny_models_names.update(tiny_model_info[model_base_nam... |
class AnInfinity:
    def _repr_(self):
        return self._sign_char + 'Infinity'
    def _giac_init_(self):
        return self._sign_char + 'infinity'
    def _maxima_init_(self):
        if self._sign < 0:
            return 'minf'
else:
return 'inf'
def _fricas_init_(self):
... |
def load_and_cache_examples(args, tokenizer):
dataset = CNNDMDataset(args.documents_dir)
return dataset |
def warmup_cosine(x, warmup=0.002):
    s = tf.cast(x <= warmup, tf.float32)
    return s * (x / warmup) + (1 - s) * 0.5 * (1 + tf.cos(math.pi * x)) |
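`warmup_cosine` takes `x` as the fraction of training completed: a linear ramp to 1.0 over the first `warmup` fraction of steps, then a cosine decay to 0. The same schedule sketched in plain Python to show its shape:

```python
import math

def warmup_cosine_sketch(x, warmup=0.002):
    s = float(x <= warmup)
    return s * (x / warmup) + (1 - s) * 0.5 * (1 + math.cos(math.pi * x))

for x in (0.001, 0.002, 0.5, 1.0):
    print(f'{x:.3f} -> {warmup_cosine_sketch(x):.4f}')  # 0.5000, 1.0000, 0.5000, 0.0000
```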
def get_device(x):
if isinstance(x, torch.Tensor):
return x.device
elif isinstance(x, torch.nn.Module):
return next(x.parameters()).device
else:
        raise RuntimeError('{} does not have a `device` attribute'.format(type(x))) |
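A quick check of both branches of the `get_device` helper above:

```python
import torch

t = torch.zeros(3)
m = torch.nn.Linear(4, 2)
assert get_device(t) == t.device                       # tensor branch
assert get_device(m) == next(m.parameters()).device    # module branch
```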
def test_aug_assign_tasklet_lhs_cpp():
def sdfg_aug_assign_tasklet_lhs_cpp(A: dace.float64[32], B: dace.float64[32]):
for i in range(32):
with dace.tasklet(language=dace.Language.CPP):
                a << A[i]
                k << B[i]
                b >> A[i]
sdfg = sdfg_aug_assign_... |
class RandAugmentPC(object):
def __init__(self, n, m):
        assert n >= 1
        assert 1 <= m <= 10
self.n = n
self.m = m
self.augment_pool = my_augment_pool()
def __call__(self, img):
ops = random.choices(self.augment_pool, k=self.n)
for (op, max_v, bias) in ops... |
class DiagGaussianDistribution(Distribution):
def __init__(self, action_dim: int):
super(DiagGaussianDistribution, self).__init__()
self.action_dim = action_dim
self.mean_actions = None
self.log_std = None
def proba_distribution_net(self, latent_dim: int, log_std_init: float=0.0)... |
class Feature(object):
def __init__(self, base_name, func, offset=0, drop_out=0):
        if base_name == TOKEN_NAME:
            raise ValueError("'%s' name is reserved" % TOKEN_NAME)
self.offset = offset
self._name = None
self._base_name = None
self.base_name = base_name
... |
def create_summarization_algo_setting_layout():
    return html.Div(id='algo-setting-layout', children=[html.Br(), html.B('Parsing Algorithm'), dcc.Dropdown(id='parsing-algo-select', options=['DRAIN', 'IPLoM', 'AEL'], value='DRAIN'), html.Div(id='parsing-param-table', children=[create_param_table()])]) |
def load_data():
print('[*] Loading data...')
    if DATA_CONFIG['training_data_location'] == 'sa':
        x_train = sa.attach(DATA_CONFIG['training_data'])
    elif DATA_CONFIG['training_data_location'] == 'hd':
        x_train = np.load(DATA_CONFIG['training_data'])
    x_train = x_train.reshape(-1, MODEL... |
class Symk_class(OverconvergentDistributions_abstract):
def __init__(self, k, base, character, adjuster, act_on_left, dettwist, act_padic, implementation):
if hasattr(base, 'prime'):
p = base.prime()
else:
p = ZZ(0)
OverconvergentDistributions_abstract.__init__(self, ... |
def make_params(alg_name, exp_name):
params = dict()
alg_param_names = alg_dict[alg_name].related_parameters()
(json_content, res_path) = load_exp_json_file(alg_name, exp_name)
json_exp_params = json_content.get('meta_parameters')
for param in alg_param_names:
params[param] = json_exp_params... |
class RGMP(nn.Module):
def __init__(self):
super(RGMP, self).__init__()
self.Encoder = Encoder()
self.Decoder = Decoder() |
@xilinx_test(assert_ii_1=False)
def test_4_interface_to_2_banks_hbm_non_decoupled_interface():
return four_interface_to_2_banks(mem_type='HBM', decouple_interfaces=False) |
class ParsePartitioningOptsGlue(Parser):
def _add_model_args(self, group):
group.add_argument('--task_name', type=str, default='mnli', help='Glue task')
group.add_argument('--model_type', default=None, type=str, required=True, help=('Model type selected in the list: ' + ', '.join(MODEL_TYPES)))
... |
class ConvGraph():
def __init__(self, graph_data_dir):
self.query_types = []
self.flow_data = {}
self.page_data = {}
if file_exists('botsim', os.path.join(graph_data_dir, 'visualization.json')):
self.query_types.append('All')
self.flow_data = read_s3_json('bot... |
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8), triton.Config({}, num_warps=16), triton.Config({}, num_warps=32)], key=['N', 'HAS_DRESIDUAL', 'STORE_DRESIDUAL', 'IS_RMS_NORM', 'HAS_BIAS'])
@triton.heuristics({'RECOMPUTE_OUTPUT': (lambda args: (args...
@pytest.mark.parametrize('ph', ['?', ':1', ':foo', '%s', '%(foo)s'])
def test_placeholder(ph):
p = sqlparse.parse(ph)[0].tokens
    assert len(p) == 1
    assert p[0].ttype is T.Name.Placeholder |
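A standalone illustration of what the parametrized test asserts: each placeholder style parses to a single `Name.Placeholder` token:

```python
import sqlparse
from sqlparse import tokens as T

tok = sqlparse.parse('%(foo)s')[0].tokens[0]
assert tok.ttype is T.Name.Placeholder
assert tok.value == '%(foo)s'
```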
class docInternalType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
        if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if (content_ ... |
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use ImageGPTImageProcessor instead.', FutureWarning)
super().__init__(*args, *... |
def generate_ngram_attrs(corpus_by_cat, ngram_range, k, attrs):
vectorizer = TfidfVectorizer(stop_words=get_stop_words(), ngram_range=ngram_range, max_features=1000)
top_attrs_by_cat = dict()
    for category, corpus in tqdm(corpus_by_cat.items(), total=len(corpus_by_cat)):
asins = [_[0] for _ in corp... |
class TwoRandomIndex(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
def forward(self, x):
batch_idxs_1 = torch.randint(x.shape[1], (x.shape[0],))
        x1 = x[torch.arange(0, x.shape[0], dtype=torch.long), batch_idxs_1]
batch_idxs_2 = torch.randint(x.s... |