code stringlengths 101 5.91M |
|---|
(scope='module')
def simpledf() -> dd.DataFrame:
df = pd.DataFrame(np.random.rand(1000, 3), columns=['a', 'b', 'c'])
df = pd.concat([df, pd.Series(np.random.choice(['a', 'b', 'c'], 1000, replace=True))], axis=1)
df = pd.concat([df, pd.Series(np.random.choice(['2020/03/29', '2020/01/10', '2019/11/21'], 1000,... |
class DecoderR2plus1d(Decoder3d):
def __init__(self, n_classes=2, inter_block=GC3d, refine_block=Refine3d):
super(DecoderR2plus1d, self).__init__(n_classes=n_classes)
mdim = 256
self.GC = inter_block(512, 256)
self.RF4 = refine_block(256, mdim)
self.RF3 = refine_block(128, md... |
def main():
parser = argparse.ArgumentParser(description='Model')
parser.add_argument('--gpu', type=str, default='0', help='Set CUDA_VISIBLE_DEVICES')
parser.add_argument('--verbose', type=int, default=logging.INFO, help='Logging Level, 0, 10, ..., 50')
parser.add_argument('--log_file', type=str, defaul... |
def resnet50(pretrained=False, **kwargs):
    """Construct a ResNet-50 model (Bottleneck blocks, layout [3, 4, 6, 3]).

    Args:
        pretrained: when True, initialize from the weights at
            ``model_urls['resnet50']`` (non-strict load, so missing/extra
            keys are tolerated).
        **kwargs: forwarded to the ``ResNet`` constructor.

    Returns:
        The constructed ``ResNet`` model.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return model
    # strict=False: allow the checkpoint to omit keys added by **kwargs
    model.load_state_dict(load_url(model_urls['resnet50']), strict=False)
    return model
def train_ngram_lm(kenlm_path, data_path, output_path, N):
curdir = os.path.abspath(os.path.curdir)
command = ((((('bin/lmplz -o ' + str(N)) + ' <') + os.path.join(curdir, data_path)) + ' >') + os.path.join(curdir, output_path))
os.system(((('cd ' + os.path.join(kenlm_path, 'build')) + ' && ') + command))
... |
def txt_to_h5(weights_file_name, output_file_name=''):
lr = False
bias = []
weights = []
batchnorm_params = []
bias_count = 0
weights_count = 0
batchnorm_count = 0
with open(weights_file_name, mode='r') as weights_file:
lines = weights_file.readlines()
for (idx, line) in ... |
def add_eval_options(parser):
parser.add_argument('--batch_size', type=int, default=0, help='if > 0 then overrule, otherwise load from checkpoint.')
parser.add_argument('--num_images', type=int, default=(- 1), help='how many images to use when periodically evaluating the loss? (-1 = all)')
parser.add_argume... |
class DiagonalPhaseTest(tf.test.TestCase):
    """Verifies that DiagonalPhaseLayer's stored inverse really inverts it.

    For each size in TEST_DIMENSIONS, applying the layer to its own
    ``inverse_matrix`` must reproduce the identity matrix.
    """

    def test(self):
        for units in TEST_DIMENSIONS:
            diag_phase = DiagonalPhaseLayer(units=units)
            # layer(inverse) == I  <=>  inverse_matrix is a right inverse of the layer
            self.assertAllClose(diag_phase(diag_phase.inverse_matrix), tf.eye(units))
def add_std_ofstream(module):
module.add_include('<fstream>')
ostream = module.add_class('ostream', foreign_cpp_namespace='::std')
ostream.set_cannot_be_constructed('abstract base class')
ofstream = module.add_class('ofstream', foreign_cpp_namespace='::std', parent=ostream)
ofstream.add_enum('openmo... |
def GeneralizedSierpinskiGraph(G, k, stretch=None):
if (not isinstance(G, Graph)):
raise ValueError('parameter G must be a Graph')
if (k < 1):
raise ValueError('parameter k must be >= 1')
loops = G.allows_loops()
multiedges = G.allows_multiple_edges()
def rec(H, kk):
if (kk =... |
class StorageType(object):
def __init__(self):
self.class_member_declarations = ''
self.class_member_initializations = ''
self.local_declarations = ''
def cheap_copies(self):
return False
def python_refcounted(self):
return False
def cython_decl_type(self):
... |
def _get_predicate_id_and_arity(text, type_dict, predicate_dict):
global SEEN_WARNING_TYPE_PREDICATE_NAME_CLASH
the_type = type_dict.get(text)
the_predicate = predicate_dict.get(text)
if ((the_type is None) and (the_predicate is None)):
raise SystemExit(('Undeclared predicate: %s' % text))
e... |
class TestPose(TestCase):
def test_pose_tf_posebody_normalize_graph_mode_does_not_fail(self):
with tf.Graph().as_default():
assert (tf.executing_eagerly() is False)
pose = _get_random_pose_object_with_tf_posebody(num_keypoints=5)
pose.normalize(pose.header.normalization_i... |
def setup_output_folder(folder_only: bool=False):
save_dir = get_mmf_env(key='save_dir')
time_format = '%Y_%m_%dT%H_%M_%S'
log_filename = 'train_'
log_filename += Timer().get_time_hhmmss(None, format=time_format)
log_filename += '.log'
log_folder = os.path.join(save_dir, 'logs')
env_log_dir ... |
# NOTE(review): the decorator above appears truncated by extraction --
# presumably awkward-array's "@...high_level_function()" dispatch decorator;
# confirm against the original source.
_level_function()
def unflatten(array, counts, axis=0, *, highlevel=True, behavior=None, attrs=None):
    """Unflatten `array` into sublists of the given `counts` along `axis`.

    NOTE(review): the yield-then-return shape is the awkward-array dispatch
    protocol: the ``yield`` hands the array arguments to the dispatcher,
    then ``_impl`` performs the actual work -- TODO confirm.
    """
    (yield (array,))
    return _impl(array, counts, axis, highlevel, behavior, attrs)
def balance_classes(ds, classes_to_keep=None):
if (classes_to_keep is None):
return ds
class_datasets = [TargetFilter(ds, [c]) for c in classes_to_keep]
num_sample = min([len(ds) for ds in class_datasets])
balanced_datasets = [SubSampler(ds, num_sample) for ds in class_datasets]
class_bal_ds... |
def skew(outer, inner, maxrows=(- 1)):
    """Skew Littlewood-Richardson expansion of outer/inner via lrcalc.

    The raw lrcalc dictionary is converted into the Sage representation
    before being returned.
    """
    raw_expansion = lrcalc.skew(outer, inner, maxrows)
    return _lrcalc_dict_to_sage(raw_expansion)
def convolution_data_grad_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, base_axis=1, pad=None, stride=None, dilation=None, group=1, channel_last=False):
gdx = grad_inputs[0]
dy = inputs[0]
w0 = inputs[1]
ctx = nn.get_current_context()
dfw = ConvolutionFilterGrad(ctx, base_axis,... |
def main_mean():
fig = plt.figure(figsize=(10, 5), dpi=150)
plt.subplot(1, 2, 1)
plt.grid(True)
plot_i = 0
(h1,) = plt.plot(region_ids, ad_2_list, '--', marker=markers[plot_i], markersize=marker_size, markerfacecolor='none', label=labels[plot_i], linewidth=linewidth, color=_COLORS[(plot_i * color_st... |
class ChineseCLIPTextModelTester():
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, at... |
def generate_inputs(data):
split = []
for d in data:
table_id = d['table_id']
with open('{}/tables_tok/{}.json'.format(resource_path, table_id), 'r') as f:
table = json.load(f)
headers = [cell[0] for cell in table['header']]
tmp = []
labels = []
for no... |
class BaseExperiment(ABC):
def __init__(self, cfg: DictConfig) -> None:
super().__init__()
self._created = get_timestamp('%Y-%m-%d-%H%M%S')
self.cfg = cfg
self.config: ExperimentConfig = instantiate(cfg)
assert (self.config.framework.lower() in ['pt', 'tf', 'pytorch', 'torch'... |
def start_cleaner():
if (not os.fork()):
os.setpgid(os.getpid(), os.getpid())
with open(os.devnull, 'r+') as f:
os.dup2(f.fileno(), 0)
os.dup2(f.fileno(), 1)
os.dup2(f.fileno(), 2)
try:
maxopenfiles = os.sysconf('SC_OPEN_MAX')
if (m... |
def skip_if_matplotlib_not_installed(fname):
    """Raise SkipTest for *fname*'s doctests when matplotlib is unavailable.

    Returns None (and does nothing) when matplotlib imports cleanly.
    """
    try:
        import matplotlib  # noqa: F401 -- only probing availability
    except ImportError:
        raise SkipTest(
            f'Skipping doctests for {os.path.basename(fname)}, matplotlib not installed'
        )
class UndeclaredNameVisitor(NodeVisitor):
def __init__(self, names):
self.names = set(names)
self.undeclared = set()
def visit_Name(self, node):
if ((node.ctx == 'load') and (node.name in self.names)):
self.undeclared.add(node.name)
if (self.undeclared == self.nam... |
# NOTE(review): the decorator below is truncated by extraction -- presumably
# "@pytest.mark.parametrize"; confirm against the original file.
.parametrize('n_points', [None, 12])
def test_a1a(n_points):
    """The a1a dataset loader returns 119 features and matching X/y lengths."""
    (X, y) = shap.datasets.a1a(n_points=n_points)
    # n_points=None means "load everything"; the full a1a set has 1605 rows
    n_points = (1605 if (n_points is None) else n_points)
    assert (X.shape == (n_points, 119))
    assert (y.shape == (n_points,))
def forward_fn(x, is_eval=False):
    """Apply a freshly built ActorCritic network to `x`.

    Returns a (logits, value) pair. `is_eval` is accepted for API
    compatibility but not used in this forward pass.
    """
    network = ActorCritic(env.num_actions, activation='tanh')
    logits, value = network(x)
    return logits, value
def encode_right_truncated(text, tokenizer, max_length=511):
    """Tokenize `text`, keep only the LAST `max_length` tokens, prepend [CLS].

    Right-truncation keeps the tail of long inputs; the returned id list has
    at most max_length + 1 entries (the extra one is the CLS token id).
    """
    tail_tokens = tokenizer.tokenize(text)[(- max_length):]
    tail_ids = tokenizer.convert_tokens_to_ids(tail_tokens)
    return [tokenizer.cls_token_id] + tail_ids
def CalculateDistributionCharge(ProteinSequence):
    """Compute the CTD Distribution descriptor for the Charge property.

    Thin wrapper that delegates to CalculateDistribution with the module's
    _Charge property table and its '_Charge' label.
    """
    return CalculateDistribution(ProteinSequence, _Charge, '_Charge')
def generating_file_message(output_type: str) -> None:
    """Print a status line announcing that *output_type* is being generated.

    Underscores in the type name become spaces and the result is lower-cased,
    e.g. 'CSV_File' -> 'Creating csv file...'. The leading newline in the
    message is intentional (separates it from previous output).
    """
    print(f'''
Creating {output_type.replace('_', ' ').lower()}...''')
class label(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=''):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if label.subclass:
return label.subclass(*args_, **kwargs_)
else:
return label(*args_, **kwargs_)
... |
class HellingerDistanceCriterion(SplitCriterion):
def __init__(self, min_branch_frac_option=0.01):
super().__init__()
self.min_branch_frac_option = min_branch_frac_option
self.lowest_entropy = None
self.best_idx = 0
def get_merit_of_split(self, pre_split_dist, post_split_dist):
... |
def test_analyze_traces_empty():
    """analyze_results on an empty result list yields a default ExecutionTrace."""
    results = []
    assert ff.analyze_results(results) == ExecutionTrace()
def plot_prior_BO_limit(prior):
df = check_prior_BO_limit(prior)
(fig, axs) = plt.subplots(1, 3, figsize=(12, 4), sharex=True)
axs[0].plot(df['mx_hat'], df['A_BO'], '-', label='$A \\quad BO$')
axs[0].plot(df['mx_hat'], df['A_RS'], '--', label='$A \\quad RS$')
axs[0].set(xlabel='$\\widehat{m}_x^-$')
... |
class StandardSkewTableaux_all(StandardSkewTableaux):
def __init__(self):
StandardSkewTableaux.__init__(self, category=InfiniteEnumeratedSets())
def _repr_(self):
return 'Standard skew tableaux'
def __iter__(self):
n = 0
while True:
for st in StandardSkewTableaux_... |
class Lambda(DDict):
_globals = {}
def update_globals(cls, list_or_dict):
dictionary = (dict(((x.__name__, x) for x in list_or_dict)) if isinstance(list_or_dict, list) else list_or_dict)
cls._globals.update(dictionary)
def __init__(self, func):
super().__init__(tag='<Lambda>', func=f... |
def test_two_level_delete_the_only_field():
base = ak.zip({'a': ak.zip({'x': [1, 2, 3]})}, depth_limit=1)
assert (ak.without_field(base, where=['a', 'x']).to_list() == [{'a': {}}, {'a': {}}, {'a': {}}])
assert (ak.fields(base) == ['a'])
del base[('a', 'x')]
assert (base.to_list() == [{'a': {}}, {'a'... |
class EnumCase(object):
    """Holds the name variants of a single enum case for each generator.

    From one declared `name` it derives: the djinni IDL spelling
    (lower-case), the djinni output spelling (upper-case), and the lcm /
    proto spellings (unchanged).
    """

    def __init__(self, int_value, name):
        self.int_value = int_value
        self.definition_name = name
        # lcm and proto use the declared name verbatim
        self.lcm_name = name
        self.proto_name = name
        # djinni wants lower-case in the IDL and upper-case in generated code
        self.djinni_idl_name = name.lower()
        self.djinni_name = name.upper()
def unimod_matrices_from_infty(r, s):
if (s != 0):
L = convergents((r / s))
v = [M2Z([(- L[0].numerator()), 1, (- L[0].denominator()), 0])]
for j in range((len(L) - 1)):
a = L[j].numerator()
c = L[j].denominator()
b = L[(j + 1)].numerator()
d =... |
class BatchNormalizationForwardFolding(common.BaseSubstitution):
def __init__(self, bn_node: NodeOperationMatcher, conv_node: NodeOperationMatcher, update_weights_for_bn_forward_folding_fn: Callable, get_kernel_hw_fn: Callable, is_group_conv_fn: Callable, get_foldable_node_type_and_validity_fn: Callable, kernel_str... |
class BeamState():
def __init__(self):
self.entries = {}
def norm(self):
for (k, _) in self.entries.items():
labelingLen = len(self.entries[k].labeling)
self.entries[k].prText = (self.entries[k].prText ** (1.0 / (labelingLen if labelingLen else 1.0)))
def sort(self):
... |
def load_meta(meta_path):
    """Load the metadata file at `meta_path`, keyed by filename stem.

    Returns (meta_path, meta) where meta is None when the file does not
    exist, otherwise a dict mapping each row's filename stem to the row.
    """
    if not meta_path.is_file():
        return (meta_path, None)
    rows = load_json(meta_path)
    meta = {Path(row['filename']).stem: row for row in rows}
    return (meta_path, meta)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
model_type = 'maskformer-swin'
attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 2... |
def aes128_encrypt(key: Sequence[int], data: Sequence[int]) -> List[int]:
if (not isinstance(key, Sequence)):
raise TypeError('key must be a sequence of 16 bytes')
if (not isinstance(data, Sequence)):
raise TypeError('data must be a sequence of 16 bytes')
if ((len(key) != 16) or next((True f... |
def test_combine_workspace_incompatible_parameter_configs_right_outer_join(workspace_factory):
ws = workspace_factory()
new_ws = ws.rename(channels={channel: f'renamed_{channel}' for channel in ws.channels})
new_ws.get_measurement(measurement_name='GaussExample')['config']['parameters'][0]['bounds'] = [[0.0... |
def test_nans():
    """owens_t propagates NaN appearing in either (or both) arguments."""
    for h, a in [(20, np.nan), (np.nan, 20), (np.nan, np.nan)]:
        assert_equal(sc.owens_t(h, a), np.nan)
def _prediction_confidence(cos_similarities: List[float]) -> float:
T = (1 / 20)
return max((np.exp((np.array(cos_similarities) / T)) / np.sum(np.exp((np.array(cos_similarities) / T))))) |
def test_meta_evaluator_with_tf():
set_seed(100)
tasks = SetTaskSampler((lambda : GarageEnv(PointEnv())))
max_path_length = 200
env = GarageEnv(PointEnv())
n_traj = 3
with tempfile.TemporaryDirectory() as log_dir_name:
ctxt = SnapshotConfig(snapshot_dir=log_dir_name, snapshot_mode='none'... |
def create_pipeline_configuration(DEBUG=False, batch_size=32):
config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (CrossEntropyLoss, Linear, Dropout, T5LayerNorm, T5Block, StatelessEmbedding), 'model_inputs': {'attention_mask': {'shape': torch.Size([32, 1, 1, 64]), 'dtype': torch.float32, 'is_batched': True,... |
_tf
_retrieval
_sentencepiece
class TFRagTestMixin():
all_model_classes = ((TFRagModel, TFRagTokenForGeneration, TFRagSequenceForGeneration) if (is_tf_available() and is_datasets_available() and is_faiss_available()) else ())
all_generative_model_classes = ((TFRagTokenForGeneration, TFRagSequenceForGeneration) ... |
class TVAModel_Self(nn.Module):
def __init__(self, params):
super(TVAModel_Self, self).__init__()
rnn = (nn.LSTM if (params.rnntype == 'lstm') else nn.GRU)
self.text_encoder = rnn(input_size=params.txt_dim, hidden_size=params.txt_rnnsize, num_layers=params.txt_rnnnum, dropout=params.txt_rnnd... |
def remove_punctuation(a_string):
    """Return `a_string` with every ASCII punctuation character deleted."""
    # Map each punctuation char to None, i.e. delete it in one C-level pass.
    strip_table = str.maketrans({ch: None for ch in string.punctuation})
    return a_string.translate(strip_table)
class ResizeNormalize(object):
def __init__(self, size, interpolation=PIL.Image.BICUBIC):
self.size = size
self.interpolation = interpolation
self.toTensor = transforms.ToTensor()
def __call__(self, image):
image = image.resize(self.size, self.interpolation)
image = self.... |
class DMA_gather_reg(atomic_reg):
OP_NAME = 'DMA_gather'
_fields_ = [('intr_en', ctypes.c_uint64, 1), ('stride_enable', ctypes.c_uint64, 1), ('nchw_copy', ctypes.c_uint64, 1), ('cmd_short', ctypes.c_uint64, 1), ('decompress_enable', ctypes.c_uint64, 1), ('cmd_id_en', ctypes.c_uint64, 4), ('cmd_id', ctypes.c_uin... |
class BaseContrastEncoder(util.BaseEncoder, util.UnsupervisedTransformerMixin):
prefit_ordinal = True
encoding_relation = util.EncodingRelation.ONE_TO_N_UNIQUE
def __init__(self, verbose=0, cols=None, mapping=None, drop_invariant=False, return_df=True, handle_unknown='value', handle_missing='value'):
... |
def keyword_notor(A: dace.float32[N], B: dace.float32[N], C: dace.bool, D: dace.bool):
    """Copy A into B iff ``(not C) or D`` holds.

    NOTE(review): judging by the name, this exercises the frontend's
    handling of the ``not``/``or`` keywords (presumably a dace program test
    whose decorator was stripped by extraction), so the exact form of the
    condition is intentional -- do not simplify.
    """
    if ((not C) or D):
        B[:] = A[:]
def test_indexed_layout():
    """ak.unflatten works through an IndexedArray wrapping a NumpyArray."""
    values = ak.contents.NumpyArray(
        np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float64)
    )
    index = ak.index.Index64(np.arange(5))
    layout = ak.contents.IndexedArray(index, values)
    result = ak.unflatten(layout, [3, 0, 2])
    assert ak.almost_equal(result, [[1.1, 2.2, 3.3], [], [4.4, 5.5]])
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('alpha', [0.0, (- 1.0), 1.0])
def test_sign_double_backward(seed, alpha, ctx, func_name):
from nbla_test_utils import backward_function_tester
rng = np.random.RandomState(seed)
inputs = [(rng.randn(2, 3, 4).astype(np.float32) * 2)... |
class VGG(nn.Module):
def __init__(self, features, num_classes=10):
super(VGG, self).__init__()
self.features = features
self.classifier = nn.Sequential(nn.Dropout(), nn.Linear(512, 512), nn.ReLU(True), nn.Dropout(), nn.Linear(512, 512), nn.ReLU(True), nn.Linear(512, num_classes))
fo... |
def deconv2d_act(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), adj=(0, 0), no_bias=True, target_shape=None, act_type='relu', name='deconv2d', **kwargs):
deconv = deconv2d(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, adj=adj, target_shape=target_shape, no_bias=no_bias, name=... |
def test_setup_test_cluster_not_empty():
gen.set_configuration(configuration=MagicMock(type_inference=MagicMock(type_inference_strategy=config.TypeInferenceStrategy.TYPE_HINTS)))
with mock.patch('pynguin.generator.generate_test_cluster') as gen_mock:
tc = MagicMock()
tc.num_accessible_objects_un... |
class WebBrowserInputText(VirtualFunctionTool):
name = 'WebBrowserInputText'
summary = 'Inputs multiple text into specified input fields.'
parameters: List[ArgParameter] = [{'name': 'elements_and_texts', 'type': 'array', 'description': "A list of objects, each includes 'element_id' (string, the id of the in... |
def convert_deepsf_data_to_tfrecords(filenames: str, outfilenames: str, feature_dir: str, pssm_dir: str, fasta: str, vocab: Dict[(str, int)]):
serialize_with_vocab = partial(serialize_remote_homology_sequence, vocab=vocab)
class_to_int_label = {}
fold_to_int_label = {}
superfamily_to_int_label = {}
... |
class Trainer(object):
def __init__(self, args, task, model, criterion, dummy_batch=None, oom_batch=None):
self.args = args
self.task = task
self._criterion = criterion
self._model = model
self.cuda = (torch.cuda.is_available() and (not args.cpu))
if args.fp16:
... |
class webvision_dataloader():
def __init__(self, batch_size, num_class, num_workers, root_dir, distributed, crop_size=0.2):
self.batch_size = batch_size
self.num_class = num_class
self.num_workers = num_workers
self.root_dir = root_dir
self.distributed = distributed
s... |
def task_stats(tasks_file):
with open(tasks_file, 'r') as f:
tasks = json.load(f)
stats = {}
for selected_pipeline in ['early_combine', 'generate', 'atlas']:
stats[selected_pipeline] = {}
for selected_subset in ['head', 'tail', 'recent']:
pipeline_subset_task_count = 0
... |
def divide_dataset(val_ration=0.1):
test_set = []
val_set = []
train_set = []
train_path = os.path.join((Root + '/train'))
scenes = os.listdir(train_path)
for i_scene in scenes:
sub_files = os.listdir(os.path.join(train_path, (i_scene + '/img1')))
for i in sub_files:
... |
def get_file_sess(file_name):
    """Read a JSON-lines file and return the list of parsed session objects.

    Each line of `file_name` must be a standalone JSON document.
    """
    with open(file_name) as f:
        return [json.loads(line) for line in f]
class GmailAddOrUpdateContact(VirtualFunctionTool):
name = 'GmailAddOrUpdateContact'
summary = "Add a new contact to the contact list or update an existing contact's information."
parameters: List[ArgParameter] = [{'name': 'contact_id', 'type': 'string', 'description': 'The unique identifier of the contact.... |
def fix_seed(seed=None):
    """Seed numpy and torch (CPU + CUDA) RNGs and return the seed used.

    When `seed` is None the current wall-clock time is used; the seed is
    always truncated to int before seeding.
    """
    seed = int(time.time() if seed is None else seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # no-op queue on machines without CUDA; applied lazily otherwise
    torch.cuda.manual_seed(seed)
    return seed
.parametrize('shadow_model_fn', [torch_shadow_model_fn])
def test_prepare_attack_data(data, shadow_model_fn):
((X_train, y_train), (X_test, y_test)) = data
clf = shadow_model_fn()
clf.fit(X_train, y_train, epochs=3, verbose=False)
(X_attack, y_attack) = prepare_attack_data(clf, (X_train[:100], y_train[:... |
def gemm_kernel(alpha: dc.float64, beta: dc.float64, C: dc.float64[(NI, NJ)], A: dc.float64[(NI, NK)], B: dc.float64[(NK, NJ)]):
    """GEMM update: C <- alpha * A @ B + beta * C (Polybench `gemm`).

    NOTE(review): the source read ``(((alpha * A) B) + (beta * C))``, a
    syntax error -- the ``@`` matrix-multiplication operator between
    ``(alpha * A)`` and ``B`` was evidently stripped during extraction.
    Restored here; ``*`` and ``@`` share precedence and associate left, so
    ``alpha * A @ B`` keeps the original grouping ``(alpha * A) @ B``.
    """
    C[:] = alpha * A @ B + beta * C
def parse_args():
parser = ArgumentParser(description='PyTorch distributed training launch helper utility that will spawn up multiple distributed processes')
parser.add_argument('--nnodes', type=int, default=1, help='The number of nodes to use for distributed training')
parser.add_argument('--node_rank', ty... |
def changeAltTwoPathsTD(G, i, j):
    """Average of the T- and D-variant alternating two-path change statistics."""
    t_part = changeAltTwoPathsT(G, i, j)
    d_part = changeAltTwoPathsD(G, i, j)
    return 0.5 * (t_part + d_part)
def CalculateTotalAbsoulteCharge(mol):
Hmol = Chem.AddHs(mol)
GMCharge.ComputeGasteigerCharges(Hmol, iter_step)
res = []
for atom in Hmol.GetAtoms():
res.append(float(atom.GetProp('_GasteigerCharge')))
if (res == []):
return 0
else:
cc = numpy.array(res, 'd')
retu... |
def single_packet_loop(r_packet, numba_radial_1d_geometry, numba_model, opacity_state, estimators, vpacket_collection, rpacket_tracker):
line_interaction_type = montecarlo_configuration.line_interaction_type
if montecarlo_configuration.full_relativity:
set_packet_props_full_relativity(r_packet, numba_mo... |
class TFAlbertPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when TensorFlow is not installed.

    Any attempt to instantiate it raises via `requires_backends`, telling
    the user to install the 'tf' backend. (Auto-generated dummy-object
    pattern; do not add real logic here.)
    """
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def _test_mesh_for(cell_reorder=False, vert_reorder=False, extra_tests=True):
mesh_builder = ti.lang.mesh._TetMesh()
mesh_builder.verts.place({'t': ti.i32}, reorder=vert_reorder)
mesh_builder.cells.place({'t': ti.i32}, reorder=cell_reorder)
model = mesh_builder.build(ti.Mesh.load_meta(model_file_path))
... |
.skip(reason='Requires production')
def test_prod_continue():
prompt = 'Paris is the capital of'
for model_deployment_name in prod_model_deployments:
model_deployment: ModelDeployment = get_model_deployment(model_deployment_name)
model_name: str = (model_deployment.model_name or model_deployment... |
class ETSDetectorConfig(ETSConfig, NoCalibrationDetectorConfig):
    """Configuration for an ETS-based detector that skips score calibration."""
    # default alarm post-processing: aggregate alarms with threshold 3.0
    _default_threshold = AggregateAlarms(alm_threshold=3.0)
def _datacopied(arr, original):
if (arr is original):
return False
if ((not isinstance(original, np.ndarray)) and hasattr(original, '__array__')):
return False
return (arr.base is None) |
_optimizer('adam_w_skip_params_with_zero_grad')
class AdamWSkipParamsWithZeroGrad(AdamW):
def step(self, closure: Callable=None):
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
for p in group['params']:
if (p.gra... |
class _BuiltinOverride(object):
def __init__(self, py_name, args, ret_type, cname, py_equiv='*', utility_code=None, sig=None, func_type=None, is_strict_signature=False, builtin_return_type=None):
(self.py_name, self.cname, self.py_equiv) = (py_name, cname, py_equiv)
(self.args, self.ret_type) = (arg... |
def read_features_from_row(row, select_cols, feature_column_names, feature_metas, is_xgboost=False):
    """Decode the feature columns of one result row into a tuple.

    For each name in `feature_column_names`, the raw cell is located via
    its position in `select_cols` and decoded by `read_feature` using that
    feature's metadata.
    """
    return tuple(
        read_feature(row[select_cols.index(name)], feature_metas[name], name, is_xgboost)
        for name in feature_column_names
    )
def register_Ns3Simulator_methods(root_module, cls):
cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
cls.add_method('Cancel', 'void', [param('ns3::EventId const &', 'id')], is_static=True)
cls.add_method('Destroy', 'void', [], is_static=True)
cls.add_method('GetContext', 'uint32_t', [], i... |
def test_python_min1():
    """min() of a single scalar argument returns that scalar.

    NOTE(review): a decorator (most likely ``@dace.program``) appears to
    have been stripped from the inner function by extraction -- without it
    ``min(a)`` on a scalar and the ``[0]`` indexing of the result would not
    work; confirm against the original source.
    """
    def python_min1(a: dace.int64):
        return min(a)
    # repeat with random scalars to exercise negative, zero and positive values
    for _ in range(100):
        a = random.randint((- 10), 10)
        assert (python_min1(a)[0] == a)
def accuracy_topk_subselected(logits, targets):
    """Top-k accuracy after remapping targets into class_sublist_1_8 index space.

    Each target label is replaced by its position within the module-level
    `class_sublist_1_8` before delegating to `accuracy_topk`.
    """
    remapped = [class_sublist_1_8.index(label) for label in targets]
    return accuracy_topk(logits, torch.tensor(remapped))
def _prepare_state(state: EnvironmentState, script: Script, name_equivalence, object_placing, properties_data):
state_classes = {n.class_name for n in state.get_nodes()}
script_classes = {so.name for sl in script for so in sl.parameters}
missing_classes = set()
for sc in script_classes:
if ((sc ... |
((not workspace.C.use_mkldnn), 'No MKLDNN support.')
class PoolTest(hu.HypothesisTestCase):
(stride=st.integers(1, 3), pad=st.integers(0, 3), kernel=st.integers(3, 5), size=st.integers(7, 9), input_channels=st.integers(1, 3), batch_size=st.integers(1, 3), method=st.sampled_from(['MaxPool', 'AveragePool']), **mu.gcs... |
class attentive_node_features(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.transform = nn.Linear(hidden_size, hidden_size)
def forward(self, features, lengths, nodal_att_type):
if (nodal_att_type == None):
return features
batch_size = features.... |
# NOTE(review): the decorator below is truncated by extraction -- presumably
# "@pytest.mark.parametrize"; confirm against the original file.
.parametrize('function', ['public_function'])
def test_get_function_description(comments_tree, function):
    """get_function_description reports the function's own name."""
    descriptions = get_function_description(get_function_node_from_ast(comments_tree, function))
    assert (descriptions.name == function)
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
seed = (args.seed + utils.get_rank())
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
model = get_model(args)
patch_size = model.encoder.patch_embed.patch_size
pri... |
def list2card_str(hand_list):
    """Render a hand-count vector as a card string.

    `hand_list[i]` is how many copies of the i-th card (per the module-level
    `INDEX` ordering) the hand contains; each card symbol is repeated that
    many times in the output.

    The original built the string with ``+=`` in a loop (quadratic in the
    worst case) and copied INDEX with a pointless comprehension; this uses
    ``str.join`` over a generator instead -- identical output.
    """
    cards = list(INDEX)
    return ''.join(cards[i] * count for (i, count) in enumerate(hand_list))
def clip1d_kmeans(x, num_bits=8, n_jobs=(- 1)):
    """Clip `x` to the [min, max] span of 2**num_bits k-means cluster centers.

    The array is flattened, clustered with a fixed random_state for
    determinism, clipped to the extreme cluster centers, and restored to
    its original shape. `n_jobs` is accepted for API compatibility but is
    not used.
    """
    original_shape = x.shape
    flat = np.expand_dims(x.flatten(), (- 1))
    km = KMeans(n_clusters=(2 ** num_bits), random_state=0)
    km.fit(flat)
    lo = km.cluster_centers_.min()
    hi = km.cluster_centers_.max()
    return np.clip(flat, lo, hi).reshape(original_shape)
def load_pickle(path):
    """Unpickle and return the object stored at `path`.

    Args:
        path: filesystem path (str or path-like) of the pickle file.

    Returns:
        The unpickled object.

    Raises:
        EOFError: when the file is empty or truncated; the message includes
            the path, and the original EOFError is chained as the cause.
    """
    try:
        with open(str(path), 'rb') as f:
            data = pickle.load(f)
    except EOFError as e:
        # chain the original exception so tracebacks show the true cause
        raise EOFError('Ran out of Input: (file path: {}), (msg: {})'.format(path, e)) from e
    return data
class ResnetBlock(nn.Module):
def __init__(self, fin, fout, actvn, fhidden=None, is_bias=True):
super().__init__()
self.actvn = actvn
self.is_bias = is_bias
self.learned_shortcut = (fin != fout)
self.fin = fin
self.fout = fout
if (fhidden is None):
... |
def get_config(use_cmd_config=True):
config = _read_config()
if use_cmd_config:
config = argument_parser(config)
if (config[GENERAL][BASE_PATH] == ''):
base_path = os.getcwd().split('/SelfPlay')[0]
config[GENERAL][BASE_PATH] = base_path
if (config[GENERAL][DEVICE] == ''):
... |
class BUDUDmat(SpectralMatrix):
def assemble(self, method):
(test, trial) = (self.testfunction, self.trialfunction)
assert isinstance(test[0], UD)
assert isinstance(trial[0], UD)
d0 = get_norm_sq(test[0], trial[0], method)
d = {0: (d0[:(- 1)] + d0[1:]), (- 1): (- d0[1:(- 1)])... |
def run_dynappo_mutative(landscape, wt, problem_name, start_num):
def make_explorer(model, ss):
return baselines.explorers.DynaPPOMutative(model=model, landscape=landscape, rounds=10, starting_sequence=wt, sequences_batch_size=sequences_batch_size, model_queries_per_batch=model_queries_per_batch, num_experi... |
def plotHeatmap():
modelname = GetModelAndOptNames()
FLAGS = args.getFlag(modelname)
file_comment = config.file_comment
years_train = np.arange(1998, 2014)
years_val = np.arange(2014, 2016)
years_test = np.arange(2016, 2018)
ipaths_train = [((((global_macros.TF_DATA_DIRECTORY + '/tf_') + str... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.