code stringlengths 101 5.91M |
|---|
class Segmentation_model(nn.Module):
def __init__(self, filters=32, in_channels=3, n_block=4, bottleneck_depth=4, n_class=4, feature_dis=False):
super().__init__()
self.encoder = Encoder(filters=filters, in_channels=in_channels, n_block=n_block)
self.bottleneck = Bottleneck(filters=filters, ... |
.expansion
class ExpandGerFpga(ExpandTransformation):
environments = []
def expansion(node, state, sdfg, m=None, n=None, tile_size_x=None, tile_size_y=None):
(desc_a_in, desc_x, desc_y) = node.validate(sdfg, state)
desc_a_out = None
for e in state.out_edges(node):
if (e.src_c... |
_utils.test(arch=ti.cpu)
def test_fields_builder_numpy_dimension():
    """Check that FieldsBuilder accepts a NumPy integer scalar as an axis dimension."""
    n = np.int32(5)  # NumPy scalar instead of a plain Python int
    f32_field = ti.field(ti.f32)
    i32_field = ti.field(ti.i32)
    builder = ti.FieldsBuilder()
    builder.dense(ti.i, n).place(f32_field)
    builder.pointer(ti.j, n).place(i32_field)
    builder.finalize()
class MultibankConcatGlobalAttention(nn.Module):
def __init__(self, dim, memory_dims, coverage=False, attn_type='dot'):
super(MultibankConcatGlobalAttention, self).__init__()
self.attentions = nn.ModuleList([GlobalAttention(d, coverage, attn_type) for d in memory_dims])
self.proj = nn.Linear... |
def get_project_data(name):
    """Fetch the ``project.json`` metadata for *name* from the external data host."""
    # Projects are sharded by the upper-cased first letter: <X>/<name>/project.json
    relative_path = '%s/%s/project.json' % (name[0].upper(), name)
    return _get_external_data(urljoin(_external_data_base_url, relative_path))
class TXTDataset(torch.utils.data.Dataset):
def __init__(self, args, split):
assert (split in {'train', 'val', 'test'}), "Split '{}' not supported for dataset".format(split)
self.args = args
self._split = split
self.data_dir = args.data_dir
self._construct_imdb()
self... |
class inputs_states_t(object):
__slots__ = ['p']
def __init__(self, p=None, _skip_initialize=False):
if _skip_initialize:
return
self.p = (Vector2d._default() if (p is None) else p)
def from_all_fields(p):
return inputs_states_t(p=p)
def _skytype_meta():
retur... |
class AbstractVectorGroupOps(AbstractStorageOps[ElementT]):
def identity(cls, a: ElementOrTypeT) -> ElementT:
return cls.from_storage(a, ([0] * cls.storage_dim(a)))
def compose(cls, a: ElementT, b: ElementT) -> ElementT:
if (cls.storage_dim(a) != cls.storage_dim(b)):
raise ValueError... |
def layer(op):
def layer_decorated(self, *args, **kwargs):
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
if (len(self.terminals) == 0):
raise RuntimeError(('No input variables found for layer %s.' % name))
elif (len(self.terminals) == 1):
layer_i... |
def test_method_statement_no_args(test_case_mock, variable_reference_mock, method_mock):
    """A freshly constructed MethodStatement should start with an empty args mapping."""
    method_stmt = stmt.MethodStatement(test_case_mock, method_mock, variable_reference_mock)
    assert method_stmt.args == {}
def filter_embeddings(embedding_files):
sys.stderr.write('\nReading FrameNet {} vocabulary...\n'.format(VERSION))
vocab = set([])
corpora = [DEV_CONLL, TRAIN_FTE, TRAIN_EXEMPLAR, TEST_CONLL]
for corpus in corpora:
with codecs.open(corpus, 'r', 'utf-8') as cf:
tokens = [line.split('\t... |
def run_tardis(config, atom_data=None, packet_source=None, simulation_callbacks=[], virtual_packet_logging=False, show_convergence_plots=False, log_level=None, specific_log_level=None, show_progress_bars=True, **kwargs):
from tardis.io.logger.logger import logging_state
from tardis.io.configuration.config_reade... |
def get_torch_home():
    """Resolve the torch cache directory.

    Precedence: $TORCH_HOME, then $XDG_CACHE_HOME/torch, then ~/.cache/torch.
    """
    cache_root = os.getenv('XDG_CACHE_HOME', '~/.cache')
    default_home = os.path.join(cache_root, 'torch')
    return os.path.expanduser(os.getenv('TORCH_HOME', default_home))
class PhraseTokenizer(WordTokenizer):
def __init__(self, vocab: Iterable[str]=[], stop_words: Iterable[str]=ENGLISH_STOP_WORDS, do_lower_case: bool=False, ngram_separator: str='_', max_ngram_length: int=5):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.ngram_separ... |
class SpanMetric(Metric):
DELETE = {'TOP', 'S1', '-NONE-', ',', ':', '``', "''", '.', '?', '!', ''}
EQUAL = {'ADVP': 'PRT'}
def __init__(self, cfg, fields):
super().__init__()
self.cfg = cfg
self.fields = fields
self.vocab = fields.get_vocab('chart')
self.add_state('n... |
def _seg_25():
return [(11302, 'M', u''), (11303, 'M', u''), (11304, 'M', u''), (11305, 'M', u''), (11306, 'M', u''), (11307, 'M', u''), (11308, 'M', u''), (11309, 'M', u''), (11310, 'M', u''), (11311, 'X'), (11312, 'V'), (11359, 'X'), (11360, 'M', u''), (11361, 'V'), (11362, 'M', u''), (11363, 'M', u''), (11364, '... |
def R9A():
    """Return the matroid R9A on nine elements, built from its circuit closures.

    NOTE(review): presumably one entry in a catalog of named example
    matroids — confirm against the surrounding module.
    """
    # Ground set: nine single-character elements.
    E = 'abcdefghi'
    # Circuit closures keyed by rank: thirteen rank-3 closures, plus the
    # whole ground set as the single rank-4 closure.
    CC = {3: ['abde', 'bcdf', 'aceg', 'abch', 'aefh', 'adgh', 'acdi', 'abfi', 'defi', 'begi', 'bdhi', 'cehi', 'fghi'], 4: [E]}
    M = CircuitClosuresMatroid(groundset=E, circuit_closures=CC)
    # Prepend the catalog name so repr(M) identifies this matroid.
    M.rename(('R9A: ' + repr(M)))
    return M |
def p_if_clause(s):
    """Parse one if-clause from stream *s*: a test expression followed by a suite.

    The position is captured first, then the condition and body are parsed
    in source order (parsing advances the stream).
    """
    clause_pos = s.position()
    condition = p_test(s)
    suite = p_suite(s)
    return Nodes.IfClauseNode(clause_pos, condition=condition, body=suite)
class VideoModelStem(nn.Module):
def __init__(self, dim_in, dim_out, kernel, stride, padding, inplace_relu=True, eps=1e-05, bn_mmt=0.1):
super(VideoModelStem, self).__init__()
assert (len({len(dim_in), len(dim_out), len(kernel), len(stride), len(padding)}) == 1), 'Input pathway dimensions are not co... |
def gosimple(**kwargs):
    """Smoke-test CoreNLP sentence splitting end to end.

    Verifies that no Java process is running before and after, and that a
    one-sentence document splits and tokenizes as expected.
    """
    assert_no_java('no java when starting')
    # Start a CoreNLP pipeline with only the sentence-splitting annotator.
    p = CoreNLP('ssplit', **kwargs)
    ret = p.parse_doc('Hello world.')
    assert (len(ret['sentences']) == 1)
    # The tokenizer separates the final period into its own token.
    assert (u' '.join(ret['sentences'][0]['tokens']) == u'Hello world .')
    p.kill_proc_if_running()
    # Killing the pipeline should leave no stray Java process behind.
    assert_no_java() |
.gpu
def test_bn_cudnn(sdfg_name):
with change_default(donnx.ONNXBatchNormalization, 'cuDNN'):
torch_bn = nn.BatchNorm2d(3).cuda()
dace_bn = DaceModule(nn.BatchNorm2d(3).cuda(), backward=True, training=True)
with torch.no_grad():
dace_inputs = torch.rand(8, 3, 224, 224).cuda()
... |
def is_incomplete_argument(current_params, cmd_param):
if (not isinstance(cmd_param, Argument)):
return False
current_param_values = current_params[cmd_param.name]
if (current_param_values is None):
return True
if (cmd_param.nargs == (- 1)):
return True
if (isinstance(current... |
def main():
opt = get_opt()
print(opt)
print(('Start to test stage: %s, named: %s!' % (opt.stage, opt.name)))
train_dataset = CPDataset(opt)
train_loader = CPDataLoader(opt, train_dataset)
if (not os.path.exists(opt.tensorboard_dir)):
os.makedirs(opt.tensorboard_dir)
board = SummaryW... |
def _impl(arrays, depth_limit, broadcast_parameters_rule, left_broadcast, right_broadcast, highlevel, behavior, attrs):
if (len(arrays) == 0):
return []
backend = backend_of(*arrays, default=cpu)
inputs = [ak.operations.to_layout(x, allow_record=True, allow_unknown=True, primitive_policy='promote', ... |
class AlproVideoRetrievalDataset(AlproBaseDataset):
def __init__(self, datalist, tokenizer, img_lmdb_dir, fps=3, num_frm=3, frm_sampling_strategy='rand', max_img_size=1000, max_txt_len=40, itm_neg_size=1, ensemble_n_clips=1, random_sample_clips=True, video_fmt='.mp4', img_db_type='lmdb', is_train=False):
su... |
def plot_prior_BN_limit(prior):
df = check_prior_BN_limit(prior)
(fig, axs) = plt.subplots(1, 3, figsize=(12, 4), sharex=True)
axs[0].plot(df['mx_hat'], df['A_BN'], '-', label='$A \\quad BN$')
axs[0].plot(df['mx_hat'], df['A_FG'], '--', label='$A \\quad FG$')
axs[0].set(xlabel='$\\widehat{m}_x^-$')
... |
('model_deep_moji')
class DeepMojiModel(Model):
def __init__(self, vocab: Vocabulary, emb_size: int, hidden_size: int) -> None:
super().__init__(vocab)
self.emb_size = emb_size
layers = []
layers.append(nn.Linear(self.emb_size, hidden_size))
layers.append(nn.Tanh())
l... |
def pressure_step():
    """Run one pressure-projection step on the module-level velocity/pressure fields.

    Mutates the global field pairs in place; returns nothing.
    """
    # Divergence of the current velocity field, written into divField.
    divergence(velocities_pair.cur, divField)
    # Solve for pressure given the divergence.
    pressure_solve(pressure_pair, divField)
    # Project velocities using the solved pressure, writing into the .nxt buffer.
    pressure_projection(pressure_pair.cur, velocities_pair.cur, velocities_pair.nxt)
    # Make the projected buffer current.
    velocities_pair.swap()
    # Re-apply velocity boundary conditions after the update.
    apply_vel_bc(velocities_pair.cur) |
class RandAugment():
def __init__(self, ops, num_layers=2, choice_weights=None):
self.ops = ops
self.num_layers = num_layers
self.choice_weights = choice_weights
def __call__(self, img):
ops = np.random.choice(self.ops, self.num_layers, replace=(self.choice_weights is None), p=se... |
def patch_instances(fields):
with tempfile.TemporaryDirectory(prefix='detectron2') as dir, tempfile.NamedTemporaryFile(mode='w', encoding='utf-8', suffix='.py', dir=dir, delete=False) as f:
try:
_clear_jit_cache()
(cls_name, s) = _gen_instance_module(fields)
f.write(s)
... |
def _key_to_hand(key: PRNGKey) -> Array:
def _convert_quat(j):
shifts = jnp.arange(24, (- 1), step=(- 2))
quat_digits = ((j >> shifts) & 3)
return quat_digits
cards = jax.vmap(_convert_quat)(key).flatten()
hand = jnp.zeros((4, 13), dtype=jnp.int32)
def loop_j(j, val):
(i,... |
.torch
def test_can_get_sequence(sequential_info):
sequential_dataset = PandasSequentialDataset(**sequential_info)
def compare_sequence(index: int, feature_name: str, expected: List[int]):
assert (sequential_dataset.get_sequence(index, feature_name) == np.array(expected)).all()
compare_sequence(0, '... |
class TimeDistributed(nn.Module):
def __init__(self, module, batch_first=False):
super(TimeDistributed, self).__init__()
self.module = module
self.batch_first = batch_first
def forward(self, x):
if (len(x.size()) <= 2):
return self.module(x)
x_reshape = x.cont... |
def make_miniimagenet_cnn_model(num_output_classes, multi_headed=False, num_heads=(- 1), input_shape=(84, 84, 3)):
inputs = tf.keras.layers.Input(shape=input_shape, dtype=tf.float32)
x = inputs
for i in range(4):
x = Conv2D(filters=32, kernel_size=3, strides=1, padding='same', activation=None)(x)
... |
def files_to_videoTensor(path, downscale=1.0):
from PIL import Image
files = sorted(os.listdir(path))
print(len(files))
images = [torch.Tensor(np.asarray(Image.open(os.path.join(input_video, f)))).type(torch.uint8) for f in files]
print(images[0].shape)
videoTensor = torch.stack(images)
retu... |
def base_axis_1_reshape_without_neg_1(x):
    """Apply a 3-channel 3x3 convolution, then reshape with fully explicit dims.

    NOTE(review): looks like an NNabla (PF/F) test graph; the explicit
    (1, 18, 6) target shape deliberately avoids a -1 wildcard — confirm the
    expected input shape against the calling test.
    """
    # base_axis=1: batch dimension is axis 0, channels start at axis 1.
    h = PF.convolution(x, 3, (3, 3), pad=(0, 0), name='c1', base_axis=1)
    y = F.reshape(h, shape=(1, 18, 6))
    return y |
def get_optimizer(args, optimizer_cls, parameters):
assert isinstance(parameters, list)
if (len(parameters) == 0):
if (not getattr(args, 'allow_stateless', False)):
raise ValueError(f'Got stateless partition {args.stage}, if this is wanter, set "allow_stateless": true')
else:
... |
class NodeType(type):
def __new__(mcs, name, bases, d):
for attr in ('fields', 'attributes'):
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
assert (len(bases) == 1), 'multiple inheritance not allowed'
asse... |
_method
class Set_object(Set_generic, Set_base, Set_boolean_operators, Set_add_sub_operators):
def __init__(self, X, category=None):
from sage.rings.integer import is_Integer
if (isinstance(X, int) or is_Integer(X)):
raise ValueError('underlying object cannot be an integer')
if (... |
class AlexnetModel(model.Model):
def __init__(self):
super(AlexnetModel, self).__init__('alexnet', (224 + 3), 512, 0.005)
def add_inference(self, cnn):
cnn.conv(64, 11, 11, 4, 4, 'VALID')
cnn.mpool(3, 3, 2, 2)
cnn.conv(192, 5, 5)
cnn.mpool(3, 3, 2, 2)
cnn.conv(384... |
def test_memory(group, batch_size, sequence_length, hidden_size, ffn_hidden_size, num_experts, top_k):
args = arguments.Arguments(hidden_size=hidden_size, ffn_hidden_size=ffn_hidden_size, moe_num_experts=num_experts, moe_top_k=top_k, moe_expert_model_parallelism=True, expert_parallel_group=group, fp16=False, bf16=T... |
class BB84MsgType(Enum):
    """Message types exchanged by the BB84 quantum-key-distribution protocol."""
    # auto() assigns sequential integer values starting at 1.
    BEGIN_PHOTON_PULSE = auto()
    RECEIVED_QUBITS = auto()
    BASIS_LIST = auto()
    MATCHING_INDICES = auto() |
class GaussianEnsemble(Ensemble):
    """Ensemble of M x N matrices with i.i.d. Gaussian entries of std 1/sqrt(N)."""

    def __init__(self, M, N):
        self.M = M
        self.N = N
        self.repr_init()

    def generate(self):
        """Draw one matrix sample of shape (M, N) from the ensemble."""
        scale = 1 / np.sqrt(self.N)
        return scale * np.random.randn(self.M, self.N)
class RemoteTool(Tool):
def __init__(self, endpoint_url=None, token=None, tool_class=None):
self.endpoint_url = endpoint_url
self.client = EndpointClient(endpoint_url, token=token)
self.tool_class = tool_class
def prepare_inputs(self, *args, **kwargs):
inputs = kwargs.copy()
... |
class QuantifierRef(BoolRef):
def as_ast(self):
return self.ast
def get_id(self):
return Z3_get_ast_id(self.ctx_ref(), self.as_ast())
def sort(self):
if self.is_lambda():
return _sort(self.ctx, self.as_ast())
return BoolSort(self.ctx)
def is_forall(self):
... |
def typeset_examples(term_class, term_use):
    """Render the examples registered for *term_class* as a comma-joined link list."""
    def shorten(name):
        # Take the last dash-separated segment and abbreviate each
        # underscore-separated word to its first three characters.
        return '.'.join(part[:3] for part in name.split('-')[-1].split('_'))
    links = [link_example % (shorten(example), example) for example in term_use[term_class.name]]
    return ', '.join(links)
def make_loss(cfg, num_classes):
sampler = cfg.DATALOADER.SAMPLER
if (cfg.MODEL.METRIC_LOSS_TYPE == 'triplet'):
triplet = TripletLoss(cfg.SOLVER.MARGIN)
else:
print('expected METRIC_LOSS_TYPE should be tripletbut got {}'.format(cfg.MODEL.METRIC_LOSS_TYPE))
if (cfg.MODEL.IF_LABELSMOOTH ==... |
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
val = str(val)
result: Any = []
if (val in NULL_VALUES):
return [np.nan]
if (not validate_ro_cui(val)):
if (errors == 'raise'):
raise ValueError(f'Unable to parse value {val}')
error_re... |
def remove_identifier(root, mark='"identifier=', replacement='$ID'):
    """Recursively replace any node label containing *mark* with *replacement*.

    Mutates the tree in place and returns *root* for convenience.

    Bug fix: the recursive call previously dropped the caller-supplied
    *mark*/*replacement*, silently falling back to the defaults for every
    node below the root.
    """
    if mark in root.label:
        root.label = replacement
    for child in root.children:
        # Propagate the caller's arguments instead of resetting to defaults.
        remove_identifier(child, mark, replacement)
    return root
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
print('Computing the redirect map')
redirects = get_redirects(redirects_filename)
print('Computing the integer index map')
index_map = dict()
links = list()
for (l, line) in enumerate(BZ2File(page_links_filename)):
... |
class Variables():
def __init__(self):
self.variables = core.Variables()
def from_core(variables):
new_variables = Variables()
new_variables.variables = variables
return new_variables
def get_core(self):
return self.variables
def append(self, variables):
s... |
def achieve_answer(query, ent_in, ent_out):
assert (type(query[(- 1)]) == list)
all_relation_flag = True
for ele in query[(- 1)]:
if ((type(ele) != int) or (ele == (- 1))):
all_relation_flag = False
break
if all_relation_flag:
if (type(query[0]) == int):
... |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/depparse', help='Root dir for saving models.')
parser.add_argument('--wordvec_dir', type=str, default='extern_data/word2vec', help='Directory of word vectors')
parser.add_argument('--train_file... |
def main():
generator = AtomicFactGenerator('api.key', 'demos', gpt3_cache_dir=None)
(atomic_facts, para_breaks) = generator.run("Thierry Henry (born 17 August 1977) is a French professional football coach, pundit, and former player. He is considered one of the greatest strikers of all time, and one the greates... |
class BNLayerInfoCollectionTest(BasePytorchTest):
def __init__(self, unit_test):
super().__init__(unit_test)
self.val_batch_size = 1
def create_inputs_shape(self):
return [[self.val_batch_size, 3, 32, 32]]
def generate_inputs(input_shapes):
return to_torch_tensor([torch.randn... |
class TestLazyPythonConfig(unittest.TestCase):
def setUp(self):
self.root_filename = os.path.join(os.path.dirname(__file__), 'root_cfg.py')
def test_load(self):
cfg = LazyConfig.load(self.root_filename)
self.assertEqual(cfg.dir1a_dict.a, 'modified')
self.assertEqual(cfg.dir1b_dic... |
_operation
def mult_real_cplx(a: torch.Tensor, b: torch.Tensor):
    """Multiply real tensor *a* with *b*, whose last dimension holds (real, imag).

    NOTE(review): ``is_real`` is defined elsewhere; presumably it returns True
    when the last dimension does NOT have length 2 — confirm against its
    definition.
    """
    if is_real(b):
        raise ValueError('Last dimension must have length 2.')
    # Broadcast a over b's trailing (real, imag) dimension.
    return (a.unsqueeze((- 1)) * b) |
class FeedableExample(Feedable):
def __init__(self):
x = tf.placeholder(tf.float32, shape=[], name='x')
y = tf.get_variable('y', shape=[], initializer=tf.constant_initializer(2.0))
z = (x * y)
self.x = x
self.y = y
self.z = z
def inputs_to_feed_dict(self, batch):
... |
class ResT(nn.Module):
def __init__(self, model_name: str='S', pretrained: str=None, num_classes: int=1000, *args, **kwargs) -> None:
super().__init__()
assert (model_name in rest_settings.keys()), f'ResT model name should be in {list(rest_settings.keys())}'
(embed_dims, depths, drop_path_ra... |
class DLRepVerifier(Verifier):
def check_responses_consistency(self, responses, responses_dict=None):
if (responses_dict is None):
responses_dict = {}
for (i, s) in enumerate(self.stmt.secret_vars):
if (s in responses_dict.keys()):
if (responses[i] != response... |
def _get_required(element: Element, path: str) -> Element:
    """Find *path* under *element*, raising MuseScoreError when it is absent."""
    found = element.find(path)
    if found is None:
        raise MuseScoreError(f"Element `{path}` is required for an '{element.tag}' element.")
    return found
def compute_mean_vector(category_index, save_path, featurefilepath):
featurefile_list = os.listdir(os.path.join(featurefilepath, category_index))
correct_features = []
for featurefile in featurefile_list:
feature = torch.from_numpy(np.load(os.path.join(featurefilepath, folder_name, featurefile)))
... |
def pre_extract_audio_embedding(framework, text_type, text_rep):
ecals_test = torch.load(f'../mtr/{framework}/exp/transformer_cnn_cf_mel/{text_type}_{text_rep}/audio_embs.pt')
msdid = [k for k in ecals_test.keys()]
audio_embs = [ecals_test[k] for k in msdid]
audio_embs = torch.stack(audio_embs)
retu... |
def init_backend_engine():
if config.value('PYTORCH_CUDA_ALLOC_CONF', None):
value = config.value('PYTORCH_CUDA_ALLOC_CONF', '')
print(f'Set PYTORCH_CUDA_ALLOC_CONF={value!r}.')
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = value
BackendEngine.select_engine(config=config)
if BackendEngine.... |
def _interpolate(name, dim, interpolate_mode):
def symbolic_fn(g, input, output_size, *args):
(scales, align_corners) = sym_help._get_interpolate_attributes(g, interpolate_mode, args)
sym_help._interpolate_warning(interpolate_mode)
align_corners = sym_help._maybe_get_scalar(align_corners)
... |
def _segments_to_sequence_example(segments, labels):
raw_segments = [segment.tostring() for segment in segments]
raw_labels = np.array(labels, dtype=np.uint8).tostring()
context = tf.train.Features(feature={'raw_labels': _bytes_feature(raw_labels)})
feature_lists = tf.train.FeatureLists(feature_list={'r... |
def _getNewick(node, newick, parentdist, leaf_names):
if node.is_leaf():
return ('%s:%.2f%s' % (leaf_names[node.id], (parentdist - node.dist), newick))
else:
if (len(newick) > 0):
newick = ('):%.2f%s' % ((parentdist - node.dist), newick))
else:
newick = ');'
... |
class ModulatedDeformConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, bias=True):
super(ModulatedDeformConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
sel... |
def get_indent(code):
    """Return the leading whitespace of the first non-blank line of *code*.

    Lines that are empty or contain only whitespace are skipped; if every
    line is blank, an empty string is returned.  (The previous version
    raised AttributeError on a whitespace-only first line, where the
    ``^(\\s*)\\S`` regex found no match.)
    """
    for line in code.split('\n'):
        stripped = line.lstrip()
        if stripped:
            # Leading-whitespace prefix = everything lstrip() removed.
            return line[:len(line) - len(stripped)]
    return ''
class ShapeSeg(InMemoryDataset):
mit_folders = {'crane': 18, 'squat1': 25, 'jumping': 15, 'squat2': 25, 'bouncing': 18, 'march1': 25, 'handstand': 18, 'march2': 25}
url = '
def __init__(self, root, train=True, transform=None, pre_transform=None, pre_filter=None):
super(ShapeSeg, self).__init__(root,... |
def get_type(attributes):
pos = attributes['pos'][attributes['head_index']]
head_ner = attributes['ner'][attributes['head_index']]
if pos.startswith('NNP'):
return 'NAM'
elif (head_ner != 'NONE'):
return 'NAM'
elif pos.startswith('PRP'):
return 'PRO'
elif pos.startswith('... |
def clean_nl_brin(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
if (output_format not in {'compact', 'standard'}):
raise ValueError(f'output_format {output_format} is invalid. It needs to ... |
class Llama2HF(AbstractLanguageModel):
def __init__(self, config_path: str='', model_name: str='llama7b-hf', cache: bool=False) -> None:
super().__init__(config_path, model_name, cache)
self.config: Dict = self.config[model_name]
self.model_id: str = self.config['model_id']
self.prom... |
def make_env_fn_alfred(args, scene_names, rank):
    """Construct a semantic-exploration Thor environment agent for one worker."""
    return Sem_Exp_Env_Agent_Thor(args, scene_names, rank)
class jitter(object):
    """Point-cloud augmentation: add clipped Gaussian noise to every coordinate.

    The input array is modified in place and also returned.
    """

    def __init__(self, std=0.01, clip=0.02):
        # Noise standard deviation and symmetric clipping bound.
        self.std = std
        self.clip = clip

    def __call__(self, pointcloud):
        num_points, num_coords = pointcloud.shape
        noise = self.std * np.random.randn(num_points, num_coords)
        pointcloud += np.clip(noise, -self.clip, self.clip)
        return pointcloud
def is_multibank_array_with_distributed_index(array: dt.Data):
if is_multibank_array(array):
res = parse_location_bank(array)
(low, high) = get_multibank_ranges_from_subset(res[1], None)
return (((high - low) > 1) or ((len(array.shape) > 1) and (str(array.shape[0]) == '1')))
else:
... |
def batch_to_device(batch, target_device: device):
features = batch['features']
for paired_sentence_idx in range(len(features)):
for feature_name in features[paired_sentence_idx]:
features[paired_sentence_idx][feature_name] = features[paired_sentence_idx][feature_name].to(target_device)
... |
.parametrize('gru_type', ['GRU', 'AIGRU', 'AGRU'])
def test_DIEN(gru_type):
if (version.parse(tf.__version__) >= version.parse('2.0.0')):
tf.compat.v1.disable_eager_execution()
return
model_name = ('DIEN_' + gru_type)
(x, y, feature_columns, behavior_feature_list) = get_xy_fd(hash_flag=True)... |
def ocp(state_forms, bcs_list, J, states, controls, adjoints, config_ocp):
    """Thin factory wrapping cashocs.OptimalControlProblem, passing the config by keyword."""
    return cashocs.OptimalControlProblem(state_forms, bcs_list, J, states, controls, adjoints, config=config_ocp) |
class KPI():
def __init__(self, weights_memory: float=np.inf, activation_memory: float=np.inf, total_memory: float=np.inf, bops: float=np.inf):
self.weights_memory = weights_memory
self.activation_memory = activation_memory
self.total_memory = total_memory
self.bops = bops
def __... |
class CrystalOfLSPaths(UniqueRepresentation, Parent):
def __classcall_private__(cls, starting_weight, cartan_type=None, starting_weight_parent=None):
if (cartan_type is not None):
(cartan_type, starting_weight) = (CartanType(starting_weight), cartan_type)
extended = cartan_type.is_af... |
class SparqlEngine():
gs1 = None
PRED_INSTANCE = 'pred:instance_of'
PRED_NAME = 'pred:name'
PRED_VALUE = 'pred:value'
PRED_UNIT = 'pred:unit'
PRED_YEAR = 'pred:year'
PRED_DATE = 'pred:date'
PRED_FACT_H = 'pred:fact_h'
PRED_FACT_R = 'pred:fact_r'
PRED_FACT_T = 'pred:fact_t'
SP... |
def validate_query_distinction_local(previous_queries, query):
    """Return False when *query* nearly duplicates any previously issued query.

    Uses dspy's fuzzy answer match with an 0.8 overlap threshold; an empty
    history is trivially distinct.
    """
    if previous_queries == []:
        return True
    is_duplicate = dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8)
    return not is_duplicate
class GroupNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, num_groups):
self.X = ((torch.rand(*dims) - 0.5) * 256)
self.num_groups = num_groups
num_channels = dims[1]
self.weight = torch.rand(num_channels, dtype=torch.float)
self.bias = torch.rand(num_channel... |
def mask(words):
    """Combine the 'L' and 'H' bitmasks of *words* into a single 16-bit mask.

    NOTE(review): ``bitmask`` is defined elsewhere and the meaning of the
    'L'/'H' classes is not visible here — the comments below describe only
    the bit arithmetic.
    """
    L = bitmask(words, 'L')
    H = bitmask(words, 'H')
    # Bits set in neither mask, truncated to 16 bits.
    V = ((~ (L | H)) & 65535)
    # Positions where an L bit sits directly below an H bit.
    a = (L & (H >> 1))
    # The matching H-bit positions, one bit higher.
    b = (a << 1)
    # Union of the neither-mask with both members of each matched pair.
    c = ((V | a) | b)
    return c |
class Args(Tap):
    """Typed command-line arguments (typed-argument-parser ``Tap`` subclass)."""
    # Path to the input data file.
    data_path: str
    # Directory where outputs are written.
    save_dir: str
    # NOTE(review): annotated str but defaults to None — presumably Tap
    # treats this as an optional argument; confirm.
    smiles_column: str = None
    split_type: Literal[('random', 'scaffold_balanced')] = 'random'
    # Train/val/test fractions.
    # NOTE(review): annotated Tuple[int, int, int] but the default holds
    # floats — the annotation looks wrong; confirm before relying on it.
    split_sizes: Tuple[(int, int, int)] = (0.8, 0.1, 0.1)
    # Random seed for the split.
    seed: int = 0 |
def create_g2p_gt_map(words, pronunciations):
    """Build a word -> list-of-pronunciations map from parallel sequences.

    Words that appear multiple times accumulate all of their pronunciations,
    in input order.  Extra items in the longer of the two sequences are
    ignored (zip semantics, as before).
    """
    g2p_gt_map = {}
    for word, pronunciation in zip(words, pronunciations):
        # setdefault replaces the manual membership check / else branch.
        g2p_gt_map.setdefault(word, []).append(pronunciation)
    return g2p_gt_map
class _Text(Doc):
    """A leaf document node holding a single line of text."""
    __slots__ = ('text',)
    def __init__(self, text):
        # Text nodes must be single-line; line breaks belong to other node types.
        assert ('\n' not in text)
        self.text = text
    def send_to(self, out, indent):
        # Emit a (Doc.Text, payload) event; *indent* is accepted but unused here.
        out.send((Doc.Text, self.text))
    def __nonzero__(self):
        # Python 2 truthiness hook: empty text is falsy.
        return bool(self.text) |
def test_graphsage_constructor_passing_aggregator():
gs = GraphSAGE(layer_sizes=[4], n_samples=[2], input_dim=2, multiplicity=1, aggregator=MeanAggregator)
assert (gs.dims == [2, 4])
assert (gs.n_samples == [2])
assert (gs.max_hops == 1)
assert gs.bias
assert (len(gs._aggs) == 1)
with pytest... |
class XmlControl(XmlElem):
tag = 'control'
class Meta():
typ = XmlAttr('type', Choice('force', 'torque'), required=True)
body = XmlAttr('body', String(), help='name of the body to apply force on')
bodies = XmlAttr('bodies', List(String()), help='names of the bodies to apply force on')
... |
def user_simulate_passage(dlg_history, title_and_passage, user_character, user_engine, user_temperature):
(title, passage) = title_and_passage
return llm_generate(template_file='benchmark/prompts/user_with_passage.prompt', prompt_parameter_values={'dlg': dlg_history, 'title': title, 'passage': passage, 'user_ch... |
class _MultipleMatch(ParseElementEnhance):
def __init__(self, expr, stopOn=None):
super(_MultipleMatch, self).__init__(expr)
self.saveAsList = True
ender = stopOn
if isinstance(ender, basestring):
ender = ParserElement._literalStringClass(ender)
self.not_ender = (... |
def joint_prob2pianoroll48(probs):
chord_list = [[48, 55, 64], [48, 55, 63], [48, 56, 64], [48, 54, 63], [49, 56, 65], [49, 56, 64], [49, 57, 65], [49, 55, 64], [50, 57, 66], [50, 57, 65], [50, 58, 66], [50, 56, 65], [51, 58, 67], [51, 58, 66], [51, 59, 67], [51, 57, 66], [52, 59, 68], [52, 59, 67], [52, 60, 68], [... |
def get_code_tokens(code, lang):
    """Split *code* into its non-empty lines, each suffixed with a literal ``\\n`` marker.

    The suffix is the two characters backslash + 'n', not a newline.
    *lang* is accepted for interface compatibility but is not used here.
    """
    return [line + '\\n' for line in filter(None, code.split('\n'))]
def is_blacked_out_image(image_location: str) -> bool:
try:
import cv2
except ModuleNotFoundError as e:
handle_module_not_found_error(e, ['heim'])
if is_url(image_location):
arr = np.asarray(bytearray(urlopen(image_location).read()), dtype=np.uint8)
image = cv2.imdecode(arr, ... |
class PatchEmbedding(torch.nn.Module):
def __init__(self, cnn_temporal_kernels, cnn_spatial_kernels, cnn_temporal_kernelsize, cnn_spatial_kernelsize, cnn_poolsize, cnn_poolstride, cnn_pool_type, dropout, activation_type):
super().__init__()
if (activation_type == 'gelu'):
activation = to... |
class FC(tf.keras.layers.Layer):
_scope
def __init__(self, num_outputs, kernel_initializer=tf.keras.initializers.he_normal(), use_biases=True, biases_initializer=tf.keras.initializers.Zeros(), activation_fn=None, name='fc', trainable=True, **kwargs):
super(FC, self).__init__(name=name, **kwargs)
... |
class TestWith(JitTestCase):
def test_with_as(self):
global Context
.script
class Context(object):
def __init__(self, start: int):
self.count = torch.tensor([start], dtype=torch.double)
def __enter__(self):
self.count.add_(0.3)
... |
class BiEncoder(nn.Module):
def __init__(self, question_model: nn.Module, ctx_model: nn.Module, fix_q_encoder: bool=False, fix_ctx_encoder: bool=False):
super(BiEncoder, self).__init__()
self.question_model = question_model
self.ctx_model = ctx_model
self.fix_q_encoder = fix_q_encode... |
def test_config():
intervention = wn.opioid.Intervention(time=2021, nonmedical_incidence=(- 0.12))
config = wn.opioid.Config()
assert (config.nonmedical_incidence.intervention_val == 0.0)
config = config.update(intervention)
assert (config.nonmedical_incidence.intervention_val == (- 0.12))
inter... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.