code stringlengths 101 5.91M |
|---|
class BuiltInMethodProxy(object):
def __init__(self, ml_name, pyop_m_self):
self.ml_name = ml_name
self.pyop_m_self = pyop_m_self
def __repr__(self):
return ('<built-in method %s of %s object at remote 0x%x>' % (self.ml_name, self.pyop_m_self.safe_tp_name(), self.pyop_m_self.as_address()... |
class SupervisedDataset(Dataset):
def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer):
super(SupervisedDataset, self).__init__()
logging.warning('Loading data...')
list_data_dict = utils.jload(data_path)
logging.warning('Formatting inputs...')
(pro... |
def collect_mentions(words, y_p, tag_ind):
(e_span, e_spans) = ([], [])
for (w, pred) in zip(words, y_p):
if (pred == tag_ind):
e_span.append(w)
elif e_span:
e_spans.append(' '.join(e_span))
e_span = []
if e_span:
e_spans.append(' '.join(e_span))
... |
def test_gated_update():
    """gated_update keeps rows whose gate is 0 and takes h_new where the gate is 1."""
    old_state = GPUVariable(torch.FloatTensor([[1, 2, 3], [4, 5, 6]]))
    candidate = GPUVariable(torch.FloatTensor([[(- 1), 2, 3], [4, 8, 0]]))
    gate = GPUVariable(torch.FloatTensor([[0], [1]]))
    result = gated_update(old_state, candidate, gate)
    # Row 0 gated off (unchanged), row 1 gated on (replaced by candidate).
    assert_tensor_equal(result, [[1, 2, 3], [4, 8, 0]])
def unique_word(tag):
(unique_tag, remove_dix) = ([], None)
token = tag.split()
for (idx, i) in enumerate(token):
if (len(i) == 1):
unique_tag.append((token[idx] + token[(idx + 1)]))
remove_dix = (idx + 1)
else:
unique_tag.append(i)
if remove_dix:
... |
(old_name='scipy.sparse.sparsetools', message='scipy.sparse.sparsetools is a private module for scipy.sparse, and should not be used.')
def _deprecated():
    """No-op placeholder for deprecation wiring.

    The keyword arguments on the preceding line (old_name/message for
    scipy.sparse.sparsetools) look like a decorator call whose name was
    lost during text extraction — NOTE(review): confirm against upstream.
    """
    pass
class FakeModel(flexs.Model):
    """Stub model returning uniformly random fitness scores (via module-level `rng`)."""

    def _fitness_function(self, sequences):
        # One random score per input sequence; no real evaluation happens.
        n_sequences = len(sequences)
        return rng.random(size=n_sequences)

    def train(self, *args, **kwargs):
        # Training is deliberately a no-op for this fake.
        pass
def condense_ner_labels(confusion, gold_labels, pred_labels):
new_confusion = defaultdict((lambda : defaultdict(int)))
new_gold_labels = []
new_pred_labels = []
for l1 in gold_labels:
if (l1.find('-') >= 0):
new_l1 = l1.split('-', 1)[1]
else:
new_l1 = l1
i... |
class DDNTemplate(nn.Module):
def __init__(self, constructor, feat_extract_layer, num_classes, pretrained_path=None, aux_loss=None):
super().__init__()
self.num_classes = num_classes
self.pretrained_path = pretrained_path
self.pretrained = (pretrained_path is not None)
self.a... |
def _tensor_str(self, indent):
if (self.numel() == 0):
return '[]'
if self.has_names():
self = self.rename(None)
summarize = (self.numel() > PRINT_OPTS.threshold)
if ((self.dtype is torch.float16) or (self.dtype is torch.bfloat16)):
self = self.float()
if self.dtype.is_comple... |
def run_prefetch(prefetch_queue, folder_name, prefix, num_batch, shuffle, id2name):
n_batch_prefetch = 0
fetch_order = np.arange(num_batch)
while True:
if ((n_batch_prefetch == 0) and shuffle):
fetch_order = np.random.permutation(num_batch)
batch_id = fetch_order[n_batch_prefetch... |
def test_single_best():
    """SingleBest fitted on the DSEL split should score (close to) 0. on the test split."""
    (pool_classifiers, X_dsel, y_dsel, X_test, y_test) = setup_classifiers()
    selector = SingleBest(pool_classifiers)
    selector.fit(X_dsel, y_dsel)
    score = selector.score(X_test, y_test)
    assert np.isclose(score, 0.)
def parse_string(astr, env, level, line):
lineno = ('#line %d\n' % line)
def replace(match):
name = match.group(1)
try:
val = env[name]
except KeyError:
msg = ('line %d: no definition of key "%s"' % (line, name))
raise ValueError(msg)
return va... |
def choose_holes(project_lines, comments):
data = {}
count = 0
repeated_holes = 0
chosen_lines = []
selected_lines = np.arange(0, len(project_lines))
for proj_line_id in selected_lines:
(file, file_line_id, line) = project_lines[proj_line_id]
line = line.strip()
if (line ... |
def get_incoming_requests(app):
    """Return the recorded incoming requests stored on *app*.

    Flask apps keep the list in ``app.config``; other (dict-like) app
    objects store it directly under the same key.
    """
    key = 'incoming_requests'
    if isinstance(app, Flask):
        return app.config[key]
    return app[key]
class Elliott_GoogLeNet(nn.Module):
def __init__(self):
super(Elliott_GoogLeNet, self).__init__()
self.pre_layers = nn.Sequential(nn.Conv2d(3, 192, kernel_size=3, padding=1), nn.BatchNorm2d(192), Elliott())
self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
self.b3 = Inception(256, 12... |
def module_build(parser: argparse.ArgumentParser):
    """Register CLI arguments for the module-build subcommand on *parser*."""
    parser.add_argument(
        'SOURCE',
        help='Path to the Taichi program source (Python script).')
    parser.add_argument(
        '-o', '--output',
        type=str,
        default=None,
        help='Output module path.')
    # Dispatch to the implementation when this subcommand is selected.
    parser.set_defaults(func=module_build_impl)
class DataTrainingArguments():
task_name: Optional[str] = field(default='ner', metadata={'help': 'The name of the task (ner, pos...).'})
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = fie... |
def set_coef_d(variables, ir, ic, mode, pis, corrs_rs):
    """Write the summed 'u_m' state of `pis` and `corrs_rs` at cell (ir, ic)
    into the row ('u1_m') or column ('u2_m') variable.

    Raises KeyError for any *mode* other than 'row' or 'col'.
    """
    target_key = {'row': 'u1_m', 'col': 'u2_m'}[mode]
    combined = pis.states[(ir, ic)]['u_m'] + corrs_rs.states[(ir, ic)]['u_m']
    variables[target_key].set_data(combined)
.utils.register_keras_serializable()
class Embedding(tf.keras.layers.Layer):
def __init__(self, field_dims, factors, kernel_initializer: Union[(Text, tf.keras.initializers.Initializer)]='truncated_normal', kernel_regularizer: Union[(Text, None, tf.keras.regularizers.Regularizer)]=None, **kwargs):
super().__... |
def register_Ns3LteRrcSapRrcConnectionReestablishmentComplete_methods(root_module, cls):
    """Register constructors and attributes for the ns-3 RRC reestablishment-complete
    struct on the pybindgen class wrapper *cls*."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor taking a const reference.
    copy_arg = param('ns3::LteRrcSap::RrcConnectionReestablishmentComplete const &', 'arg0')
    cls.add_constructor([copy_arg])
    # Mutable uint8 transaction-identifier field.
    cls.add_instance_attribute('rrcTransactionIdentifier', 'uint8_t', is_const=False)
    return
_model('causallm')
class CausalLMModel(BaseModel):
MODEL_DICT = 'configs/inference/causal_lm.yaml'
def __init__(self, model, model_config, tokenizer):
super().__init__()
self.model = model
self.tokenizer = tokenizer
self.max_prediction_length = model_config['max_prediction_length... |
def create_simple_unrolled_sdfg():
def ucopy(input: dace.float32[4], output: dace.float32[4]):
for i in dace.map[0:4]:
output[i] = input[i]
sdfg = ucopy.to_sdfg()
for node in sdfg.states()[0].nodes():
if isinstance(node, dace.sdfg.nodes.MapEntry):
node.schedule = dace... |
def run_interaction_loop(monkeypatch: pytest.MonkeyPatch, agent: Agent, cycle_count: int, challenge_name: str, level_to_run: int) -> None:
setup_mock_input(monkeypatch, cycle_count)
setup_mock_log_cycle_agent_name(monkeypatch, challenge_name, level_to_run)
with contextlib.suppress(SystemExit):
agent... |
def l1loss_double_backwards(ctx, ggI):
size_average = ctx.additional_args[0]
(input, target, grad_output) = ctx.saved_tensors
gI = torch.zeros_like(ggI)
positive_mask = (input > target).type_as(ggI)
negative_mask = (input < target).type_as(ggI)
ggO = (ggI * (positive_mask - negative_mask)).sum()... |
class SimulationFlag():
def __init__(self, data: np.ndarray):
self._data = data
assert (self._data.dtype == bool), self._data.dtype
def data(self) -> np.ndarray:
return self._data
def shape(self) -> Sequence[int]:
return self._data.shape
def data(self, value: np.ndarray):... |
_model('fconv_lm')
class FConvLanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
def add_args(parser):
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--decoder-embed-dim', type=int, metav... |
class FilterBankConfig(AudioConfig):
    """Audio feature configuration selecting log-mel filter-bank extraction."""
    # Feature-extraction method identifier consumed by the transform factory.
    transform_method: str = 'fbank'
    # Number of mel filter banks (per-frame feature dimension).
    n_mels: int = 80
    # Frequency-mask width parameter — presumably SpecAugment's F; confirm at use site.
    freq_mask_para: int = 18
def visibleEdgeHelper(node_A, node_B, graph):
path = []
path.append(node_A)
for node_C in graph.get_nodes_into(node_A, Endpoint.ARROW):
if graph.is_parent_of(node_C, node_A):
return True
if visibleEdgeHelperVisit(graph, node_C, node_A, node_B, path):
return True
r... |
class _TestPythranFunc():
ALL_INTEGER = [np.int8, np.int16, np.int32, np.int64, np.intc, np.intp]
ALL_FLOAT = [np.float32, np.float64]
ALL_COMPLEX = [np.complex64, np.complex128]
def setup_method(self):
self.arguments = {}
self.partialfunc = None
self.expected = None
def get_... |
class DeployDataset(TextDataset):
def __init__(self, image_root, transform=None):
super().__init__(transform)
self.image_root = image_root
self.image_list = os.listdir(image_root)
def __getitem__(self, item):
image_id = self.image_list[item]
image_path = os.path.join(self... |
def ShowPlots(subplot=False):
for (log_ind, path) in enumerate(FLAGS.path.split(':')):
log = Log(path)
if subplot:
plt.subplot(len(FLAGS.path.split(':')), 1, (log_ind + 1))
for index in FLAGS.index.split(','):
index = int(index)
for attr in ['pred_acc', 'p... |
def get_clib_test_routine(name, restype, *argtypes):
    """Look up *name* in the loaded `clib_test` library and cast it to a
    callable with the given ctypes return/argument types."""
    raw_pointer = getattr(clib_test, name)
    prototype = ctypes.CFUNCTYPE(restype, *argtypes)
    return ctypes.cast(raw_pointer, prototype)
def test_weighted_resampling():
np.random.seed(1)
k = 1.0
scores = np.array([[0.0, 1.0], [1.0, 0.0], [2.0, 2.0], [3.0, 1.0]])
true_ranks = np.array([2, 2, 3, 4])
true_weights = softmax(((- np.log(true_ranks)) / k))
(ranks, weights, resampled_idxs) = weighted_resampling(scores, k=k)
assert np... |
.expansion
class ExpandPgemmReferenceMPICH(ExpandTransformation):
environments = [environments.ref_mpich.ScaLAPACKMPICH]
def expansion(node, parent_state, parent_sdfg, **kwargs):
(a, b, c, desca, descb, gdescc, ldesc) = node.validate(parent_sdfg, parent_state)
dtype = a.dtype.base_type
l... |
class MultipleInputsModelTest(BaseKerasFeatureNetworkTest):
def __init__(self, unit_test):
super().__init__(unit_test, num_of_inputs=3, experimental_exporter=True)
def create_networks(self):
inputs_1 = layers.Input(shape=self.get_input_shapes()[0][1:])
inputs_2 = layers.Input(shape=self.... |
.parametrize('likelihood', LIKELIHOODS)
def test_likelihood_grad_EP_diagonal(likelihood):
assert (not likelihood.isotropic)
df = check_likelihood_grad_EP_diagonal(likelihood)
assert_allclose(df['rz'], df['grad_bz_A1'], rtol=0, atol=EPSILON)
assert_allclose(df['vz'], df['grad_bz_A2'], rtol=0, atol=EPSILO... |
class Statistic(EventWriter):
def __init__(self, max_iter, tau, num_gpus, num_classes, output_dir, prefix):
self.tau = tau
self.LOG_PERIOD = int((1280 / num_gpus))
self.max_iter = max_iter
self.cur_iter = 0
self.num_classes = num_classes
self.ori_label = [0.0 for c in... |
def test_trigger_tensorlib_changed_name(mocker):
numpy_64 = pyhf.tensor.numpy_backend(precision='64b')
jax_64 = pyhf.tensor.jax_backend(precision='64b')
pyhf.set_backend(numpy_64)
func = mocker.Mock()
pyhf.events.subscribe('tensorlib_changed')(func.__call__)
assert (func.call_count == 0)
pyh... |
class QuantizedAutoregressiveAudio(SequenceDataset):
_name_ = 'qautoaudio'
def d_input(self):
return 1
def d_output(self):
return (1 << self.bits)
def l_output(self):
return self.sample_len
def n_tokens(self):
return (1 << self.bits)
def init_defaults(self):
... |
()
('-p', '--config_path', default='Configs/speaker_domain_config.yml', type=str)
def main(config_path):
config = yaml.safe_load(open(config_path))
log_dir = config['log_dir']
if (not osp.exists(log_dir)):
os.makedirs(log_dir, exist_ok=True)
shutil.copy(config_path, osp.join(log_dir, osp.basenam... |
def conv3x3_down(in_planes, out_planes):
    """3x3 convolution followed by 2x2 max-pool downsampling (halves H and W)."""
    layers = [
        conv3x3(in_planes, out_planes),
        nn.MaxPool2d(kernel_size=2, stride=2),
    ]
    return nn.Sequential(*layers)
def test_stop_event_stream_after_second_event(event_stream, workers_num, stop_worker):
next(event_stream)
assert isinstance(next(event_stream), events.BeforeExecution)
event_stream.stop()
assert isinstance(next(event_stream), events.Finished)
assert (next(event_stream, None) is None)
if (workers... |
def run_cs(N, alpha, f, prior_rho):
model = glm_generative(N=N, alpha=alpha, ensemble_type='random_feature', prior_type='gauss_bernoulli', output_type='gaussian', ensemble_f=f, prior_rho=prior_rho, output_var=1e-11)
scenario = BayesOptimalScenario(model, x_ids=['x'])
early = EarlyStopping()
records = sc... |
def test_jsonschema_error(testdir, openapi_3_schema_with_invalid_security):
testdir.make_test('\nlazy_schema = schemathesis.from_pytest_fixture("simple_schema")\n\_schema.parametrize()\(max_examples=1)\ndef test_(case):\n pass\n ', schema=openapi_3_schema_with_invalid_security, validate_schema=False)
resu... |
class TestBenchmarkContinuousTimeSeries(unittest.TestCase):
def test_benchmark_graph_density(self):
np.random.seed(0)
b = BenchmarkContinuousTimeSeries(algo_dict=None, kargs_dict=None, num_exp=2, custom_metric_dict=None)
b.benchmark_graph_density(graph_density_list=[0.1, 0.5], num_vars=5, T=... |
class TwoLayerConvModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.conv2 = torch.nn.Conv2d(5, 5, 1, bias=False).to(dtype=torch.float)
def forward(self, x):
x = self.conv1(x)
x = sel... |
@_require_initialized
def rpc_sync(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT):
    """Make a blocking synchronous RPC call.

    Fix: the `@` was missing from the `_require_initialized` decorator line,
    leaving it as a bare no-op expression statement, so the initialization
    guard never ran.

    Args:
        to: destination worker name/rank/WorkerInfo.
        func: callable to run remotely.
        args/kwargs: positional and keyword arguments for ``func``.
        timeout: RPC timeout (defaults to the unset sentinel).

    Returns:
        The result of running ``func`` on the destination worker.
    """
    fut = _invoke_rpc(to, func, RPCExecMode.SYNC, args, kwargs, timeout)
    # Block until the remote call completes and return its value.
    return fut.wait()
('delete_file', 'Delete file', '"filename": "<filename>"')
def delete_file(filename: str, agent: Agent) -> str:
if is_duplicate_operation('delete', filename, agent.config):
return 'Error: File has already been deleted.'
try:
os.remove(filename)
log_operation('delete', filename, agent)
... |
def torch_op(*, output_shapes=[(1,)]):
def inner(f):
from taichi.lang.util import has_pytorch
if has_pytorch():
import torch
class CustomTaichiOp(torch.autograd.Function):
def forward(ctx, *inputs):
outputs = tuple([torch.zeros(shape, dtype=torch.doubl... |
def _build_prompt(text, suffix, show_default=False, default=None, show_choices=True, type=None):
prompt = text
if ((type is not None) and show_choices and isinstance(type, Choice)):
prompt += ' ({})'.format(', '.join(map(str, type.choices)))
if ((default is not None) and show_default):
promp... |
class ConstantSchedule(ScalarSchedule):
    """Scalar schedule that returns the same value at every timestep."""

    def __init__(self, value):
        # The fixed value returned regardless of t.
        self._value = value

    def get_value(self, t):
        """Return the constant value; the timestep *t* is ignored."""
        return self._value
def require_sigopt(test_case):
    """Decorate *test_case* so it is skipped unless SigOpt is installed."""
    skip_decorator = unittest.skipUnless(is_sigopt_available(), 'test requires SigOpt')
    return skip_decorator(test_case)
def add_ipv4_address_tp_hash(module):
module.body.writeln('\nlong\n_ns3_Ipv4Address_tp_hash (PyObject *obj)\n{\n PyNs3Ipv4Address *addr = reinterpret_cast<PyNs3Ipv4Address *> (obj);\n return static_cast<long> (ns3::Ipv4AddressHash () (*addr->obj));\n}\n')
module.header.writeln('long _ns3_Ipv4Address_tp_hash... |
@_config
def task_mlm_itm():
    """Named config preset: joint MLM + ITM pretraining on CC3M.

    Fix: the `@` was missing from the `_config` decorator line, leaving it as
    a bare expression, so these settings were never registered. The local
    assignments below are harvested by the config decorator.
    """
    exp_name = 'mlm_itm'
    datasets = ['cc3m']
    loss_names = _loss_names({'itm': 1, 'mlm': 1})
    batch_size = 4096
    max_epoch = 10
    max_image_len = 200
def human_study_purpose(reference, nsample=1000):
data = []
nlp = stanza.Pipeline('en', processors='tokenize')
with open(src) as fsrc:
raw_data = fsrc.readlines()
sampled_id = random.sample(range(len(raw_data)), nsample)
for i in sampled_id:
text = raw_data[i].strip()
doc = n... |
_incremental_state
class MultiheadAttention(nn.Module):
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False):
super().__init__()
self.embed_dim = embed_dim
self.kd... |
def from_lehmer_cocode(lehmer, parent=Permutations()):
    """Reconstruct a permutation from its Lehmer cocode.

    Walks the cocode in reverse, each entry selecting (by offset from the
    current position) one of the remaining values 1..n, then reverses the
    picks to restore original order and wraps the result in *parent*.
    """
    n = len(lehmer)
    available = list(range(1, (n + 1)))
    picks = []
    for offset, cocode_value in enumerate(reversed(lehmer)):
        position = ((n - 1) - offset)
        picks.append(available.pop((position - cocode_value)))
    picks.reverse()
    return parent(picks)
class _NonLocalNd(nn.Module, metaclass=ABCMeta):
def __init__(self, in_channels: int, reduction: int=2, use_scale: bool=True, conv_cfg: Optional[Dict]=None, norm_cfg: Optional[Dict]=None, mode: str='embedded_gaussian', **kwargs):
super().__init__()
self.in_channels = in_channels
self.reducti... |
def _kmeans_single_lloyd(X, sample_weight, centers_init, max_iter=300, verbose=False, tol=0.0001, n_threads=1):
n_clusters = centers_init.shape[0]
centers = centers_init
centers_new = np.zeros_like(centers)
labels = np.full(X.shape[0], (- 1), dtype=np.int32)
labels_old = labels.copy()
weight_in_... |
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
img_labels_file = 'imagenet-1k-id2label.json'
num_labels = 1000
repo_id = 'huggingface/label-files'
num_labels = num_labels
id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type=... |
class XLMTokenizationTest(CommonTestCases.CommonTokenizerTester):
tokenizer_class = XLMTokenizer
def setUp(self):
super(XLMTokenizationTest, self).setUp()
vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer<... |
def _huber_loss_and_gradient(w, X, y, epsilon, alpha, sample_weight=None):
(_, n_features) = X.shape
fit_intercept = ((n_features + 2) == w.shape[0])
if fit_intercept:
intercept = w[(- 2)]
sigma = w[(- 1)]
w = w[:n_features]
n_samples = np.sum(sample_weight)
linear_loss = (y - safe_s... |
def _add_compositing(scene):
tree = scene.node_tree
alpha_node = tree.nodes.new('CompositorNodeAlphaOver')
composite_node = tree.nodes['Composite']
tree.links.new(tree.nodes['Render Layers'].outputs['Image'], alpha_node.inputs[1])
tree.links.new(tree.nodes['Background Render Layers'].outputs['Image'... |
def train_pvae(args):
torch.manual_seed(args.seed)
if (args.mask == 'indep'):
data = IndepMaskedCelebA(obs_prob=args.obs_prob)
mask_str = f'{args.mask}_{args.obs_prob}'
elif (args.mask == 'block'):
data = BlockMaskedCelebA(block_len=args.block_len)
mask_str = f'{args.mask}_{a... |
def _full_class_name(obj: Any) -> str:
return f'{obj.__class__.__module__}.{obj.__class__.__name__}' |
def test_2d_2d_different_stride_trick():
data = np.array([101], dtype=np.int32)
array = np.lib.stride_tricks.as_strided(data, (40, 3), strides=(0, 0))
container = {'node0-data': array}
form = '\n {\n "class": "NumpyArray",\n "primitive": "int32",\n "form_key": "no... |
def full_configs(training_sets):
    """Build one config dict per training set, pairing it with its index.

    Each training set is a 4-tuple whose third element is taken as ``t1``.
    """
    configs = []
    for (i, training_set) in enumerate(training_sets):
        (_, _, t1, _) = training_set
        # NOTE(review): `t0` is not defined in this function — the first
        # tuple element is discarded above, so `t0` must resolve at module
        # scope or this raises NameError. Possibly the unpack was meant to
        # be (t0, _, t1, _); verify against callers.
        config = {'t0': t0, 't1': t1, 'training_set': i}
        configs.append(config)
    return configs
class TrainDataset(Dataset):
def __init__(self, triples, nentity, negative_sample_size):
self.len = len(triples)
self.triples = triples
self.nentity = nentity
self.negative_sample_size = negative_sample_size
self.hr2t = ddict(set)
for (h, r, t) in triples:
... |
class TestParameterSweep():
def initialize(self):
CPB1 = qubit.Transmon(EJ=40.0, EC=0.2, ng=0.3, ncut=40, truncated_dim=3)
CPB2 = qubit.Transmon(EJ=30.0, EC=0.15, ng=0.0, ncut=10, truncated_dim=4)
resonator = qubit.Oscillator(E_osc=6.0, truncated_dim=4)
hilbertspace = HilbertSpace([C... |
def plot_tracks(tree, treeId, obj_id, imagePath, targetPath='outs'):
paths = tree.paths_to_leaves()
for pathId in range(len(paths)):
bboxs = []
for node in paths[pathId]:
t = int(node.split('_')[1])
bbox = tree.nodes[node].data['bbox']
bboxs.append(bbox)
... |
def combine_channel(channel_bs_user_k, channel_irs_user_k, channel_bs_irs, phase_shifts):
    """Combine the direct BS->user channel with the IRS-reflected path.

    Fix: the matrix-multiplication `@` operators were missing (stripped
    during extraction), making the original a syntax error.

    Args:
        channel_bs_user_k: direct BS -> user-k channel vector.
        channel_irs_user_k: IRS -> user-k channel vector.
        channel_bs_irs: BS -> IRS channel matrix.
        phase_shifts: per-element IRS phase-shift coefficients (diagonalized here).

    Returns:
        Tuple of (effective combined channel, BS->IRS channel with phase shifts applied).
    """
    channel_combine_irs = (channel_bs_irs @ np.diag(phase_shifts))
    channel_combine = (channel_bs_user_k + (channel_combine_irs @ channel_irs_user_k))
    return (channel_combine, channel_combine_irs)
def _decode_cfg_value(v):
if isinstance(v, dict):
return AttrDict(v)
if (not isinstance(v, basestring)):
return v
try:
v = literal_eval(v)
except ValueError:
pass
except SyntaxError:
pass
return v |
def init_environment(scorep_config, keep_files=False, verbose=False):
if ('libscorep' in os.environ.get('LD_PRELOAD', '')):
raise RuntimeError('Score-P is already loaded. This should not happen at this point')
if ('--user' not in scorep_config):
scorep_config.append('--user')
if verbose:
... |
class PPCTask(BaseTask):
def __init__(self):
self._spec = TaskSpecification('PPC', 'classification', 3, 2)
self._spec.evaluation_metric = self._spec.accuracy
def read(self, data_path: str, split: str) -> Iterable[DataExample]:
if (split == 'dev'):
split = 'test'
split... |
def load_from_adjacency_lists(fname):
adjlists = [l.split('\t') for l in open(fname).read().splitlines()]
allgenes = list(set([x for lst in adjlists for x in lst]))
g = igraph.Graph(directed=True)
g.add_vertices(allgenes)
for l in adjlists:
g.add_edges([(l[0], v) for v in l[1:]])
return ... |
def yolov8_preprocess_chw_transpose(x: np.ndarray, img_mean: float=0.0, img_std: float=255.0, pad_values: int=114, size: Tuple[(int, int)]=(640, 640)) -> np.ndarray:
    """Run YOLOv8 preprocessing, then transpose the result from HWC to CHW layout."""
    preprocessed_hwc = yolov8_preprocess(x, img_mean, img_std, pad_values, size)
    return preprocessed_hwc.transpose([2, 0, 1])
def get_bag_word_pairs(bag_word_size: tuple, scale_factor: int, scale_multipliers: list):
bag_sz = bag_word_size[0]
word_sz = bag_word_size[1]
assert ((bag_sz % word_sz) == 0), 'Bag size should be divisible by word size. Got B: {}, W: {}'.format(bag_sz, word_sz)
num_bags = (bag_sz // word_sz)
assert... |
def test_case130():
url = (brokerIp + '/ngsi-ld/v1/subscriptions/')
headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
r = requests.post(url, data=json.dumps(ld_data.subdata129), headers=headers)
print(r.content)
pr... |
class Config():
    """Hyper-parameter constants for the Riiid answer-prediction transformer."""
    # NOTE(review): hard-codes CUDA — will fail on CPU-only hosts; confirm intended.
    device = torch.device('cuda')
    # Maximum interaction-sequence length fed to the model.
    MAX_SEQ = 100
    # Embedding width shared by encoder and decoder.
    EMBED_DIMS = 512
    # Attention heads for encoder and decoder stacks (kept equal).
    ENC_HEADS = DEC_HEADS = 8
    # Number of encoder and decoder layers (kept equal).
    NUM_ENCODER = NUM_DECODER = 4
    BATCH_SIZE = 32
    # Kaggle Riiid competition training data path.
    TRAIN_FILE = '../input/riiid-test-answer-prediction/train.csv'
    # Total exercise count — presumably the exercise-id vocabulary size; confirm.
    TOTAL_EXE = 13523
    # Total category count — presumably the category-id vocabulary size; confirm.
    TOTAL_CAT = 10000
def _get_llama_config(use_flash=False) -> LlamaConfig:
    """Build a small test LlamaConfig with linear RoPE scaling (factor 2.0)."""
    scaling = {'type': 'linear', 'factor': 2.0}
    return LlamaConfig(
        seq_len=128,
        hidden_dim=16,
        num_heads=4,
        rope_scaling=scaling,
        gradient_checkpointing=False,
        use_flash_attention=use_flash,
    )
class FairseqLanguageModel(BaseFairseqModel):
def __init__(self, decoder):
super().__init__()
self.decoder = decoder
assert isinstance(self.decoder, FairseqDecoder)
def forward(self, src_tokens, src_lengths):
return self.decoder(src_tokens)
def max_positions(self):
re... |
class TrivialModel(model.Model):
    """Minimal model: flatten the image, one hidden unit, one 4096-wide affine layer."""

    def __init__(self):
        # Base-class args: name, 227 (=224+3), 32, 0.005 — presumably
        # image size, batch size, learning rate; confirm against model.Model.
        super(TrivialModel, self).__init__('trivial', (224 + 3), 32, 0.005)

    def add_inference(self, cnn):
        # Flatten the 227x227x3 input before the affine layers.
        flat_dim = ((227 * 227) * 3)
        cnn.reshape([(- 1), flat_dim])
        cnn.affine(1)
        cnn.affine(4096)
def simGetObjectInt32Parameter(objectHandle, parameter):
    """Query an int32 object parameter via the C API and return its value.

    Allocates an out-parameter, calls the library, runs both status checks,
    and dereferences the result.
    """
    out_value = ffi.new('int *')
    status = lib.simGetObjectInt32Parameter(objectHandle, parameter, out_value)
    _check_set_object_parameter(status)
    _check_return(status)
    return out_value[0]
class UCSDped(AnomalibVideoDataModule):
def __init__(self, root: (Path | str), category: str, clip_length_in_frames: int=1, frames_between_clips: int=1, task: TaskType=TaskType.SEGMENTATION, image_size: ((int | tuple[(int, int)]) | None)=None, center_crop: ((int | tuple[(int, int)]) | None)=None, normalization: (st... |
class ConstantLengthDataset(IterableDataset):
def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
self.tokenizer = tokenizer
self.concat_token_id = tokenizer.bos_token_id
self.dataset = dataset
self.seq_length = seq_length
self... |
class Semigroups(CategoryWithAxiom):
_base_category_class_and_axiom = (Magmas, 'Associative')
def example(self, choice='leftzero', **kwds):
import sage.categories.examples.semigroups as examples
if (choice == 'leftzero'):
return examples.LeftZeroSemigroup(**kwds)
else:
... |
def init_distributed_mode(args):
if args.dist_on_itp:
args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
args.dist_url = ('tcp://%s:%s' % (os.environ['MASTER_ADDR'], os... |
def detect_nan(i, node, fn):
if (not isinstance(node.op, (T.AllocEmpty, T.IncSubtensor, G.GpuAllocEmpty, G.GpuIncSubtensor))):
for output in fn.outputs:
if ((not isinstance(output[0], np.random.RandomState)) and (not np.isfinite(output[0]).all())):
print('*** NaN detected ***')
... |
def _sys_git_create_repo_workdir(repo, version):
    """Create a git worktree for *version* of *repo* next to the main checkout."""
    main_checkout = _main_repo_path(repo)
    target_rev = _rev_from_version(version)
    worktree_path = _repo_path(repo, version)
    common.logger.info('Git add worktree %s: %s', repo, version)
    # Run inside the main checkout so git resolves the rev there.
    check_call(['git', 'worktree', 'add', worktree_path, target_rev], cwd=main_checkout)
def test_get_successors(x_and_y_arrays):
(x, y, weights) = x_and_y_arrays._get_successors('a')
assert_array_equal(x_and_y_arrays.X_[0:2], x)
assert_array_equal(['b', 'e'], y)
assert (weights is None)
(x, y, weights) = x_and_y_arrays._get_successors('d')
assert_array_equal([x_and_y_arrays.X_[(- 1... |
def set_seeds(seed: int) -> None:
    """Seed Python's, NumPy's and PyTorch's (CPU and all-GPU) RNGs for reproducibility."""
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)
class ReplayPool(Serializable):
def __init__(self, observation_shape, action_dim, max_steps, observation_dtype=np.float32, action_dtype=np.float32, concat_observations=False, concat_length=1, rng=None):
self.observation_shape = observation_shape
self.action_dim = action_dim
self.max_steps = ... |
.parametrize('method', (filters.FilterSet.include, filters.FilterSet.exclude))
.parametrize('kwargs', ({'name': 'foo'}, {'func': matcher_func}, {'func': matcher_func, 'method': 'POST'}, {'func': (lambda o: True)}))
def test_repeating_filter(method, kwargs):
filter_set = filters.FilterSet()
filter_set.include(**... |
def test_ocsm_ncg_DY(fine_model, coarse_model, parameter_extraction):
space_mapping = ocsm.SpaceMappingProblem(fine_model, coarse_model, parameter_extraction, method='ncg', cg_type='DY', max_iter=8, tol=0.1, use_backtracking_line_search=False)
space_mapping.solve()
assert (np.abs((fine_model.cost_functional... |
class DummyScheduler(object):
    """No-op scheduler placeholder that satisfies the scheduler interface."""

    def __init__(self):
        # Nothing to configure.
        pass

    def update(self):
        """Do nothing; present only for interface compatibility."""
        pass
def extract_c_includes(fname):
result = {}
std_inc_pat = re.compile('[ \t]*#include[ \t]*"(.*)"[ \t]*')
system_inc_pat = re.compile('[ \t]*#include[ \t]*\\<.*\\>[ \t]*')
non_std_inc_pat = re.compile('.*#include.*')
f = io.open(fname, encoding='utf-8', mode='r')
linenum = 1
for line in f:
... |
class LongformerTokenizer(RobertaTokenizer):
    """Tokenizer for Longformer.

    No methods are overridden here: Longformer reuses RoBERTa's byte-level
    BPE tokenization; only the pretrained-file lookup tables below differ.
    """
    # Expected on-disk filenames for the vocabulary/merges files.
    vocab_files_names = VOCAB_FILES_NAMES
    # Map: pretrained model name -> downloadable vocab file locations.
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    # Map: pretrained model name -> maximum input length (positional embedding size).
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def test_parse_arguments():
from speechbrain.core import parse_arguments
(filename, run_opts, overrides) = parse_arguments(['params.yaml', '--device=cpu', '--seed=3', '--data_folder', 'TIMIT'])
assert (filename == 'params.yaml')
assert (run_opts['device'] == 'cpu')
assert (overrides == 'seed: 3\ndat... |
class ConfigDict():
    """Attribute container that additionally supports dict-style key access."""

    def __init__(self, name):
        # Identifier for this configuration.
        self.name = name

    def __getitem__(self, item):
        # Delegate subscripting to attribute lookup so cfg['x'] == cfg.x.
        return getattr(self, item)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.