code stringlengths 101 5.91M |
|---|
class TrivializationFrame(LocalFrame):
_cobasis_class = TrivializationCoFrame
def __init__(self, trivialization):
from sage.misc.latex import latex
from .trivialization import Trivialization
if (not isinstance(trivialization, Trivialization)):
raise TypeError('the first argum... |
def wav2vec_large(refresh=False, legacy=False, **kwargs):
kwargs['ckpt'] = '
if (not legacy):
kwargs['ckpt'] = '
return wav2vec_custom(refresh=refresh, legacy=legacy, **kwargs) |
def mytest():
    """Smoke test: build random fixtures, print the input shape, and exit 0."""
    shape = (10, 142, 200)
    input_data = np.random.random(shape)
    Y = np.random.random(shape)
    alpha = np.random.random(shape[:2])
    print(input_data.shape)
    sys.exit(0)
def shuffle_for_conversion_1_or_2_utf8_bytes_aux():
for mask in range(256):
def getbit(k):
return ((mask & (1 << k)) != 0)
a = getbit(0)
b = getbit(2)
c = getbit(4)
d = getbit(6)
e = getbit(1)
f = getbit(3)
g = getbit(5)
h = getbit(... |
def r2(x, y, impute_nan=True):
    """Per-output R^2 between tensors x and y, computed on CPU.

    When impute_nan is True, NaNs in either tensor are replaced with zeros
    (torch.nan_to_num defaults) before scoring.
    """
    if impute_nan:
        x, y = torch.nan_to_num(x), torch.nan_to_num(y)
    return r2_score(x.cpu(), y.cpu(), multioutput='raw_values')
def get_setup_result(setup: TrainingSetup, model, epochs, train_set, train_loader, test_set, criterion, optimizer, evaluation_metrics, result_dict) -> Dict:
def _log_accuracy(epoch: int):
logger.info(f"Epoch: {epoch} - Training Loss: {round(evaluation_metrics['train_loss_list'][(epoch - 1)], 6)} - Tes... |
class DistributedMultiSourceRandomSampler(Sampler):
def __init__(self, data_source, num_samples=None, num_replicas=None, rank=None):
if (num_replicas is None):
if (not dist.is_available()):
raise RuntimeError('Requires distributed package to be available')
num_replica... |
def combine_coco_captions(annotation_path):
if (not os.path.exists(('%s/captions_%s2014.json' % (annotation_path, 'val')))):
raise Exception('Please download MSCOCO caption annotations for val set')
if (not os.path.exists(('%s/captions_%s2014.json' % (annotation_path, 'train')))):
raise Exceptio... |
def computeDST_EM(greedy, answer, tasks):
assert (len(tasks) == 1)
dataset_class = getattr(dialogues, tasks[0].dataset_name)
dataset = dataset_class()
answer = [dataset.span2state(a[0]) for a in answer]
greedy = [dataset.span2state(g) for g in greedy]
return dataset.compute_dst_em(greedy, answer... |
class CifarResNeXt(nn.Module):
def __init__(self, nlabels, cardinality=8, depth=29, base_width=64, widen_factor=4, in_channels=3):
super(CifarResNeXt, self).__init__()
self.cardinality = cardinality
self.depth = depth
self.block_depth = ((self.depth - 2) // 9)
self.base_width... |
def test_state_after():
    """Appending a state after every existing one doubles the state count
    and must not change the program's computed result."""
    sdfg, A, expected = _configure()
    original_states = list(sdfg.nodes())
    n_before = len(original_states)
    for st in original_states:
        sdfg.add_state_after(st)
    assert sdfg.number_of_nodes() == 2 * n_before
    sdfg(A=A)
    assert np.allclose(A, expected)
def print_table(tab, name=''):
    """Print a mapping of item -> count as a markdown table, largest first.

    Returns the (item, count) pairs sorted by descending count.
    """
    print('')
    print(f' | {name} | counts |')
    print(' | :--- | ---: |')
    ordered = sorted(tab.items(), key=lambda kv: kv[1], reverse=True)
    for item, count in ordered:
        print(f' | {item} | {count} |')
    return ordered
(scope='module')
def os_structured_full():
    """Structured-mode TriFinger observation space limited to three keys,
    with observation normalization turned off."""
    keys = ['action_joint_positions', 'joint_velocities', 'joint_torques']
    return TriFingerObservations(
        observation_mode='structured',
        observation_keys=keys,
        normalize_observations=False,
    )
def test_normalize_adj(example_graph):
node_list = list(example_graph.nodes())
Aadj = example_graph.to_adjacency_matrix()
csr = normalize_adj(Aadj, symmetric=True)
dense = csr.todense()
(eigen_vals, _) = np.linalg.eig(dense)
assert (eigen_vals.max() == pytest.approx(1, abs=1e-05))
assert (cs... |
def rebuild_cuda_tensor(tensor_cls, tensor_size, tensor_stride, tensor_offset, storage_cls, storage_device, storage_handle, storage_size_bytes, storage_offset_bytes, requires_grad, ref_counter_handle, ref_counter_offset, event_handle, event_sync_required):
if ((storage_handle is None) or (storage_size_bytes == 0)):... |
def translate_to_html(results_file, html_file):
html_file += '.html'
print(('Writing results to html file %s...' % html_file), end='')
f = open(html_file, 'w')
f.write('<html>\n')
f.write('<body>\n')
f.write('<center><h1>ns-3 Test Results</h1></center>\n')
import xml.etree.ElementTree as ET
... |
def test_train_database_train_objects_exist():
    """Both toy-cancer splits must expose non-None pos/neg/facts collections."""
    train, test = load_toy_cancer()
    for split in (train, test):
        assert split.pos is not None
        assert split.neg is not None
        assert split.facts is not None
class PairedImageTest(PairedImageBase):
def __init__(self, size, test_images_list_file=None, folder1=None, folder2=None):
super().__init__()
if (test_images_list_file is not None):
with open(test_images_list_file, 'r') as f:
paths = f.read().splitlines()
else:
... |
def test_prank(train_data):
(X, y) = train_data
est = PRank(n_iter=10, shuffle=False, random_state=0)
est.fit(X, y)
np.testing.assert_almost_equal(est.score(X, y), 41.86, 2)
est = PRank(n_iter=10, shuffle=True, random_state=0)
est.fit(X, y)
np.testing.assert_almost_equal(est.score(X, y), 71.... |
class LemmaProcessor(UDProcessor):
PROVIDES_DEFAULT = set([LEMMA])
REQUIRES_DEFAULT = set([TOKENIZE])
DEFAULT_BATCH_SIZE = 5000
def __init__(self, config, pipeline, use_gpu):
self._use_identity = None
super().__init__(config, pipeline, use_gpu)
def use_identity(self):
return ... |
def test__any_overlap_true(expected, observed):
    """_any_overlap must report 1 when the first expected 'true' part
    overlaps the observed 'true' interval."""
    segment = expected['true'][0]
    window = observed['true']
    expected_return = 1
    assert _any_overlap(segment, window) == expected_return
class TrainContext():
    """Context manager that switches every given module to train mode on
    entry and back to eval mode on exit."""

    def __init__(self, modules):
        self.modules = modules

    def __enter__(self):
        for module in self.modules:
            module.train()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        for module in self.modules:
            module.eval()
def _onenormest_matrix_power(A, p, t=2, itmax=5, compute_v=False, compute_w=False):
from scipy.sparse.linalg._onenormest import onenormest
return onenormest((aslinearoperator(A) ** p)) |
def is_tensorflow_text_available():
    """Return True iff TensorFlow and the tensorflow_text package are both importable."""
    if not is_tf_available():
        return False
    return importlib.util.find_spec('tensorflow_text') is not None
def estimate_advantages(rewards, masks, values, gamma, tau):
rewards = rewards.to(device_cpu)
masks = masks.to(device_cpu)
values = values.to(device_cpu)
tensor_type = type(rewards)
deltas = tensor_type(rewards.size(0), 1)
advantages = tensor_type(rewards.size(0), 1)
mc_returns = tensor_type... |
class LayoutLMModel():
    # Import-guard stub used when PyTorch is unavailable: every entry point
    # immediately raises via requires_pytorch instead of running model code.
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
    # NOTE(review): defined as an instance method here, unlike the usual
    # classmethod form of from_pretrained on dummy objects — confirm intended.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
class SEResNeXtBottleneck(Bottleneck):
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4):
super(SEResNeXtBottleneck, self).__init__()
width = int((math.floor((planes * (base_width / 64.0))) * groups))
self.conv1 = nn.Conv2d(inp... |
def fast_directional_self_attention(rep_tensor, rep_mask, hn, head_num=2, is_train=None, attn_keep_prob=1.0, dense_keep_prob=1.0, wd=0.0, use_direction=True, attn_self=False, use_fusion_gate=True, final_mask_ft=None, dot_activation_name='exp', use_input_for_attn=False, add_layer_for_multi=True, activation_func_name='re... |
class OpenAIGpt():
def __init__(self, split, prompt, dataset_type, n_iter=True):
load_dotenv()
self.split = split
self.prompt = prompt
self.dataset_type = dataset_type
if (self.dataset_type == 'msd'):
self.annotation = json.load(open('./dataset/ecals_annotation/an... |
def log_density_gaussian(x, mu, logvar):
    """Element-wise log-density of x under N(mu, exp(logvar)) (diagonal Gaussian)."""
    log_norm_const = -0.5 * (math.log(2 * math.pi) + logvar)
    precision = torch.exp(-logvar)
    return log_norm_const - 0.5 * precision * (x - mu) ** 2
def _flatten_config(cfg: DictConfig):
config = OmegaConf.to_container(cfg)
namespace = None
for (k, v) in list(config.items()):
if isinstance(v, argparse.Namespace):
namespace = v
del config[k]
if (namespace is not None):
config['args'] = vars(namespace)
retur... |
class Bottleneck(nn.Module):
def __init__(self, in_planes, growth_rate):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, (4 * growth_rate), kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d((4 * growth_rate))
self.c... |
def test_retina_head_forward_single():
    """ONNX-export validation of a single-level RetinaNet head forward pass."""
    model = retinanet_config()
    dummy_feature = torch.rand(1, model.in_channels, 32, 32)
    ort_validate(model.forward_single, dummy_feature)
class TFT5EncoderModel(metaclass=DummyObject):
    # Import-guard stub: stands in for the real TFT5EncoderModel when
    # TensorFlow is not installed; construction raises via requires_backends.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def test_quad_vec_pool():
f = _lorenzian
(res, err) = quad_vec(f, (- np.inf), np.inf, norm='max', epsabs=0.0001, workers=4)
assert_allclose(res, np.pi, rtol=0, atol=0.0001)
with Pool(10) as pool:
def f(x):
return (1 / (1 + (x ** 2)))
(res, _) = quad_vec(f, (- np.inf), np.inf,... |
class GradScaler(object):
_scale: Optional[torch.Tensor]
_grows_tracker: Optional[torch.Tensor]
_per_optimizer_states: Dict[(int, Dict[(str, Any)])]
def __init__(self, init_scale=(2.0 ** 16), growth_factor=2.0, backoff_factor=0.5, growth_interval=2000, enabled=True):
if (enabled and (not torch.c... |
def train_model(one_model):
optimizer = torch.optim.Adam(one_model.parameters(), lr=lr)
iterations = 150
epoch_iter = tqdm(range(iterations))
for epoch in epoch_iter:
optimizer.zero_grad()
(train_loss, test_loss, test_y) = one_model.one_d_regress(x_train, x_test, y_train, y_test_gt)
... |
def random_pairs_of_minibatches(minibatches):
perm = torch.randperm(len(minibatches)).tolist()
pairs = []
for i in range(len(minibatches)):
j = ((i + 1) if (i < (len(minibatches) - 1)) else 0)
(xi, yi) = (minibatches[perm[i]][0], minibatches[perm[i]][1])
(xj, yj) = (minibatches[perm[... |
def __getattr__(name):
if (name not in __all__):
raise AttributeError(f'`scipy.misc.doccer` has no attribute `{name}`; furthermore, `scipy.misc.doccer` is deprecated and will be removed in SciPy 2.0.0.')
attr = getattr(import_module('scipy._lib.doccer'), name, None)
if (attr is not None):
me... |
def plot_roc_curve_image(y_true, y_pred, path):
sns.set(style='whitegrid', font_scale=1.5)
plt.figure(figsize=(10, 10))
(fpr_reg, tpr_reg, _) = roc_curve(y_true, y_pred)
auc_score_reg = roc_auc_score(y_true, y_score=y_pred)
lw = 2
plt.plot(fpr_reg, tpr_reg, color='darkorange', lw=lw, label='Whit... |
def add_args(parser):
parser.add_argument('--task', type=str, required=True, choices=['summarize', 'concode', 'translate', 'refine', 'defect', 'clone', 'multi_task'])
parser.add_argument('--sub_task', type=str, default='')
parser.add_argument('--lang', type=str, default='')
parser.add_argument('--eval_t... |
def parse(exit_code, log, output):
(findings, infos) = ([], set())
(errors, fails) = sb.parse_utils.errors_fails(exit_code, log)
errors.discard('EXIT_CODE_1')
if (('DOCKER_TIMEOUT' in fails) or ('DOCKER_KILL_OOM' in fails)):
fails.discard('exception (Killed)')
for e in list(fails):
m... |
def load_image_from_url(url):
    """Download an image over HTTP and return it preprocessed for the model."""
    raw_bytes = requests.get(url).content
    pil_image = Image.open(BytesIO(raw_bytes))
    return preprocess_image(pil_image)
def change_color(id_robot, color):
    """Apply a color to every link of a PyBullet robot.

    A 3-tuple RGB color is promoted to RGBA with alpha = 1.
    """
    if len(color) == 3:
        color = (*color, 1)
    for joint_index in range(p.getNumJoints(id_robot)):
        p.changeVisualShape(id_robot, joint_index, rgbaColor=color)
def generate_substitute_method_trait(byte_array, name, template):
s = StringIO()
fields = template.fields()
field_types = [f.c_type() for f in fields]
field_names = [f.name for f in fields]
if ((len(fields) == 1) and (field_types[0] == 'u32')):
s.write(('impl AssemblyTemplateSubstitute for %... |
def test_graph_schema_sampling_tree(example_graph_schema):
schema = example_graph_schema(bb=0)
type_list = schema.type_adjacency_list(['A', 'B'], 3)
(_, type_tree) = schema.sampling_tree(['A', 'B'], 3)
def check_tree(tree):
items = []
for x in tree:
chd = check_tree(x[2])
... |
def get_RelationAndNucleus(label_index):
RelationTable = ['Attribution_SN', 'Enablement_NS', 'Cause_SN', 'Cause_NN', 'Temporal_SN', 'Condition_NN', 'Cause_NS', 'Elaboration_NS', 'Background_NS', 'Topic-Comment_SN', 'Elaboration_SN', 'Evaluation_SN', 'Explanation_NN', 'TextualOrganization_NN', 'Background_SN', 'Cont... |
def fixed_poi_fit(poi_val, data, pdf, init_pars=None, par_bounds=None, fixed_params=None, **kwargs):
if (pdf.config.poi_index is None):
raise UnspecifiedPOI('No POI is defined. A POI is required to fit with a fixed POI.')
init_pars = [*(init_pars or pdf.config.suggested_init())]
fixed_params = [*(fi... |
class MarkupLMProcessor(ProcessorMixin):
feature_extractor_class = 'MarkupLMFeatureExtractor'
tokenizer_class = ('MarkupLMTokenizer', 'MarkupLMTokenizerFast')
parse_html = True
def __call__(self, html_strings=None, nodes=None, xpaths=None, node_labels=None, questions=None, add_special_tokens: bool=True,... |
def register_Ns3UdpL4Protocol_methods(root_module, cls):
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_static_attribute('PROT_NUMBER', 'uint8_t const', is_const=True)
cls.add_constructor([])
cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
cls.add_... |
.parametrize('GradientBoosting, X, y', [(HistGradientBoostingClassifier, X_classification, y_classification), (HistGradientBoostingRegressor, X_regression, y_regression)])
def test_max_iter_with_warm_start_validation(GradientBoosting, X, y):
estimator = GradientBoosting(max_iter=10, early_stopping=False, warm_start... |
class DiscriminatorP(torch.nn.Module):
_init
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False, hidden=32):
super(DiscriminatorP, self).__init__()
self.period = period
norm_f = (weight_norm if (use_spectral_norm == False) else spectral_norm)
self.convs =... |
def number_of_visits(traj, show_progress=True):
if (constants.UID not in traj.columns):
return len(traj)
if show_progress:
df = traj.groupby(constants.UID).progress_apply((lambda x: len(x)))
else:
df = traj.groupby(constants.UID).apply((lambda x: len(x)))
return pd.DataFrame(df).... |
_sentencepiece
_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = MBart50Tokenizer
rust_tokenizer_class = MBart50TokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
tokenizer = MBart50T... |
def GetWordIds(text, vocab, pad_len=None, pad_id=None):
ids = []
for w in text.split():
i = vocab.WordToId(w)
if (i >= 0):
ids.append(i)
else:
ids.append(vocab.WordToId(UNKNOWN_TOKEN))
if (pad_len is not None):
return Pad(ids, pad_id, pad_len)
retu... |
def filter_by_node(node_dict, edgefile, outname):
with open(edgefile, 'r') as in_file:
with open(outname, 'w') as out_file:
csv_reader = csv.reader(in_file, delimiter=',')
csv_writer = csv.writer(out_file, delimiter=',')
csv_writer.writerow(['token_address', 'from_address... |
def postprocess_results(dataset: TextToSpeechDataset, sample, hypos, resample_fn, dump_target):
def to_np(x):
return (None if (x is None) else x.detach().cpu().numpy())
sample_ids = [dataset.ids[i] for i in sample['id'].tolist()]
texts = sample['src_texts']
attns = [to_np(hypo['attn']) for hypo ... |
def VDCNN17MaxPool(num_classes=5, shortcut=False, bias=False):
    """17-layer VDCNN variant that downsamples with max-pooling blocks."""
    return VDCNN(
        MaxPoolBlock,
        blocks=[2, 2, 2, 2],
        filters=[64, 128, 256, 512],
        num_classes=num_classes,
        shortcut=shortcut,
        bias=bias,
    )
def check_hole_scope(hole_pos, class_spans):
    """Find the first 'start_end' span whose body contains the hole line.

    Returns None as soon as a span starts on the hole's line or carries the
    sentinel start -1; returns the matching span string otherwise, or None
    if no span contains the line.
    """
    line, _col = hole_pos
    for span in class_spans:
        parts = span.split('_')
        start, end = int(parts[0]), int(parts[1])
        if line == start or start == -1:
            return None
        if start < line <= end:
            return span
    return None
def prune_graph_backward(opG):
    """Remove the single backward-pass edge joining the dX1/dX2-gamma combine
    node to the combined QKV-merge node from the operator graph."""
    edge = ('combined_dX1gamma_dX2gamma', 'combined_QKV-merge_baib')
    opG.remove_edge(*edge)
def scaled_exp(field, scale_constant):
    """Build an edge UDF exponentiating edge feature *field* after scaling.

    The scaled value is clamped to [-5, 5] before exp for numerical stability.
    """
    def udf(edges):
        scaled = (edges.data[field] / scale_constant).clamp(-5, 5)
        return {field: torch.exp(scaled)}
    return udf
def get_results(result_map, r_name, relative=None, parent_folder=None):
rs = result_map[r_name]
if (relative is None):
return rs.get_results(parent_folder)
base_rs = result_map[relative]
base_seen = base_rs.get_per_run_results(parentdir=parent_folder)
r_seen = rs.get_per_run_results(parentdi... |
def train_mlpinit():
model_mlpinit.train()
total_loss = total_correct = 0
for (x, y) in tqdm(train_mlpinit_loader):
x = x.to(device)
y = y.to(device)
optimizer_model_mlpinit.zero_grad()
out = model_mlpinit(x)
loss = F.nll_loss(out, y)
loss.backward()
o... |
def pair_bb84_protocols(sender: 'BB84', receiver: 'BB84') -> None:
    """Cross-link two BB84 endpoints and assign roles (0 = sender, 1 = receiver)."""
    sender.another, receiver.another = receiver, sender
    sender.role, receiver.role = 0, 1
class HBFile(object):
def __init__(self, file, hb_info=None):
self._fid = file
if (hb_info is None):
self._hb_info = HBInfo.from_file(file)
else:
self._hb_info = hb_info
def title(self):
return self._hb_info.title
def key(self):
return self._hb... |
def _get_locals_and_globals(f):
result = {'__dace__': True}
result.update(f.__globals__)
if (f.__closure__ is not None):
result.update({k: v for (k, v) in zip(f.__code__.co_freevars, [_get_cell_contents_or_none(x) for x in f.__closure__])})
return result |
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
label_map = {label: i for (i, label) in enumerate(label_list)}
features = []
max_len = 0
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
... |
def build_detect(cfg):
detect_cfg = deepcopy(cfg)
name = detect_cfg.pop('name')
if (name == 'FCOSDetect'):
return FCOSDetect(**detect_cfg)
elif (name == 'YOLOv5Detect'):
return YOLOv5Detect(**detect_cfg)
elif (name == 'YOLOv6Detect'):
return YOLOv6Detect(**detect_cfg)
eli... |
def main():
args = parse_args()
spark = SparkSession.builder.appName('forecast').getOrCreate()
df = read_dataset(spark=spark, file_format=args.file_format, path=args.train_data, time_col=args.time_col, index_cols=args.index_cols, data_cols=args.data_cols)
if (args.time_col is None):
args.time_co... |
def test_read_structure():
(M, N, nnz) = (dace.symbol(s) for s in ('M', 'N', 'nnz'))
csr_obj = dace.data.Structure(dict(indptr=dace.int32[(M + 1)], indices=dace.int32[nnz], data=dace.float32[nnz]), name='CSRMatrix')
sdfg = dace.SDFG('csr_to_dense')
sdfg.add_datadesc('A', csr_obj)
sdfg.add_array('B',... |
('revnet-btl-test')
class RevNetBottleneckTestConfig(RevNet38Config):
def __init__(self):
super(RevNetBottleneckTestConfig, self).__init__()
self.batch_size = 10
self.num_residual_units = [2, 2]
self.filters = [16, 16, 32]
self.height = 8
self.width = 8
self.m... |
()
('--data_path')
('--out_path')
def main(data_path, out_path):
DEFENDED = out_path
dataset = data_path
outdirectory = DEFENDED
if (not os.path.exists(outdirectory)):
os.makedirs(outdirectory)
unmod = []
mod = []
added = []
count = 0
for fname in tqdm(os.listdir(dataset)):
... |
class BaseModelTest(BasePytorchTest):
def __init__(self, unit_test, model, float_reconstruction_error=1e-05, convert_to_fx=True):
super().__init__(unit_test, float_reconstruction_error, convert_to_fx)
self.model = model
def create_inputs_shape(self):
return [[self.val_batch_size, 3, 224,... |
_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = ((OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else ())
all_generative_model_classes = ((OpenAIGPTLMHeadModel,) if is_t... |
def parameters_string(module):
lines = ['', 'List of model parameters:', '']
row_format = '{name:<40} {shape:>20} ={total_size:>12,d}'
params = list(module.named_parameters())
for (name, param) in params:
lines.append(row_format.format(name=name, shape=' * '.join((str(p) for p in param.size())),... |
class TestEnsureClipped(hu.HypothesisTestCase):
(X=hu.arrays(dims=[5, 10], elements=hu.floats(min_value=(- 1.0), max_value=1.0)), in_place=st.booleans(), sparse=st.booleans(), indices=hu.arrays(dims=[5], elements=st.booleans()), **hu.gcs_cpu_only)
def test_ensure_clipped(self, X, in_place, sparse, indices, gc, ... |
class CmpNode(object):
special_bool_cmp_function = None
special_bool_cmp_utility_code = None
def infer_type(self, env):
return py_object_type
def calculate_cascaded_constant_result(self, operand1_result):
func = compile_time_binary_operators[self.operator]
operand2_result = self.... |
def get_neighbor(publish_time):
sort_pt = np.argsort(np.array(publish_time))
window = 100
neighbor_dict = {}
cnt = 0
item_num = len(publish_time)
for i in sort_pt:
left = max(0, (cnt - window))
right = min((cnt + window), item_num)
neighbor_dict[i] = sort_pt[left:right]
... |
def module_cppgen_impl(a):
module_path = a.MODOLE
print(f'Generating C++ header for Taichi module: {Path(module_path).absolute()}')
tcm = None
if (a.bin2c and module_path.endswith('.tcm')):
with open(module_path, 'rb') as f:
tcm = f.read()
if a.module_name:
module_name = ... |
class Hrep2Vrep(PivotedInequalities):
def __init__(self, base_ring, dim, inequalities, equations):
super().__init__(base_ring, dim)
inequalities = [list(x) for x in inequalities]
equations = [list(x) for x in equations]
if ((not inequalities) and (not equations)):
inequal... |
class TrainDataset(Dataset):
def __init__(self, data, tokenizer, max_len=256):
self.data = data
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return len(self.data)
def __getitem__(self, index):
text = self.data[index]
tokens = self.token... |
def random_expr_helper(n_nodes, internal, leaves, verbose):
if (n_nodes == 1):
return choose_from_prob_list(leaves)[1]
else:
r = choose_from_prob_list(internal)
n_nodes -= 1
n_children = r[2]
n_spare_nodes = (n_nodes - n_children)
if (n_spare_nodes <= 0):
... |
def create_initializer(initializer_range=0.02):
    """Return a TF1 truncated-normal weight initializer with the given stddev."""
    return tf.truncated_normal_initializer(stddev=initializer_range)
class DQN(OffPolicyRLAlgorithm):
def __init__(self, env_spec, policy, qf, replay_buffer, exploration_strategy=None, steps_per_epoch=20, min_buffer_size=int(10000.0), buffer_batch_size=64, rollout_batch_size=1, n_train_steps=50, max_path_length=None, qf_lr=0.001, qf_optimizer=tf.compat.v1.train.AdamOptimizer, discou... |
def cohen_kappa(output, target, topk=(1,)):
    """Quadratic-weighted Cohen's kappa from the model's top-k predictions.

    k is capped at the number of classes (output's second dimension); the
    kappa itself is delegated to cohen_kappa_score.
    """
    num_classes = output.size()[1]
    k = min(max(topk), num_classes)
    pred = output.topk(k, 1, True, True)[1]
    return cohen_kappa_score(pred, target, weights='quadratic')
class UnionArray(UnionMeta[Content], Content):
def __init__(self, tags, index, contents, *, parameters=None):
if (not (isinstance(tags, Index) and (tags.dtype == np.dtype(np.int8)))):
raise TypeError("{} 'tags' must be an Index with dtype=int8, not {}".format(type(self).__name__, repr(tags)))
... |
def GenerateSM80_TensorOp_884_complex_gaussian(manifest, args):
if (not CudaToolkitVersionSatisfies(args.cuda_version, 11, 0)):
return
layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), (LayoutType.R... |
def test_linear_direct():
time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
(in_dim, out_dim) = (Dim(7, name='in'), Dim(13, name='out'))
extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32'), 'classes': Tensor('classes', [batch_dim, time_dim], dtype='int32... |
def DegreeSequenceTree(deg_sequence):
    """Build a Sage Graph for a tree realizing the given degree sequence."""
    import networkx
    degrees = [int(d) for d in deg_sequence]
    return Graph(networkx.degree_sequence_tree(degrees))
_utils.test(arch=[ti.cpu])
def test_arch_list_cpu():
    """The active Taichi backend must be CPU when the arch list is [ti.cpu]."""
    active_arch = ti.lang.impl.current_cfg().arch
    assert active_arch in [ti.cpu]
def register_Ns3TracedValue__Ns3SequenceNumber__lt__unsigned_int__int__gt___methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::TracedValue< ns3::SequenceNumber< unsigned int, int > > const &', 'o')])
cls.add_constructor([param('ns3::SequenceNumber< unsigned int, int > const ... |
class SqueezeEmbedding(nn.Module):
def __init__(self, batch_first=True):
super(SqueezeEmbedding, self).__init__()
self.batch_first = batch_first
def forward(self, x, x_len):
x_sort_idx = torch.sort((- x_len))[1].long()
x_unsort_idx = torch.sort(x_sort_idx)[1].long()
x_len... |
def read_simple_element(f, type_, size):
date = None
if (size == 0):
return ''
if (type_ == EET.UNSIGNED):
data = read_fixedlength_number(f, size, False)
elif (type_ == EET.SIGNED):
data = read_fixedlength_number(f, size, True)
elif (type_ == EET.TEXTA):
data = f.read... |
class GP_diag(GPbase):
name = 'GP_diag'
def __init__(self, manif: Manifold, m: int, n_samples: int, ts: torch.Tensor, _scale=0.9, ell=None):
super(GP_diag, self).__init__(manif, m, n_samples, ts, _scale=_scale, ell=ell)
def I_v(self, v, sample_idxs=None):
scale = self.scale
if (sampl... |
class MPolynomialMult2(Benchmark):
def __init__(self, nvars=2, base=QQ, allow_singular=True):
if (nvars % 2):
nvars += 1
self.nvars = nvars
self.base = base
self.allow_singular = allow_singular
s = ('Compute (x_1 + 2*x_2 + 3*x_3 + ... + %s*x_%s) * (%s * x_%s + ...... |
def fit_predict_add_res(name: str, model: Recommender, experiment: Experiment, train: PandasDataFrame, top_k: int, test_users: PandasDataFrame, predict_only: bool=False):
start_time = time.time()
if (not predict_only):
if (isinstance(model, CQL) or isinstance(model, LightFMWrap)):
model.fit(... |
def _chisquare(f_obs, f_exp):
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
chisq = f_obs
chisq -= f_exp
chisq **= 2
with np.errstate(invalid='ignore'):
chisq /= f_exp
chisq = chisq.sum(axis=0)
return (chisq, special.chdtrc((k - 1), chisq)) |
class SegformerConfig(PretrainedConfig):
model_type = 'segformer'
def __init__(self, image_size=224, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], downsampling_rates=[1, 4, 8, 16], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention... |
class PolynomialQuotientRingElement(polynomial_singular_interface.Polynomial_singular_repr, CommutativeRingElement):
def __init__(self, parent, polynomial, check=True):
from sage.rings.polynomial.polynomial_quotient_ring import PolynomialQuotientRing_generic
from sage.rings.polynomial.polynomial_ele... |
class FakeBSMNode(Node):
def __init__(self, name, tl, **kwargs):
super().__init__(name, tl)
self.msg_log = []
def receive_message(self, src: str, msg: 'Message'):
self.msg_log.append((self.timeline.now(), src, msg))
super().receive_message(src, msg)
def receive_qubit(self, sr... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.