code stringlengths 101 5.91M |
|---|
# Fix: the parametrize decorator lost its `@pytest.mark.` prefix during
# extraction, leaving a bare `.parametrize(...)` that is a syntax error at
# module level. Restored to the standard pytest form.
@pytest.mark.parametrize('split,num_sample', [('train', 22662), ('test', 5666)])
def test_ae_price_prediction(split, num_sample):
    """Each AEPricePrediction split must expose the expected number of rows."""
    df = AEPricePrediction(split).data
    assert len(df) == num_sample
def standardize_generator(g, convert_dict=None, as_cycles=False):
if isinstance(g, pari_gen):
g = list(g)
needs_conversion = True
if isinstance(g, GapElement_Permutation):
g = g.sage()
needs_conversion = False
if isinstance(g, GapElement):
g = str(g)
needs_convers... |
def meta_training(train_dataset, valid_dataset, embedding_model, relation_model, lr=None, optimizer=None, epochs=100, episodes=1000, ways=5, shots=5, query_num=15, report_epoch=1, lr_step_epoch=10, save_model_epoch=20, save_model_root='~/trained_models'):
lr = (0.001 if (lr is None) else lr)
if (optimizer is No... |
('resnet-test')
class ResNetTestConfig(ResNet32Config):
    """Tiny ResNet configuration intended for fast test runs."""

    def __init__(self):
        super().__init__()
        # Shrink the base config: small batch, few residual units, narrow
        # filter widths, 10-way classification.
        self.num_classes = 10
        self.batch_size = 10
        self.num_residual_units = [2, 2, 2]
        self.filters = [2, 2, 4, 8]
def inception_resnet_v2_arg_scope(weight_decay=4e-05, batch_norm_decay=0.9997, batch_norm_epsilon=0.001, activation_fn=tf.nn.relu):
with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_regularizer=slim.l2_regularizer(weight_decay), biases_regularizer=slim.l2_regularizer(weight_decay)):
batch_nor... |
def cases_test_moments():
fail_normalization = set(['vonmises'])
fail_higher = set(['vonmises', 'ncf'])
for (distname, arg) in (distcont[:] + [(histogram_test_instance, tuple())]):
if (distname == 'levy_stable'):
continue
cond1 = (distname not in fail_normalization)
cond2... |
def test():
W.set(3)
A = dace.ndarray([W])
B = dace.ndarray([W])
C = dace.ndarray([W])
A[:] = np.mgrid[0:W.get()]
B[:] = 0.0
C[:] = 0.0
local_inline(A, B, C)
diff = (np.linalg.norm((((- ((- A) + 1)) + 1) - C)) / W.get())
print('Difference:', diff)
print('==== Program end ====... |
_subclass('mpt')
class MptConfig(HFCompatConfig):
d_model: int = 768
n_heads: int = 12
n_layers: int = 12
expansion_ratio: int = 4
max_seq_len: int = 2048
learned_pos_emb: bool = False
attn_config: MptAttentionConfig = field(default_factory=MptAttentionConfig)
logit_scale: Optional[Union... |
def services_execution_time(num_of_users_yolo, num_of_users_mobilenet, num_of_cores, ram, workload_cpu, workload_gpu):
bash_pid = limit_cpu_core(num_of_cores)
workload_cpu_pid = generate_cpu_workload(workload_cpu, num_of_cores)
workload_gpu_pid = generate_gpu_workload(workload_gpu)
time.sleep(1)
wl_... |
(scope='module')
def schema_target_list_len():
return StructType([StructField('user_id', LongType(), True), StructField('item_id', LongType(), True), StructField('timestamp', LongType(), True), StructField('item_id_list', ArrayType(LongType(), False), False), StructField('timestamp_list', ArrayType(LongType(), Fals... |
def exists_lora_config_file(dir_path: Optional[Union[(Path, str)]]=None):
if (dir_path is None):
return False
dir_path = Path(dir_path)
assert dir_path.is_dir(), 'The following path {} should be a directory'.format(str(dir_path))
lora_config_file_path = (dir_path / 'adapter_config.json')
ret... |
def _hash_dict(d):
s = json.dumps(d, sort_keys=True, separators=(',', ':'), ensure_ascii=True)
return hashlib.sha224(s.encode('ascii')).hexdigest() |
def generate_generator_multiple(generator, dir1, dir2, batch_size, img_height, img_width):
genX1 = generator.flow_from_directory(dir1, target_size=(img_height, img_width), class_mode='categorical', batch_size=batch_size, shuffle=True, seed=32)
genX2 = generator.flow_from_directory(dir2, target_size=(img_height,... |
def _merge_a_into_b(a, b):
if (type(a) is not edict):
return
for (k, v) in a.iteritems():
if (not b.has_key(k)):
raise KeyError('{} is not a valid config key'.format(k))
old_type = type(b[k])
if (old_type is not type(v)):
if isinstance(b[k], np.ndarray):
... |
def changeAltKTrianglesC(G, i, j):
assert (lambda0 > 1)
delta = 0
for v in G.inIterator(i):
assert G.isArc(v, i)
if ((v == i) or (v == j)):
continue
if G.isArc(j, v):
delta += (((1 - (1 / lambda0)) ** G.MixTwoPathMatrix[(i, v)]) + ((1 - (1 / lambda0)) ** G.Mix... |
def _dropout_add_layer_norm_forward(x0, residual, gamma, beta, rowscale, colscale, dropout_p, epsilon, residual_in_fp32=False, is_rms_norm=False):
hidden_size = gamma.numel()
x0mat = x0.view(((- 1), hidden_size))
residualmat = (residual.view(((- 1), hidden_size)) if (residual is not None) else None)
row... |
def test_backwards_flow():
graph = csr_matrix([[0, 10, 0, 0, 10, 0, 0, 0], [0, 0, 10, 0, 0, 0, 0, 0], [0, 0, 0, 10, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 10], [0, 0, 0, 10, 0, 10, 0, 0], [0, 0, 0, 0, 0, 0, 10, 0], [0, 0, 0, 0, 0, 0, 0, 10], [0, 0, 0, 0, 0, 0, 0, 0]])
res = maximum_flow(graph, 0, 7)
assert (res... |
def register_functions(root_module):
module = root_module
module.add_function('MakeBoxChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [])
module.add_function('MakeRectangleChecker', 'ns3::Ptr< ns3::AttributeChecker const >', [])
module.add_function('MakeWaypointChecker', 'ns3::Ptr< ns3::AttributeCh... |
.parametrize('start_date', [pd.to_datetime('2020/01/01 08:00:00')])
.parametrize('end_date', [pd.to_datetime('2020/01/10 08:00:00')])
.parametrize('spatial_tessellation', [tessellation])
.parametrize('social_graph', ['random'])
.parametrize('n_agents', [(- 2), (- 1), 0])
.parametrize('random_state', [2])
.parametrize('... |
class AzureCLI(LoggingBase):
def __init__(self, system_config: SeBSConfig, docker_client: docker.client):
super().__init__()
repo_name = system_config.docker_repository()
image_name = 'manage.azure'
try:
docker_client.images.get(((repo_name + ':') + image_name))
e... |
class DataArguments():
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
tr... |
class _Logger(object):
def __init__(self):
self.n_chars = 0
self.lines = []
def print_over(self, *strings):
string = string_tuple_to_string(strings)
sys.stdout.write(('\r' * self.n_chars))
sys.stdout.write(string)
sys.stdout.flush()
self.n_chars = len(stri... |
def pool_flops_counter_hook(pool_module, inputs, output):
batch_size = inputs[0].size(0)
kernel_size = pool_module.kernel_size
(out_C, output_height, output_width) = output.shape[1:]
assert (out_C == inputs[0].size(1)), '{:} vs. {:}'.format(out_C, inputs[0].size())
overall_flops = (((((batch_size * ... |
class IEMOCAPDataset(Dataset):
def __init__(self, data_dir, meta_path, pre_load=True):
self.data_dir = data_dir
self.pre_load = pre_load
with open(meta_path, 'r') as f:
self.data = json.load(f)
self.class_dict = self.data['labels']
self.idx2emotion = {value: key f... |
class TestScatterOps(serial.SerializedTestCase):
(num_args=st.integers(1, 5), first_dim=st.integers(1, 20), index_dim=st.integers(1, 10), extra_dims=st.lists(st.integers(1, 4), min_size=0, max_size=3), ind_type=st.sampled_from([np.int32, np.int64]), **hu.gcs)
(deadline=10000)
def testScatterWeightedSum(self... |
# NOTE(review): the decorator line below lost its leading '@' and marker path
# in extraction (presumably something like `@pytest.mark.ignore_generate`);
# confirm against the original test module.
.ignore_generate
def test_cone_profile(get_cone_csvy_model, get_cone_reference_data):
    """The cone model parsed from CSVY must equal the stored reference data.

    Both arguments are fixtures provided elsewhere in the test suite.
    """
    assert (get_cone_csvy_model == get_cone_reference_data)
class ToTorchTensor(_CVTransformBase):
def __init__(self, format='HWC', dtype=None):
super(self.__class__, self).__init__('to torch tensor')
assert (format in ['HWC', 'CHW'])
self.format = format
self.dtype = dtype
def __call__(self, image):
tensor = torch.from_numpy(np.a... |
class wrapper1D(torch.nn.Module):
def __init__(self, input_shape, output_shape, use_embedder=True, weight='roberta', train_epoch=0, activation=None, target_seq_len=512, drop_out=None, from_scratch=False):
super().__init__()
self.dense = False
self.output_raw = True
self.weight = weig... |
class Subtract(_Merge):
def build(self, input_shape):
super().build(input_shape)
def _merge_function(self, inputs):
if (len(inputs) != 2):
raise ValueError('`Subtract` layer should be called on exactly 2 inputs')
if (inputs[0]._keras_shape != inputs[1]._keras_shape):
... |
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop_rate=0.0):
super().__init__()
out_features = (out_features or in_features)
hidden_features = (hidden_features or in_features)
self.fc1 = nn.Conv2d(in_features, hidde... |
def test_tsallis_shift_plus_2():
    """Smoke test: build an EntropyShift with alpha=2 and render its graph.

    system_1_b / system_2_b are module-level fixtures defined elsewhere.
    Only checks that plotting does not raise; nothing is asserted.
    """
    shift = EntropyShift(system_1_b, system_2_b, alpha=2)
    shift.get_shift_graph(system_names=['1B', '2B'])
def _get_dense_mask(X, value_to_mask):
with suppress(ImportError, AttributeError):
import pandas
if (value_to_mask is pandas.NA):
return pandas.isna(X)
if is_scalar_nan(value_to_mask):
if (X.dtype.kind == 'f'):
Xt = np.isnan(X)
elif (X.dtype.kind in ('i', ... |
class PipelineParams(ParamGroup):
    """Parameter group holding rendering-pipeline flags.

    ParamGroup presumably turns the attributes set below into CLI arguments
    via the parser passed to __init__ — confirm in ParamGroup.
    """

    def __init__(self, parser):
        # Toggle Python fallbacks vs. the default implementation for SH
        # conversion and 3D covariance; note cov3D defaults to the Python
        # path here (True) while SH conversion does not.
        self.convert_SHs_python = False
        self.compute_cov3D_python = True
        self.debug = False
        super().__init__(parser, 'Pipeline Parameters')
def test_classes_file_path():
tmp_file = tempfile.NamedTemporaryFile()
classes_path = f'{tmp_file.name}.txt'
train_pipeline = [dict(type='LoadImageFromFile')]
kwargs = dict(pipeline=train_pipeline, img_dir='./', classes=classes_path)
categories = get_classes('cityscapes')
with open(classes_path,... |
def test_inductor_error_massages():
    """Constructing an Inductor with a non-inductance unit must raise ValueError.

    'F' (farads) is a capacitance unit, so the constructor is expected to
    reject it with the documented message.
    """
    # NOTE(review): "massages" looks like a typo for "messages"; renaming
    # would change the collected test id, so it is left as-is.
    error = 'The input unit for the inductor is not correct. Look at the documentation for the correct input format.'
    with pytest.raises(ValueError, match=error):
        Inductor(10, 'F')
def clean_sentence(line):
line = line.replace('\n', '')
line = line.replace('- ', '')
line = line.replace('_', '')
line = line.replace('\\', '')
line = line.replace('"', '')
line = line.replace(' ', ' ')
remove_digits = str.maketrans('', '', digits)
line = line.translate(remove_digits)
... |
class TestIntegerDirectories(object):
def int_dirs(self, tmpdir):
tmpdir.mkdir('152_blah')
tmpdir.mkdir('153_woo')
tmpdir.mkdir('1_')
tmpdir.mkdir('-1')
tmpdir.mkdir('_10')
tmpdir.mkdir('.DS_Store')
tmpdir.mkdir('other')
return IntegerDirectories(str(t... |
def process_single_file(vertices, vertices_a_pose, Jtr, root_orient, pose_body, pose_hand, bone_transforms, abs_bone_transforms, trans, frame_name, gender, faces, args):
body_mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
bb_min = np.min(vertices, axis=0)
bb_max = np.max(vertices, axis=0)
total_... |
def copy_class_merge_token(hf_model, flax_params):
flax_class_token_params = flatten_nested_dict(flax_params['backbone']['merged_class_token'])
weight = torch.from_numpy(flax_class_token_params['scale'])
bias = torch.from_numpy(flax_class_token_params['bias'])
hf_model.layer_norm.weight = nn.Parameter(w... |
def compare_results(res1, res2):
    """Compare two query results for equality.

    Booleans are compared directly. Otherwise each result is treated as an
    iterable of dict-like rows, and rows are compared by their value lists —
    key names are ignored but value (insertion) order matters.

    Returns:
        bool: True when the results are equal under the rules above.
    """
    # isinstance is exactly equivalent to `type(x) is bool` here, since bool
    # cannot be subclassed in Python.
    if isinstance(res1, bool) or isinstance(res2, bool):
        return res1 == res2
    # Simplified from an `if ...: return True / else: return False` chain:
    # the comparison already yields the boolean we want.
    return [list(row.values()) for row in res1] == [list(row.values()) for row in res2]
def get_training_dataset():
    """Build the training input pipeline.

    Loads the training files, then repeats indefinitely, applies image
    augmentation in parallel, batches, and prefetches. TRAIN_FILENAMES,
    img_aug, AUTO and BATCH_SIZE are module-level names defined elsewhere.
    """
    ds = load_dataset(TRAIN_FILENAMES, train=True, ordered=False)
    return (
        ds.repeat()
        .map(img_aug, num_parallel_calls=AUTO)
        .batch(BATCH_SIZE)
        .prefetch(AUTO)
    )
def make_copy_to_fpga_state(sdfg):
state = sdfg.add_state('copy_to_device')
sdfg.add_array('A', [N, K], dtype=dace.float32)
sdfg.add_array('B', [K, M], dtype=dace.float32)
sdfg.add_array('C', [N, M], dtype=dace.float32)
A_host = state.add_read('A')
B_host = state.add_read('B')
C_host = state... |
def computation_communication_ratio(comp_times, comm_times):
    """Per-key fraction of total time spent on computation.

    Both arguments are dicts keyed by the same identifiers; for each key the
    result is comp / (comp + comm).
    """
    assert len(comp_times) == len(comm_times)
    return {
        key: comp_times[key] / (comp_times[key] + comm_times[key])
        for key in comp_times
    }
class Tracks():
LINEAR_PROBING = 'linear_probing'
TRANSFER_LEARNING = 'transfer_learning'
ZERO_SHOT = 'zero_shot'
VALID_TYPES = [LINEAR_PROBING, TRANSFER_LEARNING, ZERO_SHOT]
def is_valid(task, track):
if (track not in Tracks.VALID_TYPES):
return False
if (task in [Tasks.... |
def trunc_normal_(tensor, mean=0.0, std=1.0):
    """In-place truncated-normal initialization of `tensor`.

    Delegates to the module-level `__call_trunc_normal_` helper. Note the
    truncation bounds passed below are a = -std and b = +std (i.e. one
    standard deviation around zero, not around `mean`) — this mirrors the
    call as written; confirm it is intentional against the helper's docs.
    """
    __call_trunc_normal_(tensor, mean=mean, std=std, a=(- std), b=std)
class BloomConfig(PretrainedConfig):
model_type = 'bloom'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_hidden_layers': 'n_layer', 'num_attention_heads': 'n_head'}
def __init__(self, vocab_size=250880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-05, initializer_r... |
class FreeTokenCostEstimator(TokenCostEstimator):
    """Cost estimator for free services: every request costs zero tokens."""

    def estimate_tokens(self, request: Request, metric_service: MetricService) -> int:
        """Return 0 regardless of the request or available metrics."""
        return 0
class GlobalStep(layers.Layer):
def __init__(self, initial_step=0, **kwargs):
super().__init__(**kwargs)
self.initial_step = initial_step
self.supports_masking = True
def get_config(self):
return dict(**super().get_config(), initial_step=self.initial_step)
def build(self, inp... |
def save_video(env, expert):
    """Roll out the expert policy in `env`, rendering every frame.

    NOTE(review): despite the name, this function only resets, renders and
    closes the env; presumably the actual video capture happens inside
    env.render() or a recording wrapper — confirm with the caller.
    """
    env.reset()
    env.render()
    done = False
    while (not done):
        # expert.predict() takes no observation here; presumably the expert
        # tracks environment state internally — TODO confirm.
        (actions, _) = expert.predict()
        # Gym 4-tuple step API: (obs, reward, done, info); only `done` is used.
        (_, _, done, _) = env.step(actions)
        env.render()
    env.close()
def recursive_mark(ont, lbl):
    """Mark `lbl` and all of its descendants in `ont` as music-related.

    Adds labels to the module-level `music_related` set by walking
    `child_ids` recursively.

    Fix: the original recursed into children unconditionally, which re-walks
    shared subtrees on DAG-shaped ontologies and never terminates on cyclic
    ones. An already-marked label (and therefore its previously-marked
    subtree) is now skipped; on well-formed acyclic input starting from an
    empty set, the resulting set is identical.
    """
    if lbl in music_related:
        return
    music_related.add(lbl)
    for child in ont[lbl]['child_ids']:
        recursive_mark(ont, child)
class WordVocab(SimpleVocab):
UNK = u'<unk>'
START = u'<start>'
STOP = u'<stop>'
SPECIAL_TOKENS = (UNK, START, STOP)
def __init__(self, tokens):
super(WordVocab, self).__init__([t.lower() for t in tokens])
for special in self.SPECIAL_TOKENS:
if (special not in self):
... |
class ConvCBP(object):
def __init__(self, net, step_size=0.001, loss='mse', opt='sgd', beta=0.9, beta_2=0.999, replacement_rate=0.0001, decay_rate=0.9, init='kaiming', util_type='contribution', maturity_threshold=100, device='cpu', momentum=0, weight_decay=0):
self.net = net
if (opt == 'sgd'):
... |
class Parameters():
Pr: float = config.configuration.mio.initial_config.random_test_or_from_archive_probability
n: int = config.configuration.mio.initial_config.number_of_tests_per_target
m: int = config.configuration.mio.initial_config.number_of_mutations
def is_valid(self):
assert (self.Pr >= ... |
def _conv(input_var, name, filter_size, num_filter, strides, hidden_w_init, hidden_b_init, padding):
input_shape = input_var.get_shape()[(- 1)]
w_shape = [filter_size[0], filter_size[1], input_shape, num_filter]
b_shape = [1, 1, 1, num_filter]
with tf.compat.v1.variable_scope(name):
weight = tf.... |
def tokenize_and_filter_function(examples):
tokenized_examples = tokenizer(examples['text'], truncation=True, max_length=context_length)
result = {'input_ids': [], 'attention_mask': []}
for (i, input_ids) in enumerate(tokenized_examples['input_ids']):
if (len(input_ids) == context_length):
... |
class Conv2d(nn.Conv2d):
    """nn.Conv2d that defaults kernel_size to (1, 1).

    When constructed with exactly two positional arguments (in_channels,
    out_channels) and no explicit kernel_size keyword, a (1, 1) kernel is
    injected; otherwise arguments pass through unchanged.
    """

    def __init__(self, *args, **kwargs):
        needs_default_kernel = len(args) == 2 and 'kernel_size' not in kwargs
        if needs_default_kernel:
            args = (*args, (1, 1))
        super().__init__(*args, **kwargs)
class EmbeddingBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, drop=0):
super(EmbeddingBlock, self).__init__()
self.block = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size, padding=1), nn.Dropout2d(drop), nn.BatchNorm2d(out_channels), nn.Conv2d(out_chann... |
def test_1406issue():
array = ak.Array(ak.contents.ListOffsetArray(ak.index.Index64(np.array([1, 3], dtype=np.int64)), ak.contents.ListOffsetArray(ak.index.Index64(np.array([0, 2, 2, 3], dtype=np.int64)), ak.contents.NumpyArray(np.array([0, 1, 2], dtype=np.int64)))), check_valid=True)
index = ak.Array(ak.conten... |
def sample_gumbel_softmax(logits, temperature):
    """Draw a Gumbel-softmax sample over the last dimension of `logits`.

    Adds Gumbel noise (from the module-level `sample_gumbel` helper), scales
    by `temperature`, and applies a numerically stabilized softmax.
    """
    gumbel_noise = sample_gumbel(logits.shape)
    scaled = (gumbel_noise + logits) / temperature
    # Subtract the per-row maximum before exponentiating to avoid overflow.
    scaled = scaled - scaled.max(dim=(- 1), keepdim=True)[0]
    exp_scaled = torch.exp(scaled)
    return exp_scaled / exp_scaled.sum(dim=(- 1), keepdim=True)
def instantiate_callbacks(callbacks_cfg: DictConfig) -> List[Callback]:
callbacks: List[Callback] = []
if (not callbacks_cfg):
log.warning('No callback configs found! Skipping..')
return callbacks
if (not isinstance(callbacks_cfg, DictConfig)):
raise TypeError('Callbacks config must ... |
def register_Ns3CallbackImplBase_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
cls.add_method('IsEqual', 'bool', [param('ns3::Pt... |
class OwlViTVisionConfig(PretrainedConfig):
model_type = 'owlvit_vision_model'
def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initialize... |
class GPT2ForSequenceClassification():
    """Import-time placeholder used when PyTorch is unavailable.

    Any attempt to instantiate or load this class calls requires_pytorch,
    which presumably raises an informative error — confirm in its definition.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # NOTE(review): defined as an instance method here, although the real
    # class exposes from_pretrained as a classmethod; kept as-is.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def up_stage(inputs, skip, filters, kernel_size=3, activation='relu', padding='SAME'):
up = UpSampling3D()(inputs)
up = Conv3D(filters, 2, activation=activation, padding=padding)(up)
up = GroupNormalization()(up)
merge = concatenate([skip, up])
merge = GroupNormalization()(merge)
conv = Conv3D(f... |
class OrderedMultisetPartitionsIntoSets_X(OrderedMultisetPartitionsIntoSets):
def __init__(self, X):
self._X = X
if all((((k in ZZ) and (k > 0)) for (k, v) in X)):
self._Xtup = tuple([k for (k, v) in sorted(X) for _ in range(v)])
else:
self._Xtup = tuple([k for (k, v)... |
class Clip(core.Clip):
def __init__(self, clip_id, data_home, dataset_name, index, metadata):
super().__init__(clip_id, data_home, dataset_name, index, metadata)
self.audio_path = self.get_path('audio')
self.jams_path = self.get_path('jams')
self.txt_path = self.get_path('txt')
d... |
class FluorescenceTask(SequenceToFloatTask):
    """Sequence-to-scalar regression task for (log) fluorescence.

    Configures the base task with a single output dimension, MAE as the key
    metric, the 'log_fluorescence' label, and the fluorescence-specific
    deserialization function.
    """

    def __init__(self):
        d_output = 1  # one scalar prediction per sequence
        super().__init__(key_metric='MAE', deserialization_func=deserialize_fluorescence_sequence, d_output=d_output, label='log_fluorescence', input_name='encoder_output', output_name='prediction')
def test_RecordArray_NumpyArray():
array = ak.Array(ak.contents.RecordArray([ak.contents.NumpyArray(np.array([0, 1, 2, 3, 4], np.int64)), ak.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5]))], ['x', 'y']), backend='cuda')
assert (array.to_list() == [{'x': 0, 'y': 0.0}, {'x': 1, 'y': 1.1}, {'x': 2, '... |
class ObsDictPathCollector(MdpPathCollector):
def __init__(self, *args, observation_key='observation', **kwargs):
def obs_processor(obs):
return obs[observation_key]
rollout_fn = partial(rollout, preprocess_obs_for_policy_fn=obs_processor)
super().__init__(*args, rollout_fn=rollo... |
def _validate_label_map(label_map):
for item in label_map.item:
if (item.id < 1):
raise ValueError('Label map ids should be >= 1.') |
def test_is_datetime_type_with_datetime():
    """is_datetime_type must return truthy for a datetime.datetime instance."""
    data = datetime(2020, 1, 1)
    is_datetime = is_datetime_type(data)
    assert is_datetime
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--video_in', required=True, help='mp4 video to crop')
parser.add_argument('--x', type=int, required=True, help='X coordinate')
parser.add_argument('--y', type=int, required=True, help='Y coordinate')
parser.add_argument('--width', t... |
def get_left_span(span, sentence=None, window=None):
sentence = (sentence if sentence else span.sentence)
j = span.char_to_word_index(span.char_start)
i = (max((j - window), 0) if window else 0)
if (i == j == 0):
return Span(char_start=0, char_end=(- 1), sentence=sentence)
try:
(star... |
# NOTE(review): the line below appears to be a decorator that lost its
# leading '@' in extraction (likely `@_dispatch` from scipy.fft's uarray
# backend machinery) — confirm against the original module.
_dispatch
def dst(x, type=2, n=None, axis=(- 1), norm=None, overwrite_x=False, workers=None, orthogonalize=None):
    """Dispatch stub for the Discrete Sine Transform.

    This body never computes the transform: it returns the Dispatchable
    marker telling the uarray dispatch machinery which argument (`x`, as an
    ndarray) to dispatch on; a registered backend provides the real DST.
    """
    return (Dispatchable(x, np.ndarray),)
class MissingObjectException(SkyplaneException):
    """Raised when a requested object cannot be found or accessed."""

    def pretty_print_str(self):
        """Return a rich-markup, two-line description of the error."""
        lines = [
            f'[red][bold]:x: MissingObjectException:[/bold] {str(self)}[/red]',
            '[bold][red]Please ensure that the object exists and is accessible.[/red][/bold]',
        ]
        return '\n'.join(lines)
def convert_kernel(state_dict):
    """Rename kernel-related keys in a state dict.

    Each key first has 'kernel.kernel.L' rewritten to
    'kernel.kernel.l_kernel', then any remaining 'kernel.kernel' collapsed to
    'kernel' — the order of the two replacements matters and is preserved.
    Values are carried over untouched; iteration order is kept.
    """
    return {
        key.replace('kernel.kernel.L', 'kernel.kernel.l_kernel').replace('kernel.kernel', 'kernel'): value
        for key, value in state_dict.items()
    }
class LoginInfoOper():
def get_login_info(cls):
return db_session.query(LoginInfo.name, LoginInfo.password, LoginInfo.enable).filter(text('enable=1')).all()
_commit_decorator
def freeze_account(cls, name, rs):
account = db_session.query(LoginInfo).filter((LoginInfo.name == name)).first()
... |
def project_to_image(project, points):
points = convert_points_to_homogeneous(points)
points = points.unsqueeze(dim=(- 1))
project = project.unsqueeze(dim=1)
points_t = (project points)
points_t = points_t.squeeze(dim=(- 1))
points_img = convert_points_from_homogeneous(points_t)
points_dept... |
class _TestMultiProcessing(object):
start_method = None
def test_success(self):
mp.start_processes(test_success_func, nprocs=2, start_method=self.start_method)
def test_success_non_blocking(self):
mp_context = mp.start_processes(test_success_func, nprocs=2, join=False, start_method=self.star... |
class SquashedGaussianDistribution(Distribution):
_mean: torch.Tensor
_std: torch.Tensor
_dist: Normal
def __init__(self, loc: torch.Tensor, std: torch.Tensor):
self._mean = loc
self._std = std
self._dist = Normal(self._mean, self._std)
def sample(self) -> torch.Tensor:
... |
def register_Ns3WifiMacQueueItem_methods(root_module, cls):
cls.add_constructor([param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::WifiMacHeader const &', 'header')])
cls.add_method('GetPacket', 'ns3::Ptr< ns3::Packet const >', [], is_const=True)
cls.add_method('GetHeader', 'ns3::WifiMacHeader const ... |
class NextFile(object):
filesPerDir = 100
def __init__(self, path_name):
self.path_name = path_name
self.dir_index = (- 1)
self.file_index = (- 1)
def next(self):
self.file_index = ((self.file_index + 1) % NextFile.filesPerDir)
if (self.file_index == 0):
s... |
class Term(Struct):
name = ''
arg_types = ()
arg_shapes = {}
diff_info = {}
integration = 'cell'
geometries = ['1_2', '2_3', '2_4', '3_4', '3_8']
def new(name, integral, region, **kwargs):
from sfepy.terms import term_table
arg_str = _match_args(name)
if (arg_str is n... |
_module
class ResNet(nn.Module):
arch_settings = {18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}
def __init__(self, depth, in_channels=3, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1... |
class LayoutLMForMaskedLM(metaclass=DummyObject):
    """Placeholder class emitted when the 'torch' backend is not installed.

    Instantiation calls requires_backends, which presumably raises an
    informative error pointing at the missing dependency.
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert (stride in [1, 2])
hidden_dim = round((inp * expand_ratio))
self.use_res_connect = ((self.stride == 1) and (inp == oup))... |
def setup_torchaudio():
    """Best-effort installation of Facebook-internal ctypes monkeypatches.

    Outside fb infrastructure the import fails and this function is a
    deliberate no-op — the ImportError is swallowed on purpose.
    """
    try:
        import libfb.py.ctypesmonkeypatch
        libfb.py.ctypesmonkeypatch.install()
    except ImportError:
        pass
class ControlBase(ABC):
def __init__(self, statestore):
self._state = ReducerState.setup
self.statestore = statestore
if self.statestore.is_inited():
self.network = Network(self, statestore)
try:
not_ready = True
tries = 0
while not_rea... |
class PrisonSetup():
    """Static display and keyboard configuration for the prison environment."""
    from environment.prison import Actions
    # Tile character -> render color; the color constants (WHITE, GREEN, ...)
    # are module-level names defined elsewhere.
    colors = {'#': WHITE, '0': GREEN, '1': BLUE, ' ': BLACK, 'C': GREEN, 'D': RED, 'missing': RED, 'background': BLACK}
    # pygame key -> environment action. NOTE(review): K_UP maps to STAY, not
    # to an "up" action — looks intentional but confirm.
    action_map = {pygame.K_RIGHT: Actions.RIGHT, pygame.K_LEFT: Actions.LEFT, pygame.K_UP: Actions.STAY, pygame.K_SPACE: Actions.PUNISH}
def prepare_data(config, split='train', batch_size=32):
dataset_name = config['dataset'].lower()
if (dataset_name == 'shapenet'):
from datasets.shapenet import ShapeNetDataset
dataset = ShapeNetDataset(root_dir=config['data_dir'], classes=config['classes'], split=split)
else:
raise V... |
def test(hparams, run_opts, locales, wer_file='wer_test.txt'):
for locale in locales:
run_on_main(prepare_common_voice, kwargs={'locales': [locale], 'data_folder': hparams['data_folder'], 'max_durations': hparams['max_durations']})
if (locale in ['zh-CN', 'ja']):
hparams['wer_computer'] ... |
class Calibrator(object):
def __init__(self, calibrator_type, calibrator_dir, task_name, eval=True):
self.calibrator_type = calibrator_type
self.path = (calibrator_dir / f'{calibrator_type}_{task_name}.pkl')
self.eval = eval
if self.eval:
self.load()
def predict(self,... |
class DotProduct(Layer):
def __init__(self, scale=False, name='dot_product'):
super(DotProduct, self).__init__(name)
self.scale = scale
def __call__(self, t0, t1):
dots = tf.matmul(t0, t1, transpose_b=True)
if self.scale:
last_dims = t0.shape.as_list()[(- 1)]
... |
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(nn.Linear(((512 * 7) * 7), 4096), nn.ReLU(True), nn.Dropout(), nn.... |
.skipif((not has_pytorch()), reason='Pytorch not installed.')
_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
def test_ad_select():
s = (4,)
_utils.torch_op(output_shapes=[s])
def test(x: ti.types.ndarray(), y: ti.types.ndarray(), z: ti.types.ndarray()):
for i in x:
z[i] = ... |
def velocity_estimation(twist, vicon):
global time, data, n, window_width, plt, t
t = twist.header.stamp.to_sec()
dvx = ((twist.twist.linear.x * vicon.pose.position.z) - vicon.vel.x)
dvy = ((twist.twist.linear.y * vicon.pose.position.z) - vicon.vel.y)
dvz = ((twist.twist.linear.z * vicon.pose.positi... |
def init_sem_name():
    """Best-effort initialization of shared-semaphore names in the librml
    native library; any failure is reported as a warning, never raised.
    """
    try:
        # `libirml` is a module-level library path/name defined elsewhere.
        librml = ctypes.CDLL(libirml)
        librml.set_active_sem_name()
        librml.set_stop_sem_name()
    except Exception as e:
        # Broad catch is deliberate: a missing library or symbol should not
        # abort startup, only warn on stderr.
        print('Warning: Can not initialize name of shared semaphores:', e, file=sys.stderr)
class RobertaLongForMaskedLM(RobertaForMaskedLM):
    """RobertaForMaskedLM whose per-layer self-attention modules are replaced
    by RobertaLongSelfAttention (presumably Longformer-style long-document
    attention — confirm in RobertaLongSelfAttention)."""

    def __init__(self, config):
        super().__init__(config)
        # Swap each encoder layer's self-attention, passing the layer index
        # so the replacement module knows its position in the stack.
        for (i, layer) in enumerate(self.roberta.encoder.layer):
            layer.attention.self = RobertaLongSelfAttention(config, layer_id=i)
def display_summary(event: events.Finished) -> None:
    """Render the colored summary section for a finished run event."""
    (message, color) = get_summary_output(event)
    display_section_name(message, fg=color)
def test_riesz_scalar_products(rng, CG1, geometry, config_ocp, F, bcs, J, y, u, p):
riesz_scalar_product = ((TrialFunction(CG1) * TestFunction(CG1)) * geometry.dx)
ocp = cashocs.OptimalControlProblem(F, bcs, J, y, u, p, config=config_ocp, riesz_scalar_products=riesz_scalar_product)
assert (cashocs.verificat... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.