code stringlengths 101 5.91M |
|---|
def create_window(width, height, title, monitor, share):
    """Create a GLFW window; thin wrapper over ``glfwCreateWindow``.

    ``title`` is converted to a C ``char*`` via ``_to_char_p``; ``width`` and
    ``height`` are pixel dimensions.  ``monitor`` and ``share`` are forwarded
    unchanged (presumably GLFW monitor/window handles or None — see GLFW docs).
    """
    return _glfw.glfwCreateWindow(width, height, _to_char_p(title), monitor, share)
class TDNN(Model):
def __init__(self, num_inputs, num_outputs, method='cls', name='TDNN'):
super().__init__(name=name)
self.method = method
self.model = nn.Sequential(nn.Conv1d(num_inputs, 512, 5, padding=2), nn.BatchNorm1d(512), nn.ReLU(inplace=True), nn.Conv1d(512, 512, 3, dilation=2, padd... |
_module()
class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
def __init__(self, fc_out_channels=1024, *args, **kwargs):
super(Shared4Conv1FCBBoxHead, self).__init__(*args, num_shared_convs=4, num_shared_fcs=1, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, fc_out_channels=fc_out_channels, **kwar... |
def p_buffer_or_template(s, base_type_node, templates):
pos = s.position()
s.next()
(positional_args, keyword_args) = p_positional_and_keyword_args(s, (']',), templates)
s.expect(']')
if (s.sy == '['):
base_type_node = p_buffer_or_template(s, base_type_node, templates)
keyword_dict = Exp... |
.memoize(for_each_device=True)
def cupy_launch(strFunction, strKernel):
    """Compile CUDA source *strKernel* with CuPy and return kernel *strFunction*.

    NOTE(review): ``cupy.cuda.compile_with_cache`` is a private API removed in
    newer CuPy releases (``cupy.RawModule`` is the replacement) — confirm
    against the pinned CuPy version.  A ``memoize(for_each_device=True)``
    decorator (mangled above) caches the compiled kernel per device.
    """
    return cupy.cuda.compile_with_cache(strKernel).get_function(strFunction)
def _add_speaker_and_signal(header, source, get_conversation=True):
BEGIN_SIGNAL = '### '
END_SIGNAL = '\n'
conversation = header
for sentence in source:
from_str = sentence['from']
if (from_str.lower() == 'human'):
from_str = conversation_lib.default_conversation.roles[0]
... |
def get_device(ts) -> torch.device:
    """Return the device of the first Tensor found in *ts* (after flattening).

    Falls back to CUDA when available, otherwise CPU, if no Tensor is present.
    """
    for element in flatten(ts):
        if isinstance(element, Tensor):
            return element.device
    default_backend = 'cuda' if torch.cuda.is_available() else 'cpu'
    return torch.device(default_backend)
def find_relationships(schema_graph, table, incoming=True):
relationships = []
for relationship_obj in schema_graph.relationships:
if ((relationship_obj.end == table) and incoming):
relationships.append(relationship_obj)
if ((relationship_obj.start == table) and (not incoming)):
... |
class DateField(DateTimeField):
def __init__(self, label=None, validators=None, format='%Y-%m-%d', **kwargs):
super(DateField, self).__init__(label, validators, format, **kwargs)
def process_formdata(self, valuelist):
if valuelist:
date_str = ' '.join(valuelist)
try:
... |
def convert_dict_to_openai_object(data: dict) -> openai_object.OpenAIObject:
    """Build an ``OpenAIObject`` populated with the entries of *data*."""
    obj = openai_object.OpenAIObject()
    obj.update(data)
    return obj
def _fused_bias_act_cuda(x, b, axis, act, alpha, gain):
x = tf.convert_to_tensor(x)
empty_tensor = tf.constant([], dtype=x.dtype)
b = (tf.convert_to_tensor(b) if (b is not None) else empty_tensor)
act_spec = activation_funcs[act]
assert ((b.shape.rank == 1) and ((b.shape[0] == 0) or (b.shape[0] == x... |
def test_detectorrs_resnet_backbone():
detectorrs_cfg = dict(depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', conv_cfg=dict(type='ConvAWS'), sac=dict(type='SAC', use_deform=True), stage_with_sac=(False, True, True, True... |
def annotate_sentence(corenlp, gloss):
    """Annotate *gloss* with a CoreNLP client, retrying once on failure.

    Returns the whitespace-joined token words of all sentences in the parse.

    The retry exists because the CoreNLP server occasionally fails
    transiently; we back off 10 seconds and try once more (the second
    attempt is unprotected and will propagate any error).
    """
    try:
        parse = corenlp.annotate(gloss)
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; only catch real errors here.
        time.sleep(10)
        parse = corenlp.annotate(gloss)
    token_str = ' '.join(token['word'] for sentence in parse.json['sentence'] for token in sentence['token'])
    return token_str
class StorageWeakRef(object):
    """Weak reference to a torch Storage's underlying data.

    Holds only the C-level weak-ref handle (``cdata``), so the Storage
    object itself can be garbage-collected while this reference lives.
    """
    def __init__(self, storage):
        self.cdata = storage._weak_ref()
        # Bound on the instance at construction time — presumably so __del__
        # can still free the ref during interpreter shutdown, when module
        # globals (torch) may already be torn down.  TODO confirm.
        self._free_weak_ref = torch.Storage._free_weak_ref
    def expired(self):
        # True once the referenced storage's memory has been released.
        return torch.Storage._expired(self.cdata)
    def __del__(self):
        self._free_weak_ref(self.cdata)
def average_vertex_var_in_cells(ths_in):
ths = dict.fromkeys(list(ths_in.keys()))
for (var, th) in six.iteritems(ths_in):
aux = dict.fromkeys(list(th.keys()))
for (ir, data) in six.iteritems(th):
if isinstance(data, dict):
for (ic, ndata) in six.iteritems(data):
... |
def load_module_from_path(path):
assert path.exists(), 'The expected file was not found.'
module_path = path.parent
module_name = path.name.split('.')[0]
module_path = f'{module_path.name}.{module_name}'
spec = importlib.util.spec_from_file_location(module_path, path)
module = importlib.util.mod... |
def evaluate_policy(model, env, lang_embeddings, args):
conf_dir = (Path(__file__).absolute().parents[2] / 'conf')
task_cfg = OmegaConf.load((conf_dir / 'callbacks/rollout/tasks/new_playtable_tasks.yaml'))
task_oracle = hydra.utils.instantiate(task_cfg)
val_annotations = OmegaConf.load((conf_dir / 'anno... |
def count_others(sql):
count = 0
agg_count = count_agg(sql['select'][1])
agg_count += count_agg(sql['where'][::2])
agg_count += count_agg(sql['groupBy'])
if (len(sql['orderBy']) > 0):
agg_count += count_agg(([unit[1] for unit in sql['orderBy'][1] if unit[1]] + [unit[2] for unit in sql['order... |
_module()
class PSEHead(PANHead):
def __init__(self, in_channels, out_channels, downsample_ratio=0.25, loss=dict(type='PSELoss'), postprocessor=dict(type='PSEPostprocessor', text_repr_type='poly'), train_cfg=None, test_cfg=None, init_cfg=None, **kwargs):
super().__init__(in_channels=in_channels, out_channel... |
def online_smallest_comp_node_matching(graph: Graph, node_weight_function, edge_weight_function, L, uf: UnionFind, verbose=False, record_history=False):
prev_graph = Graph.from_other(graph)
uf2 = UnionFind(elements=graph._nodes.keys())
hd = ValueSortedDict({n: node_weight_function(n) for n in graph.non_inpu... |
def test_no_xpos():
    """A Dataset built from XPOS-free CoNLL data reports UPOS and FEATS but not XPOS."""
    args = tagger.parse_args(args=[])
    doc = CoNLL.conll2doc(input_str=TRAIN_DATA_NO_XPOS)
    dataset = Dataset(doc, args, None)
    assert dataset.has_upos
    assert not dataset.has_xpos
    assert dataset.has_feats
class DatasetWriter():
def __init__(self):
self.args = self.load_config()
pprint.pprint(self.args.__dict__)
self.model = self.load_model()
def __getattr__(self, attr):
return getattr(self.args, attr)
def read_manifest(self, fname):
with open(fname, 'r') as fp:
... |
.sm70
_utils.test(arch=archs_support_f16)
def test_binary_op():
dtype = ti.f16
x = ti.field(dtype, shape=())
y = ti.field(dtype, shape=())
z = ti.field(dtype, shape=())
def add():
x[None] = (y[None] + z[None])
x[None] = (x[None] * z[None])
y[None] = 0.2
z[None] = 0.72
add... |
def discriminator_loss(loss_func, real, fake, real_blur):
real_loss = 0
fake_loss = 0
real_blur_loss = 0
if ((loss_func == 'wgan-gp') or (loss_func == 'wgan-lp')):
real_loss = (- tf.reduce_mean(real))
fake_loss = tf.reduce_mean(fake)
real_blur_loss = tf.reduce_mean(real_blur)
... |
class BasicBlock(nn.Module):
def __init__(self, inplane, outplane, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplane, outplane, padding=0, stride=stride)
self.bn1 = nn.BatchNorm3d(outplane)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x... |
def PlotShortPathDistr(tspec, *args):
if (type(tspec) == PUNGraph):
return PlotShortPathDistr_PUNGraph(tspec, *args)
if (type(tspec) == PUndirNet):
return PlotShortPathDistr_PUndirNet(tspec, *args)
if (type(tspec) == PDirNet):
return PlotShortPathDistr_PDirNet(tspec, *args)
if (t... |
def build_entity_swap_mask(data_fold):
with open(('%s/pseudo_tokenized_train.txt' % data_fold), 'r') as fr:
mask_lst = []
for line in fr.readlines():
ids = [int(token_id) for token_id in line.split()]
bos_index = ids.index(BOS)
masks = ((['0'] * (bos_index + 1)) +... |
class UntrackableCompositeLayer(Layer):
    """Layer that attaches its attributes *before* calling ``Layer.__init__``,
    presumably so the base class's attribute tracking never observes them."""
    def __init__(self, attributes):
        index = 0
        for attribute in attributes:
            setattr(self, f'var_{index}', attribute)
            index += 1
        # Deliberately called last — see class docstring.
        super().__init__()
def stencil(A: dace.float64[N], B: dace.float64[N]):
tmp1 = np.ndarray(shape=[N], dtype=dace.float64)
tmp2 = np.ndarray(shape=[N], dtype=dace.float64)
tmp3 = np.ndarray(shape=[N], dtype=dace.float64)
def m1(i: _[1:N]):
(in1 << A[i])
(in2 << A[(i - 1)])
(out1 >> tmp1[i])
o... |
class SeparableConv(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1):
super(SeparableConv, self).__init__(nn.Conv2d(in_channels, in_channels, kernel_size, stride=stride, dilation=dilation, padding=(((stride - 1) + (dilation * (kernel_size - 1))) // 2), group... |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help=('Path... |
class ClassicControlAcrobotEnv(SingleAgentEnv):
name = 'ClassicControlAcrobotEnv'
def __init__(self, episode_length, env_backend='cpu', reset_pool_size=0, seed=None):
super().__init__(episode_length, env_backend, reset_pool_size, seed=seed)
self.gym_env = AcrobotEnv()
self.action_space =... |
def get_extensions():
this_dir = path.dirname(path.abspath(__file__))
extensions_dir = path.join(this_dir, 'detectron2', 'layers', 'csrc')
main_source = path.join(extensions_dir, 'vision.cpp')
sources = glob.glob(path.join(extensions_dir, '**', '*.cpp'))
source_cuda = (glob.glob(path.join(extensions... |
def cprint(st, c='r'):
    """Print *st* wrapped in an ANSI color escape.

    ``c`` selects the color: 'r' red, 'g' green, 'b' blue, 'y' yellow.
    Unknown codes print without color (BUG FIX: the original left the
    color variable unbound for unknown codes and raised NameError).
    """
    colors = {'r': '\x1b[91m', 'g': '\x1b[92m', 'b': '\x1b[94m', 'y': '\x1b[93m'}
    CEND = '\x1b[0m'
    start = colors.get(c, '')
    print(start + st + CEND)
def filter_blacked_out_images(image_locations: List[str]) -> List[str]:
    """Return only the image paths that are not blacked out."""
    kept = []
    for location in image_locations:
        if not is_blacked_out_image(location):
            kept.append(location)
    return kept
def copy_conv2plus1d(module, blobs, i, j):
assert isinstance(module, Conv2Plus1D)
assert (len(module) == 4)
copy_conv(module[0], blobs, (((('comp_' + str(i)) + '_conv_') + str(j)) + '_middle'))
copy_bn(module[1], blobs, (((('comp_' + str(i)) + '_spatbn_') + str(j)) + '_middle'))
assert isinstance(mo... |
def set_key_file(self):
global DOTNET_KEY_FILE
if (not (DOTNET_KEY_FILE is None)):
self.key_file = DOTNET_KEY_FILE
if (not (self.key_file is None)):
if os.path.isfile(self.key_file):
self.key_file = os.path.abspath(self.key_file)
elif os.path.isfile(os.path.join(self.src_... |
def plot_result(args, train_loss, train_accuracy, test_loss, test_accuracy, save_plot=True):
xs = list(range(len(train_loss)))
(f, (fg1, fg2)) = plt.subplots(1, 2)
fg1.set_title('Loss during training')
fg1.plot(xs, train_loss, '-b', label='Train')
fg1.plot(xs, test_loss, '-r', label='Test')
fg1.... |
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
def test_where_double_backward(seed, ctx, func_name):
from nbla_test_utils import backward_function_tester
rng = np.random.RandomState(seed)
inshape = (2, 2)
inputs = [(rng.rand(*inshape) > 0.5).astype(np.float32), rng.randn(*inshape), rng... |
class IndexedOptionArray(Content):
def __init__(self, index, content):
assert isinstance(index, list)
assert isinstance(content, Content)
for x in index:
assert isinstance(x, int)
assert (x < len(content))
self.index = index
self.content = content
... |
def dataSorting():
    """Collect (basename, data) pairs from ``foo()`` until either is falsy,
    then sort and return them.

    BUG FIX: the original built and sorted ``alist`` but discarded it
    (implicitly returning None); the sorted list is now returned, which is
    backward-compatible for callers that ignored the result.

    Note: the terminating pair (with a falsy element) is appended before the
    loop exits — preserved from the original behavior.
    """
    alist = []
    basename, data = True, True
    while basename and data:
        basename, data = foo()
        alist.append((basename, data))
    alist.sort()
    return alist
_config
def padded_all_scenario():
LOCAL_TESTING = False
fixed_mdp = ['scenario2', 'simple', 'schelling_s', 'unident_s']
PADDED_MDP_SHAPE = (10, 5)
sim_threads = (10 if LOCAL_TESTING else 60)
PPO_RUN_TOT_TIMESTEPS = (40000 if (not LOCAL_TESTING) else 1000)
TOTAL_BATCH_SIZE = (20000 if (not LOCAL... |
def densenet121_model(img_rows, img_cols, color_type=1, nb_dense_block=4, growth_rate=32, nb_filter=64, reduction=0.5, dropout_rate=0.0, weight_decay=0.0001, num_classes=None):
eps = 1.1e-05
compression = (1.0 - reduction)
global concat_axis
if (K.image_dim_ordering() == 'tf'):
concat_axis = 3
... |
def _istft(y):
    """Inverse STFT of *y* using the module-wide STFT hop/window parameters."""
    params = _stft_parameters()
    return librosa.istft(y, hop_length=params[1], win_length=params[2])
def require_tensorflow_probability(test_case):
    """Test decorator: skip *test_case* unless tensorflow-probability is installed."""
    if is_tensorflow_probability_available():
        return test_case
    return unittest.skip('test requires TensorFlow probability')(test_case)
.torch
def test_invalid_tensor_schema(fake_schema):
with pytest.raises(ValueError) as exc1:
SequenceTokenizer(fake_schema.subset(['item_id', 'some_user_feature']))
with pytest.raises(ValueError) as exc2:
SequenceTokenizer(fake_schema.subset(['item_id', 'some_item_feature']))
with pytest.rais... |
def validate_with_david_generated_program(model, data, device, pretrained_dir):
program_generator = load_program_generator(os.path.join(pretrained_dir, 'program_generator.pt')).to(device)
david_vocab = json.load(open(os.path.join(pretrained_dir, 'david_vocab.json')))
david_vocab['program_idx_to_token'] = in... |
class cmd_dir_arg(cmd_arg):
    """Command argument that must resolve to an existing directory node."""
    def find_node(self, base_path):
        """Resolve ``self.name`` as a directory under *base_path*.

        Raises ``Errors.WafError`` when the directory does not exist.
        """
        assert isinstance(base_path, Node.Node)
        self.node = base_path.find_dir(self.name)
        if self.node is None:
            # BUG FIX: the original format string had a single %s but a
            # two-element tuple, so formatting raised TypeError instead of
            # the intended WafError.
            raise Errors.WafError('Directory %s not found in %s' % (self.name, base_path))
def split_video_mkvmerge(input_video_paths, scene_list, output_file_template, video_name, suppress_output=False):
if ((not input_video_paths) or (not scene_list)):
return
logging.info('Splitting input video%s using mkvmerge, output path template:\n %s', ('s' if (len(input_video_paths) > 1) else ''), ou... |
def facets_for_K3():
from sage.groups.perm_gps.permgroup import PermutationGroup
G = PermutationGroup([[(1, 3, 8, 4, 9, 16, 15, 2, 14, 12, 6, 7, 13, 5, 10)], [(1, 11, 16), (2, 10, 14), (3, 12, 13), (4, 9, 15), (5, 7, 8)]])
return ([tuple([g(i) for i in (1, 2, 3, 8, 12)]) for g in G] + [tuple([g(i) for i in ... |
def parse_ml_slot_classes(ml_slot_classes):
values = set()
entity_api_name = ''
extract_type = 'Value'
if isinstance(ml_slot_classes, dict):
return _parse_ml_slot_classes_dict(ml_slot_classes)
assert isinstance(ml_slot_classes, list)
for item in ml_slot_classes:
k = list(item.key... |
def create_json(metadata, audio_data_folder, folds_list, json_file):
json_dict = {}
for (ID, sample_metadata) in metadata.items():
fold_num = int(sample_metadata['fold'])
if (fold_num in folds_list):
wav_file = os.path.join(os.path.abspath(audio_data_folder), (('fold' + str(fold_num)... |
def rotate_y(angle_degrees: int, c2w: np.ndarray) -> np.ndarray:
    """Rotate a 4x4 camera-to-world matrix about the Y axis.

    Builds the homogeneous Y-axis rotation for *angle_degrees* and applies it
    on the right of *c2w*.

    BUG FIX: the original ``return (c2w rotation_matrix)`` was a syntax
    error — the matrix-multiplication operator ``@`` had been lost.
    """
    angle_radians = np.radians(angle_degrees)
    rotation_matrix = np.array([[np.cos(angle_radians), 0, np.sin(angle_radians), 0], [0, 1, 0, 0], [(- np.sin(angle_radians)), 0, np.cos(angle_radians), 0], [0, 0, 0, 1]])
    return c2w @ rotation_matrix
class InferenceResult():
    """Result bundle for a causal-inference run.

    NOTE(review): the bare annotated fields below only become instance fields
    under a ``@dataclass`` decorator, which appears to have been stripped by
    extraction — confirm against the original source.
    """
    # Average treatment effect estimate.
    ate: float = None
    # Standard error of the ATE estimate.
    stderr: float = None
    # (lower, upper) confidence-interval bounds.
    ci: tuple = (None, None)
    # Per-unit effect estimates (presumably shape (n,) — TODO confirm).
    individual_effects: np.ndarray = None
    # Wall-clock duration of the run (assumed seconds — confirm units).
    elapsed_time: float = None
def srwl_opt_setup_bumps(_ampl, _sx, _sy, _n, _delta, _atten_len, _rx, _ry, _xc=0, _yc=0, _nx=1001, _ny=1001, _n_sig=4, _ampl_min=None, _sx_min=None, _sy_min=None, _seed=None):
def SortPair(_pair, _mult=1):
x1 = (_pair[0] * _mult)
x2 = (_pair[1] * _mult)
if (x1 > x2):
aux = x1
... |
class SELU_GoogLeNet(nn.Module):
def __init__(self):
super(SELU_GoogLeNet, self).__init__()
self.pre_layers = nn.Sequential(nn.Conv2d(3, 192, kernel_size=3, padding=1), nn.BatchNorm2d(192), nn.SELU(True))
self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
self.b3 = Inception(256, 128,... |
def test_grad_test():
def fg(x):
n = len(x)
c = (np.arange(n) + 1)
f = (np.sum((c * (x ** 2))) + np.sum(x))
g = (((2 * x) * c) + np.ones(n))
return (f, g)
options = {'ls': 0, 'verbose': 10, 'grad_test': True}
x0 = np.ones(5)
c = (np.arange(5) + 1)
res = minimi... |
def load_optim(optimizer, weights):
    """Restore *optimizer* state from the checkpoint file at *weights*.

    Returns the learning rate of the last param group after restoration
    (the loop overwrites ``lr`` for each group, keeping only the final one).
    """
    checkpoint = torch.load(weights)
    optimizer.load_state_dict(checkpoint['optimizer'])
    for group in optimizer.param_groups:
        current_lr = group['lr']
    return current_lr
def _read_img_worker(path, key, compress_level):
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
if (img.ndim == 2):
(h, w) = img.shape
c = 1
else:
(h, w, c) = img.shape
(_, img_byte) = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
return (key, img_byt... |
def get_internal_scopes(state: SDFGState, entry: nodes.EntryNode, immediate: bool=False) -> List[Tuple[(SDFGState, nodes.EntryNode)]]:
stree = scope_tree_recursive(state, entry)
result = []
def traverse(state: SDFGState, treenode: ScopeTree):
for child in treenode.children:
if (child.ent... |
('sdmetrics.visualization.get_column_pair_plot')
def test_get_column_pair_plot_with_discrete_data(mock_get_plot):
columns = ['name', 'subscriber']
real_data = pd.DataFrame({'name': ['John', 'Emily'], 'subscriber': [True, False]})
synthetic_data = pd.DataFrame({'name': ['John', 'Johanna'], 'subscriber': [Fal... |
(frozen=True)
class Trace():
    """Container for one recorded task trace.

    NOTE(review): a ``@dataclass(frozen=True)`` decorator appears to have
    been stripped by extraction (stray ``(frozen=True)`` immediately above);
    the bare annotations below only take effect under dataclass processing.
    """
    # High-level steps of the trace.
    steps: List[Step]
    # Finer-grained steps underlying the high-level ones.
    low_level_steps: List[Step]
    # Mapping from action identifier to its ActionInfo (key semantics
    # presumed from the names — confirm against callers).
    action_infos: Dict[(str, ActionInfo)]
    # Natural-language description of the task.
    task_description: str
def set_global_backend(backend, coerce=False, only=False, *, try_last=False):
    """Register *backend* as the global uarray backend.

    Thin pass-through to the C-accelerated ``_uarray.set_global_backend``;
    see the uarray documentation for the semantics of ``coerce``, ``only``
    and ``try_last`` (note ``try_last`` is keyword-only here but forwarded
    positionally to the C function).
    """
    _uarray.set_global_backend(backend, coerce, only, try_last)
_utils.test()
def test_nested_static():
    """Nesting ``ti.static`` inside ``ti.static`` must raise TaichiCompilationError."""
    def kernel_body():
        for _ in ti.static(ti.static(range(1))):
            pass
    with pytest.raises(ti.TaichiCompilationError):
        kernel_body()
def share_blobs(net, heads, namescope, dont_share_blobs=None, blob_shapes=None):
external_input = set(net.Proto().external_input)
def is_new_blob(b):
name = str(b)
return ((b not in external_input) and (name.startswith(namescope) or name.startswith(('_' + namescope))))
log.warn('NOTE: Execut... |
def fractal_dimension_test(image_filename: str, expected_fractal_dimension: float):
    """Check compute_fractal_dimension on a bundled test image against an expected value."""
    test_dir = os.path.dirname(__file__)
    image_path: str = os.path.join(test_dir, 'test_images', image_filename)
    measured: float = compute_fractal_dimension(image_path)
    assert round(measured, 2) == expected_fractal_dimension
def merge(measurements):
if (not measurements):
return None
states = [m.__getstate__() for m in measurements]
for k in states[0].keys():
if (k in ('number_per_run', 'times', 'metadata')):
continue
assert all(((s[k] == states[0][k]) for s in states))
numbers_per_run = ... |
def debug_wrapper(func):
    """Decorator that prints *func*'s name before each call when the
    module-level ``DEBUG`` flag is truthy.

    BUG FIX: the wrapper now carries ``functools.wraps``, so the decorated
    function keeps its ``__name__``/``__doc__``/signature metadata instead
    of being clobbered by ``func_wrapper``.
    """
    import functools

    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        if DEBUG:
            print(func.__name__)
        return func(*args, **kwargs)
    return func_wrapper
class DecomposeSeparableConvTest(BaseKerasFeatureNetworkTest):
def __init__(self, unit_test, depth=1):
self.depth_multiplier = depth
super().__init__(unit_test, experimental_exporter=True)
def get_quantization_config(self):
return mct.core.QuantizationConfig(weights_bias_correction=False... |
class LeanExprSimps():
    """Collections of Lean simplification rewrites, grouped by kind."""
    const_div_rw: List[str] = dataclasses.field(default_factory=list)
    add_comm: List[str] = dataclasses.field(default_factory=list)
    def is_empty(self) -> bool:
        """True when no rewrites of either kind are recorded."""
        return not self.const_div_rw and not self.add_comm
def register_Ns3ParetoRandomVariable_methods(root_module, cls):
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_constructor([])
cls.add_method('GetMean', 'double', [], is_const=True)
cls.add_method('GetScale', 'double', [], is_const=True)
cls.add_method('GetShape', 'double', [... |
def compute_on_dataset(model, data_loader, device, bbox_aug, timer=None):
model.eval()
results_dict = {}
cpu_device = torch.device('cpu')
for (_, batch) in enumerate(tqdm(data_loader)):
(images, targets, image_ids) = batch
with torch.no_grad():
if timer:
timer... |
def test_k_best():
st = SelfTrainingClassifier(KNeighborsClassifier(n_neighbors=1), criterion='k_best', k_best=10, max_iter=None)
y_train_only_one_label = np.copy(y_train)
y_train_only_one_label[1:] = (- 1)
n_samples = y_train.shape[0]
n_expected_iter = ceil(((n_samples - 1) / 10))
st.fit(X_trai... |
class BasicTransform(nn.Module):
def __init__(self, w_in, w_out, stride, w_b=None, num_gs=1):
err_str = 'Basic transform does not support w_b and num_gs options'
assert ((w_b is None) and (num_gs == 1)), err_str
super(BasicTransform, self).__init__()
self.a = nn.Conv2d(w_in, w_out, 3... |
def load_weights_for_instance(model_instance):
from models.eye_net import EyeNet
from models.refine_net import RefineNet
if isinstance(model_instance, EyeNet):
model_fname = 'eve_eyenet_'
model_fname += (config.eye_net_rnn_type if config.eye_net_use_rnn else 'static')
model_fname += ... |
class SqliteAsDict():
def __init__(self, db):
cursor = db.cursor()
cursor.execute("PRAGMA synchronous='OFF'")
cursor.execute('PRAGMA locking_mode=EXCLUSIVE')
self.db = db
self.c = cursor
def __getitem__(self, key):
self.c.execute('SELECT sequence FROM sequences WH... |
def segment_signal(args):
(data_root, wav_file) = args
wlen = 3200
wshift = 80
en_th = 0.3
smooth_window = 40
smooth_th_low = 0.25
smooth_th_high = 0.6
avoid_sentences_less_that = 24000
wav_path = os.path.join(data_root, wav_file)
(signal, fs) = sf.read(wav_path)
signal = (si... |
class DownloadError(Exception):
    """Raised when a dataset download fails.

    The exception message embeds the target output and the underlying error
    using ``FORMAT_MSG``.
    """
    FORMAT_MSG = 'Unable to download the dataset: {output} - {err}'
    def __init__(self, output, err):
        super().__init__(self.FORMAT_MSG.format(output=output, err=err))
def draw_grid(rows, cols, cell_size=50, fill='black', line_color='black'):
height = (rows * cell_size)
width = (cols * cell_size)
image = Image.new(mode='RGB', size=(width, height), color=fill)
draw = ImageDraw.Draw(image)
y_start = 0
y_end = image.height
step_size = cell_size
for x in r... |
def eval_step(params, batch):
    """One evaluation step: metrics for *batch* under model parameters *params*.

    NOTE: ``batch.pop('labels')`` mutates the caller's dict — the ``labels``
    key is consumed here and absent afterwards.
    """
    targets = batch.pop('labels')
    # Mask positions whose label id is <= 0 (presumably padding — TODO confirm
    # against the tokenizer's padding/label conventions).
    token_mask = jnp.where((targets > 0), 1.0, 0.0)
    logits = model(**batch, params=params, train=False)[0]
    return compute_metrics(logits, targets, token_mask)
def assert_generates(testdir, raw_schema, expected, parameter):
schema = schemathesis.from_dict(raw_schema)
attribute = ('path_parameters' if (parameter == 'path') else parameter)
(case=schema['/teapot']['GET'].as_strategy())
def test(case):
assert (getattr(case, attribute) in expected)
test... |
def check_beliefs(content: str, level: int) -> None:
expected_beliefs = {1: {'Sally': {'marble A': 'basket S'}, 'Anne': {'marble A': 'basket A'}}, 2: {'Sally': {'marble A': 'sofa', 'marble B': 'lost'}, 'Anne': {'marble A': 'green box', 'marble B': 'basket A'}, 'Bob': {'marble B': 'basket A'}, 'Charlie': {'marble A'... |
def is_a_private_model(model):
    """Return True for model names that should be treated as private.

    A model is private when it is listed in ``PRIVATE_MODELS`` or its name
    ends with ``Wrapper``, ``Encoder`` or ``Decoder`` (internal building
    blocks rather than public models).
    """
    if model in PRIVATE_MODELS:
        return True
    # str.endswith accepts a tuple of suffixes — one call replaces the
    # original chain of three separate endswith checks.
    return model.endswith(('Wrapper', 'Encoder', 'Decoder'))
class Modality(ABC):
def build_projector(self, lm_hidden_size: int) -> nn.Module:
pass
def name(self) -> str:
pass
def token(self) -> str:
pass
def data_key(self) -> str:
pass
def token_width(self) -> int:
pass
_property
def token_idx(self) -> int:
... |
_level_function()
def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False, *, highlevel=True, behavior=None, attrs=None):
    """Element-wise closeness test for awkward arrays (``numpy.isclose`` analogue).

    NOTE(review): the ``yield`` makes this a generator, matching awkward's
    high-level-function decorator protocol (the decorator appears stripped
    by extraction, see the fragment above): the dispatcher consumes the
    yielded operands for backend dispatch, then the ``_impl`` result is
    delivered via the generator's return value.  Confirm against the
    awkward-array source.
    """
    (yield (a, b))
    return _impl(a, b, rtol, atol, equal_nan, highlevel, behavior, attrs)
def insert_translation_into_existing_dataset(data: List[Data], translations: List[str]) -> List[Data]:
    """Append ``translations[i]`` to ``data[i].translations`` for every record.

    Mutates the records in place and returns the same *data* list.
    """
    for position, record in enumerate(data):
        record.translations.append(translations[position])
    return data
class Scale_only_img(object):
def __init__(self, scale):
self.scale = scale
def __call__(self, sample):
img = sample['image']
mask = sample['label']
(w, h) = img.size
ow = int((w * self.scale))
oh = int((h * self.scale))
img = img.resize((ow, oh), Image.BI... |
def get_dataset_dois(files, datasets):
result = []
for doi in datasets:
Zenodo(doi)
for file in files:
if (file in datasets[doi]['contents'].values()):
result.append(doi)
else:
for zip_file in datasets[doi]['zip_files']:
... |
def fit_one_epoch(epoch, model, train_loader, optimizer, steps_per_epoch, lr_params):
(TOTAL_STEPS, WARMPUP_STEPS, LR_INIT, LR_END) = lr_params
for (epoch_step, data) in enumerate(train_loader):
GLOBAL_STEPS = (((epoch * steps_per_epoch) + epoch_step) + 1)
(batch_imgs, batch_boxes, batch_classes... |
class MSELoss(LossBase):
def __init__(self, pred=None, target=None, reduction='mean'):
super(MSELoss, self).__init__()
self._init_param_map(pred=pred, target=target)
assert (reduction in ('mean', 'sum', 'none'))
self.reduction = reduction
def get_loss(self, pred, target):
... |
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work_dir', help='the dir to save logs and models')
parser.add_argument('--checkpoint', help='the dir to checkpoint which the model read f... |
def fuse_first_mul_add(net, params, removed_tensors):
net = copy.deepcopy(net)
params = copy.deepcopy(params)
for ((i, current), (j, next_)) in pairwise(enumerate(net.op)):
if ((current.type != 'Mul') or (next_.type != 'Add')):
continue
if (next_.input[0] != current.output[0]):
... |
def eval(config):
n_support = config['data.test_support']
n_query = config['data.test_query']
(w, h, c) = list(map(int, config['model.x_dim'].split(',')))
model = Prototypical(n_support, n_query, w, h, c)
model_path = f"{config['model.save_path']}"
model.load(model_path)
print('Model loaded.... |
def force_fp32(apply_to=None, out_fp16=False):
def force_fp32_wrapper(old_func):
(old_func)
def new_func(*args, **kwargs):
if (not isinstance(args[0], torch.nn.Module)):
raise TypeError('_fp32 can only be used to decorate the method of nn.Module')
if (not (has... |
def cluster_acc(y_true, y_pred):
    """Clustering accuracy under the best cluster-to-label matching.

    Sums the contingency weights ``w`` over the matched index pairs ``ind``
    returned by ``best_cluster_fit`` and normalizes by the sample count.
    """
    _, ind, w = best_cluster_fit(y_true, y_pred)
    matched = sum(w[i, j] for (i, j) in ind)
    return (matched * 1.0) / y_pred.size
class InitialPopulationProvider():
def __init__(self, test_cluster: ModuleTestCluster, test_factory: tf.TestFactory, constant_provider: ConstantProvider):
self._testcases: list[dtc.DefaultTestCase] = []
self._test_cluster: ModuleTestCluster = test_cluster
self._test_factory: tf.TestFactory =... |
class SparseHalfCheetahEnv(MujocoEnv, utils.EzPickle):
def __init__(self):
MujocoEnv.__init__(self, 'half_cheetah.xml', 5)
utils.EzPickle.__init__(self)
def _step(self, action):
xposbefore = self.model.data.qpos[(0, 0)]
self.do_simulation(action, self.frame_skip)
xposafte... |
def sympy_integrator(expression, v, a=None, b=None):
    """Integrate a Sage *expression* with respect to *v* via SymPy.

    Indefinite when *a* is None, otherwise definite over ``[a, b]``.
    The result is converted back to a Sage object.
    """
    import sympy
    sym_expr = expression._sympy_()
    sym_var = v._sympy_()
    if a is None:
        integral = sympy.integrate(sym_expr, sym_var)
    else:
        integral = sympy.integrate(sym_expr, (sym_var, a._sympy_(), b._sympy_()))
    return integral._sage_()
class halfgennorm_gen(rv_continuous):
def _shape_info(self):
return [_ShapeInfo('beta', False, (0, np.inf), (False, False))]
def _pdf(self, x, beta):
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return ((np.log(beta) - sc.gammaln((1.0 / beta))) - (x ** beta))
... |
class NormalizedInputMLPModel(MLPModel):
def __init__(self, input_shape, output_dim, name='NormalizedInputMLPModel', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, hidden_w_init=tf.glorot_uniform_initializer(), hidden_b_init=tf.zeros_initializer(), output_nonlinearity=None, output_w_init=tf.glorot_uniform_i... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.