| code (string, lengths 101 to 5.91M) |
|---|
def query_yes_no(question, default='yes'):
valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
if (default is None):
prompt = ' [y/n] '
elif (default == 'yes'):
prompt = ' [Y/n] '
elif (default == 'no'):
prompt = ' [y/N] '
else:
raise ValueError(("invalid default answer: '%s'" % default))
while True:
sys.stdout.write((question + prompt))
choice = input().lower()
if ((default is not None) and (choice == '')):
return valid[default]
elif (choice in valid):
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n") |
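# Example use of query_yes_no above (hypothetical prompt; the defining module must `import sys`):
#   if query_yes_no('Overwrite existing output?', default='no'):
#       ...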
def before_generate_case(context: HookContext, strategy: st.SearchStrategy[Case]) -> st.SearchStrategy[Case]:
seen = set()
def is_not_seen(case: Case) -> bool:
hashed = hash(case)
if (hashed not in seen):
seen.add(hashed)
return True
return False
return strategy.filter(is_not_seen) |
@pytest.mark.parametrize('statement_type', [stmt.IntPrimitiveStatement, stmt.FloatPrimitiveStatement, stmt.StringPrimitiveStatement, stmt.BytesPrimitiveStatement, stmt.BooleanPrimitiveStatement, stmt.ComplexPrimitiveStatement, stmt.ClassPrimitiveStatement])
def test_primitive_statement_value_none(statement_type, default_test_case):
statement = statement_type(default_test_case, None)
assert (statement.value is not None) |
class StayAgent(Agent):
def __init__(self, sim_threads=None):
self.sim_threads = sim_threads
def action(self, state):
return Action.STAY
def direct_action(self, obs):
return ([Action.ACTION_TO_INDEX[Action.STAY]] * self.sim_threads) |
class MessageCollection(object):
def __init__(self):
self.messages = set()
def error(self, pos, message):
self.messages.add((pos, True, message))
def warning(self, pos, message):
self.messages.add((pos, False, message))
def report(self):
for (pos, is_error, message) in sorted(self.messages):
if is_error:
error(pos, message)
else:
warning(pos, message, 2) |
def get_annotation(mtg_path, split_type=False):
if split_type:
train = read_file(os.path.join(mtg_path, 'split-0', f'{split_type}-train.tsv'))
validation = read_file(os.path.join(mtg_path, 'split-0', f'{split_type}-validation.tsv'))
test = read_file(os.path.join(mtg_path, 'split-0', f'{split_type}-test.tsv'))
else:
train = read_file(os.path.join(mtg_path, 'split-0', 'autotagging-train.tsv'))
validation = read_file(os.path.join(mtg_path, 'split-0', 'autotagging-validation.tsv'))
test = read_file(os.path.join(mtg_path, 'split-0', 'autotagging-test.tsv'))
total = {}
total.update(train)
total.update(validation)
total.update(test)
annotation = {}
for (track_id, path_tags) in total.items():
annotation[track_id] = {'track_id': track_id, 'path': path_tags['path'], 'tag': [tag.split('---')[1] for tag in path_tags['tag']]}
if split_type:
_json_dump(os.path.join(mtg_path, f'{split_type}_annotation.json'), annotation)
else:
_json_dump(os.path.join(mtg_path, 'annotation.json'), annotation)
return pd.DataFrame(total).T |
def dma_reg_fmt_base(reg: Union[(DMA_tensor_0x000__reg, DMA_matrix_reg)]):
if isinstance(reg, DMA_tensor_0x000__reg):
addr = [(reg.src_start_addr_h8, reg.src_start_addr_l32), (reg.dst_start_addr_h8, reg.dst_start_addr_l32)]
elif isinstance(reg, DMA_matrix_reg):
addr = [(reg.src_start_addr_l8, reg.src_start_addr_h32), (reg.dst_start_addr_l8, reg.dst_start_addr_h32)]
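# the 64-bit lane mask is assembled from the high and low 32-bit register halves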
lane_mask = ((reg.localmem_mask_h32 * (2 ** 32)) + reg.localmem_mask_l32)
opd0 = dict(address=dma_addr(*addr[0]), dtype=DType(reg.src_data_format), shape=tuple((reg[f'src_{d}size'] for d in 'nchw')), stride=tuple((reg[f'src_{d}stride'] for d in 'nchw')), layout=Layout.DMAstride(lane_mask))
res0 = dict(address=dma_addr(*addr[1]), dtype=DType(reg.src_data_format), shape=tuple((reg[f'dst_{d}size'] for d in 'nchw')), stride=tuple((reg[f'dst_{d}stride'] for d in 'nchw')), layout=Layout.DMAstride(lane_mask))
if reg.nchw_copy:
res0['shape'] = opd0['shape']
attr = dict()
if (lane_mask != ((2 ** 64) - 1)):
attr['lane_mask'] = hex(lane_mask)
if reg.fill_constant_en:
attr = {}
opd0 = dict(address=reg.constant_value, dtype=DType(reg.src_data_format), is_const=True)
return (res0, attr, opd0) |
@dataclass  # the annotated fields below rely on a dataclass-generated __init__ (assumes from dataclasses import dataclass)
class _MutationMetrics():
num_created_mutants: int
num_killed_mutants: int
num_timeout_mutants: int
def get_score(self) -> float:
divisor = (self.num_created_mutants - self.num_timeout_mutants)
assert (divisor >= 0)
if (divisor == 0):
return 1.0
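# e.g. (hypothetical numbers): 10 created, 2 timed out, 4 killed -> score = 4 / (10 - 2) = 0.5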
return (self.num_killed_mutants / divisor) |
@test_utils.test(arch=get_host_arch_list())
def test_order_vector():
X = 4
Y = 2
Z = 2
S = 4
a = ti.Vector.field(Z, ti.i32, shape=(X, Y), order='ij', layout=ti.Layout.AOS)
b = ti.Vector.field(Z, ti.i32, shape=(X, Y), order='ji', layout=ti.Layout.AOS)
c = ti.Vector.field(Z, ti.i32, shape=(X, Y), order='ij', layout=ti.Layout.SOA)
d = ti.Vector.field(Z, ti.i32, shape=(X, Y), order='ji', layout=ti.Layout.SOA)
@ti.kernel
def fill():
for (i, j) in b:
a[(i, j)] = [i, j]
b[(i, j)] = [i, j]
c[(i, j)] = [i, j]
d[(i, j)] = [i, j]
@ti.kernel
def get_field_addr(a: ti.template(), i: ti.i32, j: ti.i32) -> ti.u64:
return ti.get_addr(a, [i, j])
fill()
a_addr = get_field_addr(a, 0, 0)
b_addr = get_field_addr(b, 0, 0)
c_addr = get_field_addr(c, 0, 0)
d_addr = get_field_addr(d, 0, 0)
for i in range(X):
for j in range(Y):
assert (a[(i, j)] == b[(i, j)] == c[(i, j)] == d[(i, j)] == [i, j])
for k in range(Z):
assert ((a_addr + ((((i * (Y * Z)) + (j * Z)) + k) * S)) == get_field_addr(a.get_scalar_field(k), i, j))
assert ((b_addr + ((((j * (X * Z)) + (i * Z)) + k) * S)) == get_field_addr(b.get_scalar_field(k), i, j))
assert ((c_addr + ((((k * (X * Y)) + (i * Y)) + j) * S)) == get_field_addr(c.get_scalar_field(k), i, j))
assert ((d_addr + ((((k * (Y * X)) + (j * X)) + i) * S)) == get_field_addr(d.get_scalar_field(k), i, j)) |
class StaticCuboid(RigidObject):
def __init__(self, pybullet_client_ids, name, size=np.array([0.065, 0.065, 0.065]), position=np.array([0.0, 0.0, 0.0425]), orientation=np.array([0, 0, 0, 1]), color=np.array([1, 0, 0]), lateral_friction=1):
super(StaticCuboid, self).__init__(pybullet_client_ids=pybullet_client_ids, name=name, size=size, initial_position=position, initial_orientation=orientation, mass=0, color=color, fixed_bool=True, lateral_friction=lateral_friction, spinning_friction=0.001, restitution=0, initial_linear_velocity=[0, 0, 0], initial_angular_velocity=[0, 0, 0])
def _create_object(self, pybullet_client_id, **kwargs):
position = np.array(self._initial_position)
position[(- 1)] += WorldConstants.FLOOR_HEIGHT
shape_id = pybullet.createCollisionShape(shapeType=pybullet.GEOM_BOX, halfExtents=(np.array(self._size) / 2), physicsClientId=pybullet_client_id)
block_id = pybullet.createMultiBody(baseCollisionShapeIndex=shape_id, basePosition=position, baseOrientation=self._initial_orientation, baseMass=self._mass, physicsClientId=pybullet_client_id)
return (shape_id, block_id)
def _define_type_id(self):
self._type_id = 10
return
def get_recreation_params(self):
recreation_params = dict()
recreation_params['name'] = self._name
recreation_params['size'] = self._size
(position, orientation) = pybullet.getBasePositionAndOrientation(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
position = np.array(position)
position[(- 1)] -= WorldConstants.FLOOR_HEIGHT
recreation_params['position'] = position
recreation_params['orientation'] = orientation
recreation_params['color'] = self._color
recreation_params['lateral_friction'] = self._lateral_friction
return copy.deepcopy(recreation_params) |
class MemRef(MemRefBase):
device = Target.BM1684X
def __init__(self, address, shape, dtype: DType, stride=None, layout=None):
super().__init__(address, shape, dtype, stride, layout)
if ((self.mtype == MType.R) and (layout != Layout.stride)):
self.stride = local_layout_to_stride(self)
@property
def r_addr(self):
if (self.mtype == MType.UNKNOWN):
return self.address
r_addr = (self.address - memmap[self.mtype][0])
return r_addr
def get_mtype(self, address) -> MType:
return get_memory_type(address)
@property
def npu_offset(self):
assert (self.mtype == MType.R)
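# index of the NPU lane that the local-memory-relative address falls into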
return (self.r_addr // LANE_SIZE)
@property
def bank_index(self):
assert (self.mtype == MType.R)
addr_len = (self.r_addr - (self.npu_offset * LANE_SIZE))
return (addr_len // BANK_SIZE)
@property
def bank_offset(self):
assert (self.mtype == MType.R)
addr_len = (self.r_addr - (self.npu_offset * LANE_SIZE))
return (addr_len % BANK_SIZE)
@lru_cache()  # assumes from functools import lru_cache
def local_shape(self):
NPU_OFFSET = self.npu_offset
(n, c, h, w, *_) = (*self.shape, 1, 1)
def get_cnum(c):
return ((((c + NPU_OFFSET) + NPU_NUM) - 1) // NPU_NUM)
if (self.layout == Layout._64IC):
return (((c + 63) // 64), get_cnum(n), h, (w * 64))
if (self.layout == Layout._32IC):
return (((c + 31) // 32), get_cnum(n), h, (w * 32))
if (self.layout == Layout._1IC):
return (c, get_cnum(n), h, w)
if (self.layout == Layout.matrix):
w = self.layout.args[0]
return (n, get_cnum((((c + w) - 1) // w)), 1, w)
if (self.layout == Layout.matrix2):
return (1, get_cnum(n), 1, c)
if (self.layout == Layout.DMA4Bank):
return (n, get_cnum(c), h, w)
if (self.layout == Layout.DMAstride):
return (n, get_cnum(c), h, w)
if (self.layout == Layout.DMAmatrix):
w = self.layout.args[1]
return (n, get_cnum((((c + w) - 1) // w)), 1, w)
if (self.layout == Layout.DMAlinear):
return self.shape
return (n, get_cnum(c), h, w)
@lru_cache()  # assumes from functools import lru_cache
def local_stride(self):
(n, c, h, w, *_) = (*self.shape, 1, 1)
NPU_OFFSET = self.npu_offset
def get_eu_align_stride(shape):
(_, _c, _h, _w) = shape
align_type = (64 // self.itemsize)
c_stride = (((((_w * _h) + align_type) - 1) // align_type) * align_type)
n_stride = (((((_c + NPU_OFFSET) + NPU_NUM) - 1) // NPU_NUM) * c_stride)
return (n_stride, c_stride, _w, 1)
if (self.layout == Layout._64IC):
return (((64 * h) * w), (((((c + 63) // 64) * 64) * h) * w), (64 * w), 1)
if (self.layout == Layout._32IC):
return (((32 * h) * w), (((((c + 31) // 32) * 32) * h) * w), (32 * w), 1)
if (self.layout == Layout._1IC):
return ((h * w), ((c * h) * w), w, 1)
if (self.layout == Layout.matrix):
w = self.layout.args[0]
shape = (n, (((c + w) - 1) // w), 1, w)
return get_eu_align_stride(shape)
if (self.layout == Layout.matrix2):
shape = (1, n, 1, c)
return get_eu_align_stride(shape)
return self.stride |
def filter_pathlist(path_list, expr):
if (expr == 'all'):
return path_list
elif (expr[:2] == 'I:'):
fl = eval(f'path_list[{expr[2:]}]')
if (type(fl) == str):
return [fl]
return fl
elif (expr[:2] == 'R:'):
regexp = re.compile(expr[2:])
return [e for e in path_list if regexp.match(e.name)]
elif isinstance(expr, list):
not_included = (set(path_list) - set(expr))
assert (not not_included), f'Some experiences could not be found: {not_included}'
return expr
raise NotImplementedError |
class InnerAngleRepresentation():
def __call__(self, p1s: tf.Tensor, p2s: tf.Tensor, p3s: tf.Tensor) -> tf.Tensor:
v1 = (p1s - p2s)
v2 = (p3s - p2s)
v1_norm = get_vectors_norm(v1)
v2_norm = get_vectors_norm(v2)
slopes = tf.reduce_sum((v1_norm * v2_norm), axis=3)
angles = tf.acos(slopes)
angles = tf.where(tf.math.is_nan(angles), 0.0, angles)
return angles |
def save_to_hub(pred_nets, domain_net, theory_hub, theory_type, theory_add_threshold, is_Lagrangian):
if load_previous:
theory_hub = load_model_dict_at_theory_hub(pickle.load(open(filename_hub, 'rb')))
added_theory_info = theory_hub.add_theories(name=(hub_theory_name if (theory_type == 'neural') else (hub_theory_name + '_simplified')), pred_nets=pred_nets, domain_net=domain_net, dataset=dataset, threshold=theory_add_threshold, is_Lagrangian=is_Lagrangian)
if (theory_type == 'neural'):
name = 'theory'
elif (theory_type == 'simplified'):
name = 'simplified_theory'
else:
raise ValueError(f'invalid theory_type: {theory_type}')
info_dict[env_name]['added_{0}_info'.format(name)] = added_theory_info
pickle.dump(theory_hub.model_dict, open(filename_hub, 'wb')) |
class BasicDeconv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, activate=None):
super(BasicDeconv, self).__init__()
bias = (False if (activate == 'bn') else True)
self.tconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride, padding=0, bias=bias)
if (activate == 'bn'):
self.bn = nn.BatchNorm2d(out_channels)
elif (activate == 'in'):
self.bn = nn.InstanceNorm2d(out_channels)
elif (activate == None):
self.bn = None
def forward(self, x):
x = self.tconv(x)
if (self.bn is not None):
x = self.bn(x)
return F.relu(x, inplace=True) |
def import_scheme(scheme_name):
full_name = f'{SCHEME_LIB}.{scheme_name}.{SCHEME_CLS}'
(module_name, object_name) = full_name.rsplit('.', 1)
imported_module = importlib.import_module(module_name)
return getattr(imported_module, object_name) |
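# Example for import_scheme above (hypothetical names): with SCHEME_LIB = 'schemes' and
# SCHEME_CLS = 'Scheme', import_scheme('baseline') resolves to the class schemes.baseline.Scheme.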
@register_predictor('euler_maruyama')  # assumed registry decorator name
class EulerMaruyamaPredictor(Predictor):
def __init__(self, sde, score_fn, probability_flow=False):
super().__init__(sde, score_fn, probability_flow=probability_flow)
def update_fn(self, x, t, *args, **kwargs):
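# negative step size: the reverse-time SDE is integrated backwards from t = T to t = 0 in N steps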
dt = ((- 1.0) / self.rsde.N)
z = torch.randn_like(x)
(f, g) = self.rsde.sde(x, t, *args, **kwargs)
x_mean = (x + (f * dt))
if (g.ndim < x.ndim):
g = g.view(*g.size(), *((1,) * (x.ndim - g.ndim)))
x = (x_mean + ((g * np.sqrt((- dt))) * z))
return (x, x_mean) |
def main(args):
imgaug.seed(42)
torch.random.manual_seed(42)
random.seed(42)
if os.path.isabs(args.cfg):
cfg.merge_from_file(args.cfg)
else:
cfg.merge_from_file(os.path.join(RepoPaths.configs_dir(), args.cfg))
if (args.dataset == 'coco'):
dataset = CocoDataLoader(CocoPaths.images_dir(), CocoPaths.ids_file(), category_agnostic=False)
elif (args.dataset == 'mapillary'):
dataset = MapillaryDataLoader(MapillaryPaths.images_dir(), MapillaryPaths.ids_file())
elif (args.dataset == 'pascalvoc'):
dataset = PascalVOCDataLoader(PascalVOCPaths.images_dir(), PascalVOCPaths.ids_file(), category_agnostic=False)
elif (args.dataset == 'ytvis'):
dataset = YoutubeVISDataLoader(YoutubeVISPaths.training_base_dir(), YoutubeVISPaths.train_vds_file(), cfg.TRAINING.TRACKER.MAX_ITERATIONS, category_agnostic=False, single_instance_duplication=cfg.DATA.YOUTUBE_VIS.SINGLE_INSTANCE_DUPLICATION)
elif (args.dataset == 'davis'):
dataset = DavisDataLoader(DavisUnsupervisedPaths.trainval_base_dir(), DavisUnsupervisedPaths.train_vds_file(), apply_augmentation=False, samples_to_create=cfg.DATA.DAVIS.TRAINING_SUBSEQUENCES, single_instance_duplication=cfg.DATA.DAVIS.SINGLE_INSTANCE_DUPLICATION)
elif (args.dataset == 'kittimots'):
dataset = MOTSDataLoader(KITTIMOTSPaths.train_images_dir(), KITTIMOTSPaths.train_vds_file(), samples_to_create=cfg.TRAINING.TRACKER.MAX_ITERATIONS, apply_augmentation=cfg.DATA.KITTI_MOTS.AUGMENTATION, frame_gap_lower=cfg.DATA.KITTI_MOTS.FRAME_GAP_LOWER, frame_gap_upper=cfg.DATA.KITTI_MOTS.FRAME_GAP_UPPER)
else:
raise ValueError('Invalid dataset name given')
visualize_data_loader_output(dataset, args.num_workers, args.batch_size, args.shuffle) |
def normal_kld(q_mean, q_std, p_mean, p_std):
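# closed-form KL divergence between diagonal Gaussians, summed over feature dimensions and
# averaged over the batch; the expression below equals
# log(q_std / p_std) + (p_std**2 + (p_mean - q_mean)**2) / (2 * q_std**2) - 1/2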
return (((((p_std.pow(2) + (p_mean - q_mean).pow(2)).div(q_std.pow(2)) * 0.5) - 0.5) + q_std.log()) - p_std.log()).sum(1).mean() |
def write_data(data, folder):
ase.io.write(str((folder / 'data.traj')), data.as_Atoms(), format='traj', parallel=False) |
def _impl(array, highlevel, behavior, attrs):
from awkward._connect.pyarrow import import_pyarrow_compute
pc = import_pyarrow_compute('c')
with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
layout = ctx.unwrap(array)
out = ak._do.recursively_apply(layout, ak.operations.str._get_ufunc_action(pc.utf8_is_numeric, pc.utf8_is_numeric, bytestring_to_string=True))
return ctx.wrap(out, highlevel=highlevel) |
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, norm_type='batch', stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = normalization(planes, norm_type)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
self.bn2 = normalization(planes, norm_type)
self.conv3 = nn.Conv2d(planes, (planes * Bottleneck.expansion), kernel_size=1, bias=False)
self.bn3 = normalization((planes * Bottleneck.expansion), norm_type)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
if (self.stride != 1):
out = F.avg_pool2d(out, kernel_size=self.stride, stride=self.stride)
out = self.conv3(out)
out = self.bn3(out)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out |
def checkpoint(acc, epoch):
print('Saving..')
state = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'acc': acc, 'epoch': epoch, 'seed': args.manualSeed}
torch.save(state, (args.save_dir + 'checkpoint.t7')) |
class ImprimitiveLocalComponent(LocalComponentBase):
def __init__(self, newform, prime, twist_factor, min_twist, chi):
LocalComponentBase.__init__(self, newform, prime, twist_factor)
self._min_twist = min_twist
self._chi = chi
def is_primitive(self):
return False
def minimal_twist(self):
return self._min_twist
def twisting_character(self):
return self._chi
def species(self):
return self._min_twist.species()
def _repr_(self):
return (LocalComponentBase._repr_(self) + (', twist of representation of conductor %s^%s' % (self.prime(), self._min_twist.conductor())))
def characters(self):
minchars = self._min_twist.characters()
G = minchars[0].parent()
chi = self._chi
if (self.species() == 'Supercuspidal'):
H = SmoothCharacterGroupQp(self.prime(), chi.base_ring())
Hchi = H.from_dirichlet((~ chi))
Gchi = G.compose_with_norm(Hchi)
else:
Gchi = G.from_dirichlet((~ chi))
return Sequence([(c * Gchi) for c in minchars], cr=True, universe=G)
def check_tempered(self):
self.minimal_twist().check_tempered() |
@pytest.mark.spark
@pytest.mark.parametrize('dataset, column, number_of_unique', [('full_spark_dataset', 'user_id', 3), ('full_pandas_dataset', 'user_id', 3), ('full_spark_dataset', 'item_id', 4), ('full_pandas_dataset', 'item_id', 4)])
def test_number_of_unique_values(dataset, column, number_of_unique, request):
dataset = request.getfixturevalue(dataset)['interactions']
assert (nunique(dataset, column) == number_of_unique) |
def create_network():
num_conv_channels = 4
kernel = 3
conv_w1 = get_random_weights(kernel, num_conv_channels, num_conv_channels)
conv_w2 = get_random_weights(kernel, num_conv_channels, num_conv_channels)
inputs = Input(shape=(16, 16, num_conv_channels))
x = Conv2D(num_conv_channels, kernel, use_bias=False)(inputs)
outputs = Conv2DTranspose(num_conv_channels, kernel, use_bias=False)(x)
model = Model(inputs=inputs, outputs=outputs)
model.layers[1].set_weights([conv_w1])
model.layers[2].set_weights([conv_w2])
return model |
class Assembly():
def __init__(self, path, assembler):
self.assembler = assembler
if (not os.path.exists(path)):
raise Error(('Input path to Assembly.__init__ not found: ' + path))
elif os.path.isdir(path):
self.assembler_dir = os.path.abspath(path)
else:
self.contigs_fasta = os.path.abspath(path)
self.assembler_dir = None
self._set_filenames()
@staticmethod
def _file_exists(filename):
if os.path.exists(filename):
return filename
else:
return None
def _set_filenames(self):
self.contigs_gfa = None
self.contigs_fastg = None
self.contigs_paths = None
self.assembly_graph_fastg = None
if (self.assembler_dir is None):
return
contigs_fasta = os.path.join(self.assembler_dir, 'contigs.fasta')
self.contigs_fasta = self._file_exists(contigs_fasta)
if (self.contigs_fasta is None):
raise Error(('Error finding contigs file: ' + contigs_fasta))
self.contigs_gfa = self._file_exists(os.path.join(self.assembler_dir, 'contigs.gfa'))
self.contigs_fastg = self._file_exists(os.path.join(self.assembler_dir, 'contigs.fastg'))
self.contigs_paths = self._file_exists(os.path.join(self.assembler_dir, 'contigs.paths'))
self.assembly_graph_fastg = self._file_exists(os.path.join(self.assembler_dir, 'assembly_graph.fastg'))
if (self.assembler == 'spades'):
if ((None == self.contigs_fastg == self.contigs_paths == self.assembly_graph_fastg) or ((self.contigs_fastg is None) and (None in {self.contigs_paths, self.assembly_graph_fastg})) or ((self.contigs_fastg is not None) and ((self.contigs_paths is not None) or (self.assembly_graph_fastg is not None)))):
error_message = '\n'.join([('Error finding SPAdes graph files in the directory ' + self.assembler_dir), 'Expected either:', ' contigs.fastg (SPAdes <3.6.1)', 'or:', ' contigs.paths and assembly_graph.fastg (SPAdes >3.6.1)'])
raise Error(error_message)
elif (self.assembler == 'canu'):
if ((self.contigs_fasta is None) or (self.contigs_gfa is None)):
raise Error('Error finding canu contigs fasta and/or gfa file')
else:
raise Error((('Assembler "' + self.assembler) + '" not recognised. Cannot continue'))
def get_contigs(self):
contigs = {}
pyfastaq.tasks.file_to_dict(self.contigs_fasta, contigs)
return contigs
@classmethod
def _circular_contigs_from_spades_before_3_6_1(cls, fastg_file):
seq_reader = pyfastaq.sequences.file_reader(fastg_file)
names = set([x.id.rstrip(';') for x in seq_reader if (':' in x.id)])
found_fwd = set()
found_rev = set()
for name in names:
l = name.split(':')
if (len(l) != 2):
continue
if (l[0] == l[1]):
if (l[0][(- 1)] == "'"):
found_rev.add(l[0][:(- 1)])
else:
found_fwd.add(l[0])
return found_fwd.intersection(found_rev)
@classmethod
def _spades_contigs_paths_to_dict(cls, filename):
d = {}
node = None
with open(filename) as f:
for line in f:
if (node is None):
if (not line.startswith('NODE_')):
raise Error(('Error loading info from SPAdes contigs path file ' + filename))
node = line.rstrip()
else:
if (line.startswith('NODE_') or (node in d)):
raise Error(('Error loading info from SPAdes contigs path file ' + filename))
d[node] = line.rstrip()
node = None
return d
@classmethod
def _circular_edges_to_edge_numbers_dict(cls, circular_edges):
return {x.split('_')[1]: x for x in circular_edges}
@staticmethod
def _circular_contigs_from_spades_after_3_6_1(assembly_graph_fastg, contigs_paths):
circular_graph_edges = Assembly._circular_contigs_from_spades_before_3_6_1(assembly_graph_fastg)
circular_edge_dict = Assembly._circular_edges_to_edge_numbers_dict(circular_graph_edges)
paths_dict = Assembly._spades_contigs_paths_to_dict(contigs_paths)
circular_nodes = set()
for node in paths_dict:
if node.endswith("'"):
continue
edges = paths_dict[node].split(',')
rev_node = (node + "'")
if ((len(edges) != 1) or (rev_node not in paths_dict)):
continue
rev_edges = paths_dict[rev_node].split(',')
if (len(rev_edges) != 1):
continue
edge = list(edges)[0][:(- 1)]
edge_strand = list(edges)[0][(- 1)]
rev_edge = list(rev_edges)[0][:(- 1)]
rev_edge_strand = list(rev_edges)[0][(- 1)]
if (({'-', '+'} == {edge_strand, rev_edge_strand}) and (edge == rev_edge) and (edge in circular_edge_dict)):
circular_nodes.add(node)
return circular_nodes
@staticmethod
def _circular_contigs_from_canu_gfa(gfa_file):
self_matches = {}
other_matches = set()
with open(gfa_file) as f:
for line in f:
if line.startswith('L\t'):
(L, node1, dir1, node2, dir2, *the_rest) = line.rstrip().split('\t')
if (node1 == node2):
if (dir1 == dir2):
if (node1 not in self_matches):
self_matches[node1] = set()
self_matches[node1].add(dir1)
else:
other_matches.update({node1, node2})
return {x for x in self_matches if ((self_matches[x] == {'+', '-'}) and (x not in other_matches))}
def circular_contigs(self):
if (self.assembler == 'spades'):
if (self.contigs_fastg is not None):
return self._circular_contigs_from_spades_before_3_6_1(self.contigs_fastg)
elif (None not in [self.contigs_paths, self.assembly_graph_fastg]):
return self._circular_contigs_from_spades_after_3_6_1(self.assembly_graph_fastg, self.contigs_paths)
else:
return set()
elif (self.assembler == 'canu'):
return self._circular_contigs_from_canu_gfa(self.contigs_gfa)
else:
return set() |
class UnityCommunicationException(Exception):
def __init__(self, message):
self.message = message |
def evaluate(trainer: Algorithm, env, cfg: EvalConfig, timesteps_total):
if (trainer._timesteps_total is None):
trainer._timesteps_total = timesteps_total
eval_stats = {'timesteps_total': trainer._timesteps_total}
n_params = 0
for param in trainer.get_policy().model.parameters():
n_params += param.numel()
eval_stats['n_params'] = n_params
if CONTROL_DOORS:
if ('holey' in cfg.task.name):
door_stats = test_doors(trainer, env, cfg)
eval_stats |= door_stats
else:
print('Not a holey environment, so not evaluating door placement.')
return
if CONTROLS:
if (len(cfg.controls) == 1):
control_stats = test_control(trainer, env, cfg)
eval_stats.update(control_stats)
else:
print('Not a single control, so not evaluating control.')
if cfg.vary_map_shapes:
evaluate_map_shapes(trainer, env, cfg)
if GENERAL_EVAL:
general_stats = general_eval(trainer, env, cfg)
eval_stats.update(general_stats)
with open(os.path.join(cfg.log_dir, 'eval_stats.json'), 'w') as f:
json.dump(eval_stats, f, indent=4)
if (cfg.static_prob is not None):
evaluate_static(trainer, env, cfg) |
def recursive_partitioning(inp_file, out_dir, modulename, path):
modulenames = []
print('Partitioning input circuit...')
part_dir = os.path.join(out_dir, 'partition')
num_parts = ((number_of_cell(inp_file, path['yosys']) // 1500) + 1)
lsoracle_command = ((((((('read_verilog ' + inp_file) + '; partitioning ') + str(num_parts)) + ' -c ') + path['part_config']) + '; get_all_partitions ') + part_dir)
log_partition = os.path.join(out_dir, 'lsoracle.log')
with open(log_partition, 'w') as file_handler:
subprocess.call([path['lsoracle'], '-c', lsoracle_command], stderr=file_handler, stdout=file_handler)
partitioned = [((modulename + '_') + str(i)) for i in range(num_parts)]
toplevel = os.path.join(part_dir, (modulename + '.v'))
while (len(partitioned) > 0):
mod = partitioned.pop()
mod_path = os.path.join(part_dir, (mod + '.v'))
if (not os.path.exists(mod_path)):
continue
num_cell = number_of_cell(mod_path, path['yosys'])
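# partitions that are still larger than 2000 cells are split again and pushed back onto the stack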
if (num_cell > 2000):
num_part = ((num_cell // 2000) + 1)
lsoracle_command = ((((((('read_verilog ' + mod_path) + '; partitioning ') + str(num_part)) + ' -c ') + path['part_config']) + '; get_all_partitions ') + part_dir)
with open(log_partition, 'a') as file_handler:
subprocess.call([path['lsoracle'], '-c', lsoracle_command], stderr=file_handler, stdout=file_handler)
partitioned.extend([((mod + '_') + str(i)) for i in range(num_part)])
with open(toplevel, 'a') as top:
subprocess.call(['cat', mod_path], stdout=top)
os.remove(mod_path)
else:
modulenames.append(mod)
print('Number of partitions', len(modulenames))
return (modulenames, toplevel) |
class MaskFormerFeatureExtractor(MaskFormerImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class MaskFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use MaskFormerImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs) |
class InteractingLayer(nn.Module):
def __init__(self, embedding_size, head_num=2, use_res=True, scaling=False, seed=1024, device='cpu'):
super(InteractingLayer, self).__init__()
if (head_num <= 0):
raise ValueError('head_num must be an int > 0')
if ((embedding_size % head_num) != 0):
raise ValueError('embedding_size is not an integer multiple of head_num!')
self.att_embedding_size = (embedding_size // head_num)
self.head_num = head_num
self.use_res = use_res
self.scaling = scaling
self.seed = seed
self.W_Query = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
self.W_key = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
self.W_Value = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
if self.use_res:
self.W_Res = nn.Parameter(torch.Tensor(embedding_size, embedding_size))
for tensor in self.parameters():
nn.init.normal_(tensor, mean=0.0, std=0.05)
self.to(device)
def forward(self, inputs):
if (len(inputs.shape) != 3):
raise ValueError(('Unexpected inputs dimensions %d, expect to be 3 dimensions' % len(inputs.shape)))
querys = torch.tensordot(inputs, self.W_Query, dims=([(- 1)], [0]))
keys = torch.tensordot(inputs, self.W_key, dims=([(- 1)], [0]))
values = torch.tensordot(inputs, self.W_Value, dims=([(- 1)], [0]))
querys = torch.stack(torch.split(querys, self.att_embedding_size, dim=2))
keys = torch.stack(torch.split(keys, self.att_embedding_size, dim=2))
values = torch.stack(torch.split(values, self.att_embedding_size, dim=2))
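# raw attention scores: inner products between every pair of field embeddings, computed per head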
inner_product = torch.einsum('bnik,bnjk->bnij', querys, keys)
if self.scaling:
inner_product /= (self.att_embedding_size ** 0.5)
self.normalized_att_scores = F.softmax(inner_product, dim=(- 1))
result = torch.matmul(self.normalized_att_scores, values)
result = torch.cat(torch.split(result, 1), dim=(- 1))
result = torch.squeeze(result, dim=0)
if self.use_res:
result += torch.tensordot(inputs, self.W_Res, dims=([(- 1)], [0]))
result = F.relu(result)
return result |
class BenchmarkDiscreteTabular(BenchmarkDiscreteTabularBase):
def __init__(self, algo_dict: Dict=None, kargs_dict: Dict=None, num_exp: int=20, custom_metric_dict: Optional[Dict]={}, **kargs):
BenchmarkDiscreteTabularBase.__init__(self, algo_dict=algo_dict, num_exp=num_exp, kargs_dict=kargs_dict, custom_metric_dict=custom_metric_dict, **kargs)
def benchmark_variable_complexity(self, num_vars_list: List[int]=[2, 10, 20, 40], graph_density: float=0.1, T: int=1000, fn: Callable=(lambda x: x), coef: float=0.1, noise_fn: Callable=np.random.randn):
all_results = []
self.variant_values = num_vars_list
self.variant_name = 'Number of Variables'
for num_vars in num_vars_list:
noise_fn_list = ([noise_fn] * num_vars)
result_list = base_synthetic_tabular_benchmark(self.algo_dict, self.kargs_dict, noise_fn_list, num_vars=num_vars, graph_density=graph_density, T=T, num_exp=self.num_exp, fn=fn, coef=coef, discrete=True, nstates=5, custom_metric_dict=self.custom_metric_dict)
all_results.append(result_list)
self.results_full = all_results
def benchmark_sample_complexity(self, T_list: List[int]=[100, 500, 1000, 5000], num_vars: int=20, graph_density: float=0.1, fn: Callable=(lambda x: x), coef: float=0.1, noise_fn: Callable=np.random.randn):
all_results = []
self.variant_values = T_list
self.variant_name = 'Number of Samples'
for T in T_list:
noise_fn_list = ([noise_fn] * num_vars)
result_list = base_synthetic_tabular_benchmark(self.algo_dict, self.kargs_dict, noise_fn_list, num_vars=num_vars, graph_density=graph_density, T=T, num_exp=self.num_exp, fn=fn, coef=coef, discrete=True, custom_metric_dict=self.custom_metric_dict)
all_results.append(result_list)
self.results_full = all_results
def benchmark_graph_density(self, graph_density_list: List[float]=[0.05, 0.1, 0.2, 0.5], num_vars: int=20, T: int=1000, fn: Callable=(lambda x: x), coef: float=0.1, noise_fn: Callable=np.random.randn):
all_results = []
self.variant_values = graph_density_list
self.variant_name = 'Graph Density'
for graph_density in graph_density_list:
noise_fn_list = ([noise_fn] * num_vars)
result_list = base_synthetic_tabular_benchmark(self.algo_dict, self.kargs_dict, noise_fn_list, num_vars=num_vars, graph_density=graph_density, T=T, num_exp=self.num_exp, fn=fn, coef=coef, discrete=True, nstates=5, custom_metric_dict=self.custom_metric_dict)
all_results.append(result_list)
self.results_full = all_results |
def register_Ns3CallbackImplBase_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::ObjectBase*'])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'void'])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::NetDevice> '])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::Packet const> '])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned short'])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Address const&'])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::NetDevice::PacketType'])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ipv4Address const&'])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::Socket> '])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'bool'])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned int'])
cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Time'])
return |
def make_stub_as(asn: int, exchange: str):
stub_as = base.createAutonomousSystem(asn)
if (bot_desc.get(asn) == 'c2'):
botnet_server = stub_as.createHost('c2_server')
else:
botnet_server = stub_as.createHost('bot')
router = stub_as.createRouter('router0')
net = stub_as.createNetwork('net0')
botnet_server.joinNetwork('net0')
router.joinNetwork('net0')
router.joinNetwork(exchange)
if (bot_desc.get(asn) == 'c2'):
bot.install('c2_server')
sim.addBinding(Binding('c2_server', filter=Filter(asn=asn)))
else:
c: BotnetClientServer = bot_client.install('bot')
c.setServer(c2_server_ip)
sim.addBinding(Binding('bot', filter=Filter(asn=asn))) |
class SequentialNetwork(SequenceNetwork):
def __init_subclass__(cls, **kwargs):
warn(f'{cls.__name__} will be deprecated. Use `SequenceNetwork` instead.', DeprecationWarning, stacklevel=2)
super().__init_subclass__(**kwargs)
def __init__(self, *args, **kwargs):
warn(f'{self.__class__.__name__} will be deprecated. Use `SequenceNetwork` instead.', DeprecationWarning, stacklevel=2)
super().__init__(*args, **kwargs) |
def test_get_item_1d_errors():
with pytest.raises(IndexError, match='index [0-9]+ is out of bounds for dimension 0 with size [0-9]+'):
(x, y) = mamoDataset1.__getitem__(55)
with pytest.raises(IndexError):
(x, y) = mamoDataset1.__getitem__(5.5) |
def to_double(img):
img = np.atleast_3d(img)
channels = img.shape[2]
if (channels < 3):
img = np.tile(img, 3)
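# zero out NaNs, then min-max normalise the image into the [0, 1] range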
img[np.isnan(img)] = 0
img -= np.amin(img)
img /= np.amax(img)
return img |
def test_out_of_bounds():
left = ak.Array([1, 2, 3])
right = ak.Array([['lambda', 'sigma', 'eta', 'phi'], ['delta']])
with pytest.raises(np.AxisError):
ak.cartesian([left, right], axis=2) |
def test_case111():
url = (brokerIp + '/ngsi-ld/v1/entityOperations/upsert')
headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json', 'Link': '<{{link}}>; rel="http://www.w3.org/ns/json-ld#context"; type="application/ld+json"'}
r = requests.post(url, data=json.dumps(ld_data.subdata111), headers=headers)
print(r.content)
print(r.status_code)
assert (r.status_code == 204) |
def adjust_learning_rate(optimizers, init_lr, epoch):
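# step decay: halve the learning rate every 30 epochs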
lr = (init_lr * (0.5 ** (epoch // 30)))
for optimizer in optimizers:
for param_group in optimizer.param_groups:
param_group['lr'] = lr |
@pytest.mark.spark
@pytest.mark.parametrize('row_count', [None, 1000, 1500])
@pytest.mark.parametrize('column_count', [None, 2000, 1700])
@pytest.mark.usefixtures('interactions_spark', 'true_size')
def test_CSRConverter_user_column_counts(row_count, column_count, interactions_spark, true_size):
current_size = ((row_count if (row_count is not None) else true_size[0]), (column_count if (column_count is not None) else true_size[1]))
csr = CSRConverter(first_dim_column='user_id', second_dim_column='item_id', row_count=row_count, column_count=column_count).transform(interactions_spark)
assert (csr.shape == current_size) |
class FakeRolloutWorker(RolloutWorker):
def init_agent_interfaces(self, env_desc: Dict[(str, Any)], runtime_ids: Sequence[AgentID]) -> Dict[(AgentID, Any)]:
return {}
def init_actor_pool(self, env_desc: Dict[(str, Any)], rollout_config: Dict[(str, Any)], agent_mapping_func: Callable) -> ActorPool:
raise NotImplementedError
def init_servers(self):
pass
def rollout(self, runtime_strategy_specs: Dict[(str, StrategySpec)], stopping_conditions: Dict[(str, Any)], data_entrypoints: Dict[(str, str)], trainable_agents: List[AgentID]=None):
self.set_running(True)
return {}
def simulate(self, runtime_strategy_specs: Dict[(str, StrategySpec)]):
time.sleep(0.5)
return {}
def step_rollout(self, eval_step: bool, rollout_config: Dict[(str, Any)], dataset_writer_info_dict: Dict[(str, Any)]) -> List[Dict[(str, Any)]]:
pass
def step_simulation(self, runtime_strategy_specs_list: Dict[(str, StrategySpec)], rollout_config: Dict[(str, Any)]) -> Dict[(str, Any)]:
pass |
def register_Ns3Names_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::Names const &', 'arg0')])
cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')], is_static=True)
cls.add_method('Add', 'void', [param('std::string', 'path'), param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')], is_static=True)
cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Object >', 'context'), param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')], is_static=True)
cls.add_method('Clear', 'void', [], is_static=True)
cls.add_method('FindName', 'std::string', [param('ns3::Ptr< ns3::Object >', 'object')], is_static=True)
cls.add_method('FindPath', 'std::string', [param('ns3::Ptr< ns3::Object >', 'object')], is_static=True)
cls.add_method('Rename', 'void', [param('std::string', 'oldpath'), param('std::string', 'newname')], is_static=True)
cls.add_method('Rename', 'void', [param('std::string', 'path'), param('std::string', 'oldname'), param('std::string', 'newname')], is_static=True)
cls.add_method('Rename', 'void', [param('ns3::Ptr< ns3::Object >', 'context'), param('std::string', 'oldname'), param('std::string', 'newname')], is_static=True)
return |
class deeplab_xception_transfer_projection(deeplab_xception_transfer_basemodel):
def __init__(self, nInputChannels=3, n_classes=7, os=16, input_channels=256, hidden_layers=128, out_channels=256, transfer_graph=None, source_classes=20):
super(deeplab_xception_transfer_projection, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, os=os, input_channels=input_channels, hidden_layers=hidden_layers, out_channels=out_channels)
self.source_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=source_classes)
self.source_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
self.source_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)
self.source_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
self.transpose_graph = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=source_classes, end_nodes=n_classes)
self.fc_graph = gcn.GraphConvolution((hidden_layers * 3), hidden_layers)
def forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer=None):
(x, low_level_features) = self.xception_features(input)
x1 = self.aspp1(x)
x2 = self.aspp2(x)
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.concat_projection_conv1(x)
x = self.concat_projection_bn1(x)
x = self.relu(x)
x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
low_level_features = self.feature_projection_conv1(low_level_features)
low_level_features = self.feature_projection_bn1(low_level_features)
low_level_features = self.relu(low_level_features)
x = torch.cat((x, low_level_features), dim=1)
x = self.decoder(x)
source_graph = self.source_featuremap_2_graph(x)
source_graph1 = self.source_graph_conv1.forward(source_graph, adj=adj2_source, relu=True)
source_graph2 = self.source_graph_conv2.forward(source_graph1, adj=adj2_source, relu=True)
source_graph3 = self.source_graph_conv2.forward(source_graph2, adj=adj2_source, relu=True)
source_2_target_graph1_v5 = self.transpose_graph.forward(source_graph1, adj=adj3_transfer, relu=True)
source_2_target_graph2_v5 = self.transpose_graph.forward(source_graph2, adj=adj3_transfer, relu=True)
source_2_target_graph3_v5 = self.transpose_graph.forward(source_graph3, adj=adj3_transfer, relu=True)
graph = self.target_featuremap_2_graph(x)
source_2_target_graph1 = self.similarity_trans(source_graph1, graph)
graph = torch.cat((graph, source_2_target_graph1.squeeze(0), source_2_target_graph1_v5.squeeze(0)), dim=(- 1))
graph = self.fc_graph.forward(graph, relu=True)
graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True)
source_2_target_graph2 = self.similarity_trans(source_graph2, graph)
graph = torch.cat((graph, source_2_target_graph2, source_2_target_graph2_v5), dim=(- 1))
graph = self.fc_graph.forward(graph, relu=True)
graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True)
source_2_target_graph3 = self.similarity_trans(source_graph3, graph)
graph = torch.cat((graph, source_2_target_graph3, source_2_target_graph3_v5), dim=(- 1))
graph = self.fc_graph.forward(graph, relu=True)
graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True)
graph = self.target_graph_2_fea.forward(graph, x)
x = self.target_skip_conv(x)
x = (x + graph)
x = self.semantic(x)
x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
return x
def similarity_trans(self, source, target):
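# attention-style projection: cosine similarities between target and source node features are
# softmax-normalised and used to mix the source features into the target graph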
sim = torch.matmul(F.normalize(target, p=2, dim=(- 1)), F.normalize(source, p=2, dim=(- 1)).transpose((- 1), (- 2)))
sim = F.softmax(sim, dim=(- 1))
return torch.matmul(sim, source)
def load_source_model(self, state_dict):
own_state = self.state_dict()
new_state_dict = OrderedDict()
for (name, param) in state_dict.items():
name = name.replace('module.', '')
if (('graph' in name) and ('source' not in name) and ('target' not in name) and ('fc_' not in name) and ('transpose_graph' not in name)):
if ('featuremap_2_graph' in name):
name = name.replace('featuremap_2_graph', 'source_featuremap_2_graph')
else:
name = name.replace('graph', 'source_graph')
new_state_dict[name] = 0
if (name not in own_state):
if ('num_batch' in name):
continue
print('unexpected key "{}" in state_dict'.format(name))
continue
if isinstance(param, Parameter):
param = param.data
try:
own_state[name].copy_(param)
except:
print('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}, ...'.format(name, own_state[name].size(), param.size()))
continue
own_state[name].copy_(param)
missing = (set(own_state.keys()) - set(new_state_dict.keys()))
if (len(missing) > 0):
print('missing keys in state_dict: "{}"'.format(missing)) |
def main(args):
options = parser.parse_args(args)
build = confu.Build.from_options(options)
build.export_cpath('include', ['clog.h'])
with build.options(source_dir='src', extra_include_dirs='src'):
build.static_library('clog', build.cc('clog.c'))
with build.options(source_dir='test', deps={(build, build.deps.googletest): all, 'log': build.target.is_android}):
build.unittest('clog-test', build.cxx('clog.cc'))
return build |
def test_two_arrays():
str = '{"one": 1, "two": 2.2}{"one": 10, "two": 22}'
with pytest.raises(ValueError):
ak.operations.from_json(str)
str = '{"one": 1, "two": 2.2} {"one": 10, "two": 22}'
with pytest.raises(ValueError):
ak.operations.from_json(str)
str = '{"one": 1, \t "two": 2.2}{"one": 10, "two": 22}'
with pytest.raises(ValueError):
ak.operations.from_json(str)
str = '{"one": 1, "two": 2.2} \t {"one": 10, "two": 22}'
with pytest.raises(ValueError):
ak.operations.from_json(str)
str = '{"one": 1, "two": 2.2}\n{"one": 10, "two": 22}'
array = ak.operations.from_json(str, line_delimited=True)
assert (array.to_list() == [{'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}])
str = '{"one": 1, "two": 2.2}\n\r{"one": 10, "two": 22}'
array = ak.operations.from_json(str, line_delimited=True)
assert (array.to_list() == [{'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}])
str = '{"one": 1, "two": 2.2} \n {"one": 10, "two": 22}'
array = ak.operations.from_json(str, line_delimited=True)
assert (array.to_list() == [{'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}])
str = '{"one": 1, "two": 2.2} \n\r {"one": 10, "two": 22}'
array = ak.operations.from_json(str, line_delimited=True)
assert (array.to_list() == [{'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}])
str = '{"one": 1, "two": 2.2}\n{"one": 10, "two": 22}\n'
array = ak.operations.from_json(str, line_delimited=True)
assert (array.to_list() == [{'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}])
str = '{"one": 1, "two": 2.2}\n\r{"one": 10, "two": 22}\n\r'
array = ak.operations.from_json(str, line_delimited=True)
assert (array.to_list() == [{'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}])
str = '["one", "two"]\n["uno", "dos"]'
array = ak.operations.from_json(str, line_delimited=True)
assert (array.to_list() == [['one', 'two'], ['uno', 'dos']])
str = '["one", "two"]\n\r["uno", "dos"]'
array = ak.operations.from_json(str, line_delimited=True)
assert (array.to_list() == [['one', 'two'], ['uno', 'dos']])
str = '["one", "two"] \n ["uno", "dos"]'
array = ak.operations.from_json(str, line_delimited=True)
assert (array.to_list() == [['one', 'two'], ['uno', 'dos']])
str = '["one", "two"] \n\r ["uno", "dos"]'
array = ak.operations.from_json(str, line_delimited=True)
assert (array.to_list() == [['one', 'two'], ['uno', 'dos']])
str = '"one"\n"two"'
array = ak.operations.from_json(str, line_delimited=True)
assert (array.to_list() == ['one', 'two'])
str = '"one"\n\r"two"'
array = ak.operations.from_json(str, line_delimited=True)
assert (array.to_list() == ['one', 'two'])
str = '"one" \n "two"'
array = ak.operations.from_json(str, line_delimited=True)
assert (array.to_list() == ['one', 'two'])
array = ak.operations.from_json((samples_path / 'test-two-arrays.json'), line_delimited=True)
assert (array.to_list() == [{'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}, {'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}, {'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}, {'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}, {'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}, {'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}, {'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}, {'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}, {'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}, {'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}, {'one': 1, 'two': 2.2}, {'one': 10, 'two': 22.0}, ['one', 'two'], ['uno', 'dos'], ['one', 'two'], ['uno', 'dos'], ['one', 'two'], ['uno', 'dos'], ['one', 'two'], ['uno', 'dos'], ['one', 'two'], ['uno', 'dos'], ['one', 'two'], ['uno', 'dos'], 'one', 'two', 'one', 'two', 'one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']) |
class TestFlowInclude(FLSpec):
include_error_list = []
def start(self):
print((f'{bcolors.OKBLUE}Testing FederatedFlow - Starting Test for Include Attributes ' + f'{bcolors.ENDC}'))
self.collaborators = self.runtime.collaborators
self.exclude_agg_to_agg = 10
self.include_agg_to_agg = 100
self.next(self.test_include_agg_to_agg, include=['include_agg_to_agg', 'collaborators'])
def test_include_agg_to_agg(self):
if ((hasattr(self, 'include_agg_to_agg') is True) and (hasattr(self, 'exclude_agg_to_agg') is False)):
print((f'{bcolors.OKGREEN} ... Include test passed in test_include_agg_to_agg ' + f'{bcolors.ENDC}'))
else:
TestFlowInclude.include_error_list.append('test_include_agg_to_agg')
print(f'{bcolors.FAIL} ... Include test failed in test_include_agg_to_agg {bcolors.ENDC}')
self.include_agg_to_collab = 100
self.exclude_agg_to_collab = 78
self.next(self.test_include_agg_to_collab, foreach='collaborators', include=['include_agg_to_collab', 'collaborators'])
def test_include_agg_to_collab(self):
if ((hasattr(self, 'include_agg_to_agg') is False) and (hasattr(self, 'exclude_agg_to_agg') is False) and (hasattr(self, 'exclude_agg_to_collab') is False) and (hasattr(self, 'include_agg_to_collab') is True)):
print((f'{bcolors.OKGREEN} ... Include test passed in test_include_agg_to_collab ' + f'{bcolors.ENDC}'))
else:
TestFlowInclude.include_error_list.append('test_include_agg_to_collab')
print((f'{bcolors.FAIL} ... Include test failed in test_include_agg_to_collab ' + f'{bcolors.ENDC}'))
self.exclude_collab_to_collab = 10
self.include_collab_to_collab = 44
self.next(self.test_include_collab_to_collab, include=['include_collab_to_collab'])
def test_include_collab_to_collab(self):
if ((hasattr(self, 'include_agg_to_agg') is False) and (hasattr(self, 'include_agg_to_collab') is False) and (hasattr(self, 'include_collab_to_collab') is True) and (hasattr(self, 'exclude_agg_to_agg') is False) and (hasattr(self, 'exclude_agg_to_collab') is False) and (hasattr(self, 'exclude_collab_to_collab') is False)):
print((f'{bcolors.OKGREEN} ... Include test passed in test_include_collab_to_collab ' + f'{bcolors.ENDC}'))
else:
TestFlowInclude.include_error_list.append('test_include_collab_to_collab')
print((f'{bcolors.FAIL} ... Include test failed in test_include_collab_to_collab ' + f'{bcolors.ENDC}'))
self.exclude_collab_to_agg = 20
self.include_collab_to_agg = 56
self.next(self.join, include=['include_collab_to_agg'])
def join(self, inputs):
validate = ((hasattr(self, 'include_agg_to_agg') is True) and (hasattr(self, 'include_agg_to_collab') is True) and (hasattr(self, 'exclude_agg_to_collab') is True) and (hasattr(self, 'exclude_agg_to_agg') is False))
for input in inputs:
validation = (validate and ((hasattr(input, 'include_collab_to_collab') is False) and (hasattr(input, 'exclude_collab_to_collab') is False) and (hasattr(input, 'exclude_collab_to_agg') is False) and (hasattr(input, 'include_collab_to_agg') is True)))
if validation:
print(f'{bcolors.OKGREEN} ... Include test passed in join {bcolors.ENDC}')
else:
TestFlowInclude.include_error_list.append('join')
print(f'{bcolors.FAIL} ... Include test failed in join {bcolors.ENDC}')
print(f'''
{bcolors.UNDERLINE}Include attribute test summary: {bcolors.ENDC}
''')
if TestFlowInclude.include_error_list:
validated_include_variables = ','.join(TestFlowInclude.include_error_list)
print((f'{bcolors.FAIL} ...Test case failed for {validated_include_variables} ' + f'{bcolors.ENDC}'))
self.next(self.end)
def end(self):
print((f'{bcolors.OKBLUE}Testing FederatedFlow - Ending Test for Include Attributes ' + f'{bcolors.ENDC}'))
if TestFlowInclude.include_error_list:
raise AssertionError(f'''{bcolors.FAIL}
...Test case failed ... {bcolors.ENDC}''') |
@test_utils.test()
def test_matrix_field_dynamic_index_different_path_length():
v = ti.Vector.field(2, ti.i32)
x = v.get_scalar_field(0)
y = v.get_scalar_field(1)
ti.root.dense(ti.i, 8).place(x)
ti.root.dense(ti.i, 2).dense(ti.i, 4).place(y)
impl.get_runtime().materialize()
assert (v._get_dynamic_index_stride() is None) |
def run_defense_method(graph, method, k=3, seed=None):
protected = []
if ((method in methods) and (k > 0)):
if (seed is not None):
np.random.seed(seed)
protected = methods[method](graph, k)
else:
print('{} not implemented or k <= 0'.format(method))
return protected |
@register_metric  # assumed registration decorator name
def overlap50k_alignment50k_layoutwise_iou50k_layoutwise_docsim50k_val(opts):
opts.dataset_kwargs.update(max_size=None, xflip=False)
(overlap, alignment, layoutwiseIoU, layoutwiseDocSim) = overlap50k_alignment50k_layoutwise_iou50k_layoutwise_docsim50k.compute_overlap_alignment_laywise_IoU_layerwise_DocSim(opts, max_real=None, num_gen=50000)
return dict(overlap_50k_val=overlap, alignment_50k_val=alignment, layoutwise_iou50k_val=layoutwiseIoU, layoutwise_docsim50k_val=layoutwiseDocSim) |
def get_agent_cls(agent_class_name):
sub_classes = [sub_class for sub_class in get_all_subclasses(habitat.Agent) if (sub_class.__name__ == agent_class_name)]
return sub_classes[0] |
def plot_UCI():
fname = 'datasets/UCI_processed/OCnodeslinks_chars.txt'
max_nodes = 1901
G_times = UCI_loader.load_temporarl_edgelist(fname, max_nodes=max_nodes)
graph_name = 'UCI_Message'
labels_dict = {}
print('edge')
labels_dict['edge'] = normal_util.plot_edges(G_times, graph_name)
print('acc')
labels_dict['acc'] = normal_util.plot_avg_clustering(G_times, graph_name)
print('component')
labels_dict['component'] = normal_util.plot_num_components_directed(G_times, graph_name)
print('weights')
labels_dict['weights'] = normal_util.plot_weighted_edges(G_times, graph_name)
print('degree')
labels_dict['degree'] = normal_util.plot_degree_changes(G_times, graph_name)
return labels_dict |
def _central_crop(image_list, crop_height, crop_width):
outputs = []
for image in image_list:
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
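# offsets that centre the crop window within the source image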
offset_height = ((image_height - crop_height) / 2)
offset_width = ((image_width - crop_width) / 2)
outputs.append(_crop(image, offset_height, offset_width, crop_height, crop_width))
return outputs |
@WordSplitter.register('simple')  # assumed registry decorator name
class SimpleWordSplitter(WordSplitter):
def __init__(self):
self.special_cases = set(['mr.', 'mrs.', 'etc.', 'e.g.', 'cf.', 'c.f.', 'eg.', 'al.'])
self.contractions = set(["n't", "'s", "'ve", "'re", "'ll", "'d", "'m"])
self.contractions |= set([x.replace("'", '') for x in self.contractions])
self.ending_punctuation = set(['"', "'", '.', ',', ';', ')', ']', '}', ':', '!', '?', '%', '”', '’'])
self.beginning_punctuation = set(['"', "'", '(', '[', '{', '#', '$', '“', '‘'])
def split_words(self, sentence: str) -> List[Token]:
fields = sentence.split()
tokens: List[Token] = []
for field in fields:
add_at_end: List[Token] = []
while (self._can_split(field) and (field[0] in self.beginning_punctuation)):
tokens.append(Token(field[0]))
field = field[1:]
while (self._can_split(field) and (field[(- 1)] in self.ending_punctuation)):
add_at_end.insert(0, Token(field[(- 1)]))
field = field[:(- 1)]
remove_contractions = True
while remove_contractions:
remove_contractions = False
for contraction in self.contractions:
if (self._can_split(field) and field.lower().endswith(contraction)):
add_at_end.insert(0, Token(field[(- len(contraction)):]))
field = field[:(- len(contraction))]
remove_contractions = True
if field:
tokens.append(Token(field))
tokens.extend(add_at_end)
return tokens
def _can_split(self, token: str):
return (token and (token.lower() not in self.special_cases)) |
class func_persist():
def __init__(self, f, dir='func_persist'):
self.__func = f
self.__dir = dir
os.makedirs(dir, exist_ok=True)
self.__doc__ = ('%s%s%s' % (f.__name__, inspect.signature(f), f.__doc__))
def __call__(self, *args, **kwds):
key = (tuple(args), tuple(kwds.items()))
h = hash(key)
name = ('%s/%s_%s.sobj' % (self.__dir, self.__func.__name__, h))
if os.path.exists(name):
(key2, val) = persist.load(name)
if (key == key2):
return val
val = self.__func(*args, **kwds)
persist.save((key, val), name)
return val |
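# Example use of func_persist above (hypothetical function; relies on an available
# persist.save / persist.load helper, as in SageMath):
#   @func_persist
#   def expensive(n): ...
#   expensive(10)  # first call computes and writes a .sobj file; later calls reload it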
def notna(target_column, features, df):
out = pd.Series(features, index=features).apply((lambda feature: df.select([feature, target_column]).na.drop('any').count())).astype(float)
return out |
def ffmpeg_merge_video_audio(video, audio, output, vcodec='copy', acodec='copy', ffmpeg_output=False, logger='bar'):
cmd = [get_setting('FFMPEG_BINARY'), '-y', '-i', audio, '-i', video, '-vcodec', vcodec, '-acodec', acodec, output]
subprocess_call(cmd, logger=logger) |
class LSTMCell(BaseCell):
def __call__(self, inputs, state, scope=None):
with tf.variable_scope((scope or type(self).__name__)):
(cell_tm1, hidden_tm1) = tf.split(state, 2, axis=1)
input_list = [inputs, hidden_tm1]
lin = linear(input_list, self.output_size, add_bias=True, n_splits=4, moving_params=self.moving_params)
(cell_act, input_act, forget_act, output_act) = lin
cell_tilde_t = tanh(cell_act)
input_gate = gate(input_act)
forget_gate = gate((forget_act - self.forget_bias))
output_gate = gate(output_act)
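# note: the previous cell state is scaled by (1 - forget_gate) rather than by forget_gate itself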
cell_t = ((input_gate * cell_tilde_t) + ((1 - forget_gate) * cell_tm1))
hidden_tilde_t = self.recur_func(cell_t)
hidden_t = (hidden_tilde_t * output_gate)
return (hidden_t, tf.concat([cell_t, hidden_t], 1))
def state_size(self):
return (self.output_size * 2) |
class XmlJoint(XmlElem):
tag = 'joint'
JOINT_TYPES = {'revolute': Box2D.b2RevoluteJoint, 'friction': Box2D.b2FrictionJoint, 'prismatic': Box2D.b2PrismaticJoint}
class Meta():
bodyA = XmlAttr('bodyA', String(), required=True)
bodyB = XmlAttr('bodyB', String(), required=True)
anchor = XmlAttr('anchor', Tuple(Float(), Float()))
localAnchorA = XmlAttr('localAnchorA', Tuple(Float(), Float()))
localAnchorB = XmlAttr('localAnchorB', Tuple(Float(), Float()))
axis = XmlAttr('axis', Tuple(Float(), Float()))
limit = XmlAttr('limit', Tuple(Angle(), Angle()))
ctrllimit = XmlAttr('ctrllimit', Tuple(Angle(), Angle()))
typ = XmlAttr('type', Choice('revolute', 'friction', 'prismatic'), required=True)
name = XmlAttr('name', String())
motor = XmlAttr('motor', Bool())
def __init__(self):
self.bodyA = None
self.bodyB = None
self.anchor = None
self.localAnchorA = None
self.localAnchorB = None
self.limit = None
self.ctrllimit = None
self.motor = False
self.typ = None
self.name = None
self.axis = None
def to_box2d(self, world, xml_world, extra_data):
bodyA = find_body(world, self.bodyA)
bodyB = find_body(world, self.bodyB)
args = dict()
if (self.typ == 'revolute'):
if self.localAnchorA:
args['localAnchorA'] = self.localAnchorA
if self.localAnchorB:
args['localAnchorB'] = self.localAnchorB
if self.anchor:
args['anchor'] = self.anchor
if self.limit:
args['enableLimit'] = True
args['lowerAngle'] = self.limit[0]
args['upperAngle'] = self.limit[1]
elif (self.typ == 'friction'):
if self.anchor:
args['anchor'] = self.anchor
elif (self.typ == 'prismatic'):
if self.axis:
args['axis'] = self.axis
else:
raise NotImplementedError
userData = dict(ctrllimit=self.ctrllimit, motor=self.motor, name=self.name)
joint = world.CreateJoint(type=self.JOINT_TYPES[self.typ], bodyA=bodyA, bodyB=bodyB, **args)
joint.userData = userData
return joint |
class TensorboardLogger():
def __init__(self, run_dir, py_logger: logging.Logger, *, enabled_tb):
self.run_dir = run_dir
self.py_log = py_logger
if enabled_tb:
self.tb_log = SummaryWriter(run_dir)
else:
self.tb_log = None
try:
import git
repo = git.Repo('.')
git_info = ((str(repo.active_branch) + ' ') + str(repo.head.commit.hexsha))
except (ImportError, RuntimeError):
print('Failed to fetch git info. Defaulting to None')
git_info = 'None'
self.log_string('git', git_info)
self.time_estimator: TimeEstimator = None
def log_scalar(self, tag, x, it):
if (self.tb_log is None):
return
self.tb_log.add_scalar(tag, x, it)
def log_metrics(self, exp_id, prefix, metrics: Dict, it):
msg = f'{exp_id}-{prefix} - it {it:6d}: '
metrics_msg = ''
for (k, v) in sorted(metrics.items()):
self.log_scalar(f'{prefix}/{k}', v, it)
metrics_msg += f'{k: >10}:{v:.7f}, '
if (self.time_estimator is not None):
self.time_estimator.update()
avg_time = self.time_estimator.get_and_reset_avg_time()
est = self.time_estimator.get_est_remaining(it)
est = datetime.timedelta(seconds=est)
if (est.days > 0):
remaining_str = f'{est.days}d {(est.seconds // 3600)}h'
else:
remaining_str = f'{(est.seconds // 3600)}h {((est.seconds % 3600) // 60)}m'
eta = (datetime.datetime.now() + est)
eta_str = eta.strftime('%Y-%m-%d %H:%M:%S')
time_msg = f'avg_time:{avg_time:.3f},remaining:{remaining_str},eta:{eta_str}, '
msg = f'{msg} {time_msg}'
msg = f'{msg} {metrics_msg}'
self.py_log.info(msg)
def log_image(self, stage_name, tag, image, it):
image_dir = os.path.join(self.run_dir, f'{stage_name}_images')
os.makedirs(image_dir, exist_ok=True)
image = Image.fromarray(image)
image.save(os.path.join(image_dir, f'{tag}_{it}.png'))
def log_string(self, tag, x):
self.py_log.info(f'{tag} - {x}')
if (self.tb_log is None):
return
self.tb_log.add_text(tag, x)
def debug(self, x):
self.py_log.debug(x)
def info(self, x):
self.py_log.info(x)
def warning(self, x):
self.py_log.warning(x)
def error(self, x):
self.py_log.error(x)
def critical(self, x):
self.py_log.critical(x) |
class DDPGAgent(object):
def __init__(self, state_dim, action_dim, max_action, device, discount=0.99, tau=0.005):
self.device = device
self.discount = discount
self.tau = tau
self.actor = Actor(state_dim, action_dim, max_action).to(device)
self.actor_target = deepcopy(self.actor)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters())
self.critic = Critic(state_dim, action_dim).to(device)
self.critic_target = deepcopy(self.critic)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters())
def select_action(self, state):
state = torch.FloatTensor(state.reshape(1, (- 1))).to(self.device)
return self.actor(state).cpu().data.numpy().flatten()
@staticmethod
def soft_update(local_model, target_model, tau):
for (target_param, local_param) in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(((tau * local_param.data) + ((1.0 - tau) * target_param.data)))
def save_checkpoint(self, filename):
torch.save(self.critic.state_dict(), (filename + '_critic'))
torch.save(self.critic_optimizer.state_dict(), (filename + '_critic_optimizer'))
torch.save(self.actor.state_dict(), (filename + '_actor'))
torch.save(self.actor_optimizer.state_dict(), (filename + '_actor_optimizer'))
def load_checkpoint(self, filename):
self.critic.load_state_dict(torch.load((filename + '_critic'), map_location=torch.device('cpu')))
self.critic_optimizer.load_state_dict(torch.load((filename + '_critic_optimizer'), map_location=torch.device('cpu')))
self.critic_target = deepcopy(self.critic)
self.actor.load_state_dict(torch.load((filename + '_actor'), map_location=torch.device('cpu')))
self.actor_optimizer.load_state_dict(torch.load((filename + '_actor_optimizer'), map_location=torch.device('cpu')))
self.actor_target = deepcopy(self.actor)
def train(self, replay_buffer, batch_size=100):
(state, action, next_state, reward, not_done) = replay_buffer.sample(batch_size)
target_q = self.critic_target(next_state, self.actor_target(next_state))
target_q = (reward + ((not_done * self.discount) * target_q).detach())
current_q = self.critic(state, action)
critic_loss = F.mse_loss(current_q, target_q)
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
actor_loss = (- self.critic(state, self.actor(state)).mean())
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
DDPGAgent.soft_update(self.critic, self.critic_target, self.tau)
DDPGAgent.soft_update(self.actor, self.actor_target, self.tau) |
class ScispaCy(BaseLinker):
def __init__(self, args):
import scispacy, spacy
from scispacy.abbreviation import AbbreviationDetector
from scispacy.umls_linking import UmlsEntityLinker
self.nlp = spacy.load('en_core_sci_sm')
self.nlp.add_pipe('abbreviation_detector')
self.nlp.add_pipe('scispacy_linker', config={'resolve_abbreviations': True, 'linker_name': 'umls'})
def __call__(self, text):
sci_res = self.nlp(text)
men_list = ddict(list)
for ent in sci_res.ents:
(start, end) = (ent.start_char, ent.end_char)
for (cand, score) in ent._.umls_ents:
men_list[(start, end)].append([cand, round(score, 3)])
return self.reformat(men_list, text) |
def tf_toposort(ts, within_ops=None):
all_ops = ge.get_forward_walk_ops([x.op for x in ts], within_ops=within_ops)
deps = {}
for op in all_ops:
for o in op.outputs:
deps[o] = set(op.inputs)
sorted_ts = toposort(deps)
ts_sorted_lists = []
for l in sorted_ts:
keep = list(set(l).intersection(ts))
if keep:
ts_sorted_lists.append(keep)
return ts_sorted_lists |
def tcd(xs, base=2):
xis = [entropyd(column(xs, i), base) for i in range(0, len(xs[0]))]
hx = entropyd(xs, base)
return (np.sum(xis) - hx) |
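A small worked example of the discrete total-correlation helper above; it assumes `entropyd` (discrete entropy) and `column` come from the same non-parametric entropy-estimation module the snippet relies on.
xs = [[0, 0], [0, 0], [1, 1], [1, 1]]  # two perfectly dependent binary variables
print(tcd(xs))  # H(X0) + H(X1) - H(X0, X1) = 1 + 1 - 1 = 1.0 bit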
def evaluate_model(epoch):
combined_model.eval()
val_loss = 0.0
total = 0.0
correct = 0.0
with torch.no_grad():
for (batch_idx, (img, text_match, text_diff, seq_len_match, seq_len_diff, bboxes, bbox_classes)) in enumerate(tqdm(val_loader, desc='')):
(text_match, text_diff) = process_text_embedding(text_match, text_diff)
batch = len(img)
(z_img, z_t_match, z_t_diff) = combined_model(img, text_match, text_diff, batch, seq_len_match, seq_len_diff, bboxes, bbox_classes)
loss = margin_loss_text_combined(z_img, z_t_match, z_t_diff)
val_loss += float(loss.item())
correct += get_match_vs_no_match_acc(z_img, z_t_match, z_t_diff)
total += batch
torch.cuda.empty_cache()
del img, text_match, text_diff, seq_len_match, seq_len_diff, bboxes, bbox_classes
logger.log(mode='val', scalar_value=(val_loss / len(val_loader)), epoch=epoch, scalar_name='loss')
logger.log(mode='val', scalar_value=(correct / total), epoch=epoch, scalar_name='accuracy')
print(' Val Epoch: {} Avg loss: {:.4f} Acc: {:.2f}'.format(epoch, (val_loss / len(val_loader)), (correct / total)))
return val_loss |
@pytest.mark.usefixtures('spark')
def interactions_users_spark_dataset(spark):
events = spark.createDataFrame(pd.DataFrame({'user_id': [0, 0, 1, 1, 1, 2], 'item_id': [0, 1, 0, 2, 3, 1], 'timestamp': [0, 1, 2, 3, 4, 5], 'rating': [1.1, 1.2, 1.3, 2, 3, 4]}))
users = spark.createDataFrame(pd.DataFrame({'user_id': [0, 1, 2], 'gender': [0, 1, 0]}))
return {'interactions': events, 'users': users, 'user_col': 'user_id', 'item_col': 'item_id', 'timestamp_col': 'timestamp', 'ratings_col': 'rating', 'users_cardinality': 3, 'items_cardinality': 4} |
def calc_one_mrr(data):
score = 0
data = sorted(data, key=(lambda d: d[1]), reverse=True)
for (idx, item) in enumerate(data):
if (int(item[0][2]) == 1):
score = (1.0 / (idx + 1))
break
return score |
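A quick sanity check of the reciprocal-rank helper above; each entry is assumed to be `(fields, score)` where `fields[2]` holds the binary relevance label as a string.
ranked = [(('q1', 'docA', '0'), 0.9), (('q1', 'docB', '1'), 0.7), (('q1', 'docC', '0'), 0.4)]
assert calc_one_mrr(ranked) == 0.5  # the first relevant document sits at rank 2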
def run(size):
FLAGS.min_dec_steps = (size // 4)
FLAGS.max_dec_steps = size
FLAGS.max_enc_steps = size
tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info('Starting seq2seq_attention in %s mode...', FLAGS.mode)
FLAGS.log_root = log_path
FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
if (not os.path.exists(FLAGS.log_root)):
if (FLAGS.mode == 'train'):
os.makedirs(FLAGS.log_root)
else:
raise Exception(("Logdir %s doesn't exist. Run in train mode to create it." % FLAGS.log_root))
print('vocab path is ', FLAGS.vocab_path)
vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size)
if (FLAGS.mode == 'decode'):
FLAGS.batch_size = FLAGS.beam_size
if (FLAGS.single_pass and (FLAGS.mode != 'decode')):
raise Exception('The single_pass flag should only be True in decode mode')
hparam_list = ['mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag', 'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim', 'batch_size', 'max_dec_steps', 'max_enc_steps', 'coverage', 'cov_loss_wt', 'pointer_gen']
hps_dict = {}
for val in FLAGS:
if (val in hparam_list):
hps_dict[val] = FLAGS[val].value
hps = namedtuple('HParams', hps_dict.keys())(**hps_dict)
batcher = Batcher(FLAGS.data_path, vocab, hps, single_pass=FLAGS.single_pass)
tf.set_random_seed(111)
if (hps.mode == 'train'):
print('creating model...')
model = SummarizationModel(hps, vocab)
setup_training(model, batcher)
elif (hps.mode == 'eval'):
model = SummarizationModel(hps, vocab)
run_eval(model, batcher, vocab)
elif (hps.mode == 'decode'):
decode_model_hps = hps
decode_model_hps = hps._replace(max_dec_steps=1)
model = SummarizationModel(decode_model_hps, vocab)
decoder = BeamSearchDecoder(model, batcher, vocab)
decoder.decode()
else:
raise ValueError("The 'mode' flag must be one of train/eval/decode") |
class SelfAttentionBlock(_SelfAttentionBlock):
def __init__(self, low_in_channels, high_in_channels, channels, out_channels, share_key_query, query_scale, key_pool_scales, conv_cfg, norm_cfg, act_cfg):
key_psp = PPMConcat(key_pool_scales)
if (query_scale > 1):
query_downsample = nn.MaxPool2d(kernel_size=query_scale)
else:
query_downsample = None
super(SelfAttentionBlock, self).__init__(key_in_channels=low_in_channels, query_in_channels=high_in_channels, channels=channels, out_channels=out_channels, share_key_query=share_key_query, query_downsample=query_downsample, key_downsample=key_psp, key_query_num_convs=1, key_query_norm=True, value_out_num_convs=1, value_out_norm=False, matmul_norm=True, with_out=True, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) |
def truncate_seq_pair(tokens_a, tokens_b, max_length):
is_too_long = False
while True:
total_length = (len(tokens_a) + len(tokens_b))
if (total_length <= max_length):
break
is_too_long = True
if (len(tokens_a) > len(tokens_b)):
tokens_a.pop()
else:
tokens_b.pop()
return is_too_long |
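A minimal example of the pair-truncation helper above: tokens are popped from the end of the longer sequence until the combined length fits `max_length`.
tokens_a = ['the', 'quick', 'brown', 'fox', 'jumps']
tokens_b = ['over', 'the', 'dog']
was_truncated = truncate_seq_pair(tokens_a, tokens_b, max_length=6)
assert was_truncated and tokens_a == ['the', 'quick', 'brown'] and len(tokens_a) + len(tokens_b) == 6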
class Artanh(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
x = x.clamp(((- 1) + 1e-05), (1 - 1e-05))
ctx.save_for_backward(x)
res = torch.log_((1 + x)).sub_(torch.log_((1 - x))).mul_(0.5)
return res
@staticmethod
def backward(ctx, grad_output):
(input,) = ctx.saved_tensors
return (grad_output / (1 - (input ** 2))) |
class SAB(nn.Module):
def __init__(self, dim_in, dim_out, num_heads=4, ln=False, attention_dropout=0.1, dim_feedforward=512, attn_mode='Normal'):
super(SAB, self).__init__()
self.mab = MultiHeadSelfAttentionBlock(dim_in, dim_out, num_heads, ln=ln, attention_dropout=attention_dropout, dim_feedforward=dim_feedforward, attn_mode=attn_mode)
def forward(self, X):
return self.mab(X, X) |
class JointDataLoader(object):
def __init__(self, cfg):
self.cfg = cfg
self.dataloader_A = None
self.dataloader_B = None
self.stop_A = False
self.stop_B = False
self.max_dataset_size = None
self.is_train = None
def build(self, dataloader_A, dataloader_B, is_train, max_dataset_size=float('inf')):
self.dataloader_A = dataloader_A
self.dataloader_B = dataloader_B
self.stop_A = False
self.stop_B = False
self.max_dataset_size = max_dataset_size
self.is_train = is_train
def __iter__(self):
self.stop_A = False
self.stop_B = False
self.dataloader_A_iter = iter(self.dataloader_A)
self.dataloader_B_iter = iter(self.dataloader_B)
self.iter = 0
if (self.is_train is False):
np.random.seed(0)
return self
def __len__(self):
if (not self.is_train):
return len(self.dataloader_A)
return max(len(self.dataloader_A), len(self.dataloader_B))
def __next__(self):
A = None
B = None
try:
A = next(self.dataloader_A_iter)
except StopIteration:
if (A is None):
self.stop_A = True
self.dataloader_A_iter = iter(self.dataloader_A)
A = next(self.dataloader_A_iter)
try:
B = next(self.dataloader_B_iter)
except StopIteration:
if (B is None):
self.stop_B = True
self.dataloader_B_iter = iter(self.dataloader_B)
B = next(self.dataloader_B_iter)
if ((self.stop_A and self.stop_B) or (self.iter > self.max_dataset_size)):
self.stop_A = False
self.stop_B = False
raise StopIteration()
else:
self.iter += 1
return {'source': A, 'target': B} |
def __validate_extra_deps(extra_section: str, error: bool=False) -> None:
ignore_deps = os.environ.get('DOCUMENTATION_ENV', False)
md = distribution('lightautoml').metadata
extra_pattern = 'extra == "{}"'.format(extra_section)
reqs_info = []
for (k, v) in md.items():
if ((k == 'Requires-Dist') and (extra_pattern in v)):
req = v.split(';')[0].split()[0]
reqs_info.append(req)
for req_info in reqs_info:
lib_name: str = req_info.split()[0]
try:
distribution(lib_name)
except PackageNotFoundError as e:
logger.warning("'%s' extra dependecy package '%s' isn't installed. Look at README.md in repo 'LightAutoML' for installation instructions.", extra_section, lib_name)
if (not ignore_deps):
if error:
raise e |
def validate_yaml_file(file: str):
try:
with open(file, encoding='utf-8') as fp:
yaml.load(fp.read(), Loader=yaml.FullLoader)
except FileNotFoundError:
return (False, f"The file {Fore.CYAN}`{file}`{Fore.RESET} wasn't found")
except yaml.YAMLError as e:
return (False, f'There was an issue while trying to read with your AI Settings file: {e}')
return (True, f'Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!') |
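A short usage sketch, assuming `yaml` and colorama's `Fore` are imported as the function expects; the settings file path here is hypothetical.
ok, message = validate_yaml_file('ai_settings.yaml')
print(message)  # success or failure description with the file name highlighted
if not ok:
    raise SystemExit(1)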
def test_transfer_fields_correct_batch(adata1, adata2):
del adata2.obs['batch']
adata1_manager = generic_setup_adata_manager(adata1, batch_key='batch')
with pytest.raises(KeyError):
adata1_manager.transfer_fields(adata2) |
def keras_train_and_save(estimator, model_params, save, FLAGS, train_dataset_fn, val_dataset_fn, label_meta, epochs, verbose, metric_names, validation_steps, load, model_meta, is_pai):
print('Start training using keras model...')
classifier = None
try:
(classifier, has_none_optimizer) = keras_compile(estimator, model_params, metric_names)
except Exception:
if hasattr(estimator, 'sqlflow_train_loop'):
sys.stderr.write('compile keras model failed, ignoring this error since the model seems to define sqlflow_train_loop.\n')
classifier = init_model_with_feature_column(estimator, model_params, has_none_optimizer=True)
has_none_optimizer = True
else:
six.reraise(*sys.exc_info())
train_dataset = train_dataset_fn()
if (val_dataset_fn is not None):
validate_dataset = val_dataset_fn()
else:
validate_dataset = None
if load:
(inputs, targets) = next(iter(train_dataset.take(1)))
classifier.evaluate(inputs, targets)
load_keras_model_weights(classifier, load)
if (len(FLAGS.worker_hosts.split(',')) > 1):
keras_train_distributed(classifier, model_params, save, model_meta, FLAGS, train_dataset_fn, val_dataset_fn, is_pai)
else:
keras_train_compiled(classifier, save, train_dataset, validate_dataset, label_meta, epochs, verbose, model_meta, validation_steps, has_none_optimizer) |
def clip_grad_norm(named_parameters, max_norm, clip=False, verbose=False):
named_parameters = list(named_parameters)
max_norm = float(max_norm)
total_norm = 0
param_to_norm = {}
param_to_shape = {}
for (n, p) in named_parameters:
if (p.grad is not None):
param_norm = p.grad.data.norm(2)
total_norm += (param_norm ** 2)
param_to_norm[n] = param_norm
param_to_shape[n] = p.size()
total_norm = (total_norm ** (1.0 / 2))
clip_coef = (max_norm / (total_norm + 1e-06))
if ((clip_coef < 1) and clip):
for (_, p) in named_parameters:
if (p.grad is not None):
p.grad.data.mul_(clip_coef)
if verbose:
print('---Total norm {:.3f} clip coef {:.3f}'.format(total_norm, clip_coef))
return total_norm |
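A self-contained sketch of how the gradient-norm helper above might be used on a small PyTorch model; the model and threshold are illustrative only.
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
loss = model(torch.randn(8, 4)).pow(2).sum()
loss.backward()
total_norm = clip_grad_norm(list(model.named_parameters()), max_norm=1.0, clip=True, verbose=True)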
def weights_init_classifier(m):
classname = m.__class__.__name__
if (classname.find('Linear') != (- 1)):
if (m.weight is not None):
init.normal_(m.weight.data, std=0.001)
if (m.bias is not None):
init.constant_(m.bias.data, 0.0) |
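A usage sketch for the classifier-head initializer above (it relies on `torch.nn.init` being imported as `init` in its own module, as the snippet assumes); the layer sizes are illustrative only.
import torch.nn as nn
from torch.nn import init

head = nn.Sequential(nn.Linear(2048, 512), nn.ReLU(), nn.Linear(512, 751))
head.apply(weights_init_classifier)  # re-initialises every Linear layer in place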
class Net(BaseNet):
def __init__(self, config):
super(Net, self).__init__(config)
self.build_net()
def build_net(self):
num_agents = self.config.num_agents
num_items = self.config.num_items
num_a_layers = self.config.net.num_a_layers
num_p_layers = self.config.net.num_p_layers
num_a_hidden_units = self.config.net.num_a_hidden_units
num_p_hidden_units = self.config.net.num_p_hidden_units
w_init = self.init
b_init = tf.keras.initializers.Zeros()
wd = (None if ('wd' not in self.config.train) else self.config.train.wd)
self.w_a = []
self.b_a = []
self.w_p = []
self.b_p = []
num_in = (num_agents * num_items)
with tf.variable_scope('alloc'):
self.w_a.append(create_var('w_a_0', [num_in, num_a_hidden_units], initializer=w_init, wd=wd))
for i in range(1, (num_a_layers - 1)):
wname = ('w_a_' + str(i))
self.w_a.append(create_var(wname, [num_a_hidden_units, num_a_hidden_units], initializer=w_init, wd=wd))
wname = ('w_a_' + str((num_a_layers - 1)))
self.w_a.append(create_var(wname, [num_a_hidden_units, ((num_agents + 1) * (num_items + 1))], initializer=w_init, wd=wd))
for i in range((num_a_layers - 1)):
wname = ('b_a_' + str(i))
self.b_a.append(create_var(wname, [num_a_hidden_units], initializer=b_init))
wname = ('b_a_' + str((num_a_layers - 1)))
self.b_a.append(create_var(wname, [((num_agents + 1) * (num_items + 1))], initializer=b_init))
with tf.variable_scope('pay'):
self.w_p.append(create_var('w_p_0', [num_in, num_p_hidden_units], initializer=w_init, wd=wd))
for i in range(1, (num_p_layers - 1)):
wname = ('w_p_' + str(i))
self.w_p.append(create_var(wname, [num_p_hidden_units, num_p_hidden_units], initializer=w_init, wd=wd))
wname = ('w_p_' + str((num_p_layers - 1)))
self.w_p.append(create_var(wname, [num_p_hidden_units, num_agents], initializer=w_init, wd=wd))
for i in range((num_p_layers - 1)):
wname = ('b_p_' + str(i))
self.b_p.append(create_var(wname, [num_p_hidden_units], initializer=b_init))
wname = ('b_p_' + str((num_p_layers - 1)))
self.b_p.append(create_var(wname, [num_agents], initializer=b_init))
def inference(self, x):
x_in = tf.reshape(x, [(- 1), (self.config.num_agents * self.config.num_items)])
a = (tf.matmul(x_in, self.w_a[0]) + self.b_a[0])
a = self.activation(a, 'alloc_act_0')
activation_summary(a)
for i in range(1, (self.config.net.num_a_layers - 1)):
a = (tf.matmul(a, self.w_a[i]) + self.b_a[i])
a = self.activation(a, ('alloc_act_' + str(i)))
activation_summary(a)
a = (tf.matmul(a, self.w_a[(- 1)]) + self.b_a[(- 1)])
a = tf.nn.softmax(tf.reshape(a, [(- 1), (self.config.num_agents + 1), (self.config.num_items + 1)]), axis=1)
a = tf.slice(a, [0, 0, 0], size=[(- 1), self.config.num_agents, self.config.num_items], name='alloc_out')
activation_summary(a)
p = (tf.matmul(x_in, self.w_p[0]) + self.b_p[0])
p = self.activation(p, 'pay_act_0')
activation_summary(p)
for i in range(1, (self.config.net.num_p_layers - 1)):
p = (tf.matmul(p, self.w_p[i]) + self.b_p[i])
p = self.activation(p, ('pay_act_' + str(i)))
activation_summary(p)
p = (tf.matmul(p, self.w_p[(- 1)]) + self.b_p[(- 1)])
p = tf.sigmoid(p, 'pay_sigmoid')
activation_summary(p)
u = tf.reduce_sum((a * tf.reshape(x, [(- 1), self.config.num_agents, self.config.num_items])), axis=(- 1))
p = (p * u)
activation_summary(p)
return (a, p) |
def to_cuda(batch):
for (key, value) in batch.items():
if isinstance(value, torch.Tensor):
batch[key] = value.cuda()
return batch |
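A small usage sketch: only tensor values in the batch dict are moved to the GPU, everything else is left untouched (guarded here so it also runs without CUDA).
import torch

batch = {'input_ids': torch.zeros(2, 8, dtype=torch.long), 'meta': ['a', 'b']}
if torch.cuda.is_available():
    batch = to_cuda(batch)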
class WandbCallback(TrainerCallback):
def __init__(self):
assert _has_wandb, 'WandbCallback requires wandb to be installed. Run `pip install wandb`.'
self._initialized = False
def setup(self, args, state, model):
self._initialized = True
if state.is_world_process_zero:
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
combined_dict = {**args.to_sanitized_dict()}
if (getattr(model, 'config', None) is not None):
combined_dict = {**model.config.to_dict(), **combined_dict}
wandb.init(project=os.getenv('WANDB_PROJECT', 'huggingface'), config=combined_dict, name=args.run_name)
if ((not is_torch_tpu_available()) and (os.getenv('WANDB_WATCH') != 'false')):
wandb.watch(model, log=os.getenv('WANDB_WATCH', 'gradients'), log_freq=max(100, args.logging_steps))
def on_train_begin(self, args, state, control, model=None, **kwargs):
if (not self._initialized):
self.setup(args, state, model)
def on_log(self, args, state, control, model=None, logs=None, **kwargs):
if (not self._initialized):
self.setup(args, state, model)
if state.is_world_process_zero:
wandb.log(logs, step=state.global_step) |
def wait_for_process(p):
try:
return p.wait()
except KeyboardInterrupt:
exit_status = p.wait(timeout=5)
if (exit_status is not None):
return exit_status
else:
p.kill()
raise
except:
p.kill()
raise
finally:
p.wait() |
def maybe_do_theoretical_analysis(DO_THEORETICAL, PRINT_THEORETICAL, PRINT_MIN_MAX_BALANCE, async_pipeline, graph, recomputation):
s = ''
if ((graph is not None) and DO_THEORETICAL):
(sequential_f, sequential_b, parallel_f, parallel_b) = theoretical_analysis(graph, recomputation=recomputation, async_pipeline=async_pipeline)
edges = edge_cut(graph)
theoretical_sequential_b_balance = worst_balance(sequential_b)
theoretical_sequential_f_balance = worst_balance(sequential_f)
theoretical_parallel_b_balance = worst_balance(parallel_b)
theoretical_parallel_f_balance = worst_balance(parallel_f)
if (edges is not None):
s += f'''cutting edges are edges between partitions
'''
s += f'''number of cutting edges: {len(edges)}
'''
if PRINT_THEORETICAL:
s += f'''
theoretical times are execution time based on sum of graph weights ms
'''
s += f'''
sequential forward {sequential_f}
sequential backward {sequential_b}
'''
s += f'''parallel forward {parallel_f}
parallel backward {parallel_b}
'''
if PRINT_MIN_MAX_BALANCE:
s += f'''
balance is ratio of computation time between fastest and slowest parts.'''
s += ' (between 0 and 1 higher is better)\n'
if PRINT_THEORETICAL:
s += f'''theoretical sequential balance:
'''
s += f'''forward {theoretical_sequential_f_balance:.3f}
backward {theoretical_sequential_b_balance:.3f}
'''
s += f'''theoretical parallel balance:
'''
s += f'''forward {theoretical_parallel_f_balance:.3f}
backward {theoretical_parallel_b_balance:.3f}
'''
return s |
class LieAlgebraWithGenerators(LieAlgebra):
def __init__(self, R, names=None, index_set=None, category=None, prefix='L', **kwds):
self._indices = index_set
LieAlgebra.__init__(self, R, names, category)
@cached_method
def lie_algebra_generators(self):
return Family(self._indices, self.monomial, name='monomial map')
@cached_method
def gens(self):
G = self.lie_algebra_generators()
try:
return tuple((G[i] for i in self.variable_names()))
except (KeyError, IndexError):
return tuple((G[i] for i in self.indices()))
except ValueError:
return tuple(G)
def gen(self, i):
return self.gens()[i]
def indices(self):
return self._indices |
class _Missing(object):
def __repr__(self):
return 'no value'
def __reduce__(self):
return '_missing' |
class DetrConfig(PretrainedConfig):
model_type = 'detr'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads'}
def __init__(self, num_queries=100, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, classifier_dropout=0.0, scale_embedding=False, auxiliary_loss=False, position_embedding_type='sine', backbone='resnet50', dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
self.num_queries = num_queries
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding
self.auxiliary_loss = auxiliary_loss
self.position_embedding_type = position_embedding_type
self.backbone = backbone
self.dilation = dilation
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
self.mask_loss_coefficient = mask_loss_coefficient
self.dice_loss_coefficient = dice_loss_coefficient
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.eos_coefficient = eos_coefficient
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def num_attention_heads(self) -> int:
return self.encoder_attention_heads
@property
def hidden_size(self) -> int:
return self.d_model |
def test_init_objects():
trainer = SingleObjectiveTrainer(dataHandler, model, correctness_loss, validation_metrics, save_to_path, params)
assert (type(trainer._train_dataloader) == DataLoader)
assert (type(trainer.pareto_manager) == ParetoManager)
assert (trainer.pareto_manager.path == save_to_path)
assert (type(trainer.validator) == Validator) |
def get_optimizer(student, len_dataloader, args):
params_groups = get_params_groups(student)
if (args.optimizer == 'adamw'):
optimizer = torch.optim.AdamW(params_groups)
elif (args.optimizer == 'sgd'):
optimizer = torch.optim.SGD(params_groups, lr=0, momentum=0.9)
elif (args.optimizer == 'lars'):
optimizer = LARS(params_groups)
fp16_scaler = None
if args.use_fp16:
fp16_scaler = torch.cuda.amp.GradScaler()
lr_schedule = cosine_scheduler(((args.lr * (args.batch_size_per_gpu * utils.get_world_size())) / 256.0), args.min_lr, args.epochs, len_dataloader, warmup_epochs=args.warmup_epochs)
wd_schedule = cosine_scheduler(args.weight_decay, args.weight_decay_end, args.epochs, len_dataloader)
momentum_schedule = cosine_scheduler(args.momentum_teacher, 1, args.epochs, len_dataloader)
print('Loss, optimizer and schedulers ready.')
return (optimizer, fp16_scaler, lr_schedule, wd_schedule, momentum_schedule) |
def thread_safe_generator(f):
def g(*a, **kw):
return ThreadSafeIter(f(*a, **kw))
return g |
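A usage sketch for the decorator above, assuming the `ThreadSafeIter` wrapper it returns serialises `next()` calls behind a lock.
@thread_safe_generator
def infinite_batches():
    i = 0
    while True:
        yield i
        i += 1

batches = infinite_batches()  # a single iterator that worker threads can share safely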
def obtain_wikihow_step_task_occurrence(args, logger):
with open(os.path.join(args.wikihow_dir, 'step_label_text.json'), 'r') as f:
wikihow = json.load(f)
step_id = 0
step_id_to_article_po = defaultdict(tuple)
for article_id in range(len(wikihow)):
for article_step_idx in range(len(wikihow[article_id])):
step_id_to_article_po[step_id] = (article_id, article_step_idx)
step_id += 1
with open(os.path.join(args.wikihow_dir, 'article_id_to_title.txt'), 'r') as f:
article_id_to_wikhow_taskname = {int(line.rstrip().split('\t')[0]): line.rstrip().split('\t')[1] for line in f.readlines()}
wikihow_tasknames = set(article_id_to_wikhow_taskname.values())
wikihow_taskname_to_taskid = dict()
wikihow_taskid_to_taskname = dict()
for task_name in wikihow_tasknames:
wikihow_taskname_to_taskid[task_name] = len(wikihow_taskname_to_taskid)
wikihow_taskid_to_taskname[wikihow_taskname_to_taskid[task_name]] = task_name
wikihow_step_task_occurrence = np.zeros((len(step_id_to_article_po), len(wikihow_tasknames)))
for step_id in range(len(step_id_to_article_po)):
(article_id, _) = step_id_to_article_po[step_id]
wikihow_step_task_occurrence[(step_id, wikihow_taskname_to_taskid[article_id_to_wikhow_taskname[article_id]])] += 1
return (wikihow_step_task_occurrence, wikihow_taskid_to_taskname, wikihow_taskname_to_taskid) |
class DWCPatchEmbed(nn.Module):
def __init__(self, in_chans=3, embed_dim=768, patch_size=16, stride=1, act_layer=nn.Hardswish):
super().__init__()
self.patch_conv = DWConv2d_BN(in_chans, embed_dim, kernel_size=patch_size, stride=stride, act_layer=act_layer)
def forward(self, x):
x = self.patch_conv(x)
return x |
class MaskedLMDictionary(Dictionary):
def __init__(self, pad='<pad>', eos='</s>', unk='<unk>', mask='<mask>'):
super().__init__(pad, eos, unk)
self.mask_word = mask
self.mask_index = self.add_symbol(mask)
self.nspecial = len(self.symbols)
def mask(self):
return self.mask_index |
def StochasticResNet56_08(num_classes=10):
return StochasticResNet(StochasticBlock, layers=([9] * 3), filters=[16, 32, 64], min_survival_rate=0.8, decay='linear', num_classes=num_classes) |
def dynamic_mix_data_prep(hparams):
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(csv_path=hparams['train_data'], replacements={'data_root': hparams['data_folder']})
(spk_hashtable, spk_weights) = build_spk_hashtable(hparams)
spk_list = [x for x in spk_hashtable.keys()]
spk_weights = [(x / sum(spk_weights)) for x in spk_weights]
if ('wham' in Path(hparams['data_folder']).stem):
noise_files = get_wham_noise_filenames(hparams)
@sb.utils.data_pipeline.takes('mix_wav')
@sb.utils.data_pipeline.provides('mix_sig', 's1_sig', 's2_sig', 's3_sig', 'noise_sig')
def audio_pipeline(mix_wav):
speakers = np.random.choice(spk_list, hparams['num_spks'], replace=False, p=spk_weights)
if ('wham' in Path(hparams['data_folder']).stem):
noise_file = np.random.choice(noise_files, 1, replace=False)
(noise, fs_read) = torchaudio.load(noise_file[0])
noise = noise.squeeze()
sources = []
first_lvl = None
spk_files = [np.random.choice(spk_hashtable[spk], 1, False)[0] for spk in speakers]
minlen = min(*[torchaudio.info(x).num_frames for x in spk_files], hparams['training_signal_len'])
for (i, spk_file) in enumerate(spk_files):
length = torchaudio.info(spk_file).num_frames
start = 0
stop = length
if (length > minlen):
start = np.random.randint(0, (length - minlen))
stop = (start + minlen)
(tmp, fs_read) = torchaudio.load(spk_file, frame_offset=start, num_frames=(stop - start))
tmp = tmp[0]
if (i == 0):
gain = np.clip(random.normalvariate((- 27.43), 2.57), (- 45), 0)
tmp = rescale(tmp, torch.tensor(len(tmp)), gain, scale='dB')
first_lvl = gain
else:
gain = np.clip((first_lvl + random.normalvariate((- 2.51), 2.66)), (- 45), 0)
tmp = rescale(tmp, torch.tensor(len(tmp)), gain, scale='dB')
sources.append(tmp)
sources = torch.stack(sources)
mixture = torch.sum(sources, 0)
if ('wham' in Path(hparams['data_folder']).stem):
len_noise = len(noise)
len_mix = len(mixture)
min_len = min(len_noise, len_mix)
mixture = (mixture[:min_len] + noise[:min_len])
max_amp = max(torch.abs(mixture).max().item(), *[x.item() for x in torch.abs(sources).max(dim=(- 1))[0]])
mix_scaling = ((1 / max_amp) * 0.9)
sources = (mix_scaling * sources)
mixture = (mix_scaling * mixture)
(yield mixture)
for i in range(hparams['num_spks']):
(yield sources[i])
if (hparams['num_spks'] == 2):
(yield None)
if ('wham' in Path(hparams['data_folder']).stem):
mean_source_lvl = sources.abs().mean()
mean_noise_lvl = noise.abs().mean()
noise = ((mean_source_lvl / mean_noise_lvl) * noise)
(yield noise)
else:
(yield None)
sb.dataio.dataset.add_dynamic_item([train_data], audio_pipeline)
sb.dataio.dataset.set_output_keys([train_data], ['id', 'mix_sig', 's1_sig', 's2_sig', 's3_sig', 'noise_sig'])
train_data = torch.utils.data.DataLoader(train_data, batch_size=hparams['dataloader_opts']['batch_size'], num_workers=hparams['dataloader_opts']['num_workers'], collate_fn=PaddedBatch, worker_init_fn=(lambda x: np.random.seed((int.from_bytes(os.urandom(4), 'little') + x))))
return train_data |
def register_Ns3UlGrant_s_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::UlGrant_s const &', 'arg0')])
cls.add_instance_attribute('m_cqiRequest', 'bool', is_const=False)
cls.add_instance_attribute('m_hopping', 'bool', is_const=False)
cls.add_instance_attribute('m_mcs', 'uint8_t', is_const=False)
cls.add_instance_attribute('m_rbLen', 'uint8_t', is_const=False)
cls.add_instance_attribute('m_rbStart', 'uint8_t', is_const=False)
cls.add_instance_attribute('m_rnti', 'uint16_t', is_const=False)
cls.add_instance_attribute('m_tbSize', 'uint16_t', is_const=False)
cls.add_instance_attribute('m_tpc', 'int8_t', is_const=False)
cls.add_instance_attribute('m_ulDelay', 'bool', is_const=False)
return |
class AuxData(Generic[T]):
def __init__(self, layout: (contents.Content | record.Record), is_highlevel: bool, behavior: (dict | None)=None):
self._layout = layout
self._behavior = behavior
self._is_highlevel = is_highlevel
@classmethod
def from_array_or_layout(cls, obj: T):
is_highlevel = isinstance(obj, (highlevel.Array, highlevel.Record))
if is_highlevel:
layout = obj.layout
elif isinstance(obj, (contents.Content, record.Record)):
layout = obj
else:
raise TypeError
jax_backend = JaxBackend.instance()
layout = layout.to_backend(jax_backend)
buffers = find_all_buffers(layout)
return (buffers, AuxData(layout=layout, is_highlevel=is_highlevel, behavior=behavior_of(obj)))
@property
def layout(self) -> (contents.Content | record.Record):
return self._layout
@property
def behavior(self) -> (dict | None):
return self._behavior
@property
def is_highlevel(self) -> bool:
return self._is_highlevel
def unflatten(self, buffers: tuple) -> T:
for buffer in buffers:
dtype = getattr(buffer, 'dtype', None)
if (dtype == np.dtype([('float0', 'V')])):
raise TypeError(f'a buffer with the dtype {buffer.dtype} was encountered during unflattening. JAX uses this dtype for the tangents of integer/boolean outputs; these cannot reasonably be differentiated. Make sure that you are not computing the derivative of a boolean/integer (array) valued function.')
layout = replace_all_buffers(self._layout, list(buffers), backend=JaxBackend.instance())
return wrap_layout(layout, behavior=self._behavior, highlevel=self._is_highlevel)
def __eq__(self, other: AuxData) -> bool:
return self.layout.is_equal_to(other.layout, index_dtype=False, numpyarray=False) |
@pytest.mark.parametrize('edges', (False, True))
@pytest.mark.parametrize('texture', (False, True))
def test_multiscale_basic_features_gray(edges, texture):
img = np.zeros((20, 20))
img[:10] = 1
img += (0.05 * np.random.randn(*img.shape))
features = multiscale_basic_features(img, edges=edges, texture=texture)
n_sigmas = 6
intensity = True
assert (features.shape[(- 1)] == (n_sigmas * ((int(intensity) + int(edges)) + (2 * int(texture)))))
assert (features.shape[:(- 1)] == img.shape[:]) |
def get_version() -> str:
init = open(os.path.join('offlinerl', '__init__.py'), 'r').read().split()
return init[(init.index('__version__') + 2)][1:(- 1)] |