class Collator(object):
def __init__(self, lthresh=None):
self.lthresh = lthresh
def __call__(self, batch):
(waveforms, targets) = ([], [])
for data in batch:
            if (self.lthresh is None):
waveforms += [data['array'].numpy().flatten()]
else:
waveforms += [data['array'].numpy().flatten()[:self.lthresh]]
targets += [data['label']]
targets = torch.tensor(targets)
        return (waveforms, targets)
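# A minimal usage sketch (an assumption, not from the source): wiring the
# collator above into a torch DataLoader. The fake rows mimic dataset items
# carrying an 'array' tensor and an integer 'label'.
import torch
from torch.utils.data import DataLoader

fake_rows = [{'array': torch.randn(1, 16000), 'label': i % 2} for i in range(4)]
loader = DataLoader(fake_rows, batch_size=2, collate_fn=Collator(lthresh=8000))
for waveforms, targets in loader:
    pass  # waveforms: list of 1-D numpy arrays cut to lthresh; targets: LongTensor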
@pytest.mark.parametrize('seed', [412])
@pytest.mark.parametrize('batch_size', [2, 16])
@pytest.mark.parametrize('grid_size', [2, 8])
@pytest.mark.parametrize('feature_size', [4])
@pytest.mark.parametrize('m, M', [(-1, 1)])
def test_query_on_triplane_forward_backward(seed, batch_size, grid_size, feature_size, m, M):
nn.clear_parameters()
ctx = get_extension_context('cudnn', device_id='0')
nn.set_default_context(ctx)
B = batch_size
G = grid_size
D = feature_size
rng = np.random.RandomState(seed)
query_data = (m + (rng.rand(batch_size, 3) * (M - m)))
initializer_data = (rng.randn(3, G, G, D) * 0.01)
query_data0 = query_data.astype(np.float32)
initializer_data0 = initializer_data.astype(np.float32)
query0 = nn.Variable.from_numpy_array(query_data0).apply(need_grad=True)
feature0 = nn.parameter.get_parameter_or_create('F0', (3, G, G, D), initializer_data0)
output0 = query_on_triplane_composite(query0, feature0, m, M)
query_data1 = query_data.astype(np.float32)
initializer_data1 = initializer_data.astype(np.float32)
query1 = nn.Variable.from_numpy_array(query_data1).apply(need_grad=True)
feature1 = nn.parameter.get_parameter_or_create('F1', (3, G, G, D), initializer_data1)
output1 = F.query_on_triplane(query1, feature1, ([m] * 3), ([M] * 3))
output0.forward(clear_no_need_grad=True)
output1.forward(clear_no_need_grad=True)
np.testing.assert_allclose(output0.d, output1.d, atol=1e-06)
query0.grad.fill(0)
query1.grad.fill(0)
feature0.grad.fill(0)
feature1.grad.fill(0)
ograd = rng.randn(*output0.shape).astype(np.float32)
output0.backward(ograd, clear_buffer=True)
output1.backward(ograd, clear_buffer=True)
    np.testing.assert_allclose(feature0.g, feature1.g, atol=1e-06)
class BodyDef(BaseDef):
ctype: str = Field(regex='^(application/x-www-form-urlencoded|application/json)$')
    content: Dict[(str, FieldDefUnion)]
def main():
args = _parse_args()
if args.tsv:
(data, discrete_columns) = read_tsv(args.data, args.metadata)
else:
(data, discrete_columns) = read_csv(args.data, args.metadata, args.header, args.discrete)
if args.load:
model = CTGAN.load(args.load)
else:
generator_dim = [int(x) for x in args.generator_dim.split(',')]
discriminator_dim = [int(x) for x in args.discriminator_dim.split(',')]
model = CTGAN(embedding_dim=args.embedding_dim, generator_dim=generator_dim, discriminator_dim=discriminator_dim, generator_lr=args.generator_lr, generator_decay=args.generator_decay, discriminator_lr=args.discriminator_lr, discriminator_decay=args.discriminator_decay, batch_size=args.batch_size, epochs=args.epochs)
model.fit(data, discrete_columns)
if (args.save is not None):
model.save(args.save)
num_samples = (args.num_samples or len(data))
if (args.sample_condition_column is not None):
assert (args.sample_condition_column_value is not None)
sampled = model.sample(num_samples, args.sample_condition_column, args.sample_condition_column_value)
if args.tsv:
write_tsv(sampled, args.metadata, args.output)
else:
        sampled.to_csv(args.output, index=False)
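# Hedged sketch of the same flow without the CLI, using CTGAN's fit/sample API
# ('data' and 'discrete_columns' as produced by read_csv above):
# model = CTGAN(epochs=10)
# model.fit(data, discrete_columns)
# sampled = model.sample(len(data))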
def _get_activation_fn(activation: str) -> Callable[([Tensor], Tensor)]:
if (activation == 'relu'):
return F.relu
elif (activation == 'gelu'):
return F.gelu
    raise RuntimeError('activation should be relu/gelu, not {}'.format(activation))
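# A tiny check sketch (assumes F is torch.nn.functional, as the lookup above
# implies):
import torch
import torch.nn.functional as F

act = _get_activation_fn('gelu')
assert torch.equal(act(torch.zeros(3)), F.gelu(torch.zeros(3)))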
class NormalNoise(Explorer):
_mean: float
_std: float
def __init__(self, mean: float=0.0, std: float=0.1):
self._mean = mean
self._std = std
def sample(self, algo: QLearningAlgoProtocol, x: Observation, step: int) -> NDArray:
action = algo.predict(x)
noise = np.random.normal(self._mean, self._std, size=action.shape)
minimum: Union[(float, NDArray)]
maximum: Union[(float, NDArray)]
if isinstance(algo.action_scaler, MinMaxActionScaler):
assert (algo.action_scaler.minimum is not None)
assert (algo.action_scaler.maximum is not None)
minimum = algo.action_scaler.minimum
maximum = algo.action_scaler.maximum
else:
minimum = (- 1.0)
maximum = 1.0
        return np.clip((action + noise), minimum, maximum)
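# Sketch of the exploration rule above without a d3rlpy algo object (an
# illustration, not the library API): Gaussian noise is added to a
# deterministic action, then clipped to the default [-1, 1] bounds.
import numpy as np

demo_action = np.array([[0.5, -0.2]])
demo_noise = np.random.normal(0.0, 0.1, size=demo_action.shape)
noisy_action = np.clip(demo_action + demo_noise, -1.0, 1.0)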
def gen_truth(fname, modulename):
with open((fname + '.v')) as file:
f = open((fname + '_tb.v'), 'w+')
line = file.readline()
inp = 0
out = 0
n_inputs = 0
n_outputs = 0
while line:
line.strip()
tokens = re.split('[ ,;\n]', line)
for t in tokens:
t.strip()
if (t != ''):
if ((inp == 1) and (t != 'output')):
n_inputs += 1
if ((out == 1) and (t != 'wire') and (t != 'assign')):
n_outputs += 1
if (t == 'input'):
inp = 1
elif (t == 'output'):
out = 1
inp = 0
elif ((t == 'wire') or (t == 'assign')):
out = 0
line = file.readline()
file.close()
if (n_inputs > 16):
        print('BLASYS cannot handle more than 16 inputs per partition; reduce partition sizes')
exit((- 1))
f.write((('module ' + modulename) + '_tb;\n'))
f.write((('reg [' + str((n_inputs - 1))) + ':0] pi;\n'))
f.write((('wire [' + str((n_outputs - 1))) + ':0] po;\n'))
f.write((modulename + ' dut('))
with open((fname + '.v')) as file:
line = file.readline()
inp = 0
out = 0
first = 1
end = 0
i = 0
while line:
line.strip()
tokens = re.split('[ ,;\n]', line)
for t in tokens:
t.strip()
if (t != ''):
if ((inp == 1) and (t != 'output')):
if (first == 0):
f.write(((', pi[' + str(((n_inputs - i) - 1))) + ']'))
else:
first = 0
f.write((('pi[' + str(((n_inputs - i) - 1))) + ']'))
i = (i + 1)
if ((out == 1) and (t != 'wire') and (t != 'assign')):
if (first == 0):
f.write(((', po[' + str(((n_outputs - i) - 1))) + ']'))
else:
first = 0
f.write(((', po[' + str(((n_outputs - i) - 1))) + ']'))
i += 1
if (t == 'input'):
inp = 1
elif (t == 'output'):
i = 0
first = 1
out = 1
inp = 0
elif ((t == 'wire') or (t == 'assign')):
if (not end):
f.write(');\n')
end = 1
out = 0
line = file.readline()
file.close()
f.write('initial\n')
f.write('begin\n')
j = 0
while (j < (2 ** n_inputs)):
f.write((('# 1 pi=' + str(n_inputs)) + "'b"))
str1 = bin(j).replace('0b', '')
str2 = ('0' * (n_inputs - len(str1)))
n = (str2 + str1)
f.write(n)
f.write(';\n')
f.write('#1 $display("%b", po);\n')
j += 1
f.write('end\n')
f.write('endmodule\n')
f.close()
    return (n_inputs, n_outputs)
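# Sketch of the emitted testbench (for a hypothetical 2-input, 2-output
# module named "m"; the shape follows the writes above):
#   module m_tb;
#   reg [1:0] pi;
#   wire [1:0] po;
#   m dut(pi[1], pi[0], po[1], po[0]);
#   initial
#   begin
#   # 1 pi=2'b00;
#   #1 $display("%b", po);
#   ...  (repeats for all 2**n_inputs input patterns)
#   end
#   endmodule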
def should_strip_ansi(stream=None, color=None):
if (color is None):
if (stream is None):
stream = sys.stdin
return ((not isatty(stream)) and (not _is_jupyter_kernel_output(stream)))
    return (not color)
def uniform32_from_uint(x, bits):
if (bits == 64):
return uniform32_from_uint64(x)
elif (bits == 53):
return uniform32_from_uint53(x)
elif (bits == 32):
return uniform32_from_uint32(x)
else:
        raise NotImplementedError
def main():
(rank, world_size) = dist_init()
logger.info('init done')
cfg.merge_from_file(args.cfg)
if (rank == 0):
if (not os.path.exists(cfg.TRAIN.LOG_DIR)):
os.makedirs(cfg.TRAIN.LOG_DIR)
init_log('global', logging.INFO)
if cfg.TRAIN.LOG_DIR:
add_file_handler('global', os.path.join(cfg.TRAIN.LOG_DIR, 'logs.txt'), logging.INFO)
logger.info('Version Information: \n{}\n'.format(commit()))
logger.info('config \n{}'.format(json.dumps(cfg, indent=4)))
model = ModelBuilder().cuda().train()
if cfg.BACKBONE.PRETRAINED:
cur_path = os.path.dirname(os.path.realpath(__file__))
backbone_path = os.path.join(cur_path, '../', cfg.BACKBONE.PRETRAINED)
load_pretrain(model.backbone, backbone_path)
if ((rank == 0) and cfg.TRAIN.LOG_DIR):
tb_writer = SummaryWriter(cfg.TRAIN.LOG_DIR)
else:
tb_writer = None
train_loader = build_data_loader()
(optimizer, lr_scheduler) = build_opt_lr(model, cfg.TRAIN.START_EPOCH)
if cfg.TRAIN.RESUME:
logger.info('resume from {}'.format(cfg.TRAIN.RESUME))
assert os.path.isfile(cfg.TRAIN.RESUME), '{} is not a valid file.'.format(cfg.TRAIN.RESUME)
(model, optimizer, cfg.TRAIN.START_EPOCH) = restore_from(model, optimizer, cfg.TRAIN.RESUME)
elif cfg.TRAIN.PRETRAINED:
load_pretrain(model, cfg.TRAIN.PRETRAINED)
dist_model = DistModule(model)
logger.info(lr_scheduler)
logger.info('model prepare done')
    train(train_loader, dist_model, optimizer, lr_scheduler, tb_writer)
class WheelFile(ZipFile):
_default_algorithm = hashlib.sha256
def __init__(self, file, mode='r'):
basename = os.path.basename(file)
self.parsed_filename = WHEEL_INFO_RE.match(basename)
if ((not basename.endswith('.whl')) or (self.parsed_filename is None)):
raise WheelError('Bad wheel filename {!r}'.format(basename))
super(WheelFile, self).__init__(file, mode, compression=ZIP_DEFLATED, allowZip64=True)
self.dist_info_path = '{}.dist-info'.format(self.parsed_filename.group('namever'))
self.record_path = (self.dist_info_path + '/RECORD')
self._file_hashes = OrderedDict()
self._file_sizes = {}
if (mode == 'r'):
self._file_hashes[self.record_path] = (None, None)
self._file_hashes[(self.record_path + '.jws')] = (None, None)
self._file_hashes[(self.record_path + '.p7s')] = (None, None)
try:
record = self.open(self.record_path)
except KeyError:
raise WheelError('Missing {} file'.format(self.record_path))
with record:
for line in record:
line = line.decode('utf-8')
(path, hash_sum, size) = line.rsplit(u',', 2)
if hash_sum:
(algorithm, hash_sum) = hash_sum.split(u'=')
if (algorithm not in hashlib.algorithms_available):
raise WheelError('Unsupported hash algorithm: {}'.format(algorithm))
elif (algorithm.lower() in {'md5', 'sha1'}):
raise WheelError('Weak hash algorithm ({}) is not permitted by PEP 427'.format(algorithm))
self._file_hashes[path] = (algorithm, urlsafe_b64decode(hash_sum.encode('ascii')))
def open(self, name_or_info, mode='r', pwd=None):
def _update_crc(newdata, eof=None):
if (eof is None):
eof = ef._eof
update_crc_orig(newdata)
else:
update_crc_orig(newdata, eof)
running_hash.update(newdata)
if (eof and (running_hash.digest() != expected_hash)):
raise WheelError("Hash mismatch for file '{}'".format(native(ef_name)))
ef = super(WheelFile, self).open(name_or_info, mode, pwd)
ef_name = as_unicode((name_or_info.filename if isinstance(name_or_info, ZipInfo) else name_or_info))
if ((mode == 'r') and (not ef_name.endswith('/'))):
if (ef_name not in self._file_hashes):
raise WheelError("No hash found for file '{}'".format(native(ef_name)))
(algorithm, expected_hash) = self._file_hashes[ef_name]
if (expected_hash is not None):
running_hash = hashlib.new(algorithm)
(update_crc_orig, ef._update_crc) = (ef._update_crc, _update_crc)
return ef
def write_files(self, base_dir):
logger.info("creating '%s' and adding '%s' to it", self.filename, base_dir)
deferred = []
for (root, dirnames, filenames) in os.walk(base_dir):
dirnames.sort()
for name in sorted(filenames):
path = os.path.normpath(os.path.join(root, name))
if os.path.isfile(path):
arcname = os.path.relpath(path, base_dir)
if (arcname == self.record_path):
pass
elif root.endswith('.dist-info'):
deferred.append((path, arcname))
else:
self.write(path, arcname)
deferred.sort()
for (path, arcname) in deferred:
self.write(path, arcname)
def write(self, filename, arcname=None, compress_type=None):
with open(filename, 'rb') as f:
st = os.fstat(f.fileno())
data = f.read()
zinfo = ZipInfo((arcname or filename), date_time=get_zipinfo_datetime(st.st_mtime))
zinfo.external_attr = (st.st_mode << 16)
zinfo.compress_type = ZIP_DEFLATED
self.writestr(zinfo, data, compress_type)
def writestr(self, zinfo_or_arcname, bytes, compress_type=None):
super(WheelFile, self).writestr(zinfo_or_arcname, bytes, compress_type)
fname = (zinfo_or_arcname.filename if isinstance(zinfo_or_arcname, ZipInfo) else zinfo_or_arcname)
logger.info("adding '%s'", fname)
if (fname != self.record_path):
hash_ = self._default_algorithm(bytes)
self._file_hashes[fname] = (hash_.name, native(urlsafe_b64encode(hash_.digest())))
self._file_sizes[fname] = len(bytes)
def close(self):
if ((self.fp is not None) and (self.mode == 'w') and self._file_hashes):
content = '\n'.join(('{},{}={},{}'.format(fname, algorithm, hash_, self._file_sizes[fname]) for (fname, (algorithm, hash_)) in self._file_hashes.items()))
content += '\n{},,\n'.format(self.record_path)
zinfo = ZipInfo(native(self.record_path), date_time=get_zipinfo_datetime())
zinfo.compress_type = ZIP_DEFLATED
self.writestr(zinfo, as_bytes(content))
        super(WheelFile, self).close()
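# Hedged usage sketch: reading an existing wheel verifies each extracted member
# against its RECORD hash (the filename below is illustrative).
# with WheelFile('example-1.0-py3-none-any.whl') as wf:
#     names = wf.namelist()        # plain ZipFile API still applies
#     data = wf.read(names[0])     # raises WheelError on a hash mismatch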
@pytest.mark.operations('failure', 'multiple_failures')
def test_exit_first(any_app_schema):
results = list(from_schema(any_app_schema, exit_first=True).execute())
assert (results[(- 1)].has_failures is True)
    assert (results[(- 1)].failed_count == 1)
def create_stmt_from_unaryop(unaryop: ast.UnaryOp, testcase: tc.TestCase, constant_provider: ConstantProvider) -> (stmt.VariableCreatingStatement | None):
val = unaryop.operand.value
if isinstance(val, bool):
return stmt.BooleanPrimitiveStatement(testcase, (not val))
if isinstance(val, float):
return stmt.FloatPrimitiveStatement(testcase, ((- 1) * val), constant_provider=constant_provider)
if isinstance(val, int):
return stmt.IntPrimitiveStatement(testcase, ((- 1) * val), constant_provider=constant_provider)
logger.info('Could not find case for unary operator while handling assign statement.')
    return None
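# A small sketch of the input this handles: the AST of a negated literal.
import ast

demo_node = ast.parse('-42', mode='eval').body
assert isinstance(demo_node, ast.UnaryOp) and demo_node.operand.value == 42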
class Diagram(ClonableArray, metaclass=InheritComparisonClasscallMetaclass):
def __classcall_private__(self, cells, n_rows=None, n_cols=None, check=True):
return Diagrams()(cells, n_rows, n_cols, check)
def __init__(self, parent, cells, n_rows=None, n_cols=None, check=True):
self._cells = frozenset(cells)
if self._cells:
N_rows = max((c[0] for c in self._cells))
N_cols = max((c[1] for c in self._cells))
else:
N_rows = (- 1)
N_cols = (- 1)
if (n_rows is not None):
if (n_rows <= N_rows):
raise ValueError('n_rows is too small')
self._n_rows = n_rows
else:
self._n_rows = (N_rows + 1)
if (n_cols is not None):
if (n_cols <= N_cols):
raise ValueError('n_cols is too small')
self._n_cols = n_cols
else:
self._n_cols = (N_cols + 1)
self._n_nonempty_rows = len(set((i for (i, j) in self._cells)))
self._n_nonempty_cols = len(set((j for (i, j) in self._cells)))
ClonableArray.__init__(self, parent, sorted(cells), check)
def pp(self):
if ((self._n_rows == 0) or (self._n_cols == 0)):
print('-')
return
print('\n'.join(self._pretty_print()))
def _ascii_art_(self):
from sage.typeset.ascii_art import ascii_art
if ((self._n_rows == 0) or (self._n_cols == 0)):
return ascii_art('-')
return ascii_art('\n'.join(self._pretty_print()))
def _unicode_art_(self):
from sage.typeset.unicode_art import unicode_art
if ((self._n_rows == 0) or (self._n_cols == 0)):
            return unicode_art('∅')
        ndivs = (self._n_cols - 1)
        cell = '│X'
        empty = '│ '
        it = self._pretty_print(cell, empty)
        ret = (('┌─' + ('┬─' * ndivs)) + '┐')
        ret += (('\n' + next(it)) + '│')
        for row in it:
            ret += (('\n├─' + ('┼─' * ndivs)) + '┤')
            ret += (('\n' + row) + '│')
        ret += (('\n└─' + ('┴─' * ndivs)) + '┘')
        return unicode_art(ret)
def _pretty_print(self, cell='O ', empty='. '):
for i in range(self._n_rows):
output_str = ''
for j in range(self._n_cols):
if ((i, j) in self):
output_str += cell
else:
output_str += empty
            yield output_str
def _latex_(self):
if ((self._n_rows == 0) or (self._n_cols == 0)):
return '{\\emptyset}'
        lr = '\\def\\lr#1{\\multicolumn{1}{|@{\\hspace{.6ex}}c@{\\hspace{.6ex}}|}{\\raisebox{-.3ex}{$#1$}}}'
array = []
for i in range(self._n_rows):
row = []
for j in range(self._n_cols):
row.append(('\\phantom{x}' if ((i, j) in self) else None))
array.append(row)
def end_line(r):
if (r == 0):
return ''.join((('\\cline{%s-%s}' % ((i + 1), (i + 1))) for (i, j) in enumerate(array[0]) if (j is not None)))
elif (r == len(array)):
return ('\\\\' + ''.join((('\\cline{%s-%s}' % ((i + 1), (i + 1))) for (i, j) in enumerate(array[(r - 1)]) if (j is not None))))
else:
out = ('\\\\' + ''.join((('\\cline{%s-%s}' % ((i + 1), (i + 1))) for (i, j) in enumerate(array[(r - 1)]) if (j is not None))))
out += ''.join((('\\cline{%s-%s}' % ((i + 1), (i + 1))) for (i, j) in enumerate(array[r]) if (j is not None)))
return out
tex = ('\\raisebox{-.6ex}{$\\begin{array}[b]{*{%s}{p{0.6ex}}}' % max(map(len, array)))
tex += (end_line(0) + '\n')
for r in range(len(array)):
tex += '&'.join((('' if (c is None) else ('\\lr{%s}' % (c,))) for c in array[r]))
tex += (end_line((r + 1)) + '\n')
return ('{%s\n%s\n}' % (lr, (tex + '\\end{array}$}')))
def number_of_rows(self):
return self._n_rows
nrows = number_of_rows
def number_of_cols(self):
return self._n_cols
ncols = number_of_cols
def cells(self):
return sorted(self._cells)
def number_of_cells(self):
return len(self._cells)
n_cells = number_of_cells
size = number_of_cells
def check(self):
from sage.sets.non_negative_integers import NonNegativeIntegers
NN = NonNegativeIntegers()
if (not all(((i in NN) for c in self._cells for i in c))):
raise ValueError('diagrams must be indexed by non-negative integers')
def specht_module(self, base_ring=None):
from sage.combinat.specht_module import SpechtModule
from sage.combinat.symmetric_group_algebra import SymmetricGroupAlgebra
if (base_ring is None):
from sage.rings.rational_field import QQ
base_ring = QQ
R = SymmetricGroupAlgebra(base_ring, len(self))
return SpechtModule(R, self)
def specht_module_dimension(self, base_ring=None):
from sage.combinat.specht_module import specht_module_rank
        return specht_module_rank(self, base_ring)
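# Hedged usage (Sage doctest style; output follows _pretty_print above):
# sage: D = Diagram([(0, 0), (0, 3), (2, 2)])
# sage: D.pp()
# O . . O
# . . . .
# . . O .
# sage: D.number_of_cells()
# 3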
from collections import OrderedDict
from copy import deepcopy

import torch

class ModelEma():
def __init__(self, model, decay=0.9999, device='', resume='', batch_size=1024, epoch=350):
self.ema = deepcopy(model)
self.ema.eval()
self.decay = decay
self.device = device
if device:
self.ema.to(device=device)
self.ema_has_module = hasattr(self.ema, 'module')
if resume:
self._load_checkpoint(resume)
for p in self.ema.parameters():
p.requires_grad_(False)
def _load_checkpoint(self, checkpoint_path):
checkpoint = torch.load(checkpoint_path)
assert isinstance(checkpoint, dict)
if ('state_dict_ema' in checkpoint):
new_state_dict = OrderedDict()
for (k, v) in checkpoint['state_dict_ema'].items():
if self.ema_has_module:
name = (('module.' + k) if (not k.startswith('module')) else k)
else:
name = k
new_state_dict[name] = v
self.ema.load_state_dict(new_state_dict)
print('=> Loaded state_dict_ema')
else:
print('=> Failed to find state_dict_ema, starting from loaded model weights')
def update(self, model, steps):
needs_module = (hasattr(model, 'module') and (not self.ema_has_module))
decay_rate = self.decay
with torch.no_grad():
msd = model.state_dict()
for (k, ema_v) in self.ema.state_dict().items():
if needs_module:
k = ('module.' + k)
model_v = msd[k].detach()
if self.device:
model_v = model_v.to(device=self.device)
                ema_v.copy_(((ema_v * decay_rate) + ((1.0 - decay_rate) * model_v)))
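# Minimal sketch (an assumption, not from the source): maintaining the EMA
# copy alongside an ordinary training step.
import torch.nn as nn

demo_net = nn.Linear(4, 2)
demo_ema = ModelEma(demo_net, decay=0.99)
demo_opt = torch.optim.SGD(demo_net.parameters(), lr=0.1)
for demo_step in range(3):
    demo_loss = demo_net(torch.randn(8, 4)).pow(2).mean()
    demo_opt.zero_grad()
    demo_loss.backward()
    demo_opt.step()
    demo_ema.update(demo_net, demo_step)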
def generic_setup_mudata_manager(mdata: MuData, layer_mod, layer: Optional[str]=None, batch_mod: Optional[str]=None, batch_key: Optional[str]=None, categorical_covariate_mod: Optional[str]=None, categorical_covariate_keys: Optional[list[str]]=None, continuous_covariate_mod: Optional[str]=None, continuous_covariate_keys: Optional[list[str]]=None, protein_expression_mod: Optional[str]=None, protein_expression_layer: Optional[str]=None) -> AnnDataManager:
setup_args = locals()
setup_args.pop('mdata')
setup_method_args = {_MODEL_NAME_KEY: 'TestModel', _SETUP_ARGS_KEY: setup_args}
batch_field = MuDataCategoricalObsField(REGISTRY_KEYS.BATCH_KEY, batch_key, mod_key=batch_mod)
anndata_fields = [MuDataLayerField(REGISTRY_KEYS.X_KEY, layer, mod_key=layer_mod, is_count_data=True, mod_required=True), batch_field, MuDataCategoricalJointObsField(REGISTRY_KEYS.CAT_COVS_KEY, categorical_covariate_keys, mod_key=categorical_covariate_mod), MuDataNumericalJointObsField(REGISTRY_KEYS.CONT_COVS_KEY, continuous_covariate_keys, mod_key=continuous_covariate_mod)]
if (protein_expression_mod is not None):
anndata_fields.append(MuDataProteinLayerField(REGISTRY_KEYS.PROTEIN_EXP_KEY, protein_expression_layer, mod_key=protein_expression_mod, mod_required=True, use_batch_mask=True, batch_field=batch_field))
mdata_manager = AnnDataManager(fields=anndata_fields, setup_method_args=setup_method_args)
mdata_manager.register_fields(mdata)
    return mdata_manager
class Converter():
def __init__(self, use_fake_div=False):
self.use_fake_div = use_fake_div
def __call__(self, ex=None):
if (ex is None):
ex = self.ex
try:
obj = ex.pyobject()
return self.pyobject(ex, obj)
except TypeError as err:
if ('self must be a numeric expression' not in err.args):
raise err
operator = ex.operator()
if (operator is None):
return self.symbol(ex)
if (operator in arithmetic_operators):
if (getattr(self, 'use_fake_div', False) and ((operator is mul) or (operator is mul_vararg))):
div = self.get_fake_div(ex)
return self.arithmetic(div, div.operator())
return self.arithmetic(ex, operator)
elif (operator in relation_operators):
return self.relation(ex, operator)
elif isinstance(operator, FDerivativeOperator):
return self.derivative(ex, operator)
elif (operator == tuple):
return self.tuple(ex)
else:
return self.composition(ex, operator)
def get_fake_div(self, ex):
d = []
n = []
for arg in ex.operands():
ops = arg.operands()
try:
if ((arg.operator() is pow) and (repr(ops[1]) == '-1')):
d.append(ops[0])
else:
n.append(arg)
except TypeError:
n.append(arg)
len_d = len(d)
if (len_d == 0):
repr_n = [repr(_) for _ in n]
if ((len(n) == 2) and ('-1' in repr_n)):
a = (n[0] if (repr_n[1] == '-1') else n[1])
return FakeExpression([a], neg)
else:
return ex
elif (len_d == 1):
d = d[0]
else:
d = FakeExpression(d, mul)
if (len(n) == 0):
return FakeExpression([SR.one(), d], truediv)
elif (len(n) == 1):
n = n[0]
else:
n = FakeExpression(n, mul)
return FakeExpression([n, d], truediv)
def pyobject(self, ex, obj):
raise NotImplementedError('pyobject')
def symbol(self, ex):
raise NotImplementedError('symbol')
def relation(self, ex, operator):
raise NotImplementedError('relation')
def derivative(self, ex, operator):
raise NotImplementedError('derivative')
def arithmetic(self, ex, operator):
raise NotImplementedError('arithmetic')
def composition(self, ex, operator):
        raise NotImplementedError('composition')
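# A hedged sketch of how the dispatcher above is subclassed: only the hooks a
# given walk will hit need real implementations (DebugConverter is illustrative).
class DebugConverter(Converter):
    def pyobject(self, ex, obj):
        return repr(obj)
    def symbol(self, ex):
        return str(ex)
    def arithmetic(self, ex, operator):
        return [self(op) for op in ex.operands()]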
class Tensor(bb.Object):
def __init__(self, shape: List[int]=None, *, dtype=bb.DType.FP32, host_only=False, core_tensor=None):
if (core_tensor is None):
if (shape is not None):
core_tensor = core.Tensor(shape, dtype.value, host_only)
super(Tensor, self).__init__(core_object=core_tensor)
    @staticmethod
    def from_core(core_tensor):
new_tensor = Tensor(shape=None, core_tensor=core_tensor)
return new_tensor
def is_host_only(self) -> bool:
return self.get_core().is_host_only()
    def get_type(self) -> bb.DType:
return bb.DType(self.get_core().get_type())
def get_shape(self) -> List[int]:
return self.get_core().get_shape()
def numpy(self) -> np.ndarray:
dtype = self.get_core().get_type()
if (dtype == bb.DType.FP32):
return self.get_core().numpy_fp32()
elif (dtype == bb.DType.FP64):
return self.get_core().numpy_fp64()
elif (dtype == bb.DType.INT8):
return self.get_core().numpy_int8()
elif (dtype == bb.DType.INT16):
return self.get_core().numpy_int16()
elif (dtype == bb.DType.INT32):
return self.get_core().numpy_int32()
elif (dtype == bb.DType.INT64):
return self.get_core().numpy_int64()
elif (dtype == bb.DType.UINT8):
return self.get_core().numpy_uint8()
elif (dtype == bb.DType.UINT16):
return self.get_core().numpy_uint16()
elif (dtype == bb.DType.UINT32):
return self.get_core().numpy_uint32()
elif (dtype == bb.DType.UINT64):
return self.get_core().numpy_uint64()
def set_numpy(self, ndarray: np.ndarray):
dtype = self.get_core().get_type()
assert (bb.dtype_numpy_to_bb(ndarray.dtype) == dtype)
assert (ndarray.shape == tuple(self.get_shape()))
if (dtype == bb.DType.FP32):
return self.get_core().set_numpy_fp32(ndarray)
elif (dtype == bb.DType.FP64):
return self.get_core().set_numpy_fp64(ndarray)
elif (dtype == bb.DType.INT8):
return self.get_core().set_numpy_int8(ndarray)
elif (dtype == bb.DType.INT16):
return self.get_core().set_numpy_int16(ndarray)
elif (dtype == bb.DType.INT32):
return self.get_core().set_numpy_int32(ndarray)
elif (dtype == bb.DType.INT64):
return self.get_core().set_numpy_int64(ndarray)
elif (dtype == bb.DType.UINT8):
return self.get_core().set_numpy_uint8(ndarray)
elif (dtype == bb.DType.UINT16):
return self.get_core().set_numpy_uint16(ndarray)
elif (dtype == bb.DType.UINT32):
return self.get_core().set_numpy_uint32(ndarray)
elif (dtype == bb.DType.UINT64):
return self.get_core().set_numpy_uint64(ndarray)
else:
assert 0
    @staticmethod
    def from_numpy(ndarray: np.ndarray, host_only=False):
if (not ndarray.flags['C_CONTIGUOUS']):
ndarray = ndarray.copy(order='C')
if (ndarray.dtype == np.float32):
core_tensor = bb.core.Tensor.from_numpy_fp32(ndarray, host_only)
elif (ndarray.dtype == np.float64):
core_tensor = bb.core.Tensor.from_numpy_fp64(ndarray, host_only)
elif (ndarray.dtype == np.int8):
core_tensor = bb.core.Tensor.from_numpy_int8(ndarray, host_only)
elif (ndarray.dtype == np.int16):
core_tensor = bb.core.Tensor.from_numpy_int16(ndarray, host_only)
elif (ndarray.dtype == np.int32):
core_tensor = bb.core.Tensor.from_numpy_int32(ndarray, host_only)
elif (ndarray.dtype == np.int64):
core_tensor = bb.core.Tensor.from_numpy_int64(ndarray, host_only)
elif (ndarray.dtype == np.uint8):
core_tensor = bb.core.Tensor.from_numpy_uint8(ndarray, host_only)
elif (ndarray.dtype == np.uint16):
core_tensor = bb.core.Tensor.from_numpy_uint16(ndarray, host_only)
elif (ndarray.dtype == np.uint32):
core_tensor = bb.core.Tensor.from_numpy_uint32(ndarray, host_only)
elif (ndarray.dtype == np.uint64):
core_tensor = bb.core.Tensor.from_numpy_uint64(ndarray, host_only)
else:
core_tensor = None
raise TypeError('unsupported')
return Tensor(core_tensor=core_tensor)
def fill_zero(self):
self.get_core().fill_zero()
def fill(self, x):
self.get_core().fill(x)
def isnan(self):
core_tensor = self.get_core().isnan()
return Tensor(core_tensor=core_tensor)
def min(self):
core_tensor = self.get_core().min()
return Tensor(core_tensor=core_tensor)
def max(self):
core_tensor = self.get_core().max()
return Tensor(core_tensor=core_tensor)
def quantize(self, bits, scale=0.0, offset=0):
core_tensor = self.get_core().quantize(bits, scale, offset)
return Tensor(core_tensor=core_tensor)
def clamp_inplace(self, a, b):
self.get_core().clamp_inplace(a, b)
def sqrt_inplace(self):
self.get_core().sqrt_inplace()
def exp_inplace(self):
self.get_core().exp_inplace()
def sum(self):
core_tensor = self.get_core().sum()
return Tensor(core_tensor=core_tensor)
def mean(self):
core_tensor = self.get_core().mean()
return Tensor(core_tensor=core_tensor)
def var(self):
core_tensor = self.get_core().var()
return Tensor(core_tensor=core_tensor)
def std(self):
core_tensor = self.get_core().std()
return Tensor(core_tensor=core_tensor)
def __add__(self, x):
if (type(x) == Tensor):
core_tensor = (self.get_core() + x.get_core())
else:
core_tensor = (self.get_core() + float(x))
return Tensor(core_tensor=core_tensor)
def __sub__(self, x):
if (type(x) == Tensor):
core_tensor = (self.get_core() - x.get_core())
else:
core_tensor = (self.get_core() - float(x))
return Tensor(core_tensor=core_tensor)
def __mul__(self, x):
if (type(x) == Tensor):
core_tensor = (self.get_core() * x.get_core())
else:
core_tensor = (self.get_core() * float(x))
return Tensor(core_tensor=core_tensor)
def __truediv__(self, x):
if (type(x) == Tensor):
core_tensor = (self.get_core() / x.get_core())
else:
core_tensor = (self.get_core() / float(x))
return Tensor(core_tensor=core_tensor)
def __radd__(self, x):
if (type(x) == Tensor):
core_tensor = (x.get_core() + self.get_core())
else:
core_tensor = (float(x) + self.get_core())
return Tensor(core_tensor=core_tensor)
def __rsub__(self, x):
if (type(x) == Tensor):
core_tensor = (x.get_core() - self.get_core())
else:
core_tensor = (float(x) - self.get_core())
return Tensor(core_tensor=core_tensor)
def __rmul__(self, x):
if (type(x) == Tensor):
core_tensor = (x.get_core() * self.get_core())
else:
core_tensor = (float(x) * self.get_core())
return Tensor(core_tensor=core_tensor)
def __rtruediv__(self, x):
if (type(x) == Tensor):
core_tensor = (x.get_core() / self.get_core())
else:
core_tensor = (float(x) / self.get_core())
return Tensor(core_tensor=core_tensor)
def __iadd__(self, x):
core_tensor = self.get_core()
if (type(x) == Tensor):
core_tensor += x.get_core()
else:
core_tensor += float(x)
return self
def __isub__(self, x):
core_tensor = self.get_core()
if (type(x) == Tensor):
core_tensor -= x.get_core()
else:
core_tensor -= float(x)
return self
def __imul__(self, x):
core_tensor = self.get_core()
if (type(x) == Tensor):
core_tensor *= x.get_core()
else:
core_tensor *= float(x)
return self
def __itruediv__(self, x):
core_tensor = self.get_core()
if (type(x) == Tensor):
core_tensor /= x.get_core()
else:
core_tensor /= float(x)
        return self
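# Hedged usage sketch: round-tripping a numpy array through the wrapper above
# ('bb' is the binarybrain-style package the snippet assumes).
# t = Tensor.from_numpy(np.arange(6, dtype=np.float32).reshape(2, 3))
# assert tuple(t.get_shape()) == (2, 3)
# back = t.numpy()     # dispatched to numpy_fp32 by the stored dtype
# t += 1.0             # in-place ops mutate the underlying core tensor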
def constrained_birkhoff_von_neumann_decomposition(X, constraint_structure):
    feasibility_test(X, constraint_structure)
    graph = graph_constructor(X, bihierarchy_test(constraint_structure), constraint_structure)
    return solution_cleaner(X, iterate_constrained_birkhoff_von_neumann_iterator(X, graph))
def test_arrow_union_dense_null():
a = pyarrow.UnionArray.from_dense(pyarrow.array([0, 1, 0, 0, 0, 1, 1], type=pyarrow.int8()), pyarrow.array([0, 0, 1, 2, 3, 1, 2], type=pyarrow.int32()), [pyarrow.array([0.0, 1.1, None, 3.3]), pyarrow.array([True, True, False])])
    assert (to_list(ak._connect.pyarrow.handle_arrow(a)) == [0.0, True, 1.1, None, 3.3, True, False])
class FP16(nn.Module):
def __init__(self, module):
super(FP16, self).__init__()
self.module = BN_convert_float(module.half())
def forward(self, input, **kwargs):
        return self.module(input.half(), **kwargs)
def split_by_worker(urls):
import torch
urls = [url for url in urls]
assert isinstance(urls, list)
worker_info = torch.utils.data.get_worker_info()
if (worker_info is not None):
wid = worker_info.id
num_workers = worker_info.num_workers
if ((wid == 0) and (len(urls) < num_workers)):
warnings.warn(f'num_workers {num_workers} > num_shards {len(urls)}')
return urls[wid::num_workers]
else:
        return urls
def load_weights(weight_file):
    if (weight_file is None):
return
try:
weights_dict = np.load(weight_file).item()
except:
weights_dict = np.load(weight_file, encoding='bytes').item()
    return weights_dict
def create_causal_relation_table(relations=None, height=500):
if ((relations is None) or (len(relations) == 0)):
data = [{'Node A': '', 'Relation': '', 'Node B': ''}]
else:
data = []
for (key, val) in relations.items():
(i, j) = key.split('<split>')
data.append({'Node A': i, 'Relation': val, 'Node B': j})
table = dash_table.DataTable(id='causal-relations', data=data, columns=[{'id': 'Node A', 'name': 'Node A'}, {'id': 'Relation', 'name': 'Relation'}, {'id': 'Node B', 'name': 'Node B'}], editable=False, sort_action='native', style_header_conditional=[{'textAlign': 'center'}], style_cell_conditional=[{'textAlign': 'center'}], style_header=dict(backgroundColor=TABLE_HEADER_COLOR), style_data=dict(backgroundColor=TABLE_DATA_COLOR), style_table={'overflowX': 'scroll', 'overflowY': 'scroll', 'height': height})
    return table
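# Hedged usage sketch: relation keys are 'nodeA<split>nodeB' strings, values
# are the labels shown in the middle column.
# table = create_causal_relation_table({'rain<split>wet_ground': 'causes'}, height=300)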
def _imgpath(img_dir, name):
img_path = os.path.join(img_dir, name)
if (not os.path.exists(img_path)):
return 'nofile'
    return img_path
class Trainer(RONet):
def __init__(self):
RONet.__init__(self, FLAGS)
def placeholder_inputs(self, batch_size, img_size, lab_size, channels):
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, img_size, img_size, channels))
labels_placeholder = tf.placeholder(tf.float32, shape=(batch_size, lab_size, lab_size, channels))
return (images_placeholder, labels_placeholder)
def fill_feed_dict(self, sess, img_batch, images_pl, labels_pl):
(images_feed, labels_feed) = sess.run(img_batch)
feed_dict = {images_pl: images_feed, labels_pl: labels_feed}
return feed_dict
def run_training(self):
with tf.Graph().as_default():
dataloader = Dataloader(FLAGS)
(train_dataset, valid_dataset) = dataloader.generate_dataset(FLAGS.input_data_dir)
train_batch = train_dataset.make_one_shot_iterator().get_next()
valid_batch = valid_dataset.make_one_shot_iterator().get_next()
(images_pl, labels_pl) = self.placeholder_inputs(FLAGS.train_batch_size, FLAGS.train_patch_size, (FLAGS.train_patch_size * self.upscale), self.out_channel)
(images_pl_valid, labels_pl_valid) = self.placeholder_inputs(FLAGS.valid_batch_size, FLAGS.valid_patch_size, (FLAGS.valid_patch_size * self.upscale), self.out_channel)
(err_decomp, output) = self.infer(images_pl, labels_pl, summary=True)
valid_output = self.test_infer(images_pl_valid)
global_step = tf.Variable(0, name='global_step', trainable=False)
learning_rates = [0.0001, 1e-05, 1e-06]
boundaries = [400000, 700000]
learning_rate = tf.train.piecewise_constant(global_step, boundaries=boundaries, values=learning_rates)
loss = self.loss(err_decomp, output, labels_pl, FLAGS.reg_para)
train_op = self.training(loss, learning_rate, global_step)
eval_perform = self.evaluation(output, labels_pl, 'Train')
eval_valid = self.evaluation(valid_output, labels_pl_valid, 'Test')
summary = tf.summary.merge_all()
all_vars = tf.global_variables()
vars_decomp = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'decomp')
var_save = ((vars_decomp + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'RORec')) + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'global_step'))
saver = tf.train.Saver(var_save, max_to_keep=2)
decomp_saver = tf.train.Saver(vars_decomp)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
total_paras = 0
trainable_vars = [var for var in tf.trainable_variables() if (var in var_save)]
for ele in trainable_vars:
total_paras += np.prod(np.array(ele.get_shape(), np.int32))
total_paras = (float(total_paras) / 1000000.0)
print(f'Total trainable parameters: {total_paras:0.2f}M')
sess.run(tf.variables_initializer(all_vars))
if (self.task in ['BiSR', 'ReSR']):
vgg_19_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'vgg_19')
vgg_19_saver = tf.train.Saver(var_list=vgg_19_vars)
vgg_19_saver.restore(sess, FLAGS.vgg_checkpoint)
if FLAGS.resume:
saver.restore(sess, FLAGS.RONet_checkpoint)
else:
decomp_saver.restore(sess, FLAGS.RODec_checkpoint)
(best_psnr, best_ssim) = (0.0, 0.0)
init_step = sess.run(global_step)
for step in range((init_step + 1), (FLAGS.max_steps + 1)):
start_time = time.time()
feed_dict = self.fill_feed_dict(sess, train_batch, images_pl, labels_pl)
if ((step == (init_step + 1)) or ((step % 1000) == 0)):
checkpoint_file = os.path.join(FLAGS.log_dir, 'model')
saver.save(sess, checkpoint_file)
valid_dict = self.fill_feed_dict(sess, valid_batch, images_pl_valid, labels_pl_valid)
feed_dict.update(valid_dict)
(train_psnr, train_ssim) = sess.run(eval_perform, feed_dict)
logging.info(('Train evaluation: PSNR=%0.04f SSIM=%0.04f' % (train_psnr, train_ssim)))
(new_psnr, new_ssim) = sess.run(eval_valid, feed_dict)
logging.info(('Valid evaluation: PSNR=%0.04f SSIM=%0.04f' % (new_psnr, new_ssim)))
is_new_best = (new_psnr > best_psnr)
best_psnr = max(best_psnr, new_psnr)
best_ssim = max(best_ssim, new_ssim)
if is_new_best:
checkpoint_file = os.path.join(FLAGS.log_dir, 'best_model')
saver.save(sess, checkpoint_file)
logging.info(('Best evaluation: PSNR=%0.04f SSIM=%0.04f' % (best_psnr, best_ssim)))
summary_str = sess.run(summary, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
(_, loss_value) = sess.run([train_op, loss], feed_dict=feed_dict)
duration = (time.time() - start_time)
lr = sess.run(learning_rate)
if ((step % 100) == 0):
                    logging.info(('%s | step:[%7d/%7d] | loss=%0.04f | lr=%1.0e | lambda=%1.0e (%0.03f sec)' % (time.strftime('%Y-%m-%d %H:%M:%S'), step, FLAGS.max_steps, loss_value, lr, FLAGS.reg_para, duration)))
def find_all_experiment_configuration(experiments_path: str, ext='.json'):
if experiments_path.endswith(ext):
        yield experiments_path
for (root, _, files) in os.walk(experiments_path):
for file in files:
if file.endswith(ext):
                yield os.path.join(root, file)
def run_experiment_mem(input_config):
experiments = []
experiments.append(analyzer_experiment(instances=1, name='mem-single', experiment_type='memory', input_config=input_config, port=8081))
experiments.append(analyzer_experiment(instances=5, name='mem-multiple', experiment_type='memory', input_config=input_config, port=8081))
    return experiments
def main_loop():
for i_iter in range(args.max_iter_num):
discrim_net.to(torch.device('cpu'))
(batch, log) = agent.collect_samples(args.min_batch_size)
discrim_net.to(device)
t0 = time.time()
update_params(batch, i_iter)
t1 = time.time()
if ((i_iter % args.log_interval) == 0):
print('{}\tT_sample {:.4f}\tT_update {:.4f}\texpert_R_avg {:.2f}\tR_avg {:.2f}'.format(i_iter, log['sample_time'], (t1 - t0), log['avg_c_reward'], log['avg_reward']))
if ((args.save_model_interval > 0) and (((i_iter + 1) % args.save_model_interval) == 0)):
to_device(torch.device('cpu'), policy_net, value_net, discrim_net)
pickle.dump((policy_net, value_net, discrim_net), open(os.path.join(assets_dir(), 'learned_models/{}_gail.p'.format(args.env_name)), 'wb'))
to_device(device, policy_net, value_net, discrim_net)
        # clean up gpu memory
        torch.cuda.empty_cache()
class GcdDomains(Category_singleton):
def super_categories(self):
return [IntegralDomains()]
def additional_structure(self):
return None
class ParentMethods():
pass
class ElementMethods():
        pass
def load_reference(path_to_reference):
with open(path_to_reference, 'r') as f:
qids_to_relevant_documentids = load_reference_from_stream(f)
    return qids_to_relevant_documentids
class LabelledBinaryTrees(LabelledOrderedTrees):
def _repr_(self):
return 'Labelled binary trees'
def _an_element_(self):
LT = self._element_constructor_
t = LT([], label=3)
t1 = LT([t, t], label=42)
t2 = LT([[], []], label=5)
return LT([t1, t2], label='toto')
def unlabelled_trees(self):
return BinaryTrees_all()
def labelled_trees(self):
return self
    Element = LabelledBinaryTree
class DummyEnv():
def __init__(self, ep_len=2, reward_mag=1):
self.ep_len = ep_len
self.reward_mag = reward_mag
self.reset()
def step(self, action):
self.step_num += 1
if (action == 0):
reward = self.reward_mag
else:
reward = (- self.reward_mag)
return (self.step_num, reward, (self.step_num == self.ep_len), {})
def reset(self):
self.step_num = 0
        return self.step_num
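# Minimal rollout sketch: action 0 earns +reward_mag each step until the
# episode length is reached.
demo_env = DummyEnv(ep_len=3, reward_mag=1)
demo_obs = demo_env.reset()
demo_done = False
while not demo_done:
    demo_obs, demo_reward, demo_done, demo_info = demo_env.step(0)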
def create_profile(profiler):
profiler.disable()
ps = pstats.Stats(profiler).sort_stats('cumulative')
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
results = {}
for item in ['ifftn', 'ifft', 'irfftn', 'irfft2', 'irfft', 'rfftn', 'rfft2', 'rfft', 'fftn', 'fft', 'dct', 'ifst', 'ifct', 'fst', 'fct', 'Alltoall', 'Alltoallw', 'Sendrecv_replace', 'rollaxis', 'copy_to_padded', 'copy_from_padded', 'RK4', 'ForwardEuler', 'AB2', 'adaptiveRK', 'nonlinear', 'add_linear', 'cross1', 'cross2', 'compute_curl', 'Cross', 'project', 'Scatter', 'ComputeRHS', 'solve_linear', 'Conv']:
for (key, val) in ps.stats.items():
            if ((item == key[2]) or (("method '%s'" % item) in key[2]) or (('.%s' % item) in key[2])):
results[item] = (comm.reduce(val[2], op=MPI.MIN, root=0), comm.reduce(val[2], op=MPI.MAX, root=0), comm.reduce(val[3], op=MPI.MIN, root=0), comm.reduce(val[3], op=MPI.MAX, root=0))
del ps.stats[key]
break
if (rank == 0):
print('Printing profiling for total min/max cumulative min/max:')
print(' {0:14s}{1:11s}{2:11s}{3:11s}{4:11s}'.format('Method', 'total min', 'total max', 'cum min', 'cum max'))
pprint.pprint(['{0:12s} {1:2.4e} {2:2.4e} {3:2.4e} {4:2.4e}'.format(k, *v) for (k, v) in results.items()])
    return results
def register_Ns3EdcaParameterSetChecker_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::EdcaParameterSetChecker const &', 'arg0')])
    return
class MMOE(BaseModel):
def __init__(self, dnn_feature_columns, num_experts=3, expert_dnn_hidden_units=(256, 128), gate_dnn_hidden_units=(64,), tower_dnn_hidden_units=(64,), l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task_types=('binary', 'binary'), task_names=('ctr', 'ctcvr'), device='cpu', gpus=None):
super(MMOE, self).__init__(linear_feature_columns=[], dnn_feature_columns=dnn_feature_columns, l2_reg_linear=l2_reg_linear, l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, device=device, gpus=gpus)
self.num_tasks = len(task_names)
if (self.num_tasks <= 1):
raise ValueError('num_tasks must be greater than 1')
if (num_experts <= 1):
raise ValueError('num_experts must be greater than 1')
if (len(dnn_feature_columns) == 0):
raise ValueError('dnn_feature_columns is null!')
if (len(task_types) != self.num_tasks):
raise ValueError('num_tasks must be equal to the length of task_types')
for task_type in task_types:
if (task_type not in ['binary', 'regression']):
raise ValueError('task must be binary or regression, {} is illegal'.format(task_type))
self.num_experts = num_experts
self.task_names = task_names
self.input_dim = self.compute_input_dim(dnn_feature_columns)
self.expert_dnn_hidden_units = expert_dnn_hidden_units
self.gate_dnn_hidden_units = gate_dnn_hidden_units
self.tower_dnn_hidden_units = tower_dnn_hidden_units
self.expert_dnn = nn.ModuleList([DNN(self.input_dim, expert_dnn_hidden_units, activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn, init_std=init_std, device=device) for _ in range(self.num_experts)])
if (len(gate_dnn_hidden_units) > 0):
self.gate_dnn = nn.ModuleList([DNN(self.input_dim, gate_dnn_hidden_units, activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn, init_std=init_std, device=device) for _ in range(self.num_tasks)])
self.add_regularization_weight(filter((lambda x: (('weight' in x[0]) and ('bn' not in x[0]))), self.gate_dnn.named_parameters()), l2=l2_reg_dnn)
self.gate_dnn_final_layer = nn.ModuleList([nn.Linear((gate_dnn_hidden_units[(- 1)] if (len(gate_dnn_hidden_units) > 0) else self.input_dim), self.num_experts, bias=False) for _ in range(self.num_tasks)])
if (len(tower_dnn_hidden_units) > 0):
self.tower_dnn = nn.ModuleList([DNN(expert_dnn_hidden_units[(- 1)], tower_dnn_hidden_units, activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn, init_std=init_std, device=device) for _ in range(self.num_tasks)])
self.add_regularization_weight(filter((lambda x: (('weight' in x[0]) and ('bn' not in x[0]))), self.tower_dnn.named_parameters()), l2=l2_reg_dnn)
self.tower_dnn_final_layer = nn.ModuleList([nn.Linear((tower_dnn_hidden_units[(- 1)] if (len(tower_dnn_hidden_units) > 0) else expert_dnn_hidden_units[(- 1)]), 1, bias=False) for _ in range(self.num_tasks)])
self.out = nn.ModuleList([PredictionLayer(task) for task in task_types])
regularization_modules = [self.expert_dnn, self.gate_dnn_final_layer, self.tower_dnn_final_layer]
for module in regularization_modules:
self.add_regularization_weight(filter((lambda x: (('weight' in x[0]) and ('bn' not in x[0]))), module.named_parameters()), l2=l2_reg_dnn)
self.to(device)
def forward(self, X):
(sparse_embedding_list, dense_value_list) = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
expert_outs = []
for i in range(self.num_experts):
expert_out = self.expert_dnn[i](dnn_input)
expert_outs.append(expert_out)
expert_outs = torch.stack(expert_outs, 1)
mmoe_outs = []
for i in range(self.num_tasks):
if (len(self.gate_dnn_hidden_units) > 0):
gate_dnn_out = self.gate_dnn[i](dnn_input)
gate_dnn_out = self.gate_dnn_final_layer[i](gate_dnn_out)
else:
gate_dnn_out = self.gate_dnn_final_layer[i](dnn_input)
gate_mul_expert = torch.matmul(gate_dnn_out.softmax(1).unsqueeze(1), expert_outs)
mmoe_outs.append(gate_mul_expert.squeeze())
task_outs = []
for i in range(self.num_tasks):
if (len(self.tower_dnn_hidden_units) > 0):
tower_dnn_out = self.tower_dnn[i](mmoe_outs[i])
tower_dnn_logit = self.tower_dnn_final_layer[i](tower_dnn_out)
else:
tower_dnn_logit = self.tower_dnn_final_layer[i](mmoe_outs[i])
output = self.out[i](tower_dnn_logit)
task_outs.append(output)
task_outs = torch.cat(task_outs, (- 1))
        return task_outs
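# Hedged usage sketch (deepctr_torch-style; feature columns and model_input
# built elsewhere):
# model = MMOE(dnn_feature_columns, num_experts=4,
#              task_types=('binary', 'binary'), task_names=('ctr', 'ctcvr'))
# model.compile('adagrad', loss=['binary_crossentropy', 'binary_crossentropy'])
# model.fit(model_input, labels, batch_size=256, epochs=1)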
class FSymBases(Category_realization_of_parent):
def super_categories(self):
R = self.base().base_ring()
return [self.base().Realizations(), HopfAlgebras(R).Graded().Realizations(), HopfAlgebras(R).Graded().WithBasis().Graded().Connected()]
class ParentMethods():
def _repr_(self):
return '{} in the {} basis'.format(self.realization_of(), self._realization_name())
def __getitem__(self, key):
try:
return self.monomial(self._indices(list(key)))
except (TypeError, ValueError):
return self.monomial(self._indices([key]))
def basis(self, degree=None):
from sage.sets.family import Family
if (degree is None):
return Family(self._indices, self.monomial)
else:
return Family(StandardTableaux(degree), self.monomial)
        @cached_method
def one_basis(self):
return self._indices([])
def duality_pairing(self, x, y):
y = self.dual_basis()(y)
return self.base_ring().sum(((coeff * y[t]) for (t, coeff) in x))
def duality_pairing_matrix(self, basis, degree):
from sage.matrix.constructor import matrix
keys = self.basis(degree=degree).keys()
return matrix(self.base_ring(), [[self.duality_pairing(self[s], basis[t]) for t in keys] for s in keys])
def degree_on_basis(self, t):
return t.size()
class ElementMethods():
def duality_pairing(self, other):
            return self.parent().duality_pairing(self, other)
def other_headings(cells):
previous_valid_heading_level = 1
first_invalid_heading_level = None
errors = []
for cell in cells[1:]:
if (not isinstance(cell, MarkdownCell)):
continue
for (elem, entering) in cell.ast.walker():
if ((not is_heading(elem)) or (not entering)):
continue
if is_title(elem):
errors.append(message_with_line(cell, 'Found another title (like `# ...`) in internal cell. Later sections should use a high level heading (like `## ...` or `### ...`)', sourcepos=elem.sourcepos))
if (elem.level > (previous_valid_heading_level + 1)):
previous = ('#' * previous_valid_heading_level)
if (first_invalid_heading_level is None):
first_invalid_heading_level = elem.level
levels_from_first_invalid = (elem.level - first_invalid_heading_level)
max_suggestion_level = ((previous_valid_heading_level + levels_from_first_invalid) + 1)
suggestions = ', '.join((f"`{('#' * i)} ...`" for i in range(2, (max_suggestion_level + 1))))
errors.append(message_with_line(cell, f'Found a heading H{elem.level} that skips level(s) from most recent valid heading (H{previous_valid_heading_level} `{previous} ...`). Consider using: {suggestions}', sourcepos=elem.sourcepos))
else:
previous_valid_heading_level = elem.level
first_invalid_heading_level = None
if errors:
        raise FormattingError(errors)
class TestDataset(Dataset):
def __init__(self, triples, all_true_triples, nentity, rel_mask=None):
self.len = len(triples)
self.triple_set = all_true_triples
self.triples = triples
self.nentity = nentity
self.rel_mask = rel_mask
self.hr2t_all = ddict(set)
for (h, r, t) in all_true_triples:
self.hr2t_all[(h, r)].add(t)
def __len__(self):
return self.len
    @staticmethod
    def collate_fn(data):
triple = torch.stack([_[0] for _ in data], dim=0)
trp_label = torch.stack([_[1] for _ in data], dim=0)
return (triple, trp_label)
def __getitem__(self, idx):
(head, relation, tail) = self.triples[idx]
label = self.hr2t_all[(head, relation)]
trp_label = self.get_label(label)
triple = torch.LongTensor((head, relation, tail))
return (triple, trp_label)
def get_label(self, label):
y = np.zeros([self.nentity], dtype=np.float32)
for e2 in label:
y[e2] = 1.0
        return torch.FloatTensor(y)
def run(args):
db = {'circuit_rtt': [], 'client_goodput': [], 'client_goodput_5MiB': [], 'circuit_build_times': [], 'download_times': {}, 'daily_counts': {}, 'relay_goodput': {}}
if (args.bandwidth_data_path is not None):
logging.info(f"Parsing bandwidth data stored in '{args.bandwidth_data_path}'")
db['relay_goodput'] = __parse_bandwidth_data(args.bandwidth_data_path)
logging.info('Finished parsing bandwidth data')
if (args.onionperf_data_path is not None):
logging.info(f"Extracting onionperf data stored in '{args.onionperf_data_path}'")
__extract_onionperf_data(args, db)
logging.info('Finished extracting onionperf data')
days = []
days.extend(db['daily_counts'].keys())
days.extend(db['relay_goodput'].keys())
days.sort()
out_path = f'{args.prefix}/tor_metrics_{days[0]}--{days[(- 1)]}.json'
logging.info(f'Saving parsed Tor metrics data to {out_path}')
    dump_json_data(db, out_path, compress=False)
def visualize_result(data, pred, pred_prob, args):
(img, info) = data
img_name = info.split('/')[(- 1)]
water_mask = (pred == 21)
sea_mask = (pred == 26)
river_mask = (pred == 60)
pool_mask = (pred == 109)
fall_mask = (pred == 113)
lake_mask = (pred == 128)
water_mask = (((((water_mask | sea_mask) | river_mask) | pool_mask) | fall_mask) | lake_mask).astype(int)
if (args.mask_type == 'smooth'):
water_mask = (water_mask.astype(float) * pred_prob)
water_mask = (water_mask * 255.0)
cv2.imwrite('{}/water/{}.png'.format(args.result, img_name.split('.')[0]), water_mask)
sky_mask = (pred == 2).astype(int)
if (args.mask_type == 'smooth'):
sky_mask = (sky_mask.astype(float) * pred_prob)
sky_mask = (sky_mask * 255.0)
cv2.imwrite('{}/sky/{}.png'.format(args.result, img_name.split('.')[0]), sky_mask)
grass_mask = (pred == 9).astype(int)
if (args.mask_type == 'smooth'):
grass_mask = (grass_mask.astype(float) * pred_prob)
grass_mask = (grass_mask * 255.0)
cv2.imwrite('{}/grass/{}.png'.format(args.result, img_name.split('.')[0]), grass_mask)
person_mask = (pred == 12).astype(int)
if (args.mask_type == 'smooth'):
person_mask = (person_mask.astype(float) * pred_prob)
person_mask = (person_mask * 255.0)
    cv2.imwrite('{}/person/{}.png'.format(args.result, img_name.split('.')[0]), person_mask)
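# Side note, a sketch rather than the source's code: the four repeated mask
# blocks above could share one helper (the name is illustrative).
def _write_mask(mask, name, img_name, pred_prob, args):
    m = mask.astype(float)
    if args.mask_type == 'smooth':
        m = m * pred_prob
    cv2.imwrite('{}/{}/{}.png'.format(args.result, name, img_name.split('.')[0]), m * 255.0)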
def exec_cmds(cmds):
cmd_file = 'z3_tmp.cmd'
f = open(cmd_file, 'w')
for cmd in cmds:
f.write(cmd)
f.write('\n')
f.close()
res = 0
try:
res = subprocess.call(cmd_file, shell=True)
except:
res = 1
try:
        os.remove(cmd_file)
except:
pass
    return res
def AnoaTime(direction, r_in, r_out, extra=None):
del extra
if (direction == 0):
return r_out
if (direction == 1):
        return r_in
def test_read_snippets_two_columns(tmp_path):
filename = (tmp_path / 'foo.csv')
with open(filename, 'w', encoding='utf-8') as fout:
fout.write('FOO\tThis is a test\thappy\tfoo\n')
fout.write('FOO\tThis is a second sentence\tsad\tbar\n')
fout.write('FOO\tThis is a third sentence\tsad\tfoo\n')
nlp = stanza.Pipeline('en', dir=TEST_MODELS_DIR, processors='tokenize', download_method=None)
mapping = {('happy', 'foo'): 0, ('sad', 'bar'): 1, ('sad', 'foo'): 2}
snippets = process_utils.read_snippets(filename, (2, 3), 1, 'en', mapping, nlp=nlp)
assert (len(snippets) == 3)
    assert (snippets == [SentimentDatum(sentiment=0, text=['This', 'is', 'a', 'test']), SentimentDatum(sentiment=1, text=['This', 'is', 'a', 'second', 'sentence']), SentimentDatum(sentiment=2, text=['This', 'is', 'a', 'third', 'sentence'])])
def get_spanish_datasets() -> List[Tuple[(str, Optional[str])]]:
    return ([(name, None) for name in ['head_qa', 'sab']] + [('amazon_reviews_multi', 'es')])
class MinSymbolic(MinMax_base):
def __init__(self):
BuiltinFunction.__init__(self, 'min', nargs=0, latex_name='\\min', conversions=dict(sympy='Min'))
def _eval_(self, *args):
return self.eval_helper(min_symbolic, builtin_min, float('inf'), args)
def _evalf_(self, *args, **kwds):
        return min_symbolic(args)
def parse_vocab(filename):
if filename.endswith('.gz'):
import gzip
raw = gzip.open(filename, 'r').read().decode('utf8')
else:
raw = open(filename, 'r').read()
if raw.startswith('{'):
py_vocab = eval(raw)
assert isinstance(py_vocab, dict)
labels = {idx: label for (label, idx) in sorted(py_vocab.items())}
(min_label, max_label, num_labels) = (min(labels), max(labels), len(labels))
assert (0 == min_label)
if ((num_labels - 1) < max_label):
print(('Vocab error: not all indices used? max label: %i' % max_label))
print(('unused labels: %r' % ([i for i in range((max_label + 1)) if (i not in labels)],)))
assert ((num_labels - 1) == max_label)
zero_sym = labels[0]
assert isinstance(zero_sym, str)
return [label for (idx, label) in sorted(labels.items())]
if raw.startswith('<?xml'):
labels = []
        from io import StringIO
        from xml.etree import ElementTree
raw_stream = StringIO(raw)
context = iter(ElementTree.iterparse(raw_stream, events=('start', 'end')))
(_, root) = next(context)
for (event, elem) in context:
if ((event == 'end') and (elem.tag == 'lemma')):
for orth_elem in elem.findall('orth'):
orth = (orth_elem.text or '').strip()
labels.append(orth)
root.clear()
return labels
    return raw.splitlines()
def extend_cfg(cfg):
from yacs.config import CfgNode as CN
cfg.TRAINER.OURS = CN()
cfg.TRAINER.OURS.N_CTX = 10
cfg.TRAINER.OURS.CSC = False
cfg.TRAINER.OURS.CTX_INIT = ''
    cfg.TRAINER.OURS.WEIGHT_U = 0.1
class FlaxRobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject):
_backends = ['flax']
def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def FNN(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 128, 64), l2_reg_embedding=1e-05, l2_reg_linear=1e-05, l2_reg_dnn=0, seed=1024, dnn_dropout=0, dnn_activation='relu', task='binary'):
features = build_input_features((linear_feature_columns + dnn_feature_columns))
inputs_list = list(features.values())
linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed, prefix='linear', l2_reg=l2_reg_linear)
(sparse_embedding_list, dense_value_list) = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding, seed)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, False, seed=seed)(dnn_input)
dnn_logit = Dense(1, use_bias=False)(deep_out)
final_logit = add_func([dnn_logit, linear_logit])
output = PredictionLayer(task)(final_logit)
model = Model(inputs=inputs_list, outputs=output)
    return model
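# Hedged usage sketch (DeepCTR/Keras-style; inputs built from the same feature
# columns passed above):
# model = FNN(linear_feature_columns, dnn_feature_columns, task='binary')
# model.compile('adam', 'binary_crossentropy', metrics=['AUC'])
# model.fit(model_input, labels, batch_size=256, epochs=10)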
def _batch_thread(index_queue: queue.Queue[Optional[Tuple[(int, int)]]], batch_queue: queue.Queue[Optional[Tuple[(Any, int)]]], data_path: str, batch_info_path: str, token_dropout: float, split: str) -> None:
thread_loader = BatchLoader(data_path, batch_info_path, token_dropout=token_dropout)
while True:
next_item = index_queue.get()
if (next_item is None):
batch_queue.put(None)
break
(batch_index, step) = next_item
batch = thread_loader.get_batch(split, batch_index)
if (batch['num_indices'] == 0):
batch_queue.put((None, step))
else:
batch = jax.tree_map((lambda a: jax.device_put(a, device=jax.devices('cpu')[0])), batch)
batch_queue.put((batch, step))
    batch_queue.put(None)
def summarize_jsons(test_list: TestList, interested_folders: List[str], coverage_only: List[str], platform: TestPlatform) -> None:
start_time = time.time()
if (detect_compiler_type(platform) == CompilerType.GCC):
html_oriented_report()
else:
parse_jsons(test_list, interested_folders, platform)
update_set()
line_oriented_report(test_list, tests_type, interested_folders, coverage_only, covered_lines, uncovered_lines)
file_oriented_report(test_list, tests_type, interested_folders, coverage_only, covered_lines, uncovered_lines)
    print_time('summary jsons take time: ', start_time)
def firmmax_sample(logits, temperature, dim=1):
if (temperature == 0):
return F.softmax(logits, dim=dim)
y = (logits + (sample_gumbel(logits.shape, tens_type=type(logits.data)) / temperature))
    return F.softmax(y, dim=dim)
(5, 4, FOptsDir.DOWNLINK, fOptsDownlink)
class RXParamSetupReq(FOpt):
_MASK_RX1DROFFSET = 112
_MASK_RX2DATARATE = 15
def __init__(self, rx1drOffset=None, rx2dataRate=None, freq=0, **kwargs):
super().__init__(**kwargs)
if (rx1drOffset is not None):
self.rx1drOffset = rx1drOffset
        if (rx2dataRate is not None):
            self.rx2dataRate = rx2dataRate
self.freq = freq
    @property
    def freq(self):
        return bytesToFreq(self._raw[1:4])
    @freq.setter
    def freq(self, freq):
        self._raw[1:4] = freqToBytes(freq)
    @property
    def rx1drOffset(self):
        return self._region.binToRx1DrOffset(getWithMask(self._raw[0], self._MASK_RX1DROFFSET))
    @rx1drOffset.setter
    def rx1drOffset(self, rx1drOffset):
        self._raw[0] = setWithMask(self._raw[0], self._region.rx1DrOffsetToBin(rx1drOffset), self._MASK_RX1DROFFSET)
    @property
    def rx2dataRate(self):
        return self._region.binToDataRate(getWithMask(self._raw[0], self._MASK_RX2DATARATE))
    @rx2dataRate.setter
    def rx2dataRate(self, rx2dataRate):
        self._raw[0] = setWithMask(self._raw[0], self._region.dataRateToBin(rx2dataRate), self._MASK_RX2DATARATE)
def test_nested_exis_0():
arrays = {'x': np.arange(4), 'y': ['this', 'that', 'foo', 'bar!']}
result = ak.cartesian(arrays, nested=True, axis=0)
assert (result.to_list() == [[{'x': 0, 'y': 'this'}, {'x': 0, 'y': 'that'}, {'x': 0, 'y': 'foo'}, {'x': 0, 'y': 'bar!'}], [{'x': 1, 'y': 'this'}, {'x': 1, 'y': 'that'}, {'x': 1, 'y': 'foo'}, {'x': 1, 'y': 'bar!'}], [{'x': 2, 'y': 'this'}, {'x': 2, 'y': 'that'}, {'x': 2, 'y': 'foo'}, {'x': 2, 'y': 'bar!'}], [{'x': 3, 'y': 'this'}, {'x': 3, 'y': 'that'}, {'x': 3, 'y': 'foo'}, {'x': 3, 'y': 'bar!'}]])
result = ak.cartesian(arrays, nested=['x'], axis=0)
assert (result.to_list() == [[{'x': 0, 'y': 'this'}, {'x': 0, 'y': 'that'}, {'x': 0, 'y': 'foo'}, {'x': 0, 'y': 'bar!'}], [{'x': 1, 'y': 'this'}, {'x': 1, 'y': 'that'}, {'x': 1, 'y': 'foo'}, {'x': 1, 'y': 'bar!'}], [{'x': 2, 'y': 'this'}, {'x': 2, 'y': 'that'}, {'x': 2, 'y': 'foo'}, {'x': 2, 'y': 'bar!'}], [{'x': 3, 'y': 'this'}, {'x': 3, 'y': 'that'}, {'x': 3, 'y': 'foo'}, {'x': 3, 'y': 'bar!'}]]) |
def plot_lightcurves_from_hdf5(settings, SNID_idxs):
with h5py.File(settings.hdf5_file_name, 'r') as hf:
features = hf['features'][:].astype(str)
n_features = len(features)
plt.figure(figsize=(20, 10))
gs = gridspec.GridSpec(4, 4, hspace=0.4)
for (idx, SNID_idx) in enumerate(SNID_idxs):
ax = plt.subplot(gs[idx])
SNID = hf['SNID'][SNID_idx]
str(hf['PEAKMJD'][SNID_idx])
PEAKMJDNORM = hf['PEAKMJDNORM'][SNID_idx]
typ = hf[settings.sntype_var][SNID_idx]
typ = settings.sntypes[str(typ)]
data = hf['data'][SNID_idx].reshape((- 1), n_features)
df = pd.DataFrame(data, columns=features)
non_filter_columns = ['FLUXCAL_g', 'FLUXCAL_i', 'FLUXCAL_r', 'FLUXCAL_z', 'FLUXCALERR_g', 'FLUXCALERR_i', 'FLUXCALERR_r', 'FLUXCALERR_z', 'delta_time', 'HOSTGAL_PHOTOZ', 'HOSTGAL_PHOTOZ_ERR', 'HOSTGAL_SPECZ', 'HOSTGAL_SPECZ_ERR']
filter_columns = [c for c in df.columns.values if (c not in non_filter_columns)]
present_filters = df[filter_columns].transpose().idxmax().values
list_present_filters = [set(f) for f in present_filters]
max_y = (- float('Inf'))
min_y = float('Inf')
for FLT in settings.list_filters:
idxs = np.array([i for i in range(len(df)) if (FLT in list_present_filters[i])])
if (len(idxs) == 0):
continue
arr_flux = df[f'FLUXCAL_{FLT}'].values[idxs]
arr_fluxerr = df[f'FLUXCALERR_{FLT}'].values[idxs]
arr_time = df['delta_time'].cumsum().values[idxs]
ax.errorbar(arr_time, arr_flux, yerr=arr_fluxerr, label=f'Filter {FLT}')
if (np.max(arr_flux) > max_y):
max_y = np.max(arr_flux)
if (np.min(arr_flux) < min_y):
min_y = np.min(arr_flux)
ax.plot([PEAKMJDNORM, PEAKMJDNORM], [min_y, max_y], color='k', linestyle='--')
ax.set_title(f"{SNID.decode('utf8')} -- {typ}", fontsize=18)
ax.set_aspect('auto')
ax.legend(loc='best')
plt.savefig((Path(settings.explore_dir) / 'sample_lightcurves_from_hdf5.png')) |
def dicenet_seg(args, classes):
weights = args.weights
model = DiCENetSegmentation(args, classes=classes)
if weights:
import os
if os.path.isfile(weights):
num_gpus = torch.cuda.device_count()
device = ('cuda' if (num_gpus >= 1) else 'cpu')
pretrained_dict = torch.load(weights, map_location=torch.device(device))
else:
print_error_message('Weight file does not exist at {}. Please check. Exiting!!'.format(weights))
exit()
print_log_message('Loading pretrained basenet model weights')
basenet_dict = model.base_net.state_dict()
model_dict = model.state_dict()
overlap_dict = {k: v for (k, v) in pretrained_dict.items() if (k in basenet_dict)}
if (len(overlap_dict) == 0):
print_error_message('No overlapping weights between model file and pretrained weight file. Please check')
print_log_message('{:.2f} % of weights copied from basenet to segnet'.format((((len(overlap_dict) * 1.0) / len(model_dict)) * 100)))
basenet_dict.update(overlap_dict)
model.base_net.load_state_dict(basenet_dict)
print_log_message('Pretrained basenet model loaded!!')
else:
print_warning_message('Training from scratch!!')
return model |
@pytest.mark.usefixtures('num_cpus', 'io_type')
class BaseTest():
qbt = None
@pytest.fixture(autouse=True)
def set_tmpdir(self, request):
setattr(self, 'tmpdir', request.getfixturevalue('tmpdir'))
def teardown_class(cls):
plt.close('all')
def eigenvals(self, io_type, evals_reference):
evals_count = len(evals_reference)
evals_tst = self.qbt.eigenvals(evals_count=evals_count, filename=((self.tmpdir + 'test.') + io_type))
assert np.allclose(evals_reference, evals_tst)
def eigenvecs(self, io_type, evecs_reference):
evals_count = evecs_reference.shape[1]
(_, evecs_tst) = self.qbt.eigensys(evals_count=evals_count, filename=((self.tmpdir + 'test.') + io_type))
assert np.allclose(np.abs(evecs_reference), np.abs(evecs_tst))
def plot_evals_vs_paramvals(self, num_cpus, param_name, param_list):
self.qbt.plot_evals_vs_paramvals(param_name, param_list, evals_count=5, subtract_ground=True, filename=(self.tmpdir + 'test'), num_cpus=num_cpus)
def get_spectrum_vs_paramvals(self, num_cpus, io_type, param_name, param_list, evals_reference, evecs_reference):
evals_count = len(evals_reference[0])
calculated_spectrum = self.qbt.get_spectrum_vs_paramvals(param_name, param_list, evals_count=evals_count, subtract_ground=False, get_eigenstates=True, num_cpus=num_cpus)
calculated_spectrum.filewrite(filename=((self.tmpdir + 'test.') + io_type))
assert np.allclose(evals_reference, calculated_spectrum.energy_table)
assert np.allclose(np.abs(evecs_reference), np.abs(calculated_spectrum.state_table), atol=1e-07)
def matrixelement_table(self, io_type, op, matelem_reference):
evals_count = len(matelem_reference)
calculated_matrix = self.qbt.matrixelement_table(op, evecs=None, evals_count=evals_count, filename=((self.tmpdir + 'test.') + io_type))
assert np.allclose(np.abs(matelem_reference), np.abs(calculated_matrix))
def plot_matrixelements(self, op, evals_count=7):
self.qbt.plot_matrixelements(op, evecs=None, evals_count=evals_count, show_numbers=True)
def print_matrixelements(self, op):
mat_data = self.qbt.matrixelement_table(op)
plot.matrix2d(abs(mat_data))
def plot_matelem_vs_paramvals(self, num_cpus, op, param_name, param_list, select_elems):
self.qbt.plot_matelem_vs_paramvals(op, param_name, param_list, select_elems=select_elems, filename=(self.tmpdir + 'test'), num_cpus=num_cpus)
def test_file_io(self):
self.qbt = self.qbt_type.create()
self.qbt.filewrite((self.tmpdir + 'test.h5'))
qbt_copy = scq.read((self.tmpdir + 'test.h5'))
assert (self.qbt == qbt_copy) |
class DownBlock3D(nn.Module):
def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor=1.0, add_downsample=True, downsample_padding=1):
super().__init__()
resnets = []
for i in range(num_layers):
in_channels = (in_channels if (i == 0) else out_channels)
resnets.append(ResnetBlock3D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
self.resnets = nn.ModuleList(resnets)
if add_downsample:
self.downsamplers = nn.ModuleList([Downsample3D(out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')])
else:
self.downsamplers = None
self.gradient_checkpointing = False
def forward(self, hidden_states, temb=None):
output_states = ()
for resnet in self.resnets:
if (self.training and self.gradient_checkpointing):
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
else:
hidden_states = resnet(hidden_states, temb)
output_states += (hidden_states,)
if (self.downsamplers is not None):
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return (hidden_states, output_states) |
def coinfo(X, ks):
info = 0.0
S = len(X)
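# Inclusion-exclusion: sum the size-T subset terms with sign (-1)^T for
# T = 1..S, then negate the total to obtain the co-information.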
for T in range(1, (S + 1)):
sgn = ((- 1) ** T)
info += (sgn * numpy.sum(from_data(X, ks=ks, r=T)))
return (- info) |
@register_task('masked_lm')
class MaskedLMTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner')
parser.add_argument('--sample-break-mode', default='complete', choices=['none', 'complete', 'complete_doc', 'eos'], help='If omitted or "none", fills each sample with tokens-per-sample tokens. If set to "complete", splits samples only at the end of sentence, but may include multiple sentences per sample. "complete_doc" is similar but respects doc boundaries. If set to "eos", includes only one sentence per sample.')
parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset')
parser.add_argument('--mask-prob', default=0.15, type=float, help='probability of replacing a token with mask')
parser.add_argument('--leave-unmasked-prob', default=0.1, type=float, help='probability that a masked token is unmasked')
parser.add_argument('--random-token-prob', default=0.1, type=float, help='probability of replacing a token with a random token')
parser.add_argument('--freq-weighted-replacement', default=False, action='store_true', help='sample random replacement words based on word frequencies')
parser.add_argument('--mask-whole-words', default=False, action='store_true', help='mask whole words; you may also want to set --bpe')
parser.add_argument('--mask-multiple-length', default=1, type=int, help='repeat the mask indices multiple times')
parser.add_argument('--mask-stdev', default=0.0, type=float, help='stdev of the mask length')
parser.add_argument('--shorten-method', default='none', choices=['none', 'truncate', 'random_crop'], help='if not none, shorten sequences that exceed --tokens-per-sample')
parser.add_argument('--shorten-data-split-list', default='', help='comma-separated list of dataset splits to apply shortening to, e.g., "train,valid" (default: all dataset splits)')
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
self.mask_idx = dictionary.add_symbol('<mask>')
@classmethod
def setup_task(cls, args, **kwargs):
paths = utils.split_paths(args.data)
assert (len(paths) > 0)
dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))
logger.info('dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
paths = utils.split_paths(self.args.data)
assert (len(paths) > 0)
data_path = paths[((epoch - 1) % len(paths))]
split_path = os.path.join(data_path, split)
dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine)
if (dataset is None):
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
dataset = maybe_shorten_dataset(dataset, split, self.args.shorten_data_split_list, self.args.shorten_method, self.args.tokens_per_sample, self.args.seed)
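# Pack the corpus into blocks of tokens_per_sample - 1 tokens (one slot is
# reserved for the BOS token prepended below), then apply BERT-style masking.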
dataset = TokenBlockDataset(dataset, dataset.sizes, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode=self.args.sample_break_mode)
logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path))
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
mask_whole_words = (get_whole_word_mask(self.args, self.source_dictionary) if self.args.mask_whole_words else None)
(src_dataset, tgt_dataset) = MaskTokensDataset.apply_mask(dataset, self.source_dictionary, pad_idx=self.source_dictionary.pad(), mask_idx=self.mask_idx, seed=self.args.seed, mask_prob=self.args.mask_prob, leave_unmasked_prob=self.args.leave_unmasked_prob, random_token_prob=self.args.random_token_prob, freq_weighted_replacement=self.args.freq_weighted_replacement, mask_whole_words=mask_whole_words, mask_multiple_length=self.args.mask_multiple_length, mask_stdev=self.args.mask_stdev)
with data_utils.numpy_seed((self.args.seed + epoch)):
shuffle = np.random.permutation(len(src_dataset))
self.datasets[split] = SortDataset(NestedDictionaryDataset({'id': IdDataset(), 'net_input': {'src_tokens': RightPadDataset(src_dataset, pad_idx=self.source_dictionary.pad()), 'src_lengths': NumelDataset(src_dataset, reduce=False)}, 'target': RightPadDataset(tgt_dataset, pad_idx=self.source_dictionary.pad()), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_dataset, reduce=True)}, sizes=[src_dataset.sizes]), sort_order=[shuffle, src_dataset.sizes])
def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
src_dataset = RightPadDataset(TokenBlockDataset(src_tokens, src_lengths, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode='eos'), pad_idx=self.source_dictionary.pad())
src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
src_dataset = NestedDictionaryDataset({'id': IdDataset(), 'net_input': {'src_tokens': src_dataset, 'src_lengths': NumelDataset(src_dataset, reduce=False)}}, sizes=src_lengths)
if sort:
src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
return src_dataset
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary |
class Fpr(Critic):
def __init__(self, recall_level=0.95):
super().__init__()
self.recall_level = recall_level
def get_name(self):
return (('FPR(' + str((self.recall_level * 100))) + ')')
def stable_cumsum(self, arr, rtol=1e-05, atol=1e-08):
out = np.cumsum(arr, dtype=np.float64)
expected = np.sum(arr, dtype=np.float64)
if (not np.allclose(out[(- 1)], expected, rtol=rtol, atol=atol)):
raise RuntimeError('cumsum was found to be unstable: its last element does not correspond to sum')
return out
def fpr_and_fdr_at_recall(self, y_true, y_score, recall_level, pos_label=None):
classes = np.unique(y_true)
if ((pos_label is None) and (not (np.array_equal(classes, [0, 1]) or np.array_equal(classes, [(- 1), 1]) or np.array_equal(classes, [0]) or np.array_equal(classes, [(- 1)]) or np.array_equal(classes, [1])))):
raise ValueError('Data is not binary and pos_label is not specified')
elif (pos_label is None):
pos_label = 1.0
y_true = (y_true == pos_label)
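# Sort scores descending (stable mergesort) and accumulate TP/FP counts at
# each distinct score threshold, mirroring sklearn's ROC-curve bookkeeping.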
desc_score_indices = np.argsort(y_score, kind='mergesort')[::(- 1)]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
distinct_value_indices = np.where(np.diff(y_score))[0]
threshold_idxs = np.r_[(distinct_value_indices, (y_true.size - 1))]
tps = self.stable_cumsum(y_true)[threshold_idxs]
fps = ((1 + threshold_idxs) - tps)
thresholds = y_score[threshold_idxs]
recall = (tps / tps[(- 1)])
last_ind = tps.searchsorted(tps[(- 1)])
sl = slice(last_ind, None, (- 1))
(recall, fps, tps, thresholds) = (np.r_[(recall[sl], 1)], np.r_[(fps[sl], 0)], np.r_[(tps[sl], 0)], thresholds[sl])
cutoff = np.argmin(np.abs((recall - recall_level)))
return (fps[cutoff] / np.sum(np.logical_not(y_true)))
def evaluate(self, inlier_scores, outlier_scores):
all_scores = (inlier_scores + outlier_scores)
all_labels = ([1 for _ in range(len(inlier_scores))] + [0 for _ in range(len(outlier_scores))])
return self.fpr_and_fdr_at_recall(np.array(all_labels), np.array(all_scores), self.recall_level) |
def setup_environment(dry_run, volume_start, volume_stop, volume_size, volume_path, max_ram_size, output_patch_size, input_patch_size, channel_num, dtype, output_patch_overlap, crop_chunk_margin, mip, thumbnail_mip, max_mip, thumbnail, encoding, voxel_size, overwrite_info):
assert (not ((volume_stop is None) and (volume_size is None)))
if isinstance(volume_start, tuple):
volume_start = Vec(*volume_start)
if isinstance(volume_stop, tuple):
volume_stop = Vec(*volume_stop)
if isinstance(volume_size, tuple):
volume_size = Vec(*volume_size)
if (input_patch_size is None):
input_patch_size = output_patch_size
if (volume_size is not None):
assert (len(volume_size) == 3)
assert (volume_stop is None)
volume_stop = (volume_start + volume_size)
else:
volume_size = (volume_stop - volume_start)
print(('\noutput volume start: ' + tuple2string(volume_start)))
print(('output volume stop: ' + tuple2string(volume_stop)))
print(('output volume size: ' + tuple2string(volume_size)))
if (output_patch_overlap is None):
output_patch_overlap = tuple(((s // 2) for s in output_patch_size))
assert (output_patch_overlap[1] == output_patch_overlap[2])
if (crop_chunk_margin is None):
crop_chunk_margin = output_patch_overlap
assert (crop_chunk_margin[1] == crop_chunk_margin[2])
print(('margin size: ' + tuple2string(crop_chunk_margin)))
if thumbnail:
thumbnail_mip = max(thumbnail_mip, 5)
(block_size, output_chunk_size, factor) = get_optimized_block_size(output_patch_size, output_patch_overlap, max_ram_size, channel_num, max_mip, crop_chunk_margin, input_patch_size, mip, thumbnail_mip, volume_start)
if (not dry_run):
storage = CloudFiles(volume_path)
thumbnail_volume_path = os.path.join(volume_path, 'thumbnail')
thumbnail_storage = CloudFiles(thumbnail_volume_path)
if (not overwrite_info):
print('\ncheck that we are not overwriting existing info file.')
assert storage.exists('info')
assert thumbnail_storage.exists('info')
if overwrite_info:
print(f'create and upload info file to {volume_path}')
info = CloudVolume.create_new_info(channel_num, layer_type='image', data_type=dtype, encoding=encoding, resolution=voxel_size[::(- 1)], voxel_offset=volume_start[::(- 1)], volume_size=volume_size[::(- 1)], chunk_size=block_size[::(- 1)], max_mip=mip)
vol = CloudVolume(volume_path, info=info)
vol.commit_info()
if overwrite_info:
thumbnail_factor = (2 ** thumbnail_mip)
thumbnail_block_size = ((output_chunk_size[0] // factor), (output_chunk_size[1] // thumbnail_factor), (output_chunk_size[2] // thumbnail_factor))
print(('thumbnail block size: ' + tuple2string(thumbnail_block_size)))
thumbnail_info = CloudVolume.create_new_info(1, layer_type='image', data_type='uint8', encoding='raw', resolution=voxel_size[::(- 1)], voxel_offset=volume_start[::(- 1)], volume_size=volume_size[::(- 1)], chunk_size=thumbnail_block_size[::(- 1)], max_mip=thumbnail_mip)
thumbnail_vol = CloudVolume(thumbnail_volume_path, info=thumbnail_info)
thumbnail_vol.commit_info()
print('create a list of bounding boxes...')
roi_start = (volume_start[0], (volume_start[1] // factor), (volume_start[2] // factor))
roi_size = (volume_size[0], (volume_size[1] // factor), (volume_size[2] // factor))
roi_stop = tuple(((s + z) for (s, z) in zip(roi_start, roi_size)))
bboxes = BoundingBoxes.from_manual_setup(output_chunk_size, roi_start=roi_start, roi_stop=roi_stop)
print(f'total number of tasks: {len(bboxes)}')
print(f'bounding boxes: {bboxes}')
print(yellow(('Note that you should reuse the printed out parameters in the production run.' + ' These parameters are not ingested to AWS SQS queue.')))
return bboxes |
def show_mesh_info(options):
mesh = Mesh.from_file(options.filename)
output(mesh.cmesh)
output('element types:', mesh.descs)
output('nodal BCs:', sorted(mesh.nodal_bcs.keys()))
bbox = mesh.get_bounding_box()
output(('bounding box:\n%s' % '\n'.join((('%s: [%14.7e, %14.7e]' % (name, bbox[(0, ii)], bbox[(1, ii)])) for (ii, name) in enumerate('xyz'[:mesh.dim])))))
output(('centre: [%s]' % ', '.join((('%14.7e' % ii) for ii in (0.5 * (bbox[0] + bbox[1]))))))
output(('coordinates mean: [%s]' % ', '.join((('%14.7e' % ii) for ii in mesh.coors.mean(0)))))
if (not options.detailed):
return
domain = FEDomain(mesh.name, mesh)
for dim in range(1, (mesh.cmesh.tdim + 1)):
volumes = mesh.cmesh.get_volumes(dim)
output(('volumes of %d %dD entities:\nmin: %.7e mean: %.7e median: %.7e max: %.7e' % (mesh.cmesh.num[dim], dim, volumes.min(), volumes.mean(), nm.median(volumes), volumes.max())))
euler = (lambda mesh: nm.dot(mesh.cmesh.num, [1, (- 1), 1, (- 1)]))
ec = euler(mesh)
output('Euler characteristic:', ec)
graph = mesh.create_conn_graph(verbose=False)
(n_comp, _) = graph_components(graph.shape[0], graph.indptr, graph.indices)
output('number of connected components:', n_comp)
if (mesh.dim > 1):
region = domain.create_region('surf', 'vertices of surface', 'facet')
surf_mesh = Mesh.from_region(region, mesh, localize=True, is_surface=True)
FEDomain(surf_mesh.name, surf_mesh)
sec = euler(surf_mesh)
output('surface Euler characteristic:', sec)
if (mesh.dim == 3):
output('surface genus:', ((2.0 - sec) / 2.0))
surf_graph = surf_mesh.create_conn_graph(verbose=False)
(n_comp, _) = graph_components(surf_graph.shape[0], surf_graph.indptr, surf_graph.indices)
output('number of connected surface components:', n_comp) |
def filter_dict(example_dict, threshold):
to_pop_key_list = []
for key in example_dict:
if (len(example_dict[key]) < threshold):
to_pop_key_list.append(key)
for key in to_pop_key_list:
example_dict.pop(key)
return example_dict |
@mark_quantizer(quantization_target=QuantizationTarget.Weights, quantization_method=[QuantizationMethod.UNIFORM], identifier=TrainingMethod.STE)
class STEUniformWeightQATQuantizer(BaseKerasQATTrainableQuantizer):
def __init__(self, quantization_config: TrainableQuantizerWeightsConfig):
super().__init__(quantization_config)
self.max_values = np.array(quantization_config.weights_quantization_params[RANGE_MAX])
self.min_values = np.array(quantization_config.weights_quantization_params[RANGE_MIN])
self.num_bits = self.quantization_config.weights_n_bits
self.per_channel = self.quantization_config.weights_per_channel_threshold
self.channel_axis = self.quantization_config.weights_channels_axis
self.min_max_shape = np.asarray(self.max_values).shape
self.max = (np.reshape(self.max_values, [(- 1)]) if self.per_channel else float(self.max_values))
self.min = (np.reshape(self.min_values, [(- 1)]) if self.per_channel else float(self.min_values))
if (self.per_channel and (self.channel_axis not in [(- 1), (len(self.min_max_shape) - 1)])):
self.perm_vec = list(np.arange(len(self.min_max_shape)))
self.perm_vec[self.channel_axis] = (len(self.min_max_shape) - 1)
self.perm_vec[(len(self.min_max_shape) - 1)] = self.channel_axis
else:
self.perm_vec = None
def initialize_quantization(self, tensor_shape: TensorShape, name: str, layer: KerasTrainableQuantizationWrapper):
fq_min = layer.add_weight((name + FQ_MIN), shape=(len(self.min) if self.per_channel else ()), initializer=tf.keras.initializers.Constant((- 1.0)), trainable=False)
fq_min.assign(self.min)
fq_max = layer.add_weight((name + FQ_MAX), shape=(len(self.max) if self.per_channel else ()), initializer=tf.keras.initializers.Constant(1.0), trainable=False)
fq_max.assign(self.max)
self.add_quantizer_variable(FQ_MIN, fq_min, VariableGroup.QPARAMS)
self.add_quantizer_variable(FQ_MAX, fq_max, VariableGroup.QPARAMS)
def __call__(self, inputs: tf.Tensor, training: bool):
_min = self.get_quantizer_variable(FQ_MIN)
_max = self.get_quantizer_variable(FQ_MAX)
(_min, _max) = adjust_range_to_include_zero(_min, _max, self.num_bits)
if self.per_channel:
if self.perm_vec:
inputs = tf.transpose(inputs, perm=self.perm_vec)
q_tensor = tf.quantization.fake_quant_with_min_max_vars_per_channel(inputs, _min, _max, num_bits=self.num_bits)
if self.perm_vec:
q_tensor = tf.transpose(q_tensor, perm=self.perm_vec)
else:
q_tensor = tf.quantization.fake_quant_with_min_max_vars(inputs, _min, _max, num_bits=self.num_bits)
return q_tensor
def convert2inferable(self) -> BaseKerasInferableQuantizer:
(min_range, max_range) = fix_range_to_include_zero(self.get_quantizer_variable(FQ_MIN).numpy(), self.get_quantizer_variable(FQ_MAX).numpy(), self.num_bits)
return WeightsUniformInferableQuantizer(num_bits=self.num_bits, min_range=list(min_range.flatten()), max_range=list(max_range.flatten()), per_channel=self.per_channel, channel_axis=self.channel_axis, input_rank=len(self.min_max_shape)) |
def get_validation_recalls(r_list, q_list, k_values, gt, print_results=True, faiss_gpu=False, dataset_name='dataset without name ?', testing=False):
embed_size = r_list.shape[1]
if faiss_gpu:
res = faiss.StandardGpuResources()
flat_config = faiss.GpuIndexFlatConfig()
flat_config.useFloat16 = True
flat_config.device = 0
faiss_index = faiss.GpuIndexFlatL2(res, embed_size, flat_config)
else:
faiss_index = faiss.IndexFlatL2(embed_size)
faiss_index.add(r_list)
(_, predictions) = faiss_index.search(q_list, max(k_values))
if testing:
return predictions
correct_at_k = np.zeros(len(k_values))
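# A query counts as correct at k (and at every larger k) as soon as one of
# its top-k predictions appears in the ground truth, hence the [i:] update.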
for (q_idx, pred) in enumerate(predictions):
for (i, n) in enumerate(k_values):
if np.any(np.in1d(pred[:n], gt[q_idx])):
correct_at_k[i:] += 1
break
correct_at_k = (correct_at_k / len(predictions))
d = {k: v for (k, v) in zip(k_values, correct_at_k)}
if print_results:
print()
table = PrettyTable()
table.field_names = (['K'] + [str(k) for k in k_values])
table.add_row(([''] + [f'{(100 * v):.2f}' for v in correct_at_k]))
print(table.get_string(title=f'Performances on {dataset_name}'))
return d |
class GraphProfilerCsvWriter():
def __init__(self, gb, file=sys.stdout):
self.file = file
self.gb = gb
self.fields = ['parameter_scope', 'function_name', 'inputs_shape', 'args_info', 'forward', 'backward', 'forward_n_run', 'backward_n_run']
self.write_header()
def write_header(self):
writer = csv.writer(self.file)
writer.writerow(['num. of run', self.gb.n_run])
writer.writerow(['device id', self.gb.device_id])
writer.writerow(['ext name', self.gb.ext_name])
writer.writerow(['time scale', self.gb.time_scale])
writer.writerow(['nnabla version', nn.__version__])
writer.writerow([])
writer.writerow(self.fields)
def check_same(self, f, b):
for field in self.fields[:4]:
if (f[field] != b[field]):
return False
return True
def write(self):
writer = csv.writer(self.file)
for (f, b) in zip(self.gb.result['forward'], self.gb.result['backward']):
f = f._asdict()
b = b._asdict()
if (not self.check_same(f, b)):
raise AssertionError()
args_info = ', '.join(['{}: {}'.format(k, v) for (k, v) in f['args_info']])
out = [f['parameter_scope'], f['function_name'], f['inputs_shape'], args_info, f['mean_time'], b['mean_time'], f['n_run'], b['n_run']]
writer.writerow(out)
writer.writerow([])
writer.writerow(['forward all', self.gb.result['forward_all']])
writer.writerow(['forward_all_n_run', self.gb.result['n_run_forward_all']])
writer.writerow([])
writer.writerow(['backward all', self.gb.result['backward_all']])
writer.writerow(['backward_all_n_run', self.gb.result['n_run_backward_all']])
if (set(self.gb.result.keys()) >= {'training', 'n_run_training'}):
writer.writerow([])
writer.writerow(['training(forward + backward + update)', self.gb.result['training']])
writer.writerow(['training_n_run', self.gb.result['n_run_training']]) |
class miniImageNet(ImageFolder):
def __init__(self, root: str, mode: str, image_sz=84) -> None:
assert (mode in ['train', 'val', 'test'])
IMAGE_PATH = os.path.join(root, mode)
if ((mode == 'val') or (mode == 'test')):
transform = transforms.Compose([transforms.Resize([92, 92]), transforms.CenterCrop(image_sz), transforms.ToTensor(), transforms.Normalize(np.array([0.4712, 0.4499, 0.4031]), np.array([0.2726, 0.2634, 0.2794]))])
elif (mode == 'train'):
transform = transforms.Compose([transforms.RandomResizedCrop(image_sz), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(np.array([0.4712, 0.4499, 0.4031]), np.array([0.2726, 0.2634, 0.2794]))])
super().__init__(IMAGE_PATH, transform)
self.label = self.targets |
@ti.kernel
def apply_impulse(vf: ti.template(), dyef: ti.template(), imp_data: ti.types.ndarray()):
g_dir = ((- ti.Vector([0, 9.8])) * 300)
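# Per-cell momentum: the drag force decays exponentially with squared
# distance from the impulse point, plus buoyancy scaled by the dye density.
# force_radius, f_strength, dt and res are assumed module-level globals.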
for (i, j) in vf:
(omx, omy) = (imp_data[2], imp_data[3])
mdir = ti.Vector([imp_data[0], imp_data[1]])
(dx, dy) = (((i + 0.5) - omx), ((j + 0.5) - omy))
d2 = ((dx * dx) + (dy * dy))
factor = ti.exp(((- d2) / force_radius))
dc = dyef[(i, j)]
a = dc.norm()
momentum = ((((mdir * f_strength) * factor) + ((g_dir * a) / (1 + a))) * dt)
v = vf[(i, j)]
vf[(i, j)] = (v + momentum)
if (mdir.norm() > 0.5):
dc += (ti.exp(((- d2) * (4 / ((res / 15) ** 2)))) * ti.Vector([imp_data[4], imp_data[5], imp_data[6]]))
dyef[(i, j)] = dc |
class ProgressBar():
def __init__(self, iterable, epoch=None, prefix=None, quiet=False):
self.epoch = epoch
self.quiet = quiet
self.prefix = ((prefix + ' | ') if (prefix is not None) else '')
if (epoch is not None):
self.prefix += f'epoch {epoch:02d}'
self.iterable = (iterable if self.quiet else tqdm(iterable, self.prefix, leave=False))
def __iter__(self):
return iter(self.iterable)
def log(self, stats, verbose=False):
if (not self.quiet):
self.iterable.set_postfix(self.format_stats(stats, verbose), refresh=True)
def format_stats(self, stats, verbose=False):
postfix = OrderedDict(stats)
for (key, value) in postfix.items():
if isinstance(value, Number):
fmt = ('{:.3f}' if (value > 0.001) else '{:.1e}')
postfix[key] = fmt.format(value)
elif (isinstance(value, AverageMeter) or isinstance(value, RunningAverageMeter)):
if verbose:
postfix[key] = f'{value.avg:.3f} ({value.val:.3f})'
else:
postfix[key] = f'{value.avg:.3f}'
elif isinstance(value, TimeMeter):
postfix[key] = f'{value.elapsed_time:.1f}s'
elif (not isinstance(postfix[key], str)):
postfix[key] = str(value)
return postfix
def print(self, stats, verbose=False):
postfix = ' | '.join((((key + ' ') + value.strip()) for (key, value) in self.format_stats(stats, verbose).items()))
return f"{((self.prefix + ' | ') if (self.epoch is not None) else '')}{postfix}" |
class ImageType():
Scene = 0
DepthPlanner = 1
DepthPerspective = 2
DepthVis = 3
DisparityNormalized = 4
Segmentation = 5
SurfaceNormals = 6
Infrared = 7 |
def logging_config(folder=None, name=None, level=logging.INFO, console_level=logging.DEBUG):
if (name is None):
name = inspect.stack()[1][1].split('.')[0]
if (folder is None):
folder = os.path.join(os.getcwd(), name)
if (not os.path.exists(folder)):
os.makedirs(folder)
for handler in logging.root.handlers:
logging.root.removeHandler(handler)
logging.root.handlers = []
logpath = os.path.join(folder, (name + '.log'))
print(('All Logs will be saved to %s' % logpath))
logging.root.setLevel(level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logfile = logging.FileHandler(logpath)
logfile.setLevel(level)
logfile.setFormatter(formatter)
logging.root.addHandler(logfile)
logconsole = logging.StreamHandler()
logconsole.setLevel(console_level)
logconsole.setFormatter(formatter)
logging.root.addHandler(logconsole)
return folder |
class ConstantSchedule(object):
def __init__(self, value):
self._v = value
def value(self, t):
return self._v |
class SysCommonNlg(object):
templates = {SystemAct.GREET: ['Hello.', 'Hi.', 'Greetings.', 'How are you doing?'], SystemAct.ASK_REPEAT: ['Can you please repeat that?', 'What did you say?'], SystemAct.ASK_REPHRASE: ['Can you please rephrase that?', 'Can you say it in another way?'], SystemAct.GOODBYE: ['Goodbye.', 'See you next time.'], SystemAct.CLARIFY: ["I didn't catch you."], (SystemAct.REQUEST + core.BaseUsrSlot.NEED): ['What can I do for you?', 'What do you need?', 'How can I help?'], (SystemAct.REQUEST + core.BaseUsrSlot.HAPPY): ['What else can I do?', 'Are you happy about my answer?', 'Anything else?'], (SystemAct.EXPLICIT_CONFIRM + 'dont_care'): ['Okay, you dont_care, do you?', 'You dont_care, right?'], (SystemAct.IMPLICIT_CONFIRM + 'dont_care'): ['Okay, you dont_care.', 'Alright, dont_care.']} |
def ResNeXt29_2x64d(feature_dim=128):
return ResNeXt(num_blocks=[3, 3, 3], cardinality=2, bottleneck_width=64, feature_dim=feature_dim) |
class NominalAttributeMultiwayTest(InstanceConditionalTest):
def __init__(self, att_idx, branch_mapping):
super().__init__()
self._att_idx = att_idx
self._branch_mapping = branch_mapping
self._reverse_branch_mapping = {b: v for (v, b) in branch_mapping.items()}
def branch_for_instance(self, X):
if ((self._att_idx > len(X)) or (self._att_idx < 0)):
return (- 1)
else:
return self._branch_mapping.get(X[self._att_idx], (- 1))
@staticmethod
def max_branches():
return (- 1)
def describe_condition_for_branch(self, branch):
return 'Attribute {} = {}'.format(self._att_idx, self._reverse_branch_mapping[branch])
def branch_rule(self, branch):
return Predicate(self._att_idx, '==', self._reverse_branch_mapping[branch])
def get_atts_test_depends_on(self):
return [self._att_idx]
def add_new_branch(self, att_val):
new_branch_id = (max(self._branch_mapping.values()) + 1)
self._branch_mapping[att_val] = new_branch_id
self._reverse_branch_mapping[new_branch_id] = att_val
return new_branch_id |
def create_clones(config, model_fn, args=None, kwargs=None, gpu_offset=0):
clones = []
args = (args or [])
kwargs = (kwargs or {})
variables_device = config.variables_device()
with slim.arg_scope([slim.model_variable, slim.variable], device=variables_device):
for i in range(0, config.num_clones):
with tf.name_scope(config.clone_scope(i)) as clone_scope:
clone_device = config.clone_device((i + gpu_offset), force=(True if ((i + gpu_offset) >= config.num_clones) else False))
chooser = VariableDeviceChooser(variables_device, clone_device)
with tf.device(chooser.choose):
with tf.variable_scope(tf.get_variable_scope(), reuse=(True if (i > 0) else None)):
outputs = model_fn(*args, **kwargs)
clones.append(Clone(outputs, clone_scope, clone_device))
ext_var = (set(tf.global_variables()) - set(tf.model_variables()))
ext_var = list(ext_var)
global_step_tensor = tf.train.get_global_step()
for var in ext_var:
if (var is not global_step_tensor):
tf.contrib.framework.add_model_variable(var)
return clones |
def gaussian_noise_layer(x, is_training=False):
if is_training:
noise = tf.random_normal(shape=tf.shape(x), mean=0.0, stddev=1.0, dtype=tf.float32)
return (x + noise)
else:
return x |
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
val = str(val)
result: Any = []
if (val in NULL_VALUES):
return [np.nan]
if (not validate_ch_esr(val)):
if (errors == 'raise'):
raise ValueError(f'Unable to parse value {val}')
error_result = (val if (errors == 'ignore') else np.nan)
return [error_result]
if (output_format == 'compact'):
result = ([esr.compact(val)] + result)
elif (output_format == 'standard'):
result = ([esr.format(val)] + result)
return result |
def get_cluster_manager(params, config_proto):
return cnn_util.GrpcClusterManager(params, config_proto) |
@make_properties
class MapTiling(transformation.SingleStateTransformation):
map_entry = transformation.PatternNode(nodes.MapEntry)
prefix = Property(dtype=str, default='tile', desc='Prefix for new range symbols')
tile_sizes = ShapeProperty(dtype=tuple, default=(128, 128, 128), desc='Tile size per dimension')
strides = ShapeProperty(dtype=tuple, default=tuple(), desc='Tile stride (enables overlapping tiles). If empty, matches tile')
tile_offset = ShapeProperty(dtype=tuple, default=None, desc='Negative Stride offset per dimension', allow_none=True)
divides_evenly = Property(dtype=bool, default=False, desc='Tile size divides dimension length evenly')
tile_trivial = Property(dtype=bool, default=False, desc='Tiles even if tile_size is 1')
@staticmethod
def annotates_memlets():
return True
@classmethod
def expressions(cls):
return [sdutil.node_path_graph(cls.map_entry)]
def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
return True
def apply(self, graph: SDFGState, sdfg: SDFG):
tile_strides = self.tile_sizes
if ((self.strides is not None) and (len(self.strides) == len(tile_strides))):
tile_strides = self.strides
map_entry = self.map_entry
from dace.transformation.dataflow.map_collapse import MapCollapse
from dace.transformation.dataflow.strip_mining import StripMining
stripmine_subgraph = {StripMining.map_entry: self.subgraph[MapTiling.map_entry]}
sdfg_id = sdfg.sdfg_id
last_map_entry = None
removed_maps = 0
original_schedule = map_entry.schedule
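# Tile one dimension at a time by strip-mining it, then collapse each new
# outer map into the previous one so a single tiled map nest remains.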
for dim_idx in range(len(map_entry.map.params)):
if (dim_idx >= len(self.tile_sizes)):
tile_size = symbolic.pystr_to_symbolic(self.tile_sizes[(- 1)])
tile_stride = symbolic.pystr_to_symbolic(tile_strides[(- 1)])
else:
tile_size = symbolic.pystr_to_symbolic(self.tile_sizes[dim_idx])
tile_stride = symbolic.pystr_to_symbolic(tile_strides[dim_idx])
if (self.tile_offset and (dim_idx >= len(self.tile_offset))):
offset = self.tile_offset[(- 1)]
elif self.tile_offset:
offset = self.tile_offset[dim_idx]
else:
offset = 0
dim_idx -= removed_maps
if ((not self.tile_trivial) and (tile_size == map_entry.map.range.size()[dim_idx])):
continue
stripmine = StripMining()
stripmine.setup_match(sdfg, sdfg_id, self.state_id, stripmine_subgraph, self.expr_index)
if ((tile_size == 1) and (tile_stride == 1) and (self.tile_trivial == False)):
stripmine.dim_idx = dim_idx
stripmine.new_dim_prefix = ''
stripmine.tile_size = str(tile_size)
stripmine.tile_stride = str(tile_stride)
stripmine.divides_evenly = True
stripmine.tile_offset = str(offset)
stripmine.apply(graph, sdfg)
removed_maps += 1
else:
stripmine.dim_idx = dim_idx
stripmine.new_dim_prefix = self.prefix
stripmine.tile_size = str(tile_size)
stripmine.tile_stride = str(tile_stride)
stripmine.divides_evenly = self.divides_evenly
stripmine.tile_offset = str(offset)
stripmine.apply(graph, sdfg)
map_entry.schedule = original_schedule
if last_map_entry:
new_map_entry = graph.in_edges(map_entry)[0].src
mapcollapse_subgraph = {MapCollapse.outer_map_entry: graph.node_id(last_map_entry), MapCollapse.inner_map_entry: graph.node_id(new_map_entry)}
mapcollapse = MapCollapse()
mapcollapse.setup_match(sdfg, sdfg_id, self.state_id, mapcollapse_subgraph, 0)
mapcollapse.apply(graph, sdfg)
last_map_entry = graph.in_edges(map_entry)[0].src
return last_map_entry |
@dataclass
class SlurmQueueConf(BaseQueueConf):
_target_: str = 'hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher'
partition: Optional[str] = None
qos: Optional[str] = None
comment: Optional[str] = None
constraint: Optional[str] = None
exclude: Optional[str] = None
gres: Optional[str] = None
cpus_per_gpu: Optional[int] = None
gpus_per_task: Optional[int] = None
mem_per_gpu: Optional[str] = None
mem_per_cpu: Optional[str] = None
account: Optional[str] = None
signal_delay_s: int = 120
max_num_timeout: int = 0
additional_parameters: Dict[(str, Any)] = field(default_factory=dict)
array_parallelism: int = 256
setup: Optional[List[str]] = None |
def test_model(model, goal_path, show_goal=False, env_steps=1000, new_plan_frec=20, show_video=False, save_video=False, save_folder='./analysis/videos/model_trials/', save_filename='video.mp4'):
goal = plt.imread(goal_path)
if show_goal:
plt.axis('off')
plt.suptitle('Goal')
plt.imshow(goal)
plt.show()
goal = np.rint((goal * 255)).astype(int)
gym_env = gym.make('kitchen_relax-v1')
env = gym_env.env
s = env.reset()
FPS = 10
render_skip = max(1, round((1.0 / ((FPS * env.sim.model.opt.timestep) * env.frame_skip))))
viewer(env, mode='initialize')
for i in tqdm(range(env_steps)):
curr_img = env.render(mode='rgb_array')
curr_img = cv2.resize(curr_img, (300, 300))
current_and_goal = np.stack((curr_img, goal), axis=0)
current_and_goal = np.expand_dims(current_and_goal.transpose(0, 3, 1, 2), axis=0)
current_obs = np.expand_dims(s[:9], axis=0)
if ((i % new_plan_frec) == 0):
plan = model.get_pp_plan_vision(current_obs, current_and_goal)
action = model.predict_with_plan(current_obs, current_and_goal, plan).squeeze()
(s, r, _, _) = env.step(action.cpu().detach().numpy())
if ((i % render_skip) == 0):
viewer(env, mode='render', render=show_video)
if save_video:
if (not os.path.exists(save_folder)):
os.makedirs(save_folder)
viewer(env, mode='save', filename=(save_folder + save_filename))
env.close() |
def json2instanceImg(inJson, outImg, encoding='ids'):
annotation = Annotation()
annotation.fromJsonFile(inJson)
instanceImg = createInstanceImage(annotation, encoding)
instanceImg.save(outImg) |
def is_match(modules, node, pattern, max_uses=sys.maxsize):
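# A tuple pattern is (callable_or_module_type, *arg_patterns), matched
# recursively against node.args; a bare pattern matches the node itself.
# getattr patterns carry only the attribute name as their second element.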
if isinstance(pattern, tuple):
(self_match, *arg_matches) = pattern
if (self_match is getattr):
assert (len(pattern) == 2), 'Expecting getattr pattern to have two elements'
arg_matches = []
else:
self_match = pattern
arg_matches = []
if (node.uses > max_uses):
return False
if (isinstance(self_match, type) and issubclass(self_match, torch.nn.Module)):
if (node.op != 'call_module'):
return False
if (not (type(modules[node.target]) == self_match)):
return False
elif callable(self_match):
if ((node.op != 'call_function') or (node.target is not self_match)):
return False
elif (node.target is getattr):
if (node.args[1] != pattern[1]):
return False
elif (node.target != self_match):
return False
if (not arg_matches):
return True
if (len(arg_matches) != len(node.args)):
return False
return all((is_match(modules, node, arg_match, max_uses=1) for (node, arg_match) in zip(node.args, arg_matches))) |
def find_typeshed() -> Optional[Path]:
current_directory: pathlib.Path = Path(__file__).parent
bundled_typeshed_relative_path = 'pyre_check/typeshed/'
bundled_typeshed = find_parent_directory_containing_directory(current_directory, bundled_typeshed_relative_path)
if bundled_typeshed:
return (bundled_typeshed / bundled_typeshed_relative_path)
try:
import typeshed
return Path(typeshed.typeshed)
except ImportError:
LOG.debug('`import typeshed` failed, attempting a manual lookup')
return find_parent_directory_containing_directory(current_directory, 'typeshed/') |
@pytest.mark.skip(reason='Shared function')
def test_region(region):
client = SkyplaneClient().object_store()
key = str(uuid.uuid4()).replace('-', '')
src_filename = f'src_{key}'
dst_filename = f'dst_{key}'
provider = region.split(':')[0]
if (provider == 'azure'):
bucket_name = ((str(uuid.uuid4()).replace('-', '')[:24] + '/') + str(uuid.uuid4()).replace('-', ''))
else:
bucket_name = str(uuid.uuid4()).replace('-', '')
file_size = 1024
bucket_path = client.create_bucket(region, bucket_name)
assert client.bucket_exists(bucket_name, provider), f'Bucket {bucket_name} does not exist'
with open(src_filename, 'wb') as fout:
fout.write(os.urandom(file_size))
client.upload_object(src_filename, bucket_name, provider, key)
assert client.exists(bucket_name, provider, key), f'Object {key} does not exist in bucket {bucket_name}'
client.download_object(bucket_name, provider, key, dst_filename)
assert (open(src_filename, 'rb').read() == open(dst_filename, 'rb').read()), f'Downloaded file {dst_filename} does not match uploaded file {src_filename}'
client.delete_bucket(bucket_name, provider)
assert (not client.bucket_exists(bucket_name, provider)), f'Bucket {bucket_name} still exists'
os.remove(src_filename)
os.remove(dst_filename) |
def register_Ns3QueueLimits_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::QueueLimits const &', 'arg0')])
cls.add_method('Available', 'int32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
cls.add_method('Completed', 'void', [param('uint32_t', 'count')], is_pure_virtual=True, is_virtual=True)
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('Queued', 'void', [param('uint32_t', 'count')], is_pure_virtual=True, is_virtual=True)
cls.add_method('Reset', 'void', [], is_pure_virtual=True, is_virtual=True)
return |
def add_arguments(parser):
parser.add_argument('files', nargs='+', help='path to input box files')
parser.add_argument('--invert-y', action='store_true', help='invert (mirror) the y-axis particle coordinates. appears to be necessary for .tiff compatibility with EMAN2')
parser.add_argument('--imagedir', help='directory of images. only required to invert the y-axis - necessary for particles picked on .tiff images in EMAN2')
parser.add_argument('--image-ext', default='tiff', help='image format extension, * corresponds to matching the first image file with the same name as the box file (default: tiff)')
parser.add_argument('-o', '--output', help='destination file (default: stdout)') |
def varlen_lstm_backward_setup(forward_output, seed=None):
if seed:
torch.manual_seed(seed)
rnn_utils = torch.nn.utils.rnn
sequences = forward_output[0]
padded = rnn_utils.pad_sequence(sequences)
grad = torch.randn_like(padded)
return (padded, grad) |
def _squeeze_and_excite(x, hidden_dim, activation_fn=tf.nn.relu6, normalization_op_params=None):
if (normalization_op_params is None):
raise ValueError('Normalization params cannot be `None`')
(_, height, width, channels) = x.get_shape().as_list()
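# Squeeze: global average pool to 1x1; excite: two 1x1 convs produce
# per-channel sigmoid gates that rescale the input feature map.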
u = tf.keras.layers.AveragePooling2D([height, width], strides=1, padding='valid')(x)
u = _conv(u, hidden_dim, 1, normalizer_fn=None, activation_fn=activation_fn, normalization_op_params=normalization_op_params)
u = _conv(u, channels, 1, normalizer_fn=None, activation_fn=tf.nn.sigmoid, normalization_op_params=normalization_op_params)
return (u * x) |
def main():
parser = argparse.ArgumentParser(description='Validates that the descriptions in notebooks follow the expected format, so that the notebooks read consistently and render nicely.')
parser.add_argument('locations', nargs='+', help='Paths(s) to search for Jupyter notebooks to check')
args = parser.parse_args()
all_files = []
for p in args.locations:
path = Path(p)
if path.is_dir():
all_files.extend(path.glob('**/*.ipynb'))
elif path.is_file():
all_files.append(path)
else:
raise ValueError(f"Specified location '{path}' is not a file or directory.")
all_errors = []
for file_loc in all_files:
if ('.ipynb_checkpoint' in str(file_loc)):
continue
print(f'{YELLOW_BOLD}Checking file {file_loc}{RESET}')
notebook = nbformat.read(str(file_loc), as_version=4)
cells = parse_markdown_cells(notebook)
this_errors = []
for checker in CHECKERS:
try:
checker(cells)
except FormattingError as exc:
for e in exc.errors:
print(f'''{LIGHT_RED_BOLD}error{RESET}: {e}
''')
this_errors.extend(exc.errors)
if this_errors:
all_errors.append((file_loc, this_errors))
if all_errors:
def list_element(s):
indented = textwrap.indent(s, ' ')
return f'- {indented[2:]}'
def render_path(path):
text = f'**`{path}`**'
try:
commit = os.environ['BUILDKITE_COMMIT']
except KeyError:
pass
else:
url = f'
text = f'{text} ([rendered notebook]({url}))'
return text
def file_list(path, errors):
whole_list = '\n'.join((list_element(error) for error in errors))
return f'''{render_path(path)}:
{whole_list}'''
file_lists = '\n\n'.join((file_list(path, errors) for (path, errors) in all_errors))
command = f'python {__file__} demos/'
formatted = f'''Found some notebooks with inconsistent formatting. These notebooks may be less clear or render incorrectly on Read the Docs. Please adjust them.
{file_lists}
This check can be run locally, via `{command}`.'''
if ('GITHUB_ACTIONS' in os.environ):
for (path, errors) in all_errors:
whole_list = '\n'.join(errors)
message = f'''Notebook failed text check:
{whole_list}'''
escaped = message.replace('\n', '%0A')
print(f'::error file={path}::{escaped}')
try:
subprocess.run(['buildkite-agent', 'annotate', '--style=error', '--context=notebook_text_checker', formatted])
except FileNotFoundError:
pass
sys.exit(1) |
def get_partition_dataset(data_path, data_name, part_id):
part_name = os.path.join(data_name, ('partition_' + str(part_id)))
path = os.path.join(data_path, part_name)
if (not os.path.exists(path)):
print('Partition file not found.')
exit()
train_path = os.path.join(path, 'train.txt')
local2global_path = os.path.join(path, 'local_to_global.txt')
partition_book_path = os.path.join(path, 'partition_book.txt')
relation_path = os.path.join(path, 'relation_count.txt')
dataset = PartitionKGDataset(relation_path, train_path, local2global_path, read_triple=True)
partition_book = []
with open(partition_book_path) as f:
for line in f:
partition_book.append(int(line))
local_to_global = []
with open(local2global_path) as f:
for line in f:
local_to_global.append(int(line))
return (dataset, partition_book, local_to_global) |
def test_dont_record_objectproxy_instance_check():
proxy = tt.ObjectProxy(42)
with tt.shim_isinstance():
assert isinstance(proxy, tt.ObjectProxy)
assert (len(tt.UsageTraceNode.from_proxy(proxy).type_checks) == 0) |
class SequenceTranslation(object):
def __init__(self, max_shift: int):
self.max_shift = max_shift
def __call__(self, x: LongTensor, shift=None):
if (shift is None):
shift = random.randint((- self.max_shift), self.max_shift)
else:
shift = min(shift, self.max_shift)
shift = max(shift, (- self.max_shift))
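# Rotate only the tokens between the first and last (boundary) tokens;
# reducing the shift modulo num_valid_tokens keeps all indices in range.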
num_valid_tokens = (x.size(0) - 2)
if (shift < 0):
shift = (- ((- shift) % num_valid_tokens))
elif (shift > 0):
shift = (shift % num_valid_tokens)
if (shift == 0):
return x
trimmed_x = x[1:(- 1)]
rot_x = x.clone()
if (shift < 0):
rot_x[1:((num_valid_tokens + shift) + 1)] = trimmed_x[(- shift):]
rot_x[((num_valid_tokens + shift) + 1):(- 1)] = trimmed_x[:(- shift)]
else:
rot_x[1:(shift + 1)] = trimmed_x[(- shift):]
rot_x[(shift + 1):(- 1)] = trimmed_x[:(- shift)]
return rot_x |
def test_torootname():
model_1 = pyhf.simplemodels.correlated_background([5], [50], [52], [48])
model_2 = pyhf.simplemodels.uncorrelated_background([5], [50], [7])
model_3 = pyhf.simplemodels.uncorrelated_background([5, 6], [50, 50], [7, 8])
assert (pyhf.compat.paramset_to_rootnames(model_1.config.param_set('mu')) == 'mu')
assert (pyhf.compat.paramset_to_rootnames(model_1.config.param_set('correlated_bkg_uncertainty')) == 'alpha_correlated_bkg_uncertainty')
assert (pyhf.compat.paramset_to_rootnames(model_2.config.param_set('uncorr_bkguncrt')) == ['gamma_uncorr_bkguncrt_0'])
assert (pyhf.compat.paramset_to_rootnames(model_3.config.param_set('uncorr_bkguncrt')) == ['gamma_uncorr_bkguncrt_0', 'gamma_uncorr_bkguncrt_1']) |
class CosineWarmup(torch.optim.lr_scheduler.CosineAnnealingLR):
def __init__(self, optimizer, T_max, eta_min=0, warmup_step=0, **kwargs):
self.warmup_step = warmup_step
super().__init__(optimizer, (T_max - warmup_step), eta_min, **kwargs)
def get_lr(self):
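# Linear warmup for the first warmup_step epochs, then defer to the
# closed-form cosine-annealing recurrence shifted by warmup_step.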
if (not self._get_lr_called_within_step):
warnings.warn('To get the last learning rate computed by the scheduler, please use `get_last_lr()`.', UserWarning)
if (self.last_epoch == self.warmup_step):
return self.base_lrs
elif (self.last_epoch < self.warmup_step):
return [((base_lr * (self.last_epoch + 1)) / self.warmup_step) for base_lr in self.base_lrs]
elif (((((self.last_epoch - self.warmup_step) - 1) - self.T_max) % (2 * self.T_max)) == 0):
return [(group['lr'] + (((base_lr - self.eta_min) * (1 - math.cos((math.pi / self.T_max)))) / 2)) for (base_lr, group) in zip(self.base_lrs, self.optimizer.param_groups)]
return [((((1 + math.cos(((math.pi * (self.last_epoch - self.warmup_step)) / self.T_max))) / (1 + math.cos(((math.pi * ((self.last_epoch - self.warmup_step) - 1)) / self.T_max)))) * (group['lr'] - self.eta_min)) + self.eta_min) for group in self.optimizer.param_groups]
_get_closed_form_lr = None |
@register_model('transformer_from_pretrained_xlm')
class TransformerFromPretrainedXLMModel(TransformerModel):
@staticmethod
def add_args(parser):
TransformerModel.add_args(parser)
parser.add_argument('--pretrained-xlm-checkpoint', type=str, metavar='STR', help='XLM model to use for initializing transformer encoder and/or decoder')
parser.add_argument('--init-encoder-only', action='store_true', help="if set, don't load the XLM weights and embeddings into decoder")
parser.add_argument('--init-decoder-only', action='store_true', help="if set, don't load the XLM weights and embeddings into encoder")
@classmethod
def build_model(cls, args, task, cls_dictionary=MaskedLMDictionary):
assert hasattr(args, 'pretrained_xlm_checkpoint'), 'You must specify a path for --pretrained-xlm-checkpoint to use --arch transformer_from_pretrained_xlm'
assert (isinstance(task.source_dictionary, cls_dictionary) and isinstance(task.target_dictionary, cls_dictionary)), 'You should use a MaskedLMDictionary when using --arch transformer_from_pretrained_xlm because the pretrained XLM model was trained using data binarized with MaskedLMDictionary. For translation, you may want to use --task translation_from_pretrained_xlm'
assert (not (getattr(args, 'init_encoder_only', False) and getattr(args, 'init_decoder_only', False))), 'Only one of --init-encoder-only and --init-decoder-only can be set.'
return super().build_model(args, task)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoderFromPretrainedXLM(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoderFromPretrainedXLM(args, tgt_dict, embed_tokens) |
class SEWDForSequenceClassification(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch']) |