code stringlengths 101 5.91M |
|---|
def run(src_region, dest_region, n_files=1, file_size_mb=1, multipart=False):
    """Run a skyplane `cp` integration test.

    Copies ``n_files`` objects of ``file_size_mb`` MB each from ``src_region``
    to ``dest_region`` (regions are "provider:region" strings), then deletes
    the temporary objects and buckets.

    Returns:
        The return code of the `cp` command.
    """
    logger.info(
        f'Running skyplane cp integration test with config '
        f'src_region={src_region}, '
        f'dest_region={dest_region}, '
        f'n_files={n_files}, '
        f'file_size_mb={file_size_mb}, '
        f'multipart={multipart}'
    )
    (src_bucket_name, dest_bucket_name, src_prefix, dest_prefix) = setup_buckets(
        src_region, dest_region, n_files=n_files, file_size_mb=file_size_mb
    )

    def map_path(region, bucket, prefix):
        # Translate (provider:region, bucket, prefix) into the provider-specific
        # object-store URI expected by `cp`.
        (provider, _) = region.split(':')
        if provider == 'aws':
            return f's3://{bucket}/{prefix}'
        elif provider == 'azure':
            # Azure "buckets" are encoded as "<storage_account>/<container>".
            (storage_account, container) = bucket.split('/')
            # FIX: the original return literal was truncated (`return f'`).
            # Reconstructed using skyplane's azure://<account>/<container>/<prefix>
            # path convention — confirm against the skyplane CLI docs.
            return f'azure://{storage_account}/{container}/{prefix}'
        elif provider == 'gcp':
            return f'gs://{bucket}/{prefix}'
        else:
            raise Exception(f'Unknown provider {provider}')

    return_code = cp(
        map_path(src_region, src_bucket_name, src_prefix),
        map_path(dest_region, dest_bucket_name, dest_prefix),
        recursive=True,
        debug=False,
        multipart=multipart,
        confirm=True,
        max_instances=1,
        max_connections=1,
        solver='direct',
        solver_required_throughput_gbits=1,
    )
    # Clean up the temporary objects and buckets created by setup_buckets.
    src_interface = ObjectStoreInterface.create(src_region, src_bucket_name)
    dest_interface = ObjectStoreInterface.create(dest_region, dest_bucket_name)
    src_interface.delete_objects([f'{src_prefix}/{i}' for i in range(n_files)])
    dest_interface.delete_objects([f'{dest_prefix}/{i}' for i in range(n_files)])
    src_interface.delete_bucket()
    dest_interface.delete_bucket()
    return return_code
class ModelArguments():
    """Arguments controlling which model/config/tokenizer to fine-tune or
    train from scratch.

    NOTE(review): the fields use `dataclasses.field`, so this class is
    presumably decorated with `@dataclass` at its definition site — the
    decorator is not visible in this chunk; confirm.
    """

    # Checkpoint to initialize weights from; None means train from scratch.
    model_name_or_path: Optional[str] = field(default=None, metadata={'help': 'The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.'})
    # Architecture to use when training from scratch (one of MODEL_TYPES).
    model_type: Optional[str] = field(default=None, metadata={'help': ('If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES))})
    # Pretrained config name/path, when it differs from model_name_or_path.
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    # Pretrained tokenizer name/path, when it differs from model_name_or_path.
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    # Download/cache directory for models fetched from huggingface.co.
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
def normalize_space(format_sql):
    """Normalize whitespace in a formatted SQL string.

    Pads commas and parentheses with single spaces, collapses runs of
    whitespace within each line, and then applies a fixed sequence of
    keyword-level line-break adjustments.
    """
    padded_lines = []
    for raw_line in format_sql.split('\n'):
        padded = raw_line.strip().replace(',', ' , ').replace('(', ' ( ').replace(')', ' ) ')
        padded_lines.append(' '.join(padded.split()))
    normalized = '\n'.join(padded_lines)
    # Keyword adjustments, applied in the same order as before.
    for old, new in (
        ('\njoin', ' join'),
        (',\n', ', '),
        (' where', '\nwhere'),
        (' intersect', '\nintersect'),
        ('union ', 'union\n'),
        ('\nand', ' and'),
        ('order by t2 .\nstart desc', 'order by t2 . start desc'),
    ):
        normalized = normalized.replace(old, new)
    return normalized
_module()
class AOTEncoder(nn.Module):
    """Encoder of the AOT-GAN inpainting network.

    Reflection padding followed by a stem convolution and two stride-2
    downsampling convolutions.
    """

    def __init__(self, in_channels=4, mid_channels=64, out_channels=256, act_cfg=dict(type='ReLU')):
        super().__init__()
        stem = ConvModule(in_channels, mid_channels, kernel_size=7, stride=1, act_cfg=act_cfg)
        down1 = ConvModule(mid_channels, mid_channels * 2, kernel_size=4, stride=2, padding=1, act_cfg=act_cfg)
        down2 = ConvModule(mid_channels * 2, out_channels, kernel_size=4, stride=2, padding=1, act_cfg=act_cfg)
        self.encoder = nn.Sequential(nn.ReflectionPad2d(3), stem, down1, down2)

    def forward(self, x):
        """Encode input tensor `x` into a downsampled feature map."""
        return self.encoder(x)
class LoaderConfig():
    """Configuration bundle passed to a schema loader.

    NOTE(review): annotation-only fields — this class is presumably a
    dataclass; the decorator is not visible in this chunk, confirm.
    """

    # Raw schema dict, or a path/URL pointing at the schema.
    schema_or_location: (str | dict[(str, Any)])
    # In-process application instance, when testing without a server.
    app: Any
    base_url: (str | None)
    validate_schema: bool
    skip_deprecated_operations: bool
    data_generation_methods: tuple[(DataGenerationMethod, ...)]
    # Force a specific schema version instead of auto-detecting it.
    force_schema_version: (str | None)
    # TLS verification flag, or a path to a CA bundle.
    request_tls_verify: (bool | str)
    request_proxy: (str | None)
    request_cert: (RequestCert | None)
    # Seconds to wait for the schema to become reachable, if any.
    wait_for_schema: (float | None)
    rate_limit: (str | None)
    # (user, password) pair and auth scheme for requests.
    auth: (tuple[(str, str)] | None)
    auth_type: (str | None)
    headers: (dict[(str, str)] | None)
    # Filters restricting which API operations are tested.
    endpoint: (Filter | None)
    method: (Filter | None)
    tag: (Filter | None)
    operation_id: (Filter | None)
def get_model_gradient_multipliers(last_layers, last_layer_gradient_multiplier):
    """Build a mapping from variable op-name to gradient multiplier.

    Bias variables get 2x gradients; variables belonging to `last_layers`
    get `last_layer_gradient_multiplier`; biases in last layers get
    2 * last_layer_gradient_multiplier (the last-layer rules override the
    generic bias rule).

    Args:
        last_layers: iterable of layer-name substrings treated as last layers.
        last_layer_gradient_multiplier: multiplier for last-layer variables.

    Returns:
        dict mapping variable op names to multipliers; unmatched variables
        are omitted (implicitly multiplier 1.0).
    """
    gradient_multipliers = {}
    for var in slim.get_model_variables():
        # Generic rule: double the gradient of every bias variable.
        if ('biases' in var.op.name):
            gradient_multipliers[var.op.name] = 2.0
        # Last-layer rules; first matching layer wins (break).
        for layer in last_layers:
            if ((layer in var.op.name) and ('biases' in var.op.name)):
                gradient_multipliers[var.op.name] = (2 * last_layer_gradient_multiplier)
                break
            elif (layer in var.op.name):
                gradient_multipliers[var.op.name] = last_layer_gradient_multiplier
                break
    return gradient_multipliers
def _get_parts_meta():
    """Build PASCAL-parts dataset metadata: a contiguous-id remapping and the
    list of class names, both derived from PASCAL_PARTS_CATEGORIES."""
    category_ids = [cat['id'] for cat in PASCAL_PARTS_CATEGORIES]
    category_names = [cat['name'] for cat in PASCAL_PARTS_CATEGORIES]
    # Map raw dataset ids to 0..N-1 contiguous training ids.
    id_remap = {cat_id: contiguous for contiguous, cat_id in enumerate(category_ids)}
    return {
        'stuff_dataset_id_to_contiguous_id': id_remap,
        'stuff_classes': category_names,
    }
def compute_accuracy(anomalies, real_events):
    """Return the fraction of detections that hit a real event.

    Counts how many entries of `anomalies` appear in `real_events` and divides
    by the number of real events (duplicate detections count multiple times,
    matching the original behavior).

    Args:
        anomalies: iterable of detected anomaly identifiers.
        real_events: collection of ground-truth event identifiers.

    Returns:
        correct / len(real_events); 0.0 when `real_events` is empty
        (the original raised ZeroDivisionError in that case).
    """
    if not real_events:
        return 0.0
    correct = sum(1 for anomaly in anomalies if anomaly in real_events)
    return correct / len(real_events)
class SIE_literal(SageInputExpression):
    """A SageInputExpression wrapping a literal value."""

    def _sie_is_simple(self):
        # A literal is "simple" exactly when it is not shared; shared
        # expressions must be hoisted into a named variable.
        if self._sie_share:
            return False
        return True
class TestUtil(unittest.TestCase):
    """Unit tests for the text-cleaning utilities."""

    def test_clean_via_pos(self):
        # Determiners (DT) and possessive markers (POS) should be dropped.
        tokens = ['the', 'newly-elect', 'leader', "'s", 'wife']
        pos_tags = ['DT', 'JJ', 'NN', 'POS', 'NN']
        expected = ['newly-elect', 'leader', 'wife']
        self.assertEqual(expected, clean_via_pos(tokens, pos_tags))
class CodeObjectNode(ExprNode):
    """Cython expression node producing the PyCodeObject for a function.

    The code object is emitted once into the module's cached constants; it
    carries the variable-name tuple, flags, filename, and first line number
    that CPython tooling (tracebacks, inspect) expects.
    """

    subexprs = ['varnames']
    is_temp = False
    result_code = None

    def __init__(self, def_node):
        ExprNode.__init__(self, def_node.pos, def_node=def_node)
        args = list(def_node.args)
        # Only named local variables participate in co_varnames.
        local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name]
        self.varnames = TupleNode(def_node.pos, args=[IdentifierStringNode(arg.pos, value=arg.name) for arg in (args + local_vars)], is_temp=0, is_literal=1)

    def may_be_none(self):
        # A code object constant is never None.
        return False

    def calculate_result_code(self, code=None):
        # Lazily allocate a cached module-level constant for the code object.
        if (self.result_code is None):
            self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
        return self.result_code

    def generate_result_code(self, code):
        if (self.result_code is None):
            self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
        # Switch to the cached-constants writer; None means this constant was
        # already generated elsewhere, so there is nothing to emit.
        code = code.get_cached_constants_writer(self.result_code)
        if (code is None):
            return
        code.mark_pos(self.pos)
        func = self.def_node
        func_name = code.get_py_string_const(func.name, identifier=True, is_str=False, unicode_value=func.name)
        # The source file path is embedded as a utf8 byte literal.
        file_path = StringEncoding.bytes_literal(func.pos[0].get_filenametable_entry().encode('utf8'), 'utf8')
        file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)
        # Code-object flags mirroring CPython's for a plain function body.
        flags = ['CO_OPTIMIZED', 'CO_NEWLOCALS']
        if self.def_node.star_arg:
            flags.append('CO_VARARGS')
        if self.def_node.starstar_arg:
            flags.append('CO_VARKEYWORDS')
        code.putln(('%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s' % (self.result_code, (len(func.args) - func.num_kwonly_args), func.num_kwonly_args, len(self.varnames.args), ('|'.join(flags) or '0'), Naming.empty_bytes, Naming.empty_tuple, Naming.empty_tuple, self.varnames.result(), Naming.empty_tuple, Naming.empty_tuple, file_path_const, func_name, self.pos[1], Naming.empty_bytes, code.error_goto_if_null(self.result_code, self.pos))))
def test_worker(from_idx, to_idx, params):
params = params
succ = set()
fail = set()
for idx in range(from_idx, to_idx):
try:
succ.add(idx)
except ValueError:
fail.add(idx)
return (succ, fail) |
class SAM(torch.optim.Optimizer):
    """Sharpness-Aware Minimization (Foret et al., 2021) wrapper optimizer.

    Usage: after the first backward pass call `first_step` (perturbs weights
    to w + e(w)), recompute the loss and gradients at the perturbed point,
    then call `second_step` (restores w and applies the base optimizer).
    """

    def __init__(self, params, base_optimizer, rho=0.05, **kwargs):
        assert rho >= 0.0, f'Invalid rho, should be non-negative: {rho}'
        defaults = dict(rho=rho, **kwargs)
        super(SAM, self).__init__(params, defaults)
        # The base optimizer shares our param groups so rho travels with them.
        self.base_optimizer = base_optimizer(self.param_groups, **kwargs)
        self.param_groups = self.base_optimizer.param_groups

    # FIX: the decorators below were mangled to bare `_grad()` calls (a
    # NameError at class creation). Parameter perturbation must not be
    # tracked by autograd, so both steps run under @torch.no_grad().
    @torch.no_grad()
    def first_step(self, zero_grad=False):
        """Perturb parameters to w + e(w), caching e(w) per parameter."""
        grad_norm = self._grad_norm()
        for group in self.param_groups:
            # Small epsilon guards against a zero gradient norm.
            scale = group['rho'] / (grad_norm + 1e-12)
            for p in group['params']:
                if p.grad is None:
                    continue
                e_w = p.grad * scale.to(p)
                p.add_(e_w)  # climb to the local "sharpest" point w + e(w)
                self.state[p]['e_w'] = e_w
        if zero_grad:
            self.zero_grad()

    @torch.no_grad()
    def second_step(self, zero_grad=False):
        """Undo the perturbation and apply the actual base-optimizer step."""
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                p.sub_(self.state[p]['e_w'])  # back to the original w
        self.base_optimizer.step()
        if zero_grad:
            self.zero_grad()

    def _grad_norm(self):
        # Accumulate the norm on one device in case of model parallelism.
        shared_device = self.param_groups[0]['params'][0].device
        norm = torch.norm(torch.stack([p.grad.norm(p=2).to(shared_device) for group in self.param_groups for p in group['params'] if (p.grad is not None)]), p=2)
        return norm
def simplify_replacements(replacements):
    """Drop (old, new) replacement patterns implied by shorter ones.

    A pattern (old2, new2) is redundant when applying (old, new) to old2
    already yields new2. The list is sorted by pattern length and filtered
    in place; the (possibly shortened) list is returned.
    """
    if len(replacements) <= 1:
        return replacements
    # Shorter patterns first: each pattern only needs checking against the
    # longer ones that could be subsumed by it.
    replacements.sort(key=lambda pair: len(pair[0]))
    i = 0
    while i < len(replacements):
        short_old, short_new = replacements[i]
        j = i + 1
        while j < len(replacements):
            long_old, long_new = replacements[j]
            if long_old.replace(short_old, short_new) == long_new:
                del replacements[j]
            else:
                j += 1
        i += 1
    return replacements
_context(allow_default=True)
class Node(object):
    """A named cluster node.

    On construction the node registers itself with the currently active
    Cluster and remembers its extra keyword arguments for later use.
    """

    def __init__(self, node='local', **kwargs):
        self._kwargs = kwargs
        self._name = str(node)
        # Register with the active cluster so it can track its members.
        Cluster.current().add_node(self)

    def kwargs(self):
        """Return the keyword arguments this node was created with."""
        return self._kwargs

    def __str__(self):
        return self._name

    def __repr__(self):
        return 'Node(name={}, kwargs={})'.format(self._name, self._kwargs)
def valid_YYYYMMDD(inval):
    """Return True when `inval` is a date string in YYYYMMDD form.

    Accepts years 1950-2049 (as in the original pattern), months 01-12 and
    days 01-31.

    Fixes over the original:
    - `re.search` matched the pattern *anywhere*, so inputs with extra
      leading/trailing characters passed; `re.fullmatch` anchors it.
    - month "00" and day "00" were accepted; they are now rejected.
    """
    pattern = r'(19[5-9]|20[0-4])\d(0[1-9]|1[0-2])(0[1-9]|[12]\d|3[01])'
    return bool(re.fullmatch(pattern, inval))
class Predict(Subcommand):
    """CLI subcommand that runs a trained model over a JSON-lines input file."""

    def __init__(self, predictor_overrides: Optional[Dict[str, str]] = None) -> None:
        """Merge caller-supplied predictor overrides over the defaults.

        FIX: the original used a mutable default argument (`={}`); use the
        None sentinel instead. Behavior for all callers is unchanged.
        """
        self.predictors = {**DEFAULT_PREDICTORS, **(predictor_overrides or {})}

    def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
        """Register the predict subcommand and its arguments on `parser`."""
        description = 'Run the specified model against a JSON-lines input file.'
        subparser = parser.add_parser(name, description=description, help='Use a trained model to make predictions.')
        subparser.add_argument('archive_file', type=str, help='the archived model to make predictions with')
        subparser.add_argument('input_file', type=argparse.FileType('r'), help='path to input file')
        subparser.add_argument('--output-file', type=argparse.FileType('w'), help='path to output file')
        # --batch_size is the deprecated spelling; hidden from --help.
        batch_size = subparser.add_mutually_exclusive_group(required=False)
        batch_size.add_argument('--batch-size', type=int, default=1, help='The batch size to use for processing')
        batch_size.add_argument('--batch_size', type=int, help=argparse.SUPPRESS)
        subparser.add_argument('--silent', action='store_true', help='do not print output to stdout')
        # --cuda_device is the deprecated spelling; hidden from --help.
        cuda_device = subparser.add_mutually_exclusive_group(required=False)
        cuda_device.add_argument('--cuda-device', type=int, default=(- 1), help='id of GPU to use (if any)')
        cuda_device.add_argument('--cuda_device', type=int, help=argparse.SUPPRESS)
        subparser.add_argument('-o', '--overrides', type=str, default='', help='a HOCON structure used to override the experiment configuration')
        subparser.set_defaults(func=_predict(self.predictors))
        return subparser
def test_get_output_auto_wrap_false():
    """Estimators that opt out of auto-wrapping must not grow a `set_output`
    method, and their transform must pass data through untouched."""
    estimator = EstimatorWithSetOutputNoAutoWrap()
    assert not hasattr(estimator, 'set_output')
    data = np.asarray([[1, 0, 3], [0, 0, 1]])
    # transform is expected to return the very same array object.
    assert data is estimator.transform(data)
class SchNet(torch.nn.Module):
    """SchNet molecular property prediction network, adapted to take padded
    (z, mask, pos) batches which it flattens into concatenated per-atom
    tensors with an explicit batch index.

    NOTE(review): relies on module-level `ase`, `radius_graph`, `scatter`,
    `GaussianSmearing`, `InteractionBlock`, `ShiftedSoftplus`, `Embedding`,
    `Linear`, `ModuleList` names imported elsewhere in this file.
    """

    def __init__(self, hidden_channels=128, num_filters=128, num_interactions=6, num_gaussians=50, cutoff=10.0, readout='add', dipole=False, mean=None, std=None, atomref=None):
        super(SchNet, self).__init__()
        assert (readout in ['add', 'sum', 'mean'])
        self.hidden_channels = hidden_channels
        self.num_filters = num_filters
        self.num_interactions = num_interactions
        self.num_gaussians = num_gaussians
        self.cutoff = cutoff
        self.readout = readout
        self.dipole = dipole
        # Dipole prediction always sums per-atom contributions.
        self.readout = ('add' if self.dipole else self.readout)
        self.mean = mean
        self.std = std
        self.scale = None
        # Atomic masses are needed for the center-of-mass in dipole mode.
        atomic_mass = torch.from_numpy(ase.data.atomic_masses)
        self.register_buffer('atomic_mass', atomic_mass)
        # Embedding over atomic numbers (supports Z < 100).
        self.embedding = Embedding(100, hidden_channels)
        self.distance_expansion = GaussianSmearing(0.0, cutoff, num_gaussians)
        self.interactions = ModuleList()
        for _ in range(num_interactions):
            block = InteractionBlock(hidden_channels, num_gaussians, num_filters, cutoff)
            self.interactions.append(block)
        self.lin1 = Linear(hidden_channels, (hidden_channels // 2))
        self.act = ShiftedSoftplus()
        self.lin2 = Linear((hidden_channels // 2), 1)
        # Optional per-element reference values added to predictions.
        self.register_buffer('initial_atomref', atomref)
        self.atomref = None
        if (atomref is not None):
            self.atomref = Embedding(100, 1)
            self.atomref.weight.data.copy_(atomref)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize embeddings, interaction blocks, and the output MLP."""
        self.embedding.reset_parameters()
        for interaction in self.interactions:
            interaction.reset_parameters()
        torch.nn.init.xavier_uniform_(self.lin1.weight)
        self.lin1.bias.data.fill_(0)
        torch.nn.init.xavier_uniform_(self.lin2.weight)
        self.lin2.bias.data.fill_(0)
        if (self.atomref is not None):
            self.atomref.weight.data.copy_(self.initial_atomref)

    def forward(self, z, mask, pos):
        """Predict one scalar per molecule.

        Args:
            z: padded per-molecule atomic numbers (long); `mask` selects the
               real atoms — assumes a (batch, max_atoms) layout, TODO confirm.
            mask: boolean mask of valid atoms per molecule.
            pos: padded atomic positions matching z.
        """
        # Flatten the padded batch into concatenated per-atom tensors with an
        # explicit batch-index vector (PyG-style).
        (z_, pos_, batch) = ([], [], [])
        for i in range(len(z)):
            z_.append(z[i][mask[i]])
            pos_.append(pos[i][mask[i]])
            batch.append((torch.ones(torch.sum(mask[i]).item()) * i))
        z = torch.cat(z_)
        pos = torch.cat(pos_)
        batch = torch.cat(batch).long()
        del z_, pos_
        assert ((z.dim() == 1) and (z.dtype == torch.long))
        batch = (torch.zeros_like(z) if (batch is None) else batch)
        h = self.embedding(z)
        # Neighbor graph over positions within the cutoff radius.
        edge_index = radius_graph(pos, r=self.cutoff, batch=batch)
        (row, col) = edge_index
        edge_weight = (pos[row] - pos[col]).norm(dim=(- 1))
        edge_attr = self.distance_expansion(edge_weight)
        # Residual message-passing updates.
        for interaction in self.interactions:
            h += interaction(h, edge_index, edge_weight, edge_attr)
        h = self.lin1(h)
        h = self.act(h)
        h = self.lin2(h)
        if self.dipole:
            # Weight positions relative to the molecular center of mass.
            mass = self.atomic_mass[z].view((- 1), 1)
            c = (scatter((mass * pos), batch, dim=0) / scatter(mass, batch, dim=0))
            h = (h * (pos - c[batch]))
        if ((not self.dipole) and (self.mean is not None) and (self.std is not None)):
            # De-standardize per-atom predictions.
            h = ((h * self.std) + self.mean)
        if ((not self.dipole) and (self.atomref is not None)):
            h = (h + self.atomref(z))
        out = scatter(h, batch, dim=0, reduce=self.readout)
        if self.dipole:
            # Dipole magnitude from the summed per-atom vectors.
            out = torch.norm(out, dim=(- 1), keepdim=True)
        if (self.scale is not None):
            out = (self.scale * out)
        return out

    def __repr__(self):
        return f'{self.__class__.__name__}(hidden_channels={self.hidden_channels}, num_filters={self.num_filters}, num_interactions={self.num_interactions}, num_gaussians={self.num_gaussians}, cutoff={self.cutoff})'
def _test_loader_from_config(cfg, dataset_name, mapper=None):
    """Assemble the keyword arguments for building a test-time dataloader
    from a detectron2-style config."""
    if cfg.MODEL.LOAD_PROPOSALS:
        # Pick the proposal file aligned with this dataset's position in
        # cfg.DATASETS.TEST.
        position = list(cfg.DATASETS.TEST).index(dataset_name)
        proposal_files = [cfg.DATASETS.PROPOSAL_FILES_TEST[position]]
    else:
        proposal_files = None
    dataset = get_detection_dataset_dicts([dataset_name], filter_empty=False, proposal_files=proposal_files)
    if mapper is None:
        # Test-time mapper (is_train=False).
        mapper = DatasetMapper(cfg, False)
    return {'dataset': dataset, 'mapper': mapper, 'num_workers': cfg.DATALOADER.NUM_WORKERS}
def line_search(model, f, x, fullstep, expected_improve_full, max_backtracks=10, accept_ratio=0.1):
    """Backtracking line search along `fullstep` from parameters `x`.

    Halves the step each iteration; accepts a candidate once the realized
    improvement exceeds `accept_ratio` times the first-order prediction.
    Returns (True, new_params) on success, otherwise (False, x).
    """
    baseline = f(True).data
    for backtrack in range(max_backtracks):
        step_fraction = 0.5 ** backtrack
        candidate = x + step_fraction * fullstep
        set_flat_params_to(model, candidate)
        candidate_val = f(True).data
        actual_improve = baseline - candidate_val
        expected_improve = expected_improve_full * step_fraction
        ratio = actual_improve / expected_improve
        if ratio > accept_ratio:
            return True, candidate
    return False, x
def get_models(module, include_pretrained=False):
    """Collect (name, class) pairs for transformers model classes defined
    directly in `module`.

    Classes whose name contains 'Pretrained'/'PreTrained' are skipped unless
    `include_pretrained` is set; re-exports from other modules are ignored.
    """
    base_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel, transformers.FlaxPreTrainedModel)
    found = []
    for name in dir(module):
        if not include_pretrained and ('Pretrained' in name or 'PreTrained' in name):
            continue
        candidate = getattr(module, name)
        is_model_class = isinstance(candidate, type) and issubclass(candidate, base_classes)
        # Only keep classes defined in this module, not re-exports.
        if is_model_class and candidate.__module__ == module.__name__:
            found.append((name, candidate))
    return found
class SegformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for SegFormer image models."""

    # Export relies on ops only available from torch 1.11 onwards.
    torch_onnx_minimum_version = version.parse('1.11')

    # FIX: these accessors override @property members of transformers'
    # OnnxConfig base class; the decorators appear to have been stripped
    # (leaving plain methods, so `config.inputs` would return a bound method
    # instead of the mapping). Restore @property — confirm against the
    # transformers OnnxConfig API.
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single NCHW pixel_values input with fully dynamic axes.
        return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported outputs.
        return 0.0001

    @property
    def default_onnx_opset(self) -> int:
        return 12
.parametrize('path,name', demos)
def test_demos(path, name):
ret = subprocess.run([sys.executable, name], cwd=str(path), check=True)
assert (ret.returncode == 0) |
def is_valid(node, check_ids=True, check_prob_sum=False, light=False):
    """Validate an SPN rooted at `node`.

    Checks (depending on flags): unique ids, non-empty scopes, matching
    children/weights/cluster_centers on Sum nodes, weights summing to ~1,
    non-empty children on Sum/Product nodes, prob_sum consistency on
    IdentityNumericLeaf nodes, and overall consistency/completeness.

    Returns:
        (True, None) when valid, otherwise (False, error_message).
    """
    if check_ids:
        (val, err) = has_valid_ids(node)
        if not val:
            return (val, err)
    for n in get_nodes_by_type(node):
        if len(n.scope) == 0:
            return (False, ('node %s has no scope' % n.id))
        is_sum = isinstance(n, Sum)
        is_prod = isinstance(n, Product)
        is_float = isinstance(n, IdentityNumericLeaf)
        if is_sum:
            if len(n.children) != len(n.weights):
                return (False, ('node %s has different children/weights' % n.id))
            if not light:
                if len(n.children) != len(n.cluster_centers):
                    return (False, ('node %s has different children/cluster_centers (#cluster_centers: %d, #childs: %d)' % (n.id, len(n.cluster_centers), len(n.children))))
            weight_sum = np.sum(n.weights)
            if not isclose(weight_sum, 1, abs_tol=0.05):
                # FIX: the original concatenated the float weight_sum into a
                # str ('...' + weight_sum), which raised TypeError instead of
                # returning the diagnostic message.
                return (False, ('Sum of weights is not equal 1.0 (instead:%s)' % weight_sum))
        if is_sum or is_prod:
            if len(n.children) == 0:
                return (False, ('node %s has no children' % n.id))
        if is_float:
            (ok, err) = is_valid_prob_sum(n.prob_sum, n.unique_vals, n.cardinality)
            if not ok:
                return (False, err)
            if check_prob_sum:
                assert hasattr(n, 'prob_num'), (str(n) + ' has no property prob_num')
                # FIX: hasattr was called with a single tuple argument
                # (hasattr((n, 'unique_vals'))), which raises TypeError;
                # call it as hasattr(object, name).
                assert hasattr(n, 'unique_vals'), (str(n) + ' has no property unique_vals')
                if (len(n.prob_sum) - 1) != len(n.unique_vals):
                    return (False, 'size of prob_sum does not match unique_vals (required: prob_sum -1 == unique_vals) ')
    (a, err) = is_consistent(node)
    if not a:
        return (a, err)
    (b, err) = is_complete(node)
    if not b:
        return (b, err)
    return (True, None)
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, ignore_dangling_symlinks=False):
    """Recursively copy the directory tree rooted at `src` to `dst`.

    Mirrors shutil.copytree semantics: `dst` must not already exist;
    `symlinks=True` copies links as links; `ignore(src, names)` returns names
    to skip; `copy_function` copies individual files; dangling symlinks are
    skipped when `ignore_dangling_symlinks` is true (only with
    symlinks=False).

    Raises:
        Error: aggregating all (src, dst, reason) tuples that failed.
    """
    names = os.listdir(src)
    ignored_names = ignore(src, names) if ignore is not None else set()
    os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if os.path.islink(srcname):
                linkto = os.readlink(srcname)
                if symlinks:
                    os.symlink(linkto, dstname)
                else:
                    # Follow the link; optionally skip dangling ones.
                    if (not os.path.exists(linkto)) and ignore_dangling_symlinks:
                        continue
                    copy_function(srcname, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks, ignore, copy_function)
            else:
                copy_function(srcname, dstname)
        except Error as err:
            # A nested copytree failed: merge its error list into ours.
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        copystat(src, dst)
    except OSError as why:
        if (WindowsError is not None) and isinstance(why, WindowsError):
            # Copying file access times may fail on Windows; ignore.
            pass
        else:
            # FIX: the original used errors.extend(...), which flattened the
            # 3-tuple into three separate list entries instead of appending
            # one (src, dst, reason) record (shutil uses append here).
            errors.append((src, dst, str(why)))
    if errors:
        raise Error(errors)
class MypyManager(TCManager):
    """Type-check manager that drives mypy over a single file and parses its
    terminal summary into a ParsedResult."""

    def _build_tc_cmd(self, fpath):
        # Cache-less, non-incremental run so results are reproducible.
        return ['mypy', '--show-error-codes', '--no-incremental', '--cache-dir=/dev/null', fpath]

    def _check_tc_outcome(self, _, outlines):
        # Any output line ending with a tracked error code is a failure.
        if any((l.endswith(err) for l in outlines for err in self._inc_errcodes)):
            raise FailToTypeCheck

    def _parse_tc_output(self, retcode, outlines):
        """Parse mypy's final summary line into counts.

        Expects either 'Success: ...' (retcode 0) or
        'Found N errors ... (checked M source file)'. Only the singular
        'source file)' form is matched, which is sufficient because
        _build_tc_cmd always checks exactly one file.

        Raises:
            OutputParseError: when the summary line has an unexpected shape.
        """
        last_line = outlines[(- 1)]
        err_breakdown = None
        if (retcode == 0):
            if (not last_line.startswith('Success: ')):
                raise OutputParseError
            no_type_errs = 0
            # The first number in the success line is the file count.
            no_files = next((int(w) for w in last_line.split() if w.isdigit()))
            no_ignored_errs = 0
        else:
            # Count only the tracked error codes; the rest are "ignored".
            c = Counter((err for l in outlines for err in self._inc_errcodes if l.endswith(err)))
            err_breakdown = dict(c)
            no_type_errs = sum(c.values())
            if (last_line.startswith('Found ') and last_line.endswith(' source file)')):
                numbers = [int(s) for s in last_line.split() if s.isdigit()]
                no_errs = numbers[0]
                no_files = numbers[1]
                no_ignored_errs = (no_errs - no_type_errs)
            else:
                raise OutputParseError
        return ParsedResult(no_type_errs, no_files, no_ignored_errs, err_breakdown=err_breakdown)

    def _report_errors(self, parsed_result):
        # Human-readable summary of the parsed mypy run.
        logger.info(f'Produced {parsed_result.no_type_errs} type error(s) in {parsed_result.no_files} file(s).')
        if parsed_result.err_breakdown:
            logger.info(f'Error breaking down: {parsed_result.err_breakdown}.')
def tf32_on_and_off(tf32_precision=1e-05):
    """Decorator factory for CUDA tests.

    When TF32 kernels could change results (float32/complex64 dtype on a
    CUDA device), the wrapped test runs twice — once with TF32 disabled and
    once enabled with `tf32_precision` tolerance; otherwise it runs once,
    unchanged.
    """
    import functools  # local import keeps this fix self-contained

    def with_tf32_disabled(self, function_call):
        with tf32_off():
            function_call()

    def with_tf32_enabled(self, function_call):
        with tf32_on(self, tf32_precision):
            function_call()

    def wrapper(f):
        params = inspect.signature(f).parameters
        arg_names = tuple(params.keys())

        # FIX: the original had a bare `(f)` where the decorator belonged;
        # restore functools.wraps so the wrapped test keeps f's metadata.
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            # Fold positional args into kwargs so 'device'/'dtype' can be
            # inspected by name below.
            for (k, v) in zip(arg_names, args):
                kwargs[k] = v
            cond = tf32_is_not_fp32()
            if 'device' in kwargs:
                cond = cond and (torch.device(kwargs['device']).type == 'cuda')
            if 'dtype' in kwargs:
                cond = cond and (kwargs['dtype'] in {torch.float32, torch.complex64})
            if cond:
                with_tf32_disabled(kwargs['self'], (lambda: f(**kwargs)))
                with_tf32_enabled(kwargs['self'], (lambda: f(**kwargs)))
            else:
                f(**kwargs)

        return wrapped

    return wrapper
def scalar_imp_level2_train(listener=False):
    """Training pairs for the level-2 scalar implicature experiment:
    color labels paired with (hue, saturation, value) triples."""
    color_examples = [
        ('blue', (240.0, 100.0, 100.0)),
        ('blue', (170.0, 100.0, 70.0)),
        ('green', (170.0, 100.0, 70.0)),
        ('green', (80.0, 100.0, 100.0)),
        ('yellow', (80.0, 100.0, 100.0)),
    ]
    return pairs_to_insts(color_examples, listener=listener)
def tfidf_from_questions(names, args, dictionary, dataroot='data', target=['rad']):
    """Build a sparse token-association matrix from dataset questions, plus
    GloVe embeddings for dictionary entries beyond the base vocabulary.

    NOTE(review): `target` uses a mutable default list; it is only read here.

    Args:
        names: dataset split names ('train'/'test').
        args: namespace providing use_RAD / RAD_dir.
        dictionary: tokenizer with `tokenize`, `idx2word`, and len() == N.
        dataroot: base data directory (overridden by args.RAD_dir).
        target: which question sources to use.

    Returns:
        (tfidf, weights): coalesced sparse FloatTensor and GloVe weights.
    """
    inds = [[], []]
    df = dict()
    N = len(dictionary)
    if args.use_RAD:
        dataroot = args.RAD_dir

    def populate(inds, df, text):
        # Record document frequency per token and co-occurring token pairs.
        tokens = dictionary.tokenize(text, True)
        for t in tokens:
            df[t] = (df.get(t, 0) + 1)
        combin = list(itertools.combinations(tokens, 2))
        for c in combin:
            if (c[0] < N):
                inds[0].append(c[0])
                inds[1].append(c[1])
            if (c[1] < N):
                inds[0].append(c[1])
                inds[1].append(c[0])

    if ('rad' in target):
        for name in names:
            assert (name in ['train', 'test'])
            question_path = os.path.join(dataroot, (name + 'set.json'))
            questions = json.load(open(question_path))
            for question in questions:
                populate(inds, df, question['question'])
    vals = ([1] * len(inds[1]))
    for (idx, col) in enumerate(inds[1]):
        assert (df[col] >= 1), 'document frequency should be greater than zero!'
        # NOTE(review): indexing vals by `col` (a token id) rather than `idx`
        # looks suspicious — vals has len(inds[1]) entries while col is a
        # vocabulary index. Matches the widely-copied original; verify.
        vals[col] /= df[col]

    def normalize(inds, vals):
        # Row-normalize so each row's values sum to 1 (stochastic matrix).
        z = dict()
        for (row, val) in zip(inds[0], vals):
            z[row] = (z.get(row, 0) + val)
        for (idx, row) in enumerate(inds[0]):
            vals[idx] /= z[row]
        return vals

    vals = normalize(inds, vals)
    tfidf = torch.sparse.FloatTensor(torch.LongTensor(inds), torch.FloatTensor(vals))
    tfidf = tfidf.coalesce()
    # GloVe embeddings for dictionary entries beyond the base vocab (>= N).
    emb_dim = 300
    glove_file = os.path.join(dataroot, 'glove', ('glove.6B.%dd.txt' % emb_dim))
    (weights, word2emb) = utils.create_glove_embedding_init(dictionary.idx2word[N:], glove_file)
    print(('tf-idf stochastic matrix (%d x %d) is generated.' % (tfidf.size(0), tfidf.size(1))))
    return (tfidf, weights)
class A004526(SloaneSequence):
    """OEIS A004526: the nonnegative integers repeated, a(n) = floor(n/2)."""

    def __init__(self):
        # The sequence is indexed starting from offset 0.
        SloaneSequence.__init__(self, offset=0)

    def _repr_(self):
        return 'The nonnegative integers repeated.'

    def _eval(self, n):
        # a(n) = floor(n / 2), returned as a Sage integer.
        return ZZ(n // 2)
class FreeAbelianMonoidFactory(UniqueFactory):
    """UniqueFactory producing free abelian monoids on n named generators."""

    def create_key(self, n, names):
        # Normalize so equal (rank, generator names) requests share one key.
        rank = int(n)
        return (rank, normalize_names(rank, names))

    def create_object(self, version, key):
        # key is (rank, normalized_names), exactly the constructor arguments.
        return FreeAbelianMonoid_class(*key)
def backup_codes(root_dir, res_dir, backup_list):
    """Snapshot the directories listed in `backup_list` from root_dir into
    res_dir, replacing any previous backup at res_dir."""
    if os.path.exists(res_dir):
        # Wipe the stale backup before re-creating it.
        shutil.rmtree(res_dir)
    os.makedirs(res_dir)
    for name in backup_list:
        source = os.path.join(root_dir, name)
        target = os.path.join(res_dir, name)
        shutil.copytree(source, target)
        print('codes backup at {}'.format(target))
class GradChecker():
    """Finite-difference gradient checker for Taichi fields.

    Compares autodiff gradients (x.grad) against a central-difference
    directional derivative of `loss` along a random tangent, sweeping a
    range of epsilons and restoring all field state between evaluations.

    NOTE(review): `x_pos`/`x_neg` carry Taichi kernel annotations
    (template(), ndarray()) but no decorator is visible in this chunk —
    presumably @ti.kernel was stripped; confirm.
    """

    def __init__(self, loss, to_check):
        # Fields whose .grad should be validated against the loss.
        self.to_check = to_check
        self.loss = loss
        # Epsilons 2^-3 .. 2^-29: coarse steps first, shrinking by 4x.
        self.eps_range = (2.0 ** np.arange((- 3), (- 30), (- 2)).astype(np.float64))
        self.result = ([None] * len(to_check))
        self.all_fields = get_all_fields()
        # Snapshot every field so each probe starts from identical state.
        self.backups = save_all_fields(self.all_fields)

    def add_calls(self, calls):
        # (func, args) pairs that recompute the loss from the fields.
        self.calls = calls

    def check_grad(self):
        """Run the check for every field in to_check; asserts all pass."""
        assert (self.loss.dtype == types.f64), 'Only f64 is supported when checking grad.'

        def x_pos(x: template(), tangent_np: ndarray(), eps: types.f64):
            # Perturb x by +eps along the tangent direction.
            for I in impl.grouped(x):
                x[I] += (eps * tangent_np[I])

        def x_neg(x: template(), tangent_np: ndarray(), eps: types.f64):
            # Perturb x by -eps along the tangent direction.
            for I in impl.grouped(x):
                x[I] -= (eps * tangent_np[I])

        for (i, x) in enumerate(self.to_check):
            if (x is self.loss):
                # d loss / d loss == 1 trivially; nothing to verify.
                self.result[i] = True
                continue
            check_pass = False
            re_range = []
            for eps in self.eps_range:
                # Random direction for the directional derivative.
                tangent_np = np.array(np.random.rand(*x.shape)).astype(np.float64)
                restore_all_fields(self.all_fields, self.backups)
                x_pos(x, tangent_np, eps)
                for (func, args) in self.calls:
                    func(*args)
                loss_pos = self.loss.to_numpy()
                restore_all_fields(self.all_fields, self.backups)
                x_neg(x, tangent_np, eps)
                for (func, args) in self.calls:
                    func(*args)
                loss_neg = self.loss.to_numpy()
                # Central difference of the loss along the tangent.
                ip_numerical = (((loss_pos - loss_neg) * 0.5) / eps)
                x_grad_np = x.grad.to_numpy()
                # Broadcast the tangent over trailing vector/matrix axes.
                extra_dim = (x_grad_np.ndim - tangent_np.ndim)
                if (extra_dim == 1):
                    tangent_np = np.expand_dims(tangent_np, axis=(- 1))
                if (extra_dim == 2):
                    tangent_np = np.expand_dims(tangent_np, axis=(- 1))
                # Inner product of the autodiff gradient with the tangent.
                ip_autodiff = np.sum((x_grad_np * tangent_np))
                err = abs((ip_autodiff - ip_numerical))
                if (ip_numerical != 0):
                    re = (err / abs(ip_autodiff))
                else:
                    re = (err / (abs(ip_autodiff) + 1e-20))
                re_range.append(re)
                # Accept 1% relative error at any epsilon.
                if ((err * 100) <= abs(ip_autodiff)):
                    check_pass = True
                    break
            self.result[i] = check_pass
            if (not check_pass):
                print('variable', i, 'has relative error', min(re_range), ', expected relative error 0.01')
            else:
                print('variable', i, 'passes grad check')
        assert all(self.result), 'Grad check failed: Not all variables pass grad check'
        # Leave fields in their original, recomputed state.
        restore_all_fields(self.all_fields, self.backups)
        for (func, args) in self.calls:
            func(*args)
def show_all_variables():
    """Print every TF trainable variable with its shape and parameter count,
    followed by the total parameter count."""
    total_count = 0
    trainables = tf.trainable_variables()
    for idx, op in enumerate(trainables):
        shape = op.get_shape()
        count = np.prod(shape)
        print('[%2d] %s %s = %s' % (idx, op.name, shape, count))
        total_count += int(count)
    # Thousands-separated grand total.
    print('[Total] variable size: %s' % '{:,}'.format(total_count))
def flip(prob=0.5):
    """Randomly choose (once, at call time) between the identity transform
    and a horizontal flip; returns the chosen single-argument callable.

    With probability `prob` the flip transform is returned, otherwise the
    identity.
    """
    keep_original = random.random() > prob
    if keep_original:
        return lambda img: img
    return lambda img: img.transpose(Image.FLIP_LEFT_RIGHT)
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator (pix2pix-style): `n_layers` stride-2
    downsampling convolutions, one stride-1 convolution, and a final
    1-channel patch output passed through a sigmoid."""

    def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
        super(NLayerDiscriminator, self).__init__()
        norm_layer = ActNorm if use_actnorm else nn.BatchNorm2d
        # BatchNorm has an affine bias, so convs it follows skip their own.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func != nn.BatchNorm2d
        else:
            use_bias = norm_layer != nn.BatchNorm2d
        kw, padw = 4, 1
        layers = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
        mult = 1
        for depth in range(1, n_layers):
            prev_mult, mult = mult, min(2 ** depth, 8)
            layers += [
                nn.Conv2d(ndf * prev_mult, ndf * mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * mult),
                nn.LeakyReLU(0.2, True),
            ]
        # One more block at stride 1, channel multiplier capped at 8.
        prev_mult, mult = mult, min(2 ** n_layers, 8)
        layers += [
            nn.Conv2d(ndf * prev_mult, ndf * mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * mult),
            nn.LeakyReLU(0.2, True),
        ]
        # Final 1-channel prediction map.
        layers += [nn.Conv2d(ndf * mult, 1, kernel_size=kw, stride=1, padding=padw)]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        return nn.Sigmoid()(self.main(input))
class ExtPowerDualFreeModule(FiniteRankFreeModule_abstract):
    """Exterior power of the dual of a free module of finite rank; elements
    are alternating forms of the given degree."""

    Element = FreeModuleAltForm

    def __init__(self, fmodule, degree, name=None, latex_name=None):
        from sage.arith.misc import binomial
        from sage.typeset.unicode_characters import unicode_bigwedge
        self._fmodule = fmodule
        self._degree = ZZ(degree)
        # rank of Lambda^p(M*) is binomial(rank(M), p).
        rank = binomial(fmodule._rank, degree)
        if ((name is None) and (fmodule._name is not None)):
            name = (((unicode_bigwedge + '^{}('.format(degree)) + fmodule._name) + '*)')
        if ((latex_name is None) and (fmodule._latex_name is not None)):
            latex_name = (((('\\Lambda^{' + str(degree)) + '}\\left(') + fmodule._latex_name) + '^*\\right)')
        super().__init__(fmodule._ring, rank, name=name, latex_name=latex_name)
        # Register in the base module's registry of derived modules.
        fmodule._all_modules.add(self)

    def construction(self):
        # No functorial construction is implemented for this parent.
        return None

    def _element_constructor_(self, comp=[], basis=None, name=None, latex_name=None):
        """Convert `comp` into an alternating form of this degree.

        NOTE(review): `comp` has a mutable default list; it is only read
        here (truthiness test / component assignment), so it is harmless.
        """
        if (isinstance(comp, (int, Integer)) and (comp == 0)):
            return self.zero()
        if isinstance(comp, FreeModuleTensor):
            tensor = comp
            # Only (0,1)-tensors on the same base module coerce, and only
            # into degree 1 (1-forms).
            if ((tensor.tensor_type() == (0, 1)) and (self._degree == 1) and (tensor.base_module() is self._fmodule)):
                resu = self.element_class(self._fmodule, 1, name=tensor._name, latex_name=tensor._latex_name)
                for (basis, comp) in tensor._components.items():
                    resu._components[basis] = comp.copy()
                return resu
            else:
                raise TypeError(('cannot coerce the {} '.format(tensor) + 'to an element of {}'.format(self)))
        resu = self.element_class(self._fmodule, self._degree, name=name, latex_name=latex_name)
        if comp:
            resu.set_comp(basis)[:] = comp
        return resu

    def _an_element_(self):
        # Generic element: one nonzero component built from the ring's
        # generic element.
        resu = self.element_class(self._fmodule, self._degree)
        self._fmodule.an_element()
        sindex = self._fmodule._sindex
        ind = [(sindex + i) for i in range(resu._tensor_rank)]
        resu.set_comp()[ind] = self._fmodule._ring.an_element()
        return resu

    # NOTE(review): the bare `_method` below looks like a mangled decorator
    # (presumably @cached_method) — confirm against the original source.
    _method
    def zero(self):
        """The zero alternating form, with zero components preset in every
        known basis and marked immutable."""
        resu = self._element_constructor_(name='zero', latex_name='0')
        for basis in self._fmodule._known_bases:
            resu._components[basis] = resu._new_comp(basis)
        resu._is_zero = True
        resu.set_immutable()
        return resu

    def _repr_(self):
        description = '{}'.format(self._degree.ordinal_str())
        description += ' exterior power of the dual of the {}'.format(self._fmodule)
        return description

    def base_module(self):
        # The module M over which this Lambda^p(M*) is built.
        return self._fmodule

    def degree(self):
        return self._degree
def apply_nan_suppression(updates, print_mode='all'):
    """Wrap a Theano updates dict so that any update whose new value contains
    NaN/Inf is suppressed (the variable keeps its old value), optionally
    printing a warning.

    Args:
        updates: OrderedDict-like mapping shared variable -> new expression.
        print_mode: 'all' prints the offending value, 'shape' prints only its
            shape, 'none'/None suppresses silently.

    Returns:
        OrderedDict with NaN/Inf-guarded update expressions.

    Raises:
        ValueError: on an unrecognized print_mode.
    """
    new_updates = OrderedDict([])
    # FIX: dict.iteritems() is Python-2-only; .items() is equivalent here
    # and works on both Python 2 and 3.
    for (shared_variable, new_expression) in updates.items():
        isnan = (T.isnan(new_expression).any() | T.isinf(new_expression).any())
        warning_msg = 'Warning: non-finite update suppressed for %s'
        if (print_mode == 'all'):
            suppressed = T.zeros_like(Print(((warning_msg + ':') % shared_variable.name))(new_expression))
        elif (print_mode == 'shape'):
            suppressed = T.zeros_like(Print(((warning_msg + ':') % shared_variable.name), attrs=('shape',))(new_expression))
        elif ((print_mode == 'none') or (print_mode is None)):
            suppressed = T.zeros_like(new_expression)
        else:
            raise ValueError("print_mode must be one of 'all', 'shape', or 'none'")
        # If non-finite: add zero (keep old value); else move to new value.
        new_updates[shared_variable] = (shared_variable + ifelse(isnan, suppressed, (new_expression - shared_variable)))
    return new_updates
class Model():
    """PCN point-cloud completion model (TF1): PointNet-style encoder, a
    coarse fully-connected decoder, and a folding-based fine decoder, trained
    with combined chamfer losses."""

    def __init__(self, inputs, gt, alpha):
        # 1024 coarse points, each expanded by a 4x4 folding grid.
        self.num_coarse = 1024
        self.grid_size = 4
        self.num_fine = ((self.grid_size ** 2) * self.num_coarse)
        self.features = self.create_encoder(inputs)
        (self.coarse, self.fine) = self.create_decoder(self.features)
        (self.loss, self.update) = self.create_loss(self.coarse, self.fine, gt, alpha)
        self.outputs = self.fine
        # First sample of each tensor, for visualization panels.
        self.visualize_ops = [inputs[0], self.coarse[0], self.fine[0], gt[0]]
        self.visualize_titles = ['input', 'coarse output', 'fine output', 'ground truth']

    def create_encoder(self, inputs):
        """Pointwise MLPs with global max-pooling; the global feature is
        concatenated back onto every point before the second stage."""
        with tf.variable_scope('encoder_0', reuse=tf.AUTO_REUSE):
            features = mlp_conv(inputs, [128, 256])
            features_global = tf.reduce_max(features, axis=1, keep_dims=True, name='maxpool_0')
            features = tf.concat([features, tf.tile(features_global, [1, tf.shape(inputs)[1], 1])], axis=2)
        with tf.variable_scope('encoder_1', reuse=tf.AUTO_REUSE):
            features = mlp_conv(features, [512, 1024])
            features = tf.reduce_max(features, axis=1, name='maxpool_1')
        return features

    def create_decoder(self, features):
        """Decode global features into a coarse cloud, then refine by folding
        a small 2D grid around each coarse point."""
        with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
            coarse = mlp(features, [1024, 1024, (self.num_coarse * 3)])
            coarse = tf.reshape(coarse, [(- 1), self.num_coarse, 3])
        with tf.variable_scope('folding', reuse=tf.AUTO_REUSE):
            # 2D grid in [-0.05, 0.05]^2, tiled once per coarse point.
            grid = tf.meshgrid(tf.linspace((- 0.05), 0.05, self.grid_size), tf.linspace((- 0.05), 0.05, self.grid_size))
            grid = tf.expand_dims(tf.reshape(tf.stack(grid, axis=2), [(- 1), 2]), 0)
            grid_feat = tf.tile(grid, [features.shape[0], self.num_coarse, 1])
            point_feat = tf.tile(tf.expand_dims(coarse, 2), [1, 1, (self.grid_size ** 2), 1])
            point_feat = tf.reshape(point_feat, [(- 1), self.num_fine, 3])
            global_feat = tf.tile(tf.expand_dims(features, 1), [1, self.num_fine, 1])
            feat = tf.concat([grid_feat, point_feat, global_feat], axis=2)
            # Fine points are offsets predicted around each coarse center.
            center = tf.tile(tf.expand_dims(coarse, 2), [1, 1, (self.grid_size ** 2), 1])
            center = tf.reshape(center, [(- 1), self.num_fine, 3])
            fine = (mlp_conv(feat, [512, 512, 3]) + center)
        return (coarse, fine)

    def create_loss(self, coarse, fine, gt, alpha):
        """Chamfer losses for both outputs; total = coarse + alpha * fine."""
        loss_coarse = chamfer(coarse, gt)
        add_train_summary('train/coarse_loss', loss_coarse)
        update_coarse = add_valid_summary('valid/coarse_loss', loss_coarse)
        loss_fine = chamfer(fine, gt)
        add_train_summary('train/fine_loss', loss_fine)
        update_fine = add_valid_summary('valid/fine_loss', loss_fine)
        loss = (loss_coarse + (alpha * loss_fine))
        add_train_summary('train/loss', loss)
        update_loss = add_valid_summary('valid/loss', loss)
        return (loss, [update_coarse, update_fine, update_loss])
class DenoisingDataset(FairseqDataset):
    """Applies BART-style denoising noise to a wrapped token dataset.

    For each example, the clean token sequence is the target and a noised
    copy (sentence permutation, whole-word/span masking, random insertion,
    rolling) is the source.

    Args:
        dataset: wrapped dataset of 1-D token tensors, each ending in EOS.
        sizes: per-example lengths.
        vocab: dictionary providing ``bos()``/``eos()``/``pad()``/``index()``.
        mask_idx: index of the <mask> token.
        mask_whole_words: optional per-vocab-entry word-start indicator used
            for whole-word masking; None masks individual subwords.
        shuffle: whether ``ordered_indices`` starts from a random permutation.
        seed: base RNG seed, combined with epoch and index per example.
        args: namespace with mask/mask_random/insert/rotate/permute_sentences/
            replace_length/mask_length/poisson_lambda/bpe settings.

    Raises:
        ValueError: if replace_length or mask_length settings are invalid.
    """

    def __init__(self, dataset, sizes, vocab, mask_idx, mask_whole_words, shuffle, seed, args):
        self.dataset = dataset
        self.sizes = sizes
        self.vocab = vocab
        self.shuffle = shuffle
        self.seed = seed
        self.mask_idx = mask_idx
        self.mask_whole_word = mask_whole_words
        self.mask_ratio = args.mask
        self.random_ratio = args.mask_random
        self.insert_ratio = args.insert
        self.rotate_ratio = args.rotate
        self.permute_sentence_ratio = args.permute_sentences
        # Sentence boundaries are found via the full-stop token; the GPT-2
        # BPE encodes '.' as the token string '13'.
        if (args.bpe != 'gpt2'):
            self.full_stop_index = self.vocab.index('.')
        else:
            assert (args.bpe == 'gpt2')
            self.full_stop_index = self.vocab.index('13')
        self.replace_length = args.replace_length
        # BUG FIX: `raise <str>` is itself a TypeError in Python 3 (exceptions
        # must derive from BaseException); raise ValueError with the same
        # message instead.
        if (not (self.replace_length in [(- 1), 0, 1])):
            raise ValueError(f'invalid arg: replace_length={self.replace_length}')
        if (not (args.mask_length in ['subword', 'word', 'span-poisson'])):
            raise ValueError(f'invalid arg: mask-length={args.mask_length}')
        if ((args.mask_length == 'subword') and (not (args.replace_length in [0, 1]))):
            raise ValueError(f'if using subwords, use replace-length=1 or 0')
        self.mask_span_distribution = None
        if (args.mask_length == 'span-poisson'):
            # Build a truncated Poisson(lambda) distribution over span lengths
            # by computing p(k) = e^-lambda * lambda^k / k! for k = 0..127,
            # stopping early once the tail mass is negligible.
            _lambda = args.poisson_lambda
            lambda_to_the_k = 1
            e_to_the_minus_lambda = math.exp((- _lambda))
            k_factorial = 1
            ps = []
            for k in range(0, 128):
                ps.append(((e_to_the_minus_lambda * lambda_to_the_k) / k_factorial))
                lambda_to_the_k *= _lambda
                k_factorial *= (k + 1)
                if (ps[(- 1)] < 1e-07):
                    break
            ps = torch.FloatTensor(ps)
            self.mask_span_distribution = torch.distributions.Categorical(ps)
        self.epoch = 0

    def set_epoch(self, epoch, **unused):
        """Record the current epoch; it participates in the per-example seed."""
        self.epoch = epoch

    def __getitem__(self, index):
        """Return {'id', 'source', 'target'} with noise applied to source.

        The RNG is seeded from (seed, epoch, index) so noise is deterministic
        per example per epoch.
        """
        with data_utils.numpy_seed(self.seed, self.epoch, index):
            tokens = self.dataset[index]
            assert (tokens[(- 1)] == self.vocab.eos())
            (source, target) = (tokens, tokens.clone())
            if (self.permute_sentence_ratio > 0.0):
                source = self.permute_sentences(source, self.permute_sentence_ratio)
            if (self.mask_ratio > 0):
                source = self.add_whole_word_mask(source, self.mask_ratio)
            if (self.insert_ratio > 0):
                source = self.add_insertion_noise(source, self.insert_ratio)
            if ((self.rotate_ratio > 0.0) and (np.random.random() < self.rotate_ratio)):
                source = self.add_rolling_noise(source)
        # Sanity checks: valid token ids, BOS/EOS preserved at the edges.
        assert (source >= 0).all()
        assert (source[1:(- 1)] >= 1).all()
        assert (source <= len(self.vocab)).all()
        assert (source[0] == self.vocab.bos())
        assert (source[(- 1)] == self.vocab.eos())
        return {'id': index, 'source': source, 'target': target}

    def __len__(self):
        return len(self.dataset)

    def permute_sentences(self, source, p=1.0):
        """Randomly permute a fraction ``p`` of the sentences in ``source``."""
        full_stops = (source == self.full_stop_index)
        # Treat the position before EOS as a sentence end so the final
        # sentence is always delimited.
        full_stops[(- 2)] = 1
        # A sentence ends where a full stop is followed by a non-full-stop.
        sentence_ends = ((full_stops[1:] * (~ full_stops[:(- 1)])).nonzero() + 2)
        result = source.clone()
        num_sentences = sentence_ends.size(0)
        num_to_permute = math.ceil((((num_sentences * 2) * p) / 2.0))
        substitutions = torch.randperm(num_sentences)[:num_to_permute]
        ordering = torch.arange(0, num_sentences)
        ordering[substitutions] = substitutions[torch.randperm(num_to_permute)]
        # Rebuild the sequence in the permuted order, keeping BOS at index 0.
        index = 1
        for i in ordering:
            sentence = source[(sentence_ends[(i - 1)] if (i > 0) else 1):sentence_ends[i]]
            result[index:(index + sentence.size(0))] = sentence
            index += sentence.size(0)
        return result

    def word_starts(self, source):
        """Return a 0/1 tensor marking positions that start a word.

        Falls back to every position (except BOS/EOS) when no whole-word
        mask table was provided.
        """
        if (self.mask_whole_word is not None):
            is_word_start = self.mask_whole_word.gather(0, source)
        else:
            is_word_start = torch.ones(source.size())
            is_word_start[0] = 0
            is_word_start[(- 1)] = 0
        return is_word_start

    def add_whole_word_mask(self, source, p):
        """Mask a fraction ``p`` of words (or Poisson-length spans).

        Depending on ``replace_length``, masked words are deleted (0),
        replaced by a single <mask>/random token (1), or replaced
        token-for-token (-1). Length-0 spans become insertions.
        """
        is_word_start = self.word_starts(source)
        num_to_mask = int(math.ceil((is_word_start.float().sum() * p)))
        num_inserts = 0
        if (num_to_mask == 0):
            return source
        if (self.mask_span_distribution is not None):
            # Sample span lengths until they cover num_to_mask tokens, then
            # trim the last span so the total is exact.
            lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,))
            cum_length = torch.cumsum(lengths, 0)
            while (cum_length[(- 1)] < num_to_mask):
                lengths = torch.cat([lengths, self.mask_span_distribution.sample(sample_shape=(num_to_mask,))], dim=0)
                cum_length = torch.cumsum(lengths, 0)
            i = 0
            while (cum_length[i] < num_to_mask):
                i += 1
            lengths[i] = (num_to_mask - (0 if (i == 0) else cum_length[(i - 1)]))
            num_to_mask = (i + 1)
            lengths = lengths[:num_to_mask]
            # Length-0 spans contribute insertions rather than masks.
            lengths = lengths[(lengths > 0)]
            num_inserts = (num_to_mask - lengths.size(0))
            num_to_mask -= num_inserts
            if (num_to_mask == 0):
                return self.add_insertion_noise(source, (num_inserts / source.size(0)))
            assert (lengths > 0).all()
        else:
            lengths = torch.ones((num_to_mask,)).long()
        assert (is_word_start[(- 1)] == 0)
        word_starts = is_word_start.nonzero()
        indices = word_starts[torch.randperm(word_starts.size(0))[:num_to_mask]].squeeze(1)
        mask_random = (torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio)
        source_length = source.size(0)
        assert ((source_length - 1) not in indices)
        to_keep = torch.ones(source_length, dtype=torch.bool)
        # Sentinel: acts as a word-start at EOS so span extension stops there.
        is_word_start[(- 1)] = 255
        if (self.replace_length == 0):
            to_keep[indices] = 0
        else:
            source[indices] = self.mask_idx
            source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))
        if (self.mask_span_distribution is not None):
            assert (len(lengths.size()) == 1)
            assert (lengths.size() == indices.size())
            lengths -= 1
            # Extend each span one token at a time until its length is used up.
            while (indices.size(0) > 0):
                assert (lengths.size() == indices.size())
                lengths -= is_word_start[(indices + 1)].long()
                uncompleted = (lengths >= 0)
                indices = (indices[uncompleted] + 1)
                mask_random = mask_random[uncompleted]
                lengths = lengths[uncompleted]
                if (self.replace_length != (- 1)):
                    to_keep[indices] = 0
                else:
                    source[indices] = self.mask_idx
                    source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))
        else:
            # Whole-word mode: extend each mask over the word's remaining
            # subword tokens.
            while (indices.size(0) > 0):
                uncompleted = (is_word_start[(indices + 1)] == 0)
                indices = (indices[uncompleted] + 1)
                mask_random = mask_random[uncompleted]
                if (self.replace_length != (- 1)):
                    to_keep[indices] = 0
                else:
                    source[indices] = self.mask_idx
                    source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))
                assert ((source_length - 1) not in indices)
        source = source[to_keep]
        if (num_inserts > 0):
            source = self.add_insertion_noise(source, (num_inserts / source.size(0)))
        return source

    def add_permuted_noise(self, tokens, p):
        """Randomly swap a fraction ``p`` of interior tokens in place."""
        num_words = len(tokens)
        num_to_permute = math.ceil((((num_words * 2) * p) / 2.0))
        substitutions = (torch.randperm((num_words - 2))[:num_to_permute] + 1)
        tokens[substitutions] = tokens[substitutions[torch.randperm(num_to_permute)]]
        return tokens

    def add_rolling_noise(self, tokens):
        """Rotate the interior tokens by a random offset, keeping BOS/EOS."""
        offset = np.random.randint(1, (max(1, (tokens.size((- 1)) - 1)) + 1))
        tokens = torch.cat((tokens[0:1], tokens[offset:(- 1)], tokens[1:offset], tokens[(- 1):]), dim=0)
        return tokens

    def add_insertion_noise(self, tokens, p):
        """Insert ceil(len * p) new tokens (<mask> or random) at random
        interior positions."""
        if (p == 0.0):
            return tokens
        num_tokens = len(tokens)
        n = int(math.ceil((num_tokens * p)))
        noise_indices = (torch.randperm(((num_tokens + n) - 2))[:n] + 1)
        noise_mask = torch.zeros(size=((num_tokens + n),), dtype=torch.bool)
        noise_mask[noise_indices] = 1
        result = torch.LongTensor((n + len(tokens))).fill_((- 1))
        num_random = int(math.ceil((n * self.random_ratio)))
        result[noise_indices[num_random:]] = self.mask_idx
        result[noise_indices[:num_random]] = torch.randint(low=1, high=len(self.vocab), size=(num_random,))
        result[(~ noise_mask)] = tokens
        assert (result >= 0).all()
        return result

    def collater(self, samples):
        """Merge a list of samples into a padded mini-batch."""
        return collate(samples, self.vocab.pad(), self.vocab.eos(), self.vocab)

    def num_tokens(self, index):
        return self.sizes[index]

    def size(self, index):
        return self.sizes[index]

    def ordered_indices(self):
        """Return indices sorted by size (stable), optionally pre-shuffled."""
        if self.shuffle:
            indices = np.random.permutation(len(self))
        else:
            indices = np.arange(len(self))
        return indices[np.argsort(self.sizes[indices], kind='mergesort')]

    def prefetch(self, indices):
        # BUG FIX: this class wraps a single `self.dataset`; the old code
        # referenced nonexistent `self.src`/`self.tgt` and always raised
        # AttributeError.
        self.dataset.prefetch(indices)

    def supports_prefetch(self):
        # BUG FIX: delegate to the wrapped dataset (see prefetch above).
        return (hasattr(self.dataset, 'supports_prefetch') and self.dataset.supports_prefetch)
# NOTE(review): the decorator below appears truncated — presumably
# `@pytest.mark.parametrize`; confirm against the original file.
.parametrize('flatlist_as_rvec', [False, True])
def test_ListArray_NumpyArray(flatlist_as_rvec):
    """Check that a ListArray-of-NumpyArray layout round-trips through the
    awkward cling generator and can be read element-wise from C++ via ROOT."""
    # Offsets (starts=[4,100,1], stops=[7,100,3]) select slices of the flat
    # content: [7.7,1.1,2.2] (wait: indices 4..6), an empty list, and one of
    # length 2 — the final assert pins the exact expected values.
    v2a = ak.contents.listarray.ListArray(ak.index.Index(np.array([4, 100, 1], np.int64)), ak.index.Index(np.array([7, 100, 3, 200], np.int64)), ak.contents.numpyarray.NumpyArray(np.array([6.6, 4.4, 5.5, 7.7, 1.1, 2.2, 3.3, 8.8])))
    layout = v2a
    # Generate a C++ accessor for this layout and make it known to cling.
    generator = ak._connect.cling.togenerator(layout.form, flatlist_as_rvec=flatlist_as_rvec)
    lookup = ak._lookup.Lookup(layout, generator)
    generator.generate(compiler)
    # The declared function copies sizes and elements into `out` so Python
    # can assert on them.
    ROOT.gInterpreter.Declare(f'''
void roottest_ListArray_NumpyArray_v2a_{flatlist_as_rvec}(double* out, ssize_t length, ssize_t* ptrs) {{
auto obj = {generator.dataset()};
out[0] = obj.size();
out[1] = obj[0].size();
out[2] = obj[0][0];
out[3] = obj[0][1];
out[4] = obj[0][2];
out[5] = obj[1].size();
out[6] = obj[2].size();
out[7] = obj[2][0];
out[8] = obj[2][1];
}}
''')
    out = np.zeros(9, dtype=np.float64)
    getattr(ROOT, f'roottest_ListArray_NumpyArray_v2a_{flatlist_as_rvec}')(out, len(layout), lookup.arrayptrs)
    assert (out.tolist() == [3.0, 3.0, 1.1, 2.2, 3.3, 0.0, 2.0, 4.4, 5.5])
def get_bridge_entities(sample):
    """Collect table cells whose hyperlink also appears in the sample text.

    Scans every cell of ``sample['table']['data']`` (each cell is
    ``[name, links]``) and, for each non-None link that occurs as a
    substring of ``sample['text']``, records a bridge-entity dict with the
    cell location ``[row, col, link_index]``, the cell name, and the URL.

    Returns:
        list[dict]: one entry per matching link, in row-major scan order.
    """
    bridges = []
    text = sample['text']
    for row_idx, row_cells in enumerate(sample['table']['data']):
        for col_idx, cell in enumerate(row_cells):
            for link_idx, url in enumerate(cell[1]):
                if url is None or url not in text:
                    continue
                bridges.append({
                    'loc': [row_idx, col_idx, link_idx],
                    'name': cell[0],
                    'url': url,
                })
    return bridges
class ParamsLog(Callback):
    """Lightning callback that logs model parameter counts at fit start.

    Each flag selects one of three counts: total, trainable
    (``requires_grad``), and non-trainable parameters.
    """

    def __init__(self, total_params_log: bool=True, trainable_params_log: bool=True, non_trainable_params_log: bool=True):
        super().__init__()
        # Which of the three counts to compute and log.
        self._log_stats = AttributeDict({'total_params_log': total_params_log, 'trainable_params_log': trainable_params_log, 'non_trainable_params_log': non_trainable_params_log})
    # NOTE(review): `_zero_only` looks like a truncated decorator remnant
    # (likely `@rank_zero_only`) — confirm against the original file.
    _zero_only
    def on_fit_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
        """Compute the enabled parameter counts and log them as hyperparams."""
        logs = {}
        if self._log_stats.total_params_log:
            logs['model/params_total'] = sum((p.numel() for p in pl_module.parameters()))
        if self._log_stats.trainable_params_log:
            logs['model/params_trainable'] = sum((p.numel() for p in pl_module.parameters() if p.requires_grad))
        if self._log_stats.non_trainable_params_log:
            logs['model/params_not_trainable'] = sum((p.numel() for p in pl_module.parameters() if (not p.requires_grad)))
        # A logger may be absent (e.g. fast_dev_run); skip logging then.
        if (trainer.logger is not None):
            trainer.logger.log_hyperparams(logs)
class LoadImage():
    """Pipeline step that loads an image into a results dict via mmcv.

    ``results['img']`` may be a file path (str) or an already-loaded array;
    after the call it holds the decoded image, and ``filename`` /
    ``ori_filename`` record the path (or None for in-memory input).
    """

    def __call__(self, results):
        source = results['img']
        # Only a string input carries a meaningful filename.
        name = source if isinstance(source, str) else None
        results['filename'] = name
        results['ori_filename'] = name
        image = mmcv.imread(source)
        results['img'] = image
        results['img_shape'] = image.shape
        results['ori_shape'] = image.shape
        return results
def register_pascal_person_part_parsing(root):
    """Register all predefined Pascal-Person-Part parsing splits.

    For every entry in ``_PREDEFINED_SPLITS`` this registers a loader in
    ``DatasetCatalog`` and attaches path/evaluator metadata in
    ``MetadataCatalog``. ``root`` is the datasets root directory; the
    actual dataset lives under ``<root>/pascal-person-part``.
    """
    dataset_root = os.path.join(root, 'pascal-person-part')
    meta = _get_pascal_person_part_parsing_meta()
    for name, paths in _PREDEFINED_SPLITS.items():
        img_dir, cat_dir, inst_dir, human_dir = (os.path.join(dataset_root, p) for p in paths)
        # Default arguments bind the per-split paths now, avoiding the
        # late-binding-closure pitfall in the loop.
        DatasetCatalog.register(
            name,
            (lambda k=img_dir, l=cat_dir, m=inst_dir, n=human_dir: load_parsing(l, m, n, k, gt_ext='png', image_ext='jpg')),
        )
        MetadataCatalog.get(name).set(image_root=img_dir, category_gt_root=cat_dir, instance_gt_root=inst_dir, human_gt_root=human_dir, evaluator_type='parsing', **meta)
def run_conv_selection(module, x):
    """Tag conv inputs with a forward pass of ``x``, then patch each tagged
    Conv2d so its ``forward`` is replaced by the implementation chosen by
    ``select_conv`` for that layer's recorded input."""
    tag_conv(module, x)

    def _patch(node):
        # Recurse through the module tree, rebinding forward on every
        # Conv2d that received an 'input' attribute during tagging.
        for child in node.children():
            if isinstance(child, torch.nn.Conv2d) and hasattr(child, 'input'):
                chosen = select_conv(child, child.input)
                child.forward = types.MethodType(chosen, child)
            else:
                _patch(child)

    _patch(module)
class HighResolutionNet(nn.Module):
    """HRNet backbone with four `counter_decoder` heads, one per branch.

    Builds the standard four-stage HRNet (two-conv stem, a single-branch
    residual stage, then three multi-resolution stages joined by transition
    layers) and attaches a counting head to each output resolution.
    ``forward`` returns the list of the four head outputs.
    """

    def __init__(self, config, **kwargs):
        extra = config.MODEL.EXTRA
        super(HighResolutionNet, self).__init__()
        # Stem: two stride-2 3x3 convs bring the input to 1/4 resolution.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn2 = BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        # Stage 1: a single-branch residual layer.
        self.stage1_cfg = extra['STAGE1']
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        stage1_out_channel = (block.expansion * num_channels)
        # Stages 2-4: each adds one lower-resolution branch via a transition.
        self.stage2_cfg = extra['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels)
        (self.stage2, pre_stage_channels) = self._make_stage(self.stage2_cfg, num_channels)
        self.stage3_cfg = extra['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
        (self.stage3, pre_stage_channels) = self._make_stage(self.stage3_cfg, num_channels)
        self.stage4_cfg = extra['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
        (self.stage4, pre_stage_channels) = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True)
        # BUG FIX: np.int was removed in NumPy 1.24 (deprecated since 1.20);
        # the builtin int is the documented replacement.
        last_inp_channels = int(np.sum(pre_stage_channels))
        # One counting head per resolution branch.
        self.last_layer1 = counter_decoder(48, [48, 32, 16], config.DATASET.NUM_CLASSES)
        self.last_layer2 = counter_decoder(96, [48, 32, 16], config.DATASET.NUM_CLASSES)
        self.last_layer3 = counter_decoder(192, [96, 48, 16], config.DATASET.NUM_CLASSES)
        self.last_layer4 = counter_decoder(384, [192, 96, 16], config.DATASET.NUM_CLASSES)

    def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
        """Build per-branch transition modules between consecutive stages.

        Existing branches get a 3x3 conv only when channel counts differ
        (None otherwise); each new branch is created by a chain of stride-2
        convs from the lowest-resolution previous branch.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if (i < num_branches_pre):
                if (num_channels_cur_layer[i] != num_channels_pre_layer[i]):
                    transition_layers.append(nn.Sequential(nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), BatchNorm2d(num_channels_cur_layer[i], momentum=BN_MOMENTUM), nn.ReLU(inplace=True)))
                else:
                    # Channels already match; identity transition.
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(((i + 1) - num_branches_pre)):
                    inchannels = num_channels_pre_layer[(- 1)]
                    # Only the last conv in the chain changes channel count.
                    outchannels = (num_channels_cur_layer[i] if (j == (i - num_branches_pre)) else inchannels)
                    conv3x3s.append(nn.Sequential(nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), BatchNorm2d(outchannels, momentum=BN_MOMENTUM), nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; downsample when shape changes."""
        downsample = None
        if ((stride != 1) or (inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm2d((planes * block.expansion), momentum=BN_MOMENTUM))
        layers = []
        layers.append(block(inplanes, planes, stride, downsample))
        inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))
        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True):
        """Build one HRNet stage from its config dict.

        Returns (stage_module, out_channels_per_branch).
        """
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']
        modules = []
        for i in range(num_modules):
            # Only the last module may collapse to single-scale output.
            if ((not multi_scale_output) and (i == (num_modules - 1))):
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(HighResolutionModule(num_branches, block, num_blocks, num_inchannels, num_channels, fuse_method, reset_multi_scale_output))
            num_inchannels = modules[(- 1)].get_num_inchannels()
        return (nn.Sequential(*modules), num_inchannels)

    def forward(self, x):
        """Run the stem, the four stages, and the four counting heads.

        Returns a list [x1, x2, x3, x4] of head outputs, one per branch.
        """
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)
        x_list = []
        for i in range(self.stage2_cfg['NUM_BRANCHES']):
            if (self.transition1[i] is not None):
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)
        x_list = []
        for i in range(self.stage3_cfg['NUM_BRANCHES']):
            if (self.transition2[i] is not None):
                # New branches are derived from the lowest-resolution output.
                x_list.append(self.transition2[i](y_list[(- 1)]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)
        x_list = []
        for i in range(self.stage4_cfg['NUM_BRANCHES']):
            if (self.transition3[i] is not None):
                x_list.append(self.transition3[i](y_list[(- 1)]))
            else:
                x_list.append(y_list[i])
        x = self.stage4(x_list)
        (x0_h, x0_w) = (x[0].size(2), x[0].size(3))
        x1 = self.last_layer1(x[0])
        x2 = self.last_layer2(x[1])
        x3 = self.last_layer3(x[2])
        x4 = self.last_layer4(x[3])
        return [x1, x2, x3, x4]

    def init_weights(self, pretrained=''):
        """Init convs with N(0, 0.001), BN with (1, 0); then optionally load
        matching weights from a pretrained checkpoint file."""
        logger.info('=> init weights from normal distribution')
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.001)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if os.path.isfile(pretrained):
            pretrained_dict = torch.load(pretrained)
            logger.info('=> loading pretrained model {}'.format(pretrained))
            model_dict = self.state_dict()
            # Keep only checkpoint entries whose names exist in this model.
            pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if (k in model_dict.keys())}
            model_dict.update(pretrained_dict)
            self.load_state_dict(model_dict)
def get_initializer(matrix):
    """Return a Keras-style initializer that always yields ``matrix``.

    The returned callable accepts (and ignores) the usual initializer
    arguments (shape, dtype, partition_info, extra kwargs) and simply
    returns the captured matrix unchanged.
    """
    return lambda shape, dtype=None, partition_info=None, **kwargs: matrix
class Solver():
    """Base class for multi-task loss-weighting solvers.

    Subclasses override :meth:`get_weighted_loss`; calling the solver
    instance simply forwards to that method.
    """

    def __init__(self, n_tasks):
        super().__init__()
        # Number of tasks whose losses are being combined.
        self.n_tasks = n_tasks

    def get_weighted_loss(self, losses, ray, parameters=None, **kwargs):
        """Combine per-task losses given preference ray; subclass hook."""
        pass

    def __call__(self, losses, ray, parameters, **kwargs):
        result = self.get_weighted_loss(losses, ray, parameters, **kwargs)
        return result
# NOTE(review): `_task(...)` looks like a truncated decorator remnant
# (likely `@registry.register_task(...)`) — confirm against the original file.
_task('multimodal_classification')
class MultimodalClassificationTask(BaseTask):
    """Task that evaluates multimodal classifiers by top-1 accuracy."""

    def __init__(self):
        super().__init__()

    def valid_step(self, model, samples):
        """Run prediction on one batch and collect per-instance records.

        Returns a list of dicts with the instance id, the argmax class
        prediction, and the target label.
        """
        results = []
        outputs = model.predict(samples)
        predictions = outputs['predictions']
        targets = outputs['targets']
        # Top-1 class per example; move off the accelerator for serialization.
        predictions = predictions.max(1)[1].cpu().numpy()
        targets = targets.cpu().numpy()
        indices = samples[self.inst_id_key]
        for (pred, tgt, index) in zip(predictions, targets, indices):
            if isinstance(index, torch.Tensor):
                index = index.item()
            results.append({self.inst_id_key: index, 'prediction': pred.item(), 'target': tgt.item()})
        return results

    def after_evaluation(self, val_result, split_name, epoch, **kwargs):
        """Persist per-instance results (deduplicated by id) and report metrics."""
        eval_result_file = self.save_result(result=val_result, result_dir=registry.get_path('result_dir'), filename='{}_epoch{}'.format(split_name, epoch), remove_duplicate=self.inst_id_key)
        metrics = self._report_metrics(eval_result_file=eval_result_file, split_name=split_name)
        return metrics
    # NOTE(review): `_process` also looks like a truncated decorator (likely
    # `@dist_utils.main_process`) — confirm against the original file.
    _process
    def _report_metrics(self, eval_result_file, split_name):
        """Compute accuracy from the saved result file and append it to
        evaluate.txt; returns {'agg_metrics': acc, 'acc': acc}."""
        results = json.load(open(eval_result_file))
        predictions = np.array([res['prediction'] for res in results])
        targets = np.array([res['target'] for res in results])
        accuracy = ((targets == predictions).sum() / targets.shape[0])
        metrics = {'agg_metrics': accuracy, 'acc': accuracy}
        log_stats = {split_name: {k: v for (k, v) in metrics.items()}}
        with open(os.path.join(registry.get_path('output_dir'), 'evaluate.txt'), 'a') as f:
            f.write((json.dumps(log_stats) + '\n'))
        logging.info(metrics)
        return metrics
def get_job_throughputs(jobs, oracle_throughputs, worker_types):
    """Build the per-worker-type throughput table for jobs and job pairs.

    For each job, records its isolated ('null' co-location) throughput on
    every worker type; for each ordered pair of distinct jobs with matching
    scale factors, records the co-located throughput under a merged
    ``JobIdPair`` key.

    Returns:
        dict mapping job id (or merged pair id) -> {worker_type: throughput}.
    """
    table = {}
    for idx, job in enumerate(jobs):
        key = (job.job_type, job.scale_factor)
        table[job.job_id] = {
            wt: oracle_throughputs[wt][key]['null'] for wt in worker_types
        }
        for other in jobs[(idx + 1):]:
            pair_id = JobIdPair(job.job_id[0], other.job_id[0])
            # Co-location throughputs only exist for equal scale factors.
            if other.scale_factor != job.scale_factor:
                continue
            other_key = (other.job_type, other.scale_factor)
            table[pair_id] = {
                wt: oracle_throughputs[wt][key][other_key] for wt in worker_types
            }
    return table
class SustainDownManager():
    """Manages notes held by a sustain-pedal-down window [start, end].

    Notes collected while the pedal is down can be re-timed so that each
    note rings until the next note of the same pitch begins (or at least
    until the pedal is released).
    """

    def __init__(self, start, end):
        self.start = start
        self.end = end
        self.managed_notes = []
        # pitch -> start time of the chronologically next note at that pitch
        self._note_dict = {}

    def add_managed_note(self, note: pretty_midi.Note):
        """Record a note that sounded while the pedal was down."""
        self.managed_notes.append(note)

    def transposition_notes(self):
        """Extend each managed note's end time in place.

        Iterates newest-first so that, for every pitch, a note's end is
        set to the start of the following note of the same pitch; the last
        note of a pitch is extended at least to the pedal-release time.
        """
        for note in reversed(self.managed_notes):
            if note.pitch in self._note_dict:
                note.end = self._note_dict[note.pitch]
            else:
                note.end = max(self.end, note.end)
            self._note_dict[note.pitch] = note.start
def evaluate_model(trained_model, data_loader):
    """Evaluate a saved CrowdCounter checkpoint on a data loader.

    Loads the weights from ``trained_model`` into a fresh CrowdCounter on
    GPU, runs every blob through it, and accumulates count errors.

    Returns:
        tuple: (MAE, RMSE) of estimated vs. ground-truth crowd counts.
    """
    model = CrowdCounter()
    network.load_net(trained_model, model)
    model.cuda()
    model.eval()
    abs_err_sum = 0.0
    sq_err_sum = 0.0
    for blob in data_loader:
        estimate = model(blob['data'], blob['gt_density'])
        estimate = estimate.data.cpu().numpy()
        # Crowd count = integral of the density map.
        true_count = np.sum(blob['gt_density'])
        est_count = np.sum(estimate)
        diff = true_count - est_count
        abs_err_sum += abs(diff)
        sq_err_sum += diff * diff
    n = data_loader.get_num_samples()
    return ((abs_err_sum / n), np.sqrt((sq_err_sum / n)))
class StateNameMixin():
    """Mixin maintaining per-variable mappings between human-readable state
    names and integer state numbers.

    Attributes (set by store_state_names):
        state_names: {variable: [state names in order]}
        name_to_no: {variable: {state name: state number}}
        no_to_name: {variable: {state number: state name}}
    """

    def store_state_names(self, variables, cardinality, state_names):
        """Validate and store ``state_names``, building name<->number maps.

        When ``state_names`` is falsy, default integer "names"
        0..cardinality-1 are generated for every variable.

        Raises:
            ValueError: if a value is not a list/tuple or contains repeats.
        """
        if state_names:
            for (key, value) in state_names.items():
                if (not isinstance(value, (list, tuple))):
                    raise ValueError('The state names must be for the form: {variable: list_of_states}')
                elif (not (len(set(value)) == len(value))):
                    raise ValueError(f'Repeated statenames for variable: {key}')
            # Copy so later mutation of the caller's dict can't affect us.
            self.state_names = state_names.copy()
            if state_names:
                self.name_to_no = {}
                self.no_to_name = {}
                for (key, values) in self.state_names.items():
                    # Number states by their position in the provided list.
                    self.name_to_no[key] = {name: no for (no, name) in enumerate(self.state_names[key])}
                    self.no_to_name[key] = {no: name for (no, name) in enumerate(self.state_names[key])}
        else:
            # No names supplied: the "names" are the integers 0..card-1, so
            # both directions are identity maps.
            self.state_names = {var: list(range(int(cardinality[index]))) for (index, var) in enumerate(variables)}
            self.name_to_no = {var: {i: i for i in range(int(cardinality[index]))} for (index, var) in enumerate(variables)}
            self.no_to_name = self.name_to_no.copy()

    def get_state_names(self, var, state_no):
        """Return the state name for ``state_no`` of ``var`` (identity when
        no names are stored)."""
        if self.state_names:
            return self.no_to_name[var][state_no]
        else:
            return state_no

    def get_state_no(self, var, state_name):
        """Return the state number for ``state_name`` of ``var`` (identity
        when no names are stored)."""
        if self.state_names:
            return self.name_to_no[var][state_name]
        else:
            return state_name

    def add_state_names(self, phi1):
        """Merge another object's state-name maps into this one (phi1 wins
        on conflicting variables)."""
        self.state_names.update(phi1.state_names)
        self.name_to_no.update(phi1.name_to_no)
        self.no_to_name.update(phi1.no_to_name)

    def del_state_names(self, var_list):
        """Drop all state-name bookkeeping for the given variables."""
        for var in var_list:
            del self.state_names[var]
            del self.name_to_no[var]
            del self.no_to_name[var]
def load_ply(path):
    """Load a 3D mesh model from a PLY file (ASCII or binary).

    Supports triangular faces only, plus optional per-vertex normals,
    RGB colors, and texture UV coordinates.

    Args:
        path: path to the .ply file.

    Returns:
        dict with key 'pts' (n_pts x 3 float64 array) and, when present in
        the file, 'faces' (n_faces x 3), 'normals', 'colors', 'texture_uv'.
    """
    # BUG FIX: open in binary mode — the binary branch below feeds f.read()
    # into struct.unpack, which requires bytes; text mode would return str
    # and fail on Python 3. Header/ASCII lines are decoded explicitly.
    f = open(path, 'rb')
    n_pts = 0
    n_faces = 0
    face_n_corners = 3  # only triangular faces are supported
    pt_props = []
    face_props = []
    is_binary = False
    header_vertex_section = False
    header_face_section = False
    # ---- Parse the header ----
    while True:
        line = f.readline().decode('utf8').rstrip('\n').rstrip('\r')
        if line.startswith('element vertex'):
            n_pts = int(line.split()[(- 1)])
            header_vertex_section = True
            header_face_section = False
        elif line.startswith('element face'):
            n_faces = int(line.split()[(- 1)])
            header_vertex_section = False
            header_face_section = True
        elif line.startswith('element'):
            # Some other element type; ignore its properties.
            header_vertex_section = False
            header_face_section = False
        elif (line.startswith('property') and header_vertex_section):
            # (name, type) for each per-vertex scalar property.
            pt_props.append((line.split()[(- 1)], line.split()[(- 2)]))
        elif (line.startswith('property list') and header_face_section):
            elems = line.split()
            if (elems[(- 1)] == 'vertex_indices'):
                # elems[2] = list-count type, elems[3] = index type.
                face_props.append(('n_corners', elems[2]))
                for i in range(face_n_corners):
                    face_props.append((('ind_' + str(i)), elems[3]))
            else:
                print(('Warning: Not supported face property: ' + elems[(- 1)]))
        elif line.startswith('format'):
            if ('binary' in line):
                is_binary = True
        elif line.startswith('end_header'):
            break
    # ---- Allocate output arrays ----
    # BUG FIX: np.float was removed in NumPy 1.24 (deprecated since 1.20);
    # use the explicit np.float64 it aliased.
    model = {}
    model['pts'] = np.zeros((n_pts, 3), np.float64)
    if (n_faces > 0):
        model['faces'] = np.zeros((n_faces, face_n_corners), np.float64)
    pt_props_names = [p[0] for p in pt_props]
    is_normal = False
    if {'nx', 'ny', 'nz'}.issubset(set(pt_props_names)):
        is_normal = True
        model['normals'] = np.zeros((n_pts, 3), np.float64)
    is_color = False
    if {'red', 'green', 'blue'}.issubset(set(pt_props_names)):
        is_color = True
        model['colors'] = np.zeros((n_pts, 3), np.float64)
    is_texture = False
    if {'texture_u', 'texture_v'}.issubset(set(pt_props_names)):
        is_texture = True
        model['texture_uv'] = np.zeros((n_pts, 2), np.float64)
    # struct format char and byte size per PLY scalar type.
    formats = {'float': ('f', 4), 'double': ('d', 8), 'int': ('i', 4), 'uchar': ('B', 1)}
    # ---- Read vertex data ----
    for pt_id in range(n_pts):
        prop_vals = {}
        load_props = ['x', 'y', 'z', 'nx', 'ny', 'nz', 'red', 'green', 'blue', 'texture_u', 'texture_v']
        if is_binary:
            for prop in pt_props:
                format = formats[prop[1]]
                val = struct.unpack(format[0], f.read(format[1]))[0]
                if (prop[0] in load_props):
                    prop_vals[prop[0]] = val
        else:
            elems = f.readline().decode('utf8').rstrip('\n').rstrip('\r').split()
            for (prop_id, prop) in enumerate(pt_props):
                if (prop[0] in load_props):
                    prop_vals[prop[0]] = elems[prop_id]
        model['pts'][(pt_id, 0)] = float(prop_vals['x'])
        model['pts'][(pt_id, 1)] = float(prop_vals['y'])
        model['pts'][(pt_id, 2)] = float(prop_vals['z'])
        if is_normal:
            model['normals'][(pt_id, 0)] = float(prop_vals['nx'])
            model['normals'][(pt_id, 1)] = float(prop_vals['ny'])
            model['normals'][(pt_id, 2)] = float(prop_vals['nz'])
        if is_color:
            model['colors'][(pt_id, 0)] = float(prop_vals['red'])
            model['colors'][(pt_id, 1)] = float(prop_vals['green'])
            model['colors'][(pt_id, 2)] = float(prop_vals['blue'])
        if is_texture:
            model['texture_uv'][(pt_id, 0)] = float(prop_vals['texture_u'])
            model['texture_uv'][(pt_id, 1)] = float(prop_vals['texture_v'])
    # ---- Read face data ----
    for face_id in range(n_faces):
        prop_vals = {}
        if is_binary:
            for prop in face_props:
                format = formats[prop[1]]
                val = struct.unpack(format[0], f.read(format[1]))[0]
                if (prop[0] == 'n_corners'):
                    if (val != face_n_corners):
                        print('Error: Only triangular faces are supported.')
                        print(('Number of face corners: ' + str(val)))
                        exit((- 1))
                else:
                    prop_vals[prop[0]] = val
        else:
            elems = f.readline().decode('utf8').rstrip('\n').rstrip('\r').split()
            for (prop_id, prop) in enumerate(face_props):
                if (prop[0] == 'n_corners'):
                    if (int(elems[prop_id]) != face_n_corners):
                        print('Error: Only triangular faces are supported.')
                        print(('Number of face corners: ' + str(int(elems[prop_id]))))
                        exit((- 1))
                else:
                    prop_vals[prop[0]] = elems[prop_id]
        model['faces'][(face_id, 0)] = int(prop_vals['ind_0'])
        model['faces'][(face_id, 1)] = int(prop_vals['ind_1'])
        model['faces'][(face_id, 2)] = int(prop_vals['ind_2'])
    f.close()
    return model
class RoundRobinZipDatasets(FairseqDataset):
    """Zip several datasets, cycling shorter ones round-robin.

    Each item is an OrderedDict of one sample per dataset; shorter datasets
    repeat (index modulo their length). With ``eval_key`` set, items and
    batches come from that single dataset only.
    """

    def __init__(self, datasets, eval_key=None):
        super().__init__()
        if isinstance(datasets, dict):
            datasets = OrderedDict(datasets)
        assert isinstance(datasets, OrderedDict)
        assert datasets, "Can't make a RoundRobinZipDatasets out of nothing"
        for dataset in datasets.values():
            assert isinstance(dataset, FairseqDataset)
        self.datasets = datasets
        self.eval_key = eval_key
        # The overall length is driven by the longest dataset.
        self.longest_dataset_key = max(datasets, key=(lambda k: len(datasets[k])))
        self.longest_dataset = datasets[self.longest_dataset_key]
        # Per-dataset index orderings; populated by ordered_indices().
        self._ordered_indices: Dict[(str, Sequence[int])] = None

    def _map_index(self, key, index):
        """Map a global index into dataset ``key``'s own ordering (modulo
        its length, so shorter datasets wrap around)."""
        assert (self._ordered_indices is not None), 'Must call RoundRobinZipDatasets.ordered_indices() first'
        o = self._ordered_indices[key]
        return o[(index % len(o))]

    def __getitem__(self, index):
        if (self.eval_key is None):
            return OrderedDict([(key, dataset[self._map_index(key, index)]) for (key, dataset) in self.datasets.items()])
        else:
            # Evaluation mode: only return the eval dataset's sample.
            return self.datasets[self.eval_key][self._map_index(self.eval_key, index)]

    def __len__(self):
        # After filtering, the stored ordering (possibly shorter) wins.
        if (self._ordered_indices is not None):
            return len(self._ordered_indices[self.longest_dataset_key])
        return len(self.longest_dataset)

    def collater(self, samples):
        """Collate per-dataset (or via the eval dataset when eval_key set)."""
        if (len(samples) == 0):
            return None
        if (self.eval_key is None):
            return OrderedDict([(key, dataset.collater([sample[key] for sample in samples])) for (key, dataset) in self.datasets.items()])
        else:
            return self.datasets[self.eval_key].collater(samples)

    def num_tokens(self, index):
        """Upper bound used for batching: max token count across datasets."""
        return max((dataset.num_tokens(self._map_index(key, index)) for (key, dataset) in self.datasets.items()))

    def size(self, index):
        return {key: dataset.size(self._map_index(key, index)) for (key, dataset) in self.datasets.items()}

    def ordered_indices(self):
        """Initialize per-dataset orderings; the zip itself is not sorted."""
        if (self._ordered_indices is None):
            self._ordered_indices = OrderedDict([(key, dataset.ordered_indices()) for (key, dataset) in self.datasets.items()])
        return np.arange(len(self))

    def filter_indices_by_size(self, indices, max_positions=None):
        """Filter each wrapped dataset's ordering by size in place.

        Returns (all indices, [0] if anything was dropped) — the sentinel
        [0] only signals that some samples were skipped.
        """
        def _deep_until_language_pair(dataset):
            # Unwrap nested dataset wrappers until a LanguagePairDataset.
            if isinstance(dataset, LanguagePairDataset):
                return dataset
            if hasattr(dataset, 'tgt_dataset'):
                return _deep_until_language_pair(dataset.tgt_dataset)
            if hasattr(dataset, 'dataset'):
                return _deep_until_language_pair(dataset.dataset)
            raise Exception(f"Don't know how to unwrap this dataset: {dataset}")
        if (not isinstance(max_positions, dict)):
            max_positions = {k: max_positions for k in self.datasets.keys()}
        ignored_some = False
        for (key, dataset) in self.datasets.items():
            dataset = _deep_until_language_pair(dataset)
            (self._ordered_indices[key], ignored) = dataset.filter_indices_by_size(self._ordered_indices[key], max_positions[key])
            if (len(ignored) > 0):
                ignored_some = True
                logger.warning(f'{len(ignored)} samples from {key} have invalid sizes and will be skipped, max_positions={max_positions[key]}, first few sample ids={ignored[:10]}')
        return (np.arange(len(self)), ([0] if ignored_some else []))

    def supports_prefetch(self):
        return all((getattr(dataset, 'supports_prefetch', False) for dataset in self.datasets.values()))

    def prefetch(self, indices):
        for (key, dataset) in self.datasets.items():
            dataset.prefetch([self._map_index(key, index) for index in indices])
# NOTE(review): the decorator below appears truncated — presumably a
# schemathesis test-marker chain ending in `.operations(...)`; confirm
# against the original file.
.operations('create_user', 'get_user', 'update_user')
def test_openapi_links(cli, cli_args, schema_url, hypothesis_max_examples, snapshot_cli):
    """Run the schemathesis CLI deterministically over the linked operations
    and compare the full CLI output against the stored snapshot."""
    assert (cli.run(*cli_args, f'--hypothesis-max-examples={(hypothesis_max_examples or 2)}', '--hypothesis-seed=1', '--hypothesis-derandomize', '--hypothesis-deadline=None', '--show-trace') == snapshot_cli)
def compute_total_loss(split, params, rng, config):
    """Average per-index loss over up to 500 batches of ``split``.

    Each batch's loss is weighted by its ``num_indices`` so the returned
    value is the mean loss per index rather than per batch.
    """
    n_batches = min(500, loader.get_number_of_batches(split))
    loss_sum = 0
    index_count = 0
    for batch_idx in range(n_batches):
        batch = loader.get_batch(split, batch_idx)
        # Convert to fp16 before scoring, matching the training precision.
        fp16_params = femr.models.transformer.convert_params(params, jnp.float16)
        weight = batch['num_indices']
        loss_sum += (compute_loss(fp16_params, rng, config, batch) * weight)
        index_count += weight
    return (loss_sum / index_count)
class InputExample(object):
def __init__(self, guid, text_a, text_b=None, label=None, logits=None, meta: Optional[Dict]=None, idx=(- 1)):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.logits = logits
self.idx = idx
self.meta = (meta if meta else {})
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
return (json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n')
def load_examples(path: str) -> List['InputExample']:
    """Load a pickled list of InputExamples from *path*.

    NOTE: ``pickle.load`` can execute arbitrary code; only read trusted files.
    """
    with open(path, 'rb') as handle:
        examples = pickle.load(handle)
    return examples
def save_examples(examples: List['InputExample'], path: str) -> None:
    """Pickle *examples* to *path*, overwriting any existing file."""
    with open(path, 'wb') as handle:
        pickle.dump(examples, handle)
_numpy_output(check_dtype=True)
def test_ufunc_copysign_ff(A: dace.float32[10], B: dace.float32[10]):
    """Check np.copysign on two float32 vectors inside a DaCe program.

    The decorator above compiles this as a DaCe program and compares its
    output against plain NumPy with dtype checking enabled.
    """
    return np.copysign(A, B)
class LRLambda(object):
    """Factory of learning-rate multiplier schedules (LambdaLR-style).

    Each factory returns a callable mapping a step number to the
    multiplicative factor applied to the base learning rate.

    Fixed: the factories were defined without ``self`` or ``@staticmethod``,
    so calling them on an *instance* raised TypeError; they are now proper
    static methods (class-level calls are unchanged).
    """

    @staticmethod
    def constant_lr():
        """Constant factor 1.0 at every step."""
        return (lambda step: 1.0)

    @staticmethod
    def constant_lr_with_warmup(num_warmup_steps: int):
        """Linear warmup to 1.0 over *num_warmup_steps*, then constant."""
        assert (num_warmup_steps >= 1)

        def lr_lambda(step: int):
            if step < num_warmup_steps:
                return step / num_warmup_steps
            return 1.0
        return lr_lambda

    @staticmethod
    def linear_decay_lr_with_warmup(num_warmup_steps: int, num_total_steps: int):
        """Linear warmup, then linear decay reaching 0 at *num_total_steps*."""
        assert (num_warmup_steps >= 1)
        assert (num_total_steps >= num_warmup_steps)

        def lr_lambda(step: int):
            if step < num_warmup_steps:
                return step / num_warmup_steps
            elif step < num_total_steps:
                return (num_total_steps - step) / (num_total_steps - num_warmup_steps)
            return 0.0
        return lr_lambda

    @staticmethod
    def exponential_decay_lr_with_warmup(num_warmup_steps: int, num_period_steps: int = None, gamma: float = 0.9):
        """Linear warmup, then decay by *gamma* per *num_period_steps*.

        *num_period_steps* defaults to *num_warmup_steps*.
        """
        if num_period_steps is None:
            num_period_steps = num_warmup_steps
        assert (num_warmup_steps >= 1)
        assert (num_period_steps >= 1)
        assert (0 < gamma < 1)

        def lr_lambda(step: int):
            if step < num_warmup_steps:
                return step / num_warmup_steps
            return gamma ** ((step - num_warmup_steps) / num_period_steps)
        return lr_lambda

    @staticmethod
    def power_decay_lr_with_warmup(num_warmup_steps: int, alpha: float = 0.5):
        """Linear warmup, then power-law decay (step/warmup) ** -alpha."""
        assert (num_warmup_steps >= 1)
        assert (0 < alpha < 1)

        def lr_lambda(step: int):
            if step < num_warmup_steps:
                return step / num_warmup_steps
            return (step / num_warmup_steps) ** (-alpha)
        return lr_lambda

    @staticmethod
    def plot_lr_lambda(lr_lambda, num_total_steps: int):
        """Plot *lr_lambda* over [0, num_total_steps) for visual inspection."""
        # max(1, ...) guards against a zero stride (and hence a ValueError)
        # when num_total_steps < 200.
        x = numpy.arange(0, num_total_steps, max(1, num_total_steps // 200))
        y = numpy.array([lr_lambda(xi) for xi in x])
        (fig, ax) = matplotlib.pyplot.subplots(figsize=(8, 3))
        ax.plot(x, y)
        matplotlib.pyplot.show()
class SWS2013Testset(Dataset):
    """SWS2013 query-by-example test set (queries followed by documents).

    Items 0..n_queries-1 are query utterances, the rest are documents.
    Requires 'sws2013_scoring_root' and 'sws2013_root' in kwargs.
    """

    def __init__(self, split, **kwargs):
        assert (split in ['dev', 'eval'])
        scoring_root = Path(kwargs['sws2013_scoring_root'])
        # File lists come from the official scoring package (ECF/TLIST XML).
        audio_names = parse_ecf(((scoring_root / f'sws2013_{split}') / 'sws2013.ecf.xml'))
        query_names = parse_tlist(((scoring_root / f'sws2013_{split}') / f'sws2013_{split}.tlist.xml'))
        self.dataset_root = Path(kwargs['sws2013_root'])
        self.split = split
        self.n_queries = len(query_names)
        self.n_docs = len(audio_names)
        # Queries first, so `idx < n_queries` identifies them in __getitem__.
        self.data = (query_names + audio_names)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        audio_name = self.data[idx]
        # Queries live under '<split>_queries', documents under 'Audio'.
        audio_path = (((self.dataset_root / f'{self.split}_queries') / audio_name) if (idx < self.n_queries) else ((self.dataset_root / 'Audio') / audio_name))
        audio_path = audio_path.with_suffix('.wav')
        # sox chain: mono 16 kHz, normalize, trim leading/trailing silence
        # (vad + reverse + vad + reverse), then pad 3 s of trailing silence.
        (wav, _) = apply_effects_file(str(audio_path), [['channels', '1'], ['rate', '16000'], ['norm'], ['vad', '-T', '0.25', '-p', '0.1'], ['reverse'], ['vad', '-T', '0.25', '-p', '0.1'], ['reverse'], ['pad', '0', '3']])
        # 3 s windows (48000 samples) with a 0.75 s hop (12000 samples).
        segments = wav.squeeze(0).unfold(0, 48000, 12000).unbind(0)
        return (segments, len(segments), audio_name)

    def collate_fn(self, samples):
        """Flatten per-utterance segment lists; keep lengths to regroup later."""
        (segments, lengths, audio_names) = zip(*samples)
        segments = [seg for segs in segments for seg in segs]
        return (segments, (lengths, audio_names))
_criterion('ctc', dataclass=CtcCriterionConfig)
class CtcCriterion(FairseqCriterion):
    """CTC loss criterion with optional KenLM beam-search WER scoring at eval."""

    def __init__(self, cfg: CtcCriterionConfig, task: FairseqTask):
        super().__init__(task)
        # Index 0 is the conventional CTC blank when the task defines no
        # explicit blank symbol.
        self.blank_idx = (task.target_dictionary.index(task.blank_symbol) if hasattr(task, 'blank_symbol') else 0)
        self.pad_idx = task.target_dictionary.pad()
        self.eos_idx = task.target_dictionary.eos()
        self.post_process = cfg.post_process
        if (cfg.wer_args is not None):
            # Legacy packed-argument form. NOTE(review): eval() on a config
            # string executes arbitrary code — unsafe for untrusted configs.
            (cfg.wer_kenlm_model, cfg.wer_lexicon, cfg.wer_lm_weight, cfg.wer_word_score) = eval(cfg.wer_args)
        if (cfg.wer_kenlm_model is not None):
            from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
            # Build a one-best KenLM-fused beam decoder for WER computation.
            dec_args = Namespace()
            dec_args.nbest = 1
            dec_args.criterion = 'ctc'
            dec_args.kenlm_model = cfg.wer_kenlm_model
            dec_args.lexicon = cfg.wer_lexicon
            dec_args.beam = 50
            dec_args.beam_size_token = min(50, len(task.target_dictionary))
            dec_args.beam_threshold = min(50, len(task.target_dictionary))
            dec_args.lm_weight = cfg.wer_lm_weight
            dec_args.word_score = cfg.wer_word_score
            dec_args.unk_weight = (- math.inf)
            dec_args.sil_weight = 0
            self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary)
        else:
            self.w2l_decoder = None
        self.zero_infinity = cfg.zero_infinity
        self.sentence_avg = cfg.sentence_avg

    def forward(self, model, sample, reduce=True):
        """Compute the CTC loss; at eval time also accumulate char/word errors.

        Returns (loss, sample_size, logging_output).
        """
        net_output = model(**sample['net_input'])
        lprobs = model.get_normalized_probs(net_output, log_probs=True).contiguous()
        if ('src_lengths' in sample['net_input']):
            input_lengths = sample['net_input']['src_lengths']
        else:
            # Derive input lengths from the padding mask when not provided.
            non_padding_mask = (~ net_output['padding_mask'])
            input_lengths = non_padding_mask.long().sum((- 1))
        # F.ctc_loss expects concatenated targets with pad/eos removed.
        pad_mask = ((sample['target'] != self.pad_idx) & (sample['target'] != self.eos_idx))
        targets_flat = sample['target'].masked_select(pad_mask)
        if ('target_lengths' in sample):
            target_lengths = sample['target_lengths']
        else:
            target_lengths = pad_mask.sum((- 1))
        # cuDNN CTC kernels are disabled here — presumably for numerical
        # compatibility; confirm before changing.
        with torch.backends.cudnn.flags(enabled=False):
            loss = F.ctc_loss(lprobs, targets_flat, input_lengths, target_lengths, blank=self.blank_idx, reduction='sum', zero_infinity=self.zero_infinity)
        ntokens = (sample['ntokens'] if ('ntokens' in sample) else target_lengths.sum().item())
        sample_size = (sample['target'].size(0) if self.sentence_avg else ntokens)
        logging_output = {'loss': utils.item(loss.data), 'ntokens': ntokens, 'nsentences': sample['id'].numel(), 'sample_size': sample_size}
        if (not model.training):
            import editdistance
            with torch.no_grad():
                # (T, B, V) -> (B, T, V) on CPU for per-utterance decoding.
                lprobs_t = lprobs.transpose(0, 1).float().contiguous().cpu()
                c_err = 0
                c_len = 0
                w_errs = 0
                w_len = 0
                wv_errs = 0
                for (lp, t, inp_l) in zip(lprobs_t, (sample['target_label'] if ('target_label' in sample) else sample['target']), input_lengths):
                    lp = lp[:inp_l].unsqueeze(0)
                    decoded = None
                    if (self.w2l_decoder is not None):
                        # Unwrap the nbest=1 hypothesis; empty results mean
                        # the decoder produced nothing usable.
                        decoded = self.w2l_decoder.decode(lp)
                        if (len(decoded) < 1):
                            decoded = None
                        else:
                            decoded = decoded[0]
                            if (len(decoded) < 1):
                                decoded = None
                            else:
                                decoded = decoded[0]
                    p = ((t != self.task.target_dictionary.pad()) & (t != self.task.target_dictionary.eos()))
                    targ = t[p]
                    targ_units = self.task.target_dictionary.string(targ)
                    targ_units_arr = targ.tolist()
                    # Greedy CTC decode: argmax, collapse repeats, drop blanks.
                    toks = lp.argmax(dim=(- 1)).unique_consecutive()
                    pred_units_arr = toks[(toks != self.blank_idx)].tolist()
                    c_err += editdistance.eval(pred_units_arr, targ_units_arr)
                    c_len += len(targ_units_arr)
                    targ_words = post_process(targ_units, self.post_process).split()
                    pred_units = self.task.target_dictionary.string(pred_units_arr)
                    pred_words_raw = post_process(pred_units, self.post_process).split()
                    if ((decoded is not None) and ('words' in decoded)):
                        # LM-decoded words for WER; raw (greedy) for "raw WER".
                        pred_words = decoded['words']
                        w_errs += editdistance.eval(pred_words, targ_words)
                        wv_errs += editdistance.eval(pred_words_raw, targ_words)
                    else:
                        dist = editdistance.eval(pred_words_raw, targ_words)
                        w_errs += dist
                        wv_errs += dist
                    w_len += len(targ_words)
                logging_output['wv_errors'] = wv_errs
                logging_output['w_errors'] = w_errs
                logging_output['w_total'] = w_len
                logging_output['c_errors'] = c_err
                logging_output['c_total'] = c_len
        return (loss, sample_size, logging_output)

    # NOTE(review): no self/cls parameter — presumably a @staticmethod whose
    # decorator was lost; confirm against upstream.
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate per-worker logging outputs into global metrics."""
        loss_sum = utils.item(sum((log.get('loss', 0) for log in logging_outputs)))
        ntokens = utils.item(sum((log.get('ntokens', 0) for log in logging_outputs)))
        nsentences = utils.item(sum((log.get('nsentences', 0) for log in logging_outputs)))
        sample_size = utils.item(sum((log.get('sample_size', 0) for log in logging_outputs)))
        # Losses are reported in base-2 (bits) via division by log(2).
        metrics.log_scalar('loss', ((loss_sum / sample_size) / math.log(2)), sample_size, round=3)
        metrics.log_scalar('ntokens', ntokens)
        metrics.log_scalar('nsentences', nsentences)
        if (sample_size != ntokens):
            metrics.log_scalar('nll_loss', ((loss_sum / ntokens) / math.log(2)), ntokens, round=3)
        c_errors = sum((log.get('c_errors', 0) for log in logging_outputs))
        metrics.log_scalar('_c_errors', c_errors)
        c_total = sum((log.get('c_total', 0) for log in logging_outputs))
        metrics.log_scalar('_c_total', c_total)
        w_errors = sum((log.get('w_errors', 0) for log in logging_outputs))
        metrics.log_scalar('_w_errors', w_errors)
        wv_errors = sum((log.get('wv_errors', 0) for log in logging_outputs))
        metrics.log_scalar('_wv_errors', wv_errors)
        w_total = sum((log.get('w_total', 0) for log in logging_outputs))
        metrics.log_scalar('_w_total', w_total)
        # Derived error rates are recomputed lazily from the raw counters.
        if (c_total > 0):
            metrics.log_derived('uer', (lambda meters: (safe_round(((meters['_c_errors'].sum * 100.0) / meters['_c_total'].sum), 3) if (meters['_c_total'].sum > 0) else float('nan'))))
        if (w_total > 0):
            metrics.log_derived('wer', (lambda meters: (safe_round(((meters['_w_errors'].sum * 100.0) / meters['_w_total'].sum), 3) if (meters['_w_total'].sum > 0) else float('nan'))))
            metrics.log_derived('raw_wer', (lambda meters: (safe_round(((meters['_wv_errors'].sum * 100.0) / meters['_w_total'].sum), 3) if (meters['_w_total'].sum > 0) else float('nan'))))

    # NOTE(review): likewise presumably a @staticmethod upstream.
    def logging_outputs_can_be_summed() -> bool:
        """All logging values are summable scalars, enabling cheap reduction."""
        return True
def test_save_and_load_one_entry():
    """Round-trip a 4x6 matrix containing a single non-zero entry."""
    matrix = np.zeros((4, 6))
    matrix[1, 2] = 1
    _check_save_and_load(matrix)
(scope='session')
def hdf_file_path(tmpdir_factory):
    """Session-scoped path of a temporary HDF file for buffer tests."""
    tmp_dir = tmpdir_factory.mktemp('hdf_buffer')
    return str(tmp_dir.join('test.hdf'))
_converter_regitstry('DMA_cw_transpose')
def DMA_cw_transpose_converter(context: 'BM1688Context', reg: DMA_cw_transpose_reg):
    """Decode a DMA C<->W transpose command register into (results, attr, operands).

    The destination shape swaps the C and W axes of the source:
    (n, c, h, w) -> (n, w, h, c).
    """
    # 64-bit per-lane enable mask split across two 32-bit register fields.
    lane_mask = ((reg.localmem_mask_h32 * (2 ** 32)) + reg.localmem_mask_l32)
    (n, c, h, w) = (reg[f'src_{d}size'] for d in 'nchw')
    # Source operand: strides for n/c/h come from the register, w is unit.
    opd0 = dict(address=dma_addr(reg.src_start_addr_h8, reg.src_start_addr_l32), dtype=DType(reg.src_data_format), shape=(n, c, h, w), stride=(*(reg[f'src_{d}stride'] for d in 'nch'), 1), layout=Layout.DMAstride(lane_mask))
    res0 = dict(address=dma_addr(reg.dst_start_addr_h8, reg.dst_start_addr_l32), dtype=DType(reg.src_data_format), shape=(n, w, h, c), stride=(*(reg[f'dst_{d}stride'] for d in 'nch'), 1), layout=Layout.DMAstride(lane_mask))
    attr = dict()
    # Record the mask only when some lanes are disabled.
    if (lane_mask != ((2 ** 64) - 1)):
        attr['lane_mask'] = hex(lane_mask)
    if reg.fill_constant_en:
        # Constant-fill mode: the source is an immediate; any lane-mask
        # attribute recorded above is discarded (presumably intentional —
        # confirm against the hardware spec).
        attr = {}
        opd0 = dict(address=reg.constant_value, dtype=DType(reg.src_data_format), is_const=True)
    operands = [get_value(context, **opd0)]
    results = [get_value(context, **res0)]
    return (results, attr, operands)
def example():
    """Stream a Matroska (MKV) container mixing MJPEG frames and MP3 audio
    to stdout.

    NOTE(review): this is Python 2 code (xrange, generator .next(), byte
    strings built via chr/str concatenation). Several calls below read
    ``ebml_element(,`` — the first argument (the EBML element ID) appears to
    have been lost in transit; restore the IDs before this can run.
    """
    write_ebml_header(sys.stdout, 'matroska', 2, 2)
    write_infinite_segment_header(sys.stdout)
    # Segment info block: UID, title, muxing app, writing app.
    sys.stdout.write(ebml_element(, (((('' + ebml_element(29604, random_uid())) + ebml_element(31657, 'mkvgen.py test')) + ebml_element(19840, 'mkvgen.py')) + ebml_element(22337, 'mkvgen.py'))))
    # Track entries: track 1 = V_MJPEG 640x480 video, track 2 = A_MPEG/L3 audio.
    sys.stdout.write(ebml_element(, (('' + ebml_element(174, (((((('' + ebml_element(215, ben(1))) + ebml_element(29637, ben(119))) + ebml_element(131, ben(1))) + ebml_element(21358, 'mjpeg data')) + ebml_element(134, 'V_MJPEG')) + ebml_element(224, (('' + ebml_element(176, ben(640))) + ebml_element(186, ben(480))))))) + ebml_element(174, ((((('' + ebml_element(215, ben(2))) + ebml_element(29637, ben(120))) + ebml_element(131, ben(2))) + ebml_element(21358, 'content of mp3 file')) + ebml_element(134, 'A_MPEG/L3'))))))
    mp3file = open('q.mp3', 'rb')
    # Skip the first 500 kB of the MP3 — presumably to get past leading
    # metadata; confirm.
    mp3file.read(500000)

    def mp3framesgenerator(f):
        # Scan the byte stream for MP3 frame sync words (0xFF 0xFn) and
        # yield one frame's worth of bytes per iteration.
        debt = ''
        while True:
            for i in xrange(0, (len(debt) + 1)):
                if (i >= (len(debt) - 1)):
                    debt = (debt + f.read(8192))
                    break
                if ((ord(debt[i]) == 255) and ((ord(debt[(i + 1)]) & 240) == 240) and (i > 700)):
                    if (i > 0):
                        (yield debt[0:i])
                    debt = debt[i:]
                    break
    mp3 = mp3framesgenerator(mp3file)
    mp3.next()
    for i in xrange(0, 530):
        framefile = open((('img/' + str(i)) + '.jpg'), 'rb')
        framedata = framefile.read()
        framefile.close()
        # One cluster per JPEG frame; timecode in ms is i*26*4.
        # random.random() < 1 is always true — presumably a leftover frame
        # drop experiment; confirm.
        if (random.random() < 1):
            sys.stdout.write(ebml_element(, (('' + ebml_element(231, ben(int(((i * 26) * 4))))) + ebml_element(163, ((((('' + ebml_encode_number(1)) + chr(0)) + chr(0)) + chr(0)) + framedata)))))
        # Four MP3 audio clusters per video frame, spaced 26 ms apart.
        for u in xrange(0, 4):
            mp3f = mp3.next()
            if (random.random() < 1):
                sys.stdout.write(ebml_element(, (('' + ebml_element(231, ben((((i * 26) * 4) + (u * 26))))) + ebml_element(163, ((((('' + ebml_encode_number(2)) + chr(0)) + chr(0)) + chr(0)) + mp3f)))))
def _create_data_folder(path, props):
if ('data_folder' in props):
props['name'] = (props['data_folder'] + '_regen')
data_folder = props['name']
else:
data_folder = Path(props['templates']).stem
data_folder += ('_' + datetime.now().strftime('%y%m%d-%H-%M-%S'))
props['data_folder'] = data_folder
path_with_dataset = (path / data_folder)
os.makedirs(path_with_dataset)
return path_with_dataset |
def dump_hls_lut_node5(f, name, lut, node):
    """Write one Q(...) macro line encoding *node*'s LUT truth table to *f*.

    The truth table is packed LSB-first (bit i set when table entry i is
    true) into a 64-bit hex literal.

    Fixed: removed the unused local from lut.get_node_connection_size(node),
    whose result was never read (the getter is presumed side-effect free).
    """
    table_size = lut.get_lut_table_size(node)
    tbl = 0
    for i in range(table_size):
        if lut.get_lut_table(node, i):
            tbl += (1 << i)
    f.write(('Q(%s,0x%016xLL)\n' % (make_lut_func_name(name, node), tbl)))
def precak(sim, str_sim, k=None):
    """Compute precision@k (averaged over queries) and recall@k (per query).

    sim: query-by-item similarity matrix; str_sim: binary relevance matrix.
    NOTE(review): only the precision list is averaged — the recall list is
    returned raw. Presumably np.mean(reck) was intended too; confirm with
    callers before changing.
    """
    # Indices of relevant items for each query.
    act_lists = [np.nonzero(s)[0] for s in str_sim]
    # Predicted ranking: items sorted by descending similarity.
    pred_lists = np.argsort((- sim), axis=1)
    num_cores = min(multiprocessing.cpu_count(), 8)
    nq = len(act_lists)
    # prec/rec are evaluated per query in parallel.
    preck = Parallel(n_jobs=num_cores)((delayed(prec)(act_lists[iq], pred_lists[iq], k) for iq in range(nq)))
    reck = Parallel(n_jobs=num_cores)((delayed(rec)(act_lists[iq], pred_lists[iq], k) for iq in range(nq)))
    return (np.mean(preck), reck)
def wandb_xla_logger(config: WandbConfig):
    """Build a step hook that uploads new XLA compiler dumps to W&B.

    Returns a no-op callback when config.save_xla_dumps is disabled.
    """
    # Start the watermark at the run's start time when available.
    last_mtime = (wandb.run and wandb.run.start_time) or time.time()

    def log_xla_to_wandb(step: StepInfo):
        nonlocal last_mtime
        # Upload dumps newer than the last invocation, then advance the
        # watermark.
        save_xla_dumps_to_wandb(last_mtime)
        last_mtime = time.time()

    if not config.save_xla_dumps:
        return (lambda x: None)
    return log_xla_to_wandb
_function_dispatch(_unary_op_dispatcher)
def isdecimal(a):
    """Element-wise str.isdecimal for a Unicode string array.

    Returns a bool array that is True where each element contains only
    decimal characters. Raises TypeError for byte-string arrays.
    """
    if (_use_unicode(a) != unicode_):
        # Fixed: the error message previously named 'isnumeric' although this
        # function is isdecimal.
        raise TypeError('isdecimal is only available for Unicode strings and arrays')
    return _vec_string(a, bool_, 'isdecimal')
def get_parser():
    """Build the argument parser for DualHead-Net training/testing.

    Fixed: typo 'if ture' -> 'if true' in the --save-score help text.
    """
    parser = argparse.ArgumentParser(description='DualHead-Net')
    # General run management.
    parser.add_argument('--work-dir', type=str, required=True, help='the work folder for storing results')
    parser.add_argument('--model_saved_name', default='')
    parser.add_argument('--config', default='./config/ntu-xview/test_bone.yaml', help='path to the configuration file')
    parser.add_argument('--assume-yes', action='store_true', help='Say yes to every prompt')
    parser.add_argument('--phase', default='train', help='must be train or test')
    parser.add_argument('--save-score', type=str2bool, default=False, help='if true, the classification score will be stored')
    # Logging / evaluation cadence.
    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument('--log-interval', type=int, default=100, help='the interval for printing messages (#iteration)')
    parser.add_argument('--save-interval', type=int, default=1, help='the interval for storing models (#iteration)')
    parser.add_argument('--eval-interval', type=int, default=1, help='the interval for evaluating models (#iteration)')
    parser.add_argument('--eval-start', type=int, default=1, help='The epoch number to start evaluating models')
    parser.add_argument('--print-log', type=str2bool, default=True, help='print logging or not')
    parser.add_argument('--show-topk', type=int, default=[1, 5], nargs='+', help='which Top K accuracy will be shown')
    # Data loading.
    parser.add_argument('--feeder', default='feeder.feeder', help='data loader will be used')
    parser.add_argument('--num-worker', type=int, default=16, help='the number of worker for data loader')
    parser.add_argument('--train-feeder-args', default=dict(), help='the arguments of data loader for training')
    parser.add_argument('--test-feeder-args', default=dict(), help='the arguments of data loader for test')
    # Model definition and initialization.
    parser.add_argument('--model', default=None, help='the model will be used')
    parser.add_argument('--model-args', type=dict, default=dict(), help='the arguments of model')
    parser.add_argument('--weights', default=None, help='the weights for network initialization')
    parser.add_argument('--ignore-weights', type=str, default=[], nargs='+', help='the name of weights which will be ignored in the initialization')
    parser.add_argument('--amp-opt-level', type=int, default=1, help='NVIDIA Apex AMP optimization level')
    # Optimization hyperparameters.
    parser.add_argument('--base-lr', type=float, default=0.01, help='initial learning rate')
    parser.add_argument('--step', type=int, default=[20, 40, 60], nargs='+', help='the epoch where optimizer reduce the learning rate')
    parser.add_argument('--device', type=int, default=0, nargs='+', help='the indexes of GPUs for training or testing')
    parser.add_argument('--optimizer', default='SGD', help='type of optimizer')
    parser.add_argument('--nesterov', type=str2bool, default=False, help='use nesterov or not')
    parser.add_argument('--batch-size', type=int, default=32, help='training batch size')
    parser.add_argument('--test-batch-size', type=int, default=256, help='test batch size')
    parser.add_argument('--forward-batch-size', type=int, default=16, help='Batch size during forward pass, must be factor of --batch-size')
    parser.add_argument('--start-epoch', type=int, default=0, help='start training from which epoch')
    parser.add_argument('--num-epoch', type=int, default=120, help='stop training in which epoch')
    parser.add_argument('--weight-decay', type=float, default=0.0005, help='weight decay for optimizer')
    # Checkpointing / debugging.
    parser.add_argument('--optimizer-states', type=str, help='path of previously saved optimizer states')
    parser.add_argument('--checkpoint', type=str, help='path of previously saved training checkpoint')
    parser.add_argument('--debug', type=str2bool, default=False, help='Debug mode; default false')
    return parser
def test_bogus_string():
    """np.longdouble must reject strings that are not valid numeric literals."""
    for bogus in ('spam', '1.0 flub'):
        assert_raises(ValueError, np.longdouble, bogus)
class FetchAndRestoreError(PythonCodeExecutor):
    """Context manager that saves and restores the inferior's Python error
    indicator around injected code execution, via gdb expression evaluation."""

    def __init__(self):
        self.sizeof_PyObjectPtr = gdb.lookup_type('PyObject').pointer().sizeof
        # Scratch buffer in the inferior holding three PyObject* slots:
        # exception type, value, and traceback.
        self.pointer = self.malloc((self.sizeof_PyObjectPtr * 3))
        type = self.pointer
        value = (self.pointer + self.sizeof_PyObjectPtr)
        traceback = (self.pointer + (self.sizeof_PyObjectPtr * 2))
        self.errstate = (type, value, traceback)

    def __enter__(self):
        # Move the current error indicator into the scratch slots.
        gdb.parse_and_eval(('PyErr_Fetch(%d, %d, %d)' % self.errstate))

    def __exit__(self, *args):
        if gdb.parse_and_eval('(int) PyErr_Occurred()'):
            # The injected code itself raised; print it before restoring.
            gdb.parse_and_eval('PyErr_Print()')
        # Restore the saved indicator by dereferencing the scratch slots.
        pyerr_restore = 'PyErr_Restore((PyObject *) *%d,(PyObject *) *%d,(PyObject *) *%d)'
        try:
            gdb.parse_and_eval((pyerr_restore % self.errstate))
        finally:
            # Always release the inferior-side scratch buffer.
            self.free(self.pointer)
def common_backend(backends: Collection[Backend]) -> Backend:
    """Pick the single backend shared by a collection of arrays.

    With exactly one backend, return it. With several, prefer one whose
    nplike has no known data; otherwise the mixture is an error. An empty
    collection is also an error.
    """
    n_backends = len(backends)
    if n_backends == 1:
        return next(iter(backends))
    # A data-less backend is compatible with anything; prefer it.
    for candidate in backends:
        if not candidate.nplike.known_data:
            return candidate
    if n_backends > 1:
        raise ValueError('cannot operate on arrays with incompatible backends. Use #ak.to_backend to coerce the arrays to the same backend')
    raise ValueError('no backends were given in order to determine a common backend.')
class SERes2NetBlock(nn.Module):
    """Squeeze-and-Excitation Res2Net block.

    Pipeline: 1x1 TDNN -> Res2Net -> 1x1 TDNN -> SE, with a residual
    connection (projected by a 1x1 conv when channel counts differ).
    """

    def __init__(self, in_channels, out_channels, res2net_scale=8, se_channels=128, kernel_size=1, dilation=1, activation=torch.nn.ReLU, groups=1):
        super().__init__()
        self.out_channels = out_channels
        self.tdnn1 = TDNNBlock(in_channels, out_channels, kernel_size=1, dilation=1, activation=activation, groups=groups)
        self.res2net_block = Res2NetBlock(out_channels, out_channels, res2net_scale, kernel_size, dilation)
        self.tdnn2 = TDNNBlock(out_channels, out_channels, kernel_size=1, dilation=1, activation=activation, groups=groups)
        self.se_block = SEBlock(out_channels, se_channels, out_channels)
        # Only project the residual when the channel count changes.
        self.shortcut = None
        if (in_channels != out_channels):
            self.shortcut = Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=1)

    def forward(self, x, lengths=None):
        """Apply the block; *lengths* is forwarded to the SE pooling."""
        residual = x
        if self.shortcut:
            residual = self.shortcut(x)
        x = self.tdnn1(x)
        x = self.res2net_block(x)
        x = self.tdnn2(x)
        x = self.se_block(x, lengths)
        return (x + residual)
def get_pip_packages(run_lambda):
    """Return (pip executable name, filtered `pip list` output).

    Lists only torch/numpy related packages; prefers pip3 when both pip
    and pip3 produce output.

    Fixed: `num_pips is 0` compared an int with identity, which is
    implementation-dependent; use equality.
    """

    def run_with_pip(pip):
        # Platform-appropriate filter for the package listing.
        if (get_platform() == 'win32'):
            grep_cmd = 'findstr /R "numpy torch"'
        else:
            grep_cmd = 'grep "torch\\|numpy"'
        return run_and_read_all(run_lambda, ((pip + ' list --format=legacy | ') + grep_cmd))

    if (not PY3):
        return ('pip', run_with_pip('pip'))
    out2 = run_with_pip('pip')
    out3 = run_with_pip('pip3')
    num_pips = len([x for x in [out2, out3] if (x is not None)])
    if (num_pips == 0):
        return ('pip', out2)
    if (num_pips == 1):
        if (out2 is not None):
            return ('pip', out2)
        return ('pip3', out3)
    # Both produced output; prefer pip3.
    return ('pip3', out3)
.unbox(EmptyType)
def EmptyType_unbox(typ, obj, c):
    """Unbox a Python object of EmptyType into its (field-less) native struct.

    Nothing is copied from *obj*: the struct proxy is created empty, and the
    error flag simply reflects any pending Python exception.
    """
    out = numba.core.cgutils.create_struct_proxy(typ)(c.context, c.builder)
    is_error = numba.core.cgutils.is_not_null(c.builder, c.pyapi.err_occurred())
    return numba.extending.NativeValue(out._getvalue(), is_error=is_error)
def GenerateSM80_PlanarComplexTensorOp_16816(manifest, args):
    """Register SM80 planar-complex GEMM kernels using 16x8x16 Tensor Ops.

    Emits f16/bf16 instruction variants over every layout and conjugation
    combination; requires CUDA toolkit >= 11.0.
    """
    if (not CudaToolkitVersionSatisfies(args.cuda_version, 11, 0)):
        return
    # (A, B, C) layout combinations; C is always column-major here.
    layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor)]
    # All four conjugation combinations of the A/B operands.
    complex_transforms = [(ComplexTransform.none, ComplexTransform.none), (ComplexTransform.conj, ComplexTransform.none), (ComplexTransform.none, ComplexTransform.conj), (ComplexTransform.conj, ComplexTransform.conj)]
    math_instructions = [MathInstruction([16, 8, 16], DataType.f16, DataType.f16, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add), MathInstruction([16, 8, 16], DataType.bf16, DataType.bf16, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add), MathInstruction([16, 8, 16], DataType.f16, DataType.f16, DataType.f16, OpcodeClass.TensorOp, MathOperation.multiply_add)]
    # Supported compute-capability range: SM80 and newer.
    min_cc = 80
    max_cc = 1024
    alignment_constraints = [8]
    for math_inst in math_instructions:
        tile_descriptions = [TileDescription([64, 128, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc), TileDescription([128, 64, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc)]
        data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, math_inst.element_accumulator]
        CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints, complex_transforms)
        # Also emit variants whose C/D element type matches the operand type.
        if (math_inst.element_a != math_inst.element_accumulator):
            data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, math_inst.element_accumulator]
            CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, data_type_mixed, alignment_constraints, complex_transforms)
class CNNTransformerSE(TransformerInterface):
    """Encoder-only transformer producing an activation-bounded output map.

    Optionally applies a custom (e.g. CNN) embedding module before the
    encoder; a lookahead attention mask is used when causal=True.
    """

    def __init__(self, d_model, output_size, output_activation=nn.ReLU, nhead=8, num_layers=8, d_ffn=512, dropout=0.1, activation=nn.LeakyReLU, causal=True, custom_emb_module=None, normalize_before=False):
        # Decoder is disabled (num_decoder_layers=0); no positional encoding.
        super().__init__(d_model=d_model, nhead=nhead, num_encoder_layers=num_layers, num_decoder_layers=0, d_ffn=d_ffn, dropout=dropout, activation=activation, positional_encoding=None, normalize_before=normalize_before, causal=causal)
        self.custom_emb_module = custom_emb_module
        self.output_layer = Linear(output_size, input_size=d_model, bias=False)
        self.output_activation = output_activation()

    def forward(self, x, src_key_padding_mask=None):
        """Encode *x* and project to the output size with the final activation."""
        # NOTE(review): the attention mask is stored on self each call, which
        # is not thread-safe; confirm single-threaded use before relying on it.
        if self.causal:
            self.attn_mask = get_lookahead_mask(x)
        else:
            self.attn_mask = None
        if (self.custom_emb_module is not None):
            x = self.custom_emb_module(x)
        (encoder_output, _) = self.encoder(src=x, src_mask=self.attn_mask, src_key_padding_mask=src_key_padding_mask)
        output = self.output_layer(encoder_output)
        output = self.output_activation(output)
        return output
def track_iter_progress(tasks, bar_width=50, **kwargs):
    """Yield tasks one by one while drawing a textual progress bar.

    *tasks* is either a sized iterable (len() supplies the total) or a
    tuple of (iterator, total_count).
    """
    if isinstance(tasks, tuple):
        assert (len(tasks) == 2)
        assert isinstance(tasks[0], collections_abc.Iterable)
        assert isinstance(tasks[1], int)
        task_num = tasks[1]
        iterable = tasks[0]
    elif isinstance(tasks, collections_abc.Iterable):
        task_num = len(tasks)
        iterable = tasks
    else:
        raise TypeError('"tasks" must be an iterable object or a (iterator, int) tuple')
    prog_bar = ProgressBar(task_num, bar_width)
    for task in iterable:
        yield task
        # Advance the bar after the consumer has processed the yielded task.
        prog_bar.update()
    sys.stdout.write('\n')
def get_emotion_dict(path):
    """Load a CSV of emotion annotations keyed by the track folder name.

    The key is the second-to-last component of each row's 'path'; the value
    holds that row's valence, energy and tempo.
    """
    records = pd.read_csv(path).to_dict(orient='records')
    return {
        record['path'].split('/')[-2]: {
            'valence': record['valence'],
            'energy': record['energy'],
            'tempo': record['tempo'],
        }
        for record in records
    }
def main(args):
    """Entry point of a deprecated link-management script.

    All actions are ignored: the function only parses arguments, emits
    deprecation warnings, and returns 0.

    Fixed: logging.basicConfig was called twice; the second call (with the
    user-requested level) was a no-op because the root logger was already
    configured. Logging is now configured once, after parsing.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('mode', choices=['create', 'remove'], help='The mode to use')
    parser.add_argument('-l', '--log-level', type=str, default='info', dest='log_level', choices=['debug', 'info', 'warning', 'error'])
    parser.add_argument('-H', '--hard-link', action='store_true', default=False, dest='hard_link', help='When using the create mode create hard links instead of copies')
    pargs = parser.parse_args(args)
    # argparse choices guarantee a valid name, but fall back to INFO anyway.
    logLevel = getattr(logging, pargs.log_level.upper(), logging.INFO)
    logging.basicConfig(level=logLevel)
    logging.warning('Use of this script is deprecated. The script will be removed in the future')
    logging.warning('Action "{}" ignored'.format(pargs.mode))
    if pargs.hard_link:
        logging.warning('Hard link option ignored')
    return 0
def get_adapter_spec1() -> AdapterSpec:
    """Canonical generation AdapterSpec used in tests: simple/model1, 3 train
    trials, 3 outputs, temperature 1, outputs terminated by '.'."""
    return AdapterSpec(method=ADAPT_GENERATION, instructions='Please solve the following problem.\n', max_train_instances=5, max_eval_instances=10, num_outputs=3, num_train_trials=3, model='simple/model1', model_deployment='simple/model1', temperature=1, stop_sequences=['.'])
_model_architecture('transformer_lm', 'transformer_lm_gpt2_medium')
def transformer_lm_gpt2_medium(args):
    """GPT-2 medium-like decoder defaults: 1280 dim, 36 layers, 20 heads.

    Only fills in values not already set on *args*, then applies the base
    transformer_lm defaults.
    """
    args.decoder_embed_dim = safe_getattr(args, 'decoder_embed_dim', 1280)
    args.decoder_ffn_embed_dim = safe_getattr(args, 'decoder_ffn_embed_dim', 5120)
    args.decoder_layers = safe_getattr(args, 'decoder_layers', 36)
    args.decoder_attention_heads = safe_getattr(args, 'decoder_attention_heads', 20)
    args.dropout = safe_getattr(args, 'dropout', 0.1)
    args.attention_dropout = safe_getattr(args, 'attention_dropout', 0.1)
    args.activation_fn = safe_getattr(args, 'activation_fn', 'gelu')
    base_lm_architecture(args)
class _Callables(_Constraint):
    """Constraint satisfied by any callable object."""

    def is_satisfied_by(self, val):
        # True for anything invocable: functions, methods, classes, __call__.
        return callable(val)

    def __str__(self):
        return 'a callable'
class ROILoopPool(nn.Module):
    """Module wrapper around the roi_loop_pool operator.

    RoIs must be a (num_rois, 5) tensor whose rows are
    (batch_index, x1, y1, x2, y2).
    """

    def __init__(self, output_size, spatial_scale):
        super(ROILoopPool, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale

    def forward(self, input, rois):
        assert ((rois.dim() == 2) and (rois.size(1) == 5))
        return roi_loop_pool(input, rois, self.output_size, self.spatial_scale)

    def __repr__(self):
        return '{}(output_size={}, spatial_scale={})'.format(
            self.__class__.__name__, str(self.output_size), str(self.spatial_scale))
def collect_point_data(scene_name):
    """Build and save the per-point array (geometry, instance id, nyu40 label)
    for one ScanNet scene as <scene>_new_semantic.npy.

    Relies on module-level `opt`, `remapper`, and `object_id_to_segs`.
    """
    # NOTE(review): label_map is computed but never used below — confirm.
    label_map = scannet_utils.read_label_mapping(opt.label_map_file, label_from='raw_category', label_to='nyu40id')
    data_folder = os.path.join(opt.scannet_path, scene_name)
    out_filename = os.path.join(data_folder, (scene_name + '_new_semantic.npy'))
    seg_filename = os.path.join(data_folder, ('%s_vh_clean_2.0.010000.segs.json' % scene_name))
    (seg_to_verts, num_verts) = scannet_utils.read_segmentation(seg_filename)
    ply_filename = os.path.join(data_folder, ('%s_vh_clean_2.ply' % scene_name))
    label_filename = os.path.join(data_folder, ('%s_vh_clean_2.labels.ply' % scene_name))
    points = pc_utils.read_ply_rgba_normal(ply_filename)
    plydata = PlyData().read(label_filename)
    # Remap the raw label ids through the module-level `remapper` table.
    labels = np.expand_dims(remapper[np.array(plydata.elements[0]['label'])], 1)
    instance_ids = np.zeros(shape=num_verts, dtype=np.uint32)
    # NOTE(review): `object_id_to_segs` is not defined in this function —
    # presumably a module-level global populated elsewhere; confirm.
    for (object_id, segs) in object_id_to_segs.items():
        for seg in segs:
            verts = seg_to_verts[seg]
            instance_ids[verts] = object_id
    # Sanity check: all vertices of one instance should share one label.
    # NOTE(review): range(max(...)) skips the largest instance id — confirm
    # whether the last instance should also be checked.
    for i in range(max(instance_ids)):
        index = (instance_ids == i)
        min_label = min(labels[index])
        max_label = max(labels[index])
        if (min_label != max_label):
            print('error')
    # Drop column 6 before concatenating — presumably the alpha channel.
    points = np.delete(points, 6, 1)
    data = np.concatenate((points, instance_ids, labels), 1)
    print(out_filename)
    # NOTE(review): this existence check runs after all the work above;
    # moving it to the top would avoid recomputation on re-runs.
    if os.path.exists(out_filename):
        return
    np.save(out_filename, data)
def check_structure_test(pretrain_file, args1, args2):
    """Verify copy_with_new_structure transfers weights between two models
    built with different seeds/configurations.

    Builds two models, confirms their initial weights differ, copies the
    structure, then checks weight equality and that one Shift transition
    produces matching parser states.
    """
    set_random_seed(1000)
    other = build_model(pretrain_file, *args1)
    other.eval()
    set_random_seed(1001)
    model = build_model(pretrain_file, *args2)
    model.eval()
    # Different seeds must give different initial weights, so the later
    # equality checks actually prove the copy happened.
    assert (not torch.allclose(model.delta_embedding.weight, other.delta_embedding.weight))
    assert (not torch.allclose(model.output_layers[0].weight, other.output_layers[0].weight))
    model.copy_with_new_structure(other)
    assert torch.allclose(model.delta_embedding.weight, other.delta_embedding.weight)
    assert torch.allclose(model.output_layers[0].weight, other.output_layers[0].weight)
    assert torch.allclose(torch.linalg.norm(model.word_lstm.weight_ih_l0), torch.linalg.norm(other.word_lstm.weight_ih_l0))
    # Apply a single Shift to both models and compare the resulting states.
    shift = [parse_transitions.Shift()]
    model_states = test_parse_transitions.build_initial_state(model, 1)
    model_states = parse_transitions.bulk_apply(model, model_states, shift)
    other_states = test_parse_transitions.build_initial_state(other, 1)
    other_states = parse_transitions.bulk_apply(other, other_states, shift)
    for (i, j) in zip(other_states[0].word_queue, model_states[0].word_queue):
        assert torch.allclose(i.hx, j.hx, atol=1e-07)
    for (i, j) in zip(other_states[0].transitions, model_states[0].transitions):
        assert torch.allclose(i.lstm_hx, j.lstm_hx)
        assert torch.allclose(i.lstm_cx, j.lstm_cx)
    for (i, j) in zip(other_states[0].constituents, model_states[0].constituents):
        assert ((i.value is None) == (j.value is None))
        if (i.value is not None):
            assert torch.allclose(i.value.tree_hx, j.value.tree_hx, atol=1e-07)
        assert torch.allclose(i.lstm_hx, j.lstm_hx)
        assert torch.allclose(i.lstm_cx, j.lstm_cx)
class KLEJDYKTask(KLEJTask):
    """KLEJ 'Did You Know' (DYK) binary question-answer classification task."""

    def __init__(self):
        spec = TaskSpecification('DYK', 'classification', 2, 2, 'KLEJ')
        spec.no_dev_set = True
        # DYK is evaluated with binary F1 rather than accuracy.
        spec.evaluation_metric = spec.binary_f1
        self._spec = spec

    def normalizer(self) -> TextNormalizer:
        """Return a normalizer that leaves tokenization untouched."""
        return TextNormalizer(detokenize=False)

    def create_example(self, row: Dict, normalizer: TextNormalizer, has_target: bool) -> DataExample:
        """Build a DataExample from a raw (question, answer[, target]) row."""
        question = row['question'].strip()
        answer = row['answer'].strip()
        target = row['target'].strip() if has_target else None
        return DataExample([question, answer], target)
def create_model(variant, pretrained=False, rng=None, input_shape=None, dtype=jnp.float32, **kwargs):
    """Instantiate an EfficientNet *variant* and initialize its variables.

    Returns (model, variables). kwargs override the variant's architecture
    config; pretrained weights are loaded (and filtered) when requested.
    """
    model_cfg = get_model_cfg(variant)
    model_args = model_cfg['arch_fn'](variant, **model_cfg['arch_cfg'])
    model_args.update(kwargs)
    # Resolve the squeeze-excite config into a layer factory, unless the
    # caller already supplied an explicit se_layer.
    se_args = model_args.pop('se_cfg', {})
    if ('se_layer' not in model_args):
        if ('bound_act_fn' in se_args):
            se_args['bound_act_fn'] = get_act_fn(se_args['bound_act_fn'])
        if ('gate_fn' in se_args):
            se_args['gate_fn'] = get_act_fn(se_args['gate_fn'])
        model_args['se_layer'] = partial(SqueezeExcite, **se_args)
    # Same treatment for the normalization layer.
    bn_args = model_args.pop('bn_cfg')
    if ('norm_layer' not in model_args):
        model_args['norm_layer'] = partial(batchnorm2d, **bn_args)
    model_args['act_fn'] = get_act_fn(model_args.pop('act_fn', 'relu'))
    model = EfficientNet(dtype=dtype, default_cfg=model_cfg['default_cfg'], **model_args)
    rng = (jax.random.PRNGKey(0) if (rng is None) else rng)
    (params_rng, dropout_rng) = jax.random.split(rng)
    # Default input size comes from the variant config; it is rearranged to
    # batch-1 NHWC for initialization — presumably input_size is (C, H, W);
    # confirm against the config definition.
    input_shape = (model_cfg['default_cfg']['input_size'] if (input_shape is None) else input_shape)
    input_shape = (1, input_shape[1], input_shape[2], input_shape[0])
    variables = model.init({'params': params_rng, 'dropout': dropout_rng}, jnp.ones(input_shape, dtype=dtype), training=False)
    if pretrained:
        variables = load_pretrained(variables, default_cfg=model.default_cfg, filter_fn=_filter)
    return (model, variables)
class TLPool(CornerPoolPack):
    """Corner pooling head combining top- and left-direction pooling."""

    def __init__(self, dim, conv_cfg=None, norm_cfg=None, first_kernel_size=3, kernel_size=3, corner_dim=128):
        super().__init__(dim, CornerPool('top'), CornerPool('left'), conv_cfg, norm_cfg, first_kernel_size, kernel_size, corner_dim)
class SchemeMorphism_polynomial_projective_space_field(SchemeMorphism_polynomial_projective_space):
    """Polynomial morphisms of projective space defined over a field."""

    def rational_preimages(self, Q, k=1):
        """Return the k-th rational preimages of ``Q`` under this map.

        ``Q`` may be a point of the codomain or an algebraic subscheme
        (the latter is delegated to ``Q.preimage``).  If a preimage scheme
        of positive dimension is encountered, that subscheme is returned
        instead of a list of points.

        Raises:
            ValueError: if ``k`` is not a positive integer.
            TypeError: if ``k > 1`` and self is not an endomorphism, or
                ``Q`` is not in the codomain.
            NotImplementedError: over real/complex precision fields.
        """
        k = ZZ(k)
        if (k <= 0):
            raise ValueError(('k (=%s) must be a positive integer' % k))
        from sage.schemes.projective.projective_subscheme import AlgebraicScheme_subscheme_projective
        if isinstance(Q, AlgebraicScheme_subscheme_projective):
            return Q.preimage(self, k)
        BR = self.base_ring()
        if ((k > 1) and (not self.is_endomorphism())):
            raise TypeError('must be an endomorphism of projective space')
        if (Q not in self.codomain()):
            raise TypeError('point must be in codomain of self')
        if isinstance(BR.base_ring(), (sage.rings.abc.ComplexField, sage.rings.abc.RealField, sage.rings.abc.RealIntervalField, sage.rings.abc.ComplexIntervalField)):
            raise NotImplementedError('not implemented over precision fields')
        PS = self.domain().ambient_space()
        N = PS.dimension_relative()
        L = [Q]
        for n in range(k):
            L2 = []
            for P in L:
                # The preimage of P is cut out, inside the domain, by the
                # 2x2 minors of the matrix with rows P and self.
                I = list(self.domain().defining_polynomials())
                for i in range((N + 1)):
                    for j in range((i + 1), (N + 1)):
                        I.append(((P[i] * self[j]) - (P[j] * self[i])))
                X = PS.subscheme(I)
                if (X.dimension() > 0):
                    return X
                preimages = []
                for T in X.rational_points():
                    # Discard points where the map is undefined (all
                    # coordinate polynomials vanish simultaneously).
                    if (not all(((g(tuple(T)) == 0) for g in self))):
                        preimages.append(PS(T))
                L2 = (L2 + preimages)
            L = L2
        return L

    def _number_field_from_algebraics(self):
        """Return this morphism with coefficients in a minimal number field.

        All algebraic coefficients are collected into one embedded number
        field via ``number_field_elements_from_algebraics`` and the map is
        rebuilt over that field.  Returns ``self`` when no smaller field
        exists.
        """
        from sage.rings.qqbar import number_field_elements_from_algebraics
        from sage.schemes.projective.projective_space import is_ProjectiveSpace
        # Bug fix: the original tested is_ProjectiveSpace(self.domain())
        # twice; the codomain must be checked as well.
        if (not (is_ProjectiveSpace(self.domain()) and is_ProjectiveSpace(self.codomain()))):
            raise NotImplementedError('not implemented for subschemes')
        (K_pre, C, phi) = number_field_elements_from_algebraics([c for f in self for c in f.coefficients()], minimal=True)
        if (K_pre is QQ):
            if (K_pre is self.base_ring()):
                return self
        elif ((not isinstance(self.base_ring(), sage.rings.abc.AlgebraicField)) and K_pre.is_isomorphic(self.base_ring())):
            return self
        if (K_pre is QQ):
            K = QQ
        else:
            from sage.rings.number_field.number_field import NumberField
            # Rebuild K_pre as an embedded number field so that coercions
            # into the ambient algebraic field work.
            K = NumberField(K_pre.polynomial(), embedding=phi(K_pre.gen()), name='a')
            psi = K_pre.hom([K.gen()], K)
            C = [psi(c) for c in C]
        from sage.schemes.projective.projective_space import ProjectiveSpace
        N = self.domain().dimension_relative()
        PS = ProjectiveSpace(K, N, self.domain().variable_names())
        if self.is_endomorphism():
            H = End(PS)
        else:
            PS2 = ProjectiveSpace(K, self.codomain().dimension_relative(), self.codomain().variable_names())
            H = Hom(PS, PS2)
        R = PS.coordinate_ring()
        # Rebuild each coordinate polynomial from its exponent vectors,
        # consuming the converted coefficients C in order.
        exps = [f.exponents() for f in self]
        F = []
        j = 0
        for t in exps:
            G = 0
            for e in t:
                G += (C[j] * prod([(R.gen(i) ** e[i]) for i in range((N + 1))]))
                j += 1
            F.append(G)
        return H(F)

    def base_indeterminacy_locus(self):
        """Return the base indeterminacy locus as a subscheme of the
        ambient space, cut out by the domain's defining polynomials
        together with the coordinate polynomials of the map."""
        dom = self.domain()
        AS = dom.ambient_space()
        return AS.subscheme((list(dom.defining_polynomials()) + list(self.defining_polynomials())))

    def indeterminacy_locus(self):
        """Deprecated: delegate to the new indeterminacy_locus() semantics
        by pre-composing with the identity embedding of the domain."""
        from sage.misc.superseded import deprecation
        deprecation(29145, 'The meaning of indeterminacy_locus() has changed. Read the docstring.')
        P = self.domain()
        X = P.subscheme(0)
        return (self * X.hom(P.gens(), P)).indeterminacy_locus()

    def indeterminacy_points(self, F=None, base=False):
        """Return the rational indeterminacy points of this map.

        Args:
            F: optional field over which to compute (self is base-changed).
            base: if True, use ``base_indeterminacy_locus`` instead of
                ``indeterminacy_locus``.

        Raises:
            NotImplementedError: if ``F`` is not a field.
            ValueError: if the indeterminacy scheme has positive dimension.
        """
        if (F is None):
            fcn = self
        else:
            if (not F.is_field()):
                raise NotImplementedError('indeterminacy points only implemented for fields')
            fcn = self.change_ring(F)
        if base:
            indScheme = fcn.base_indeterminacy_locus()
        else:
            indScheme = fcn.indeterminacy_locus()
        if (indScheme.dimension() > 0):
            raise ValueError('indeterminacy scheme is not dimension 0')
        indPoints = indScheme.rational_points()
        return indPoints

    def reduce_base_field(self):
        """Return this morphism defined over the smallest possible base field.

        Number-field/QQbar coefficients are delegated to
        ``_number_field_from_algebraics``; finite-field coefficients are
        descended to the subfield generated by the coefficients; algebraic
        closures of finite fields descend to the appropriate finite
        subfield.

        Raises:
            NotImplementedError: for base rings other than number fields,
                QQbar, finite fields, or their algebraic closures.
        """
        K = self.base_ring()
        if ((K in NumberFields()) or isinstance(K, sage.rings.abc.AlgebraicField)):
            return self._number_field_from_algebraics()
        if (K in FiniteFields()):
            # d is the degree of the smallest subfield containing every
            # coefficient of the map.
            c = [v for g in self for v in g.coefficients()]
            d = lcm([a.minpoly().degree() for a in c])
            if (d == 1):
                from sage.rings.finite_rings.finite_field_constructor import GF
                return self.change_ring(GF(K.characteristic()))
            if (d == K.degree()):
                return self
            for (L, phi) in K.subfields():
                if (L.degree() == d):
                    break
            R = PolynomialRing(K.prime_subfield(), 2, 'a')
            (a, b) = R.gens()
            from sage.schemes.projective.projective_space import ProjectiveSpace
            new_domain = ProjectiveSpace(L, self.domain().dimension_relative(), self.domain().variable_names())
            new_R = new_domain.coordinate_ring()
            u = phi(L.gen())
            # Express the image of the subfield generator as a polynomial
            # over the prime field, so coefficients can be rewritten via
            # elimination below.
            g = R(str(u).replace(K.variable_name(), R.variable_names()[0]))
            new_f = []
            for fi in self:
                mon = fi.monomials()
                mon_deg = [m.degrees() for m in mon]
                coef = fi.coefficients()
                new_c = []
                for c in coef:
                    w = R(str(c).replace(K.variable_name(), R.variable_names()[0]))
                    I = R.ideal([(b - g), w])
                    v = I.elimination_ideal([a]).gen(0)
                    if (v.subs({b: g}).lc() == w.lc()):
                        new_c.append(L(str(v).replace(R.variable_names()[1], L.variable_name())))
                    else:
                        # Normalize the leading coefficient when the
                        # elimination rescaled the generator.
                        new_c.append(L(str((w.lc() * v)).replace(R.variable_names()[1], L.variable_name())))
                new_f.append(sum(((new_c[i] * prod(((new_R.gen(j) ** mon_deg[i][j]) for j in range(new_R.ngens())))) for i in range(len(mon)))))
            if self.is_endomorphism():
                H = Hom(new_domain, new_domain)
            else:
                new_codomain = ProjectiveSpace(L, self.codomain().dimension_relative(), self.codomain().variable_names())
                H = Hom(new_domain, new_codomain)
            return H(new_f)
        elif isinstance(K, AlgebraicClosureFiniteField_generic):
            self.domain().coordinate_ring()
            c = [v for g in self for v in g.coefficients()]
            d = lcm([a.minpoly().degree() for a in c])
            (L, L_to_K) = K.subfield(d)
            from sage.schemes.projective.projective_space import ProjectiveSpace
            new_domain = ProjectiveSpace(L, self.domain().dimension_relative(), self.domain().variable_names())
            new_R = new_domain.coordinate_ring()
            new_f = []
            for fi in self:
                mon = fi.monomials()
                mon_deg = [m.degrees() for m in mon]
                coef = fi.coefficients()
                new_c = []
                for c in coef:
                    # Each coefficient lives in the subfield of degree
                    # equal to its minimal polynomial's degree.
                    da = c.minpoly().degree()
                    for (M, M_to_L) in L.subfields():
                        if (M.degree() == da):
                            break
                    c = M(str(c).replace(c.as_finite_field_element()[0].variable_name(), M.variable_name()))
                    new_c.append(M_to_L(c))
                new_f.append(sum([(new_c[i] * prod(((new_R.gen(j) ** mon_deg[i][j]) for j in range(new_R.ngens())))) for i in range(len(mon))]))
            if self.is_endomorphism():
                H = Hom(new_domain, new_domain)
            else:
                new_codomain = ProjectiveSpace(L, self.codomain().dimension_relative(), self.codomain().variable_names())
                H = Hom(new_domain, new_codomain)
            return H(new_f)
        raise NotImplementedError('only implemented for number fields and finite fields')

    def image(self):
        """Return the scheme-theoretic image of this morphism, computed by
        pre-composing with the identity embedding of the domain."""
        X = self.domain().subscheme(0)
        e = X.embedding_morphism()
        return (self * e).image()
def test_unknown_1():
    """Parsing the datashape 'unknown' yields an UnknownType that round-trips."""
    source = 'unknown'
    parsed = ak.types.from_datashape(source, highlevel=False)
    assert isinstance(parsed, ak.types.UnknownType)
    assert str(parsed) == source
def setup_logging(default_path=CFG_FILE, default_level=logging.INFO):
    """Configure logging from a YAML config file.

    When the file exists, its dict is fed to ``logging.config.dictConfig``
    and the result of ``__get_collect_logger(config)`` is returned.
    Otherwise basic logging is configured at ``default_level``, a warning
    is emitted, and ``(None, None)`` is returned.
    """
    cfg_path = default_path
    if not osp.exists(osp.abspath(cfg_path)):
        # Fall back to a plain root-logger configuration.
        logging.basicConfig(level=default_level)
        logging.warning("Config file '{}' cannot be found.".format(cfg_path))
        return (None, None)
    with open(cfg_path, 'r') as fh:
        cfg = yaml.safe_load(fh)
    logging.config.dictConfig(cfg)
    return __get_collect_logger(cfg)
def bracket_filter(sentence, mode='phonetic'):
    """Strip dual-transcription bracket markup from a transcript.

    Transcripts contain "(spelling)/(phonetic)" pairs; this keeps one
    member of each pair and drops the parentheses and the other member.

    Args:
        sentence: raw transcript string.
        mode: 'phonetic' keeps the second member of each pair,
            'spelling' keeps the first.

    Returns:
        The filtered sentence.

    Raises:
        ValueError: if ``mode`` is neither 'phonetic' nor 'spelling'.
    """
    if mode == 'phonetic':
        # Each '(' toggles whether we are inside the first (dropped)
        # member; characters are kept only while outside it, and ')'
        # is never emitted.
        kept = []
        inside_first = False
        for ch in sentence:
            if ch == '(':
                inside_first = not inside_first
            elif ch != ')' and not inside_first:
                kept.append(ch)
        return ''.join(kept)
    if mode == 'spelling':
        # Each ')' toggles whether we are inside the kept member; '('
        # is always dropped and characters are emitted only while the
        # flag says we are in the kept member.
        kept = []
        in_kept_member = True
        for ch in sentence:
            if ch == '(':
                continue
            if ch == ')':
                in_kept_member = not in_kept_member
                continue
            if in_kept_member:
                kept.append(ch)
        return ''.join(kept)
    raise ValueError('Unsupported mode : {0}'.format(mode))
class MLP(LasagnePowered, Serializable):
    """Feed-forward multi-layer perceptron built from Lasagne dense layers,
    with optional batch normalization after each hidden layer."""

    def __init__(self, output_dim, hidden_sizes, hidden_nonlinearity, output_nonlinearity, hidden_W_init=LI.GlorotUniform(), hidden_b_init=LI.Constant(0.0), output_W_init=LI.GlorotUniform(), output_b_init=LI.Constant(0.0), name=None, input_var=None, input_layer=None, input_shape=None, batch_norm=False):
        # Must be the first statement: captures the constructor arguments
        # from locals() so the object can be serialized/cloned.
        Serializable.quick_init(self, locals())
        name_prefix = '' if name is None else name + '_'
        if input_layer is not None:
            in_layer = input_layer
        else:
            in_layer = L.InputLayer(shape=(None,) + input_shape, input_var=input_var)
        self._layers = [in_layer]
        current = in_layer
        for layer_idx, n_units in enumerate(hidden_sizes):
            current = L.DenseLayer(current, num_units=n_units, nonlinearity=hidden_nonlinearity, name='%shidden_%d' % (name_prefix, layer_idx), W=hidden_W_init, b=hidden_b_init)
            if batch_norm:
                current = L.batch_norm(current)
            self._layers.append(current)
        out_layer = L.DenseLayer(current, num_units=output_dim, nonlinearity=output_nonlinearity, name='%soutput' % (name_prefix,), W=output_W_init, b=output_b_init)
        self._layers.append(out_layer)
        self._l_in = in_layer
        self._l_out = out_layer
        self._output = L.get_output(out_layer)
        LasagnePowered.__init__(self, [out_layer])

    def input_layer(self):
        """Return the network's input layer."""
        return self._l_in

    def output_layer(self):
        """Return the network's output layer."""
        return self._l_out

    def layers(self):
        """Return all layers, input first, output last."""
        return self._layers

    def output(self):
        """Return the symbolic output expression of the network."""
        return self._output
def compute_dists(recon_points, gt_points, eval_type='Default'):
    """Compute Chamfer and Hausdorff distances between two point sets.

    Args:
        recon_points: (N, D) array of reconstructed points.
        gt_points: (M, D) array of ground-truth points.
        eval_type: 'DeepSDF' sums the mean *squared* nearest-neighbour
            distances; any other value averages the two mean distances.

    Returns:
        Tuple ``(chamfer_dist, hausdorff_distance, cd_re2gt, cd_gt2re,
        hd_re2gt, hd_gt2re)``.
    """
    # Nearest-neighbour distance from every gt point to the reconstruction
    # and vice versa (queries run with 4 worker threads).
    re2gt_distances, _ = KDTree(recon_points).query(gt_points, workers=4)
    gt2re_distances, _ = KDTree(gt_points).query(recon_points, workers=4)
    # The Hausdorff terms are identical for both evaluation conventions.
    hd_re2gt = np.max(re2gt_distances)
    hd_gt2re = np.max(gt2re_distances)
    hausdorff_distance = np.max((hd_re2gt, hd_gt2re))
    if eval_type == 'DeepSDF':
        cd_re2gt = np.mean(re2gt_distances ** 2)
        cd_gt2re = np.mean(gt2re_distances ** 2)
        chamfer_dist = cd_re2gt + cd_gt2re
    else:
        cd_re2gt = np.mean(re2gt_distances)
        cd_gt2re = np.mean(gt2re_distances)
        chamfer_dist = 0.5 * (cd_re2gt + cd_gt2re)
    return (chamfer_dist, hausdorff_distance, cd_re2gt, cd_gt2re, hd_re2gt, hd_gt2re)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.