code stringlengths 101 5.91M |
|---|
def write_requirements(cmd, basename, filename):
    """egg_info writer: emit install_requires plus one ``[extra]`` section
    per key of extras_require into *filename* (or delete it if empty)."""
    dist = cmd.distribution
    data = six.StringIO()
    _write_requirements(data, dist.install_requires)
    extras_require = dist.extras_require or {}
    for extra in sorted(extras_require):
        # Pass the value explicitly instead of the fragile `.format(**vars())`
        # idiom, which silently depends on the local namespace.
        data.write('\n[{extra}]\n'.format(extra=extra))
        _write_requirements(data, extras_require[extra])
    cmd.write_or_delete_file('requirements', filename, data.getvalue())
def lift(x):
    """Lift *x* via its own ``.lift()`` method when available; otherwise
    rebuild it as an element of the rational power-series ring QQ[[t]]."""
    try:
        lifted = x.lift()
    except AttributeError:
        # No .lift(): reconstruct from coefficient list and precision.
        lifted = PowerSeriesRing(Rationals(), 't')(x.list(), x.prec())
    return lifted
def build_configs_and_run(config_files: Sequence[str], executable: Optional[str]=None, kwargs: Optional[Dict[str, Any]]=None) -> Tuple[List[Dict[str, Any]], Callable]:
    """Read seml config files, merge and deduplicate the experiment configs,
    and resolve the sacred ``Experiment.run`` callable of their executable.

    Args:
        config_files: Paths to seml YAML configuration files.
        executable: Optional executable path; taken from the first config if
            None. All configs must agree on the executable.
        kwargs: Extra key/value pairs written into every generated config.

    Returns:
        Tuple of (deduplicated config dicts, the experiment's run callable).

    Raises:
        ValueError: If configs name different executables, or the executable
            exposes no ``sacred.Experiment`` attribute.
    """
    # `kwargs=None` replaces the shared-mutable-default `kwargs={}` pitfall.
    if kwargs is None:
        kwargs = {}
    configs = []
    # Fixed: the old code reset `executable = None` here, silently discarding
    # a caller-supplied executable; the parameter is now honored.
    for config_file in config_files:
        seml_config, _, experiment_config = read_config(config_file)
        if executable is None:
            executable = seml_config['executable']
        elif executable != seml_config['executable']:
            raise ValueError(f"All configs must be for the same executable! Found {executable} and {seml_config['executable']}.")
        configs.extend(generate_configs(experiment_config))
    for key, value in kwargs.items():
        for config in configs:
            config[key] = value
    # Deduplicate via canonical JSON serialization (dicts are unhashable).
    deduplicate_index = {json.dumps(config, sort_keys=True): i for i, config in enumerate(configs)}
    configs = [configs[i] for i in deduplicate_index.values()]
    module_path = Path(executable)
    # Public `parent.parts` instead of the private `parents._parts[:-1]`.
    anchor_path = module_path.parent.parts
    module_name = os.path.splitext(module_path.name)[0]
    module = importlib.import_module(f'.{module_name}', '.'.join(anchor_path))
    run = None
    for attr in dir(module):
        if isinstance(getattr(module, attr), Experiment):
            run = getattr(module, attr).run
    if run is None:
        raise ValueError(f'Executable {executable} has no attribute of type `sacred.Experiment`!')
    return (configs, run)
def spyx_tmp():
    """Return (creating lazily on first call) the process-wide 'spyx' temp
    directory path; the backing directory is removed at interpreter exit."""
    global _spyx_tmp
    if _spyx_tmp:
        return _spyx_tmp
    tmpdir = tempfile.TemporaryDirectory()
    _spyx_tmp = os.path.join(tmpdir.name, 'spyx')
    # Register the bound cleanup method directly; the lambda wrapper in the
    # previous version added nothing.
    atexit.register(tmpdir.cleanup)
    return _spyx_tmp
def check_existed(sample, java_func_dir):
    """Return True if the Java function file extracted for *sample* exists.

    The sample's 'url' is expected to end with '<Class>.java#L<start>-L<end>';
    the target file is named '<project>_<Class>_<start>_<end>' inside
    *java_func_dir*, with project = 'repo' (or 'nwo') slug, '/' -> '-'.
    """
    couple = sample['url'].split('/')[-1].split('#')
    class_name = couple[0].split('.java')[0]
    line_range = couple[1].split('-')
    start = line_range[0].replace('L', '')
    end = line_range[1].replace('L', '')
    # Older samples store the repository slug under 'nwo' instead of 'repo'.
    repo_key = 'repo' if 'repo' in sample else 'nwo'
    project = sample[repo_key].replace('/', '-')
    file_name = os.path.join(java_func_dir, f'{project}_{class_name}_{start}_{end}')
    return os.path.exists(file_name)
def build_lr_scheduler(optimizer, optimizer_config, total_step):
    """Build the learning-rate scheduler for a supported optimizer type.

    All three supported optimizer types share the same scheduler
    construction, so the old three-way branch is collapsed into one call.

    Raises:
        ValueError: if ``optimizer_config.type`` is unsupported (the old code
            fell through to an UnboundLocalError on the return statement).
    """
    supported = ('rms_prop_optimizer', 'momentum_optimizer', 'adam')
    if optimizer_config.type not in supported:
        raise ValueError(f'Unsupported optimizer type: {optimizer_config.type!r}')
    return _create_learning_rate_scheduler(optimizer_config, optimizer, total_step=total_step)
def print_epoch_result(train_result, valid_result, epoch, max_epochs):
    """Print a two-line summary of one epoch: losses on line 1, dices and
    discriminator accuracies on line 2.

    NOTE(review): `args` is resolved from an enclosing/global scope and the
    d1/d2/d4/d4aux flags gate which extra metrics are appended — confirm it
    is defined before this is called.
    """
    # Width of the epoch counter, used to align "[epoch/max]" prefixes.
    epoch_len = len(str(max_epochs))
    (seg_loss, seg_dice) = (train_result['seg_loss'], train_result['seg_dice'])
    (val_dice, val_loss, val_lge_dice, val_lge_loss, test_lge_dice, test_lge_loss, valid_vert_loss) = (valid_result['val_dice'], valid_result['val_loss'], valid_result['val_lge_dice'], valid_result['val_lge_loss'], valid_result['test_lge_dice'], valid_result['test_lge_loss'], valid_result['val_vert_loss'])
    print_msg_line1 = ((f'valid_loss: {val_loss:.5f} ' + f'valid_lge_loss: {val_lge_loss:.5f} ') + f'test_lge_loss: {test_lge_loss:.5f} ')
    if (args.d4 or args.d4aux):
        # Vertex losses exist only when the d4 / d4aux heads are enabled.
        (ver_s_loss, ver_t_loss) = (train_result['ver_s_loss'], train_result['ver_t_loss'])
        print_msg_line1 += f'vertex_loss: {ver_s_loss:.5f}, vertex_t_loss: {ver_t_loss:.5f} '
    print_msg_line2 = (((f'valid_dice: {val_dice:.5f} ' + f'valid_lge_dice: {val_lge_dice:.5f} ') + f'test_lge_dice: {test_lge_dice:.5f} ') + f'valid_vert_loss: {valid_vert_loss:.5f} ')
    # Training metrics are prepended so they appear first on each line.
    print_msg_line1 = (f'train_loss: {seg_loss:.5f} ' + print_msg_line1)
    print_msg_line2 = (f'train_dice: {seg_dice:.5f} ' + print_msg_line2)
    if args.d1:
        (dis1_acc1, dis1_acc2) = (train_result['dis1_acc1'], train_result['dis1_acc2'])
        print_msg_line2 += (f'd1_acc1: {dis1_acc1: 5f} ' + f'd1_acc2: {dis1_acc2: 5f} ')
    if args.d2:
        (dis2_acc1, dis2_acc2) = (train_result['dis2_acc1'], train_result['dis2_acc2'])
        print_msg_line2 += (f'd2_acc1: {dis2_acc1: 5f} ' + f'd2_acc2: {dis2_acc2: 5f} ')
    if args.d4:
        (dis4_acc1, dis4_acc2) = (train_result['dis4_acc1'], train_result['dis4_acc2'])
        print_msg_line2 += (f'd4_acc1: {dis4_acc1: 5f} ' + f'd4_acc2: {dis4_acc2: 5f} ')
    # Prefix line 1 with "[epoch/max]"; indent line 2 to align underneath it.
    print_msg_line1 = (f'[{(epoch + 1):>{epoch_len}}/{max_epochs:>{epoch_len}}] ' + print_msg_line1)
    print_msg_line2 = ((' ' * ((2 * epoch_len) + 4)) + print_msg_line2)
    print(print_msg_line1)
    print(print_msg_line2)
class SelfAttention(SelfAttentionBase):
    """Self-attention over ``axis`` of ``source``."""

    def __call__(self, source: Tensor, *, axis: Dim) -> Tensor:
        query, keys, values = self.forward_qkv(source)
        # Attention needs the key/value sequence axis to be a distinct dim
        # from the query axis, so re-tag it with a fresh "-kv" dim.
        kv_dim = Dim(None, name=f'{axis.name}-kv')
        keys, _ = rf.replace_dim(keys, in_dim=axis, out_dim=kv_dim)
        values, _ = rf.replace_dim(values, in_dim=axis, out_dim=kv_dim)
        return self.attention(query, keys, values, kv_axis=kv_dim)
class Swish(nn.Module):
    """Swish activation: ``x * sigmoid(x)``.

    The ``inplace`` flag is accepted for API compatibility but is not used
    by this implementation.
    """

    def __init__(self, inplace):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        gate = self.sigmoid(x)
        return x * gate
_zero_only
def print_config(config: DictConfig, fields: Sequence[str]=('trainer', 'model', 'datamodule', 'train', 'eval', 'callbacks', 'logger', 'seed', 'name'), resolve: bool=True) -> None:
    """Render selected top-level config sections as a rich tree, printing it
    to stdout and mirroring it into ``config_tree.txt``."""
    style = 'dim'
    tree = rich.tree.Tree('CONFIG', style=style, guide_style=style)
    for field in fields:
        section = config.get(field)
        # DictConfig sections are rendered as YAML; everything else as repr text.
        if isinstance(section, DictConfig):
            rendered = OmegaConf.to_yaml(section, resolve=resolve)
        else:
            rendered = str(section)
        branch = tree.add(field, style=style, guide_style=style)
        branch.add(rich.syntax.Syntax(rendered, 'yaml'))
    rich.print(tree)
    with open('config_tree.txt', 'w') as fp:
        rich.print(tree, file=fp)
def _config_likelihood(forward_dict):
input_dict = {}
input_dict['conditions'] = forward_dict['prior_draws'].astype(np.float32)
input_dict['observables'] = forward_dict['sim_data'].astype(np.float32)
return input_dict |
class DOMValueEmbeddings(SimpleEmbeddings):
    """Randomly initialized embeddings over the DOM-value vocabulary."""

    def __init__(self, embed_dim):
        vocab = DOMValueVocab()
        # Uniform init in [-sqrt(3/d), sqrt(3/d)].
        limit = np.sqrt(3.0 / embed_dim)
        weights = np.random.uniform(-limit, limit, size=(len(vocab), embed_dim)).astype(np.float32)
        super().__init__(weights, vocab)
def user_config_dir(appname, roaming=True):
    """Return the per-user configuration directory for *appname*.

    Windows and macOS reuse the data directory; other platforms follow the
    XDG spec (``$XDG_CONFIG_HOME`` or ``~/.config``).
    """
    if WINDOWS:
        return user_data_dir(appname, roaming=roaming)
    if sys.platform == 'darwin':
        return user_data_dir(appname)
    base = os.getenv('XDG_CONFIG_HOME', expanduser('~/.config'))
    return os.path.join(base, appname)
.parametrize('return_fitted_val', [False, True], ids=['no_fitval', 'do_fitval'])
.parametrize('do_grad', [False, True], ids=['no_grad', 'do_grad'])
def test_jax_jit_enable_stitching(caplog, do_grad, return_fitted_val):
    """With stitching disabled or enabled, the JAX backend must (re)jit the
    objective — detected via the 'jitting function' debug log message."""
    pyhf.set_backend('jax', 'scipy', precision='64b')
    pdf = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
    data = pyhf.tensorlib.astensor([125.0] + pdf.config.auxdata)
    for do_stitch in (False, True):
        with caplog.at_level(logging.DEBUG, 'pyhf.optimize.opt_jax'):
            pyhf.infer.mle.fixed_poi_fit(1.0, data, pdf, do_grad=do_grad, do_stitch=do_stitch, return_fitted_val=return_fitted_val)
            assert 'jitting function' in caplog.text
            caplog.clear()
def test_cli_example():
    """Exercise `ti example` argument parsing for each supported flag."""
    def run_cli(argv):
        # Run TaichiMain in test mode under a patched sys.argv and return
        # the parsed arguments.
        with patch_sys_argv_helper(argv):
            cli = TaichiMain(test_mode=True)
            return cli()

    args = run_cli(['ti', 'example', 'minimal'])
    assert (args.name == 'minimal')
    # The '.py' suffix is stripped from the example name.
    args = run_cli(['ti', 'example', 'minimal.py'])
    assert (args.name == 'minimal')
    args = run_cli(['ti', 'example', '-s', 'minimal.py'])
    assert ((args.name == 'minimal') and (args.save == True))
    args = run_cli(['ti', 'example', '-p', 'minimal.py'])
    assert ((args.name == 'minimal') and (args.print == True))
    args = run_cli(['ti', 'example', '-P', 'minimal.py'])
    assert ((args.name == 'minimal') and (args.pretty_print == True))
def prepare_onnx_paddings(dim, pad):
    """Convert PyTorch pad values (innermost dim first, (begin, end) pairs)
    into ONNX order (all begins then all ends, outermost dim first)."""
    assert isinstance(dim, int)
    assert len(pad) <= dim * 2
    # Right-pad with zeros so every one of the `dim` dimensions has a pair.
    full = list(pad) + [0] * (dim * 2 - len(pad))
    # Even positions (reversed) are the begins, odd positions the ends.
    begins = full[-2::-2]
    ends = full[-1::-2]
    paddings = begins + ends
    assert len(paddings) == dim * 2
    return paddings
def get_doc(infile: TextIO):
    """Yield groups of consecutive lines from *infile*.

    A blank line closes the current group (which is yielded) and becomes the
    first line of the next group; the final partial group is always yielded.
    """
    group = []
    for line in infile:
        if not line.strip():
            yield group
            group = []
        group.append(line)
    yield group
def gen_classifier_loader(name, d):
    """Return a zero-argument loader that lazily loads the TF saved model
    for *name* and yields a callable mapping images to the configured
    output node's tensor."""
    def classifier_loader():
        TFHider()
        gpus = TFHider.tf.config.experimental.list_physical_devices('GPU')
        # Pin TF to the GPU torch is currently using so both frameworks
        # share one device.
        TFHider.tf.config.experimental.set_visible_devices(gpus[torch.cuda.current_device()], 'GPU')
        saved = TFHider.tf.saved_model.load('/data/~/vtab/' + name, tags=[])
        infer = saved.signatures['default']
        return lambda images: infer(images)[d['output_node']]
    return classifier_loader
class CheckDummiesTester(unittest.TestCase):
    """Unit tests for the check_dummies utility (backend detection, init
    parsing and dummy-object generation)."""

    def test_find_backend(self):
        # Lines that are not backend guards yield None.
        no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)
        # Single-backend guard.
        simple_backend = find_backend(' if not is_tokenizers_available():')
        self.assertEqual(simple_backend, 'tokenizers')
        # Backend names containing underscores are preserved.
        backend_with_underscore = find_backend(' if not is_tensorflow_text_available():')
        self.assertEqual(backend_with_underscore, 'tensorflow_text')
        # Multiple backends are joined with '_and_'.
        double_backend = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):')
        self.assertEqual(double_backend, 'sentencepiece_and_tokenizers')
        double_backend_with_underscore = find_backend(' if not (is_sentencepiece_available() and is_tensorflow_text_available()):')
        self.assertEqual(double_backend_with_underscore, 'sentencepiece_and_tensorflow_text')
        triple_backend = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):')
        self.assertEqual(triple_backend, 'sentencepiece_and_tokenizers_and_vision')

    def test_read_init(self):
        # read_init maps each backend to the objects guarded behind it.
        objects = read_init()
        self.assertIn('torch', objects)
        self.assertIn('tensorflow_text', objects)
        self.assertIn('sentencepiece_and_tokenizers', objects)
        # Spot-check one representative object per backend bucket.
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertModel', objects['tf'])
        self.assertIn('FlaxBertModel', objects['flax'])
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertTokenizer', objects['tensorflow_text'])
        self.assertIn('convert_slow_tokenizer', objects['sentencepiece_and_tokenizers'])

    def test_create_dummy_object(self):
        # Upper-case names become constants, lower-case names functions,
        # anything else a DummyObject class.
        dummy_constant = create_dummy_object('CONSTANT', "'torch'")
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')
        dummy_function = create_dummy_object('function', "'torch'")
        self.assertEqual(dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n")
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object('FakeClass', "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        # The generated dummy module for 'torch' must match this template.
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
def test_toarrow_BitMaskedArray():
    """Round-trip a BitMaskedArray through Arrow and compare with to_list."""
    strings = ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
    content = ak.highlevel.Array(strings).layout
    bitmask = ak.index.IndexU8(np.array([40, 34], dtype=np.uint8))
    layout = ak.contents.BitMaskedArray(bitmask, content, False, 9, False)
    assert layout.to_arrow().to_pylist() == to_list(layout)
class _SuiteFilter(object):
def __init__(self, name):
self._name = name
def matches(self, bench):
if (self._name == '*'):
return True
return (bench.suite.name == self._name) |
class TestWeightedMedoid():
    """Behavioral tests for `weighted_medoid` on a tiny 4-node graph."""

    def test_simple_example_weighted(self):
        A = torch.tensor([[0.5, 0.3, 0, 0.4], [0.3, 0.2, 0, 0], [0, 0, 0.9, 0.3], [0.4, 0, 0.4, 0.4]], dtype=torch.float32)
        x = torch.tensor([[(- 10), 10, 10], [(- 1), 1, 1], [0, 0, 0], [10, (- 10), (- 10)]], dtype=torch.float32)
        medoids = weighted_medoid(A, x)
        row_sum = A.sum(-1)
        # Each row's medoid equals its row sum times the expected node feature.
        for layer_idx, medoid_node in enumerate((1, 0, 2, 2)):
            assert torch.all(medoids[layer_idx] == row_sum[layer_idx] * x[medoid_node])

    def test_simple_example_unweighted(self):
        A = torch.tensor([[1, 1, 0, 1], [1, 1, 0, 0], [0, 0, 1, 1], [1, 0, 1, 1]], dtype=torch.float32)
        x = torch.tensor([[(- 10), 10, 10], [(- 1), 1, 1], [0, 0, 0], [10, (- 10), (- 10)]], dtype=torch.float32)
        medoids = weighted_medoid(A, x)
        row_sum = A.sum(-1)
        # With unweighted edges some rows have ties: any candidate is valid.
        for layer_idx, candidates in enumerate(((1,), (0, 1), (2, 3), (2,))):
            assert any(torch.all(medoids[layer_idx] == row_sum[layer_idx] * x[c]) for c in candidates)
def _impl(array, file, line_delimited, num_indent_spaces, num_readability_spaces, nan_string, posinf_string, neginf_string, complex_record_fields, convert_bytes, convert_other):
    """Serialize an Awkward-compatible object to JSON.

    Normalizes *array* (None/scalars, Array/Record/ArrayBuilder wrappers,
    low-level builders, Content layouts, or NumPy-like buffers) to a layout,
    renders it to JSON, then either returns the text (file is None) or writes
    it to *file* (a path, URL, or open file-like object). `line_delimited`
    switches between one JSON document and newline-delimited records.
    """
    # --- normalize every accepted input kind to a low-level layout ---------
    if ((array is None) or isinstance(array, (bool, str, bytes, Number))):
        out = ak.operations.from_iter([array], highlevel=False)
    elif isinstance(array, ak.highlevel.Array):
        out = array.layout
    elif isinstance(array, ak.highlevel.Record):
        # Slice the single record out of its backing array.
        out = array.layout.array[array.layout.at:(array.layout.at + 1)]
    elif isinstance(array, ak.highlevel.ArrayBuilder):
        out = array.snapshot().layout
    elif isinstance(array, ak.record.Record):
        out = array.array[array.at:(array.at + 1)]
    elif isinstance(array, _ext.ArrayBuilder):
        # Low-level builder: round-trip through its serialized buffers.
        (formstr, length, buffers) = array.to_buffers()
        form = ak.forms.from_json(formstr)
        out = ak.operations.from_buffers(form, length, buffers, byteorder=ak._util.native_byteorder, highlevel=False)
    elif isinstance(array, ak.contents.Content):
        out = array
    elif (hasattr(array, 'shape') and hasattr(array, 'dtype')):
        out = ak.contents.NumpyArray(array)
    else:
        raise TypeError(f'unrecognized array type: {array!r}')
    jsondata = out.to_json(nan_string=nan_string, posinf_string=posinf_string, neginf_string=neginf_string, complex_record_fields=complex_record_fields, convert_bytes=convert_bytes, behavior=behavior_of(array))
    # A truthy non-string line_delimited means "use newline".
    if (line_delimited and (not isinstance(line_delimited, str))):
        line_delimited = '\n'
    separators = ((',' + (' ' * num_readability_spaces)), (':' + (' ' * num_readability_spaces)))
    # --- choose how to open the destination, if any -------------------------
    if (file is not None):
        if isinstance(file, (str, bytes, PathLike)):
            parsed_url = urlparse(fsdecode(file))
            if ((parsed_url.scheme == '') or (parsed_url.netloc == '')):
                # Plain local path.
                def opener():
                    return open(file, 'w', encoding='utf8')
            else:
                # Remote URL: delegate to fsspec.
                import fsspec
                def opener():
                    return fsspec.open(file, 'w', encoding='utf8').open()
        else:
            # Already-open file-like object: do not close it on exit.
            def opener():
                return _NoContextManager(file)
    try:
        if line_delimited:
            # One JSON document per record, each followed by the delimiter.
            if (file is None):
                out = []
                for datum in jsondata:
                    out.append(json.dumps(datum, skipkeys=True, ensure_ascii=True, check_circular=False, allow_nan=False, indent=None, separators=separators, default=convert_other, sort_keys=False))
                    out.append(line_delimited)
                return ''.join(out)
            else:
                with opener() as openfile:
                    for datum in jsondata:
                        json.dump(datum, openfile, skipkeys=True, ensure_ascii=True, check_circular=False, allow_nan=False, indent=None, separators=separators, default=convert_other, sort_keys=False)
                        openfile.write(line_delimited)
        else:
            # Single JSON document; unwrap the one-record list for Records.
            if isinstance(array, (ak.highlevel.Record, ak.record.Record)):
                jsondata = jsondata[0]
            if (file is None):
                return json.dumps(jsondata, skipkeys=True, ensure_ascii=True, check_circular=False, allow_nan=False, indent=num_indent_spaces, separators=separators, default=convert_other, sort_keys=False)
            else:
                with opener() as openfile:
                    return json.dump(jsondata, openfile, skipkeys=True, ensure_ascii=True, check_circular=False, allow_nan=False, indent=num_indent_spaces, separators=separators, default=convert_other, sort_keys=False)
    except Exception as err:
        # NOTE(review): `raise err from err` re-chains the exception onto
        # itself — presumably a placeholder for richer error context; confirm.
        raise err from err
class SimpleFeaturePyramid(nn.Module):
    """Build a simple multi-scale pyramid from a single input feature map.

    For each scale factor a resampling stem is built (two transposed convs
    for 4x, one for 2x, identity for 1x, max-pool for 0.5x) followed by 1x1
    and 3x3 projection convs to `out_channels`.
    NOTE(review): `Conv2d` and `get_norm` appear to be detectron2-style
    helpers defined elsewhere in this project — confirm.
    """

    def __init__(self, in_channels, out_channels, scale_factors, norm='LN'):
        super(SimpleFeaturePyramid, self).__init__()
        self.scale_factors = scale_factors
        dim = in_channels
        self.stages = []
        # Convs need a bias only when no normalization layer follows them.
        use_bias = (norm == '')
        for (idx, scale) in enumerate(scale_factors):
            out_dim = dim
            if (scale == 4.0):
                # 4x upsample: two stride-2 transposed convs, dim -> dim/4.
                layers = [nn.ConvTranspose2d(dim, (dim // 2), kernel_size=2, stride=2), get_norm(norm, (dim // 2)), nn.GELU(), nn.ConvTranspose2d((dim // 2), (dim // 4), kernel_size=2, stride=2)]
                out_dim = (dim // 4)
            elif (scale == 2.0):
                # 2x upsample: one stride-2 transposed conv, dim -> dim/2.
                layers = [nn.ConvTranspose2d(dim, (dim // 2), kernel_size=2, stride=2)]
                out_dim = (dim // 2)
            elif (scale == 1.0):
                layers = []
            elif (scale == 0.5):
                # 0.5x: plain 2x2 max-pool.
                layers = [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                raise NotImplementedError(f'scale_factor={scale} is not supported yet.')
            # Shared projection head: 1x1 then 3x3 conv to out_channels.
            layers.extend([Conv2d(out_dim, out_channels, kernel_size=1, bias=use_bias, norm=get_norm(norm, out_channels)), Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=use_bias, norm=get_norm(norm, out_channels))])
            layers = nn.Sequential(*layers)
            # Register under a stable name so parameters are tracked.
            self.add_module(f'simfp_{idx}', layers)
            self.stages.append(layers)

    def forward(self, x):
        # Output keys follow the res2..resN convention, one per stage.
        out = OrderedDict()
        for (idx, stage) in enumerate(self.stages):
            out[('res' + str((idx + 2)))] = stage(x)
        return out
def rule_help(info_finding):
    """Return the best available description of a finding: the long one if
    truthy, else the short one, else an empty string."""
    descr_long = info_finding.get('descr_long')
    if descr_long:
        return descr_long
    descr_short = info_finding.get('descr_short')
    return descr_short if descr_short else ''
class SourceDistribution(AbstractDistribution):
    """A distribution prepared from source (sdist or source tree).

    When the requirement uses PEP 517 and build isolation is enabled, a
    temporary build environment is created and the build backend's declared
    requirements are installed into it before metadata generation.
    """

    def get_pkg_resources_distribution(self):
        """Return the pkg_resources Distribution for this requirement."""
        return self.req.get_dist()

    def prepare_distribution_metadata(self, finder, build_isolation):
        """Generate metadata, setting up PEP 517 build isolation if requested."""
        self.req.load_pyproject_toml()
        should_isolate = (self.req.use_pep517 and build_isolation)
        if should_isolate:
            self._setup_isolation(finder)
        self.req.prepare_metadata()

    def _setup_isolation(self, finder):
        def _raise_conflicts(conflicting_with, conflicting_reqs):
            """Raise InstallationError describing the given conflicting set."""
            format_string = 'Some build dependencies for {requirement} conflict with {conflicting_with}: {description}.'
            # Fixed: iterate the `conflicting_reqs` parameter. The old code
            # closed over the outer `conflicting` variable, so the second
            # call site (backend dependencies) could report stale conflicts.
            error_message = format_string.format(requirement=self.req, conflicting_with=conflicting_with, description=', '.join(('{} is incompatible with {}'.format(installed, wanted) for (installed, wanted) in sorted(conflicting_reqs))))
            raise InstallationError(error_message)
        pyproject_requires = self.req.pyproject_requires
        assert (pyproject_requires is not None)
        # Install the PEP 518 build-system requirements into an overlay env.
        self.req.build_env = BuildEnvironment()
        self.req.build_env.install_requirements(finder, pyproject_requires, 'overlay', 'Installing build dependencies')
        (conflicting, missing) = self.req.build_env.check_requirements(self.req.requirements_to_check)
        if conflicting:
            _raise_conflicts('PEP 517/518 supported requirements', conflicting)
        if missing:
            logger.warning('Missing build requirements in pyproject.toml for %s.', self.req)
            logger.warning('The project does not specify a build backend, and pip cannot fall back to setuptools without %s.', ' and '.join(map(repr, sorted(missing))))
        # Ask the backend which extra requirements building a wheel needs.
        with self.req.build_env:
            runner = runner_with_spinner_message('Getting requirements to build wheel')
            backend = self.req.pep517_backend
            assert (backend is not None)
            with backend.subprocess_runner(runner):
                reqs = backend.get_requires_for_build_wheel()
        (conflicting, missing) = self.req.build_env.check_requirements(reqs)
        if conflicting:
            _raise_conflicts('the backend dependencies', conflicting)
        self.req.build_env.install_requirements(finder, missing, 'normal', 'Installing backend dependencies')
def global_train_once(global_model, client_data_loaders, test_loader, FL_params):
    """Run one round of federated local training for every client.

    Each client starts from a copy of `global_model`, trains locally with
    SGD, and the list of (CPU-resident) client models is returned. The
    client marked `forget_client_idx` is skipped when retraining/unlearning
    is enabled, and its slot is popped from the returned list.
    """
    # `use_gpu * cuda_state` acts as a logical AND via multiplication.
    device = torch.device(('cuda' if (FL_params.use_gpu * FL_params.cuda_state) else 'cpu'))
    device_cpu = torch.device('cpu')
    client_models = []
    client_sgds = []
    # One model copy and one SGD optimizer per client.
    for ii in range(FL_params.N_client):
        client_models.append(copy.deepcopy(global_model))
        client_sgds.append(optim.SGD(client_models[ii].parameters(), lr=FL_params.local_lr, momentum=0.9))
    for client_idx in range(FL_params.N_client):
        # Skip the forgotten client entirely under retrain/unlearn modes.
        if ((FL_params.if_retrain and (FL_params.forget_client_idx == client_idx)) or (FL_params.if_unlearning and (FL_params.forget_client_idx == client_idx))):
            continue
        model = client_models[client_idx]
        optimizer = client_sgds[client_idx]
        model.to(device)
        model.train()
        for local_epoch in range(FL_params.local_epoch):
            for (batch_idx, (data, target)) in enumerate(client_data_loaders[client_idx]):
                data = data.to(device)
                target = target.to(device)
                optimizer.zero_grad()
                pred = model(data)
                criteria = nn.CrossEntropyLoss()
                loss = criteria(pred, target)
                loss.backward()
                optimizer.step()
            if FL_params.train_with_test:
                print('Local Client No. {}, Local Epoch: {}'.format(client_idx, local_epoch))
                test(model, test_loader)
        # Move the trained model back to CPU before aggregation.
        model.to(device_cpu)
        client_models[client_idx] = model
    # NOTE(review): `client_idx` below is the last value from the loop above;
    # the retrain branch therefore only pops when the forgotten client is the
    # last one — looks suspicious, confirm against the original intent.
    if (FL_params.if_retrain and (FL_params.forget_client_idx == client_idx)):
        client_models.pop(FL_params.forget_client_idx)
        return client_models
    elif (FL_params.if_unlearning and (FL_params.forget_client_idx in range(FL_params.N_client))):
        client_models.pop(FL_params.forget_client_idx)
        return client_models
    else:
        return client_models
class GLU(nn.Module):
    """Gated linear unit: split the input in half along `dim` and gate the
    first half with the activated second half."""

    def __init__(self, dim=(- 1), activation='sigmoid'):
        super().__init__()
        # A 'glu*' activation here would apply the gating twice.
        assert (not activation.startswith('glu'))
        self.dim = dim
        self.activation_fn = Activation(activation)

    def forward(self, x):
        half = x.size(self.dim) // 2
        value, gate = torch.split(x, half, dim=self.dim)
        return value * self.activation_fn(gate)
def register_functions(root_module):
    """Auto-generated binding hook: dispatch free-function registration to
    each ns-3 submodule's dedicated register_functions_* helper."""
    module = root_module
    register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
    register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
    register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module)
    register_functions_ns3_addressUtils(module.get_submodule('addressUtils'), root_module)
    register_functions_ns3_internal(module.get_submodule('internal'), root_module)
    register_functions_ns3_tests(module.get_submodule('tests'), root_module)
    return
class VGGBlock(torch.nn.Module):
    """VGG-style block: `num_conv_layers` conv(+optional LayerNorm)+ReLU
    stacks followed by an optional ceil-mode max-pool.

    Args:
        in_channels: channels of the block input.
        out_channels: channels produced by every conv in the block.
        conv_kernel_size: int or pair, kernel size of each conv.
        pooling_kernel_size: int or pair, kernel of the trailing max-pool.
        num_conv_layers: number of conv(+norm)+ReLU repetitions.
        input_dim: input feature dimension; required to infer output dims.
        conv_stride: stride of each conv (default 1).
        padding: explicit conv padding; defaults to kernel//2 ("same"-style).
        layer_norm: insert LayerNorm over the per-channel dim after each conv.
    """

    def __init__(self, in_channels, out_channels, conv_kernel_size, pooling_kernel_size, num_conv_layers, input_dim, conv_stride=1, padding=None, layer_norm=False):
        assert (input_dim is not None), 'Need input_dim for LayerNorm and infer_conv_output_dim'
        super(VGGBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv_kernel_size = _pair(conv_kernel_size)
        self.pooling_kernel_size = _pair(pooling_kernel_size)
        self.num_conv_layers = num_conv_layers
        # Default to "same"-style padding derived from the kernel size.
        self.padding = (tuple(((e // 2) for e in self.conv_kernel_size)) if (padding is None) else _pair(padding))
        self.conv_stride = _pair(conv_stride)
        self.layers = nn.ModuleList()
        for layer in range(num_conv_layers):
            # Only the first conv consumes in_channels; the rest chain on
            # out_channels.
            conv_op = nn.Conv2d((in_channels if (layer == 0) else out_channels), out_channels, self.conv_kernel_size, stride=self.conv_stride, padding=self.padding)
            self.layers.append(conv_op)
            if layer_norm:
                (conv_output_dim, per_channel_dim) = infer_conv_output_dim(conv_op, input_dim, (in_channels if (layer == 0) else out_channels))
                self.layers.append(nn.LayerNorm(per_channel_dim))
                input_dim = per_channel_dim
            self.layers.append(nn.ReLU())
        if (self.pooling_kernel_size is not None):
            pool_op = nn.MaxPool2d(kernel_size=self.pooling_kernel_size, ceil_mode=True)
            self.layers.append(pool_op)
            (self.total_output_dim, self.output_dim) = infer_conv_output_dim(pool_op, input_dim, out_channels)

    def forward(self, x):
        # Iterate the modules directly instead of the old
        # `for (i, _) in enumerate(...): x = self.layers[i](x)` indexing idiom.
        for layer in self.layers:
            x = layer(x)
        return x
def metrics(X, T, Ns=(2, 5, 10, 20, 30), metrics=('prec', 'recall', 'map', 'ndcg')):
    """Compute top-N ranking metrics averaged over all users in T.

    Args:
        X: dict user -> ranked list of recommended items.
        T: dict user -> iterable of ground-truth items.
        Ns: cutoff positions to evaluate at. (Now an immutable tuple default;
            the old mutable-list default was shared across calls.)
        metrics: metric names to compute ('prec', 'recall', 'map', 'ndcg').

    Returns:
        dict metric-name -> list of averaged scores, one per cutoff in Ns.
    """
    n_users = float(len(T))
    N_pos = len(Ns)
    funcs = {'prec': PRECISION, 'recall': RECALL, 'map': MAP, 'ndcg': NDCG}
    res = {m: [0.0] * N_pos for m in metrics}
    for u, t in T.items():
        t = set(t)
        if u not in X:
            # Users without recommendations still count in the denominator.
            continue
        correct = [int(r in t) for r in X[u]]
        cumsum_x = np.cumsum(correct)
        for m in metrics:
            s = funcs[m](correct, t, Ns, cumsum_x)
            for i in range(N_pos):
                res[m][i] += s[i]
    # Average the accumulated per-cutoff sums over all users.
    for m in metrics:
        for i in range(N_pos):
            res[m][i] = res[m][i] / n_users
    return res
class graphTypeSub(supermod.graphType):
    # Generated subclass hook for `graphType`; forwards directly to the
    # generateDS superclass constructor.
    def __init__(self, node=None):
        supermod.graphType.__init__(self, node)
def _batch_and_pad(sequences):
    """Pad every sequence to the batch's max length and stack the results.

    Returns a tuple of numpy arrays (embeddings, masks), one row per input
    sequence.
    """
    longest = max(len(seq) for seq in sequences)
    padded = [_pad(seq, longest) for seq in sequences]
    embeddings, masks = zip(*padded)
    return (np.array(list(embeddings)), np.array(list(masks)))
class distill():
    """Feature-distillation wrapper pairing a student with a frozen teacher.

    A small conv autoencoder is built on top of the teacher's sampled layer;
    the student's auxiliary head learns to match the (L2-normalized) encoded
    teacher features. NOTE(review): this mixes TensorFlow models with a
    torch-based GPU index elsewhere in the file — confirm conventions.
    """

    def __init__(self, args, model, teacher):
        self.args = args
        self.student = model
        self.teacher = teacher
        # Layers whose features are matched during distillation.
        self.student_layer = self.sampled_layer(args.arch, self.student)
        self.teacher_layer = self.sampled_layer(args.teacher_arch, self.teacher)
        def kwargs(**kwargs):
            return kwargs
        # Pre-configure default construction kwargs on the custom layer classes.
        setattr(tcl.Conv2d, 'pre_defined', kwargs(use_biases=False, trainable=True))
        setattr(tcl.Conv2d_transpose, 'pre_defined', kwargs(use_biases=False, trainable=True))
        setattr(tcl.BatchNorm, 'pre_defined', kwargs(activation_fn=tf.nn.leaky_relu, trainable=True))
        # Channel-compression rate of the autoencoder bottleneck.
        rate = 0.5
        D = self.teacher_layer.gamma.shape[(- 1)]
        # aux_layers[0] encodes teacher features; aux_layers[1] decodes them back.
        self.aux_layers = []
        self.aux_layers.append(tf.keras.Sequential([tcl.Conv2d([3, 3], int((D * rate)), 1, name='conv0'), tcl.BatchNorm(name='bn0'), tcl.Conv2d([3, 3], int((D * (rate ** 2))), int((1 / rate)), name='conv1'), tcl.BatchNorm(name='bn1'), tcl.Conv2d([3, 3], int((D * (rate ** 3))), 1, name='conv2'), tcl.BatchNorm(activation_fn=None, name='bn2')]))
        self.aux_layers.append(tf.keras.Sequential([tcl.Conv2d_transpose([3, 3], int((D * (rate ** 2))), 1, name='convt0'), tcl.BatchNorm(name='bnt0'), tcl.Conv2d_transpose([3, 3], int((D * rate)), int((1 / rate)), name='convt1'), tcl.BatchNorm(name='bnt1'), tcl.Conv2d_transpose([3, 3], D, 1, use_biases=True, name='convt2')]))
        # Student-side encoder mirroring the teacher encoder's shape.
        self.student.aux_layers = tf.keras.Sequential([tcl.Conv2d([3, 3], int((D * rate)), 1, name='conv0'), tcl.BatchNorm(name='bn0'), tcl.Conv2d([3, 3], int((D * (rate ** 2))), int((1 / rate)), name='conv1'), tcl.BatchNorm(name='bn1'), tcl.Conv2d([3, 3], int((D * (rate ** 3))), 1, activation_fn=None, name='conv2'), tcl.BatchNorm(activation_fn=None, name='bn2')])
        # Run dummy inputs through all networks to build their weights eagerly.
        self.teacher(np.zeros(([1] + args.input_shape), np.float32))
        self.student(np.zeros(([1] + args.input_shape), np.float32))
        self.aux_layers[1](self.aux_layers[0](self.teacher_layer.feat))
        self.student.aux_layers(self.student_layer.feat)
        # Weight of the feature-matching term in the distillation loss.
        self.beta = 100.0

    def sampled_layer(self, arch, model):
        # NOTE(review): implicitly returns None for non-WResNet archs —
        # presumably only WResNet is supported here; confirm.
        if ('WResNet' in arch):
            model.Layers['bn_last'].keep_feat = 'output'
            return model.Layers['bn_last']

    def forward(self, input, labels, target_loss):
        # Distillation loss: mean L1 distance between L2-normalized encoded
        # teacher and student features, scaled by beta.
        self.teacher(input, training=False)
        t = tf.nn.l2_normalize(self.aux_layers[0](self.teacher_layer.feat, training=False), [1, 2, 3])
        s = tf.nn.l2_normalize(self.student.aux_layers(self.student_layer.feat, training=True), [1, 2, 3])
        (B, H, W, D) = s.shape
        return (target_loss + (tf.reduce_mean(tf.abs((t - s))) * self.beta))

    def auxiliary_training(self, dataset):
        """Pre-train the teacher-side autoencoder (reconstruction + sparsity)."""
        optimizer = tf.keras.optimizers.SGD(0.001, 0.9, nesterov=True)
        train_loss = tf.keras.metrics.Mean(name='train_loss')
        teacher_aux = (self.aux_layers[0].trainable_variables + self.aux_layers[1].trainable_variables)
        # NOTE(review): the next line looks like a mangled
        # `@tf.function(experimental_compile=True)` decorator lost in
        # extraction — confirm against the upstream source.
        (experimental_compile=True)
        def training(images):
            self.teacher(images)
            with tf.GradientTape() as tape:
                tape.watch(teacher_aux)
                feat = self.teacher_layer.feat
                enc = tf.nn.leaky_relu(self.aux_layers[0](feat))
                dec = self.aux_layers[1](enc)
                dec = self.teacher_layer.activation_fn(dec)
                (B, H, W, D) = dec.shape
                # Reconstruction L1 plus a tiny sparsity penalty on the code.
                loss = ((((tf.reduce_sum(tf.abs((feat - dec))) / B) / H) / W) + ((((tf.reduce_sum(tf.abs(enc)) / B) / H) / W) * 1e-06))
            gradients = tape.gradient(loss, teacher_aux)
            # Manual weight decay folded into the gradients.
            gradients = [(g + (v * self.args.weight_decay)) for (g, v) in zip(gradients, teacher_aux)]
            optimizer.apply_gradients(zip(gradients, teacher_aux))
            train_loss.update_state(loss)
        # Train for 30% of the main training epochs.
        for e in range(int((self.args.train_epoch * 0.3))):
            for (images, _) in dataset:
                training(images)
            print(('Aux Epoch: %d: loss: %.4f' % (e, train_loss.result())))
            train_loss.reset_states()
_grad()
def generate_images_from_latents(H, all_latents, embedding_weight, generator):
    """Decode batches of discrete latents to images with `generator` and save
    each batch to H.log_dir.

    NOTE(review): the stray `_grad()` line preceding this function looks like
    a mangled `@torch.no_grad()` decorator — confirm against the original.
    """
    all_latents = all_latents.cuda()
    generator = generator.cuda()
    for (idx, latents) in tqdm(list(enumerate(torch.split(all_latents, H.batch_size)))):
        latents_one_hot = latent_ids_to_onehot(latents, H.latent_shape, H.codebook_size).cuda()
        # one-hot @ codebook, then NHWC -> NCHW for the generator input.
        q = torch.matmul(latents_one_hot, embedding_weight).view(latents_one_hot.size(0), H.latent_shape[1], H.latent_shape[2], H.emb_dim).permute(0, 3, 1, 2).contiguous()
        gen_images = generator(q)
        save_images(gen_images.detach().cpu(), 'sample', idx, H.log_dir, save_individually=True)
    # Drop the local reference so the GPU copy can be freed.
    del generator
.torch
def test_prediction_bert4rec(item_user_sequential_dataset, train_loader):
    # Train a tiny Bert4Rec for one epoch, then check that predicting over
    # the prediction dataset yields one output per entry with the expected
    # per-row shape.
    pred = Bert4RecPredictionDataset(item_user_sequential_dataset, max_sequence_length=5)
    pred_loader = torch.utils.data.DataLoader(pred)
    trainer = L.Trainer(max_epochs=1)
    model = Bert4Rec(tensor_schema=item_user_sequential_dataset._tensor_schema, max_seq_len=5, hidden_size=64)
    trainer.fit(model, train_loader)
    predicted = trainer.predict(model, pred_loader)
    assert (len(predicted) == len(pred))
    # NOTE(review): (1, 6) is presumably (batch, item-vocabulary size) for
    # this fixture — confirm against the dataset fixture.
    assert (predicted[0].size() == (1, 6))
def make_algo():
    # Factory: build a fresh DDPG agent wired to a new Logger.
    # NOTE(review): `log_dir`, `STATE_SHAPE`, `ACTION_SHAPE` and `args` are
    # free variables resolved from an enclosing/global scope — confirm they
    # are defined wherever this factory is invoked.
    logger = Logger(log_dir, {})
    algo = DDPG(state_shape=STATE_SHAPE, action_shape=ACTION_SHAPE, device=args.device, seed=args.seed, logger=logger)
    return algo
class PyBacktrace(gdb.Command):
    """gdb command `py-bt`: print the Python-level traceback of the
    currently selected thread, innermost frame first."""

    def __init__(self):
        gdb.Command.__init__(self, 'py-bt', gdb.COMMAND_STACK, gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        current = Frame.get_selected_python_frame()
        if not current:
            print('Unable to locate python frame')
            return
        sys.stdout.write('Traceback (most recent call first):\n')
        # Walk outward from the innermost frame, printing Python frames only.
        while current:
            if current.is_python_frame():
                current.print_traceback()
            current = current.older()
class PLMSSampler(object):
    def __init__(self, model, schedule='linear', **kwargs):
        # Wrap a trained diffusion model for PLMS sampling.
        super().__init__()
        self.model = model
        # Total DDPM training timesteps; sampling subsamples these later.
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule
def register_buffer(self, name, attr):
if (type(attr) == torch.Tensor):
if (attr.device != torch.device('cuda')):
attr = attr.to(torch.device('cuda'))
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize='uniform', ddim_eta=0.0, verbose=True):
if (ddim_eta != 0):
raise ValueError('ddim_eta must be 0 for PLMS')
self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
alphas_cumprod = self.model.alphas_cumprod
assert (alphas_cumprod.shape[0] == self.ddpm_num_timesteps), 'alphas have to be defined for each timestep'
to_torch = (lambda x: x.clone().detach().to(torch.float32).to(self.model.device))
self.register_buffer('betas', to_torch(self.model.betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt((1.0 - alphas_cumprod.cpu()))))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log((1.0 - alphas_cumprod.cpu()))))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt((1.0 / alphas_cumprod.cpu()))))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(((1.0 / alphas_cumprod.cpu()) - 1))))
(ddim_sigmas, ddim_alphas, ddim_alphas_prev) = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta, verbose=verbose)
self.register_buffer('ddim_sigmas', ddim_sigmas)
self.register_buffer('ddim_alphas', ddim_alphas)
self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt((1.0 - ddim_alphas)))
sigmas_for_original_sampling_steps = (ddim_eta * torch.sqrt((((1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod)) * (1 - (self.alphas_cumprod / self.alphas_cumprod_prev)))))
self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
_grad()
def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0.0, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None, dynamic_threshold=None, **kwargs):
if (conditioning is not None):
if isinstance(conditioning, dict):
cbs = conditioning[list(conditioning.keys())[0]].shape[0]
if (cbs != batch_size):
print(f'Warning: Got {cbs} conditionings but batch-size is {batch_size}')
elif (conditioning.shape[0] != batch_size):
print(f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}')
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
(C, H, W) = shape
size = (batch_size, C, H, W)
print(f'Data shape for PLMS sampling is {size}')
(samples, intermediates) = self.plms_sampling(conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold)
return (samples, intermediates)
_grad()
def plms_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, dynamic_threshold=None):
device = self.model.betas.device
b = shape[0]
if (x_T is None):
img = torch.randn(shape, device=device)
else:
img = x_T
if (timesteps is None):
timesteps = (self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps)
elif ((timesteps is not None) and (not ddim_use_original_steps)):
subset_end = (int((min((timesteps / self.ddim_timesteps.shape[0]), 1) * self.ddim_timesteps.shape[0])) - 1)
timesteps = self.ddim_timesteps[:subset_end]
intermediates = {'x_inter': [img], 'pred_x0': [img]}
time_range = (list(reversed(range(0, timesteps))) if ddim_use_original_steps else np.flip(timesteps))
total_steps = (timesteps if ddim_use_original_steps else timesteps.shape[0])
print(f'Running PLMS Sampling with {total_steps} timesteps')
iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
old_eps = []
for (i, step) in enumerate(iterator):
index = ((total_steps - i) - 1)
ts = torch.full((b,), step, device=device, dtype=torch.long)
ts_next = torch.full((b,), time_range[min((i + 1), (len(time_range) - 1))], device=device, dtype=torch.long)
if (mask is not None):
assert (x0 is not None)
img_orig = self.model.q_sample(x0, ts)
img = ((img_orig * mask) + ((1.0 - mask) * img))
outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, old_eps=old_eps, t_next=ts_next, dynamic_threshold=dynamic_threshold)
(img, pred_x0, e_t) = outs
old_eps.append(e_t)
if (len(old_eps) >= 4):
old_eps.pop(0)
if callback:
callback(i)
if img_callback:
img_callback(pred_x0, i)
if (((index % log_every_t) == 0) or (index == (total_steps - 1))):
intermediates['x_inter'].append(img)
intermediates['pred_x0'].append(pred_x0)
return (img, intermediates)
_grad()
def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, old_eps=None, t_next=None, dynamic_threshold=None):
(b, *_, device) = (*x.shape, x.device)
def get_model_output(x, t):
if ((unconditional_conditioning is None) or (unconditional_guidance_scale == 1.0)):
e_t = self.model.apply_model(x, t, c)
else:
x_in = torch.cat(([x] * 2))
t_in = torch.cat(([t] * 2))
c_in = torch.cat([unconditional_conditioning, c])
(e_t_uncond, e_t) = self.model.apply_model(x_in, t_in, c_in).chunk(2)
e_t = (e_t_uncond + (unconditional_guidance_scale * (e_t - e_t_uncond)))
if (score_corrector is not None):
assert (self.model.parameterization == 'eps')
e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
return e_t
alphas = (self.model.alphas_cumprod if use_original_steps else self.ddim_alphas)
alphas_prev = (self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev)
sqrt_one_minus_alphas = (self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas)
sigmas = (self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas)
def get_x_prev_and_pred_x0(e_t, index):
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
pred_x0 = ((x - (sqrt_one_minus_at * e_t)) / a_t.sqrt())
if quantize_denoised:
(pred_x0, _, *_) = self.model.first_stage_model.quantize(pred_x0)
if (dynamic_threshold is not None):
pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
dir_xt = (((1.0 - a_prev) - (sigma_t ** 2)).sqrt() * e_t)
noise = ((sigma_t * noise_like(x.shape, device, repeat_noise)) * temperature)
if (noise_dropout > 0.0):
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
x_prev = (((a_prev.sqrt() * pred_x0) + dir_xt) + noise)
return (x_prev, pred_x0)
e_t = get_model_output(x, t)
if (len(old_eps) == 0):
(x_prev, pred_x0) = get_x_prev_and_pred_x0(e_t, index)
e_t_next = get_model_output(x_prev, t_next)
e_t_prime = ((e_t + e_t_next) / 2)
elif (len(old_eps) == 1):
e_t_prime = (((3 * e_t) - old_eps[(- 1)]) / 2)
elif (len(old_eps) == 2):
e_t_prime = ((((23 * e_t) - (16 * old_eps[(- 1)])) + (5 * old_eps[(- 2)])) / 12)
elif (len(old_eps) >= 3):
e_t_prime = (((((55 * e_t) - (59 * old_eps[(- 1)])) + (37 * old_eps[(- 2)])) - (9 * old_eps[(- 3)])) / 24)
(x_prev, pred_x0) = get_x_prev_and_pred_x0(e_t_prime, index)
return (x_prev, pred_x0, e_t) |
def main_func():
    """CLI entry point: parse options and dispatch to the requested command.

    Returns 0 on success, -1 for an unknown command or when any setting in the
    result dict reported 'failed'.
    """
    arg_parser = _shell_options()
    args, remaining_args = arg_parser.parse_known_args()
    num_cores = get_cpu_info()['count']

    result = {}
    command = args.command
    if command == 'minimize':
        result = _minimize_noise(num_cores, args.use_nice, args.use_shielding, args.for_profiling)
    elif command == 'restore':
        result = _restore_standard_settings(num_cores, args.use_shielding)
    elif command == 'exec':
        _exec(num_cores, args.use_nice, args.use_shielding, remaining_args)
    elif command == 'kill':
        _kill(remaining_args[0])
    elif command == 'test':
        _test(num_cores)
    else:
        arg_parser.print_help()
        return (- 1)

    # Report what was (or was not) changed; exec/kill/test leave result empty.
    if args.json:
        print(json.dumps(result))
    else:
        print('Setting scaling_governor: ', result.get('scaling_governor', None))
        print('Setting no_turbo: ', result.get('no_turbo', False))
        print('Setting perf_event_max_sample_rate: ', result.get('perf_event_max_sample_rate', None))
        print('')
        print('Enabled core shielding: ', result.get('shielding', False))
        print('')
        print('Can set niceness: ', result.get('can_set_nice', False))
    return (- 1) if ('failed' in result.values()) else 0
def make_palette(num_classes):
    """Build a PASCAL-VOC-style color palette.

    Each class index k is mapped to an RGB color by distributing the bits of
    k across the three channels, filling each 8-bit channel from its most
    significant bit downwards.

    Fix: the loop used Python-2-only ``xrange``, which raises NameError on
    Python 3; ``range`` behaves identically here.

    Args:
        num_classes: number of class colors to generate.

    Returns:
        uint8 ndarray of shape (num_classes, 3).
    """
    palette = np.zeros((num_classes, 3), dtype=np.uint8)
    for k in range(num_classes):
        label = k
        shift = 0
        while label:
            # Take one bit per channel from the label and place it at
            # bit position (7 - shift) of that channel.
            palette[(k, 0)] |= (((label >> 0) & 1) << (7 - shift))
            palette[(k, 1)] |= (((label >> 1) & 1) << (7 - shift))
            palette[(k, 2)] |= (((label >> 2) & 1) << (7 - shift))
            label >>= 3
            shift += 1
    return palette
def is_reach_goal(context, goal):
    """Return True when *goal* is reached in *context*.

    The goal counts as reached if it appears verbatim among the keyword
    tokens, or if any candidate token has lexical similarity > 0.9 to it.
    """
    tokens = kw_tokenize(context)
    if goal in tokens:
        return True
    for token in tokens:
        if not is_candiword(token):
            continue
        if calculate_linsim(token, goal) > 0.9:
            return True
    return False
def parse_subj_obj(f):
    """Parse a feature string of the form 'lemma#POS_lemma#POS:score'.

    Returns (subj_lemma, subj_pos, obj_lemma, obj_pos, score) with the score
    converted to float.
    """
    pair, raw_score = f.split(':')
    subj, obj = pair.split('_')
    subj_lemma, subj_pos = subj.split('#')
    obj_lemma, obj_pos = obj.split('#')
    return (subj_lemma, subj_pos, obj_lemma, obj_pos, float(raw_score))
class ReshapeModel(torch.nn.Module):
    """Module that flattens any input tensor into a single row (1, -1)."""

    def __init__(self):
        super(ReshapeModel, self).__init__()

    def forward(self, x):
        # -1 lets reshape infer the flattened length from x's element count.
        return torch.reshape(x, [1, -1])
def test_examples_from_cli(app, testdir, cli, base_url, schema_with_examples):
    """All explicit (schema-example-based) test cases should pass via the CLI."""
    raw = schema_with_examples.raw_schema
    app['config'].update({'schema_data': raw})
    schema_file = testdir.makefile('.yaml', schema=yaml.dump(raw))
    result = cli.run(str(schema_file), f'--base-url={base_url}', '--hypothesis-phases=explicit')
    assert (result.exit_code == ExitCode.OK), result.stdout
    # Find the summary line for the not_a_server_error check.
    not_a_server_line = next(line for line in result.stdout.split('\n') if 'not_a_server_error' in line)
    assert ('3 / 3 passed' in not_a_server_line)
def _create_learning_rate_scheduler(optimizer, learning_rate_config, total_step):
    """Build an LR scheduler from config.

    Supported ``learning_rate_config.type`` values: 'multi_phase',
    'one_cycle', 'exponential_decay', 'manual_stepping'.

    Fix: the original terminated the dispatch chain with
    ``elif (lr_scheduler is None): raise ...`` -- a tautology, since
    ``lr_scheduler`` is always None when that branch is reached.  A plain
    ``else`` expresses the same behavior (unknown type -> ValueError)
    without the misleading condition.

    Args:
        optimizer: the optimizer to schedule.
        learning_rate_config: config object with ``type`` plus type-specific
            fields.
        total_step: total number of training steps.

    Returns:
        A scheduler instance from ``lsf``.

    Raises:
        ValueError: if the configured type is not supported.
    """
    learning_rate_type = learning_rate_config.type
    config = learning_rate_config
    if (learning_rate_type == 'multi_phase'):
        # Each phase supplies its own LR and momentum lambda starting point.
        lr_phases = []
        mom_phases = []
        for phase_cfg in config.phases:
            lr_phases.append((phase_cfg.start, phase_cfg.lambda_func))
            mom_phases.append((phase_cfg.start, phase_cfg.momentum_lambda_func))
        lr_scheduler = lsf.LRSchedulerStep(optimizer, total_step, lr_phases, mom_phases)
    elif (learning_rate_type == 'one_cycle'):
        lr_scheduler = lsf.OneCycle(optimizer, total_step, config.lr_max, config.moms, config.div_factor, config.pct_start)
    elif (learning_rate_type == 'exponential_decay'):
        lr_scheduler = lsf.ExponentialDecay(optimizer, total_step, config.initial_learning_rate, config.decay_length, config.decay_factor, config.staircase)
    elif (learning_rate_type == 'manual_stepping'):
        lr_scheduler = lsf.ManualStepping(optimizer, total_step, config.boundaries, config.rates)
    else:
        raise ValueError(('Learning_rate %s not supported.' % learning_rate_type))
    return lr_scheduler
def split_underscores(tree):
    """Recursively split multi-word (underscore-joined) leaf tokens into
    separate preterminal children grouped under a phrase label.

    Preterminals whose word contains '_' are replaced by a new subtree whose
    label comes from WORD_TO_PHRASE (keyed by the preterminal's label with any
    '-' suffix stripped) and whose children are one preterminal per piece.
    """
    assert (not tree.is_leaf()), 'Should never reach a leaf in this code path'
    if tree.is_preterminal():
        # Preterminals are returned unchanged at this level; splitting is
        # decided by the parent, which can see the word.
        return tree
    children = tree.children
    new_children = []
    for child in children:
        if child.is_preterminal():
            if ('_' not in child.children[0].label):
                new_children.append(child)
                continue
            if (child.label.split('-')[0] not in WORD_TO_PHRASE):
                # No phrase label is known for this POS tag; refuse to guess.
                raise ValueError('SPLITTING {}'.format(child))
            pieces = []
            for piece in child.children[0].label.split('_'):
                if (len(piece) == 0):
                    raise ValueError('A word started or ended with _')
                # NOTE(review): Tree(child.label, Tree(piece)) passes a single
                # Tree rather than a list of children -- presumably Tree
                # accepts either; confirm against the Tree constructor.
                pieces.append(Tree(child.label, Tree(piece)))
            new_children.append(Tree(WORD_TO_PHRASE[child.label.split('-')[0]], pieces))
        else:
            new_children.append(split_underscores(child))
    return Tree(tree.label, new_children)
class Posets(Category):
    """The category of posets: sets equipped with a partial order.

    NOTE(review): the bare ``_method`` / ``_method(optional=True)`` lines look
    like mangled ``@abstract_method`` decorators lost during extraction, and
    several abstract methods below originally had docstring-only bodies;
    docstrings have been restored so the class parses -- confirm against the
    original Sage source.
    """
    _method
    def super_categories(self):
        return [Sets()]

    def example(self, choice=None):
        """Return an example poset; 'facade' selects the facade example."""
        from sage.categories.examples.posets import FiniteSetsOrderedByInclusion, PositiveIntegersOrderedByDivisibilityFacade
        if (choice == 'facade'):
            return PositiveIntegersOrderedByDivisibilityFacade()
        else:
            return FiniteSetsOrderedByInclusion()

    def __iter__(self):
        """Iterate through all finite posets, by increasing number of elements."""
        from sage.combinat.posets.posets import FinitePosets_n
        n = 0
        while True:
            (yield from FinitePosets_n(n))
            n += 1

    Finite = LazyImport('sage.categories.finite_posets', 'FinitePosets')

    class ParentMethods():
        _method
        def le(self, x, y):
            """Abstract: return whether x <= y in the partial order."""

        def lt(self, x, y):
            """Strict comparison, derived from ``le``."""
            return (self.le(x, y) and (x != y))

        def ge(self, x, y):
            return self.le(y, x)

        def gt(self, x, y):
            return self.lt(y, x)

        _method(optional=True)
        def upper_covers(self, x):
            """Abstract (optional): the elements covering x."""

        _method(optional=True)
        def lower_covers(self, x):
            """Abstract (optional): the elements covered by x."""

        _method(optional=True)
        def order_ideal(self, elements):
            """Abstract (optional): the order ideal generated by ``elements``."""

        _method(optional=True)
        def order_filter(self, elements):
            """Abstract (optional): the order filter generated by ``elements``."""

        def directed_subset(self, elements, direction):
            """Order filter ('up') or order ideal ('down') of ``elements``."""
            if (direction == 'up'):
                return self.order_filter(elements)
            if (direction == 'down'):
                return self.order_ideal(elements)
            raise ValueError("direction must be either 'up' or 'down'")

        def principal_order_ideal(self, x):
            return self.order_ideal([x])
        principal_lower_set = principal_order_ideal

        def principal_order_filter(self, x):
            return self.order_filter([x])
        principal_upper_set = principal_order_filter

        def order_ideal_toggle(self, I, v):
            """Toggle v in the order ideal I, if the result is still an ideal."""
            if (v not in I):
                # v can be added only if all its lower covers are already in I.
                if all(((u in I) for u in self.lower_covers(v))):
                    from sage.sets.set import Set
                    return I.union(Set({v}))
            elif all(((u not in I) for u in self.upper_covers(v))):
                # v can be removed only if none of its upper covers are in I.
                from sage.sets.set import Set
                return I.difference(Set({v}))
            return I

        def order_ideal_toggles(self, I, vs):
            """Apply a sequence of toggles to the order ideal I."""
            for v in vs:
                I = self.order_ideal_toggle(I, v)
            return I

        def is_order_ideal(self, o):
            return all((((u in self) and all(((x in o) for x in self.lower_covers(u)))) for u in o))

        def is_order_filter(self, o):
            return all((((u in self) and all(((x in o) for x in self.upper_covers(u)))) for u in o))

        def is_chain_of_poset(self, o, ordered=False):
            """Whether o is a chain; with ordered=True it must also be sorted
            strictly increasingly."""
            list_o = list(o)
            if ordered:
                return all((self.lt(a, b) for (a, b) in zip(list_o, list_o[1:])))
            else:
                # Every pair must be comparable in one direction or the other.
                for (i, x) in enumerate(list_o):
                    for y in list_o[:i]:
                        if ((not self.le(x, y)) and (not self.gt(x, y))):
                            return False
                return True

        def is_antichain_of_poset(self, o):
            return all(((not self.lt(x, y)) for x in o for y in o))

    CartesianProduct = LazyImport('sage.combinat.posets.cartesian_product', 'CartesianProductPoset')

    class ElementMethods():
        pass
class MyModule():
    """Tiny module holding a deterministic (seed 0) 3x3 random weight."""

    # Class-level lock shared by all instances.
    lock = threading.Lock()

    def __init__(self):
        gen = torch.Generator()
        gen.manual_seed(0)
        self.w = torch.rand((3, 3), requires_grad=True, generator=gen)

    def forward(self, t1):
        """Matrix-multiply the stored weight with *t1*."""
        return torch.mm(self.w, t1)

    def get_w(self):
        """Expose the weight tensor."""
        return self.w
def upload_resource(file_path, oss_obj_name, bucket):
    """Upload *file_path* to OSS as *oss_obj_name* and return its public URL.

    Fix: the original URL format string was corrupted in the source
    (unterminated literal); it is reconstructed here as the standard
    '<scheme>://<bucket>.<endpoint>/<object>' Aliyun OSS layout.
    NOTE(review): confirm the scheme (http vs https) against the original.

    Args:
        file_path: local path of the file to upload.
        oss_obj_name: object key under which to store the file.
        bucket: OSS bucket client exposing ``bucket_name``, ``endpoint`` and
            ``put_object_from_file``.

    Returns:
        The object's URL as a string.
    """
    resource_oss_url = 'https://%s.%s/%s' % (bucket.bucket_name, bucket.endpoint, oss_obj_name)
    bucket.put_object_from_file(oss_obj_name, file_path)
    return resource_oss_url
def _is_path(name_or_buffer):
return (isinstance(name_or_buffer, str) or ((sys.version_info[0] == 3) and isinstance(name_or_buffer, pathlib.Path))) |
def trim_midi(mid_orig, start, end, strict=True):
    """Return a copy of *mid_orig* restricted to the time window [start, end].

    With strict=True only notes lying fully inside the window are kept; with
    strict=False any note overlapping the window (beyond a small epsilon) is
    kept and clipped to the window edges.  Note times are shifted so the
    window starts at 0, and instruments left without notes are dropped.
    """
    eps = 0.001
    trimmed = deepcopy(mid_orig)
    for ins in trimmed.instruments:
        if strict:
            kept = [n for n in ins.notes if n.start >= start and n.end <= end]
        else:
            kept = [n for n in ins.notes if n.end > (start + eps) and n.start < (end - eps)]
        for n in kept:
            if not strict:
                # Clip overlapping notes to the window before shifting.
                n.start = max(start, n.start)
                n.end = min(end, n.end)
            n.start -= start
            n.end -= start
        ins.notes = kept
    trimmed.instruments = [ins for ins in trimmed.instruments if ins.notes]
    return trimmed
def add_wd_without_bias(wd, scope=None):
    """Attach L2 weight decay to every trainable multi-dimensional variable.

    Bias-like variables (rank <= 1) are skipped.  Decay terms are added to
    the 'losses' collection.  Returns the number of decayed variables.
    """
    scope = scope or tf.get_variable_scope().name
    decayed = 0
    with tf.name_scope('weight_decay'):
        for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope):
            # Rank-0/1 variables are treated as biases and get no decay.
            if len(var.get_shape().as_list()) <= 1:
                continue
            decayed += 1
            loss_name = '{}-wd'.format('-'.join(str(var.op.name).split('/')))
            tf.add_to_collection('losses', tf.multiply(tf.nn.l2_loss(var), wd, name=loss_name))
    return decayed
def p_property_decl(s):
    """Parse a ``property`` declaration from the token stream *s*.

    Consumes the keyword and the property name, then parses the indented
    suite (capturing its docstring separately) and returns a PropertyNode.
    """
    pos = s.position()
    s.next()  # consume the 'property' keyword token
    name = p_ident(s)
    (doc, body) = p_suite_with_docstring(s, Ctx(level='property'), with_doc_only=True)
    return Nodes.PropertyNode(pos, name=name, doc=doc, body=body)
def plot_semilogy(funcs, *args, **kwds):
    """Plot *funcs* with a logarithmic y-axis (default base 10).

    NOTE(review): a stray ``(base=10)`` line preceded this function in the
    original -- a syntax error, apparently a mangled ``@options(base=10)``
    decorator.  Its evident effect (supplying ``base=10`` as a default
    keyword) is reproduced inline below; confirm against the original source.
    """
    kwds.setdefault('base', 10)
    return plot(funcs, *args, scale='semilogy', **kwds)
class LSTM(RNNBase):
    """Multi-layer LSTM built on RNNBase, dispatching to ``_VF.lstm``.

    NOTE(review): the bare ``_jit_internal._overload_method`` lines below look
    like mangled ``@torch._jit_internal._overload_method`` decorators for the
    TorchScript overload stubs -- confirm against the original source.
    """

    def __init__(self, *args, **kwargs):
        super(LSTM, self).__init__('LSTM', *args, **kwargs)

    def check_forward_args(self, input: Tensor, hidden: Tuple[(Tensor, Tensor)], batch_sizes: Optional[Tensor]):
        # Validate input shape plus both halves (h, c) of the hidden state.
        self.check_input(input, batch_sizes)
        expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
        self.check_hidden_size(hidden[0], expected_hidden_size, 'Expected hidden[0] size {}, got {}')
        self.check_hidden_size(hidden[1], expected_hidden_size, 'Expected hidden[1] size {}, got {}')

    def permute_hidden(self, hx: Tuple[(Tensor, Tensor)], permutation: Optional[Tensor]) -> Tuple[(Tensor, Tensor)]:
        # Reorder both hidden tensors along the batch dim for packed inputs.
        if (permutation is None):
            return hx
        return (apply_permutation(hx[0], permutation), apply_permutation(hx[1], permutation))

    _jit_internal._overload_method
    def forward(self, input: Tensor, hx: Optional[Tuple[(Tensor, Tensor)]]=None) -> Tuple[(Tensor, Tuple[(Tensor, Tensor)])]:
        # Overload stub: plain (padded) tensor input.
        pass

    _jit_internal._overload_method
    def forward(self, input: PackedSequence, hx: Optional[Tuple[(Tensor, Tensor)]]=None) -> Tuple[(PackedSequence, Tuple[(Tensor, Tensor)])]:
        # Overload stub: PackedSequence input.
        pass

    def forward(self, input, hx=None):
        """Run the LSTM; accepts either a padded Tensor or a PackedSequence
        and returns (output, (h_n, c_n)) in the matching format."""
        orig_input = input
        if isinstance(orig_input, PackedSequence):
            (input, batch_sizes, sorted_indices, unsorted_indices) = input
            max_batch_size = batch_sizes[0]
            max_batch_size = int(max_batch_size)
        else:
            batch_sizes = None
            max_batch_size = (input.size(0) if self.batch_first else input.size(1))
            sorted_indices = None
            unsorted_indices = None
        if (hx is None):
            # Default to zero-initialized (h, c) for every layer/direction.
            num_directions = (2 if self.bidirectional else 1)
            zeros = torch.zeros((self.num_layers * num_directions), max_batch_size, self.hidden_size, dtype=input.dtype, device=input.device)
            hx = (zeros, zeros)
        else:
            # User-supplied hidden state follows the original batch order;
            # permute it to match the packed (sorted) order.
            hx = self.permute_hidden(hx, sorted_indices)
        self.check_forward_args(input, hx, batch_sizes)
        if (batch_sizes is None):
            result = _VF.lstm(input, hx, self._flat_weights, self.bias, self.num_layers, self.dropout, self.training, self.bidirectional, self.batch_first)
        else:
            result = _VF.lstm(input, batch_sizes, hx, self._flat_weights, self.bias, self.num_layers, self.dropout, self.training, self.bidirectional)
        output = result[0]
        hidden = result[1:]
        if isinstance(orig_input, PackedSequence):
            output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
            return (output_packed, self.permute_hidden(hidden, unsorted_indices))
        else:
            return (output, self.permute_hidden(hidden, unsorted_indices))
class GumbelVQ(VQModel):
    """VQModel variant using Gumbel-softmax quantization with an annealed
    temperature schedule and periodic checkpointing during training."""

    def __init__(self, ddconfig, lossconfig, n_embed, embed_dim, temperature_scheduler_config, ckpt_path=None, ignore_keys=[], image_key='image', colorize_nlabels=None, monitor=None, kl_weight=1e-08, remap=None):
        # NOTE(review): ignore_keys=[] is a mutable default argument, shared
        # across calls; it is only read here, but worth confirming upstream.
        z_channels = ddconfig['z_channels']
        # Parent is initialized WITHOUT the checkpoint; the checkpoint is
        # loaded below only after the Gumbel quantizer has been created.
        super().__init__(ddconfig, lossconfig, n_embed, embed_dim, ckpt_path=None, ignore_keys=ignore_keys, image_key=image_key, colorize_nlabels=colorize_nlabels, monitor=monitor)
        self.loss.n_classes = n_embed
        self.vocab_size = n_embed
        self.quantize = GumbelQuantize(z_channels, embed_dim, n_embed=n_embed, kl_weight=kl_weight, temp_init=1.0, remap=remap)
        self.temperature_scheduler = instantiate_from_config(temperature_scheduler_config)
        if (ckpt_path is not None):
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
        # Manual step counter used for periodic checkpoint saving.
        self.current_step = 0

    def temperature_scheduling(self):
        # Anneal the Gumbel-softmax temperature according to the global step.
        self.quantize.temperature = self.temperature_scheduler(self.global_step)

    def encode_to_prequant(self, x):
        """Encode *x* up to (but not including) quantization."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        return h

    def decode_code(self, code_b):
        raise NotImplementedError

    def training_step(self, batch, batch_idx, optimizer_idx):
        """One GAN-style training step: optimizer 0 trains the autoencoder,
        optimizer 1 trains the discriminator (and saves periodic ckpts)."""
        self.temperature_scheduling()
        x = self.get_input(batch, self.image_key)
        (xrec, qloss) = self(x)
        if (optimizer_idx == 0):
            (aeloss, log_dict_ae) = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            self.log('temperature', self.quantize.temperature, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss
        if (optimizer_idx == 1):
            (discloss, log_dict_disc) = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            # Save a checkpoint every 1000 discriminator steps.
            self.current_step += 1
            if ((self.current_step % 1000) == 0):
                ckpt = os.path.join(self.trainer.logdir, 'checkpoints', f'{self.current_step:04d}.ckpt')
                self.trainer.save_checkpoint(ckpt)
            return discloss

    def validation_step(self, batch, batch_idx):
        """Run both loss heads on a validation batch and log the metrics."""
        x = self.get_input(batch, self.image_key)
        (xrec, qloss) = self(x)
        (aeloss, log_dict_ae) = self.loss(qloss, x, xrec, 0, self.global_step, last_layer=self.get_last_layer(), split='val')
        (discloss, log_dict_disc) = self.loss(qloss, x, xrec, 1, self.global_step, last_layer=self.get_last_layer(), split='val')
        rec_loss = log_dict_ae['val/rec_loss']
        self.log('val/rec_loss', rec_loss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log('val/aeloss', aeloss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def on_validation_end(self, **kwargs):
        # Always refresh a 'latest' checkpoint after validation.
        ckpt = os.path.join(self.trainer.logdir, 'checkpoints', 'latest.ckpt')
        self.trainer.save_checkpoint(ckpt)

    def log_images(self, batch, **kwargs):
        """Return a dict with the input batch and its reconstructions."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        h = self.encoder(x)
        h = self.quant_conv(h)
        (quant, _, _) = self.quantize(h)
        x_rec = self.decode(quant)
        log['inputs'] = x
        log['reconstructions'] = x_rec
        return log
class _PredictManager():
    """Drive a Predictor over an input file (or stdin), batching inputs and
    writing predictions to stdout and/or an output file."""

    def __init__(self, predictor: Predictor, input_file: str, output_file: Optional[str], batch_size: int, print_to_console: bool, has_dataset_reader: bool) -> None:
        self._predictor = predictor
        self._input_file = input_file
        if (output_file is not None):
            self._output_file = open(output_file, 'w')
        else:
            self._output_file = None
        self._batch_size = batch_size
        self._print_to_console = print_to_console
        if has_dataset_reader:
            # When a reader is available, inputs are parsed into Instances
            # instead of raw JSON lines.
            self._dataset_reader = predictor._dataset_reader
        else:
            self._dataset_reader = None

    def _predict_json(self, batch_data: List[JsonDict]) -> Iterator[str]:
        """Yield one serialized prediction line per JSON input."""
        if (len(batch_data) == 1):
            # Single-item batches use the cheaper non-batch API.
            results = [self._predictor.predict_json(batch_data[0])]
        else:
            results = self._predictor.predict_batch_json(batch_data)
        for output in results:
            (yield self._predictor.dump_line(output))

    def _predict_instances(self, batch_data: List[Instance]) -> Iterator[str]:
        """Yield one serialized prediction line per Instance input."""
        if (len(batch_data) == 1):
            results = [self._predictor.predict_instance(batch_data[0])]
        else:
            results = self._predictor.predict_batch_instance(batch_data)
        for output in results:
            (yield self._predictor.dump_line(output))

    def _maybe_print_to_console_and_file(self, index: int, prediction: str, model_input: Optional[str] = None) -> None:
        # Emit one prediction to whichever sinks are enabled.
        if self._print_to_console:
            if (model_input is not None):
                print(f'input {index}: ', model_input)
            print('prediction: ', prediction)
        if (self._output_file is not None):
            self._output_file.write(prediction)

    def _get_json_data(self) -> Iterator[JsonDict]:
        """Yield parsed JSON inputs from stdin ('-') or the input file."""
        if (self._input_file == '-'):
            for line in sys.stdin:
                if (not line.isspace()):
                    (yield self._predictor.load_line(line))
        else:
            input_file = cached_path(self._input_file)
            with open(input_file, 'r') as file_input:
                for line in file_input:
                    if (not line.isspace()):
                        (yield self._predictor.load_line(line))

    def _get_instance_data(self) -> Iterator[Instance]:
        """Yield Instances produced by the dataset reader from the input file."""
        if (self._input_file == '-'):
            raise ConfigurationError('stdin is not an option when using a DatasetReader.')
        elif (self._dataset_reader is None):
            raise ConfigurationError('To generate instances directly, pass a DatasetReader.')
        else:
            (yield from self._dataset_reader.read(self._input_file))

    def run(self) -> None:
        """Run prediction over the whole input, batching by batch_size, then
        close the output file if one was opened."""
        has_reader = (self._dataset_reader is not None)
        index = 0
        if has_reader:
            for batch in tqdm(lazy_groups_of(self._get_instance_data(), self._batch_size)):
                for (model_input_instance, result) in zip(batch, self._predict_instances(batch)):
                    self._maybe_print_to_console_and_file(index, result, str(model_input_instance))
                    index = (index + 1)
        else:
            for batch_json in lazy_groups_of(self._get_json_data(), self._batch_size):
                for (model_input_json, result) in zip(batch_json, self._predict_json(batch_json)):
                    self._maybe_print_to_console_and_file(index, result, json.dumps(model_input_json))
                    index = (index + 1)
        if (self._output_file is not None):
            self._output_file.close()
def combine_sequences(sequences, axis=(- 1), name=None):
    """Zero-pad every sequence to the common maximum length along *axis*
    and stack the padded sequences along that same axis."""
    with tf.name_scope(name or 'combine_sequences'):
        shapes = [shape_list(seq) for seq in sequences]
        lengths = [shp[axis] for shp in shapes]
        max_len = tf.reduce_max(tf.stack(lengths))

        def _padding_shape(shape, length):
            # Same shape as the sequence, except the padded axis, which gets
            # the remaining length up to the maximum.
            padded = copy.copy(shape)
            padded[axis] = max_len - length
            return padded

        padded_seqs = [
            tf.concat([seq, tf.zeros(_padding_shape(shp, sl), seq.dtype)], axis=axis)
            for seq, shp, sl in zip(sequences, shapes, lengths)
        ]
        return tf.stack(padded_seqs, axis=axis)
def c4_graph():
    """Return a 4-cycle graph on nodes 0..3 (edges 0-1, 0-2, 1-3, 2-3)."""
    graph = nx.Graph()
    for u, v in ((0, 1), (0, 2), (1, 3), (2, 3)):
        graph.add_edge(u, v)
    return graph
# NOTE(review): the bare expression below looks like a mangled decorator
# ("@_connect.numpy.implements('nanargmin')") that registers this function as
# the NEP-18 override for numpy.nanargmin -- confirm against the original.
_connect.numpy.implements('nanargmin')
def _nep_18_impl_nanargmin(a, axis=None, out=UNSUPPORTED, *, keepdims=False):
    """NEP-18 dispatch target for numpy.nanargmin.

    The ``out`` parameter is declared only for signature compatibility; it is
    never forwarded, so passing it has no effect here.
    """
    return nanargmin(a, axis=axis, keepdims=keepdims)
class Predictor(ABC):
    """ASV benchmark mix-in adding predict timing/memory benchmarks.

    Methods are defined conditionally at class-creation time, driven by the
    ``Benchmark`` configuration flags; when a base commit is configured, a
    cross-commit prediction-equality tracker is added as well.
    """
    if Benchmark.bench_predict:
        def time_predict(self, *args):
            # ASV wall-time benchmark of a single predict call.
            self.estimator.predict(self.X)

        def peakmem_predict(self, *args):
            # ASV peak-memory benchmark of the same predict call.
            self.estimator.predict(self.X)

        if (Benchmark.base_commit is not None):
            def track_same_prediction(self, *args):
                # Compare predictions against the estimator pickled at the
                # base commit; True means both commits agree (np.allclose).
                est_path = get_estimator_path(self, Benchmark.base_commit, args, True)
                with est_path.open(mode='rb') as f:
                    estimator_base = pickle.load(f)
                y_val_pred_base = estimator_base.predict(self.X_val)
                y_val_pred = self.estimator.predict(self.X_val)
                return np.allclose(y_val_pred_base, y_val_pred)

    def params(self):
        # ASV parameter hook; intentionally empty for this mix-in.
        pass
def test_count_featurizer(tmp_path: pathlib.Path):
    """End-to-end test of CountFeaturizer on a freshly created test database:
    per-label feature counts, column count, and the full FeaturizerList
    pipeline (including ontology expansion)."""
    time_horizon = TimeHorizon(datetime.timedelta(days=0), datetime.timedelta(days=180))
    create_database(tmp_path)
    database_path = os.path.join(tmp_path, 'target')
    database = femr.datasets.PatientDatabase(database_path)
    ontology = database.get_ontology()
    femr_outcome_code = get_femr_code(ontology, 2)
    femr_admission_code = get_femr_code(ontology, 3)
    labeler = CodeLabeler([femr_outcome_code], time_horizon, [femr_admission_code])
    patient: femr.Patient = cast(femr.Patient, database[next(iter(database))])
    labels = labeler.label(patient)
    featurizer = CountFeaturizer()
    featurizer.preprocess(patient, labels, ontology)
    patient_features = featurizer.featurize(patient, labels, ontology)
    assert (featurizer.get_num_columns() == 3), f'featurizer.get_num_columns() = {featurizer.get_num_columns()}'
    # Translate sparse column/value pairs into (name, value) sets per label
    # for order-independent comparison.
    simple_patient_features = [{(featurizer.get_column_name(v.column), v.value) for v in a} for a in patient_features]
    assert (simple_patient_features[0] == {('dummy/three', 1)}), f'patient_features[0] = {patient_features[0]}'
    assert (simple_patient_features[1] == {('dummy/three', 2), ('dummy/two', 2)})
    assert (simple_patient_features[2] == {('dummy/three', 3), ('dummy/two', 4)})
    # Now exercise the full pipeline over all patients, with ontology
    # expansion enabled.
    labeled_patients = labeler.apply(path_to_patient_database=database_path)
    featurizer = CountFeaturizer(is_ontology_expansion=True)
    featurizer_list = FeaturizerList([featurizer])
    featurizer_list.preprocess_featurizers(database_path, labeled_patients)
    featurized_patients = featurizer_list.featurize(database_path, labeled_patients)
    labels_per_patient = [True, False, False]
    assert (featurized_patients[0].shape == ((len(labeled_patients) * len(labels_per_patient)), 3))
    _assert_featurized_patients_structure(labeled_patients, featurized_patients, labels_per_patient)
class TestAsLinearOperator():
def setup_method(self):
self.cases = []
def make_cases(original, dtype):
cases = []
cases.append((matrix(original, dtype=dtype), original))
cases.append((np.array(original, dtype=dtype), original))
cases.append((sparse.csr_matrix(original, dtype=dtype), original))
def mv(x, dtype):
y = original.dot(x)
if (len(x.shape) == 2):
y = y.reshape((- 1), 1)
return y
def rmv(x, dtype):
return original.T.conj().dot(x)
class BaseMatlike(interface.LinearOperator):
args = ()
def __init__(self, dtype):
self.dtype = np.dtype(dtype)
self.shape = original.shape
def _matvec(self, x):
return mv(x, self.dtype)
class HasRmatvec(BaseMatlike):
args = ()
def _rmatvec(self, x):
return rmv(x, self.dtype)
class HasAdjoint(BaseMatlike):
args = ()
def _adjoint(self):
shape = (self.shape[1], self.shape[0])
matvec = partial(rmv, dtype=self.dtype)
rmatvec = partial(mv, dtype=self.dtype)
return interface.LinearOperator(matvec=matvec, rmatvec=rmatvec, dtype=self.dtype, shape=shape)
class HasRmatmat(HasRmatvec):
def _matmat(self, x):
return original.dot(x)
def _rmatmat(self, x):
return original.T.conj().dot(x)
cases.append((HasRmatvec(dtype), original))
cases.append((HasAdjoint(dtype), original))
cases.append((HasRmatmat(dtype), original))
return cases
original = np.array([[1, 2, 3], [4, 5, 6]])
self.cases += make_cases(original, np.int32)
self.cases += make_cases(original, np.float32)
self.cases += make_cases(original, np.float64)
self.cases += [(interface.aslinearoperator(M).T, A.T) for (M, A) in make_cases(original.T, np.float64)]
self.cases += [(interface.aslinearoperator(M).H, A.T.conj()) for (M, A) in make_cases(original.T, np.float64)]
original = np.array([[1, 2j, 3j], [4j, 5j, 6]])
self.cases += make_cases(original, np.complex128)
self.cases += [(interface.aslinearoperator(M).T, A.T) for (M, A) in make_cases(original.T, np.complex128)]
self.cases += [(interface.aslinearoperator(M).H, A.T.conj()) for (M, A) in make_cases(original.T, np.complex128)]
def test_basic(self):
for (M, A_array) in self.cases:
A = interface.aslinearoperator(M)
(M, N) = A.shape
xs = [np.array([1, 2, 3]), np.array([[1], [2], [3]])]
ys = [np.array([1, 2]), np.array([[1], [2]])]
if (A.dtype == np.complex128):
xs += [np.array([1, 2j, 3j]), np.array([[1], [2j], [3j]])]
ys += [np.array([1, 2j]), np.array([[1], [2j]])]
x2 = np.array([[1, 4], [2, 5], [3, 6]])
for x in xs:
assert_equal(A.matvec(x), A_array.dot(x))
assert_equal((A * x), A_array.dot(x))
assert_equal(A.matmat(x2), A_array.dot(x2))
assert_equal((A * x2), A_array.dot(x2))
for y in ys:
assert_equal(A.rmatvec(y), A_array.T.conj().dot(y))
assert_equal(A.T.matvec(y), A_array.T.dot(y))
assert_equal(A.H.matvec(y), A_array.T.conj().dot(y))
for y in ys:
if (y.ndim < 2):
continue
assert_equal(A.rmatmat(y), A_array.T.conj().dot(y))
assert_equal(A.T.matmat(y), A_array.T.dot(y))
assert_equal(A.H.matmat(y), A_array.T.conj().dot(y))
if hasattr(M, 'dtype'):
assert_equal(A.dtype, M.dtype)
assert_(hasattr(A, 'args'))
def test_dot(self):
    """A.dot must match the dense reference for 1-D, column, and 2-D inputs."""
    for operand, dense in self.cases:
        op = interface.aslinearoperator(operand)
        _rows, _cols = op.shape
        samples = (
            np.array([1, 2, 3]),
            np.array([[1], [2], [3]]),
            np.array([[1, 4], [2, 5], [3, 6]]),
        )
        for sample in samples:
            assert_equal(op.dot(sample), dense.dot(sample))
class TestStepwiseStore(TestCase):
    """Smoke tests for the module-level ``store`` registry."""

    def test_load(self):
        # Registry must not be empty after module import.
        stored_count = len(store)
        self.assertGreater(stored_count, 0)
        # And it must expose the 'gelu' entry at degree 3.
        entry = store.get('gelu', 3)
        self.assertIsNotNone(entry)
class AnomalyDetector():
    """Thin facade over the detection algorithm selected by the configuration."""

    def __init__(self, config: AnomalyDetectionConfig):
        # Resolve the concrete detector implementation from the factory registry.
        algo_key = config.algo_name.lower()
        self.anomaly_detector = factory.get_algorithm('detection', algo_key, config)

    def fit(self, log_features: pd.DataFrame):
        """Fit the underlying detector on the extracted log features."""
        return self.anomaly_detector.fit(log_features)

    def predict(self, log_features: pd.DataFrame) -> pd.DataFrame:
        """Return per-sample anomaly predictions from the underlying detector."""
        return self.anomaly_detector.predict(log_features)
def test_replace_in_file_multiline_old_text(test_file, test_file_path, agent: Agent):
    """replace_in_file must swap a multi-line span and leave the rest intact."""
    original = (
        'This is a multi_line\ntest for testing\nhow well this function\n'
        'works when the input\nis multi-lined'
    )
    expected = 'This is a multi_line\nfile. succeeded test\nis multi-lined'
    test_file.write(original)
    test_file.close()
    file_ops.replace_in_file(
        test_file_path,
        '\ntest for testing\nhow well this function\nworks when the input\n',
        '\nfile. succeeded test\n',
        agent=agent,
    )
    with open(test_file_path) as fh:
        result = fh.read()
    assert result == expected
# NOTE(review): the bare call below looks like a decorator whose leading '@'
# (and part of its name) was lost — likely `@test_utils.test()` guarding the
# taichi test that follows. Confirm against the original test file.
_utils.test()
def test_ad_reduce_fwd():
    """Forward-mode autodiff of a sum-of-squares reduction over a 1-D field."""
    n = 16
    x = ti.field(dtype=ti.f32, shape=n)
    loss = ti.field(dtype=ti.f32, shape=())
    ti.root.lazy_dual()

    def func():
        for i in x:
            loss[None] += x[i] ** 2

    expected_loss = 0
    for i in range(n):
        x[i] = i
        expected_loss += i * i
    with ti.ad.FwdMode(loss=loss, param=x, seed=[1.0 for _ in range(n)]):
        func()
    assert expected_loss == test_utils.approx(loss[None])
    # d/dx_i of sum(x_i^2) is 2*x_i; with an all-ones seed the dual is sum(2*i).
    expected_dual = 0
    for i in range(n):
        expected_dual += i * 2
    assert loss.dual[None] == test_utils.approx(expected_dual)
def isinf(tensor):
    """Return a boolean tensor marking entries whose magnitude is infinite.

    Raises:
        ValueError: if *tensor* is not a ``torch.Tensor``.
    """
    if not isinstance(tensor, torch.Tensor):
        raise ValueError('The argument is not a tensor', str(tensor))
    magnitude = tensor.abs()
    return magnitude == math.inf
class RegressionTask(SingleOutputTask):
    """Fine-tuning task whose label is a real value in [min_value, max_value]."""
    __metaclass__ = abc.ABCMeta

    def __init__(self, config: configure_finetuning.FinetuningConfig, name, tokenizer, min_value, max_value):
        super(RegressionTask, self).__init__(config, name, tokenizer)
        self._tokenizer = tokenizer
        self._min_value = min_value
        self._max_value = max_value

    def _get_dummy_label(self):
        # Placeholder target used when no gold label is available.
        return 0.0

    def get_feature_specs(self):
        """Feature specs: example id plus a float regression target."""
        return [
            feature_spec.FeatureSpec(self.name + '_eid', []),
            feature_spec.FeatureSpec(self.name + '_targets', [], is_int_feature=False),
        ]

    def _add_features(self, features, example, log):
        label = float(example.label)
        assert self._min_value <= label <= self._max_value
        # NOTE(review): scales by max_value alone rather than (max - min);
        # identical only when min_value == 0 — confirm intent for other ranges.
        label = (label - self._min_value) / self._max_value
        if log:
            utils.log(' label: {:}'.format(label))
        features[example.task_name + '_targets'] = label

    def get_prediction_module(self, bert_model, features, is_training, percent_done):
        """Build a single-unit regression head over the pooled BERT output."""
        reprs = bert_model.get_pooled_output()
        if is_training:
            reprs = tf.nn.dropout(reprs, keep_prob=0.9)
        predictions = tf.squeeze(tf.layers.dense(reprs, 1), -1)
        targets = features[self.name + '_targets']
        # Per-example squared error against the (scaled) target.
        losses = tf.square(predictions - targets)
        outputs = dict(
            loss=losses,
            predictions=predictions,
            targets=features[self.name + '_targets'],
            eid=features[self.name + '_eid'],
        )
        return losses, outputs

    def get_scorer(self):
        return classification_metrics.RegressionScorer()
def get_plot_config(args):
    """Translate ``args.log`` into logging-backend flags.

    Args:
        args: namespace with a ``log`` attribute in {'all', 'tb', 'wandb'}.

    Returns:
        Tuple ``(tensorboard_enabled, wandb_enabled)``.

    Raises:
        ValueError: if ``args.log`` names an unknown backend.
    """
    # An explicit raise (rather than `assert`) still validates under `python -O`.
    if args.log not in ('all', 'tb', 'wandb'):
        raise ValueError(f"Unknown logging backend {args.log!r}; expected 'all', 'tb' or 'wandb'")
    return (args.log in ('all', 'tb'), args.log in ('all', 'wandb'))
class TruncationOpManagerInference():
    """Swaps ``torch.nn`` layer classes for id-tracking wrappers and hands out
    per-role quantizers (weights, bias, activations, pooling, classifier, ...)
    configured from the command-line args and the ``qparams`` dict."""

    def __load_quantizer__(self, qtype, qparams):
        """Instantiate the quantizer registered for *qtype* in ``qtypes``.

        Returns (quantizer, params) where params are the per-type overrides
        taken from *qparams* (empty dict when none are configured).
        """
        # NOTE(review): rstrip('') is a no-op — possibly meant to strip a
        # suffix from the type name; confirm against the keys in `qtypes`.
        qtype_name = qtype.rstrip('')
        quant_params = (qparams[qtype_name] if (qtype_name in qparams) else {})
        quantizer = qtypes.__dict__[(qtype_name + '_quantizer')](qtype, quant_params)
        return (quantizer, quant_params)

    def __fill_quantizers__(self, qtype, qparams, arch=None, qweight='int8'):
        """Populate ``self.quantizers`` with one instance per role so flags
        (clipping, KLD, per-channel, bit allocation, ...) can differ per role."""
        # Classifier activations: plain int8, max statistics, everything off.
        (classifier_quantizer, _) = self.__load_quantizer__('int8', qparams)
        classifier_quantizer.clipping = 'no'
        classifier_quantizer.kld = False
        classifier_quantizer.pcq_w = False
        classifier_quantizer.pcq_a = False
        classifier_quantizer.sm = StatisticManager
        classifier_quantizer.stats_kind = 'max'
        classifier_quantizer.measure_entropy = False
        self.quantizers['activation_classifier'] = classifier_quantizer
        # Weights: either a pass-through (f32) or the requested weight qtype.
        if (qweight == 'f32'):
            weights_quantizer = DummyQuantizer()
        else:
            (weights_quantizer, _) = self.__load_quantizer__(qweight, qparams)
            weights_quantizer.pcq_a = False
            weights_quantizer.clipping = 'no'
            weights_quantizer.kld = False
            weights_quantizer.bit_alloc = False
            weights_quantizer.stats_kind = 'max'
        self.quantizers['weight'] = weights_quantizer
        # Classifier weights are always int8 regardless of `qweight`.
        (weights_quantizer, _) = self.__load_quantizer__('int8', qparams)
        weights_quantizer.pcq_a = False
        weights_quantizer.clipping = 'no'
        weights_quantizer.kld = False
        weights_quantizer.bit_alloc = False
        weights_quantizer.stats_kind = 'max'
        weights_quantizer.measure_entropy = False
        self.quantizers['weight_classifier'] = weights_quantizer
        # NOTE(review): `bias_quantizer` is configured below but never stored —
        # the 'bias' slot receives a DummyQuantizer instead. Confirm whether
        # bias quantization was intentionally disabled.
        (bias_quantizer, _) = self.__load_quantizer__('int8', qparams)
        bias_quantizer.pcq_w = False
        bias_quantizer.pcq_a = False
        bias_quantizer.clipping = 'no'
        bias_quantizer.kld = False
        bias_quantizer.bit_alloc = False
        self.quantizers['bias'] = DummyQuantizer()
        # Quantizer used for tensors on the 8-bit ignore list.
        (quantizer_ignored, _) = self.__load_quantizer__('int8', qparams)
        quantizer_ignored.pcq_w = False
        quantizer_ignored.pcq_a = False
        quantizer_ignored.sm = StatisticManager
        quantizer_ignored.clipping = 'no'
        quantizer_ignored.kld = False
        self.quantizers['ignored'] = quantizer_ignored
        # General activations: the requested qtype; force_positive matches
        # architectures whose conv+ReLU pairs are fused.
        (activation_quantizer, _) = self.__load_quantizer__(qtype, qparams)
        activation_quantizer.force_positive = self.fused_relu
        activation_quantizer.pcq_w = False
        self.quantizers['activation'] = activation_quantizer
        # Linear-layer activations: like above, but never per-channel.
        (activation_linear_quantizer, _) = self.__load_quantizer__(qtype, qparams)
        activation_linear_quantizer.force_positive = self.fused_relu
        activation_linear_quantizer.pcq_w = False
        activation_linear_quantizer.pcq_a = False
        activation_linear_quantizer.sm = StatisticManager
        self.quantizers['activation_linear'] = activation_linear_quantizer
        # Pooling activations: conservative int8 with all extras disabled.
        (pooling_quantizer, _) = self.__load_quantizer__('int8', qparams)
        pooling_quantizer.pcq_w = False
        pooling_quantizer.pcq_a = False
        pooling_quantizer.sm = StatisticManager
        pooling_quantizer.clipping = 'no'
        pooling_quantizer.kld = False
        pooling_quantizer.bit_alloc = False
        pooling_quantizer.measure_entropy = False
        self.quantizers['activation_pooling'] = pooling_quantizer

    def __init__(self, args, qparams):
        self.verbose = False
        self.activation_quantizer = None
        # Remember the original nn classes so enable()/disable() can swap them.
        self.origin_linear = nn.Linear
        self.origin_conv2d = nn.Conv2d
        self.origin_batch_norm = nn.BatchNorm2d
        self.orig_maxpool = nn.MaxPool2d
        self.orig_avgpool = nn.AvgPool2d
        self.orig_relu = nn.ReLU
        self.ignore_ids = []
        # Optional clipping ratios from the qmanager section of qparams.
        self.rho_act = (qparams['qmanager']['rho_act'] if ('qmanager' in qparams) else None)
        self.rho_weight = (qparams['qmanager']['rho_weight'] if ('qmanager' in qparams) else None)
        self.fp32_clip = ((self.rho_act is not None) or (self.rho_weight is not None))
        # Architectures whose activations are known non-negative (ReLU fused).
        self.fused_relu = ((args.arch is not None) and ((args.arch == 'alexnet') or (args.arch == 'vgg16') or (args.arch == 'vgg16_bn') or (args.arch == 'inception_v3') or ('squeezenet' in args.arch)))
        if (args.qtype is not None):
            self.quantizers = {}
            self.quantize = True
            if ('bfloat' in args.qtype):
                # bfloat path: one default quantizer, linear layers untouched.
                (self.quantizer_default, _) = self.__load_quantizer__(args.qtype, qparams)
                self.linear_layer_quantizer = DummyQuantizer()
            else:
                self.__fill_quantizers__(args.qtype, qparams, args.arch, args.qweight)
                (self.quantizer_default, _) = self.__load_quantizer__('int8', qparams)
        self.activations_clipper = StatisticalClipper(self.rho_act)
        self.weights_clipper = RatioClipper(self.rho_weight)

    def __exit__(self, *args):
        pass

    def get_quantizer(self, tag, tensor=None):
        """Return the role-specific quantizer for *tag*, else the default."""
        if (tag in self.quantizers):
            return self.quantizers[tag]
        else:
            return self.quantizer_default

    def set_8bit_list(self, ignore_list):
        # Stat ids in this list are always quantized with the 'ignored' (int8) quantizer.
        self.ignore_ids = ignore_list

    def enable(self):
        """Monkey-patch torch.nn with the id-tracking layer wrappers."""
        nn.Linear = LinearWithId
        nn.Conv2d = Conv2dWithId
        nn.BatchNorm2d = BatchNorm2dWithId
        nn.MaxPool2d = MaxPool2dWithId
        nn.AvgPool2d = AvgPool2dWithId
        nn.ReLU = ReLUWithId

    def disable(self):
        """Restore the original torch.nn layer classes."""
        nn.Linear = self.origin_linear
        nn.Conv2d = self.origin_conv2d
        nn.BatchNorm2d = self.origin_batch_norm
        nn.MaxPool2d = self.orig_maxpool
        nn.AvgPool2d = self.orig_avgpool
        nn.ReLU = self.orig_relu

    def quantize_matmul(self):
        """Patch torch.Tensor.matmul to quantize inputs and output.

        NOTE(review): relies on `self.origin_matmul`, which is never assigned
        in __init__ — confirm it is set elsewhere before this is called.
        """
        def quantized_matmul(tensor1, tensor2):
            tensor1_ = attacher.pytorch_attach(tensor1, self.activation_quantizer, None)
            tensor2_ = attacher.pytorch_attach(tensor2, self.activation_quantizer, None)
            res = self.origin_matmul(tensor1_, tensor2_)
            return attacher.pytorch_attach(res, self.activation_quantizer, None)
        torch.Tensor.matmul = quantized_matmul

    def quantize_tensor(self, tensor, fprop=True, bprop=True):
        # Only the forward pass is quantized; `bprop` is accepted but unused here.
        fprop = (self.activation_quantizer if fprop else None)
        return attacher.pytorch_attach(tensor, fprop, None)

    def quantize_instant(self, tensor, id, tag='', stat_id=None, half_range=False, override_att=None, verbose=False):
        """Quantize *tensor* immediately with the quantizer selected by *tag*
        (or the 'ignored' quantizer when *stat_id* is on the 8-bit list)."""
        ignore_cond = False
        if (stat_id is not None):
            ignore_cond = np.array([(l == stat_id) for l in self.ignore_ids]).any()
        qtag = ('ignored' if ignore_cond else tag)
        q = self.get_quantizer(qtag)
        q.half_range = half_range
        if verbose:
            print('Quantize {0:21} | Id - {1:18} | {2:} | {3:}'.format(tag, str(stat_id), str(q), str(tensor.device)))
        return q(tensor, id, tag, stat_id, override_att)
# NOTE(review): the two bare expressions below look like `mock.patch` decorator
# remnants whose `@patch(...)` wrappers were lost — they match the
# `warning_mock` / `entry_points_mock` parameters of the test that follows. Confirm.
('warnings.warn')
(sdv, 'iter_entry_points')
def test__find_addons_missing_object(entry_points_mock, warning_mock, mock_sdv):
    """An add-on entry point targeting a missing object should warn, not raise."""
    # Entry point whose target attribute does not exist on the module.
    bad_entry_point = Mock()
    bad_entry_point.name = 'sdv.submodule:missing_object.new_method'
    entry_points_mock.return_value = [bad_entry_point]
    del mock_sdv.submodule.missing_object

    _find_addons()

    entry_points_mock.assert_called_once_with(group='sdv_modules')
    expected_msg = "Failed to set 'sdv.submodule:missing_object.new_method': missing_object."
    warning_mock.assert_called_once_with(expected_msg)
class classifier(nn.Module):
    """MLP classification head: average-pool over time, then three FC layers."""

    def __init__(self, feadim, classnum):
        super(classifier, self).__init__()
        half, quarter = feadim // 2, feadim // 4
        self.fc1 = nn.Linear(feadim, half)
        self.fc2 = nn.Linear(half, quarter)
        self.fc3 = nn.Linear(quarter, classnum)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        self.avgpool = nn.AdaptiveAvgPool1d(1)

    def forward(self, x):
        # (B, T, C) -> (B, C, T) so the 1-D pool averages over the time axis.
        pooled = self.avgpool(x.permute([0, 2, 1])).squeeze(-1)
        hidden = self.dropout(self.relu(self.fc1(pooled)))
        hidden = self.dropout(self.relu(self.fc2(hidden)))
        return self.fc3(hidden)
class MPolynomialIdeal_singular_base_repr():
    """libSingular-backed operations for multivariate polynomial ideals:
    syzygies, Groebner bases, and parametric Groebner covers."""
    # NOTE(review): the bare names `_field` / `_gb_standard_options` in this
    # class body look like stripped decorators (likely `@require_field` and
    # `@libsingular_gb_standard_options`) — confirm against upstream Sage.
    _field
    def syzygy_module(self):
        """Return the syzygy module of the generators as a matrix over the ring."""
        from sage.libs.singular.function_factory import ff
        syz = ff.syz
        from sage.matrix.constructor import matrix
        S = syz(self)
        return matrix(self.ring(), S)
    _gb_standard_options
    def _groebner_basis_libsingular(self, algorithm='groebner', *args, **kwds):
        """Compute a Groebner basis via libSingular.

        *algorithm* selects 'std', 'slimgb' or 'groebner'; any other name is
        tried as a raw Singular function. Keyword options are forwarded to
        Singular's option system; verbosity >= 2 enables protocol output.
        """
        from sage.libs.singular.function import singular_function
        from sage.libs.singular.function_factory import ff
        from sage.libs.singular.option import opt
        from sage.rings.polynomial.multi_polynomial_ideal_libsingular import slimgb_libsingular, std_libsingular
        groebner = ff.groebner
        if (get_verbose() >= 2):
            opt['prot'] = True
        for (name, value) in kwds.items():
            if (value is not None):
                opt[name] = value
        if (algorithm == 'std'):
            S = std_libsingular(self)
        elif (algorithm == 'slimgb'):
            S = slimgb_libsingular(self)
        elif (algorithm == 'groebner'):
            S = groebner(self)
        else:
            # Fallback: interpret the name as a Singular function.
            try:
                fnc = singular_function(algorithm)
                S = fnc(self)
            except NameError:
                raise NameError(("Algorithm '%s' unknown" % algorithm))
        return S
    _gb_standard_options
    def _groebner_cover(self):
        """Return the Groebner cover (parametric Groebner bases) of the ideal.

        Requires the base ring to be the fraction field of a (multivariate)
        polynomial ring, i.e. a field with parameters.
        """
        from sage.rings.fraction_field import FractionField_generic
        from sage.rings.polynomial.multi_polynomial_ring_base import is_MPolynomialRing
        from sage.rings.polynomial.polynomial_ring import is_PolynomialRing
        F = self.base_ring()
        if ((not isinstance(F, FractionField_generic)) or ((not is_MPolynomialRing(F.ring())) and (not is_PolynomialRing(F.ring())))):
            raise TypeError('the base ring must be a field with parameters')
        from sage.arith.functions import lcm
        from sage.libs.singular.function import lib, singular_function
        lib('grobcov.lib')
        grobcov = singular_function('grobcov')
        # Clear denominators so grobcov receives genuine polynomials.
        polynomials = []
        for f in self.gens():
            polynomials.append((f * lcm([c.denominator() for c in f.coefficients()])))
        return grobcov(self.ring().ideal(polynomials))
def get_bench_net_lstm(input_var, mask_var, inp_dim, rnn_size, classes, depth=4):
    """Build a stacked bidirectional-LSTM sequence tagger.

    The original implementation repeated the identical bidirectional stanza
    four times; the stack depth is now a parameter (default 4, so existing
    callers get the same network).

    Args:
        input_var: symbolic (batch, time, inp_dim) input tensor.
        mask_var: symbolic (batch, time) sequence mask.
        inp_dim: input feature dimension.
        rnn_size: hidden units per LSTM direction.
        classes: per-timestep output dimension.
        depth: number of stacked bidirectional layers.

    Returns:
        Lasagne layer producing a (time, batch, classes) output.
    """
    l_in = lasagne.layers.InputLayer(shape=(None, None, inp_dim), input_var=input_var)
    l_mask = lasagne.layers.InputLayer(shape=(None, None), input_var=mask_var)
    (batch_size, seq_len, _) = input_var.shape

    def bidir(layer):
        # One forward + one backward LSTM, concatenated along the feature axis.
        fwd = lasagne.layers.LSTMLayer(layer, num_units=rnn_size, mask_input=l_mask,
                                       hid_init=lasagne.init.GlorotUniform())
        bwd = lasagne.layers.LSTMLayer(layer, num_units=rnn_size, mask_input=l_mask,
                                       hid_init=lasagne.init.GlorotUniform(), backwards=True)
        return lasagne.layers.ConcatLayer([fwd, bwd], axis=2)

    h = l_in
    for _ in range(depth):
        h = bidir(h)
    h_dense = non_flattening_dense(h, batch_size=batch_size, seq_len=seq_len,
                                   num_units=classes, nonlinearity=lasagne.nonlinearities.linear)
    # (batch, time, classes) -> (time, batch, classes)
    return lasagne.layers.DimshuffleLayer(h_dense, (1, 0, 2))
def getRoot(path='/home/user/test.xml'):
    """Parse the XML file at *path* and return its root element.

    Blank text nodes are stripped during parsing. The previously hard-coded
    location is kept as the default, so existing callers are unaffected.

    Args:
        path: filesystem path of the XML document to parse.
    """
    parser = etree.XMLParser(remove_blank_text=True)
    tree = etree.parse(path, parser)
    return tree.getroot()
def test_index_no_files():
    """Building a tokenized cache from an empty source must yield no chunks."""
    with tempfile.TemporaryDirectory() as tmpdir:
        source = SingleShardDocumentSource([])
        cache = TokenizedDocumentCache.build_or_load(
            f'{tmpdir}/cache',
            source,
            tokenizer,
            flatten_docs=True,
            enforce_eos=False,
            override_resources={'num_cpus': 1},
        )
        for _chunk in cache:
            pytest.fail('Should not have any chunks')
class EdgeConnect():
    """Two-stage inpainting driver: an edge-completion model feeding an
    image-inpainting model. ``config.MODEL`` selects the mode:
    1=edge only, 2=inpaint only, 3=edge->inpaint, 4=joint."""

    def __init__(self, config):
        self.config = config
        if (config.MODEL == 1):
            model_name = 'edge'
        elif (config.MODEL == 2):
            model_name = 'inpaint'
        elif (config.MODEL == 3):
            model_name = 'edge_inpaint'
        elif (config.MODEL == 4):
            model_name = 'joint'
        # NOTE(review): `model_name` is unbound if config.MODEL is outside 1-4
        # — presumably validated upstream; confirm.
        self.debug = False
        self.model_name = model_name
        self.edge_model = EdgeModel(config).to(config.DEVICE)
        self.inpaint_model = InpaintingModel(config).to(config.DEVICE)
        self.test_dataset = Dataset(config, config.TEST_FLIST, config.TEST_EDGE_FLIST, augment=False, training=False)
        self.samples_path = os.path.join(config.PATH, 'samples')
        self.results_path = os.path.join(config.PATH, 'results')
        # An explicit RESULTS path overrides the default results directory.
        if (config.RESULTS is not None):
            self.results_path = os.path.join(config.RESULTS)
        if ((config.DEBUG is not None) and (config.DEBUG != 0)):
            self.debug = True
        self.log_file = os.path.join(config.PATH, (('log_' + model_name) + '.dat'))

    def load(self):
        """Load checkpoints for the sub-models used by the current mode."""
        if (self.config.MODEL == 1):
            self.edge_model.load()
        elif (self.config.MODEL == 2):
            self.inpaint_model.load()
        else:
            self.edge_model.load()
            self.inpaint_model.load()

    def save(self):
        """Save checkpoints for the sub-models trained by the current mode."""
        if (self.config.MODEL == 1):
            self.edge_model.save()
        elif ((self.config.MODEL == 2) or (self.config.MODEL == 3)):
            self.inpaint_model.save()
        else:
            self.edge_model.save()
            self.inpaint_model.save()

    def test(self):
        """Run inference over the test set and write results to results_path.

        NOTE(review): returns the last `output`; unbound if the test set is
        empty — confirm callers never invoke this with an empty dataset.
        """
        self.edge_model.eval()
        self.inpaint_model.eval()
        model = self.config.MODEL
        create_dir(self.results_path)
        test_loader = DataLoader(dataset=self.test_dataset, batch_size=1)
        index = 0
        for items in test_loader:
            name = self.test_dataset.load_name(index)
            (images, images_gray, edges, masks) = self.cuda(*items)
            index += 1
            if (model == 1):
                # Edge model only: merge predicted edges into the masked region.
                outputs = self.edge_model(images_gray, edges, masks)
                outputs_merged = ((outputs * masks) + (edges * (1 - masks)))
            elif (model == 2):
                # Inpaint model only, using the ground-truth edges.
                outputs = self.inpaint_model(images, edges, masks)
                outputs_merged = ((outputs * masks) + (images * (1 - masks)))
            else:
                # Two-stage: predicted edges (detached) feed the inpaint model.
                edges = self.edge_model(images_gray, edges, masks).detach()
                outputs = self.inpaint_model(images, edges, masks)
                outputs_merged = ((outputs * masks) + (images * (1 - masks)))
            output = self.postprocess(outputs_merged)[0]
            path = os.path.join(self.results_path, name)
            print(index, name)
            imsave(output, path)
            if self.debug:
                # Also dump the (inverted) edge map and the masked input.
                edges = self.postprocess((1 - edges))[0]
                masked = self.postprocess(((images * (1 - masks)) + masks))[0]
                (fname, fext) = name.split('.')
                imsave(edges, os.path.join(self.results_path, ((fname + '_edge.') + fext)))
                imsave(masked, os.path.join(self.results_path, ((fname + '_masked.') + fext)))
        print('\nEnd test....')
        return output

    def log(self, logs):
        # Append one whitespace-separated row of values per call.
        with open(self.log_file, 'a') as f:
            f.write(('%s\n' % ' '.join([str(item[1]) for item in logs])))

    def cuda(self, *args):
        """Move all tensors to the configured device (returns a generator)."""
        return (item.to(self.config.DEVICE) for item in args)

    def postprocess(self, img):
        """Scale [0, 1] tensors to [0, 255] ints in (B, H, W, C) layout."""
        img = (img * 255.0)
        img = img.permute(0, 2, 3, 1)
        return img.int()
def safety_exit(world, margin, state, flat, control):
    """Return an ExitStatus when the vehicle state is unsafe, else None.

    Checks, in order: non-finite motor commands, runaway linear/angular
    speed, divergence from the flat-output reference, and (only when the
    world has blocks) path collisions within *margin*.
    """
    commands = control['cmd_motor_speeds']
    if np.any(np.isinf(commands)):
        return ExitStatus.INF_VALUE
    if np.any(np.isnan(commands)):
        return ExitStatus.NAN_VALUE
    if np.any(np.abs(state['v']) > 100):
        return ExitStatus.OVER_SPEED
    if np.any(np.abs(state['w']) > 100):
        return ExitStatus.OVER_SPIN
    if np.any(np.abs(state['x'] - flat['x']) > 20):
        return ExitStatus.FLY_AWAY
    if len(world.world.get('blocks', [])) > 0:
        # Collision test is only meaningful when obstacles exist.
        collision_pts = world.path_collisions(state['x'], margin)
        if collision_pts.size != 0:
            return ExitStatus.COLLISION
    return None
def run():
    """Exercise JobPool's singleton behavior, async results, and error collection."""
    global pool
    # JobPool is a singleton: constructing it twice yields the same instance.
    pool1 = JobPool(2)
    pool2 = JobPool()
    if pool1 != pool2:
        raise Exception("hmmm, I thought JobPool is 'Singleton'")
    # Re-creating with a different CPU count must be rejected.
    try:
        JobPool(4)
    except Exception as e:
        print('As expected, making a new JobPool with a different cpu count failed: %s' % e)
    pool = JobPool()
    jobs = []
    for j in range(1, 20):
        job = TestJob(str(j))
        jobs.append(job)
        pool.enqueue_job(job)
    sample_job = pool.get_asynch_result_object(jobs[3])
    pool.wait_for_all_jobs(ignore_error=True)
    # The async result's state must agree with the job's own result flag.
    if sample_job.ready() and sample_job.successful():
        assert jobs[3].result_set is True
    else:
        assert jobs[3].result_set is False
    errors = pool.get_all_job_errors()
    try:
        pool.wait_for_all_jobs(ignore_error=False)
    except Exception as e:
        print('Seems we have some jobs that failed (expected): ', e)
    failed_jobs = pool.get_failed_jobs()
    errs = [pool.get_job_error(job) for job in failed_jobs]
    assert len(errs) == len(errors), 'Number of errors from failed jobs: %d. Number of errors: %d' % (len(errs), len(errors))
    assert False not in [(x in errors) for x in errs]
    # BUG FIX: the original asserted on an undefined name `s` (guaranteed
    # NameError when reached); compare the failed-job count with the
    # collected error count instead.
    assert len(failed_jobs) == len(errors), 'Parallelization Error, what happened to the rest?'
# NOTE(review): the bare name below looks like a stripped decorator (moviepy
# effects are typically wrapped with `@requires_duration`) — confirm upstream.
_duration
def slide_out(clip, duration, side):
    """Slide *clip* off-screen over its final *duration* seconds.

    *side* selects the exit edge: 'left', 'right', 'top' or 'bottom'.
    Returns the clip with a time-dependent position attached.
    """
    width, height = clip.size
    start = clip.duration - duration  # moment the slide begins
    position_fns = {
        'left': lambda t: (min(0, width * (-(t - start) / duration)), 'center'),
        'right': lambda t: (max(0, width * ((t - start) / duration)), 'center'),
        'top': lambda t: ('center', min(0, height * (-(t - start) / duration))),
        'bottom': lambda t: ('center', max(0, height * ((t - start) / duration))),
    }
    return clip.set_position(position_fns[side])
def dump_model(operation='create', redo=False):
    """Convert the TF Inception-v3 graph to Caffe.

    operation='create' writes the deploy prototxt; operation='save' copies
    the TF weights into a caffemodel. Existing files are kept unless *redo*.

    NOTE(review): uses `xrange`, so this function targets Python 2 — confirm
    the intended runtime before reuse.
    """
    create_graph()
    sess = tf.InteractiveSession()
    deploy_net_file = 'models/inception_v3/inception_v3_deploy.prototxt'
    model_file = 'models/inception_v3/inception_v3.caffemodel'
    net = []
    # Select the Caffe object the dump_* helpers will populate.
    if ((operation == 'create') and ((not os.path.exists(deploy_net_file)) or redo)):
        net = caffe.NetSpec()
    elif ((operation == 'save') and ((not os.path.exists(model_file)) or redo)):
        caffe.set_device(1)
        caffe.set_mode_gpu()
        net = caffe.Net(deploy_net_file, caffe.TEST)
    else:
        # Nothing to do: target file exists and redo was not requested.
        return
    # Stem: input, three conv-bn blocks, pool, two more convs, pool.
    dump_inputlayer(sess, net, operation)
    dump_convbn(sess, net, 'data', 'conv', operation)
    dump_convbn(sess, net, 'conv', 'conv_1', operation)
    dump_convbn(sess, net, 'conv_1', 'conv_2', operation)
    dump_pool(sess, net, 'conv_2', 'pool', operation)
    dump_convbn(sess, net, 'pool', 'conv_3', operation)
    dump_convbn(sess, net, 'conv_3', 'conv_4', operation)
    dump_pool(sess, net, 'conv_4', 'pool_1', operation)
    # Inception blocks 'mixed' .. 'mixed_2' (35x35 modules).
    from_layer = 'pool_1'
    for inception_id in xrange(0, 3):
        if (inception_id == 0):
            out_layer = 'mixed'
        else:
            out_layer = 'mixed_{}'.format(inception_id)
        dump_tower(sess, net, from_layer, out_layer, ['conv'], operation)
        dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer), ['conv', 'conv_1'], operation)
        dump_tower(sess, net, from_layer, '{}/tower_1'.format(out_layer), ['conv', 'conv_1', 'conv_2'], operation)
        dump_tower(sess, net, from_layer, '{}/tower_2'.format(out_layer), ['pool', 'conv'], operation)
        dump_inception(sess, net, out_layer, ['conv', 'tower/conv_1', 'tower_1/conv_2', 'tower_2/conv'], operation)
        from_layer = '{}/join'.format(out_layer)
    # Grid-size reduction block 'mixed_3'.
    out_layer = 'mixed_3'
    dump_tower(sess, net, from_layer, out_layer, ['conv'], operation)
    dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer), ['conv', 'conv_1', 'conv_2'], operation)
    dump_tower(sess, net, from_layer, out_layer, ['pool'], operation)
    dump_inception(sess, net, out_layer, ['conv', 'tower/conv_2', 'pool'], operation)
    from_layer = '{}/join'.format(out_layer)
    # Inception blocks 'mixed_4' .. 'mixed_7' (17x17 modules).
    for inception_id in xrange(4, 8):
        out_layer = 'mixed_{}'.format(inception_id)
        dump_tower(sess, net, from_layer, out_layer, ['conv'], operation)
        dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer), ['conv', 'conv_1', 'conv_2'], operation)
        dump_tower(sess, net, from_layer, '{}/tower_1'.format(out_layer), ['conv', 'conv_1', 'conv_2', 'conv_3', 'conv_4'], operation)
        dump_tower(sess, net, from_layer, '{}/tower_2'.format(out_layer), ['pool', 'conv'], operation)
        dump_inception(sess, net, out_layer, ['conv', 'tower/conv_2', 'tower_1/conv_4', 'tower_2/conv'], operation)
        from_layer = '{}/join'.format(out_layer)
    # Grid-size reduction block 'mixed_8'.
    out_layer = 'mixed_8'
    dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer), ['conv', 'conv_1'], operation)
    dump_tower(sess, net, from_layer, '{}/tower_1'.format(out_layer), ['conv', 'conv_1', 'conv_2', 'conv_3'], operation)
    dump_tower(sess, net, from_layer, out_layer, ['pool'], operation)
    dump_inception(sess, net, out_layer, ['tower/conv_1', 'tower_1/conv_3', 'pool'], operation)
    from_layer = '{}/join'.format(out_layer)
    # Inception blocks 'mixed_9' / 'mixed_10' (8x8 modules with split towers).
    for inception_id in xrange(9, 11):
        out_layer = 'mixed_{}'.format(inception_id)
        dump_tower(sess, net, from_layer, out_layer, ['conv'], operation)
        dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer), ['conv'], operation)
        dump_tower(sess, net, '{}/tower/conv'.format(out_layer), '{}/tower/mixed'.format(out_layer), ['conv'], operation)
        dump_tower(sess, net, '{}/tower/conv'.format(out_layer), '{}/tower/mixed'.format(out_layer), ['conv_1'], operation)
        dump_inception(sess, net, '{}/tower/mixed'.format(out_layer), ['conv', 'conv_1'], operation, False)
        dump_tower(sess, net, from_layer, '{}/tower_1'.format(out_layer), ['conv', 'conv_1'], operation)
        dump_tower(sess, net, '{}/tower_1/conv_1'.format(out_layer), '{}/tower_1/mixed'.format(out_layer), ['conv'], operation)
        dump_tower(sess, net, '{}/tower_1/conv_1'.format(out_layer), '{}/tower_1/mixed'.format(out_layer), ['conv_1'], operation)
        dump_inception(sess, net, '{}/tower_1/mixed'.format(out_layer), ['conv', 'conv_1'], operation, False)
        dump_tower(sess, net, from_layer, '{}/tower_2'.format(out_layer), ['pool', 'conv'], operation)
        dump_inception(sess, net, out_layer, ['conv', 'tower/mixed', 'tower_1/mixed', 'tower_2/conv'], operation)
        from_layer = '{}/join'.format(out_layer)
    # Head: global pool + softmax.
    dump_pool(sess, net, from_layer, 'pool_3', operation)
    dump_softmax(sess, net, 'pool_3', 'softmax', operation)
    # Persist the result of the selected operation.
    if ((operation == 'create') and ((not os.path.exists(deploy_net_file)) or redo)):
        model_dir = os.path.dirname(deploy_net_file)
        if (not os.path.exists(model_dir)):
            os.makedirs(model_dir)
        with open(deploy_net_file, 'w') as f:
            print('name: "inception_v3_deploy"', file=f)
            print(net.to_proto(), file=f)
    elif ((operation == 'save') and ((not os.path.exists(model_file)) or redo)):
        net.save(model_file)
    sess.close()
class Parent(StackProtocol):
    """Top-of-stack protocol that requests keys and collects them as they arrive."""

    def __init__(self, own: 'Node', keysize: int, keynum: int):
        super().__init__(own, '')
        self.upper_protocols = []
        self.lower_protocols = []
        self.keysize = keysize  # requested key length
        self.keynum = keynum    # number of keys to request
        self.keys = []          # keys delivered via pop()
        self.counter = 0        # count of delivered keys

    def init(self):
        pass

    def pop(self, key):
        # Called by the protocol below when a key becomes available.
        self.keys.append(key)
        self.counter += 1

    def push(self):
        # Forward the key request down the protocol stack.
        self.lower_protocols[0].push(self.keysize, self.keynum)

    def received_message(self):
        pass
def model_to_graph_def(model, **kwargs):
    """Convert a caffe2 model (its init net plus main net) to a graph def.

    Extra keyword arguments are forwarded to ``nets_to_graph_def``.
    """
    return nets_to_graph_def([model.param_init_net, model.net], **kwargs)
def matting_inference(model, img, trimap):
    """Run alpha-matting inference on one (image, trimap) pair.

    Strips ground-truth-only steps ('alpha', 'ori_alpha') from the configured
    test pipeline, builds the model input, and returns the predicted alpha matte.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # run on the model's device
    # Ground-truth keys are unavailable at inference time; prune them.
    keys_to_remove = ['alpha', 'ori_alpha']
    for key in keys_to_remove:
        # Iterate over a snapshot so removing entries while looping is safe.
        for pipeline in list(cfg.test_pipeline):
            if (('key' in pipeline) and (key == pipeline['key'])):
                cfg.test_pipeline.remove(pipeline)
            if (('keys' in pipeline) and (key in pipeline['keys'])):
                pipeline['keys'].remove(key)
                # Drop the step entirely once it has no keys left to act on.
                if (len(pipeline['keys']) == 0):
                    cfg.test_pipeline.remove(pipeline)
            if (('meta_keys' in pipeline) and (key in pipeline['meta_keys'])):
                pipeline['meta_keys'].remove(key)
    test_pipeline = Compose(cfg.test_pipeline)
    data = dict(merged_path=img, trimap_path=trimap)
    data = test_pipeline(data)
    # Collate into a batch of one and scatter onto the target device.
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    with torch.no_grad():
        result = model(test_mode=True, **data)
    return result['pred_alpha']
class WordPaths_square_grid(WordPaths_all):
    """Word paths whose four letters map to unit steps on the square grid."""

    def __init__(self, alphabet):
        # Steps in alphabet order: east, north, west, south.
        d = [(1, 0), (0, 1), ((- 1), 0), (0, (- 1))]
        super().__init__(alphabet, steps=d)
    # NOTE(review): bare `_attribute` below looks like a stripped decorator
    # (likely `@lazy_attribute`) for the method that follows — confirm.
    _attribute
    def _element_classes(self):
        """Map word-data kinds to their finite square-grid path classes."""
        return {'list': FiniteWordPath_square_grid_list, 'str': FiniteWordPath_square_grid_str, 'tuple': FiniteWordPath_square_grid_tuple, 'callable_with_caching': FiniteWordPath_square_grid_callable_with_caching, 'callable': FiniteWordPath_square_grid_callable, 'iter_with_caching': FiniteWordPath_square_grid_iter_with_caching, 'iter': FiniteWordPath_square_grid_iter}

    def __repr__(self):
        return 'Word Paths on the square grid'
def add_stderr_logger(level=logging.DEBUG):
    """Attach a stderr StreamHandler to this module's logger.

    Args:
        level: logging level applied to the module logger (default DEBUG).

    Returns:
        The handler that was attached, so callers can remove it later.
    """
    module_logger = logging.getLogger(__name__)
    stderr_handler = logging.StreamHandler()
    fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    stderr_handler.setFormatter(fmt)
    module_logger.addHandler(stderr_handler)
    module_logger.setLevel(level)
    module_logger.debug('Added a stderr logging handler to logger: %s', __name__)
    return stderr_handler
class DualObjectsCategory(CovariantConstructionCategory):
    """Covariant construction category for dual objects."""
    # Name of the functor this construction category is attached to.
    _functor_category = 'DualObjects'

    def _repr_object_names(self):
        """Describe objects as duals of the base category's objects."""
        base_names = self.base_category()._repr_object_names()
        return 'duals of %s' % base_names
def all_ids(scene_class):
    """Load and shuffle the train/test id lists for *scene_class*.

    Reads ``id_train.txt`` and ``id_test.txt`` from the dataset folder and
    shuffles each list in place with the module-level random state ``rs``.
    Returns (ids_train, ids_test).
    """
    dataset_dir = './datasets/{}'.format(scene_class)

    def _load(fname):
        with open(osp.join(dataset_dir, fname), 'r') as fp:
            ids = [line.strip() for line in fp.readlines() if line]
        rs.shuffle(ids)
        return ids

    return (_load('id_train.txt'), _load('id_test.txt'))
class PytorchRandomCropFlipImagePipeline(BaseImagePipeline):
    """Augmentation pipeline: random horizontal flip followed by a random crop.

    Inputs are expected *extra_pixels* larger than the output so the random
    crop has room to move; outputs are finalized with a center crop.
    """

    def __init__(self, output_image_size: int, extra_pixels: int = 0):
        super(PytorchRandomCropFlipImagePipeline, self).__init__(output_image_size)
        self.extra_pixels = extra_pixels  # crop margin around the target size
        self.random_crop = RandomCrop(self.output_image_size)
        self.random_flip = RandomHorizontalFlip(0.5)
        self.center_crop = CenterCrop(self.output_image_size)

    def get_image_input_size(self) -> int:
        """Input side length: output size plus the crop margin."""
        return self.output_image_size + self.extra_pixels

    def image_input_manipulation(self, images: Tensor) -> Tensor:
        """Apply the stochastic augmentations: flip, then crop."""
        return self.random_crop(self.random_flip(images))

    def image_output_finalize(self, images: Tensor) -> Tensor:
        """Deterministically crop back to the target output size."""
        return self.center_crop(images)
class Softmax(BaseActivation):
    """Softmax activation with a Jacobian-vector-product gradient."""

    def __init__(self):
        super(Softmax, self).__init__('Softmax')

    # BUG FIX: `output` and `gradient` take no `self` yet were undecorated, so
    # instance calls (e.g. Softmax().output(x)) bound the signal to `self`.
    # `@staticmethod` keeps the existing class-level calls working and makes
    # instance-level calls correct as well.
    @staticmethod
    def output(signal: np.ndarray) -> np.ndarray:
        """Row-wise softmax of *signal* (softmax over axis=1)."""
        return special.softmax(signal, axis=1)

    @staticmethod
    def gradient(signal: np.ndarray, direction: np.ndarray) -> np.ndarray:
        """Product of the softmax Jacobian at *signal* with *direction*.

        Computes s * (d - sum(s * d, axis=1)) using transposes for row-wise
        broadcasting.
        """
        output = Softmax.output(signal)
        return (output * (direction.T - (output * direction).sum(axis=1)).T)
def bn_self_folding_resblock(x, i, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1), channel_last=False, name='convblock'):
    """Residual block whose batch-norm is replaced by a folded scale/bias pair.

    Bias-free convolution -> per-channel affine (a*h + b) standing in for the
    folded BN -> residual add with the input -> ReLU.

    NOTE(review): parameter `i` is unused here — presumably a block index kept
    for signature compatibility with sibling builders; confirm.
    """
    h = x
    with nn.parameter_scope(name):
        h = PF.convolution(h, maps, kernel=kernel, pad=pad, stride=stride, channel_last=channel_last, with_bias=False)
        # Folded-BN replacement: one scale and bias along the channel axes.
        axes = get_channel_axes(h, channel_last)
        (a, b) = create_scale_bias(1, h.shape, axes=axes)
        h = ((a * h) + b)
    return F.relu((h + x))
class LFW(FaceDataset):
    """Labeled Faces in the Wild dataset restricted to the official dev split."""

    def __init__(self, root: str, mode: str = 'train', transform=None) -> None:
        super().__init__(root, mode, transform)
        # Keep only identities listed in the split file for this mode.
        split_identities = self.read_split_file(root, mode)
        self.reduce_to_sample_identities(split_identities)
        self.num_classes = len(np.unique(self.ids))
        print(f' [{mode.capitalize()}] Number of Identities: {self.num_classes} Number of Images: {len(self.ids)}')

    def read_split_file(self, root: str, mode: str = 'train'):
        """Return the identity names listed in the peopleDev{Train,Test} file."""
        split_name = 'peopleDevTrain.txt' if mode == 'train' else 'peopleDevTest.txt'
        split_file = Path(root) / split_name
        assert split_file.exists()
        # First line is a count; each following line is "<identity> <n_images>".
        with open(split_file) as f:
            lines = f.read().splitlines()[1:]
        return [line.split()[0] for line in lines]
# NOTE(review): the bare call below looks like a stripped registration
# decorator (likely `@DATASETS.register_module()` or similar) for the class
# that follows — confirm against the original source.
_module()
class VQAv2Dataset(MInstrDataset):
    """VQAv2 instruction dataset producing image + question/answer conversations."""

    def __init__(self, *args, has_annotation=True, **kwargs):
        super().__init__(*args, **kwargs, placeholders=(IMAGE_PLACEHOLDER, QUESTION_PLACEHOLDER))
        # Test splits ship without annotations; answers then fall back to 'UNKNOWN'.
        self.has_annotation = has_annotation

    def __getitem__(self, index):
        item = self.get_raw_item(index)
        image = self.get_image(image_path=item['image_path'])
        final_question = self.get_template().replace(QUESTION_PLACEHOLDER, item['question'])
        if self.has_annotation:
            final_answer = item['annotation']['multiple_choice_answer']
        else:
            final_answer = 'UNKNOWN'
        return {
            'image': image,
            'conversations': [
                {'from': 'human', 'value': final_question},
                {'from': 'gpt', 'value': f'The answer is {final_answer}.'},
            ],
        }
class Partition12(nn.Module):
    """Auto-generated pipeline-parallel partition covering T5 decoder blocks 15-17.

    Part of a machine-generated split of a T5ForConditionalGeneration model:
    this partition owns the traced sublayers listed in LAYER_SCOPES and its
    `forward` replays their dataflow through flat `t_*` temporaries.
    Generated code — do not hand-edit the trace.
    """
    # Fully-qualified scopes of the traced submodules this partition owns;
    # __init__ registers them in order as l_0 .. l_56.
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
                    'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
                    'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
                    'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerFF[2]/Dropout[dropout]']
    # Free-standing parameters/buffers owned by this partition (none here).
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:12'):
        """Bind the traced layers/tensors to this partition and move it to `device`.

        Args:
            layers: Mapping from layer-scope string to the live nn.Module.
            tensors: Mapping from tensor-scope string to a Parameter or buffer.
            device: Target device for the whole partition (default 'cuda:12',
                i.e. the 13th GPU of the pipeline — presumably one GPU per
                partition; confirm against the partitioning config).
        """
        super().__init__()
        # Register each owned layer as a submodule named l_<idx>.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Register owned tensors as parameters (p_<i>) or buffers (b_<i>).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # forward expects four positional inputs (see the unflatten call).
        self.input_structure = [1, 1, 1, 1]
        # Maps the generated l_<idx> names back to original state-dict-style
        # module paths; used by the state_dict/load_state_dict helpers below.
        self.lookup = {'l_0': 'decoder.15.0.layer_norm', 'l_1': 'decoder.15.0.SelfAttention.q', 'l_2': 'decoder.15.0.SelfAttention.k', 'l_3': 'decoder.15.0.SelfAttention.v', 'l_4': 'decoder.15.0.SelfAttention.dropout', 'l_5': 'decoder.15.0.SelfAttention.o', 'l_6': 'decoder.15.0.dropout', 'l_7': 'decoder.15.1.layer_norm', 'l_8': 'decoder.15.1.EncDecAttention.q', 'l_9': 'decoder.15.1.EncDecAttention.k', 'l_10': 'decoder.15.1.EncDecAttention.v', 'l_11': 'decoder.15.1.EncDecAttention.dropout', 'l_12': 'decoder.15.1.EncDecAttention.o', 'l_13': 'decoder.15.1.dropout', 'l_14': 'decoder.15.2.layer_norm', 'l_15': 'decoder.15.2.DenseReluDense.wi', 'l_16': 'decoder.15.2.DenseReluDense.dropout', 'l_17': 'decoder.15.2.DenseReluDense.wo', 'l_18': 'decoder.15.2.dropout', 'l_19': 'decoder.16.0.layer_norm', 'l_20': 'decoder.16.0.SelfAttention.q', 'l_21': 'decoder.16.0.SelfAttention.k', 'l_22': 'decoder.16.0.SelfAttention.v', 'l_23': 'decoder.16.0.SelfAttention.dropout', 'l_24': 'decoder.16.0.SelfAttention.o', 'l_25': 'decoder.16.0.dropout', 'l_26': 'decoder.16.1.layer_norm', 'l_27': 'decoder.16.1.EncDecAttention.q', 'l_28': 'decoder.16.1.EncDecAttention.k', 'l_29': 'decoder.16.1.EncDecAttention.v', 'l_30': 'decoder.16.1.EncDecAttention.dropout', 'l_31': 'decoder.16.1.EncDecAttention.o', 'l_32': 'decoder.16.1.dropout', 'l_33': 'decoder.16.2.layer_norm', 'l_34': 'decoder.16.2.DenseReluDense.wi', 'l_35': 'decoder.16.2.DenseReluDense.dropout', 'l_36': 'decoder.16.2.DenseReluDense.wo', 'l_37': 'decoder.16.2.dropout', 'l_38': 'decoder.17.0.layer_norm', 'l_39': 'decoder.17.0.SelfAttention.q', 'l_40': 'decoder.17.0.SelfAttention.k', 'l_41': 'decoder.17.0.SelfAttention.v', 'l_42': 'decoder.17.0.SelfAttention.dropout', 'l_43': 'decoder.17.0.SelfAttention.o', 'l_44': 'decoder.17.0.dropout', 'l_45': 'decoder.17.1.layer_norm', 'l_46': 'decoder.17.1.EncDecAttention.q', 'l_47': 'decoder.17.1.EncDecAttention.k', 'l_48': 'decoder.17.1.EncDecAttention.v', 'l_49': 'decoder.17.1.EncDecAttention.dropout',
                       'l_50': 'decoder.17.1.EncDecAttention.o', 'l_51': 'decoder.17.1.dropout', 'l_52': 'decoder.17.2.layer_norm', 'l_53': 'decoder.17.2.DenseReluDense.wi', 'l_54': 'decoder.17.2.DenseReluDense.dropout', 'l_55': 'decoder.17.2.DenseReluDense.wo', 'l_56': 'decoder.17.2.dropout'}
        self.to(self.device)

    def forward(self, *args):
        """Run decoder blocks 15-17 on the flattened inputs.

        Inputs, by observed usage: x0 feeds the EncDecAttention k/v projections
        (presumably encoder hidden states — confirm); x1 is added to the
        self-attention scores and x2 to the cross-attention scores (presumably
        position bias / attention mask tensors — confirm); x3 is the decoder
        hidden state.  Returns (x0, x1, x2, new_hidden) flattened so the next
        partition receives the same pass-through tensors.
        """
        (x0, x1, x2, x3) = unflatten(args, self.input_structure)
        # Precompute the cross-attention key/value projections of x0 for all
        # three blocks up front (l_9/l_10: block 15, l_28/l_29: block 16,
        # l_47/l_48: block 17).
        t_0 = self.l_9(x0)
        t_1 = self.l_10(x0)
        t_2 = self.l_28(x0)
        t_3 = self.l_29(x0)
        t_4 = self.l_47(x0)
        t_5 = self.l_48(x0)
        # --- Block 15: self-attention (l_0..l_6) ---
        t_6 = self.l_0(x3)
        t_7 = t_6.size()
        t_8 = self.l_1(t_6)
        t_9 = self.l_2(t_6)
        t_6 = self.l_3(t_6)
        t_7 = t_7[0]
        # Split projections into 32 heads of size 128:
        # (batch, seq, 4096) -> (batch, 32, seq, 128).
        t_8 = t_8.view(t_7, (- 1), 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_9 = t_9.view(t_7, (- 1), 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_6 = t_6.view(t_7, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_9 = t_9.transpose(3, 2)
        t_9 = torch.matmul(t_8, t_9)
        # Add the additive bias/mask, softmax in float32 for stability, dropout.
        t_9 += x1
        t_8 = t_9.float()
        t_8 = torch.nn.functional.softmax(t_8, dim=(- 1), _stacklevel=3, dtype=None)
        t_9 = t_8.type_as(t_9)
        t_9 = self.l_4(t_9)
        t_6 = torch.matmul(t_9, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_7 = t_6.view(t_7, (- 1), 4096)
        t_7 = self.l_5(t_7)
        t_7 = self.l_6(t_7)
        # Residual connection.
        t_7 = (x3 + t_7)
        # --- Block 15: cross-attention (l_7..l_13) ---
        t_6 = self.l_7(t_7)
        t_9 = t_6.size()
        t_6 = self.l_8(t_6)
        t_9 = t_9[0]
        t_6 = t_6.view(t_9, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_0 = t_0.view(t_9, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_1 = t_1.view(t_9, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_6, t_0)
        t_0 += x2
        t_6 = t_0.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_0 = t_6.type_as(t_0)
        t_0 = self.l_11(t_0)
        t_1 = torch.matmul(t_0, t_1)
        t_1 = t_1.transpose(1, 2)
        t_1 = t_1.contiguous()
        t_9 = t_1.view(t_9, (- 1), 4096)
        t_9 = self.l_12(t_9)
        t_9 = self.l_13(t_9)
        t_9 = (t_7 + t_9)
        # --- Block 15: feed-forward (l_14..l_18) ---
        t_7 = self.l_14(t_9)
        t_7 = self.l_15(t_7)
        t_7 = torch.nn.functional.relu(t_7, inplace=False)
        t_7 = self.l_16(t_7)
        t_7 = self.l_17(t_7)
        t_7 = self.l_18(t_7)
        t_7 = (t_9 + t_7)
        # --- Block 16: self-attention (l_19..l_25) ---
        t_9 = self.l_19(t_7)
        t_1 = t_9.size()
        t_0 = self.l_20(t_9)
        t_6 = self.l_21(t_9)
        t_9 = self.l_22(t_9)
        t_1 = t_1[0]
        t_0 = t_0.view(t_1, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_6 = t_6.view(t_1, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_9 = t_9.view(t_1, (- 1), 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_6 = t_6.transpose(3, 2)
        t_6 = torch.matmul(t_0, t_6)
        t_6 += x1
        t_0 = t_6.float()
        t_0 = torch.nn.functional.softmax(t_0, dim=(- 1), _stacklevel=3, dtype=None)
        t_6 = t_0.type_as(t_6)
        t_6 = self.l_23(t_6)
        t_9 = torch.matmul(t_6, t_9)
        t_9 = t_9.transpose(1, 2)
        t_9 = t_9.contiguous()
        t_1 = t_9.view(t_1, (- 1), 4096)
        t_1 = self.l_24(t_1)
        t_1 = self.l_25(t_1)
        t_1 = (t_7 + t_1)
        # --- Block 16: cross-attention (l_26..l_32) ---
        t_7 = self.l_26(t_1)
        t_9 = t_7.size()
        t_7 = self.l_27(t_7)
        t_9 = t_9[0]
        t_7 = t_7.view(t_9, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_2 = t_2.view(t_9, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_9, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_7, t_2)
        t_2 += x2
        t_7 = t_2.float()
        t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_7.type_as(t_2)
        t_2 = self.l_30(t_2)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_9 = t_3.view(t_9, (- 1), 4096)
        t_9 = self.l_31(t_9)
        t_9 = self.l_32(t_9)
        t_9 = (t_1 + t_9)
        # --- Block 16: feed-forward (l_33..l_37) ---
        t_1 = self.l_33(t_9)
        t_1 = self.l_34(t_1)
        t_1 = torch.nn.functional.relu(t_1, inplace=False)
        t_1 = self.l_35(t_1)
        t_1 = self.l_36(t_1)
        t_1 = self.l_37(t_1)
        t_1 = (t_9 + t_1)
        # --- Block 17: self-attention (l_38..l_44) ---
        t_9 = self.l_38(t_1)
        t_3 = t_9.size()
        t_2 = self.l_39(t_9)
        t_7 = self.l_40(t_9)
        t_9 = self.l_41(t_9)
        t_3 = t_3[0]
        t_2 = t_2.view(t_3, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_7 = t_7.view(t_3, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_9 = t_9.view(t_3, (- 1), 32, 128)
        t_9 = t_9.transpose(1, 2)
        t_7 = t_7.transpose(3, 2)
        t_7 = torch.matmul(t_2, t_7)
        t_7 += x1
        t_2 = t_7.float()
        t_2 = torch.nn.functional.softmax(t_2, dim=(- 1), _stacklevel=3, dtype=None)
        t_7 = t_2.type_as(t_7)
        t_7 = self.l_42(t_7)
        t_9 = torch.matmul(t_7, t_9)
        t_9 = t_9.transpose(1, 2)
        t_9 = t_9.contiguous()
        t_3 = t_9.view(t_3, (- 1), 4096)
        t_3 = self.l_43(t_3)
        t_3 = self.l_44(t_3)
        t_3 = (t_1 + t_3)
        # --- Block 17: cross-attention (l_45..l_51) ---
        t_1 = self.l_45(t_3)
        t_9 = t_1.size()
        t_1 = self.l_46(t_1)
        t_9 = t_9[0]
        t_1 = t_1.view(t_9, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_4 = t_4.view(t_9, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_9, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_1, t_4)
        t_4 += x2
        t_1 = t_4.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_4 = t_1.type_as(t_4)
        t_4 = self.l_49(t_4)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_9 = t_5.view(t_9, (- 1), 4096)
        t_9 = self.l_50(t_9)
        t_9 = self.l_51(t_9)
        t_9 = (t_3 + t_9)
        # --- Block 17: feed-forward (l_52..l_56) ---
        t_3 = self.l_52(t_9)
        t_3 = self.l_53(t_3)
        t_3 = torch.nn.functional.relu(t_3, inplace=False)
        t_3 = self.l_54(t_3)
        t_3 = self.l_55(t_3)
        t_3 = self.l_56(t_3)
        t_3 = (t_9 + t_3)
        # Pass x0/x1/x2 through unchanged alongside the new hidden state.
        return list(flatten((x0, x1, x2, t_3)))

    # The following overrides delegate to module-level partition-aware helpers
    # (defined elsewhere in the generated file) that translate between the
    # l_<idx> names and the original module paths via self.lookup.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def test_array_constructors():
    """Exercise the extension-side array constructors exposed by ``m.test_array_ctors``.

    Constructor codes 1x and 2x are expected to produce the 2-D (3, 2) int32
    array; codes 3x and 4x the flat 1-D array of the same six values.
    """
    flat = np.arange(1, 7, dtype='int32')
    two_dim = flat.reshape((3, 2))
    for offset in range(8):
        np.testing.assert_array_equal(m.test_array_ctors(10 + offset), two_dim)
        np.testing.assert_array_equal(m.test_array_ctors(20 + offset), two_dim)
    for offset in range(5):
        np.testing.assert_array_equal(m.test_array_ctors(30 + offset), flat)
        np.testing.assert_array_equal(m.test_array_ctors(40 + offset), flat)
def argsort(items, key=(lambda x: x), reverse=False):
    """Sort ``items`` and return the permutations linking original and sorted order.

    Args:
        items: Iterable of values to sort.
        key: Sort key applied to each item (default: identity).
        reverse: Sort in descending order when True.

    Returns:
        A tuple ``(sorted_items, sort_to_orig, orig_to_sort)`` where
        ``sorted_items[i] == items[orig_to_sort[i]]`` and ``sort_to_orig[j]``
        is the position of ``items[j]`` in the sorted output.  All three are
        tuples; an empty input yields three empty tuples.
    """
    decorated = sorted(enumerate(items), key=(lambda pair: key(pair[1])), reverse=reverse)
    if not decorated:
        # Bug fix: ``zip(*[])`` yields nothing, so the two-target unpacking
        # below raised ValueError on empty input.
        return ((), (), ())
    (orig_to_sort, sorted_items) = zip(*decorated)
    # Invert the permutation: sort positions back to original indices.
    sort_to_orig = tuple((x[0] for x in sorted(enumerate(orig_to_sort), key=operator.itemgetter(1))))
    return (sorted_items, sort_to_orig, orig_to_sort)
class Vocab():
    """Builds, persists, and serves vocabularies and their TF lookup ops.

    Vocab files (token<TAB>count, one per line) live under
    ``<save_dir>/assets.extra``.  Also maintains, for vocabs that declare
    ``label_components`` in the data config, index maps from a joint label id
    to each component label id.
    """

    def __init__(self, data_config, save_dir, data_filenames=None):
        self.data_config = data_config
        self.save_dir = save_dir
        # map_name -> np.int32 array mapping joint label idx -> component idx
        self.joint_label_lookup_maps = {}
        # vocab name -> {index: token}
        self.reverse_maps = {}
        # vocab name -> {token: index}
        self.vocab_maps = {}
        # Lazily created tf lookup ops (see get_lookup_ops).
        self.vocab_lookups = None
        # vocab name -> whether it reserves an OOV bucket
        self.oovs = {}
        self.vocabs_dir = ('%s/assets.extra' % save_dir)
        if (not os.path.exists(self.vocabs_dir)):
            try:
                os.mkdir(self.vocabs_dir)
            except OSError as e:
                util.fatal_error(('Failed to create vocabs directory: %s; %s' % (self.vocabs_dir, e.strerror)))
            else:
                tf.logging.log(tf.logging.INFO, ('Successfully created vocabs directory: %s' % self.vocabs_dir))
        else:
            tf.logging.log(tf.logging.INFO, ('Using vocabs directory: %s' % self.vocabs_dir))
        # Eagerly build (or load) the vocab files and record their sizes.
        self.vocab_names_sizes = self.make_vocab_files(self.data_config, self.save_dir, data_filenames)
    # NOTE(review): the bare string below is a no-op class-level statement, not
    # a docstring — it documents the method that follows it.
    '\n Creates tf.contrib.lookup ops for all the vocabs defined in self.data_config.\n \n Args: \n word_embedding_file: File containing word embedding vocab, with words in the first space-separated column\n \n Returns:\n Map from vocab names to tf.contrib.lookup ops, map from vocab names to vocab sizes\n '

    def create_vocab_lookup_ops(self, embedding_files=None):
        # Lookup tables are pinned to CPU.
        with tf.device('/cpu:0'):
            vocab_lookup_ops = {}
            for v in self.vocab_names_sizes.keys():
                if (v in self.data_config):
                    # One OOV bucket only when the config marks this vocab as OOV-capable.
                    num_oov = (1 if (('oov' in self.data_config[v]) and self.data_config[v]['oov']) else 0)
                    this_lookup = tf.contrib.lookup.index_table_from_file(('%s/%s.txt' % (self.vocabs_dir, v)), num_oov_buckets=num_oov, key_column_index=0)
                    vocab_lookup_ops[v] = this_lookup
            if embedding_files:
                # NOTE(review): expects an iterable of file paths; a bare string
                # passed here would be iterated character-by-character — see
                # get_lookup_ops below, which forwards a single file. Confirm
                # callers pass a list.
                for embedding_file in embedding_files:
                    embeddings_name = embedding_file
                    vocab_lookup_ops[embeddings_name] = tf.contrib.lookup.index_table_from_file(embedding_file, num_oov_buckets=1, key_column_index=0, delimiter=' ')
                    self.vocab_names_sizes[embeddings_name] = vocab_lookup_ops[embeddings_name].size()
            tf.logging.log(tf.logging.INFO, ('Created %d vocab lookup ops: %s' % (len(vocab_lookup_ops), str([k for k in vocab_lookup_ops.keys()]))))
        return vocab_lookup_ops
    # NOTE(review): bare string statement, not a docstring (documents the next method).
    '\n Gets the cached vocab ops for the given datafile, creating them if they already exist.\n This is needed in order to avoid re-creating duplicate lookup ops for each dataset input_fn, \n since the lookup ops need to be called lazily from the input_fn in order to end up in the same tf.Graph.\n \n Args:\n word_embedding_file: (Optional) file containing word embedding vocab, with words in the first space-separated column\n \n Returns:\n Map from vocab names to tf.contrib.lookup ops.\n \n '

    def get_lookup_ops(self, word_embedding_file=None):
        # Create once, cache thereafter (ops must land in the caller's tf.Graph).
        if (self.vocab_lookups is None):
            self.vocab_lookups = self.create_vocab_lookup_ops(word_embedding_file)
        return self.vocab_lookups
    # NOTE(review): bare string statement, not a docstring (documents the next method).
    '\n Generates vocab files with counts for all the data with the vocab key\n set to True in data_config. Assumes the input file is in CoNLL format.\n \n Args:\n filename: Name of data file to generate vocab files from\n data_config: Data configuration map\n \n Returns:\n Map from vocab names to their sizes\n '

    def create_load_or_update_vocab_files(self, data_config, save_dir, filenames=None, update_only=False):
        vocabs = []
        vocabs_index = {}
        # Select which configured vocabs to (re)build: all of them on a fresh
        # build, only the 'updatable' ones when update_only is set.
        for d in data_config:
            updatable = (('updatable' in data_config[d]) and data_config[d]['updatable'])
            if (('vocab' in data_config[d]) and (data_config[d]['vocab'] == d) and (updatable or (not update_only))):
                this_vocab = {}
                # Updating: start from the existing counts map.
                if (update_only and updatable and (d in self.vocab_maps)):
                    this_vocab = self.vocab_maps[d]
                vocabs.append(this_vocab)
                vocabs_index[d] = len(vocabs_index)
        if filenames:
            # Accumulate token counts from CoNLL-format data files.
            for filename in filenames:
                with open(filename, 'r') as f:
                    for line in f:
                        line = line.strip()
                        if line:
                            split_line = line.split()
                            for d in vocabs_index.keys():
                                datum_idx = data_config[d]['conll_idx']
                                this_vocab_map = vocabs[vocabs_index[d]]
                                # Each vocab may declare a converter that extracts
                                # its datum/data from the CoNLL columns.
                                converter_name = (data_config[d]['converter']['name'] if ('converter' in data_config[d]) else 'default_converter')
                                converter_params = data_converters.get_params(data_config[d], split_line, datum_idx)
                                this_data = data_converters.dispatch(converter_name)(**converter_params)
                                for this_datum in this_data:
                                    if (this_datum not in this_vocab_map):
                                        this_vocab_map[this_datum] = 0
                                    this_vocab_map[this_datum] += 1
        else:
            # No data files: load previously written token<TAB>count files.
            for d in vocabs_index.keys():
                this_vocab_map = vocabs[vocabs_index[d]]
                with open(('%s/%s.txt' % (self.vocabs_dir, d)), 'r') as f:
                    for line in f:
                        (datum, count) = line.strip().split()
                        this_vocab_map[datum] = int(count)
        # Build forward/reverse index maps and per-vocab OOV flags.
        for v in vocabs_index.keys():
            this_counts_map = vocabs[vocabs_index[v]]
            this_map = dict(zip(this_counts_map.keys(), range(len(this_counts_map.keys()))))
            reverse_map = dict(zip(range(len(this_counts_map.keys())), this_counts_map.keys()))
            self.oovs[v] = False
            if (('oov' in self.data_config[v]) and self.data_config[v]['oov']):
                self.oovs[v] = True
            self.reverse_maps[v] = reverse_map
            self.vocab_maps[v] = this_map
            # For joint-label vocabs, build per-component lookup arrays mapping
            # each joint label index to the index of its component label.
            if ('label_components' in self.data_config[v]):
                joint_vocab_map = vocabs[vocabs_index[v]]
                label_components = self.data_config[v]['label_components']
                component_keys = [vocabs[vocabs_index[d]].keys() for d in label_components]
                component_maps = [dict(zip(comp_keys, range(len(comp_keys)))) for comp_keys in component_keys]
                map_names = [('%s_to_%s' % (v, label_comp)) for label_comp in label_components]
                joint_to_comp_maps = [np.zeros([len(joint_vocab_map), 1], dtype=np.int32) for _ in label_components]
                for (joint_idx, joint_label) in enumerate(joint_vocab_map.keys()):
                    # Joint labels are component labels joined by JOINT_LABEL_SEP.
                    split_label = joint_label.split(constants.JOINT_LABEL_SEP)
                    for (label_comp, comp_map, joint_to_comp_map) in zip(split_label, component_maps, joint_to_comp_maps):
                        comp_idx = comp_map[label_comp]
                        joint_to_comp_map[joint_idx] = comp_idx
                for (map_name, joint_to_comp_map) in zip(map_names, joint_to_comp_maps):
                    self.joint_label_lookup_maps[map_name] = joint_to_comp_map
        # Persist the (possibly updated) counts back to the vocab files.
        for d in vocabs_index.keys():
            this_vocab_map = vocabs[vocabs_index[d]]
            with open(('%s/%s.txt' % (self.vocabs_dir, d)), 'w') as f:
                for (k, v) in this_vocab_map.items():
                    print(('%s\t%d' % (k, v)), file=f)
        return {k: len(vocabs[vocabs_index[k]]) for k in vocabs_index.keys()}

    def make_vocab_files(self, data_config, save_dir, filenames=None):
        # Fresh build (update_only=False).
        return self.create_load_or_update_vocab_files(data_config, save_dir, filenames, False)

    def update(self, filenames):
        # Incremental update of the 'updatable' vocabs from new data files.
        vocab_names_sizes = self.create_load_or_update_vocab_files(self.data_config, self.save_dir, filenames, True)
        for (vocab_name, vocab_size) in vocab_names_sizes.items():
            self.vocab_names_sizes[vocab_name] = vocab_size
def analyze_results(results_dir: str, data_path: str, dormant_unit_threshold: float=0.01):
    """Post-hoc analysis of a class-incremental CIFAR experiment.

    For each experiment index found in ``experiment_indices.npy``, loads the
    checkpoint saved every 200 epochs (20 tasks of 5 classes each) and records:
    average weight magnitude, the proportion of dormant units, and the
    effective/stable rank of the last-layer features — both on the current
    task's classes ("after") and on all previously seen classes ("before").
    Finally loads the last checkpoint, computes per-class accuracy, and stores
    everything via ``store_analysis_results``.

    Args:
        results_dir: Directory containing ``model_parameters``,
            ``experiment_indices.npy``, and ``class_order``.
        data_path: Path to the CIFAR data on disk.
        dormant_unit_threshold: Activation threshold below which a unit counts
            as dormant.
    """
    parameter_dir_path = os.path.join(results_dir, 'model_parameters')
    experiment_indices_file_path = os.path.join(results_dir, 'experiment_indices.npy')
    class_order_dir_path = os.path.join(results_dir, 'class_order')
    device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
    # Checkpoints were saved at epochs 0, 200, ..., 4000 (21 points).
    number_of_epochs = (np.arange(21) * 200)
    classes_per_task = 5
    last_epoch = 4000
    experiment_indices = np.load(experiment_indices_file_path)
    # num_classes=100 => presumably CIFAR-100; confirm against the training script.
    net = build_resnet18(num_classes=100, norm_layer=torch.nn.BatchNorm2d)
    net.to(device)
    (cifar_data, cifar_data_loader) = load_cifar_data(data_path, train=True)
    (test_data, test_data_loader) = load_cifar_data(data_path, train=False)
    for exp_index in tqdm(experiment_indices):
        # Each experiment has its own random ordering of the classes.
        ordered_classes = load_classes(class_order_dir_path, index=exp_index)
        # One entry per task (20 = 21 checkpoints minus the final one).
        average_weight_magnitude_per_epoch = np.zeros((number_of_epochs.size - 1), dtype=np.float32)
        dormant_units_prop_before = np.zeros_like(average_weight_magnitude_per_epoch)
        effective_rank_before = np.zeros_like(average_weight_magnitude_per_epoch)
        stable_rank_before = np.zeros_like(average_weight_magnitude_per_epoch)
        dormant_units_prop_after = np.zeros_like(average_weight_magnitude_per_epoch)
        effective_rank_after = np.zeros_like(average_weight_magnitude_per_epoch)
        stable_rank_after = np.zeros_like(average_weight_magnitude_per_epoch)
        for (i, epoch_number) in enumerate(number_of_epochs[:(- 1)]):
            model_parameters = load_model_parameters(parameter_dir_path, index=exp_index, epoch_number=epoch_number)
            net.load_state_dict(model_parameters)
            average_weight_magnitude_per_epoch[i] = compute_average_weight_magnitude(net)
            # "After" statistics: evaluated on the current task's classes.
            current_classes = ordered_classes[(i * classes_per_task):((i + 1) * classes_per_task)]
            cifar_data.select_new_partition(current_classes)
            (prop_dormant, last_layer_features) = compute_dormant_units_proportion(net, cifar_data_loader, dormant_unit_threshold)
            dormant_units_prop_after[i] = prop_dormant
            singular_values = svd(last_layer_features, compute_uv=False, lapack_driver='gesvd')
            effective_rank_after[i] = compute_effective_rank(singular_values)
            stable_rank_after[i] = compute_stable_rank(singular_values)
            # "Before" statistics need previously-seen classes; none exist at task 0.
            if (i == 0):
                continue
            current_classes = ordered_classes[:(i * classes_per_task)]
            cifar_data.select_new_partition(current_classes)
            (prop_dormant, last_layer_features) = compute_dormant_units_proportion(net, cifar_data_loader, dormant_unit_threshold)
            dormant_units_prop_before[i] = prop_dormant
            singular_values = svd(last_layer_features, compute_uv=False, lapack_driver='gesvd')
            effective_rank_before[i] = compute_effective_rank(singular_values)
            stable_rank_before[i] = compute_stable_rank(singular_values)
        # Final-model accuracy per class, in the order the classes were learned.
        net.load_state_dict(load_model_parameters(parameter_dir_path, exp_index, last_epoch))
        accuracy_per_class_in_order = compute_last_task_accuracy_per_class_in_order(net, ordered_classes, test_data_loader, exp_index)
        store_analysis_results(weight_magnitude_results=average_weight_magnitude_per_epoch, dormant_units_results=(dormant_units_prop_before, dormant_units_prop_after), effective_rank_results=(effective_rank_before, effective_rank_after), stable_rank_results=(stable_rank_before, stable_rank_after), accuracy_per_class_in_order=accuracy_per_class_in_order, results_dir=results_dir, experiment_index=exp_index)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.