code stringlengths 101 5.91M |
|---|
# NOTE(review): this line looks like a decorator that lost its leading "@" (and
# possibly a name prefix, e.g. `@array_function_dispatch(_around_dispatcher)`)
# during extraction -- as written it is a bare call evaluated at import time.
_function_dispatch(_around_dispatcher)
def round_(a, decimals=0, out=None):
    """Round `a` to `decimals` places; thin alias delegating to `around`."""
    return around(a, decimals=decimals, out=out)
# NOTE(review): `.parametrize(...)` appears to be a `@pytest.mark.parametrize`
# decorator whose `@pytest.mark` prefix was lost in extraction; as written this
# line is a syntax error at module level.
.parametrize('numeric,subtypes', [(complex, [complex, float, int, bool]), (float, [float, int, bool]), (int, [int, bool]), (bool, [bool])])
def test_numeric_tower(type_system, numeric, subtypes):
    """Each numeric type's tower must list exactly its subtypes (complex > float > int > bool)."""
    # `type_system` is presumably a pytest fixture -- TODO confirm against conftest.
    assert (type_system.numeric_tower[type_system.convert_type_hint(numeric)] == [type_system.convert_type_hint(typ) for typ in subtypes])
class EM1D_FD_Jacobian_Test_CircularLoop(unittest.TestCase):
    """Derivative checks (Jvec / Jtvec) for a 1D layered frequency-domain EM
    simulation sourced by a circular loop, where conductivity, permeability,
    layer thicknesses and source height are all (log-scaled) model parameters.
    """

    def setUp(self):
        """Build the layered-earth survey and simulation shared by both tests."""
        # 5 fine near-surface layers plus 10 coarser deep layers.
        nearthick = np.logspace((- 1), 1, 5)
        deepthick = np.logspace(1, 2, 10)
        thicknesses = np.r_[(nearthick, deepthick)]
        topo = np.r_[(0.0, 0.0, 100.0)]
        # Source/receiver sit a (numerically) small height above the topo surface.
        height = 1e-05
        src_location = np.array([0.0, 0.0, (100.0 + height)])
        rx_location = np.array([0.0, 0.0, (100.0 + height)])
        frequencies = np.logspace(1, 8, 9)
        orientations = ['x', 'y', 'z']
        components = ['real', 'imag', 'both']
        # Loop current (A) and radius (m).
        I = 1.0
        a = 10.0
        source_list = []
        # One circular-loop source per frequency, each observed by every
        # orientation/component combination of secondary-field receivers.
        for f in frequencies:
            receiver_list = []
            for rx_orientation in orientations:
                for comp in components:
                    receiver_list.append(fdem.receivers.PointMagneticFieldSecondary(rx_location, orientation=rx_orientation, component=comp))
            source_list.append(fdem.sources.CircularLoop(receiver_list, f, src_location, radius=a, current=I))
        survey = fdem.Survey(source_list)
        self.topo = topo
        self.survey = survey
        self.showIt = False
        self.height = height
        self.frequencies = frequencies
        self.thicknesses = thicknesses
        # n layers = n thicknesses + 1 (bottom half-space).
        self.nlayers = (len(thicknesses) + 1)
        nP = len(source_list)
        # Split the model vector into sigma / mu / thicknesses / height pieces;
        # each piece is exponentiated so the inversion works in log space.
        wire_map = maps.Wires(('sigma', self.nlayers), ('mu', self.nlayers), ('thicknesses', (self.nlayers - 1)), ('h', 1))
        self.sigma_map = (maps.ExpMap(nP=self.nlayers) * wire_map.sigma)
        self.mu_map = (maps.ExpMap(nP=self.nlayers) * wire_map.mu)
        self.thicknesses_map = (maps.ExpMap(nP=(self.nlayers - 1)) * wire_map.thicknesses)
        # Broadcast the single height value to every source.
        surject_mesh = TensorMesh([np.ones(nP)])
        self.h_map = ((maps.SurjectFull(surject_mesh) * maps.ExpMap(nP=1)) * wire_map.h)
        sim = fdem.Simulation1DLayered(survey=self.survey, sigmaMap=self.sigma_map, muMap=self.mu_map, thicknessesMap=self.thicknesses_map, hMap=self.h_map, topo=self.topo)
        self.sim = sim

    def test_EM1DFDJvec_Layers(self):
        """Finite-difference check of the forward Jacobian-vector product."""
        # Half-space background with one conductive / permeable layer (index 3).
        sigma_half = 0.01
        sigma_blk = 0.1
        sig = (np.ones(self.nlayers) * sigma_half)
        sig[3] = sigma_blk
        mu_half = mu_0
        mu_blk = (2 * mu_0)
        mu = (np.ones(self.nlayers) * mu_half)
        mu[3] = mu_blk
        # Model vector is log-parameterized, matching the ExpMap wiring in setUp.
        m_1D = np.r_[(np.log(sig), np.log(mu), np.log(self.thicknesses), np.log(self.height))]
        def fwdfun(m):
            resp = self.sim.dpred(m)
            return resp
        def jacfun(m, dm):
            Jvec = self.sim.Jvec(m, dm)
            return Jvec
        def derChk(m):
            return [fwdfun(m), (lambda mx: jacfun(m, mx))]
        dm = (m_1D * 0.5)
        passed = tests.check_derivative(derChk, m_1D, num=4, dx=dm, plotIt=False, eps=1e-15)
        self.assertTrue(passed)
        if passed:
            print('EM1DFM Circular Loop Jvec test works')

    def test_EM1DFDJtvec_Layers(self):
        """Finite-difference check of the adjoint (Jtvec) via a misfit gradient."""
        sigma_half = 0.01
        sigma_blk = 0.1
        sig = (np.ones(self.nlayers) * sigma_half)
        sig[3] = sigma_blk
        mu_half = mu_0
        mu_blk = (2 * mu_0)
        mu = (np.ones(self.nlayers) * mu_half)
        mu[3] = mu_blk
        m_true = np.r_[(np.log(sig), np.log(mu), np.log(self.thicknesses), np.log(self.height))]
        dobs = self.sim.dpred(m_true)
        # Perturbed starting model: same sigma, 1.5x mu, 0.9x log-thicknesses, half height.
        m_ini = np.r_[(np.log((np.ones(self.nlayers) * sigma_half)), np.log(((np.ones(self.nlayers) * 1.5) * mu_half)), (np.log(self.thicknesses) * 0.9), np.log((0.5 * self.height)))]
        resp_ini = self.sim.dpred(m_ini)
        dr = (resp_ini - dobs)
        def misfit(m, dobs):
            # 0.5 * ||dpred - dobs||^2 with gradient J^T dr.
            dpred = self.sim.dpred(m)
            misfit = (0.5 * (np.linalg.norm((dpred - dobs)) ** 2))
            dmisfit = self.sim.Jtvec(m, dr)
            return (misfit, dmisfit)
        def derChk(m):
            return misfit(m, dobs)
        passed = tests.check_derivative(derChk, m_ini, num=4, plotIt=False, eps=1e-27)
        self.assertTrue(passed)
        if passed:
            print('EM1DFM Circular Loop Jtvec test works')
class PyTorchBenchmark(Benchmark):
    """Benchmark implementation measuring inference/training speed and memory
    for PyTorch models, driven by `PyTorchBenchmarkArguments`.
    """

    # Populated by the Benchmark base class.
    args: PyTorchBenchmarkArguments
    configs: PretrainedConfig
    framework: str = 'PyTorch'

    def framework_version(self):
        # NOTE(review): upstream this accessor is usually a @property -- the
        # decorator may have been stripped during extraction; confirm.
        return torch.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Seconds per forward pass for the given model/shape."""
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        """Peak memory (and optional line-by-line summary) of one forward pass."""
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Seconds per forward+backward pass for the given model/shape."""
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        """Peak memory (and optional summary) of one forward+backward pass."""
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[([], None)]:
        """Build a zero-argument callable that runs one no-grad forward pass."""
        config = self.config_dict[model_name]
        if self.args.torchscript:
            config.torchscript = True
        has_model_class_in_config = (hasattr(config, 'architectures') and isinstance(config.architectures, list) and (len(config.architectures) > 0))
        if ((not self.args.only_pretrain_model) and has_model_class_in_config):
            try:
                # Instantiate the concrete architecture named in the config.
                model_class = config.architectures[0]
                transformers_module = __import__('transformers', fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(f'{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
        else:
            # Fall back to the generic base model for this config class.
            model = MODEL_MAPPING[config.__class__](config)
        model.eval()
        model.to(self.args.device)
        # Encoder-decoder configs keep vocab_size on the encoder sub-config.
        vocab_size = (config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size)
        input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)
        if self.args.fp16:
            # NOTE(review): message says "training" inside the inference path --
            # looks like a copy-paste from _prepare_train_func.
            logger.info('Running training in Mixed Precision...')
            if (not self.args.is_gpu):
                raise ValueError('Mixed precision is possible only for GPU.')
            model.half()
        if self.args.torchscript:
            with torch.no_grad():
                inference_model = torch.jit.trace(model, input_ids)
        else:
            inference_model = model

        def encoder_decoder_forward():
            with torch.no_grad():
                outputs = inference_model(input_ids, decoder_input_ids=input_ids)
            return outputs

        def encoder_forward():
            with torch.no_grad():
                outputs = inference_model(input_ids)
            return outputs
        _forward = (encoder_decoder_forward if config.is_encoder_decoder else encoder_forward)
        return _forward

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[([], None)]:
        """Build a zero-argument callable that runs one forward+backward pass."""
        config = self.config_dict[model_name]
        has_model_class_in_config = (hasattr(config, 'architectures') and isinstance(config.architectures, list) and (len(config.architectures) > 0))
        if ((not self.args.only_pretrain_model) and has_model_class_in_config):
            try:
                model_class = config.architectures[0]
                transformers_module = __import__('transformers', fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(f'{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
        else:
            # Training needs an LM head to compute a loss.
            model = MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
        if self.args.torchscript:
            raise NotImplementedError('Training for torchscript is currently not implemented')
        else:
            train_model = model
        model.train()
        model.to(self.args.device)
        vocab_size = (config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size)
        input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)
        if self.args.fp16:
            logger.info('Running training in Mixed Precision...')
            if (not self.args.is_gpu):
                raise ValueError('Mixed precision is possible only for GPU.')
            model.half()

        def compute_loss_and_backprob_encoder():
            # Language-modeling loss with the inputs reused as labels.
            loss = train_model(input_ids, labels=input_ids)[0]
            loss.backward()
            return loss

        def compute_loss_and_backprob_encoder_decoder():
            loss = train_model(input_ids, decoder_input_ids=input_ids, labels=input_ids)[0]
            loss.backward()
            return loss
        _train = (compute_loss_and_backprob_encoder_decoder if config.is_encoder_decoder else compute_loss_and_backprob_encoder)
        return _train

    def _measure_speed(self, func) -> float:
        """Time `func`; returns average seconds per call, or 'N/A' on OOM."""
        try:
            if (self.args.is_tpu or self.args.torchscript):
                # Warm-up runs so XLA/JIT compilation is excluded from timing.
                logger.info('Do inference on TPU or torchscript. Running model 5 times to stabilize compilation')
                timeit.repeat(func, repeat=1, number=5)
            # Each repeat runs the callable 10 times; report best repeat / 10.
            runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10)
            if (self.args.is_tpu and self.args.torch_xla_tpu_print_metrics):
                import torch_xla.debug.metrics as met
                self.print_fn(met.metrics_report())
            return (min(runtimes) / 10.0)
        except RuntimeError as e:
            self.print_fn(f"Doesn't fit on GPU. {e}")
            return 'N/A'

    def _measure_memory(self, func: Callable[([], None)]) -> [Memory, MemorySummary]:
        """Measure peak memory of one call to `func`.

        GPU: total device memory via py3nvml (whole-GPU, so other processes
        pollute the reading). CPU: peak RSS. TPU: unsupported.
        """
        try:
            if self.args.trace_memory_line_by_line:
                trace = start_memory_tracing('transformers')
            if self.args.is_tpu:
                raise NotImplementedError('Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with `--no-memory` or `args.memory=False`')
            elif self.args.is_gpu:
                if (not is_py3nvml_available()):
                    logger.warning("py3nvml not installed, we won't log GPU memory usage. Install py3nvml (pip install py3nvml) to log information about GPU.")
                    memory = 'N/A'
                else:
                    logger.info('Measuring total GPU usage on GPU device. Make sure to not have additional processes running on the same GPU.')
                    nvml.nvmlInit()
                    func()
                    handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                    meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                    max_bytes_in_use = meminfo.used
                    memory = Memory(max_bytes_in_use)
                    nvml.nvmlShutdown()
            else:
                memory_bytes = measure_peak_memory_cpu(func)
                memory = (Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes)
            if self.args.trace_memory_line_by_line:
                summary = stop_memory_tracing(trace)
            else:
                summary = None
            return (memory, summary)
        except RuntimeError as e:
            self.print_fn(f"Doesn't fit on GPU. {e}")
            return ('N/A', None)
class DecisionNet(nn.Module):
    """Binary decision head over a fused feature map.

    `forward(f, s)` concatenates the two inputs channel-wise (1024 + 1 = 1025
    channels), extracts 32-channel conv features, then classifies from global
    max/mean pooled statistics of both the conv features and `s` itself.
    """

    def __init__(self, init_weights=True):
        super().__init__()
        # Two 2x max-pools plus three 5x5 conv/BN/ReLU stages (8 -> 16 -> 32 ch).
        self.layer1 = nn.Sequential(
            nn.MaxPool2d(2),
            nn.Conv2d(1025, 8, 5, stride=1, padding=2),
            nn.BatchNorm2d(8),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),
            nn.Conv2d(8, 16, 5, stride=1, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.Conv2d(16, 32, 5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )
        # 32 max + 32 mean conv stats + 1 mean + 1 max from s -> 66 features.
        self.fc = nn.Sequential(nn.Linear(66, 1, bias=False), nn.Sigmoid())
        # NOTE(review): `init_weights` is accepted but ignored (the original
        # branch was a no-op `pass`); kept for interface compatibility.

    def forward(self, f, s):
        fused = torch.cat((f, s), 1)
        feats = self.layer1(fused)
        # Flatten spatial dims so pooling reduces over positions per channel.
        flat_feats = feats.view(feats.size(0), feats.size(1), -1)
        flat_s = s.view(s.size(0), s.size(1), -1)
        feat_max = flat_feats.max(dim=2)[0]
        feat_avg = flat_feats.mean(dim=2)
        s_max = flat_s.max(dim=2)[0]
        s_avg = flat_s.mean(dim=2)
        pooled = torch.cat((feat_max, feat_avg, s_avg, s_max), 1)
        return self.fc(pooled.view(pooled.size(0), -1))
def monomials(v, n):
    """Return the monomials in the variables `v` with degree bounds `n`.

    Validates that `v` and `n` have equal length, coerces `v` into a
    `Sequence` to determine a common parent ring, and delegates the actual
    enumeration to the `_monomials` helper.
    """
    if len(v) != len(n):
        raise ValueError('inputs must be of the same length.')
    if len(v) == 0:
        # No variables: the empty monomial list.
        return []
    seq = Sequence(v)
    return _monomials(seq, seq.universe(), n, 0)
def test_full_like_types():
    """full_like must honor datetime64 element types and explicit dtype overrides."""
    # datetime64 awkward array: the fill string is parsed as a datetime.
    array = ak.highlevel.Array(np.array(['2020-07-27T10:41:11', '2019-01-01', '2020-01-01'], 'datetime64[s]'))
    assert (ak.operations.full_like(array, '2020-07-27T10:41:11').to_list() == [datetime.datetime(2020, 7, 27, 10, 41, 11), datetime.datetime(2020, 7, 27, 10, 41, 11), datetime.datetime(2020, 7, 27, 10, 41, 11)])
    # Non-unit step ('25s') datetime64 also round-trips through full_like.
    array = np.array(['2020-07-27T10:41:11', '2019-01-01', '2020-01-01'], 'datetime64[25s]')
    assert (ak.operations.full_like(array, '2021-06-03T10:00').to_list() == [datetime.datetime(2021, 6, 3, 10, 0), datetime.datetime(2021, 6, 3, 10, 0), datetime.datetime(2021, 6, 3, 10, 0)])
    # dtype may be given as a string, np.dtype instance, or type object.
    array = ak.contents.NumpyArray(np.array([0, 2, 2, 3], dtype='i4'))
    assert (str(ak.operations.full_like(array, 11, dtype='i8').type) == '4 * int64')
    assert (str(ak.operations.full_like(array, 11, dtype=np.dtype(np.int64)).type) == '4 * int64')
    assert (str(ak.operations.full_like(array, 11, dtype=np.int64).type) == '4 * int64')
from dataclasses import dataclass  # local import: the file's import block is outside this chunk


# BUG FIX: the original line read `(frozen=True, eq=True)` -- a decorator whose
# `@dataclass` prefix was stripped, leaving a useless bare tuple expression and
# a class with no generated __init__/__eq__. Restore the decorator.
@dataclass(frozen=True, eq=True)
class MetricName:
    """Immutable, hashable identifier for a metric."""

    # Base metric name, e.g. an accuracy or calibration metric.
    name: str
    # Optional dataset split this metric was computed on.
    split: Optional[str] = None
    # Optional finer-grained sub-split within `split`.
    sub_split: Optional[str] = None
    # Perturbation applied to the instances, if any.
    perturbation: Optional[PerturbationDescription] = None
class OfflineTests(TestCasePlus):
    """Checks that TRANSFORMERS_OFFLINE controls network access for cached models."""

    # NOTE(review): this bare `_torch` is a NameError as written -- it looks like
    # the remnant of a stripped decorator (e.g. `@require_torch`); confirm
    # against the original source.
    _torch

    def test_offline_mode(self):
        """Run a child interpreter with sockets disabled and toggle offline mode.

        Expectation: succeeds normally, fails with sockets mocked out and
        TRANSFORMERS_OFFLINE=0, then succeeds again (from cache) with
        TRANSFORMERS_OFFLINE=1.
        """
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n        '
        run = '\nmname = "lysandre/tiny-bert-random"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\nprint("success")\n        '
        # Replaces socket.socket so any network attempt raises immediately.
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n        '
        # Baseline: network available, script must succeed.
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
        # Sockets mocked + offline disabled: downloads are attempted and fail.
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        env['TRANSFORMERS_OFFLINE'] = '0'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        # Sockets mocked + offline enabled: cached files are used, so it succeeds.
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
def test_torch_summer():
    """Summer(enc) must equal enc(z) + z element-wise (up to float tolerance)."""
    model_with_sum = Summer(PositionalEncoding2D(125))
    model_wo_sum = PositionalEncoding2D(125)
    z = torch.rand(3, 5, 6, 125)
    assert (np.sum(np.abs(((model_wo_sum(z) + z).numpy() - model_with_sum(z).numpy()))) < 0.0001), 'The summer is not working properly!'
def get_env_info():
    """Collect a SystemEnv snapshot of the Python/PyTorch/CUDA/HIP environment.

    All values degrade to the string 'N/A' when torch (or the relevant
    backend) is unavailable, so the report can always be rendered.
    """
    run_lambda = run
    (pip_version, pip_list_output) = get_pip_packages(run_lambda)
    if TORCH_AVAILABLE:
        version_str = torch.__version__
        debug_mode_str = str(torch.version.debug)
        cuda_available_str = str(torch.cuda.is_available())
        cuda_version_str = torch.version.cuda
        if ((not hasattr(torch.version, 'hip')) or (torch.version.hip is None)):
            # CUDA (or CPU-only) build: no HIP/MIOpen info.
            hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
        else:
            # ROCm build: scrape HIP/MIOpen versions from the build config
            # (last whitespace-separated token of the matching config line).
            cfg = torch._C._show_config().split('\n')
            hip_runtime_version = [s.rsplit(None, 1)[(- 1)] for s in cfg if ('HIP Runtime' in s)][0]
            miopen_runtime_version = [s.rsplit(None, 1)[(- 1)] for s in cfg if ('MIOpen' in s)][0]
            cuda_version_str = 'N/A'
            hip_compiled_version = torch.version.hip
    else:
        version_str = debug_mode_str = cuda_available_str = cuda_version_str = 'N/A'
        hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
    sys_version = sys.version.replace('\n', ' ')
    return SystemEnv(torch_version=version_str, is_debug_build=debug_mode_str, python_version='{} ({}-bit runtime)'.format(sys_version, (sys.maxsize.bit_length() + 1)), python_platform=get_python_platform(), is_cuda_available=cuda_available_str, cuda_compiled_version=cuda_version_str, cuda_runtime_version=get_running_cuda_version(run_lambda), nvidia_gpu_models=get_gpu_info(run_lambda), nvidia_driver_version=get_nvidia_driver_version(run_lambda), cudnn_version=get_cudnn_version(run_lambda), hip_compiled_version=hip_compiled_version, hip_runtime_version=hip_runtime_version, miopen_runtime_version=miopen_runtime_version, pip_version=pip_version, pip_packages=pip_list_output, conda_packages=get_conda_packages(run_lambda), os=get_os(run_lambda), libc_version=get_libc_version(), gcc_version=get_gcc_version(run_lambda), clang_version=get_clang_version(run_lambda), cmake_version=get_cmake_version(run_lambda), caching_allocator_config=get_cachingallocator_config())
# NOTE(review): this looks like a registry decorator that lost its leading "@"
# (and possibly a name prefix, e.g. `@DATASET_REGISTRY.register()`) during
# extraction -- as written it is a bare call evaluated at import time.
_REGISTRY.register()
class SingleImageDataset(data.Dataset):
    """Dataset that reads only low-quality (LQ) images, without GT pairs.

    Paths come from, in priority order: an LMDB database, a meta-info file
    listing relative paths, or a recursive scan of the LQ folder.
    """

    def __init__(self, opt):
        super(SingleImageDataset, self).__init__()
        self.opt = opt
        # FileClient is created lazily in __getitem__ (worker-process safe).
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        # Optional per-channel normalization parameters.
        self.mean = (opt['mean'] if ('mean' in opt) else None)
        self.std = (opt['std'] if ('std' in opt) else None)
        self.lq_folder = opt['dataroot_lq']
        if (self.io_backend_opt['type'] == 'lmdb'):
            self.io_backend_opt['db_paths'] = [self.lq_folder]
            self.io_backend_opt['client_keys'] = ['lq']
            self.paths = paths_from_lmdb(self.lq_folder)
        elif ('meta_info_file' in self.opt):
            # Each line: "<relative_path> [extra columns ignored]".
            with open(self.opt['meta_info_file'], 'r') as fin:
                self.paths = [osp.join(self.lq_folder, line.rstrip().split(' ')[0]) for line in fin]
        else:
            self.paths = sorted(list(scandir(self.lq_folder, full_path=True)))

    def __getitem__(self, index):
        if (self.file_client is None):
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
        lq_path = self.paths[index]
        img_bytes = self.file_client.get(lq_path, 'lq')
        img_lq = imfrombytes(img_bytes, float32=True)
        # Optional conversion to the Y (luma) channel only.
        if (('color' in self.opt) and (self.opt['color'] == 'y')):
            img_lq = rgb2ycbcr(img_lq, y_only=True)[(..., None)]
        # BGR (OpenCV) -> RGB tensor in [0, 1].
        img_lq = img2tensor(img_lq, bgr2rgb=True, float32=True)
        if ((self.mean is not None) or (self.std is not None)):
            normalize(img_lq, self.mean, self.std, inplace=True)
        return {'lq': img_lq, 'lq_path': lq_path}

    def __len__(self):
        return len(self.paths)
# NOTE(review): both `.parametrize(...)` lines appear to be
# `@pytest.mark.parametrize` decorators whose `@pytest.mark` prefix was lost in
# extraction; as written they are syntax errors.
.parametrize('seed', [311])
.parametrize('clear_buffer', [True, False])
def test_graph_rewire(seed, clear_buffer):
    """rewire_on must splice graph `a` in front of `b1`/`b2` so that the
    rewired graphs produce the same values and gradients as an explicitly
    composed graph built from the same (shared) parameter scopes.
    """
    nn.clear_parameters()

    def mlp2(x, scope):
        # Two tanh-affine layers; both layers intentionally share name 'a1',
        # so the second reuses the first's parameter names within `scope`.
        with nn.parameter_scope(scope):
            h = F.tanh(PF.affine(x, 10, name='a1'))
            h = F.tanh(PF.affine(h, 10, name='a1'))
        return h
    # Graph A: xa -> ya; graphs B: xb -> yb1 / yb2.
    xa = nn.Variable((2, 10), need_grad=True)
    ya = mlp2(xa, 'a')
    xb = nn.Variable((2, 10), need_grad=True)
    yb1 = mlp2(xb, 'b1')
    yb2 = mlp2(xb, 'b2')
    # Reference graph C: same composition built directly (shares parameters).
    xc = nn.Variable((2, 10))
    h = mlp2(xc, 'a')
    yc1 = mlp2(h, 'b1')
    yc2 = mlp2(h, 'b2')
    # Splice: feed ya's output into xb's consumers.
    xb.rewire_on(ya)
    rng = np.random.RandomState(seed)
    data = rng.randn(*xa.shape)
    xa.d = data
    xc.d = data
    params = nn.get_parameters()

    def zero_grad():
        for p in params.values():
            p.grad.zero()

    def backup_params():
        return [p.g.copy() for p in params.values()]
    # Forward both rewired and reference graphs in one pass; outputs must match.
    nn.forward_all([yb1, yb2, yc1, yc2], clear_no_need_grad=clear_buffer)
    assert_allclose(yb1.d, yc1.d)
    assert_allclose(yb2.d, yc2.d)
    # Gradients through branch 1 must match between rewired and reference.
    zero_grad()
    yb1.backward(clear_buffer=False)
    gb = backup_params()
    zero_grad()
    yc1.backward(clear_buffer=False)
    gc = backup_params()
    assert_allclose(xa.d, xc.d)
    for (b, c) in zip(gb, gc):
        assert_allclose(b, c)
    # Same check for branch 2, optionally clearing intermediate buffers.
    zero_grad()
    yb2.backward(clear_buffer=clear_buffer)
    gb = backup_params()
    zero_grad()
    yc2.backward(clear_buffer=clear_buffer)
    gc = backup_params()
    assert_allclose(xa.d, xc.d)
    for (b, c) in zip(gb, gc):
        assert_allclose(b, c)
def test_inconsistent_dimensions():
    """_clean_inputs must raise ValueError for shape-mismatched A/b/bounds."""
    m, n = 2, 4
    c = [1, 2, 3, 4]
    A_ok = np.random.rand(m, n)
    A_extra_col = np.random.rand(m, (n + 1))
    b_ok = np.random.rand(m)
    b_extra_row = np.random.rand((m + 1))
    bounds_extra = ([(0, 1)] * (n + 1))
    # A with a spare column, or b with a spare row, for both ub and eq systems.
    assert_raises(ValueError, _clean_inputs, c=c, A_ub=A_extra_col, b_ub=b_ok)
    assert_raises(ValueError, _clean_inputs, c=c, A_ub=A_ok, b_ub=b_extra_row)
    assert_raises(ValueError, _clean_inputs, c=c, A_eq=A_extra_col, b_eq=b_ok)
    assert_raises(ValueError, _clean_inputs, c=c, A_eq=A_ok, b_eq=b_extra_row)
    # One bound pair too many.
    assert_raises(ValueError, _clean_inputs, c=c, bounds=bounds_extra)
def load_remote_uri(uri: str) -> Any:
    """Fetch `uri` over HTTP and parse the response body as YAML."""
    # DEFAULT_RESPONSE_TIMEOUT is in milliseconds; `requests` expects seconds.
    timeout_seconds = DEFAULT_RESPONSE_TIMEOUT / 1000
    response = requests.get(uri, timeout=timeout_seconds)
    return load_yaml(response.content)
def save_model(args, model):
    """Write the model's state dict to '<output_dir>/<name>_<client>_checkpoint.bin'.

    The client identifier is the basename of `args.single_client` with its
    extension removed.
    """
    # Unwrap DataParallel/DistributedDataParallel containers if present.
    target = model.module if hasattr(model, 'module') else model
    client_name = os.path.basename(args.single_client).split('.')[0]
    checkpoint_path = os.path.join(args.output_dir, '%s_%s_checkpoint.bin' % (args.name, client_name))
    torch.save(target.state_dict(), checkpoint_path)
def decode_jpeg(image_buffer, scope=None):
    """Decode a serialized JPEG string into a float32 RGB image in [0, 1].

    The ops are grouped under a name scope (default 'decode_jpeg') for graph
    readability.
    """
    with tf.name_scope(values=[image_buffer], name=scope, default_name='decode_jpeg'):
        decoded = tf.image.decode_jpeg(image_buffer, channels=3)
        # convert_image_dtype also rescales uint8 [0, 255] -> float32 [0, 1].
        return tf.image.convert_image_dtype(decoded, dtype=tf.float32)
class _EstimatorPrettyPrinter(pprint.PrettyPrinter):
    """PrettyPrinter subclass that renders estimators as `Name(param=value, ...)`.

    Compared to the stock PrettyPrinter it can: indent continuation lines to
    the estimator name, render parameters as `key=value` (no quotes), show at
    most `n_max_elements_to_show` entries per sequence, and optionally print
    only parameters changed from their defaults (`_changed_only`).
    """

    def __init__(self, indent=1, width=80, depth=None, stream=None, *, compact=False, indent_at_name=True, n_max_elements_to_show=None):
        super().__init__(indent, width, depth, stream, compact=compact)
        self._indent_at_name = indent_at_name
        if self._indent_at_name:
            # The real indentation is added per-estimator in _pprint_estimator.
            self._indent_per_level = 1
        # When True, only parameters differing from defaults are printed.
        self._changed_only = False
        # None means "no limit"; otherwise truncate sequences with ', ...'.
        self.n_max_elements_to_show = n_max_elements_to_show

    def format(self, object, context, maxlevels, level):
        """Delegate to _safe_repr so estimators nest correctly inside containers."""
        return _safe_repr(object, context, maxlevels, level, changed_only=self._changed_only)

    def _pprint_estimator(self, object, stream, indent, allowance, context, level):
        """Render one estimator as `ClassName(param=value, ...)`."""
        stream.write((object.__class__.__name__ + '('))
        if self._indent_at_name:
            # Align wrapped parameters under the opening parenthesis.
            indent += len(object.__class__.__name__)
        if self._changed_only:
            params = _changed_params(object)
        else:
            params = object.get_params(deep=False)
        params = OrderedDict(((name, val) for (name, val) in sorted(params.items())))
        self._format_params(params.items(), stream, indent, (allowance + 1), context, level)
        stream.write(')')

    def _format_dict_items(self, items, stream, indent, allowance, context, level):
        """Dict entries render as `key: value` (keys repr'd)."""
        return self._format_params_or_dict_items(items, stream, indent, allowance, context, level, is_dict=True)

    def _format_params(self, items, stream, indent, allowance, context, level):
        """Estimator parameters render as `key=value` (keys unquoted)."""
        return self._format_params_or_dict_items(items, stream, indent, allowance, context, level, is_dict=False)

    def _format_params_or_dict_items(self, object, stream, indent, allowance, context, level, is_dict):
        """Shared layout loop for dict items and estimator params.

        Closely follows pprint.PrettyPrinter._format_items, with two twists:
        `key=value` formatting when not a dict, and truncation after
        `n_max_elements_to_show` entries.
        """
        write = stream.write
        indent += self._indent_per_level
        delimnl = (',\n' + (' ' * indent))
        delim = ''
        width = max_width = ((self._width - indent) + 1)
        it = iter(object)
        try:
            next_ent = next(it)
        except StopIteration:
            # Empty sequence: nothing to write.
            return
        last = False
        n_items = 0
        while (not last):
            if (n_items == self.n_max_elements_to_show):
                write(', ...')
                break
            n_items += 1
            ent = next_ent
            try:
                next_ent = next(it)
            except StopIteration:
                last = True
                max_width -= allowance
                width -= allowance
            if self._compact:
                # Compact mode: pack as many entries per line as fit.
                (k, v) = ent
                krepr = self._repr(k, context, level)
                vrepr = self._repr(v, context, level)
                if (not is_dict):
                    # Parameter names must not be quoted.
                    krepr = krepr.strip("'")
                middle = (': ' if is_dict else '=')
                rep = ((krepr + middle) + vrepr)
                w = (len(rep) + 2)
                if (width < w):
                    width = max_width
                    if delim:
                        delim = delimnl
                if (width >= w):
                    width -= w
                    write(delim)
                    delim = ', '
                    write(rep)
                    continue
            write(delim)
            delim = delimnl
            # Wrap the pair so _pprint_key_val_tuple picks the right separator.
            class_ = (KeyValTuple if is_dict else KeyValTupleParam)
            self._format(class_(ent), stream, indent, (allowance if last else 1), context, level)

    def _format_items(self, items, stream, indent, allowance, context, level):
        """Like the base implementation, but truncates after
        `n_max_elements_to_show` entries with ', ...'.
        """
        write = stream.write
        indent += self._indent_per_level
        if (self._indent_per_level > 1):
            write(((self._indent_per_level - 1) * ' '))
        delimnl = (',\n' + (' ' * indent))
        delim = ''
        width = max_width = ((self._width - indent) + 1)
        it = iter(items)
        try:
            next_ent = next(it)
        except StopIteration:
            return
        last = False
        n_items = 0
        while (not last):
            if (n_items == self.n_max_elements_to_show):
                write(', ...')
                break
            n_items += 1
            ent = next_ent
            try:
                next_ent = next(it)
            except StopIteration:
                last = True
                max_width -= allowance
                width -= allowance
            if self._compact:
                rep = self._repr(ent, context, level)
                w = (len(rep) + 2)
                if (width < w):
                    width = max_width
                    if delim:
                        delim = delimnl
                if (width >= w):
                    width -= w
                    write(delim)
                    delim = ', '
                    write(rep)
                    continue
            write(delim)
            delim = delimnl
            self._format(ent, stream, indent, (allowance if last else 1), context, level)

    def _pprint_key_val_tuple(self, object, stream, indent, allowance, context, level):
        """Render a (key, value) pair; '=' for params, ': ' for dict items."""
        (k, v) = object
        rep = self._repr(k, context, level)
        if isinstance(object, KeyValTupleParam):
            rep = rep.strip("'")
            middle = '='
        else:
            middle = ': '
        stream.write(rep)
        stream.write(middle)
        self._format(v, stream, ((indent + len(rep)) + len(middle)), allowance, context, level)

    # Register the custom renderers in a copy of the base dispatch table so
    # BaseEstimator instances and KeyValTuple pairs use them.
    # NOTE: keying on __repr__ is how pprint's dispatch works; estimators
    # overriding __repr__ without changing its identity still dispatch here.
    _dispatch = pprint.PrettyPrinter._dispatch.copy()
    _dispatch[BaseEstimator.__repr__] = _pprint_estimator
    _dispatch[KeyValTuple.__repr__] = _pprint_key_val_tuple
def get_static_parameters_from_examples(operation: APIOperation, examples_field: str) -> list[dict[str, Any]]:
    """Collect example values for an operation's parameters and request body.

    Works on a deep copy of the resolved definition: non-body parameters that
    are not already present in the definition's `parameters` list are appended
    to it, then parameter-level and request-body examples are merged.
    """
    definition = fast_deepcopy(operation.definition.resolved)
    for parameter in operation.definition.parameters:
        declared = definition.setdefault('parameters', [])
        # Renamed the comprehension variable (was shadowing `parameter`).
        already_declared = parameter.name in {entry['name'] for entry in declared}
        if parameter.location == 'body' or already_declared:
            continue
        declared.append(parameter.definition)
    return merge_examples(
        get_parameter_examples(definition, examples_field),
        get_request_body_examples(definition, examples_field),
    )
class TapasConfig(PretrainedConfig):
    """Configuration for TAPAS models.

    Holds the standard BERT-style encoder hyper-parameters plus the
    TAPAS-specific cell-selection / aggregation loss settings. All arguments
    keep their upstream defaults, so ``TapasConfig()`` is unchanged.
    """

    model_type = 'tapas'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=1024,
                 type_vocab_sizes=None, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0,
                 aggregation_loss_weight=1.0, use_answer_as_supervision=None,
                 answer_loss_importance=1.0, use_normalized_answer_loss=False,
                 huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0,
                 use_gumbel_for_cells=False, use_gumbel_for_aggregation=False,
                 average_approximation_function='ratio', cell_selection_preference=None,
                 answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32,
                 average_logits_per_cell=False, select_one_column=True,
                 allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False,
                 reset_position_index_per_cell=True, disable_per_token_loss=False,
                 aggregation_labels=None, no_aggregation_label_index=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # Encoder hyper-parameters.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        # BUG FIX: the default used to be a mutable list in the signature
        # (`type_vocab_sizes=[3, 256, ...]`), which is shared across calls and
        # corruptible in place; use a None sentinel instead (backward compatible).
        if type_vocab_sizes is None:
            type_vocab_sizes = [3, 256, 256, 2, 256, 256, 10]
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning / loss hyper-parameters.
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        # JSON round-trips turn int keys into strings; normalize them back.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for (k, v) in aggregation_labels.items()}
class DilatedParllelResidualBlockB1(nn.Module):
    """Residual block with parallel dilated convolutions (ESP-style).

    A 3x3 conv reduces channels, then parallel dilated 3x3 convs (dilations
    1/2/4/8) are combined hierarchically and added back to the input.
    """

    def __init__(self, nIn, nOut, prob=0.03):
        super().__init__()
        # Split nOut across 4 branches; n1 absorbs the remainder so the
        # concatenated output has exactly nOut channels.
        n = int((nOut / 4))
        n1 = (nOut - (3 * n))
        self.c1 = C(nIn, n, 3, 1)
        self.d1 = CDilated(n, n1, 3, 1, 1)
        self.d2 = CDilated(n, n, 3, 1, 2)
        self.d4 = CDilated(n, n, 3, 1, 4)
        self.d8 = CDilated(n, n, 3, 1, 8)
        # NOTE(review): d16 is constructed (and adds parameters to the module)
        # but never used in forward(); likely leftover -- confirm upstream.
        # `prob` is likewise accepted but unused here.
        self.d16 = CDilated(n, n, 3, 1, 16)
        self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=0.001)
        self.act = nn.ReLU(True)

    def forward(self, input):
        output1 = self.c1(input)
        d1 = self.d1(output1)
        d2 = self.d2(output1)
        d4 = self.d4(output1)
        d8 = self.d8(output1)
        # Hierarchical feature fusion: progressively accumulate the dilated
        # outputs to reduce gridding artifacts.
        add1 = d2
        add2 = (add1 + d4)
        add3 = (add2 + d8)
        combine = torch.cat([d1, add1, add2, add3], 1)
        # Residual connection requires nIn == nOut and matching spatial size.
        combine_in_out = (input + combine)
        output = self.bn(combine_in_out)
        output = self.act(output)
        return output
def is_taichi_class(rhs):
    """Return True if `rhs` advertises itself as a Taichi class.

    A Taichi class carries a truthy `_is_taichi_class` attribute; any other
    object (including ones without the attribute) yields False.
    """
    # BUG FIX: the original used a bare `except:` that silently swallowed
    # every exception. getattr with a default handles the missing-attribute
    # case explicitly; any other exception from an attribute lookup now
    # propagates instead of being hidden.
    return bool(getattr(rhs, '_is_taichi_class', False))
def prd_uncertainty(mu_mcs):
    """Predictive uncertainty as the variance over MC samples (axis 0).

    Computed as E[mu^2] - (E[mu])^2, keeping the original formulation for
    bit-exact compatibility.
    """
    mean_of_squares = np.mean(np.square(mu_mcs), 0)
    square_of_mean = np.square(np.mean(mu_mcs, 0))
    return mean_of_squares - square_of_mean
class Runner(object):
def __init__(self, model, batch_processor, optimizer=None, work_dir=None, log_level=logging.INFO, logger=None, meta=None):
assert callable(batch_processor)
self.model = model
if (optimizer is not None):
self.optimizer = self.init_optimizer(optimizer)
else:
self.optimizer = None
self.batch_processor = batch_processor
if mmcv.is_str(work_dir):
self.work_dir = osp.abspath(work_dir)
mmcv.mkdir_or_exist(self.work_dir)
elif (work_dir is None):
self.work_dir = None
else:
raise TypeError('"work_dir" must be a str or None')
if hasattr(self.model, 'module'):
self._model_name = self.model.module.__class__.__name__
else:
self._model_name = self.model.__class__.__name__
(self._rank, self._world_size) = get_dist_info()
self.timestamp = get_time_str()
if (logger is None):
self.logger = self.init_logger(work_dir, log_level)
else:
self.logger = logger
self.log_buffer = LogBuffer()
if (meta is not None):
assert isinstance(meta, dict), '"meta" must be a dict or None'
self.meta = meta
self.mode = None
self._hooks = []
self._epoch = 0
self._iter = 0
self._inner_iter = 0
self._max_epochs = 0
self._max_iters = 0
def model_name(self):
return self._model_name
def rank(self):
return self._rank
def world_size(self):
return self._world_size
def hooks(self):
return self._hooks
def epoch(self):
return self._epoch
def iter(self):
return self._iter
def inner_iter(self):
return self._inner_iter
def max_epochs(self):
return self._max_epochs
def max_iters(self):
return self._max_iters
def init_optimizer(self, optimizer):
if isinstance(optimizer, dict):
optimizer = obj_from_dict(optimizer, torch.optim, dict(params=self.model.parameters()))
elif (not isinstance(optimizer, torch.optim.Optimizer)):
raise TypeError('optimizer must be either an Optimizer object or a dict, but got {}'.format(type(optimizer)))
return optimizer
def _add_file_handler(self, logger, filename=None, mode='w', level=logging.INFO):
file_handler = logging.FileHandler(filename, mode)
file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
file_handler.setLevel(level)
logger.addHandler(file_handler)
return logger
def init_logger(self, log_dir=None, level=logging.INFO):
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=level)
logger = logging.getLogger(__name__)
if (log_dir and (self.rank == 0)):
filename = '{}.log'.format(self.timestamp)
log_file = osp.join(log_dir, filename)
self._add_file_handler(logger, log_file, level=level)
return logger
def current_lr(self):
if (self.optimizer is None):
raise RuntimeError('lr is not applicable because optimizer does not exist.')
return [group['lr'] for group in self.optimizer.param_groups]
def register_hook(self, hook, priority='NORMAL'):
assert isinstance(hook, Hook)
if hasattr(hook, 'priority'):
raise ValueError('"priority" is a reserved attribute for hooks')
priority = get_priority(priority)
hook.priority = priority
inserted = False
for i in range((len(self._hooks) - 1), (- 1), (- 1)):
if (priority >= self._hooks[i].priority):
self._hooks.insert((i + 1), hook)
inserted = True
break
if (not inserted):
self._hooks.insert(0, hook)
def call_hook(self, fn_name):
for hook in self._hooks:
getattr(hook, fn_name)(self)
    def load_checkpoint(self, filename, map_location='cpu', strict=False):
        """Load weights from `filename` into self.model.

        Delegates to the module-level `load_checkpoint` helper (the bare name
        below resolves to the global, not this method).
        """
        self.logger.info('load checkpoint from %s', filename)
        return load_checkpoint(self.model, filename, map_location, strict, self.logger)
def save_checkpoint(self, out_dir, filename_tmpl='epoch_{}.pth', save_optimizer=True, meta=None, create_symlink=True):
if (meta is None):
meta = dict(epoch=(self.epoch + 1), iter=self.iter)
else:
meta.update(epoch=(self.epoch + 1), iter=self.iter)
filename = filename_tmpl.format((self.epoch + 1))
filepath = osp.join(out_dir, filename)
optimizer = (self.optimizer if save_optimizer else None)
save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
if create_symlink:
mmcv.symlink(filename, osp.join(out_dir, 'latest.pth'))
def train(self, data_loader, **kwargs):
self.model.train()
self.mode = 'train'
self.data_loader = data_loader
self._max_iters = (self._max_epochs * len(data_loader))
self.call_hook('before_train_epoch')
for (i, data_batch) in enumerate(data_loader):
self._inner_iter = i
self.call_hook('before_train_iter')
outputs = self.batch_processor(self.model, data_batch, train_mode=True, **kwargs)
if (not isinstance(outputs, dict)):
raise TypeError('batch_processor() must return a dict')
if ('log_vars' in outputs):
self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
self.outputs = outputs
self.call_hook('after_train_iter')
self._iter += 1
self.call_hook('after_train_epoch')
self._epoch += 1
def val(self, data_loader, **kwargs):
self.model.eval()
self.mode = 'val'
self.data_loader = data_loader
self.call_hook('before_val_epoch')
for (i, data_batch) in enumerate(data_loader):
self._inner_iter = i
self.call_hook('before_val_iter')
with torch.no_grad():
outputs = self.batch_processor(self.model, data_batch, train_mode=False, **kwargs)
if (not isinstance(outputs, dict)):
raise TypeError('batch_processor() must return a dict')
if ('log_vars' in outputs):
self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
self.outputs = outputs
self.call_hook('after_val_iter')
self.call_hook('after_val_epoch')
def resume(self, checkpoint, resume_optimizer=True, map_location='default'):
if (map_location == 'default'):
device_id = torch.cuda.current_device()
checkpoint = self.load_checkpoint(checkpoint, map_location=(lambda storage, loc: storage.cuda(device_id)))
else:
checkpoint = self.load_checkpoint(checkpoint, map_location=map_location)
self._epoch = checkpoint['meta']['epoch']
self._iter = checkpoint['meta']['iter']
if (('optimizer' in checkpoint) and resume_optimizer):
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)
def run(self, data_loaders, workflow, max_epochs, **kwargs):
assert isinstance(data_loaders, list)
assert mmcv.is_list_of(workflow, tuple)
assert (len(data_loaders) == len(workflow))
self._max_epochs = max_epochs
work_dir = (self.work_dir if (self.work_dir is not None) else 'NONE')
self.logger.info('Start running, host: %s, work_dir: %s', get_host_info(), work_dir)
self.logger.info('workflow: %s, max: %d epochs', workflow, max_epochs)
self.call_hook('before_run')
while (self.epoch < max_epochs):
for (i, flow) in enumerate(workflow):
(mode, epochs) = flow
if isinstance(mode, str):
if (not hasattr(self, mode)):
raise ValueError('runner has no method named "{}" to run an epoch'.format(mode))
epoch_runner = getattr(self, mode)
elif callable(mode):
epoch_runner = mode
else:
raise TypeError('mode in workflow must be a str or callable function, not {}'.format(type(mode)))
for _ in range(epochs):
if ((mode == 'train') and (self.epoch >= max_epochs)):
return
epoch_runner(data_loaders[i], **kwargs)
time.sleep(1)
self.call_hook('after_run')
def register_lr_hook(self, lr_config):
if isinstance(lr_config, dict):
assert ('policy' in lr_config)
hook_type = (lr_config.pop('policy').title() + 'LrUpdaterHook')
lr_config['type'] = hook_type
hook = mmcv.build_from_cfg(lr_config, HOOKS)
else:
hook = lr_config
self.register_hook(hook)
def register_optimizer_hook(self, optimizer_config):
if (optimizer_config is None):
return
if isinstance(optimizer_config, dict):
optimizer_config.setdefault('type', 'OptimizerHook')
hook = mmcv.build_from_cfg(optimizer_config, HOOKS)
else:
hook = optimizer_config
self.register_hook(hook)
def register_checkpoint_hook(self, checkpoint_config):
if (checkpoint_config is None):
return
if isinstance(checkpoint_config, dict):
checkpoint_config.setdefault('type', 'CheckpointHook')
hook = mmcv.build_from_cfg(checkpoint_config, HOOKS)
else:
hook = checkpoint_config
self.register_hook(hook)
def register_logger_hooks(self, log_config):
log_interval = log_config['interval']
for info in log_config['hooks']:
logger_hook = mmcv.build_from_cfg(info, HOOKS, default_args=dict(interval=log_interval))
self.register_hook(logger_hook, priority='VERY_LOW')
    def register_training_hooks(self, lr_config, optimizer_config=None, checkpoint_config=None, log_config=None):
        """Register the standard training hooks in their canonical order.

        Order matters for same-priority hooks: LR updater, optimizer,
        checkpoint, iteration timer, then the logger hooks (which register
        themselves with VERY_LOW priority so they run last).
        """
        self.register_lr_hook(lr_config)
        self.register_optimizer_hook(optimizer_config)
        self.register_checkpoint_hook(checkpoint_config)
        self.register_hook(IterTimerHook())
        self.register_logger_hooks(log_config)
class CmdLineParserTest(TestCase):
    """Tests for the Cython command-line option parser.

    ``setUp``/``tearDown`` snapshot and restore the mutable global ``Options``
    state so option-mutating tests cannot leak into each other.
    """
    def setUp(self):
        # Snapshot every attribute of the global Options namespace.
        backup = {}
        for (name, value) in vars(Options).items():
            backup[name] = value
        self._options_backup = backup
    def tearDown(self):
        no_value = object()
        for (name, orig_value) in self._options_backup.items():
            # Restore only attributes the test actually changed.
            if (getattr(Options, name, no_value) != orig_value):
                setattr(Options, name, orig_value)
    def check_default_global_options(self, white_list=()):
        # FIX: the default was a mutable list ([]); an immutable tuple avoids
        # the shared-mutable-default pitfall with identical behavior.
        self.assertEqual(check_global_options(self._options_backup, white_list), '')
    def check_default_options(self, options, white_list=()):
        """Assert *options* matches the compiled defaults outside *white_list*."""
        from ..Main import CompilationOptions, default_options
        default_options = CompilationOptions(default_options)
        no_value = object()
        for name in default_options.__dict__.keys():
            if (name not in white_list):
                self.assertEqual(getattr(options, name, no_value), getattr(default_options, name), msg=('error in option ' + name))
    def test_short_options(self):
        (options, sources) = parse_command_line(['-V', '-l', '-+', '-t', '-v', '-v', '-v', '-p', '-D', '-a', '-3'])
        self.assertFalse(sources)
        self.assertTrue(options.show_version)
        self.assertTrue(options.use_listing_file)
        self.assertTrue(options.cplus)
        self.assertTrue(options.timestamps)
        self.assertTrue((options.verbose >= 3))
        self.assertTrue(Options.embed_pos_in_docstring)
        self.assertFalse(Options.docstrings)
        self.assertTrue(Options.annotate)
        self.assertEqual(options.language_level, 3)
        (options, sources) = parse_command_line(['-f', '-2', 'source.pyx'])
        self.assertTrue(sources)
        self.assertTrue((len(sources) == 1))
        self.assertFalse(options.timestamps)
        self.assertEqual(options.language_level, 2)
    def test_long_options(self):
        (options, sources) = parse_command_line(['--version', '--create-listing', '--cplus', '--embed', '--timestamps', '--verbose', '--verbose', '--verbose', '--embed-positions', '--no-docstrings', '--annotate', '--lenient'])
        self.assertFalse(sources)
        self.assertTrue(options.show_version)
        self.assertTrue(options.use_listing_file)
        self.assertTrue(options.cplus)
        self.assertEqual(Options.embed, 'main')
        self.assertTrue(options.timestamps)
        self.assertTrue((options.verbose >= 3))
        self.assertTrue(Options.embed_pos_in_docstring)
        self.assertFalse(Options.docstrings)
        self.assertTrue(Options.annotate)
        self.assertFalse(Options.error_on_unknown_names)
        self.assertFalse(Options.error_on_uninitialized)
        (options, sources) = parse_command_line(['--force', 'source.pyx'])
        self.assertTrue(sources)
        self.assertTrue((len(sources) == 1))
        self.assertFalse(options.timestamps)
    def test_options_with_values(self):
        (options, sources) = parse_command_line(['--embed=huhu', '-I/test/include/dir1', '--include-dir=/test/include/dir2', '--include-dir', '/test/include/dir3', '--working=/work/dir', 'source.pyx', '--output-file=/output/dir', '--pre-import=/pre/import', '--cleanup=3', '--annotate-coverage=cov.xml', '--gdb-outdir=/gdb/outdir', '--directive=wraparound=false'])
        self.assertEqual(sources, ['source.pyx'])
        self.assertEqual(Options.embed, 'huhu')
        self.assertEqual(options.include_path, ['/test/include/dir1', '/test/include/dir2', '/test/include/dir3'])
        self.assertEqual(options.working_path, '/work/dir')
        self.assertEqual(options.output_file, '/output/dir')
        self.assertEqual(Options.pre_import, '/pre/import')
        self.assertEqual(Options.generate_cleanup_code, 3)
        self.assertTrue(Options.annotate)
        self.assertEqual(Options.annotate_coverage_xml, 'cov.xml')
        self.assertTrue(options.gdb_debug)
        self.assertEqual(options.output_dir, '/gdb/outdir')
    def test_module_name(self):
        (options, sources) = parse_command_line(['source.pyx'])
        self.assertEqual(options.module_name, None)
        self.check_default_global_options()
        self.check_default_options(options)
        (options, sources) = parse_command_line(['--module-name', 'foo.bar', 'source.pyx'])
        self.assertEqual(options.module_name, 'foo.bar')
        self.check_default_global_options()
        self.check_default_options(options, ['module_name'])
    def test_errors(self):
        def error(args, regex=None):
            # Capture stderr while asserting the parser exits with an error.
            old_stderr = sys.stderr
            stderr = sys.stderr = StringIO()
            try:
                self.assertRaises(SystemExit, parse_command_line, list(args))
            finally:
                sys.stderr = old_stderr
            msg = stderr.getvalue().strip()
            self.assertTrue(msg)
            if regex:
                self.assertTrue(re.search(regex, msg), ('"%s" does not match search "%s"' % (msg, regex)))
        error(['-1'], 'Unknown compiler flag: -1')
        error(['-I'])
        error(['--version=-a'])
        error(['--version=--annotate=true'])
        error(['--working'])
        error(['--verbose=1'])
        error(['--cleanup'])
        error(['--debug-disposal-code-wrong-name', 'file3.pyx'], 'Unknown debug flag: debug_disposal_code_wrong_name')
        error(['--module-name', 'foo.pyx'])
        error(['--module-name', 'foo.bar'])
        error(['--module-name', 'foo.bar', 'foo.pyx', 'bar.pyx'], 'Only one source file allowed when using --module-name')
        error(['--module-name', 'foo.bar', '--timestamps', 'foo.pyx'], 'Cannot use --module-name with --timestamps')
_utils.test()
def test_mod_scan():
    """Check the kernel's % and ti.raw_mod against Python % and C-style mod."""
    z = ti.field(ti.i32, shape=())
    w = ti.field(ti.i32, shape=())
    def func(x: ti.i32, y: ti.i32):
        z[None] = x % y
        w[None] = ti.raw_mod(x, y)
    for numerator in range(-10, 11):
        for denominator in range(-10, 11):
            if denominator == 0:
                continue  # division by zero is undefined for both mods
            func(numerator, denominator)
            assert z[None] == numerator % denominator
            assert w[None] == _c_mod(numerator, denominator)
class FlaubertForTokenClassification(metaclass=DummyObject):
    """Placeholder class used when the ``torch`` backend is unavailable."""
    # Backends that must be installed for the real class to be importable.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Fails fast with an informative error when torch is missing.
        requires_backends(self, ['torch'])
def max_dict(myDict, byValue=False):
    """Return the (key, value) pair with the largest key, or the largest
    value when *byValue* is true."""
    position = 1 if byValue else 0
    return max(myDict.items(), key=lambda item: item[position])
def choose_optimiser(optimiser_name=ADAM):
    """Map an optimiser-name constant to the corresponding torch optimiser class.

    FIX: the original if/elif chain fell off the end and silently returned
    ``None`` for unrecognized names; that now raises ``ValueError`` so the
    failure surfaces at selection time instead of at first use.
    """
    if optimiser_name == ADAM:
        return torch.optim.Adam
    if optimiser_name == SGD:
        return torch.optim.SGD
    if optimiser_name == RMSPROP:
        return torch.optim.RMSprop
    raise ValueError('Unknown optimiser name: {!r}'.format(optimiser_name))
def test_predict_with_predict_params():
    """predict(**kwargs) must forward extra kwargs to the final estimator."""
    steps = [('transf', Transf()), ('clf', DummyEstimatorParams())]
    pipe = Pipeline(steps)
    pipe.fit(None, None)
    pipe.predict(X=None, got_attribute=True)
    assert pipe.named_steps['clf'].got_attribute
class Gen():
    """Wrap an iterable of ``{'inputs', 'targets'}`` cases as an Example generator."""
    def __init__(self, gen):
        self._gen = gen
    def __call__(self):
        for sample in self._gen:
            example = generator_utils.to_example({'inputs': sample['inputs'], 'targets': sample['targets']})
            yield example
class SymmetricFunctionAlgebra_multiplicative(classical.SymmetricFunctionAlgebra_classical):
    """Base class for multiplicative bases of the symmetric functions."""
    def product_on_basis(self, left, right):
        """Multiply basis elements by concatenating their parts into one partition."""
        parts = sorted(list(left) + list(right), reverse=True)
        return self.monomial(sage.combinat.partition.Partition(parts))
    def coproduct_on_basis(self, mu):
        """Coproduct: the product of the generator coproducts over the parts of ``mu``."""
        tensor_sq = self.tensor_square()
        return tensor_sq.prod(self.coproduct_on_generators(part) for part in mu)
class AggregateMetric(BaseMetric):
    """Aggregate a pointwise metric function with a jackknife error estimate."""
    def __init__(self, func, method='jackknife', name=None, **kwargs):
        allowed_methods = ('jackknife',)
        if method not in allowed_methods:
            raise NotImplementedError(f'Provided method is not implemented yet. Currently only: {allowed_methods} are implemented')
        self.method = method
        name = func.__name__ if name is None else name
        self.func = func
        super(AggregateMetric, self).__init__(name=name, **kwargs)
    def compute(self, y_true, y_pred):
        """Return ``(metric, jackknife standard error)`` for the predictions."""
        mean = self.func(y_true, y_pred, **self.kwargs)
        n_instances = len(y_true)
        index = np.arange(n_instances)
        jack_idx = self._jackknife_resampling(index)
        jack_pointwise_metric = np.array([self.func(y_true[idx], y_pred[idx], **self.kwargs) for idx in jack_idx])
        jack_stderr = self._compute_jackknife_stderr(jack_pointwise_metric)
        return (mean, jack_stderr)
    @staticmethod
    def _compute_jackknife_stderr(x):
        # BUG FIX: this helper (and _jackknife_resampling) was declared without
        # `self` yet called as an instance method, which raised TypeError at
        # runtime; @staticmethod matches the existing call sites.
        n_instances = x.shape[0]
        # stderr = sqrt((n-1)/n * sum((x_i - mean)^2)) == sqrt(n-1) * population std
        return np.sqrt(n_instances - 1) * np.std(x)
    @staticmethod
    def _jackknife_resampling(x):
        """Return the n leave-one-out resamples of *x*, shape ``(n, n-1)``."""
        n_instances = x.shape[0]
        resamples = np.empty([n_instances, n_instances - 1], dtype=x.dtype)
        for i in range(n_instances):
            resamples[i] = np.delete(x, i)
        return resamples
def save_as_json(filename, data):
    """Serialize *data* as JSON to *filename* (extension coerced to .json)."""
    target = fix_filetype(filename, '.json')
    with open(target, 'w') as outfile:
        json.dump(data, outfile)
class MMFSubset(Subset):
    """``Subset`` that transparently proxies unknown attribute lookups to the
    wrapped dataset."""
    def __init__(self, dataset, indices):
        super().__init__(dataset, indices)
        # Snapshot of this object's attribute names taken once construction is
        # complete; consulted by __getattr__ below.
        self._dir_representation = dir(self)
    def __getattr__(self, name):
        # Only invoked after normal attribute lookup has already failed.
        if (('_dir_representation' in self.__dict__) and (name in self._dir_representation)):
            # NOTE(review): this branch retries the same failing lookup via
            # getattr(self, name), which re-enters __getattr__ and can only
            # recurse (RecursionError) — confirm whether it is ever reachable.
            return getattr(self, name)
        elif (('dataset' in self.__dict__) and hasattr(self.dataset, name)):
            # Fall back to the wrapped dataset's attribute.
            return getattr(self.dataset, name)
        else:
            raise AttributeError(name)
def create_key_pair():
    """Generate and return a fresh 2048-bit RSA private key."""
    return rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
def mobilecrnn_v1(inputdim=64, outputdim=527, pretrained=True):
    """Build a MobileCRNN; optionally load the weights shipped next to this file."""
    model = MobileCRNN(inputdim, outputdim)
    if pretrained:
        weights_path = Path(__file__).parent / 'mobilecrnn_v1.pth'
        # strict=False tolerates minor key mismatches in the shipped state dict.
        model.load_state_dict(torch.load(weights_path), strict=False)
    return model
class MLPDropout(object):
    """Theano MLP with dropout.

    Builds two parallel stacks sharing weights: dropout layers used during
    training, and deterministic layers with weights scaled by the keep
    probability used for inference.
    """
    def __init__(self, rng, input, layer_sizes, dropout_rates, activations, use_bias=True):
        # BUG FIX: on Python 3, zip() returns a one-shot iterator, but this
        # sequence is sliced ([:-1]) and indexed ([-1]) below — materialize it.
        self.weight_matrix_sizes = list(zip(layer_sizes, layer_sizes[1:]))
        self.layers = []
        self.dropout_layers = []
        self.activations = activations
        next_layer_input = input
        # Dropout is also applied to the raw input.
        next_dropout_layer_input = _dropout_from_layer(rng, input, p=dropout_rates[0])
        layer_counter = 0
        for (n_in, n_out) in self.weight_matrix_sizes[:(- 1)]:
            next_dropout_layer = DropoutHiddenLayer(rng=rng, input=next_dropout_layer_input, activation=activations[layer_counter], n_in=n_in, n_out=n_out, use_bias=use_bias, dropout_rate=dropout_rates[layer_counter])
            self.dropout_layers.append(next_dropout_layer)
            next_dropout_layer_input = next_dropout_layer.output
            # Inference path reuses the dropout layer's weights, scaled by the
            # keep probability (1 - dropout rate).
            next_layer = HiddenLayer(rng=rng, input=next_layer_input, activation=activations[layer_counter], W=(next_dropout_layer.W * (1 - dropout_rates[layer_counter])), b=next_dropout_layer.b, n_in=n_in, n_out=n_out, use_bias=use_bias)
            self.layers.append(next_layer)
            next_layer_input = next_layer.output
            layer_counter += 1
        (n_in, n_out) = self.weight_matrix_sizes[(- 1)]
        dropout_output_layer = LogisticRegression(input=next_dropout_layer_input, n_in=n_in, n_out=n_out)
        self.dropout_layers.append(dropout_output_layer)
        output_layer = LogisticRegression(input=next_layer_input, W=(dropout_output_layer.W * (1 - dropout_rates[(- 1)])), b=dropout_output_layer.b, n_in=n_in, n_out=n_out)
        self.layers.append(output_layer)
        # Training objective uses the dropout stack; evaluation uses the scaled stack.
        self.dropout_negative_log_likelihood = self.dropout_layers[(- 1)].negative_log_likelihood
        self.dropout_errors = self.dropout_layers[(- 1)].errors
        self.negative_log_likelihood = self.layers[(- 1)].negative_log_likelihood
        self.errors = self.layers[(- 1)].errors
        # Gradients are taken w.r.t. the dropout stack's parameters (shared weights).
        self.params = [param for layer in self.dropout_layers for param in layer.params]
    def predict(self, new_data):
        """Return the symbolic predicted class indices for *new_data*."""
        next_layer_input = new_data
        for (i, layer) in enumerate(self.layers):
            if (i < (len(self.layers) - 1)):
                next_layer_input = self.activations[i]((T.dot(next_layer_input, layer.W) + layer.b))
            else:
                p_y_given_x = T.nnet.softmax((T.dot(next_layer_input, layer.W) + layer.b))
                y_pred = T.argmax(p_y_given_x, axis=1)
        return y_pred
    def predict_p(self, new_data):
        """Return the symbolic class-probability matrix for *new_data*."""
        next_layer_input = new_data
        for (i, layer) in enumerate(self.layers):
            if (i < (len(self.layers) - 1)):
                next_layer_input = self.activations[i]((T.dot(next_layer_input, layer.W) + layer.b))
            else:
                p_y_given_x = T.nnet.softmax((T.dot(next_layer_input, layer.W) + layer.b))
        return p_y_given_x
class resnetv1(Network):
    """Faster R-CNN backbone wrapping a torchvision-style ResNet.

    The stem through ``layer3`` forms the shared conv feature map ("head");
    ``layer4`` is applied per-RoI in ``_head_to_tail``.
    """
    def __init__(self, num_layers=50):
        Network.__init__(self)
        # Stride of the head feature map relative to the input image.
        self._feat_stride = [16]
        self._feat_compress = [(1.0 / float(self._feat_stride[0]))]
        self._num_layers = num_layers
        self._net_conv_channels = 1024
        self._fc7_channels = 2048
    def _crop_pool_layer(self, bottom, rois):
        # Delegate to the shared implementation; cfg flag toggles RoI max pooling.
        return Network._crop_pool_layer(self, bottom, rois, cfg.RESNET.MAX_POOL)
    def _image_to_head(self):
        """Run the (partially frozen) ResNet stem on the input image."""
        net_conv = self._layers['head'](self._image)
        self._act_summaries['conv'] = net_conv
        return net_conv
    def _head_to_tail(self, pool5):
        """Apply layer4 to pooled RoIs and global-average-pool into fc7 features."""
        fc7 = self.resnet.layer4(pool5).mean(3).mean(2)
        return fc7
    def _init_head_tail(self):
        """Build the ResNet and freeze the configured blocks plus all BatchNorm params."""
        if (self._num_layers == 50):
            self.resnet = resnet50()
        elif (self._num_layers == 101):
            self.resnet = resnet101()
        elif (self._num_layers == 152):
            self.resnet = resnet152()
        else:
            # Only depths 50/101/152 are supported.
            raise NotImplementedError
        # The stem conv and its BatchNorm are always frozen.
        for p in self.resnet.bn1.parameters():
            p.requires_grad = False
        for p in self.resnet.conv1.parameters():
            p.requires_grad = False
        assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
        # Freeze layer1..layerN cumulatively according to FIXED_BLOCKS.
        if (cfg.RESNET.FIXED_BLOCKS >= 3):
            for p in self.resnet.layer3.parameters():
                p.requires_grad = False
        if (cfg.RESNET.FIXED_BLOCKS >= 2):
            for p in self.resnet.layer2.parameters():
                p.requires_grad = False
        if (cfg.RESNET.FIXED_BLOCKS >= 1):
            for p in self.resnet.layer1.parameters():
                p.requires_grad = False
        def set_bn_fix(m):
            # Freeze the affine parameters of every BatchNorm layer.
            classname = m.__class__.__name__
            if (classname.find('BatchNorm') != (- 1)):
                for p in m.parameters():
                    p.requires_grad = False
        self.resnet.apply(set_bn_fix)
        # Shared feature extractor: stem + layer1..layer3 (layer4 runs per-RoI).
        self._layers['head'] = nn.Sequential(self.resnet.conv1, self.resnet.bn1, self.resnet.relu, self.resnet.maxpool, self.resnet.layer1, self.resnet.layer2, self.resnet.layer3)
    def train(self, mode=True):
        """Enter train mode while keeping frozen blocks and all BN layers in eval.

        Order matters: first put the whole backbone in eval, then re-enable
        train mode only on the trainable blocks, then force BN back to eval.
        """
        nn.Module.train(self, mode)
        if mode:
            self.resnet.eval()
            if (cfg.RESNET.FIXED_BLOCKS <= 3):
                self.resnet.layer4.train()
            if (cfg.RESNET.FIXED_BLOCKS <= 2):
                self.resnet.layer3.train()
            if (cfg.RESNET.FIXED_BLOCKS <= 1):
                self.resnet.layer2.train()
            if (cfg.RESNET.FIXED_BLOCKS == 0):
                self.resnet.layer1.train()
            def set_bn_eval(m):
                # BatchNorm always runs with frozen running statistics.
                classname = m.__class__.__name__
                if (classname.find('BatchNorm') != (- 1)):
                    m.eval()
            self.resnet.apply(set_bn_eval)
    def load_pretrained_cnn(self, state_dict):
        """Load only the checkpoint keys that exist in this model's ResNet."""
        self.resnet.load_state_dict({k: v for (k, v) in state_dict.items() if (k in self.resnet.state_dict())})
_model_architecture('s2t_transformer', 's2t_transformer_sp')
def s2t_transformer_sp(args):
    """Small-deep speech-to-text transformer: 16 encoder layers on the small config."""
    default_encoder_layers = 16
    args.encoder_layers = getattr(args, 'encoder_layers', default_encoder_layers)
    s2t_transformer_s(args)
class CheckCommand(Command):
    """Verify that installed packages have compatible dependencies."""
    usage = '\n %prog [options]'
    def run(self, options, args):
        package_set, parsing_probs = create_package_set_from_installed()
        missing, conflicting = check_package_set(package_set)
        for name, unmet in missing.items():
            version = package_set[name].version
            for dependency in unmet:
                write_output('%s %s requires %s, which is not installed.', name, version, dependency[0])
        for name, clashes in conflicting.items():
            version = package_set[name].version
            for dep_name, dep_version, req in clashes:
                write_output('%s %s has requirement %s, but you have %s %s.', name, version, req, dep_name, dep_version)
        # Parsing problems alone are enough to fail the check.
        if missing or conflicting or parsing_probs:
            return ERROR
        write_output('No broken requirements found.')
        return SUCCESS
def extract_gold_corefs(document):
    """Collect gold coreference pairs and gold mention spans from *document*.

    Returns ``(sorted coref pairs, their count, mention span set, its size)``.
    """
    gold_links = defaultdict(list)
    gold_mentions = set(coref['span'] for coref in document.corefs)
    total_mentions = len(gold_mentions)
    # Group mention spans by their cluster label.
    for entry in document.corefs:
        gold_links[entry['label']].append(entry['span'])
    # All unordered within-cluster pairs, flattened across clusters.
    pair_lists = [list(combinations(spans, 2)) for spans in gold_links.values()]
    gold_corefs = sorted(flatten(pair_lists))
    total_corefs = len(gold_corefs)
    return (gold_corefs, total_corefs, gold_mentions, total_mentions)
class AEGenerator(object):
    """Auto-encoder generator network for SEGAN (TF1 graph-mode).

    Builds a strided-conv encoder, an optional latent ``z`` concatenation,
    and a deconv decoder with skip connections; variables are reused on
    subsequent calls once ``segan.generator_built`` is set.
    """
    def __init__(self, segan):
        self.segan = segan
    def __call__(self, noisy_w, is_ref, spk=None, z_on=True, do_prelu=False):
        # is_ref: first/reference call — creates variables and prints shapes.
        segan = self.segan
        def make_z(shape, mean=0.0, std=1.0, name='z'):
            # On the reference pass z is a fixed (non-trainable) variable;
            # otherwise it is freshly sampled noise.
            if is_ref:
                with tf.variable_scope(name) as scope:
                    z_init = tf.random_normal_initializer(mean=mean, stddev=std)
                    z = tf.get_variable('z', shape, initializer=z_init, trainable=False)
                    if (z.device != '/device:GPU:0'):
                        print('z.device is {}'.format(z.device))
                        assert False
            else:
                z = tf.random_normal(shape, mean=mean, stddev=std, name=name, dtype=tf.float32)
            return z
        # Reuse variables after the first construction.
        if hasattr(segan, 'generator_built'):
            tf.get_variable_scope().reuse_variables()
            make_vars = False
        else:
            make_vars = True
        if is_ref:
            print('*** Building Generator ***')
        in_dims = noisy_w.get_shape().as_list()
        h_i = noisy_w
        # Expect (batch, time) or (batch, time, channels); add a channel dim if 2-D.
        if (len(in_dims) == 2):
            h_i = tf.expand_dims(noisy_w, (- 1))
        elif ((len(in_dims) < 2) or (len(in_dims) > 3)):
            raise ValueError('Generator input must be 2-D or 3-D')
        noisy_input = h_i
        kwidth = 31
        enc_layers = 7
        skips = []
        if (is_ref and do_prelu):
            # Collect PReLU alpha tensors for summaries on the reference pass.
            alphas = []
        with tf.variable_scope('g_ae'):
            # ---- Encoder: strided downconvs, saving skip connections. ----
            for (layer_idx, layer_depth) in enumerate(segan.g_enc_depths):
                h_i_dwn = downconv(h_i, layer_depth, kwidth=kwidth, init=tf.truncated_normal_initializer(stddev=0.02), name='enc_{}'.format(layer_idx))
                if is_ref:
                    print('Downconv {} -> {}'.format(h_i.get_shape(), h_i_dwn.get_shape()))
                h_i = h_i_dwn
                if (layer_idx < (len(segan.g_enc_depths) - 1)):
                    if is_ref:
                        print('Adding skip connection downconv {}'.format(layer_idx))
                    # Skip the last layer's output: it feeds the bottleneck.
                    skips.append(h_i)
                if do_prelu:
                    if is_ref:
                        print('-- Enc: prelu activation --')
                    h_i = prelu(h_i, ref=is_ref, name='enc_prelu_{}'.format(layer_idx))
                    if is_ref:
                        # prelu returns (output, alpha) on the reference pass.
                        alpha_i = h_i[1]
                        h_i = h_i[0]
                        alphas.append(alpha_i)
                else:
                    if is_ref:
                        print('-- Enc: leakyrelu activation --')
                    h_i = leakyrelu(h_i)
            # ---- Optional latent code concatenated along channels. ----
            if z_on:
                z = make_z([segan.batch_size, h_i.get_shape().as_list()[1], segan.g_enc_depths[(- 1)]])
                h_i = tf.concat(2, [z, h_i])
            # Decoder mirrors the encoder depths, ending in 1 output channel.
            g_dec_depths = (segan.g_enc_depths[:(- 1)][::(- 1)] + [1])
            if is_ref:
                print('g_dec_depths: ', g_dec_depths)
            # ---- Decoder: deconvs doubling time resolution, fusing skips. ----
            for (layer_idx, layer_depth) in enumerate(g_dec_depths):
                h_i_dim = h_i.get_shape().as_list()
                out_shape = [h_i_dim[0], (h_i_dim[1] * 2), layer_depth]
                h_i_dcv = deconv(h_i, out_shape, kwidth=kwidth, dilation=2, init=tf.truncated_normal_initializer(stddev=0.02), name='dec_{}'.format(layer_idx))
                if is_ref:
                    print('Deconv {} -> {}'.format(h_i.get_shape(), h_i_dcv.get_shape()))
                h_i = h_i_dcv
                if (layer_idx < (len(g_dec_depths) - 1)):
                    if do_prelu:
                        if is_ref:
                            print('-- Dec: prelu activation --')
                        h_i = prelu(h_i, ref=is_ref, name='dec_prelu_{}'.format(layer_idx))
                        if is_ref:
                            alpha_i = h_i[1]
                            h_i = h_i[0]
                            alphas.append(alpha_i)
                    else:
                        if is_ref:
                            print('-- Dec: leakyrelu activation --')
                        h_i = leakyrelu(h_i)
                    # Concatenate the matching encoder skip along channels.
                    skip_ = skips[(- (layer_idx + 1))]
                    if is_ref:
                        print('Fusing skip connection of shape {}'.format(skip_.get_shape()))
                    h_i = tf.concat(2, [h_i, skip_])
                else:
                    # Final layer: bounded waveform output.
                    if is_ref:
                        print('-- Dec: tanh activation --')
                    h_i = tf.tanh(h_i)
            wave = h_i
            if (is_ref and do_prelu):
                print('Amount of alpha vectors: ', len(alphas))
            segan.gen_wave_summ = histogram_summary('gen_wave', wave)
            if is_ref:
                print('Amount of skip connections: ', len(skips))
                print('Last wave shape: ', wave.get_shape())
                print('')
            # Mark construction so later calls reuse variables.
            segan.generator_built = True
            # Residual connection: the generator predicts a correction to the input.
            wave = tf.add(wave, noisy_input)
            ret_feats = [wave]
            if ((not z_on) and (not is_ref)):
                # Plain inference: return the tensor itself rather than a list.
                ret_feats = ret_feats[0]
            if z_on:
                ret_feats.append(z)
            if (is_ref and do_prelu):
                ret_feats += alphas
            return ret_feats
def int_var_cuda(x, requires_grad=False):
    """Wrap *x* in a Variable, cast it to int64, and move it to the GPU."""
    var = Variable(x, requires_grad=requires_grad)
    return var.long().cuda()
def test_identities2():
    """Identity check: loggamma(z+1) == log(z) + loggamma(z) away from the branch cut."""
    pts = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])
    re_part, im_part = np.meshgrid(pts, pts.copy())
    z = (re_part + 1j * im_part).flatten()
    dataset = np.vstack((z, np.log(z) + loggamma(z))).T
    def f(z):
        return loggamma(z + 1)
    FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
def register_Ns3TcpWestwood_methods(root_module, cls):
    """Register the pybindgen constructors and methods for ``ns3::TcpWestwood``.

    Auto-generated binding code: the registration order is reflected in the
    generated output, so the statements below should not be reordered.
    """
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::TcpWestwood const &', 'sock')])
    cls.add_method('Fork', 'ns3::Ptr< ns3::TcpCongestionOps >', [], is_virtual=True)
    cls.add_method('GetSsThresh', 'uint32_t', [param('ns3::Ptr< ns3::TcpSocketState const >', 'tcb'), param('uint32_t', 'bytesInFlight')], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('PktsAcked', 'void', [param('ns3::Ptr< ns3::TcpSocketState >', 'tcb'), param('uint32_t', 'packetsAcked'), param('ns3::Time const &', 'rtt')], is_virtual=True)
    return
def evaluate(model, data):
    """Compute NLL loss and accuracy on the train/val/test masks of *data*."""
    model.eval()
    with torch.no_grad():
        logits = model(data)
    results = {}
    for split in ['train', 'val', 'test']:
        mask = data['{}_mask'.format(split)]
        masked_logits = logits[mask]
        labels = data.y[mask]
        results['{}_loss'.format(split)] = F.nll_loss(masked_logits, labels).item()
        predictions = masked_logits.max(1)[1]
        results['{}_acc'.format(split)] = predictions.eq(labels).sum().item() / mask.sum().item()
    return results
def parse_args():
    """Parse command-line options for the binding-site prediction script."""
    arg_parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required I/O paths.
    arg_parser.add_argument('--prot_file', '-p', required=True, help='input protein file (pdb)')
    arg_parser.add_argument('--model_path', '-mp', required=True, help='directory of models')
    arg_parser.add_argument('--model', '-m', choices=['orig', 'lds'], default='orig', help='select model')
    arg_parser.add_argument('--output', '-o', required=True, help='name of the output directory')
    # Numeric tuning parameters.
    arg_parser.add_argument('--f', type=int, default=10, help='parameter for the simplification of points mesh')
    arg_parser.add_argument('--T', type=float, default=0.9, help='ligandability threshold')
    arg_parser.add_argument('--batch', type=int, default=32, help='batch size')
    arg_parser.add_argument('--voxel_size', type=float, default=1.0, help='size of voxel in angstrom')
    # Behavioral switches.
    arg_parser.add_argument('--protonate', action='store_true', help='whether to protonate or not the input protein')
    arg_parser.add_argument('--expand', action='store_true', help='whether to expand on residue level the extracted binding sites')
    arg_parser.add_argument('--discard_points', action='store_true', help='whether to output or not the computed surface points')
    arg_parser.add_argument('--seed', type=int, default=None, help='random seed for KMeans clustering')
    return arg_parser.parse_args()
class ConditionedBatchNorm2d(Module):
    """BatchNorm2d whose affine parameters are predicted from a conditioning
    vector (FiLM-style modulation).

    Call ``set_condition`` before ``forward``; the stored condition feeds two
    small MLPs producing per-sample gamma and beta.
    """
    def __init__(self, num_features, cond_features, *args, **kwargs):
        super(ConditionedBatchNorm2d, self).__init__()
        self.cond_features = cond_features
        # Built-in affine transform disabled: gamma/beta come from the MLPs.
        self.bn = BatchNorm2d(num_features, *args, affine=False, **kwargs)
        # NOTE(review): ReLU(num_features) passes num_features as ReLU's
        # `inplace` flag (truthy) — presumably ReLU() was intended; confirm.
        self.mlp_gamma = Sequential(Linear(cond_features, num_features), ReLU(num_features), Linear(num_features, num_features))
        self.mlp_beta = Sequential(Linear(cond_features, num_features), ReLU(num_features), Linear(num_features, num_features))
        self._condition = None
    def set_condition(self, input):
        """Store the conditioning tensor, flattened to (batch, cond_features)."""
        flat = input.view(input.size(0), -1)
        assert flat.size(1) == self.cond_features
        self._condition = flat
    def forward(self, input):
        batch = input.size(0)
        channels = input.size(1)
        normalized = self.bn(input)
        # Scale is centered at 1 so an untrained MLP starts near the identity.
        gamma = self.mlp_gamma(self._condition).view(batch, channels, 1, 1) + 1.0
        beta = self.mlp_beta(self._condition).view(batch, channels, 1, 1)
        return gamma * normalized + beta
class SubPolicy(object):
    """One AutoAugment sub-policy: two image operations, each applied with its
    own probability and a discretized magnitude.

    *operation1/2* name the transform; *magnitude_idx1/2* index a 10-step
    magnitude range for that transform.
    """
    def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):
        # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin int is the documented drop-in replacement.
        ranges = {'shearX': np.linspace(0, 0.3, 10), 'shearY': np.linspace(0, 0.3, 10), 'translateX': np.linspace(0, (150 / 331), 10), 'translateY': np.linspace(0, (150 / 331), 10), 'rotate': np.linspace(0, 30, 10), 'color': np.linspace(0.0, 0.9, 10), 'posterize': np.round(np.linspace(8, 4, 10), 0).astype(int), 'solarize': np.linspace(256, 0, 10), 'contrast': np.linspace(0.0, 0.9, 10), 'sharpness': np.linspace(0.0, 0.9, 10), 'brightness': np.linspace(0.0, 0.9, 10), 'autocontrast': ([0] * 10), 'equalize': ([0] * 10), 'invert': ([0] * 10)}
        def rotate_with_fill(img, magnitude):
            # Rotate with a neutral-gray fill in the exposed corners.
            rot = img.convert('RGBA').rotate(magnitude)
            return Image.composite(rot, Image.new('RGBA', rot.size, ((128,) * 4)), rot).convert(img.mode)
        func = {'shearX': (lambda img, magnitude: img.transform(img.size, Image.AFFINE, (1, (magnitude * random.choice([(- 1), 1])), 0, 0, 1, 0), Image.BICUBIC, fillcolor=fillcolor)), 'shearY': (lambda img, magnitude: img.transform(img.size, Image.AFFINE, (1, 0, 0, (magnitude * random.choice([(- 1), 1])), 1, 0), Image.BICUBIC, fillcolor=fillcolor)), 'translateX': (lambda img, magnitude: img.transform(img.size, Image.AFFINE, (1, 0, ((magnitude * img.size[0]) * random.choice([(- 1), 1])), 0, 1, 0), fillcolor=fillcolor)), 'translateY': (lambda img, magnitude: img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, ((magnitude * img.size[1]) * random.choice([(- 1), 1]))), fillcolor=fillcolor)), 'rotate': (lambda img, magnitude: rotate_with_fill(img, magnitude)), 'color': (lambda img, magnitude: ImageEnhance.Color(img).enhance((1 + (magnitude * random.choice([(- 1), 1]))))), 'posterize': (lambda img, magnitude: ImageOps.posterize(img, magnitude)), 'solarize': (lambda img, magnitude: ImageOps.solarize(img, magnitude)), 'contrast': (lambda img, magnitude: ImageEnhance.Contrast(img).enhance((1 + (magnitude * random.choice([(- 1), 1]))))), 'sharpness': (lambda img, magnitude: ImageEnhance.Sharpness(img).enhance((1 + (magnitude * random.choice([(- 1), 1]))))), 'brightness': (lambda img, magnitude: ImageEnhance.Brightness(img).enhance((1 + (magnitude * random.choice([(- 1), 1]))))), 'autocontrast': (lambda img, magnitude: ImageOps.autocontrast(img)), 'equalize': (lambda img, magnitude: ImageOps.equalize(img)), 'invert': (lambda img, magnitude: ImageOps.invert(img))}
        self.p1 = p1
        self.operation1 = func[operation1]
        self.magnitude1 = ranges[operation1][magnitude_idx1]
        self.p2 = p2
        self.operation2 = func[operation2]
        self.magnitude2 = ranges[operation2][magnitude_idx2]
    def __call__(self, img):
        """Apply each of the two operations to *img* with its probability."""
        if (random.random() < self.p1):
            img = self.operation1(img, self.magnitude1)
        if (random.random() < self.p2):
            img = self.operation2(img, self.magnitude2)
        return img
def test_input_is_not_empty_list():
    """add_solution must reject an empty solutions list with a ValueError."""
    expected_msg = 'Empty list was given as input, list should not be empty!'
    with pytest.raises(ValueError, match=expected_msg):
        manager_test.add_solution([], model_test)
class InceptionV1Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
(height, width) = (224, 224)
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
(logits, end_points) = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits/SpatialSqueeze'))
self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
self.assertTrue(('Predictions' in end_points))
self.assertListEqual(end_points['Predictions'].get_shape().as_list(), [batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
(height, width) = (224, 224)
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
(net, end_points) = inception.inception_v1(inputs, num_classes)
self.assertTrue(net.op.name.startswith('InceptionV1/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
self.assertFalse(('Logits' in end_points))
self.assertFalse(('Predictions' in end_points))
def testBuildBaseNetwork(self):
batch_size = 5
(height, width) = (224, 224)
inputs = tf.random_uniform((batch_size, height, width, 3))
(mixed_6c, end_points) = inception.inception_v1_base(inputs)
self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_6c.get_shape().as_list(), [batch_size, 7, 7, 1024])
expected_endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
(height, width) = (224, 224)
endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
for (index, endpoint) in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
(out_tensor, end_points) = inception.inception_v1_base(inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(('InceptionV1/' + endpoint)))
self.assertItemsEqual(endpoints[:(index + 1)], end_points.keys())
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
    # Every endpoint up to Mixed_5c must exist with the documented shape.
    images = tf.random_uniform((5, 224, 224, 3))
    _, end_points = inception.inception_v1_base(images, final_endpoint='Mixed_5c')
    endpoints_shapes = {'Conv2d_1a_7x7': [5, 112, 112, 64],
                        'MaxPool_2a_3x3': [5, 56, 56, 64],
                        'Conv2d_2b_1x1': [5, 56, 56, 64],
                        'Conv2d_2c_3x3': [5, 56, 56, 192],
                        'MaxPool_3a_3x3': [5, 28, 28, 192],
                        'Mixed_3b': [5, 28, 28, 256],
                        'Mixed_3c': [5, 28, 28, 480],
                        'MaxPool_4a_3x3': [5, 14, 14, 480],
                        'Mixed_4b': [5, 14, 14, 512],
                        'Mixed_4c': [5, 14, 14, 512],
                        'Mixed_4d': [5, 14, 14, 512],
                        'Mixed_4e': [5, 14, 14, 528],
                        'Mixed_4f': [5, 14, 14, 832],
                        'MaxPool_5a_2x2': [5, 7, 7, 832],
                        'Mixed_5b': [5, 7, 7, 832],
                        'Mixed_5c': [5, 7, 7, 1024]}
    self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
    for name, expected_shape in endpoints_shapes.items():
        self.assertTrue(name in end_points)
        self.assertListEqual(end_points[name].get_shape().as_list(), expected_shape)
def testModelHasExpectedNumberOfParameters(self):
    # Parameter count of the base network under the default arg scope.
    images = tf.random_uniform((5, 224, 224, 3))
    with slim.arg_scope(inception.inception_v1_arg_scope()):
        inception.inception_v1_base(images)
    total_params, _ = slim.model_analyzer.analyze_vars(slim.get_model_variables())
    self.assertAlmostEqual(5607184, total_params)
def testHalfSizeImages(self):
    # 112x112 inputs shrink the final feature map to 4x4 spatially.
    images = tf.random_uniform((5, 112, 112, 3))
    final_map, _ = inception.inception_v1_base(images)
    self.assertTrue(final_map.op.name.startswith('InceptionV1/Mixed_5c'))
    self.assertListEqual(final_map.get_shape().as_list(), [5, 4, 4, 1024])
def testUnknownImageShape(self):
    # Spatial dims unknown at graph-build time; feed 224x224 at run time.
    tf.reset_default_graph()
    num_classes = 1000
    batch_size = 2
    image_np = np.random.uniform(0, 1, (batch_size, 224, 224, 3))
    with self.test_session() as sess:
        images = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
        logits, end_points = inception.inception_v1(images, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        pre_pool = end_points['Mixed_5c']
        tf.global_variables_initializer().run()
        pre_pool_out = sess.run(pre_pool, feed_dict={images: image_np})
        # 224 / 32 = 7 spatial resolution before the final pool.
        self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
    # global_pool must handle arbitrary (non-square) input resolutions.
    tf.reset_default_graph()
    num_classes = 1000
    batch_size = 1
    image_np = np.random.uniform(0, 1, (batch_size, 250, 300, 3))
    with self.test_session() as sess:
        images = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
        logits, end_points = inception.inception_v1(images, num_classes,
                                                    global_pool=True)
        self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        pre_pool = end_points['Mixed_5c']
        tf.global_variables_initializer().run()
        pre_pool_out = sess.run(pre_pool, feed_dict={images: image_np})
        # 250x300 inputs give an 8x10 pre-pool feature map.
        self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
def testUnknowBatchSize(self):
    # Batch dimension left unknown in the placeholder; fed at run time.
    num_classes = 1000
    height = width = 224
    images_ph = tf.placeholder(tf.float32, (None, height, width, 3))
    logits, _ = inception.inception_v1(images_ph, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
    self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
    images = tf.random_uniform((1, height, width, 3))
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(logits, {images_ph: images.eval()})
        self.assertEquals(output.shape, (1, num_classes))
def testEvaluation(self):
    # Inference-mode forward pass followed by an argmax prediction.
    num_classes = 1000
    eval_images = tf.random_uniform((2, 224, 224, 3))
    logits, _ = inception.inception_v1(eval_images, num_classes, is_training=False)
    top_class = tf.argmax(logits, 1)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        result = sess.run(top_class)
        self.assertEquals(result.shape, (2,))
def testTrainEvalWithReuse(self):
    # Build a training graph, then a reused eval graph with another batch size.
    num_classes = 1000
    height = width = 224
    train_images = tf.random_uniform((5, height, width, 3))
    inception.inception_v1(train_images, num_classes)
    eval_images = tf.random_uniform((2, height, width, 3))
    logits, _ = inception.inception_v1(eval_images, num_classes, reuse=True)
    top_class = tf.argmax(logits, 1)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        result = sess.run(top_class)
        self.assertEquals(result.shape, (2,))
def testLogitsNotSqueezed(self):
    # With spatial_squeeze=False the 1x1 spatial dims must be preserved.
    num_classes = 25
    images = tf.random_uniform([1, 224, 224, 3])
    logits, _ = inception.inception_v1(images, num_classes=num_classes,
                                       spatial_squeeze=False)
    with self.test_session() as sess:
        tf.global_variables_initializer().run()
        logits_out = sess.run(logits)
        self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def log_specific_params(scope=None):
    """Log every trainable variable under `scope` and their total element count.

    If `scope` is falsy, the current TF variable scope's name is used.
    """
    logging.info('=' * 30)
    scope = scope or tf.get_variable_scope().name
    logging.info('In {}:'.format(scope))
    total = 0
    for var in tf.trainable_variables(scope):
        # Number of scalar parameters in this variable = product of its dims.
        count = 1
        for dim in var.get_shape().as_list():
            count *= dim
        logging.info(' {}: {}'.format(var.op.name, count))
        total += count
    logging.info('Trainable Parameters Number: %d' % total)
    logging.info('=' * 30)
class AbstractLabelledClonableTree(AbstractLabelledTree, AbstractClonableTree):
    """Mixin combining labelled trees with clonable (copy-on-mutate) trees."""

    def set_root_label(self, label):
        """Set the label at the root; only legal while the tree is mutable."""
        self._require_mutable()
        self._label = label

    def set_label(self, path, label):
        """Set the label of the subtree addressed by `path` (sequence of child
        indices). An empty path relabels the root itself."""
        self._require_mutable()
        path = tuple(path)
        if not path:
            self._label = label
            return
        # Mutate a clone of the addressed child, then splice it back in.
        with self[path[0]].clone() as child:
            child.set_label(path[1:], label)
        self[path[0]] = child

    def map_labels(self, f):
        """Return a new tree with `f` applied to every label (empty trees are
        returned unchanged)."""
        if self.is_empty():
            return self
        children = [subtree.map_labels(f) for subtree in self]
        return self.parent()(children, label=f(self.label()))
def divide_by_square_root(data, scale):
    """Scale each row i of 2-D `data` by 1/sqrt(scale[i]).

    Rows whose scale entry is <= 0 are left unchanged; the input array is
    never modified.

    Bug fix: the original ended with `return (output,)` — a stray trailing
    comma that wrapped the result in a one-element tuple. It now returns the
    scaled array itself.

    Args:
        data: 2-D numpy array of shape (num_examples, dim).
        scale: 1-D sequence with one (non-negative) scale per row of `data`.

    Returns:
        A new numpy array of the same shape as `data`.
    """
    output = np.copy(data)
    num_examples = len(scale)
    assert num_examples == data.shape[0]
    assert len(data.shape) == 2
    for i in range(num_examples):
        if scale[i] > 0:
            # Same arithmetic form as before (multiply by the reciprocal root)
            # so results are bit-identical to the original.
            output[i] = np.multiply(data[i], 1 / math.sqrt(scale[i]))
    return output
# Fix: the decorator line was mangled to a bare `_grad()` call by extraction;
# restored as `@torch.no_grad()` — this function only embeds text for
# inference and must not build an autograd graph.
@torch.no_grad()
def init_prompt(prompt, pipeline):
    """Build the classifier-free-guidance context for `prompt`.

    Encodes an empty ("unconditional") prompt and the actual prompt with the
    pipeline's tokenizer/text encoder and stacks them along the batch dim:
    row 0 is the unconditional embedding, row 1 the conditional one.

    Args:
        prompt: the text prompt to encode.
        pipeline: a diffusers-style pipeline exposing `tokenizer`,
            `text_encoder` and `device`.

    Returns:
        Tensor of shape (2, max_length, embed_dim).
    """
    uncond_input = pipeline.tokenizer([''], padding='max_length',
                                      max_length=pipeline.tokenizer.model_max_length,
                                      return_tensors='pt')
    uncond_embeddings = pipeline.text_encoder(uncond_input.input_ids.to(pipeline.device))[0]
    text_input = pipeline.tokenizer([prompt], padding='max_length',
                                    max_length=pipeline.tokenizer.model_max_length,
                                    truncation=True, return_tensors='pt')
    text_embeddings = pipeline.text_encoder(text_input.input_ids.to(pipeline.device))[0]
    context = torch.cat([uncond_embeddings, text_embeddings])
    return context
def bi_sru_recurrent_network(rep_tensor, rep_mask, is_train=None, keep_prob=1.0, wd=0.0, scope=None):
    """Bidirectional SRU layer over a padded batch of sequences.

    Args:
        rep_tensor: float tensor (batch, seq_len, ivec).
        rep_mask: boolean mask (batch, seq_len); True on real tokens.
        is_train: optional bool tensor toggling dropout.
        keep_prob: dropout keep probability.
        wd: weight-decay coefficient passed to the dense layers.
        scope: optional variable-scope name.

    Returns:
        Tensor (batch, seq_len, 2 * ivec) — forward and backward SRU outputs
        concatenated on the last axis.

    Improvements: removed the unused `bs, sl, vec = tf.shape(...)` locals and
    hoisted the sequence-length computation that was duplicated in the
    forward and backward branches. Graph structure is otherwise unchanged.
    """
    ivec = rep_tensor.get_shape().as_list()[2]
    # Actual (unpadded) length of each sequence in the batch.
    seq_lens = tf.reduce_sum(tf.cast(rep_mask, tf.int32), -1)
    with tf.variable_scope(scope or 'bi_sru_recurrent_network'):
        with tf.variable_scope('forward'):
            # Pre-computed forget/reset/candidate projections for the SRU cell.
            U_d_fw = bn_dense_layer([rep_tensor], 3 * ivec, False, 0.0,
                                    'get_frc_fw', 'linear', False, wd,
                                    keep_prob, is_train)
            U_fw = tf.concat([rep_tensor, U_d_fw], -1)
            fw_cell = SwitchableDropoutWrapper(SRUCell(ivec, tf.nn.tanh),
                                               is_train, keep_prob)
            fw_output, _ = dynamic_rnn(fw_cell, U_fw, seq_lens,
                                       dtype=tf.float32, scope='forward_sru')
        with tf.variable_scope('backward'):
            U_d_bw = bn_dense_layer([rep_tensor], 3 * ivec, False, 0.0,
                                    'get_frc_bw', 'linear', False, wd,
                                    keep_prob, is_train)
            U_bw = tf.concat([rep_tensor, U_d_bw], -1)
            bw_cell = SwitchableDropoutWrapper(SRUCell(ivec, tf.nn.tanh),
                                               is_train, keep_prob)
            bw_output, _ = bw_dynamic_rnn(bw_cell, U_bw, seq_lens,
                                          dtype=tf.float32, scope='backward_sru')
        all_output = tf.concat([fw_output, bw_output], -1)
        return all_output
def idx2word(idx, i2w, pad_idx):
    """Convert a batch of word-id sequences into space-joined sentences.

    Each sequence is decoded through `i2w` (keys are stringified ids) and
    truncated at the first `pad_idx` token.
    """
    sentences = []
    for sent in idx:
        words = []
        for word_id in sent:
            if word_id == pad_idx:
                break
            words.append(i2w[str(word_id.item())])
        sentences.append(' '.join(words))
    return sentences
def test_exists():
    """_exists should report piece presence and survive a board flip."""
    board = make_test_boad()
    # Squares 19 and 20 are occupied on the test board; square 4 is empty.
    assert _exists(board, 19)
    assert _exists(board, 20)
    assert not _exists(board, 4)
    # After flipping, 19/20 remain occupied while square 2 is empty.
    flipped = _flip_board(board)
    assert _exists(flipped, 19)
    assert _exists(flipped, 20)
    assert not _exists(flipped, 2)
def _synth_regression_sparse_dataset(n_samples=10000, n_features=10000, density=0.01, dtype=np.float32):
    """Create a synthetic sparse regression problem with a held-out split.

    Returns (X_train, X_val, y_train, y_val). Each random draw uses a fresh
    `RandomState(0)` (matching the original construction exactly), except the
    additive noise which deliberately uses the global RNG.
    """
    # Sparse CSR design matrix with Gaussian values.
    X = sp.random(m=n_samples, n=n_features, density=density, format='csr',
                  random_state=0)
    X.data = np.random.RandomState(0).randn(X.getnnz())
    X = X.astype(dtype, copy=False)
    # Half-dense ground-truth coefficient vector.
    coefs = sp.random(m=n_features, n=1, density=0.5, random_state=0)
    coefs.data = np.random.RandomState(0).randn(coefs.getnnz())
    y = X.dot(coefs.toarray()).reshape(-1)
    # Add noise at 20% of the target's standard deviation.
    y += 0.2 * y.std() * np.random.randn(n_samples)
    X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
    return (X, X_val, y, y_val)
def is_prod_appengine():
    """Return True when running on production App Engine (excluding MVMs)."""
    if 'APPENGINE_RUNTIME' not in os.environ:
        return False
    # SERVER_SOFTWARE is only consulted once the runtime marker is present,
    # matching the original short-circuit (and its KeyError if unset).
    if 'Google App Engine/' not in os.environ['SERVER_SOFTWARE']:
        return False
    return not is_prod_appengine_mvms()
def filter_glove_embedding(word_dict, glove_path):
    """Load 300-d GloVe vectors for the words in `word_dict`.

    Words absent from the GloVe file keep an all-zero row. Header lines and
    malformed rows (anything but word + 300 floats) are skipped.
    """
    vectors = np.zeros(shape=[len(word_dict), 300], dtype=np.float32)
    with codecs.open(glove_path, mode='r', encoding='utf-8') as f:
        for raw in tqdm(f, total=2196018, desc='load glove embeddings'):
            parts = raw.lstrip().rstrip().split(' ')
            # Equivalent to the original (len==2 or len!=301) filter:
            # a 2-field header line already fails the 301-field check.
            if len(parts) != 301:
                continue
            token = parts[0]
            if token not in word_dict:
                continue
            vectors[word_dict[token]] = np.asarray([float(v) for v in parts[1:]])
    return np.asarray(vectors)
def build_network(config):
    """Instantiate the network named by `config.net_name`.

    Bug fix: `implemented_networks` was the *string* 'merlion', so the
    membership test was a substring check — e.g. 'merl' or 'lion' passed the
    assertion. It is now a tuple, so only exact names are accepted.

    Args:
        config: object with a `net_name` attribute.

    Returns:
        The constructed network, or None if no branch matched.

    Raises:
        AssertionError: if `config.net_name` is not an implemented network.
    """
    implemented_networks = ('merlion',)
    assert config.net_name in implemented_networks
    net = None
    if config.net_name == 'merlion':
        net = Merlion_MLP(config)
    return net
def _l2_project(next_distr_v, rewards_v, dones_mask_t, gamma, delta_z, n_atoms, v_min, v_max):
print('next_distr_v', next_distr_v.shape)
print('rewards_v', rewards_v.shape)
print('dones_mask_t', dones_mask_t.shape)
print('delta_z', delta_z.shape)
next_distr = next_distr_v.data.cpu().numpy()
rewards = rewards_v.data.cpu().numpy()
dones_mask = dones_mask_t.cpu().numpy().astype(bool)
print('dones_mask shape: ', dones_mask.shape)
batch_size = len(rewards)
proj_distr = np.zeros((batch_size, n_atoms), dtype=np.float32)
for atom in range(n_atoms):
tz_j = np.minimum(v_max, np.maximum(v_min, (rewards + ((v_min + (atom * delta_z)) * gamma))))
b_j = ((tz_j - v_min) / delta_z)
l = np.floor(b_j).astype(np.int64)
u = np.ceil(b_j).astype(np.int64)
eq_mask = (u == l)
print('proj_distr: ', proj_distr.shape)
print('eq_mask: ', eq_mask.shape)
print('l u: ', l.shape, u.shape)
print('l[eq_mask]: ', l[eq_mask].shape)
print('next_dist: ', next_distr.shape)
print('atom', atom)
proj_distr[(eq_mask, l[eq_mask])] += next_distr[(eq_mask, atom)]
ne_mask = (u != l)
proj_distr[(ne_mask, l[ne_mask])] += (next_distr[(ne_mask, atom)] * (u - b_j)[ne_mask])
proj_distr[(ne_mask, u[ne_mask])] += (next_distr[(ne_mask, atom)] * (b_j - l)[ne_mask])
if dones_mask.any():
proj_distr[dones_mask] = 0.0
tz_j = np.minimum(v_max, np.maximum(v_min, rewards[dones_mask]))
b_j = ((tz_j - v_min) / delta_z)
l = np.floor(b_j).astype(np.int64)
u = np.ceil(b_j).astype(np.int64)
eq_mask = (u == l)
eq_dones = dones_mask.copy()
eq_dones[dones_mask] = eq_mask
if eq_dones.any():
proj_distr[(eq_dones, l[eq_mask])] = 1.0
ne_mask = (u != l)
ne_dones = dones_mask.copy()
ne_dones[dones_mask] = ne_mask
if ne_dones.any():
proj_distr[(ne_dones, l[ne_mask])] = (u - b_j)[ne_mask]
proj_distr[(ne_dones, u[ne_mask])] = (b_j - l)[ne_mask]
return proj_distr |
# NOTE(review): the bare `_grad()` line below appears to be a decorator
# mangled by extraction (most likely `@torch.no_grad()`, as elsewhere in this
# codebase) — confirm against the original source.
_grad()
def run(selected_batch_, config, model, autoencoder, text_encoder, diffusion, condition_null_generator_dict, idx, NULL_CONDITION, SAVE_NAME, seed):
    """Sample one batch of images with PLMS + classifier-free guidance and
    save the rendered conditions, the generated images and their captions.

    The input batch is deep-copied twice: `selected_batch` has the conditions
    listed in NULL_CONDITION nulled out, `uc_batch` has *all* supported
    conditions nulled (the unconditional branch for guidance).

    Args:
        selected_batch_: the batch dict (must contain a 'caption' entry and
            one entry per condition mode); never mutated.
        config: dict with at least 'batch_size' and 'guidance_scale'.
        model / autoencoder / text_encoder / diffusion: the generation stack.
        condition_null_generator_dict: mode -> null-condition generator.
        idx: index used in the saved file names.
        NULL_CONDITION: collection of modes to null in the conditional branch.
        SAVE_NAME: subdirectory of 'inference' to write outputs to.
        seed: RNG seed for the starting noise.

    Returns:
        The decoded images as a detached CPU tensor.
    """
    # Seed both CPU and CUDA RNGs so the starting noise is reproducible.
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    starting_noise = torch.randn(1, 4, 64, 64).to(device)
    # Work on copies so the caller's batch is left untouched.
    selected_batch = deepcopy(selected_batch_)
    uc_batch = deepcopy(selected_batch_)
    for mode in condition_null_generator_dict:
        if (mode in NULL_CONDITION):
            # Null this condition in the conditional branch as requested.
            condition_null_generator = condition_null_generator_dict[mode]
            condition_null_generator.prepare(selected_batch[mode])
            selected_batch[mode] = condition_null_generator.get_null_input(selected_batch[mode])
        elif (mode in ['sketch', 'depth']):
            # sketch/depth are kept intact in the conditional branch AND
            # skipped below, i.e. not nulled in the unconditional branch either.
            continue
        # Every non-skipped mode is nulled in the unconditional branch.
        condition_null_generator = condition_null_generator_dict[mode]
        condition_null_generator.prepare(uc_batch[mode])
        uc_batch[mode] = condition_null_generator.get_null_input(uc_batch[mode])
    selected_batch = batch_to_device(selected_batch, device)
    uc_batch = batch_to_device(uc_batch, device)
    torch.cuda.empty_cache()
    batch_here = config['batch_size']
    # Text context for the conditional and unconditional (empty-prompt) branches.
    context = text_encoder.encode(selected_batch['caption'])
    uc = text_encoder.encode((batch_here * ['']))
    alpha_generator_func = partial(alpha_generator, config=config)
    sampler = PLMSSampler(diffusion, model, alpha_generator_func=alpha_generator_func, set_alpha_scale=set_alpha_scale)
    steps = 50
    shape = (batch_here, model.in_channels, model.image_size, model.image_size)
    input_dict = dict(x=starting_noise, timesteps=None, context=context, inpainting_extra_input=None, condition=selected_batch)
    uc_dict = dict(context=uc, condition=uc_batch)
    # PLMS sampling in latent space, then decode to pixel space.
    samples = sampler.sample(S=steps, shape=shape, input=input_dict, uc_dict=uc_dict, guidance_scale=config['guidance_scale'])
    pred_image = autoencoder.decode(samples)
    # One row per rendered condition plus the generated image itself.
    image_dict = [{'tensors': draw_sketch_with_batch_to_tensor(selected_batch), 'n_in_row': 1, 'pp_type': iutils.PP_SEGM}, {'tensors': draw_depth_with_batch_to_tensor(selected_batch), 'n_in_row': 1, 'pp_type': iutils.PP_SEGM}, {'tensors': draw_boxes_with_batch_to_tensor(selected_batch), 'n_in_row': 1, 'pp_type': iutils.PP_SEGM}, {'tensors': draw_keypoints_with_batch_to_tensor(selected_batch), 'n_in_row': 1, 'pp_type': iutils.PP_SEGM}, {'tensors': draw_image_embedding_with_batch_to_tensor(selected_batch), 'n_in_row': 1, 'pp_type': iutils.PP_RGB}, {'tensors': draw_color_palettes_with_batch_to_tensor(selected_batch), 'n_in_row': 1, 'pp_type': iutils.PP_SEGM}, {'tensors': pred_image, 'n_in_row': 1, 'pp_type': iutils.PP_RGB}]
    os.makedirs(os.path.join('inference', SAVE_NAME), exist_ok=True)
    iutils.save_images_from_dict(image_dict, dir_path=os.path.join('inference', SAVE_NAME), file_name='sampled_{:4d}'.format(idx), n_instance=config['batch_size'], is_save=True, return_images=False)
    # Append this batch's captions to a shared log file.
    save_path = os.path.join('inference', SAVE_NAME, 'captions.txt')
    with open(save_path, 'a') as f:
        f.write((('idx ' + str(idx)) + ':\n'))
        for cap in selected_batch['caption']:
            f.write((cap + '\n'))
        f.write('\n')
    print('Save images and its corresponding captions.. done')
    return pred_image.detach().cpu()
def _find_c_source(base_path):
    """Return the first existing file `base_path` + C extension, or None."""
    for extension in C_FILE_EXTENSIONS:
        candidate = base_path + extension
        if os.path.exists(candidate):
            return candidate
    return None
def to_membership_vector(partition):
    """Invert a partition (iterable of member groups) into a mapping
    {member: index of the group containing it}."""
    membership = {}
    for group_index, group in enumerate(partition):
        for member in group:
            membership[member] = group_index
    return membership
class Blip2Processor(ProcessorMixin):
    """Wraps a BLIP image processor and a tokenizer into a single processor
    for BLIP-2: images go through the image processor, text through the
    tokenizer, and the two encodings are merged into one BatchEncoding."""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor, tokenizer):
        # BLIP-2 never uses token type ids, so disable them on the tokenizer.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        # Default to image processing; __call__ flips this for text-only input.
        self.current_processor = self.image_processor

    def __call__(self, images=None, text: Union[(TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput])]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_token_type_ids: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[(str, TensorType)]]=None, **kwargs) -> BatchEncoding:
        """Prepare images and/or text for the model.

        With only `text`, returns the tokenizer output; with only `images`,
        the image-processor output; with both, the image-processor output
        augmented with the tokenized text. Raises ValueError when neither
        is given. All tokenizer kwargs are forwarded unchanged.
        """
        if ((images is None) and (text is None)):
            raise ValueError('You have to specify either images or text.')
        if (images is None):
            # Text-only call: delegate entirely to the tokenizer.
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # Image (and possibly text) call.
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if (text is not None):
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if (text_encoding is not None):
            # Merge token fields into the image encoding (keys overwrite).
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    # NOTE(review): upstream transformers defines this as a @property; the
    # decorator may have been lost during extraction — confirm before calling
    # it as plain method vs. attribute.
    def model_input_names(self):
        # Union of both sub-processors' input names, first-seen order, deduped.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys((tokenizer_input_names + image_processor_input_names)))
class _ColorfulFormatter(logging.Formatter):
def __init__(self, *args, **kwargs):
self._root_name = (kwargs.pop('root_name') + '.')
self._abbrev_name = kwargs.pop('abbrev_name', '')
if len(self._abbrev_name):
self._abbrev_name = (self._abbrev_name + '.')
super(_ColorfulFormatter, self).__init__(*args, **kwargs)
def formatMessage(self, record):
record.name = record.name.replace(self._root_name, self._abbrev_name)
log = super(_ColorfulFormatter, self).formatMessage(record)
if (record.levelno == logging.WARNING):
prefix = colored('WRN', 'red', attrs=['blink'])
elif (record.levelno == logging.DEBUG):
prefix = colored('DBG', 'yellow', attrs=['blink'])
elif ((record.levelno == logging.ERROR) or (record.levelno == logging.CRITICAL)):
prefix = colored('ERROR', 'red', attrs=['blink', 'underline'])
else:
return log
return ((prefix + ' ') + log) |
def linearity_cutoff_test(fluorescence_counts, prediction_counts, start_threshold=500, increment=1, p_cutoff=1e-05, n_neighbors=5):
    """Find the smallest fluorescence threshold at which a KNN fit and a
    linear fit of counts-vs-predictions have significantly different residual
    spreads (Levene test), i.e. where linearity starts to break down.

    Bug fix: the loop bound referenced an undefined `nc_flat`; it now uses
    `fluorescence_counts`, the array actually being thresholded. Also
    pre-assigns `test_threshold` so an empty scan range cannot raise
    UnboundLocalError.

    Args:
        fluorescence_counts: 1-D array of observed counts.
        prediction_counts: 1-D array of predicted counts, same length.
        start_threshold: first threshold to try.
        increment: threshold step size.
        p_cutoff: Levene-test p-value below which the scan stops.
        n_neighbors: neighbours for the KNN regressor.

    Returns:
        The first threshold whose Levene p-value drops below `p_cutoff`,
        or the last threshold tried (start_threshold if none were).
    """
    test_threshold = start_threshold
    for test_threshold in range(start_threshold, int(fluorescence_counts.max()), increment):
        below = fluorescence_counts < test_threshold
        y = fluorescence_counts[below]
        X = np.atleast_2d(prediction_counts[below]).T
        linear_model = LinearRegression().fit(X, y)
        knn_model = KNeighborsRegressor(n_neighbors).fit(X, y)
        linear_residual = y - linear_model.predict(X)
        knn_residual = y - knn_model.predict(X)
        # If the residual variances differ significantly, the relation is no
        # longer well-described by a line below this threshold.
        if stats.levene(knn_residual, linear_residual).pvalue < p_cutoff:
            break
    return test_threshold
def rgb2ycbcr(img, y_only=False):
    """Convert an RGB image to YCbCr using the BT.601 coefficients
    (same convention as MATLAB's rgb2ycbcr).

    The input is first normalised by `_convert_input_type_range` and the
    output restored to the original dtype's range. If `y_only`, only the
    luma channel is returned.
    """
    input_type = img.dtype
    img = _convert_input_type_range(img)
    if y_only:
        out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0
    else:
        weights = [[65.481, (- 37.797), 112.0],
                   [128.553, (- 74.203), (- 93.786)],
                   [24.966, 112.0, (- 18.214)]]
        out_img = np.matmul(img, weights) + [16, 128, 128]
    return _convert_output_type_range(out_img, input_type)
def _find_python_module_path(module):
    """Locate `module`'s package directory by asking a `python` subprocess.

    NOTE(review): `module` is interpolated straight into a shell command —
    only call this with trusted module names.
    """
    command = 'python -c "import %s;print(%s.__path__[0])"' % (module, module)
    proc = os.popen(command)
    return proc.readline().strip()
def create_pipeline_configuration(DEBUG=False, batch_size=4):
    """Build the 16-stage pipeline-parallel configuration for T5.

    The static `config` dict describes model inputs/outputs and per-stage
    tensor descriptors (shapes recorded for a reference batch size of 4);
    every batched shape is then rewritten to use ``batch_size`` along
    ``config['batch_dim']``.  With ``DEBUG`` true, all stages run on CPU
    instead of one CUDA device per stage.
    """
    config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (T5LayerNorm, CrossEntropyLoss, T5Block, Dropout, StatelessEmbedding, Linear), 'model_inputs': {'attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [0, 1, 2, 3, 4, 5, 6, 7]}, 'decoder_attention_mask': {'shape': torch.Size([4, 1, 4, 4]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [9, 10, 11, 12, 13, 14, 15]}, 'decoder_input_ids': {'shape': torch.Size([4, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [9]}, 'input_ids': {'shape': torch.Size([4, 512]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'inverted_encoder_attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'is_batched': True, 'used_by': [9, 10, 11, 12, 13, 14, 15]}, 'lm_labels': {'shape': torch.Size([4, 4]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [15]}}, 'model_outputs': {'T5ForConditionalGeneration/CrossEntropyLoss[lm_loss]': {'shape': torch.Size([1]), 'dtype': torch.float32, 'is_batched': False, 'created_by': 15}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([4, 512]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'T5ForConditionalGeneration/Parameter[shared_embed_weight]': {'shape': torch.Size([32100, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_1': {'shape': torch.Size([4, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[2]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}}, 'devices': [('cpu' if DEBUG else 
'cuda:0')], 'stage_depth': 15}, 1: {'stage_cls': Partition1, 'inputs': {'attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_1': {'shape': torch.Size([4, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[2]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_2': {'shape': torch.Size([4, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[5]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 14}, 2: {'stage_cls': Partition2, 'inputs': {'attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_2': {'shape': torch.Size([4, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[5]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_3': {'shape': torch.Size([4, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[8]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 
'stage_depth': 13}, 3: {'stage_cls': Partition3, 'inputs': {'attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_3': {'shape': torch.Size([4, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[8]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_4': {'shape': torch.Size([4, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[11]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 12}, 4: {'stage_cls': Partition4, 'inputs': {'attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_4': {'shape': torch.Size([4, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[11]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_5': {'shape': torch.Size([4, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[14]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 
'stage_depth': 11}, 5: {'stage_cls': Partition5, 'inputs': {'attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_5': {'shape': torch.Size([4, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[14]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_6': {'shape': torch.Size([4, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[17]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 10}, 6: {'stage_cls': Partition6, 'inputs': {'attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_6': {'shape': torch.Size([4, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[17]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_7': {'shape': torch.Size([4, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[20]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 
'stage_depth': 9}, 7: {'stage_cls': Partition7, 'inputs': {'attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___22_7': {'shape': torch.Size([4, 32, 512, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[20]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[23]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 8}, 8: {'stage_cls': Partition8, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[23]': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 7}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}}, 'devices': [('cpu' if DEBUG else 'cuda:8')], 'stage_depth': 7}, 9: {'stage_cls': Partition9, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([4, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_input_ids': {'shape': torch.Size([4, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/Parameter[shared_embed_weight]': {'shape': torch.Size([32100, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'created_by': 0}, 
'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_10': {'shape': torch.Size([4, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_10': {'shape': torch.Size([4, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[1]': {'shape': torch.Size([4, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}}, 'devices': [('cpu' if DEBUG else 'cuda:9')], 'stage_depth': 6}, 10: {'stage_cls': Partition10, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([4, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_10': {'shape': torch.Size([4, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_10': {'shape': torch.Size([4, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[1]': {'shape': torch.Size([4, 
4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_11': {'shape': torch.Size([4, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_11': {'shape': torch.Size([4, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]': {'shape': torch.Size([4, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}}, 'devices': [('cpu' if DEBUG else 'cuda:10')], 'stage_depth': 5}, 11: {'stage_cls': Partition11, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([4, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_11': {'shape': torch.Size([4, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_11': {'shape': torch.Size([4, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[5]': {'shape': torch.Size([4, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 
10}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_12': {'shape': torch.Size([4, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_12': {'shape': torch.Size([4, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[9]': {'shape': torch.Size([4, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}}, 'devices': [('cpu' if DEBUG else 'cuda:11')], 'stage_depth': 4}, 12: {'stage_cls': Partition12, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([4, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_12': {'shape': torch.Size([4, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_12': {'shape': torch.Size([4, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[9]': {'shape': torch.Size([4, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': 
{'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_13': {'shape': torch.Size([4, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_13': {'shape': torch.Size([4, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[13]': {'shape': torch.Size([4, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}}, 'devices': [('cpu' if DEBUG else 'cuda:12')], 'stage_depth': 3}, 13: {'stage_cls': Partition13, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([4, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_13': {'shape': torch.Size([4, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_13': {'shape': torch.Size([4, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[13]': {'shape': torch.Size([4, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 
'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_14': {'shape': torch.Size([4, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_14': {'shape': torch.Size([4, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]': {'shape': torch.Size([4, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}}, 'devices': [('cpu' if DEBUG else 'cuda:13')], 'stage_depth': 2}, 14: {'stage_cls': Partition14, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([4, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_14': {'shape': torch.Size([4, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_14': {'shape': torch.Size([4, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]': {'shape': torch.Size([4, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 
'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_15': {'shape': torch.Size([4, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_15': {'shape': torch.Size([4, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[21]': {'shape': torch.Size([4, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}}, 'devices': [('cpu' if DEBUG else 'cuda:14')], 'stage_depth': 1}, 15: {'stage_cls': Partition15, 'inputs': {'decoder_attention_mask': {'shape': torch.Size([4, 1, 4, 4]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'inverted_encoder_attention_mask': {'shape': torch.Size([4, 1, 1, 512]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'lm_labels': {'shape': torch.Size([4, 4]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([4, 512, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___130_15': {'shape': torch.Size([4, 32, 4, 4]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___132_15': {'shape': torch.Size([4, 32, 4, 512]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[21]': {'shape': torch.Size([4, 4, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}}, 'outputs': {'T5ForConditionalGeneration/CrossEntropyLoss[lm_loss]': {'shape': torch.Size([1]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': 
False, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:15')], 'stage_depth': 0}}}

    batch_dim = config['batch_dim']

    def _apply_batch_size(desc):
        # Rewrite the batch axis of a batched tensor descriptor to `batch_size`.
        # (Extracted: the original duplicated this shape surgery in two loops.)
        if desc['is_batched']:
            shape = desc['shape']
            desc['shape'] = torch.Size(shape[:batch_dim] + (batch_size,) + shape[batch_dim + 1:])

    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        _apply_batch_size(d)
    for s in config['stages'].values():
        for d in chain(s['inputs'].values(), s['outputs'].values()):
            _apply_batch_size(d)
    return config
def test_shortest_path():
    """Check floyd_warshall_slow against single_source_shortest_path_length on an unweighted graph."""
    dist_matrix = generate_graph(20)
    # Collapse all edge weights to 1 so path lengths equal hop counts.
    dist_matrix[dist_matrix != 0] = 1
    for directed in (True, False):
        if not directed:
            # Symmetrize the adjacency matrix for the undirected case.
            dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
        expected = floyd_warshall_slow(dist_matrix.copy(), directed)
        for source in range(dist_matrix.shape[0]):
            lengths = defaultdict(int)
            lengths.update(single_source_shortest_path_length(dist_matrix, source))
            for target in range(expected[source].shape[0]):
                assert_array_almost_equal(lengths[target], expected[source, target])
def fused_batch_normalization_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, axes=(1,), decay_rate=0.9, eps=1e-05, batch_stat=True, nonlinearity='relu'):
    """Backward pass for fused batch normalization (+ optional residual add / ReLU).

    inputs are (x, beta, gamma, running_mean, running_var[, z]) where `z` is the
    optional residual-add input.  Returns the gradients for those inputs; the
    running statistics receive no gradient (None entries).

    Raises
    ------
    ValueError : if `nonlinearity` is not '' or 'relu'.
    """
    if nonlinearity not in ['', 'relu']:
        raise ValueError("nonlinearity must be either '' or 'relu'.")
    ctx = nn.get_current_context()
    df = FusedBatchNormalizationBackward(ctx, axes, decay_rate, eps, batch_stat, nonlinearity)
    dy = grad_inputs[0]
    x0 = inputs[0]
    b0 = inputs[1]
    g0 = inputs[2]
    rm = inputs[3]
    rv = inputs[4]
    # Optional residual-add input is present only when 6 inputs are given.
    z0 = inputs[5] if len(inputs) == 6 else None
    # BUG FIX: the original used `True if z0 else False`, which evaluates the
    # truthiness of the object itself (ambiguous/undefined for array-like
    # variables) instead of testing whether it was provided.
    df.is_add = z0 is not None
    y0 = outputs[0]
    if df.is_add:
        (dx0, db0, dg0, dz0) = df(dy, x0, b0, g0, rm, rv, y0, z0)
        # No gradients flow to the running mean / running variance.
        return (dx0, db0, dg0, None, None, dz0)
    else:
        (dx0, db0, dg0) = df(dy, x0, b0, g0, rm, rv, y0)
        return (dx0, db0, dg0, None, None)
def get_combined_args(parser: ArgumentParser):
    """Merge command-line arguments with the 'cfg_args' Namespace stored in the model directory.

    Values given on the command line (i.e. not None) override the config-file
    values.  If the config file is missing or `model_path` is None, only the
    command-line values are used.

    Returns
    -------
    Namespace with the merged arguments.
    """
    cmdlne_string = sys.argv[1:]
    cfgfile_string = 'Namespace()'
    args_cmdline = parser.parse_args(cmdlne_string)
    try:
        cfgfilepath = os.path.join(args_cmdline.model_path, 'cfg_args')
        print('Looking for config file in', cfgfilepath)
        with open(cfgfilepath) as cfg_file:
            print('Config file found: {}'.format(cfgfilepath))
            cfgfile_string = cfg_file.read()
    except (TypeError, FileNotFoundError):
        # BUG FIX: the original only caught TypeError (model_path is None),
        # so a missing config file raised FileNotFoundError instead of
        # falling back to the empty Namespace.
        print('Config file not found at')
    # NOTE(review): eval of the stored repr is only safe because 'cfg_args'
    # is written by this project itself; do not point model_path at
    # untrusted directories.
    args_cfgfile = eval(cfgfile_string)
    merged_dict = vars(args_cfgfile).copy()
    for (k, v) in vars(args_cmdline).items():
        if v is not None:
            merged_dict[k] = v
    return Namespace(**merged_dict)
class JoinFeature(Feature):
    """A feature that is present exactly when all of its constituent features are present."""

    def __init__(self, name, features, spkg=None, url=None, description=None, type=None, **kwds):
        """Combine *features*; spkg/url/type are inferred from them when not given."""
        if spkg is None:
            candidate_spkgs = {f.spkg for f in features if f.spkg}
            if len(candidate_spkgs) > 1:
                raise ValueError('given features have more than one spkg; provide spkg argument')
            if len(candidate_spkgs) == 1:
                (spkg,) = candidate_spkgs
        if url is None:
            candidate_urls = {f.url for f in features if f.url}
            if len(candidate_urls) > 1:
                raise ValueError('given features have more than one url; provide url argument')
            if len(candidate_urls) == 1:
                (url,) = candidate_urls
        if type is None:
            # Use the "strongest" spkg type found among the constituents.
            if any(f._spkg_type() == 'experimental' for f in features):
                type = 'experimental'
            elif any(f._spkg_type() == 'optional' for f in features):
                type = 'optional'
            else:
                type = 'standard'
        super().__init__(name, spkg=spkg, url=url, description=description, type=type, **kwds)
        self._features = features

    def _is_present(self):
        """Return the first failing constituent test, or a passing result."""
        for feature in self._features:
            result = feature._is_present()
            if not result:
                return result
        return FeatureTestResult(self, True)

    def is_functional(self):
        """Deprecated alias for presence testing of all constituent features."""
        try:
            from sage.misc.superseded import deprecation
        except ImportError:
            # Standalone use outside Sage: skip the deprecation warning.
            pass
        else:
            deprecation(33114, 'method JoinFeature.is_functional; use is_present instead')
        for feature in self._features:
            result = feature.is_functional()
            if not result:
                return result
        return FeatureTestResult(self, True)

    def hide(self):
        """Hide this feature and every constituent feature."""
        for feature in self._features:
            feature.hide()
        super().hide()

    def unhide(self):
        """Unhide this feature and every constituent feature."""
        for feature in self._features:
            feature.unhide()
        super().unhide()
class ComplexMultiply(Benchmark):
    """Benchmark repeated multiplication of two complex numbers at a given bit precision."""

    def __init__(self, bits_prec, times):
        self.__bits_prec = bits_prec
        self.__times = times
        self.repr_str = ('List of multiplies of two complex numbers with %s bits of precision %s times' % (self.__bits_prec, self.__times))

    def sage(self):
        """Time the multiplications in Sage; returns CPU seconds."""
        field = ComplexField(self.__bits_prec)
        value = field(2).sqrt() + (field.gen() * 2).sqrt()
        start = cputime()
        [value * value for _ in range(self.__times)]
        return cputime(start)

    def magma(self):
        """Time the multiplications in Magma; returns Magma CPU seconds."""
        # Convert bit precision to decimal digits for Magma.
        digits = int(self.__bits_prec / log(10, 2)) + 1
        field = magma.ComplexField(digits)
        value = field(2).Sqrt() + field.gen(1).Sqrt()
        start = magma.cputime()
        magma.eval('s := %s;' % value.name())
        magma('[s*s : i in [1..%s]]' % self.__times)
        return magma.cputime(start)

    def gp(self):
        """Time the multiplications in PARI/GP; returns CPU seconds."""
        # Convert bit precision to decimal digits for GP.
        digits = int(self.__bits_prec / log(10, 2)) + 1
        gp.set_real_precision(digits)
        gp.eval('s = sqrt(2) + sqrt(2*I);')
        gp.eval('gettime;')
        gp('vector(%s,i,s*s)' % self.__times)
        return float(gp.eval('gettime/1000.0'))
def dendrogram_coords(leaf_positions, partition_tree):
    """Collect x/y plotting coordinates for a dendrogram via the recursive helper.

    Starts the recursion at the root (the last row of `partition_tree`) and
    returns the accumulated coordinates as a pair of numpy arrays.
    """
    xs, ys = [], []
    root = partition_tree.shape[0] - 1
    _dendrogram_coords_rec(root, leaf_positions, partition_tree, xs, ys)
    return np.array(xs), np.array(ys)
class Solver(object):
def __init__(self, data, models, optimizers, args):
    """Wire up data loaders, models, optimizers and training options.

    Parameters
    ----------
    data : dict with 'tr_loader', 'cv_loader', 'tt_loader' entries
        (train / cross-validation / test loaders).
    models : dict of models; must contain a 'generator' entry.
    optimizers : dict of optimizers; must contain 'optimizer' (and
        'disc_optimizer' when adversarial training is enabled).
    args : experiment configuration namespace.
    """
    self.tr_loader = data['tr_loader']
    self.cv_loader = data['cv_loader']
    self.tt_loader = data['tt_loader']
    self.args = args
    # Adversarial training is enabled only if the experiment config both
    # declares and switches on the 'adversarial' flag.
    self.adversarial_mode = (('adversarial' in args.experiment) and args.experiment.adversarial)
    self.models = models
    # Distributed-wrapped counterparts of each model.
    self.dmodels = {k: distrib.wrap(model) for (k, model) in models.items()}
    self.model = self.models['generator']
    self.dmodel = self.dmodels['generator']
    self.optimizers = optimizers
    self.optimizer = optimizers['optimizer']
    if self.adversarial_mode:
        self.disc_optimizers = {'disc_optimizer': optimizers['disc_optimizer']}
    # Training-loop configuration.
    self.device = args.device
    self.epochs = args.epochs
    self.continue_from = args.continue_from
    self.eval_every = args.eval_every
    self.cross_valid = args.cross_valid
    self.cross_valid_every = args.cross_valid_every
    # Checkpointing configuration.
    self.checkpoint = args.checkpoint
    if self.checkpoint:
        self.checkpoint_file = Path(args.checkpoint_file)
        self.best_file = Path(args.best_file)
        logger.debug('Checkpoint will be saved to %s', self.checkpoint_file.resolve())
    self.history_file = args.history_file
    self.best_states = None
    self.restart = args.restart
    self.history = []
    self.samples_dir = args.samples_dir
    self.num_prints = args.num_prints
    # Optional loss / transform objects that depend on the experiment config.
    if ('stft' in self.args.losses):
        self.mrstftloss = MultiResolutionSTFTLoss(factor_sc=args.stft_sc_factor, factor_mag=args.stft_mag_factor).to(self.device)
    if (('discriminator_model' in self.args.experiment) and (self.args.experiment.discriminator_model == 'hifi')):
        self.melspec_transform = torchaudio.transforms.MelSpectrogram(self.args.experiment.hr_sr, **self.args.experiment.mel_spectrogram).to(self.device)
    # Resume from a checkpoint or `continue_from` model, if available.
    self._reset()
def _copy_models_states(self):
    """Snapshot the state_dict of every managed model, keyed by model name."""
    return {name: copy_state(model.state_dict())
            for name, model in self.models.items()}
def _load(self, package, load_best=False):
    """Restore model (and, for regular checkpoints, optimizer) state from a package.

    With ``load_best`` the best-so-far model weights are loaded instead of the
    latest ones; optimizer state is only restored for regular checkpoints.
    """
    if load_best:
        best_models = package[SERIALIZE_KEY_BEST_STATES][SERIALIZE_KEY_MODELS]
        for name, model_package in best_models.items():
            self.models[name].load_state_dict(model_package[SERIALIZE_KEY_STATE])
    else:
        for name, model_package in package[SERIALIZE_KEY_MODELS].items():
            self.models[name].load_state_dict(model_package[SERIALIZE_KEY_STATE])
        for name, opt_package in package[SERIALIZE_KEY_OPTIMIZERS].items():
            self.optimizers[name].load_state_dict(opt_package)
def _reset(self):
    """Resume training state from the run's checkpoint or a `continue_from` model."""
    load_from = None
    load_best = False
    keep_history = True
    if self.checkpoint and self.checkpoint_file.exists() and not self.restart:
        # An existing checkpoint of this run takes priority over continue_from.
        load_from = self.checkpoint_file
    elif self.continue_from:
        load_from = self.continue_from
        load_best = self.args.continue_best
        keep_history = self.args.keep_history
    if load_from:
        logger.info(f'Loading checkpoint model: {load_from}')
        package = torch.load(load_from, 'cpu')
        self._load(package, load_best)
        if keep_history:
            self.history = package[SERIALIZE_KEY_HISTORY]
        self.best_states = package[SERIALIZE_KEY_BEST_STATES]
def train(self):
    """Main training loop.

    Replays logged metrics when resuming, then for each remaining epoch:
    trains one epoch, optionally cross-validates (tracking the best model
    states by validation loss), periodically evaluates on the test set
    (LSD / ViSQOL), logs everything to wandb, and checkpoints on rank 0.
    """
    # When resuming, re-log the metrics of already-finished epochs.
    if self.history:
        logger.info('Replaying metrics from previous run')
        for (epoch, metrics) in enumerate(self.history):
            info = ' '.join((f'{k.capitalize()}={v:.5f}' for (k, v) in metrics.items()))
            logger.info(f'Epoch {(epoch + 1)}: {info}')
    logger.info(('-' * 70))
    logger.info('Trainable Params:')
    for (name, model) in self.models.items():
        n_params = sum((p.numel() for p in model.parameters() if p.requires_grad))
        # Size estimate assumes 4 bytes (float32) per parameter.
        mb = ((n_params * 4) / (2 ** 20))
        logger.info(f'{name}: parameters: {n_params}, size: {mb} MB')
    torch.set_num_threads(1)
    best_loss = None
    self.best_states = {}
    # Resume epoch numbering from where the loaded history left off.
    for epoch in range(len(self.history), self.epochs):
        self.model.train()
        start = time.time()
        logger.info(('-' * 70))
        logger.info('Training...')
        losses = self._run_one_epoch(epoch)
        logger_msg = (f'Train Summary | End of Epoch {(epoch + 1)} | Time {(time.time() - start):.2f}s | ' + ' | '.join([f'{k} Loss {v:.5f}' for (k, v) in losses.items()]))
        logger.info(bold(logger_msg))
        losses = {(k + '_loss'): v for (k, v) in losses.items()}
        valid_losses = {}
        evaluation_loss = None
        evaluated_on_test_data = False
        # Cross-validate every `cross_valid_every` epochs and on the last one.
        if (self.cross_valid and ((((epoch + 1) % self.cross_valid_every) == 0) or (epoch == (self.epochs - 1))) and self.cv_loader):
            cross_valid_start = time.time()
            logger.info(('-' * 70))
            logger.info('Cross validation...')
            self.model.eval()
            with torch.no_grad():
                if self.args.valid_equals_test:
                    # Validate directly on the test set; also enhance (save
                    # samples) when this epoch is an evaluation epoch.
                    enhance_valid_data = ((((epoch + 1) % self.eval_every) == 0) or ((epoch == (self.epochs - 1)) and self.tt_loader))
                    (valid_losses, enhanced_filenames) = self._get_valid_losses_on_test_data(epoch, enhance=enhance_valid_data)
                    evaluated_on_test_data = True
                else:
                    valid_losses = self._run_one_epoch(epoch, cross_valid=True)
            self.model.train()
            evaluation_loss = valid_losses['evaluation']
            logger_msg = (f'Validation Summary | End of Epoch {(epoch + 1)} | Time {(time.time() - cross_valid_start):.2f}s | ' + ' | '.join([f'{k} Valid Loss {v:.5f}' for (k, v) in valid_losses.items()]))
            logger.info(bold(logger_msg))
            valid_losses = {(('valid_' + k) + '_loss'): v for (k, v) in valid_losses.items()}
            # Best validation loss ever seen, including replayed history.
            best_loss = min((pull_metric(self.history, 'valid_evaluation_loss') + [evaluation_loss]))
            if (evaluation_loss == best_loss):
                logger.info(bold('New best valid loss %.4f'), evaluation_loss)
                self.best_states = self._copy_models_states()
        metrics = {**losses, **valid_losses}
        if evaluation_loss:
            metrics.update({METRICS_KEY_EVALUATION_LOSS: evaluation_loss})
        if best_loss:
            metrics.update({METRICS_KEY_BEST_LOSS: best_loss})
        # Periodic test-set evaluation (LSD / ViSQOL metrics).
        if (((((epoch + 1) % self.eval_every) == 0) or (epoch == (self.epochs - 1))) and self.tt_loader):
            logger.info(('-' * 70))
            logger.info('Evaluating on the test set...')
            if (self.args.evaluate_on_best and self.best_states):
                logger.info('Loading best state.')
                best_state = self.best_states[GENERATOR_KEY]
            else:
                logger.info('Using last state.')
                best_state = self.model.state_dict()
            # Temporarily swap in the chosen weights for evaluation only.
            with swap_state(self.model, best_state):
                logger.info('Enhance and save samples...')
                evaluation_start = time.time()
                if evaluated_on_test_data:
                    logger.info('Samples already evaluated in cross validation, calculating metrics.')
                    enhanced_dataset = PrHrSet(self.args.samples_dir, enhanced_filenames)
                    enhanced_dataloader = distrib.loader(enhanced_dataset, batch_size=1, shuffle=False, num_workers=self.args.num_workers)
                    (lsd, visqol) = evaluate_on_saved_data(self.args, enhanced_dataloader, epoch)
                elif self.args.joint_evaluate_and_enhance:
                    logger.info('Jointly evaluating and enhancing.')
                    (lsd, visqol, enhanced_filenames) = evaluate(self.args, self.tt_loader, epoch, self.model)
                else:
                    enhanced_filenames = enhance(self.tt_loader, self.model, self.args)
                    enhanced_dataset = PrHrSet(self.args.samples_dir, enhanced_filenames)
                    enhanced_dataloader = DataLoader(enhanced_dataset, batch_size=1, shuffle=False)
                    (lsd, visqol) = evaluate_on_saved_data(self.args, enhanced_dataloader, epoch)
                if ((epoch == (self.epochs - 1)) and self.args.log_results):
                    # The joint-evaluate path above does not build a dataloader.
                    if (not ('enhanced_dataloader' in locals())):
                        enhanced_dataset = PrHrSet(self.args.samples_dir, enhanced_filenames)
                        enhanced_dataloader = DataLoader(enhanced_dataset, batch_size=1, shuffle=False)
                    logger.info('logging results to wandb...')
                    create_wandb_table(self.args, enhanced_dataloader, epoch)
            logger.info(bold(f'Evaluation Time {(time.time() - evaluation_start):.2f}s'))
            metrics.update({METRICS_KEY_LSD: lsd, METRICS_KEY_VISQOL: visqol})
        wandb.log(metrics, step=epoch)
        self.history.append(metrics)
        info = ' | '.join((f'{k.capitalize()} {v:.5f}' for (k, v) in metrics.items()))
        logger.info(('-' * 70))
        logger.info(bold(f'Overall Summary | Epoch {(epoch + 1)} | {info}'))
        # Only rank 0 persists history and checkpoints.
        if (distrib.rank == 0):
            json.dump(self.history, open(self.history_file, 'w'), indent=2)
            if self.checkpoint:
                serialize(self.models, self.optimizers, self.history, self.best_states, self.args)
                logger.debug('Checkpoint saved to %s', self.checkpoint_file.resolve())
def _run_one_epoch(self, epoch, cross_valid=False):
    """Run one full pass over the train (or, with cross_valid, the cv) loader.

    Returns a dict of per-batch-averaged losses; 'total' and 'evaluation'
    both hold the mean summed generator loss.
    """
    total_losses = {}
    total_loss = 0
    data_loader = (self.tr_loader if (not cross_valid) else self.cv_loader)
    # Let the loader reshuffle deterministically per epoch.
    data_loader.epoch = epoch
    label = ['Train', 'Valid'][cross_valid]
    name = (label + f' | Epoch {(epoch + 1)}')
    logprog = LogProgress(logger, data_loader, updates=self.num_prints, name=name)
    return_spec = (('return_spec' in self.args.experiment) and self.args.experiment.return_spec)
    for (i, data) in enumerate(logprog):
        (lr, hr) = [x.to(self.device) for x in data]
        if return_spec:
            # Model returns both waveform and spectrogram representations.
            (pr_time, pr_spec) = self.dmodel(lr, return_spec=return_spec)
            if cross_valid:
                # Trim the prediction to the target length for validation.
                pr_time = match_signal(pr_time, hr.shape[(- 1)])
            hr_spec = self.dmodel._spec(hr, scale=True)
            hr_reprs = {'time': hr, 'spec': hr_spec}
            pr_reprs = {'time': pr_time, 'spec': pr_spec}
        else:
            pr_time = self.dmodel(lr)
            if cross_valid:
                pr_time = match_signal(pr_time, hr.shape[(- 1)])
            hr_reprs = {'time': hr}
            pr_reprs = {'time': pr_time}
        losses = self._get_losses(hr_reprs, pr_reprs)
        total_generator_loss = 0
        for (loss_name, loss) in losses['generator'].items():
            total_generator_loss += loss
        # Optimize only when training; validation is read-only.
        if (not cross_valid):
            self._optimize(total_generator_loss)
            if self.adversarial_mode:
                self._optimize_adversarial(losses['discriminator'])
        total_loss += total_generator_loss.item()
        # Accumulate each named loss so averages can be reported at the end.
        for (loss_name, loss) in losses['generator'].items():
            total_loss_name = ('generator_' + loss_name)
            if (total_loss_name in total_losses):
                total_losses[total_loss_name] += loss.item()
            else:
                total_losses[total_loss_name] = loss.item()
        for (loss_name, loss) in losses['discriminator'].items():
            total_loss_name = ('discriminator_' + loss_name)
            if (total_loss_name in total_losses):
                total_losses[total_loss_name] += loss.item()
            else:
                total_losses[total_loss_name] = loss.item()
        logprog.update(total_loss=format((total_loss / (i + 1)), '.5f'))
        # Free per-batch tensors eagerly to reduce GPU memory pressure.
        if return_spec:
            del pr_spec, hr_spec
        del pr_reprs, hr_reprs, pr_time, hr, lr
    avg_losses = {'total': (total_loss / (i + 1))}
    avg_losses.update({'evaluation': (total_loss / (i + 1))})
    for (loss_name, loss) in total_losses.items():
        avg_losses.update({loss_name: (loss / (i + 1))})
    return avg_losses
def _get_valid_losses_on_test_data(self, epoch, enhance):
    """Compute validation losses directly on the test loader.

    Runs the model over the test set (batch size 1), accumulates the same
    generator/discriminator losses as training, and optionally writes the
    enhanced audio and spectrogram images to ``samples_dir``.

    Returns (avg_losses, filenames): filenames is the list of processed
    sample stems when ``enhance`` is True, else None.
    """
    total_losses = {}
    total_loss = 0
    data_loader = self.tt_loader
    data_loader.epoch = epoch
    name = f'Valid | Epoch {(epoch + 1)}'
    logprog = LogProgress(logger, data_loader, updates=self.num_prints, name=name)
    total_filenames = []
    for (i, data) in enumerate(logprog):
        ((lr, lr_path), (hr, hr_path)) = data
        lr = lr.to(self.device)
        hr = hr.to(self.device)
        filename = Path(hr_path[0]).stem
        # BUG FIX: was `total_filenames += filename`, which extends the list
        # with the individual *characters* of the stem; append the whole name.
        total_filenames.append(filename)
        if (self.args.experiment.model == 'aero'):
            hr_spec = self.model._spec(hr, scale=True).detach()
            (pr_time, pr_spec, lr_spec) = self.dmodel(lr, return_spec=True, return_lr_spec=True)
            pr_spec = pr_spec.detach()
            lr_spec = lr_spec.detach()
        else:
            # Other models only predict waveforms; compute specs explicitly.
            nfft = self.args.experiment.nfft
            win_length = (nfft // 4)
            pr_time = self.model(lr)
            pr_spec = spectro(pr_time, n_fft=nfft, win_length=win_length)
            lr_spec = spectro(lr, n_fft=nfft, win_length=win_length)
            hr_spec = spectro(hr, n_fft=nfft, win_length=win_length)
        pr_time = match_signal(pr_time, hr.shape[(- 1)])
        if enhance:
            save_wavs(pr_time, lr, hr, [os.path.join(self.args.samples_dir, filename)], self.args.experiment.lr_sr, self.args.experiment.hr_sr)
            save_specs(lr_spec, pr_spec, hr_spec, os.path.join(self.args.samples_dir, filename))
        hr_reprs = {'time': hr, 'spec': hr_spec}
        pr_reprs = {'time': pr_time, 'spec': pr_spec}
        losses = self._get_losses(hr_reprs, pr_reprs)
        total_generator_loss = 0
        for (loss_name, loss) in losses['generator'].items():
            total_generator_loss += loss
        total_loss += total_generator_loss.item()
        # Accumulate named losses so averages can be reported at the end.
        for (loss_name, loss) in losses['generator'].items():
            total_loss_name = ('generator_' + loss_name)
            if (total_loss_name in total_losses):
                total_losses[total_loss_name] += loss.item()
            else:
                total_losses[total_loss_name] = loss.item()
        for (loss_name, loss) in losses['discriminator'].items():
            total_loss_name = ('discriminator_' + loss_name)
            if (total_loss_name in total_losses):
                total_losses[total_loss_name] += loss.item()
            else:
                total_losses[total_loss_name] = loss.item()
        logprog.update(total_loss=format((total_loss / (i + 1)), '.5f'))
        del pr_reprs, hr_reprs
    avg_losses = {'total': (total_loss / (i + 1))}
    avg_losses.update({'evaluation': (total_loss / (i + 1))})
    for (loss_name, loss) in total_losses.items():
        avg_losses.update({loss_name: (loss / (i + 1))})
    return (avg_losses, (total_filenames if enhance else None))
def _get_losses(self, hr, pr):
    """Assemble generator and discriminator loss dicts for one batch.

    hr / pr: dicts holding the 'time' (and optionally 'spec') representations
    of the high-resolution target and the prediction.
    Returns {'generator': {...}, 'discriminator': {...}} of named loss tensors.
    """
    hr_time = hr['time']
    pr_time = pr['time']
    losses = {'generator': {}, 'discriminator': {}}
    # NOTE(review): anomaly detection adds significant overhead and is
    # normally a debugging aid -- consider removing for production training.
    with torch.autograd.set_detect_anomaly(True):
        if ('l1' in self.args.losses):
            losses['generator'].update({'l1': F.l1_loss(pr_time, hr_time)})
        if ('l2' in self.args.losses):
            losses['generator'].update({'l2': F.mse_loss(pr_time, hr_time)})
        if ('stft' in self.args.losses):
            stft_loss = self._get_stft_loss(pr_time, hr_time)
            losses['generator'].update({'stft': stft_loss})
        if self.adversarial_mode:
            # Each configured discriminator contributes a generator term
            # (adversarial and/or feature matching) and its own loss.
            if ('msd_melgan' in self.args.experiment.discriminator_models):
                (generator_losses, discriminator_loss) = self._get_melgan_adversarial_loss(pr_time, hr_time)
                if (not self.args.experiment.only_features_loss):
                    losses['generator'].update({'adversarial_melgan': generator_losses['adversarial']})
                if (not self.args.experiment.only_adversarial_loss):
                    losses['generator'].update({'features_melgan': generator_losses['features']})
                losses['discriminator'].update({'msd_melgan': discriminator_loss})
            if ('msd_hifi' in self.args.experiment.discriminator_models):
                (generator_losses, discriminator_loss) = self._get_msd_adversarial_loss(pr_time, hr_time)
                if (not self.args.experiment.only_features_loss):
                    losses['generator'].update({'adversarial_msd': generator_losses['adversarial']})
                if (not self.args.experiment.only_adversarial_loss):
                    losses['generator'].update({'features_msd': generator_losses['features']})
                losses['discriminator'].update({'msd': discriminator_loss})
            if ('mpd' in self.args.experiment.discriminator_models):
                (generator_losses, discriminator_loss) = self._get_mpd_adversarial_loss(pr_time, hr_time)
                if (not self.args.experiment.only_features_loss):
                    losses['generator'].update({'adversarial_mpd': generator_losses['adversarial']})
                if (not self.args.experiment.only_adversarial_loss):
                    losses['generator'].update({'features_mpd': generator_losses['features']})
                losses['discriminator'].update({'mpd': discriminator_loss})
            if ('hifi' in self.args.experiment.discriminator_models):
                (generator_loss, discriminator_loss) = self._get_hifi_adversarial_loss(pr_time, hr_time)
                losses['generator'].update({'adversarial_hifi': generator_loss})
                losses['discriminator'].update({'hifi': discriminator_loss})
    return losses
def _get_stft_loss(self, pr, hr):
(sc_loss, mag_loss) = self.mrstftloss(pr.squeeze(1), hr.squeeze(1))
stft_loss = (sc_loss + mag_loss)
return stft_loss
def _get_melgan_adversarial_loss(self, pr, hr):
    """Run the MelGAN multi-scale discriminator and derive both loss sides.

    Returns (generator_losses_dict, discriminator_loss).
    """
    discriminator = self.dmodels['msd_melgan']
    # Detached pass: trains the discriminator without generator gradients.
    fake_detached = discriminator(pr.detach())
    real = discriminator(hr)
    fake = discriminator(pr)
    disc_loss = self._get_melgan_discriminator_loss(fake_detached, real)
    gen_losses = self._get_melgan_generator_loss(fake, real)
    return (gen_losses, disc_loss)
def _get_melgan_discriminator_loss(self, discriminator_fake, discriminator_real):
discriminator_loss = 0
for scale in discriminator_fake:
discriminator_loss += F.relu((1 + scale[(- 1)])).mean()
for scale in discriminator_real:
discriminator_loss += F.relu((1 - scale[(- 1)])).mean()
return discriminator_loss
def _get_melgan_generator_loss(self, discriminator_fake, discriminator_real):
    """Feature-matching and hinge adversarial losses for the MelGAN generator.

    Returns a dict with 'adversarial' and/or 'features' entries depending on
    the only_adversarial_loss / only_features_loss config flags.
    """
    cfg = self.args.experiment
    # Feature matching: L1 between fake/real intermediate maps, weighted so
    # every layer of every sub-discriminator contributes equally.
    layer_weight = 4.0 / (cfg.melgan_discriminator.n_layers + 1)
    disc_weight = 1.0 / cfg.melgan_discriminator.num_D
    weight = disc_weight * layer_weight
    features_loss = 0
    for d_idx in range(cfg.melgan_discriminator.num_D):
        for l_idx in range(len(discriminator_fake[d_idx]) - 1):
            features_loss += weight * F.l1_loss(discriminator_fake[d_idx][l_idx],
                                                discriminator_real[d_idx][l_idx].detach())
    # Hinge adversarial term on each scale's final score map.
    adversarial_loss = 0
    for scale in discriminator_fake:
        adversarial_loss += F.relu(1 - scale[-1]).mean()
    if ('only_adversarial_loss' in cfg) and cfg.only_adversarial_loss:
        return {'adversarial': adversarial_loss}
    if ('only_features_loss' in cfg) and cfg.only_features_loss:
        return {'features': cfg.features_loss_lambda * features_loss}
    return {'adversarial': adversarial_loss,
            'features': cfg.features_loss_lambda * features_loss}
def _get_hifi_adversarial_loss(self, pr, hr):
    """HiFi-GAN adversarial loss combining the MPD and MSD discriminators.

    Returns (generator_loss, discriminator_loss). The generator term sums
    adversarial, feature-matching and weighted mel-L1 components, unless
    only_features_loss restricts it to feature matching.
    """
    mpd = self.dmodels['mpd']
    msd = self.dmodels['msd_hifi']
    # Discriminator side: detach predictions so only discriminators train.
    real_f, fake_f, _, _ = mpd(hr, pr.detach())
    disc_loss_f = discriminator_loss(real_f, fake_f)
    real_s, fake_s, _, _ = msd(hr, pr.detach())
    disc_loss_s = discriminator_loss(real_s, fake_s)
    total_loss_discriminator = disc_loss_s + disc_loss_f
    # Mel-spectrogram reconstruction term (weighted L1).
    hr_mel = self.melspec_transform(hr)
    pr_mel = self.melspec_transform(pr)
    loss_mel = F.l1_loss(hr_mel, pr_mel) * self.args.experiment.mel_spec_loss_lambda
    # Generator side: gradients flow through the prediction.
    _, gen_f, fmap_f_real, fmap_f_fake = mpd(hr, pr)
    _, gen_s, fmap_s_real, fmap_s_fake = msd(hr, pr)
    loss_fm_f = feature_loss(fmap_f_real, fmap_f_fake)
    loss_fm_s = feature_loss(fmap_s_real, fmap_s_fake)
    loss_gen_f = generator_loss(gen_f)
    loss_gen_s = generator_loss(gen_s)
    cfg = self.args.experiment
    if ('only_features_loss' in cfg) and cfg.only_features_loss:
        total_loss_generator = loss_fm_s + loss_fm_f
    else:
        total_loss_generator = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel
    return (total_loss_generator, total_loss_discriminator)
def _get_msd_adversarial_loss(self, pr, hr):
    """HiFi multi-scale discriminator (MSD) adversarial loss.

    Returns (generator_losses_dict, discriminator_loss); the dict carries
    'adversarial' and/or 'features' terms depending on config flags.
    """
    msd = self.dmodels['msd_hifi']
    # Discriminator side: detach the prediction so only the MSD trains.
    real_scores, fake_scores, _, _ = msd(hr, pr.detach())
    d_loss = discriminator_loss(real_scores, fake_scores)
    # Generator side: gradients flow through the prediction.
    _, fake_scores, fmap_real, fmap_fake = msd(hr, pr)
    g_feat_loss = feature_loss(fmap_real, fmap_fake)
    g_adv_loss = generator_loss(fake_scores)
    cfg = self.args.experiment
    if ('only_adversarial_loss' in cfg) and cfg.only_adversarial_loss:
        return ({'adversarial': g_adv_loss}, d_loss)
    if ('only_features_loss' in cfg) and cfg.only_features_loss:
        return ({'features': cfg.features_loss_lambda * g_feat_loss}, d_loss)
    return ({'adversarial': g_adv_loss,
             'features': cfg.features_loss_lambda * g_feat_loss}, d_loss)
def _get_mpd_adversarial_loss(self, pr, hr):
    """HiFi multi-period discriminator (MPD) adversarial loss.

    Mirrors the MSD variant: returns (generator_losses_dict,
    discriminator_loss) with config-controlled 'adversarial'/'features' terms.
    """
    mpd = self.dmodels['mpd']
    # Discriminator side: detach the prediction so only the MPD trains.
    real_scores, fake_scores, _, _ = mpd(hr, pr.detach())
    d_loss = discriminator_loss(real_scores, fake_scores)
    # Generator side: gradients flow through the prediction.
    _, fake_scores, fmap_real, fmap_fake = mpd(hr, pr)
    g_feat_loss = feature_loss(fmap_real, fmap_fake)
    g_adv_loss = generator_loss(fake_scores)
    cfg = self.args.experiment
    if ('only_adversarial_loss' in cfg) and cfg.only_adversarial_loss:
        return ({'adversarial': g_adv_loss}, d_loss)
    if ('only_features_loss' in cfg) and cfg.only_features_loss:
        return ({'features': cfg.features_loss_lambda * g_feat_loss}, d_loss)
    return ({'adversarial': g_adv_loss,
             'features': cfg.features_loss_lambda * g_feat_loss}, d_loss)
def _optimize(self, loss):
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def _optimize_adversarial(self, discriminator_losses):
total_disc_loss = sum(list(discriminator_losses.values()))
disc_optimizer = self.disc_optimizers['disc_optimizer']
disc_optimizer.zero_grad()
total_disc_loss.backward()
disc_optimizer.step() |
def bruhat_lequal(p1, p2):
    """Return True iff permutation p1 is less-or-equal to p2 in Bruhat order.

    Uses the prefix-counting criterion: for every value threshold t, while
    scanning both permutations left to right, the running count of entries
    greater than t in p2 must never fall behind the count in p1.
    """
    n = len(p1)
    if n == 0:
        return True
    # Quick rejections based on first and last entries.
    if p1[0] > p2[0] or p1[-1] < p2[-1]:
        return False
    for threshold in range(1, n + 1):
        excess = 0
        for a, b in zip(p1, p2):
            if b > threshold:
                excess += 1
            if a > threshold:
                excess -= 1
            if excess < 0:
                return False
    return True
def prepare_fx(graph_module, qconfig_dict, inplace=False):
    """Public wrapper over _prepare_fx for static (non-dynamic) quantization.

    graph_module: the FX GraphModule to prepare.
    qconfig_dict: quantization configuration mapping.
    inplace: whether preparation may mutate the module in place.
    """
    return _prepare_fx(graph_module, qconfig_dict, inplace, is_dynamic_quant=False)
class CustomFactorGenerationExampleCodegenTest(TestCase, SymforceTestCaseMixin):
    """Codegen regression test for the custom_factor_generation example."""
    # NOTE(review): the bare `_only` below looks like the tail of a stripped
    # decorator (e.g. "@..._only") lost during extraction -- confirm against
    # the upstream symforce source before relying on this file.
    _only
    def test_generate_factors(self) -> None:
        # Generate the factors into a scratch directory and diff the result
        # against the checked-in gen/ directory (or update it in update mode).
        output_dir = self.make_output_dir(BASE_DIRNAME)
        generate_factors.generate(output_dir=output_dir)
        self.compare_or_update_directory(actual_dir=output_dir, expected_dir=((((path_util.symforce_data_root() / 'symforce') / 'examples') / 'custom_factor_generation') / 'gen'))
# NOTE(review): "(scope='module')" is almost certainly the remains of a
# stripped "@pytest.fixture(scope='module')" decorator -- confirm upstream.
(scope='module')
def source_1bin_shapesys():
    """Load the 1-bin shapesys example spec from the validation data."""
    with open('validation/data/1bin_example1.json', encoding='utf-8') as read_json:
        return json.load(read_json)
def split_s3_path(url):
    """Split an ``s3://bucket/key`` URL into ``(bucket_name, key)``.

    Raises ValueError when either the bucket or the key part is missing.
    """
    parts = urlparse(url)
    bucket, key = parts.netloc, parts.path
    if not bucket or not key:
        raise ValueError('bad s3 path {}'.format(url))
    # urlparse keeps a single leading '/' on the path; drop it.
    if key.startswith('/'):
        key = key[1:]
    return bucket, key
def execute_graph(model: nn.Module, graph: Graph, model_args=(), model_kwargs=None, pre_hook: Optional[PreHook]=None, post_hook: Optional[PostHook]=None, enforce_out_of_place=True):
    """Interpret a traced computation graph over the given model.

    Nodes are executed in id order; intermediate results are dropped as soon
    as their last consumer has run (reference counting via `uses`). Optional
    pre/post hooks wrap every layer and op call. Returns the values of the
    graph's output nodes as a list.
    """
    if (model_kwargs is None):
        model_kwargs = dict()
    if (not isinstance(model_args, tuple)):
        model_args = (model_args,)
    if (pre_hook is None):
        pre_hook = IdentityPreHook()
    if (post_hook is None):
        post_hook = IdentityPostHook()
    pre_hook = apply_pre_hook(pre_hook)
    post_hook = apply_post_hook(post_hook)
    nodes: List[Node] = sorted(graph.nodes, key=(lambda n: n.id))
    # Remaining-consumer count per node; outputs get +1 so they survive
    # until the final gather at the bottom of this function.
    uses = {n: len(n.out_edges) for n in nodes}
    for n in graph.outputs:
        uses[n] += 1
    # Seed input nodes with positional args, then overwrite keyword inputs.
    ready_expressions = dict(zip(nodes, model_args))
    for node in graph.inputs:
        if (node.id in graph.input_kw_ids):
            ready_expressions[node] = model_kwargs[graph.input_kw_ids[node.id]]
    del model_args
    del model_kwargs
    # Buffers/parameters are looked up directly on the model by scope name.
    tensors = tensorDict(model)
    ready_expressions.update({n: tensors[n.scope] for n in nodes if (n.type is NodeTypes.BUFF_PARAM)})
    del tensors
    layers = layerDict(model, graph.depth, graph.basic_blocks)
    namespaces = used_namespaces()
    for node in nodes:
        if (node in ready_expressions):
            continue
        if (node.type is NodeTypes.CONSTANT):
            v = node.constant_value
            ready_expressions[node] = v
            continue
        (args, kwargs) = fetch_args_kwargs(node, ready_expressions)
        if (node.type is NodeTypes.LAYER):
            l = layers[node.scope]
            # Optionally force out-of-place execution for the wrapped layer.
            with (force_out_of_place(l) if enforce_out_of_place else nullcontext()):
                (args, kwargs) = pre_hook(node, l, args, kwargs)
                outputs = l(*args, **kwargs)
                outputs = post_hook(node, l, args, kwargs, outputs)
                ready_expressions[node] = outputs
        elif (node.type is NodeTypes.PRIMITIVE):
            # Container constructs (tuple/list/dict) are rebuilt directly.
            ready_expressions[node] = create_container_construct(node, args, kwargs)
        else:
            assert (node.type is NodeTypes.OP)
            outputs = call_function(namespaces, node, args, kwargs, pre_hook, post_hook, enforce_out_of_place=enforce_out_of_place)
            ready_expressions[node] = outputs
        del args
        del kwargs
        # Release producer values whose consumers have all run.
        for n in node.in_edges:
            uses[n] -= 1
            if (uses[n] == 0):
                ready_expressions.pop(n)
        if (uses[node] == 0):
            ready_expressions.pop(node)
    return [ready_expressions[n] for n in graph.outputs]
class BleuMetricSpec(TextMetricSpec):
    """Metric spec computing corpus BLEU via the Moses multi-bleu script."""

    def __init__(self, params):
        # Modernized from Python-2-style super(BleuMetricSpec, self).
        super().__init__(params, 'bleu')

    def metric_fn(self, hypotheses, references):
        """Return case-sensitive Moses multi-bleu for the given corpora."""
        return bleu.moses_multi_bleu(hypotheses, references, lowercase=False)
def discriminator(images, num_classes, bottleneck_size=512, keep_prob=1.0, phase_train=True, weight_decay=0.0, reuse=None, scope='Discriminator'):
    """PatchGAN-style discriminator plus a global classification head (TF1/slim).

    Five stride-2 4x4 conv blocks downsample the input. A 1x1 conv on the
    final feature map gives 3-way per-patch logits; a flattened bottleneck
    followed by a fully connected layer gives per-image class logits.
    Returns (patch_logits, logits).
    """
    with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_regularizer=slim.l2_regularizer(weight_decay), activation_fn=leaky_relu, normalizer_fn=None, normalizer_params=batch_norm_params):
        with tf.variable_scope(scope, [images], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=phase_train):
                print('{} input shape:'.format(scope), [dim.value for dim in images.shape])
                net = conv(images, 32, kernel_size=4, stride=2, scope='conv1')
                print('module_1 shape:', [dim.value for dim in net.shape])
                net = conv(net, 64, kernel_size=4, stride=2, scope='conv2')
                print('module_2 shape:', [dim.value for dim in net.shape])
                net = conv(net, 128, kernel_size=4, stride=2, scope='conv3')
                print('module_3 shape:', [dim.value for dim in net.shape])
                net = conv(net, 256, kernel_size=4, stride=2, scope='conv4')
                print('module_4 shape:', [dim.value for dim in net.shape])
                net = conv(net, 512, kernel_size=4, stride=2, scope='conv5')
                print('module_5 shape:', [dim.value for dim in net.shape])
                # Per-patch 3-class logits from the 1/32-resolution map.
                patch5_logits = slim.conv2d(net, 3, 1, activation_fn=None, normalizer_fn=None, scope='patch5_logits')
                patch_logits = tf.reshape(patch5_logits, [(- 1), 3])
                net = slim.flatten(net)
                prelogits = slim.fully_connected(net, bottleneck_size, scope='Bottleneck', weights_initializer=slim.xavier_initializer(), activation_fn=None, normalizer_fn=None)
                # L2-normalize the embedding before classification.
                prelogits = tf.nn.l2_normalize(prelogits, dim=1)
                print('latent shape:', [dim.value for dim in prelogits.shape])
                logits = slim.fully_connected(prelogits, num_classes, scope='Logits', activation_fn=None, normalizer_fn=None)
                return (patch_logits, logits)
class BlockReference(object):
    """One level of a template block's inheritance stack.

    Calling the reference renders the block at the stored depth; `super()`
    steps one level up the stack.
    """

    def __init__(self, name, context, stack, depth):
        self.name = name
        self._context = context
        self._stack = stack
        self._depth = depth

    def super(self):
        """Return the next block up the chain, or an undefined object."""
        parent_depth = self._depth + 1
        if parent_depth >= len(self._stack):
            return self._context.environment.undefined(
                ('there is no parent block called %r.' % self.name), name='super')
        return BlockReference(self.name, self._context, self._stack, parent_depth)

    def __call__(self):
        rendered = concat(self._stack[self._depth](self._context))
        # Wrap as markup when autoescaping so the result is not re-escaped.
        if self._context.eval_ctx.autoescape:
            rendered = Markup(rendered)
        return rendered
class TimeFeature(ABC):
    """Base class for scaled calendar/time covariates.

    NOTE(review): `__call__` and `_max_val` have bare `...` bodies and
    `max_val` returns `self._max_val` without calling it; this only makes
    sense if `@abstractmethod` / `@property` decorators were lost when this
    source was extracted -- confirm against the original project.
    """
    def __init__(self, normalise: bool, a: float, b: float):
        # normalise: emit floats scaled into [a, b]; otherwise integer codes.
        self.normalise = normalise
        self.a = a
        self.b = b
    def __call__(self, idx: pd.DatetimeIndex) -> np.ndarray:
        ...
    def _max_val(self) -> float:
        ...
    def max_val(self) -> float:
        # Divisor used by `process`; 1.0 leaves values raw when not normalising.
        return (self._max_val if self.normalise else 1.0)
    def scale(self, val: np.ndarray) -> np.ndarray:
        # Affine map from [0, 1] onto [a, b].
        return ((val * (self.b - self.a)) + self.a)
    def process(self, val: np.ndarray) -> np.ndarray:
        features = self.scale((val / self.max_val))
        if self.normalise:
            return features
        return features.astype(int)
    def __repr__(self) -> str:
        return f'{self.__class__.__name__}(normalise={self.normalise}, a={self.a}, b={self.b})'
def _possible_normalizers(E, SA):
    """Sage helper: primes p for which the mod-p Galois image might need care.

    Works over the 2-Selmer group of the base field: quadratic characters are
    split by Frobenius data at degree-one primes, collecting trace-of-Frobenius
    values whose prime factors form the returned sorted list of bad primes.
    Raises ValueError for CM curves.
    """
    if E.has_cm():
        raise ValueError('The curve E should not have CM.')
    E = _over_numberfield(E)
    K = E.base_field()
    SA = [K.ideal(I.gens()) for I in SA]
    selmer_gens = K.selmer_generators(SA, 2)
    if (not selmer_gens):
        return []
    # Each Selmer generator contributes one GF(2) coordinate.
    V = VectorSpace(GF(2), len(selmer_gens))
    traces_list = []
    W = V.zero_subspace()
    deg_one_primes = deg_one_primes_iter(K)
    # Grow W with independent splitting vectors until codimension 1.
    while (W.dimension() < (V.dimension() - 1)):
        P = next(deg_one_primes)
        k = P.residue_field()
        defines_valid_character = True
        splitting_vector = []
        for a in selmer_gens:
            abar = k(a)
            if (abar == 0):
                # A generator reduces to zero: P is unusable for a character.
                defines_valid_character = False
                break
            if abar.is_square():
                splitting_vector.append(GF(2)(0))
            else:
                splitting_vector.append(GF(2)(1))
        if (not defines_valid_character):
            continue
        if (splitting_vector in W):
            continue
        try:
            Etilde = E.change_ring(k)
        except ArithmeticError:
            # Bad reduction at P; skip.
            continue
        tr = Etilde.trace_of_frobenius()
        if (tr == 0):
            continue
        traces_list.append(tr)
        W = (W + V.span([splitting_vector]))
    bad_primes = set()
    for i in traces_list:
        for p in i.prime_factors():
            bad_primes.add(p)
    # The kernel vector selects the product of generators spanning the
    # remaining character.
    v = W.matrix().transpose().kernel().basis()[0]
    a = 1
    for i in range(len(selmer_gens)):
        if (v[i] == 1):
            a *= selmer_gens[i]
    patience = (5 * K.degree())
    # NOTE(review): as extracted, this loop contains no break/return and can
    # never terminate; the upstream SageMath source breaks after adding the
    # prime factors (and errors out when patience reaches 0). Lines were
    # likely lost in extraction -- restore them from upstream before use.
    while True:
        P = next(deg_one_primes)
        k = P.residue_field()
        if (not k(a).is_square()):
            try:
                tr = E.change_ring(k).trace_of_frobenius()
            except ArithmeticError:
                continue
            if (tr == 0):
                patience -= 1
            else:
                for p in tr.prime_factors():
                    bad_primes.add(p)
    bad_primes = sorted(bad_primes)
    return bad_primes
class MyConcatDataset(ConcatDataset):
    """ConcatDataset that forwards unknown attribute lookups to its first child.

    `__getattr__` only fires for names not found through normal lookup, so
    ConcatDataset's own attributes and methods are unaffected.
    """

    def __getattr__(self, name):
        return getattr(self.datasets[0], name)
def get_default_quantization_config_options() -> QuantizationConfigOptions:
    """Return the default quantization config options of the active TP model."""
    tp_model = get_current_tp_model()
    return tp_model.default_qco
class InMemoryDatasetProvider(td.InMemoryDataset):
    """Materializes a graph dataset into a Python list and serves it in memory.

    NOTE(review): `num_classes` / `num_features` are plain methods here even
    though their names read like properties -- if `@property` decorators were
    stripped during extraction, confirm against the original source.
    """
    def __init__(self, dataset):
        super().__init__()
        self.data_list = list(dataset)
        self._num_classes = dataset.num_classes
        self._num_features = dataset.num_features
        # Converts edge_index graphs to SparseTensor form (see to_sparse()).
        self._to_sparse = ToSparseTensor(remove_edge_index=True, fill_cache=True)
    def num_classes(self):
        return self._num_classes
    def set_num_classes(self, n_c):
        self._num_classes = n_c
    def num_features(self):
        return self._num_features
    def __len__(self):
        return len(self.data_list)
    def __getitem__(self, index):
        return self.data_list[index]
    def loader(self, batch_size=1, shuffle=False):
        # DataLoader over this provider itself.
        return DataLoader(self, batch_size=batch_size, shuffle=shuffle)
    def clone(self, shallow=False):
        # Shallow clone shares data_list; deep clone copies every graph.
        self_clone = copy.copy(self)
        if (not shallow):
            self_clone.data_list = [d.clone() for d in self.data_list]
        return self_clone
    def to(self, device, **kwargs):
        # Move every stored graph to `device` in place; returns self.
        for (i, l) in enumerate(self.data_list):
            self.data_list[i] = l.to(device, **kwargs)
        return self
    def to_sparse(self):
        # Convert every stored graph to sparse-tensor form in place.
        for (i, l) in enumerate(self.data_list):
            self.data_list[i] = self._to_sparse(l)
        return self
def rand_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, low=0, high=1, shape=[], seed=(- 1)):
    """Backward pass of a random-generation op: no gradient flows to any input.

    Returns a None placeholder per grad input and per input; all other
    parameters are ignored (they mirror the forward op's signature).
    """
    n_args = len(grad_inputs) + len(inputs)
    return [None] * n_args
def filter_formulas(formulas, criteria=None):
    """Return the formulas whose signature id is NOT listed in criteria.

    When criteria is None, the module-level CRITERIA collection is used.
    """
    if criteria is None:
        criteria = CRITERIA
    return [formula for formula in formulas
            if formula.signature.id not in criteria]
def GetNodeEcc_PDirNet(Graph, NId, IsDir=False):
    """Return the eccentricity of node NId in Graph (SWIG wrapper over _snap).

    IsDir: treat edges as directed when True.
    """
    return _snap.GetNodeEcc_PDirNet(Graph, NId, IsDir)
# NOTE(review): the leading ".lower_builtin('end_list', ArrayBuilderType)" is
# the tail of a stripped registration decorator (a numba lowering decorator)
# lost during extraction -- confirm against the upstream awkward-array source.
.lower_builtin('end_list', ArrayBuilderType)
def lower_endlist(context, builder, sig, args):
    """Numba lowering for ArrayBuilder.end_list(): invoke the native endlist
    through the builder's raw pointer and return a dummy value."""
    (arraybuildertype,) = sig.args
    (arraybuilderval,) = args
    # Reconstruct the ArrayBuilder struct proxy to reach its raw C pointer.
    proxyin = context.make_helper(builder, arraybuildertype, arraybuilderval)
    call(context, builder, libawkward.ArrayBuilder_endlist, (proxyin.rawptr,))
    return context.get_dummy_value()
def make_agent(obs_spec, action_spec, cfg):
    """Instantiate an agent from a Hydra config, filling spec-derived shapes.

    obs_spec: mapping from observation type to a spec exposing .shape
    action_spec: action spec; its .shape is forwarded when available
    cfg: Hydra config node (must carry obs_type; obs_shape/action_shape set here)
    """
    cfg.obs_shape = obs_spec[cfg.obs_type].shape
    try:
        cfg.action_shape = action_spec.shape
    except Exception:
        # Best effort: some action specs carry no shape; leave cfg untouched.
        # (Replaces a bare `except:` so KeyboardInterrupt/SystemExit propagate.)
        pass
    return hydra.utils.instantiate(cfg)
# NOTE(review): the bare ".parametrize(...)" lines below are the tails of
# stripped "@pytest.mark.parametrize" decorators lost during extraction --
# restore the "@pytest.mark" prefixes from the upstream nnabla test source.
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [100])
.parametrize('num_layers', [1, 2])
.parametrize('dropout', [0.0])
.parametrize('bidirectional', [True, False])
.parametrize('training', [True])
.parametrize('seq_len', [2, 5])
.parametrize('batch_size', [3])
.parametrize('input_size', [2])
.parametrize('hidden_size', [3])
.parametrize('with_bias', [True, False])
def test_gru_double_backward(seed, num_layers, dropout, bidirectional, training, seq_len, batch_size, input_size, hidden_size, with_bias, ctx, func_name):
    """Double-backward (gradient-of-gradient) test for F.gru across the
    parametrized layer/direction/bias configurations."""
    from nbla_test_utils import backward_function_tester
    with nn.context_scope(ctx):
        rng = np.random.RandomState(seed)
        num_directions = 1
        if bidirectional:
            num_directions = 2
        # x: (seq_len, batch, input), scaled down for numerical stability.
        inputs = [(rng.randn(seq_len, batch_size, input_size).astype(np.float32) * 0.1)]
        # h0: (layers, directions, batch, hidden).
        inputs += [rng.randn(num_layers, num_directions, batch_size, hidden_size).astype(np.float32)]
        # First-layer weights. NOTE(review): unlike the other arrays this one
        # is not cast to float32 -- confirm whether that is intentional.
        inputs += [rng.randn(num_directions, 3, hidden_size, (input_size + hidden_size))]
        if (num_layers > 1):
            # Weights for the remaining layers.
            inputs += [rng.randn(max(1, (num_layers - 1)), num_directions, 3, hidden_size, ((num_directions * hidden_size) + hidden_size)).astype(np.float32)]
        else:
            inputs += [None]
        if with_bias:
            inputs += [rng.randn(num_layers, num_directions, 4, hidden_size).astype(np.float32)]
        else:
            inputs += [None]
        # Gradients are checked for all inputs only in training mode.
        backward = [False for _ in inputs]
        if training:
            backward = [True for _ in inputs]
        backward_function_tester(rng, F.gru, inputs, func_kwargs=dict(num_layers=num_layers, dropout=dropout, bidirectional=bidirectional, training=training), atol_f=1e-06, dstep=0.001, backward=backward, ctx=ctx, skip_backward_check=True)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.