code stringlengths 101 5.91M |
|---|
def _get_required_attr(element: Element, attr: str) -> str:
    """Fetch attribute *attr* from *element*, failing loudly when absent.

    Raises:
        MusicXMLError: if the element does not carry the attribute.
    """
    value = element.get(attr)
    if value is None:
        raise MusicXMLError(f"Attribute '{attr}' is required for an '{element.tag}' element.")
    return value
def get_user_seqs(data_file):
    """Load per-user item sequences from a whitespace-delimited file.

    Each line is "<user> <item> <item> ...". Returns
    (user_seq, max_item, valid_rating_matrix, test_rating_matrix).
    """
    # Fix: the original leaked the file handle (open() without close());
    # a context manager guarantees it is closed.
    with open(data_file) as f:
        lines = f.readlines()
    user_seq = []
    item_set = set()
    for line in lines:
        user, items = line.strip().split(' ', 1)
        seq = [int(item) for item in items.split(' ')]
        user_seq.append(seq)
        item_set.update(seq)
    max_item = max(item_set)
    num_users = len(lines)
    # +2 extra item slots — presumably padding id and mask id; TODO confirm
    # against the consumers of num_items.
    num_items = max_item + 2
    valid_rating_matrix = generate_rating_matrix_valid(user_seq, num_users, num_items)
    test_rating_matrix = generate_rating_matrix_test(user_seq, num_users, num_items)
    return (user_seq, max_item, valid_rating_matrix, test_rating_matrix)
class BufferReader():
    """Sequential reader over an in-memory byte buffer.

    Tracks a read offset and unpacks values via pre-compiled ``struct.Struct``
    objects; little-endian format strings are compiled on demand and cached
    as attributes on ``ConstStructs``.
    """

    def __init__(self, buffer: bytes):
        self.buffer = buffer
        self.read_offset = 0  # position of the next unread byte

    def bytes_left(self):
        """Number of bytes not yet consumed."""
        return len(self.buffer) - self.read_offset

    def unpack_f(self, s_format: str):
        """Unpack using a plain format string, caching the compiled Struct."""
        if not hasattr(ConstStructs, s_format):
            setattr(ConstStructs, s_format, struct.Struct('<' + s_format))
        return self.unpack(getattr(ConstStructs, s_format))

    def unpack_numpy(self, s: struct.Struct, shape: Tuple):
        """Read a numpy array of *shape* whose element format is *s*."""
        # .copy() detaches the result from the shared underlying buffer.
        result = np.ndarray(shape, s.format, self.buffer, self.read_offset).copy()
        self.advance(s, int(np.prod(shape)))
        return result

    def unpack_torch(self, s: struct.Struct, shape: Tuple):
        """Read a torch tensor (torch imported lazily)."""
        import torch
        return torch.from_numpy(self.unpack_numpy(s, shape))

    def unpack_tensorflow(self, s: struct.Struct, shape: Tuple):
        """Read a tensorflow constant (tensorflow imported lazily)."""
        import tensorflow as tf
        return tf.constant(self.unpack_numpy(s, shape))

    def unpack(self, s: struct.Struct):
        """Unpack one record; single-element tuples are flattened to the value."""
        values: tuple = s.unpack_from(self.buffer, self.read_offset)
        self.advance(s)
        return values[0] if len(values) == 1 else values

    def advance(self, s: struct.Struct, times=1):
        """Move the read offset past *times* records of *s*."""
        self.read_offset += s.size * times

    def unpack_str(self) -> str:
        """Read a ushort length prefix followed by that many UTF-8 bytes."""
        length: int = self.unpack(ConstStructs.ushort)
        return self.unpack_f('%ds' % length).decode('utf-8')
class AverageOfMaximumScoreEnsembler(PYODScoreEnsembler):
    """Score ensembler using pyod's Average-of-Maximum (AOM) combination."""

    def __init__(self, n_buckets=5, method='static', bootstrap_estimators=False):
        # All three knobs are stored verbatim and forwarded to aom().
        self.n_buckets = n_buckets
        self.method = method
        self.bootstrap_estimators = bootstrap_estimators

    def _combine(self, scores):
        """Collapse the per-estimator score matrix into one score per sample."""
        return aom(scores,
                   n_buckets=self.n_buckets,
                   method=self.method,
                   bootstrap_estimators=self.bootstrap_estimators)
def calculate_matvec_accumulator_range(matrix, vec_dt):
    """Worst-case (acc_min, acc_max) for the accumulator of matrix @ vector.

    Args:
        matrix: weight array; its global min/max bound each weight.
        vec_dt: object with .min()/.max() bounding each input element
            (e.g. a datatype descriptor or an array).

    Returns:
        Tuple (acc_min, acc_max) bounding every dot-product accumulator.
    """
    min_weight = matrix.min()
    max_weight = matrix.max()
    # Number of products summed per output element (dot-product length).
    perceptive_field_elems = matrix.shape[0]
    min_input = vec_dt.min()
    max_input = vec_dt.max()
    # The extreme partial product is one of the four weight/input corner
    # combinations; compute them once instead of twice (original rebuilt the
    # same four products for both min() and max()).
    corner_products = (
        min_weight * max_input,
        min_weight * min_input,
        max_weight * max_input,
        max_weight * min_input,
    )
    acc_min = perceptive_field_elems * min(corner_products)
    acc_max = perceptive_field_elems * max(corner_products)
    return (acc_min, acc_max)
def convert_path_to_npy(*, path='train_64x64', outfile='train_64x64.npy'):
    """Read every file directly under *path* as an image and save the stack
    as a single uint8 array to *outfile* (.npy)."""
    assert isinstance(path, str), 'Expected a string input for the path'
    assert os.path.exists(path), "Input path doesn't exist"
    files = [f for f in listdir(path) if isfile(join(path, f))]
    print('Number of valid images is:', len(files))
    imgs = []
    for fname in tqdm(files):
        img = scipy.ndimage.imread(join(path, fname))
        img = img.astype('uint8')
        # Sanity-check each image before stacking.
        assert np.max(img) <= 255
        assert np.min(img) >= 0
        assert img.dtype == 'uint8'
        assert isinstance(img, np.ndarray)
        imgs.append(img)
    # Resolution taken from the last image read; all images are assumed to
    # share it (the stacking assert below enforces this).
    resolution_x, resolution_y = img.shape[0], img.shape[1]
    imgs = np.asarray(imgs).astype('uint8')
    assert imgs.shape[1:] == (resolution_x, resolution_y, 3)
    assert np.max(imgs) <= 255
    assert np.min(imgs) >= 0
    print('Total number of images is:', imgs.shape[0])
    print('All assertions done, dumping into npy file')
    np.save(outfile, imgs)
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
    """Normalize one Italian VAT (IVA) value into a single-element list.

    Null-like values map to [nan]. Invalid values either raise, pass through
    unchanged ('ignore') or map to [nan], per *errors*.
    """
    val = str(val)
    if val in NULL_VALUES:
        return [np.nan]
    if not validate_it_iva(val):
        if errors == 'raise':
            raise ValueError(f'Unable to parse value {val}')
        # 'ignore' keeps the original value, anything else coerces to nan.
        return [val] if errors == 'ignore' else [np.nan]
    formatted: Any = []
    if output_format in {'compact', 'standard'}:
        formatted = [iva.compact(val)] + formatted
    return formatted
class IRBlock(nn.Module):
    """Improved residual block: BN -> Conv -> BN -> PReLU -> Conv -> BN,
    optionally followed by squeeze-and-excitation, plus a skip connection."""

    def __init__(self, inplanes, planes, stride=1, downsample=None, norm_type='batch', use_se=True):
        super(IRBlock, self).__init__()
        norm_layer = build_norm(norm_type, dimension=2)
        # Pre-activation normalization applied to the raw input.
        self.bn0 = norm_layer(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.prelu = nn.PReLU(num_parameters=planes)
        # Any spatial downsampling happens in the second convolution.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
        self.use_se = use_se
        if self.use_se:
            self.se = SEBlock(planes)

    def forward(self, x):
        identity = x
        y = self.bn0(x)
        y = self.conv1(y)
        y = self.bn1(y)
        y = self.prelu(y)
        y = self.conv2(y)
        y = self.bn2(y)
        if self.use_se:
            y = self.se(y)
        if self.downsample is not None:
            # Project the input so shapes match the main branch.
            identity = self.downsample(x)
        y += identity  # in-place add, as in the original
        return y
def acos_safe(x: Scalar, epsilon: Scalar=epsilon()) -> Scalar:
    """arccos with its argument clamped into [-1+eps, 1-eps] to stay inside
    the domain (avoids NaN at the boundaries)."""
    clamped = Min(1 - epsilon, x)
    clamped = Max(epsilon - 1, clamped)
    return sympy.acos(clamped)
def eval_sighan2015_by_model(correct_fn, sighan_path=sighan_2015_path, verbose=True):
    """Sentence-level evaluation of a spelling corrector on SIGHAN-2015.

    Args:
        correct_fn: callable mapping a source sentence to a dict with keys
            'target' (corrected sentence) and 'errors' (detail of corrections).
        sighan_path: tab-separated file of "source<TAB>target" lines; lines
            starting with '#' are skipped as comments.
        verbose: print each input/truth/prediction triple.

    Returns:
        (acc, precision, recall, f1) at sentence level.
    """
    TP = 0.0  # erroneous sentence, fully corrected
    FP = 0.0  # correct sentence, wrongly changed
    FN = 0.0  # erroneous sentence, not (fully) corrected
    TN = 0.0  # correct sentence, left untouched
    total_num = 0
    start_time = time.time()
    with open(sighan_path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if line.startswith('#'):
                continue
            parts = line.split('\t')
            if len(parts) != 2:
                continue
            src, tgt = parts
            r = correct_fn(src)
            tgt_pred, pred_detail = r['target'], r['errors']
            if verbose:
                print()
                print('input :', src)
                print('truth :', tgt)
                print('predict:', tgt_pred, pred_detail)
            if src == tgt:
                if tgt == tgt_pred:
                    TN += 1
                    print('right')
                else:
                    FP += 1
                    print('wrong')
            elif tgt == tgt_pred:
                TP += 1
                print('right')
            else:
                FN += 1
                print('wrong')
            total_num += 1
    spend_time = time.time() - start_time
    # Fix: guard against an empty / comment-only evaluation file, which made
    # the original raise ZeroDivisionError here.
    acc = ((TP + TN) / total_num) if total_num else 0.0
    precision = (TP / (TP + FP)) if TP > 0 else 0.0
    recall = (TP / (TP + FN)) if TP > 0 else 0.0
    f1 = ((2 * precision * recall) / (precision + recall)) if (precision + recall) != 0 else 0
    print(f'Sentence Level: acc:{acc:.4f}, precision:{precision:.4f}, recall:{recall:.4f}, f1:{f1:.4f}, cost time:{spend_time:.2f} s, total num: {total_num}')
    return (acc, precision, recall, f1)
def florisPw(u_stream, tis, xs, ys, yws):
    """Run a FLORIS wake simulation and return total farm power in MW (2 dp).

    Args:
        u_stream: free-stream wind speed.
        tis: turbulence intensity values.
        xs, ys: turbine layout coordinates.
        yws: yaw angles per turbine.

    Uses the module-level `fi` (FLORIS interface) and `curl` flag.
    """
    # Idiom fix: truthiness test instead of comparing against True.
    if curl:
        fi.floris.farm.set_wake_model('curl')
    fi.reinitialize_flow_field(wind_speed=u_stream)
    fi.reinitialize_flow_field(turbulence_intensity=tis)
    fi.reinitialize_flow_field(layout_array=[xs, ys])
    fi.calculate_wake(yaw_angles=yws)
    floris_power_0 = fi.get_farm_power()
    # Convert W -> MW and round for reporting.
    return round(floris_power_0 / 1000000.0, 2)
def StackedRNN(inners, num_layers, lstm=False, dropout=0, train=True):
    """Compose per-direction RNN step functions into a stacked, multi-layer forward.

    Args:
        inners: one recurrent step function per direction (1 = unidirectional,
            2 = bidirectional).
        num_layers: number of stacked layers.
        lstm: if True, hidden states are (h, c) pairs and are regrouped per layer.
        dropout: inter-layer dropout probability (not applied after the last layer).
        train: forwarded to F.dropout as the training flag.

    Returns:
        forward(input, hidden, weight, batch_sizes) -> (next_hidden, output).
    """
    num_directions = len(inners)
    total_layers = (num_layers * num_directions)
    def forward(input, hidden, weight, batch_sizes):
        assert (len(weight) == total_layers)
        next_hidden = []
        if lstm:
            # Regroup so hidden[l] is the (h, c) pair for flat layer index l.
            hidden = list(zip(*hidden))
        for i in range(num_layers):
            all_output = []
            for (j, inner) in enumerate(inners):
                # Flat index of (layer i, direction j).
                l = ((i * num_directions) + j)
                (hy, output) = inner(input, hidden[l], weight[l], batch_sizes)
                next_hidden.append(hy)
                all_output.append(output)
            # Concatenate the per-direction outputs along the feature dim,
            # feeding the result to the next layer.
            input = torch.cat(all_output, (input.dim() - 1))
            if ((dropout != 0) and (i < (num_layers - 1))):
                input = F.dropout(input, p=dropout, training=train, inplace=False)
        if lstm:
            # Split collected states back into stacked h and c tensors.
            (next_h, next_c) = zip(*next_hidden)
            next_hidden = (torch.cat(next_h, 0).view(total_layers, *next_h[0].size()), torch.cat(next_c, 0).view(total_layers, *next_c[0].size()))
        else:
            next_hidden = torch.cat(next_hidden, 0).view(total_layers, *next_hidden[0].size())
        return (next_hidden, input)
    return forward
class ResizeShortestEdge(T.Augmentation):
    """Resize so the shortest edge matches a sampled target length, while
    capping the longest edge at ``max_size``.

    The sampled size is reused for ``clip_frame_cnt`` consecutive calls so all
    frames of a video clip are resized consistently.
    """
    def __init__(self, short_edge_length, max_size=sys.maxsize, sample_style='range', interp=Image.BILINEAR, clip_frame_cnt=1):
        """
        Args:
            short_edge_length: a single int, or a (min, max) pair when using a
                'range' sample style.
            max_size: upper bound on the longer edge after resizing.
            sample_style: 'range' or 'choice', optionally suffixed '_by_clip'.
            interp: PIL interpolation mode.
            clip_frame_cnt: number of frames that share one sampled size.
        """
        super().__init__()
        assert (sample_style in ['range', 'choice', 'range_by_clip', 'choice_by_clip']), sample_style
        self.is_range = ('range' in sample_style)
        if isinstance(short_edge_length, int):
            # Degenerate range: always sample the same size.
            short_edge_length = (short_edge_length, short_edge_length)
        if self.is_range:
            assert (len(short_edge_length) == 2), f"short_edge_length must be two values using 'range' sample style. Got {short_edge_length}!"
        self._cnt = 0
        # Stores every local (incl. clip_frame_cnt, interp, ...) as attributes.
        self._init(locals())
    def get_transform(self, image):
        # Sample a fresh target size only at the start of each clip.
        if ((self._cnt % self.clip_frame_cnt) == 0):
            if self.is_range:
                self.size = np.random.randint(self.short_edge_length[0], (self.short_edge_length[1] + 1))
            else:
                self.size = np.random.choice(self.short_edge_length)
            if (self.size == 0):
                return NoOpTransform()
            self._cnt = 0
        self._cnt += 1
        (h, w) = image.shape[:2]
        # Scale so the short edge equals the sampled size.
        scale = ((self.size * 1.0) / min(h, w))
        if (h < w):
            (newh, neww) = (self.size, (scale * w))
        else:
            (newh, neww) = ((scale * h), self.size)
        # Shrink further if the long edge would exceed max_size.
        if (max(newh, neww) > self.max_size):
            scale = ((self.max_size * 1.0) / max(newh, neww))
            newh = (newh * scale)
            neww = (neww * scale)
        # Round to the nearest integer pixel dimensions.
        neww = int((neww + 0.5))
        newh = int((newh + 0.5))
        return T.ResizeTransform(h, w, newh, neww, self.interp)
class TestDivision(object):
def test_division_int(self):
x = np.array([5, 10, 90, 100, (- 5), (- 10), (- 90), (- 100), (- 120)])
if ((5 / 10) == 0.5):
assert_equal((x / 100), [0.05, 0.1, 0.9, 1, (- 0.05), (- 0.1), (- 0.9), (- 1), (- 1.2)])
else:
assert_equal((x / 100), [0, 0, 0, 1, (- 1), (- 1), (- 1), (- 1), (- 2)])
assert_equal((x // 100), [0, 0, 0, 1, (- 1), (- 1), (- 1), (- 1), (- 2)])
assert_equal((x % 100), [5, 10, 90, 0, 95, 90, 10, 0, 80])
def test_division_complex(self):
msg = 'Complex division implementation check'
x = np.array([(1.0 + (1.0 * 1j)), (1.0 + (0.5 * 1j)), (1.0 + (2.0 * 1j))], dtype=np.complex128)
assert_almost_equal(((x ** 2) / x), x, err_msg=msg)
msg = 'Complex division overflow/underflow check'
x = np.array([1e+110, 1e-110], dtype=np.complex128)
y = ((x ** 2) / x)
assert_almost_equal((y / x), [1, 1], err_msg=msg)
def test_zero_division_complex(self):
with np.errstate(invalid='ignore', divide='ignore'):
x = np.array([0.0], dtype=np.complex128)
y = (1.0 / x)
assert_(np.isinf(y)[0])
y = (complex(np.inf, np.nan) / x)
assert_(np.isinf(y)[0])
y = (complex(np.nan, np.inf) / x)
assert_(np.isinf(y)[0])
y = (complex(np.inf, np.inf) / x)
assert_(np.isinf(y)[0])
y = (0.0 / x)
assert_(np.isnan(y)[0])
def test_floor_division_complex(self):
msg = 'Complex floor division implementation check'
x = np.array([(0.9 + 1j), ((- 0.1) + 1j), (0.9 + (0.5 * 1j)), (0.9 + (2.0 * 1j))], dtype=np.complex128)
y = np.array([0.0, (- 1.0), 0.0, 0.0], dtype=np.complex128)
assert_equal(np.floor_divide((x ** 2), x), y, err_msg=msg)
msg = 'Complex floor division overflow/underflow check'
x = np.array([1e+110, 1e-110], dtype=np.complex128)
y = np.floor_divide((x ** 2), x)
assert_equal(y, [1e+110, 0], err_msg=msg)
def test_floor_division_signed_zero(self):
x = np.zeros(10)
assert_equal(np.signbit((x // 1)), 0)
assert_equal(np.signbit(((- x) // 1)), 1) |
def test_alias_delay_initialization1(capture):
    """pybind11 alias-class test: calling f() on a plain m.A must not route
    through the Python trampoline, while a Python subclass overriding f()
    must (the capture records the C++-side construct/call/destruct trace)."""
    class B(m.A):
        def __init__(self):
            super(B, self).__init__()
        def f(self):
            print('In python f()')
    with capture:
        a = m.A()
        m.call_f(a)
        del a
        # Force collection so the destructor output lands inside the capture.
        pytest.gc_collect()
    assert (capture == 'A.f()')
    with capture:
        b = B()
        m.call_f(b)
        del b
        pytest.gc_collect()
    assert (capture == '\n PyA.PyA()\n PyA.f()\n In python f()\n PyA.~PyA()\n ')
class RandomSplit(NamedTuple):
    """Train/dev/test partition of a sentence corpus."""
    # Sentences assigned to the training portion.
    train_sentences: List[Sentence]
    # Sentences assigned to the development (validation) portion.
    dev_sentences: List[Sentence]
    # Sentences held out for final testing.
    test_sentences: List[Sentence]
class PlyProperty(object):
    """One scalar property of a PLY element: a name plus a value dtype.

    Handles conversion between the in-memory numpy representation and both
    the ASCII and binary PLY encodings.
    """

    def __init__(self, name, val_dtype):
        _check_name(name)
        self._name = str(name)
        # Goes through the val_dtype property setter below (normalizes it).
        self.val_dtype = val_dtype

    def _get_val_dtype(self):
        return self._val_dtype

    def _set_val_dtype(self, val_dtype):
        # Normalize any accepted type spec to the canonical type string.
        self._val_dtype = _data_types[_lookup_type(val_dtype)]
    val_dtype = property(_get_val_dtype, _set_val_dtype)

    @property
    def name(self):
        """Property name (read-only).

        Fix: exposed as a property — without the decorator, `self.name` in
        __str__/__repr__ interpolated a bound method instead of the string.
        """
        return self._name

    def dtype(self, byte_order='='):
        """numpy dtype description string for this property."""
        return (byte_order + self.val_dtype)

    def _from_fields(self, fields):
        # Parse one ASCII field from the field iterator.
        return _np.dtype(self.dtype()).type(next(fields))

    def _to_fields(self, data):
        # Serialize one value as an ASCII field.
        (yield _np.dtype(self.dtype()).type(data))

    def _read_bin(self, stream, byte_order):
        """Read one binary value; StopIteration signals a truncated stream."""
        try:
            return _read_array(stream, self.dtype(byte_order), 1)[0]
        except IndexError:
            raise StopIteration

    def _write_bin(self, data, stream, byte_order):
        _write_array(stream, _np.dtype(self.dtype(byte_order)).type(data))

    def __str__(self):
        val_str = _data_type_reverse[self.val_dtype]
        return ('property %s %s' % (val_str, self.name))

    def __repr__(self):
        return ('PlyProperty(%r, %r)' % (self.name, _lookup_type(self.val_dtype)))
def build_alphabet(data=None, names=None, name=None):
    """Build an alphabet (set of letters) from several call conventions.

    Accepts an integer cardinality (optionally with a string prefix or an
    explicit sequence of letter names), an iterable/Sage set of letters,
    Infinity, or a predefined alphabet *name* (e.g. 'PP', 'NN', or
    space-separated keys of ``set_of_letters``).

    Raises:
        ValueError: incompatible or unrecognized arguments.
        TypeError: names/name of the wrong type or an unknown name.
    """
    if ((name is not None) and ((data is not None) or (names is not None))):
        raise ValueError('name cannot be specified with any other argument')
    # The cardinality may arrive in the `names` slot; swap it into `data`.
    if (isinstance(names, (int, Integer)) or (names == Infinity) or ((data is None) and (names is not None))):
        (data, names) = (names, data)
    if isinstance(data, (int, Integer)):
        if (names is None):
            from sage.sets.integer_range import IntegerRange
            return IntegerRange(Integer(data))
        if isinstance(names, str):
            # Letters are the prefix suffixed with 0..data-1, e.g. 'x0', 'x1'.
            return TotallyOrderedFiniteSet([(names + ('%d' % i)) for i in range(data)])
        if (isinstance(names, collections.abc.Sequence) and (len(names) == data)):
            return TotallyOrderedFiniteSet(names)
        raise ValueError('invalid value for names')
    if (data == Infinity):
        data = NonNegativeIntegers()
    if (isinstance(data, (tuple, list, str, range)) or (data in Sets())):
        if (names is not None):
            if (not isinstance(names, str)):
                raise TypeError('names must be a string when data is a set')
            # Family of letters indexed by `data`, named prefix + index.
            return Family(data, (lambda i: (names + str(i))), name=names)
        if (data in Sets()):
            return data
        return TotallyOrderedFiniteSet(data)
    if (name is not None):
        if (not isinstance(name, str)):
            raise TypeError('name must be a string')
        if ((name == 'positive integers') or (name == 'PP')):
            from sage.sets.positive_integers import PositiveIntegers
            return PositiveIntegers()
        if ((name == 'natural numbers') or (name == 'NN')):
            return NonNegativeIntegers()
        # Otherwise `name` is a space-separated list of predefined letter sets.
        data = []
        for alpha_name in name.split(' '):
            try:
                data.extend(list(set_of_letters[alpha_name]))
            except KeyError:
                raise TypeError('name is not recognized')
        return TotallyOrderedFiniteSet(data)
    if (data is None):
        # No arguments at all: alphabet of all Python objects.
        from sage.sets.pythonclass import Set_PythonType
        return Set_PythonType(object)
    raise ValueError('unable to construct an alphabet from the given parameters')
def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True, **kwargs):
    """Save *img* to *path*, delegating to the best backend for the file type.

    Args:
        path: output file path; used by the backend manager to pick a backend.
        img: image array to write.
        channel_first: if True, *img* is channel-first rather than channel-last.
        as_uint16: write 16-bit output.
        auto_scale: let the backend rescale values to the output range.
        **kwargs: backend-specific options, forwarded unchanged.
    """
    best_backend = backend_manager.get_best_backend(path, 'save')
    best_backend.imsave(path, img, channel_first=channel_first, as_uint16=as_uint16, auto_scale=auto_scale, **kwargs)
_module()
class VeryDeepVgg(BaseModule):
    """VGG-style convolutional backbone (as used in CRNN text recognition).

    Seven conv stages; the later poolings are asymmetric, which preserves
    horizontal resolution — presumably so the output can be read as a
    left-to-right feature sequence (confirm against the recognition head).
    """

    def __init__(self, leaky_relu=True, input_channels=3, init_cfg=None):
        """
        Args:
            leaky_relu: use LeakyReLU(0.2) instead of ReLU after each conv.
            input_channels: channel count of the input images.
            init_cfg: weight-init config; defaults to Xavier for convs and
                Uniform for batch norms.
        """
        # Fix: the original declared the default init_cfg as a mutable list
        # literal in the signature, shared across all instances; build it
        # fresh per call instead.
        if init_cfg is None:
            init_cfg = [dict(type='Xavier', layer='Conv2d'),
                        dict(type='Uniform', layer='BatchNorm2d')]
        super().__init__(init_cfg=init_cfg)
        ks = [3, 3, 3, 3, 3, 3, 2]               # kernel size per stage
        ps = [1, 1, 1, 1, 1, 1, 0]               # padding per stage
        ss = [1, 1, 1, 1, 1, 1, 1]               # stride per stage
        nm = [64, 128, 256, 256, 512, 512, 512]  # output channels per stage
        self.channels = nm
        cnn = Sequential()

        def conv_relu(i, batch_normalization=False):
            # Append stage i: Conv2d (+ optional BatchNorm) + (Leaky)ReLU.
            n_in = (input_channels if (i == 0) else nm[(i - 1)])
            n_out = nm[i]
            cnn.add_module('conv{0}'.format(i), nn.Conv2d(n_in, n_out, ks[i], ss[i], ps[i]))
            if batch_normalization:
                cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(n_out))
            if leaky_relu:
                cnn.add_module('relu{0}'.format(i), nn.LeakyReLU(0.2, inplace=True))
            else:
                cnn.add_module('relu{0}'.format(i), nn.ReLU(True))
        conv_relu(0)
        cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2))
        conv_relu(1)
        cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2))
        conv_relu(2, True)
        conv_relu(3)
        # Asymmetric pooling: halves height, keeps width (stride (2, 1)).
        cnn.add_module('pooling{0}'.format(2), nn.MaxPool2d((2, 2), (2, 1), (0, 1)))
        conv_relu(4, True)
        conv_relu(5)
        cnn.add_module('pooling{0}'.format(3), nn.MaxPool2d((2, 2), (2, 1), (0, 1)))
        conv_relu(6, True)
        self.cnn = cnn

    def out_channels(self):
        """Channel count of the final feature map."""
        return self.channels[-1]

    def forward(self, x):
        """Apply the convolutional stack to image batch *x*."""
        return self.cnn(x)
class SideObstacleSetBBreakoutWorld(RandomSideObstacleBreakoutWorld):
    """Breakout variant ("set B") whose random side obstacles use a 15-20
    width band; how the range bounds are interpreted (inclusive/exclusive)
    is defined by the base class — confirm there."""
    # Lower bound of the random side-obstacle width.
    side_obstacle_width_range_start = 15
    # Upper bound of the random side-obstacle width.
    side_obstacle_width_range_end = 20
class FeatureWrapper(torch.utils.data.Dataset):
    """Dataset wrapper that replaces each item's image with a pre-extracted
    feature looked up by the item's image path."""

    def __init__(self, data_source, feature_path):
        self.data_source = data_source
        # Mapping from image path to its cached feature.
        self.features = torch.load(feature_path)

    def __len__(self):
        return len(self.data_source)

    def __getitem__(self, idx):
        record = self.data_source[idx]
        return {
            'img': self.features[record['impath']],
            'label': record['label'],
            'classname': record['classname'],
            'impath': record['impath'],
            'idx': idx,
        }
def count_parameters(model):
    """Print each trainable parameter's name and size, then print and return
    the total number of trainable parameters in *model*."""
    total = 0
    for pname, param in model.named_parameters():
        if param.requires_grad:  # frozen weights are not counted
            n = param.numel()
            print(pname, n)
            total += n
    print(f'Total Trainable Params: {total}')
    return total
class Runtime():
    """Executes a rekall query over many argument batches via a worker pool."""

    def __init__(self, worker_pool_factory):
        # Factory: query -> worker pool; invoked once per run.
        self._get_worker_pool = worker_pool_factory

    @classmethod
    def inline(cls):
        """Alternate constructor that runs everything in the calling process.

        Fix: the method takes `cls` but was missing the @classmethod
        decorator, so `Runtime.inline()` raised and `instance.inline()`
        passed the instance as `cls`; restored the decorator.
        """
        return cls(inline_pool_factory)

    def run(self, query, args, combiner=union_combiner, randomize=True, chunksize=1, progress=False, profile=False, print_error=True):
        """Run *query* over all *args* and combine per-task results.

        Returns (combined_result, args_with_err). Raises
        RekallRuntimeException if there was work but every task failed.
        """
        with perf_count('Executing query in Runtime', enable=profile):
            with _WorkerPoolContext(self._get_worker_pool(query)) as pool:
                total_work = len(args)
                with tqdm(total=total_work, disable=(not progress)) as pbar:
                    with perf_count('Executing in workers', enable=profile):
                        args_with_err = []
                        with perf_count('Dispatching tasks', enable=profile):
                            if randomize:
                                random.shuffle(args)
                            async_results = pool.map(_create_tasks(args, chunksize), _get_callback(pbar, args_with_err, print_error))
                        combined_result = None
                        for future in async_results:
                            try:
                                r = future.get()
                            except TaskException:
                                # Failed tasks are recorded via the callback.
                                continue
                            if (combined_result is None):
                                combined_result = r
                            else:
                                combined_result = combiner(combined_result, r)
                        if ((combined_result is None) and (total_work > 0)):
                            raise RekallRuntimeException('All tasks failed!')
                        return (combined_result, args_with_err)

    def get_result_iterator(self, query, args, randomize=True, chunksize=1, print_error=True, dispatch_size=mp.cpu_count()):
        """Yield per-task results as they finish, dispatching tasks in waves
        of *dispatch_size*; raises at the end if any task failed."""
        with _WorkerPoolContext(self._get_worker_pool(query)) as pool:
            args_with_err = []
            if randomize:
                random.shuffle(args)
            tasks = _create_tasks(args, chunksize)
            if ((dispatch_size is None) or (dispatch_size <= 0)):
                dispatch_size = len(tasks)
            outstanding_tasks = tasks
            async_results = []
            num_finished_tasks = 0
            while (num_finished_tasks < len(tasks)):
                num_to_yield = len(async_results)
                # Refill the in-flight window once it drops to half.
                if ((num_to_yield <= (dispatch_size / 2)) and (len(outstanding_tasks) > 0)):
                    task_batch = outstanding_tasks[:dispatch_size]
                    outstanding_tasks = outstanding_tasks[dispatch_size:]
                    async_results.extend(pool.map(task_batch, _get_callback(None, args_with_err, print_error)))
                if randomize:
                    future_to_yield = _pop_future_to_yield(async_results)
                else:
                    future_to_yield = async_results.pop(0)
                num_finished_tasks += 1
                try:
                    r = future_to_yield.get()
                except TaskException:
                    continue
                (yield r)
            if (len(args_with_err) > 0):
                raise RekallRuntimeException('The following tasks failed: {0}'.format(args_with_err))
def are_projectively_equivalent(P, Q, base_ring):
    """True iff points P and Q are projectively equivalent over *base_ring*,
    i.e. the matrix with rows P and Q has rank < 2 (one is a scalar multiple
    of the other)."""
    from sage.matrix.constructor import matrix
    stacked = matrix(base_ring, [P, Q])
    return stacked.rank() < 2
.xfail(_IS_WASM, reason='cannot start subprocess')
def test_imports_strategies():
    """Importing the halving-search estimators must work only after the
    `sklearn.experimental.enable_halving_search_cv` flag module is imported;
    each snippet runs in a fresh subprocess so import state is isolated."""
    # Enabling flag imported first: estimator imports succeed.
    good_import = '\n from sklearn.experimental import enable_halving_search_cv\n from sklearn.model_selection import HalvingGridSearchCV\n from sklearn.model_selection import HalvingRandomSearchCV\n '
    assert_run_python_script(textwrap.dedent(good_import))
    # Importing sklearn.model_selection beforehand must not break enabling.
    good_import_with_model_selection_first = '\n import sklearn.model_selection\n from sklearn.experimental import enable_halving_search_cv\n from sklearn.model_selection import HalvingGridSearchCV\n from sklearn.model_selection import HalvingRandomSearchCV\n '
    assert_run_python_script(textwrap.dedent(good_import_with_model_selection_first))
    # Without the enabling flag the imports must raise ImportError.
    bad_imports = "\n import pytest\n\n with pytest.raises(ImportError, match='HalvingGridSearchCV is experimental'):\n from sklearn.model_selection import HalvingGridSearchCV\n\n import sklearn.experimental\n with pytest.raises(ImportError, match='HalvingRandomSearchCV is experimental'):\n from sklearn.model_selection import HalvingRandomSearchCV\n "
    assert_run_python_script(textwrap.dedent(bad_imports))
def script_model_defines_attr(script_model, attr):
    """Return True iff *script_model* defines *attr* itself, i.e. the
    attribute exists and differs from RecursiveScriptModule's default."""
    candidate = getattr(script_model, attr, None)
    if candidate is None:
        return False
    baseline = get_function_from_type(torch.jit.RecursiveScriptModule, attr)
    if baseline is None:
        return False
    # Defined by the model only when it is not the inherited default.
    return candidate != baseline
class CollectionNode(Node):
    """Representation-graph node for a collection (sequence or mapping)."""
    def __init__(self, tag, value, start_mark=None, end_mark=None, flow_style=None):
        # tag: the node's resolved tag string; value: the child content.
        self.tag = tag
        self.value = value
        # NOTE(review): the marks look like source positions kept for error
        # reporting — confirm against the Node base class.
        self.start_mark = start_mark
        self.end_mark = end_mark
        # flow_style: presumably True for flow style, False for block style.
        self.flow_style = flow_style
def do_structure(cfg):
    """Build the model described by *cfg* (CfgNode or lazy config) and log
    its structure."""
    if isinstance(cfg, CfgNode):
        model = build_model(cfg)
    else:
        # Lazy-config path: instantiate the model node directly.
        model = instantiate(cfg.model)
    logger.info('Model Structure:\n' + str(model))
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """Generate a Hugging Face README.md model card for an allenai FSMT model.

    Args:
        model_card_dir: directory (Path-like, must support mkdir) to write into.
        src_lang: source language code, interpolated into the card metadata.
        tgt_lang: target language code, interpolated into the card metadata.
        model_name: key into `scores`; interpolated throughout the card.
    """
    # Example sentences per language. NOTE(review): the 'ru' entry appears
    # mangled by a prior encoding pass — left byte-identical on purpose.
    texts = {'en': "Machine learning is great, isn't it?", 'ru': ' - , ?', 'de': 'Maschinelles Lernen ist groartig, nicht wahr?'}
    # Per-model [?, BLEU] scores; only index 1 (BLEU) is used in the card.
    scores = {'wmt19-de-en-6-6-base': [0, 38.37], 'wmt19-de-en-6-6-big': [0, 39.9]}
    pair = f'{src_lang}-{tgt_lang}'
    # The card template: doubled braces {{...}} render as literal braces.
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- allenai
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt19 transformer]( for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](
2 models are available:
* [wmt19-de-en-6-6-big](
* [wmt19-de-en-6-6-base](
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](
## Eval results
Here are the BLEU scores:
model | transformers
-------|
{model_name} | {scores[model_name][1]}
The score was calculated using this code:
```bash
git clone
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](
- [test set](
### BibTeX entry and citation info
```
{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f'Generating {path}')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(readme)
def DrawGLScene():
    """GLUT display callback: clear color/depth buffers, reset the modelview
    matrix, and present the frame (no geometry is drawn here)."""
    glClear((GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT))
    glLoadIdentity()
    glutSwapBuffers()
def step_decay(optimizer, step, lr, decay_step, gamma):
    """Set every param group's learning rate to lr * gamma**(step/decay_step)
    and return the new rate.

    Note the exponent uses true division, so the decay is continuous rather
    than stepped.
    """
    new_lr = lr * gamma ** (step / decay_step)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    return new_lr
def test_channel_first() -> None:
    """ChannelFirst must move the channel axis to the front for both reset()
    and step() observations, and the wrapped env must stay usable by a DQN."""
    env = DummyAtari(grayscale=False)
    assert env.observation_space.shape
    (width, height, channel) = env.observation_space.shape
    wrapper = ChannelFirst(env)
    (observation, _) = wrapper.reset()
    assert (observation.shape == (channel, width, height))
    (observation, _, _, _, _) = wrapper.step(wrapper.action_space.sample())
    assert (observation.shape == (channel, width, height))
    # End-to-end check: an algorithm can build against and predict from it.
    dqn = DQNConfig().create()
    dqn.build_with_env(wrapper)
    dqn.predict(np.expand_dims(observation, axis=0))
class SemiSupervisedTrainingPlan(TrainingPlan):
    """Training plan for semi-supervised modules: adds a classification loss
    (weighted by `classification_ratio`) on labelled batches and logs
    accuracy/F1/calibration metrics alongside the base ELBO metrics."""
    def __init__(self, module: BaseModuleClass, n_classes: int, *, classification_ratio: int=50, lr: float=0.001, weight_decay: float=1e-06, n_steps_kl_warmup: Union[(int, None)]=None, n_epochs_kl_warmup: Union[(int, None)]=400, reduce_lr_on_plateau: bool=False, lr_factor: float=0.6, lr_patience: int=30, lr_threshold: float=0.0, lr_scheduler_metric: Literal[('elbo_validation', 'reconstruction_loss_validation', 'kl_local_validation')]='elbo_validation', **loss_kwargs):
        """Forward optimizer/KL-warmup settings to the base plan; stash the
        classification weight into loss_kwargs and remember n_classes."""
        super().__init__(module=module, lr=lr, weight_decay=weight_decay, n_steps_kl_warmup=n_steps_kl_warmup, n_epochs_kl_warmup=n_epochs_kl_warmup, reduce_lr_on_plateau=reduce_lr_on_plateau, lr_factor=lr_factor, lr_patience=lr_patience, lr_threshold=lr_threshold, lr_scheduler_metric=lr_scheduler_metric, **loss_kwargs)
        self.loss_kwargs.update({'classification_ratio': classification_ratio})
        self.n_classes = n_classes
    def log_with_mode(self, key: str, value: Any, mode: str, **kwargs):
        """Log *value* under '<mode>_<key>' (mode = 'train'/'validation')."""
        self.log(f'{mode}_{key}', value, **kwargs)
    def compute_and_log_metrics(self, loss_output: LossOutput, metrics: dict[(str, ElboMetric)], mode: str):
        """Log base metrics, then classification metrics when a
        classification loss is present in *loss_output*."""
        super().compute_and_log_metrics(loss_output, metrics, mode)
        if (loss_output.classification_loss is None):
            return
        classification_loss = loss_output.classification_loss
        true_labels = loss_output.true_labels.squeeze()
        logits = loss_output.logits
        predicted_labels = torch.argmax(logits, dim=(- 1))
        accuracy = tmf.classification.multiclass_accuracy(predicted_labels, true_labels, self.n_classes, average='micro')
        f1 = tmf.classification.multiclass_f1_score(predicted_labels, true_labels, self.n_classes, average='micro')
        ce = tmf.classification.multiclass_calibration_error(logits, true_labels, self.n_classes)
        # Epoch-level logging only (on_step=False) for all four metrics.
        self.log_with_mode(METRIC_KEYS.CLASSIFICATION_LOSS_KEY, classification_loss, mode, on_step=False, on_epoch=True, batch_size=loss_output.n_obs_minibatch)
        self.log_with_mode(METRIC_KEYS.ACCURACY_KEY, accuracy, mode, on_step=False, on_epoch=True, batch_size=loss_output.n_obs_minibatch)
        self.log_with_mode(METRIC_KEYS.F1_SCORE_KEY, f1, mode, on_step=False, on_epoch=True, batch_size=loss_output.n_obs_minibatch)
        self.log_with_mode(METRIC_KEYS.CALIBRATION_ERROR_KEY, ce, mode, on_step=False, on_epoch=True, batch_size=loss_output.n_obs_minibatch)
    def training_step(self, batch, batch_idx):
        """One optimization step; a 2-tuple batch carries (full, labelled)
        datasets, otherwise the whole batch is unlabelled."""
        if (len(batch) == 2):
            full_dataset = batch[0]
            labelled_dataset = batch[1]
        else:
            full_dataset = batch
            labelled_dataset = None
        if ('kl_weight' in self.loss_kwargs):
            # Refresh the warmup-scheduled KL weight each step.
            self.loss_kwargs.update({'kl_weight': self.kl_weight})
        input_kwargs = {'feed_labels': False, 'labelled_tensors': labelled_dataset}
        input_kwargs.update(self.loss_kwargs)
        (_, _, loss_output) = self.forward(full_dataset, loss_kwargs=input_kwargs)
        loss = loss_output.loss
        self.log('train_loss', loss, on_epoch=True, batch_size=loss_output.n_obs_minibatch, prog_bar=True)
        self.compute_and_log_metrics(loss_output, self.train_metrics, 'train')
        return loss
    def validation_step(self, batch, batch_idx):
        """Validation mirror of training_step (no KL-weight refresh)."""
        if (len(batch) == 2):
            full_dataset = batch[0]
            labelled_dataset = batch[1]
        else:
            full_dataset = batch
            labelled_dataset = None
        input_kwargs = {'feed_labels': False, 'labelled_tensors': labelled_dataset}
        input_kwargs.update(self.loss_kwargs)
        (_, _, loss_output) = self.forward(full_dataset, loss_kwargs=input_kwargs)
        loss = loss_output.loss
        self.log('validation_loss', loss, on_epoch=True, batch_size=loss_output.n_obs_minibatch)
        self.compute_and_log_metrics(loss_output, self.val_metrics, 'validation')
def _train_test_metrics(args_namespace):
    """CLI adapter: unpack the parsed argparse namespace and delegate to
    train_test_metrics."""
    return train_test_metrics(args_namespace.train_dataset_path, args_namespace.test_dataset_path, args_namespace.output_path, args_namespace.config_path, args_namespace.exclude_slot_metrics, args_namespace.include_errors, args_namespace.verbosity)
def test_load_csvs_folder_does_not_exist():
    """load_csvs must raise ValueError with a message naming the missing folder."""
    # Escape the message so pytest treats it as a literal match, not a regex.
    error_message = re.escape("The folder 'demo/' cannot be found.")
    with pytest.raises(ValueError, match=error_message):
        load_csvs('demo/')
def apply_filters(sentence, filters):
    """Run *sentence* through each callable in *filters*, in order, feeding
    each filter's output to the next. Returns the final result."""
    result = sentence
    for transform in filters:
        result = transform(result)
    return result
(config_name='config', config_path='conf')
def main(cfg: DictConfig) -> None:
    """Hydra entry point: build the RLBench environment from *cfg* and launch
    training once per remaining seed in the current working directory."""
    logging.info(('\n' + OmegaConf.to_yaml(cfg)))
    train_device = _get_device(cfg.framework.gpu)
    env_device = _get_device(cfg.framework.env_gpu)
    logging.info(('Using training device %s.' % str(train_device)))
    logging.info(('Using env device %s.' % str(env_device)))
    gripper_mode = Discrete()
    # Trajectory-based arm control for PathARM, planned EE poses otherwise.
    if (cfg.method.name == 'PathARM'):
        arm_action_mode = TrajectoryActionMode(cfg.method.trajectory_points)
    else:
        arm_action_mode = EndEffectorPoseViaPlanning()
    action_mode = MoveArmThenGripper(arm_action_mode, gripper_mode)
    # All task module names (without .py), excluding the package initializer.
    task_files = [t.replace('.py', '') for t in os.listdir(task.TASKS_PATH) if ((t != '__init__.py') and t.endswith('.py'))]
    if (cfg.rlbench.task not in task_files):
        raise ValueError(('Task %s not recognised!.' % cfg.rlbench.task))
    task_class = task_file_to_task_class(cfg.rlbench.task)
    # Normalize a single camera name to a one-element list.
    cfg.rlbench.cameras = (cfg.rlbench.cameras if isinstance(cfg.rlbench.cameras, ListConfig) else [cfg.rlbench.cameras])
    obs_config = _create_obs_config(cfg.rlbench.cameras, cfg.rlbench.camera_resolution)
    env = CustomRLBenchEnv(task_class=task_class, observation_config=obs_config, action_mode=action_mode, dataset_root=cfg.rlbench.demo_path, episode_length=cfg.rlbench.episode_length, headless=True, time_in_state=True)
    cwd = os.getcwd()
    logging.info(('CWD:' + os.getcwd()))
    # Resume seed numbering after any 'seed*' entries already present.
    existing_seeds = len(list(filter((lambda x: ('seed' in x)), os.listdir(cwd))))
    for seed in range(existing_seeds, (existing_seeds + cfg.framework.seeds)):
        logging.info(('Starting seed %d.' % seed))
        run_seed(cfg, env, cfg.rlbench.cameras, train_device, env_device, seed)
_utils.test(debug=True)
def test_assign_ann():
    """Check annotated assignments using Taichi types: the values must keep
    their expected numeric identities after assignment."""
    def func_ann():
        a: ti.i32 = 1
        b: ti.f32 = a
        assert (a == 1)
        assert (b == 1.0)
    func_ann()
class SeqCLDataset(th.utils.data.Dataset):
    """Contrastive dataset: each item pairs a node's tokens with the tokens
    of one uniformly sampled neighbour."""

    def __init__(self, data: Sequence):
        super().__init__()
        self.d = data

    def __getitem__(self, node_id):
        tokens = self.d.get_tokens(node_id)
        candidates = self.d.neighbours[node_id]
        # Sample one neighbour uniformly at random.
        chosen = np.random.choice(candidates, 1)
        return self.d.get_NB_tokens(tokens, chosen[0])

    def __len__(self):
        return self.d.n_nodes
def s_load(file_obj):
    """Yield unpickled objects from a binary stream of base64-encoded records
    separated by blank lines.

    Args:
        file_obj: iterable of bytes lines; each record is one or more base64
            lines terminated by a lone b'\\n'.

    Yields:
        The unpickled object for each complete record. On a truncated pickle
        (EOFError) the error is reported and iteration ends.
    """
    cur_elt = []
    for line in file_obj:
        if line == b'\n':
            encoded_elt = b''.join(cur_elt)
            try:
                pickled_elt = base64.b64decode(encoded_elt)
                elt = loads(pickled_elt)
            except EOFError:
                print('EOF found while unpickling data')
                print(pickled_elt)
                # Fix: `raise StopIteration` inside a generator is converted
                # to RuntimeError under PEP 479; `return` ends iteration.
                return
            cur_elt = []
            yield elt
        else:
            cur_elt.append(line)
.parametrize('embedding_size,cross_num,hidden_size,sparse_feature_num', [(8, 0, (32,), 2), (8, 1, (32,), 2)])
def test_DCNMix(embedding_size, cross_num, hidden_size, sparse_feature_num):
    """Parametrized smoke test for DCN-Mix: build the model on synthetic
    sparse+dense features and run the shared fit/predict checks."""
    model_name = 'DCN-Mix'
    sample_size = SAMPLE_SIZE
    (x, y, feature_columns) = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num)
    model = DCNMix(linear_feature_columns=feature_columns, dnn_feature_columns=feature_columns, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5, device=get_device())
    check_model(model, model_name, x, y)
def lr_grad(w, X, y, lam=0):
    """Gradient of the L2-regularized logistic-regression loss.

    Args:
        w: weight vector (d,).
        X: design matrix (n, d).
        y: labels in {0, 1} or {-1, +1}; zeros are mapped to -1 internally.
        lam: L2 regularization strength (scaled by n = X.size(0)).

    Returns:
        Gradient vector (d,).
    """
    # Fix: the original wrote -1 into the caller's tensor in place; operate
    # on a copy so the caller's labels are left untouched.
    y = y.clone()
    y[y == 0] = -1
    z = torch.sigmoid(y * X.mv(w))
    return X.t().mv((z - 1) * y) + (lam * X.size(0)) * w
def register_optimizer_class(cls, name=None):
    """Register optimizer class *cls* in the global registry under its
    lower-cased name; an 'Optimizer' suffix also registers the trimmed alias.

    Duplicate registrations trigger an assertion.
    """
    _init_optimizer_classes_dict()
    key = name or cls.__name__
    _check_valid_optimizer(cls)
    assert key.lower() not in _OptimizerClassesDict
    _OptimizerClassesDict[key.lower()] = cls
    suffix = 'Optimizer'
    if key.endswith(suffix):
        # e.g. 'AdamOptimizer' is also reachable as 'adam'.
        alias = key[:-len(suffix)]
        assert alias.lower() not in _OptimizerClassesDict
        _OptimizerClassesDict[alias.lower()] = cls
class AttnSkipUpBlock2D(nn.Module):
    """Up-sampling UNet block with self-attention and a FIR-upsampled skip branch.

    Each resnet consumes the current hidden states concatenated with one entry
    popped from the residual tuple; a single attention block is applied after
    the resnet stack; when ``add_upsample`` is set, an extra up-resnet plus a
    norm/act/conv path contributes to the running ``skip_sample``.
    """
    def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_pre_norm: bool=True, attn_num_head_channels=1, attention_type='default', output_scale_factor=np.sqrt(2.0), upsample_padding=1, add_upsample=True):
        super().__init__()
        self.attentions = nn.ModuleList([])
        self.resnets = nn.ModuleList([])
        self.attention_type = attention_type
        for i in range(num_layers):
            # Last resnet consumes the block-input skip width; earlier ones out_channels.
            res_skip_channels = (in_channels if (i == (num_layers - 1)) else out_channels)
            resnet_in_channels = (prev_output_channel if (i == 0) else out_channels)
            # NOTE(review): `groups` divides only res_skip_channels by 4 while
            # groups_out divides the full channel count by 4; upstream diffusers
            # uses min((resnet_in_channels + res_skip_channels) // 4, 32) —
            # possible missing parentheses, confirm against upstream.
            self.resnets.append(ResnetBlock2D(in_channels=(resnet_in_channels + res_skip_channels), out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min((resnet_in_channels + (res_skip_channels // 4)), 32), groups_out=min((out_channels // 4), 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
            self.attentions.append(AttentionBlock(out_channels, num_head_channels=attn_num_head_channels, rescale_output_factor=output_scale_factor, eps=resnet_eps))
        # FIR-filter upsampler for the incoming skip sample.
        self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels)
        if add_upsample:
            self.resnet_up = ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min((out_channels // 4), 32), groups_out=min((out_channels // 4), 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, use_nin_shortcut=True, up=True, kernel='fir')
            # Projects hidden states to 3 channels (RGB) for the skip output.
            self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            self.skip_norm = torch.nn.GroupNorm(num_groups=min((out_channels // 4), 32), num_channels=out_channels, eps=resnet_eps, affine=True)
            self.act = nn.SiLU()
        else:
            self.resnet_up = None
            self.skip_conv = None
            self.skip_norm = None
            self.act = None
    def forward(self, hidden_states, res_hidden_states_tuple, temb=None, skip_sample=None):
        """Run the resnets over popped residuals, one attention pass, then
        update and return (hidden_states, skip_sample)."""
        for resnet in self.resnets:
            # Pop the most recent residual and concatenate along channels.
            res_hidden_states = res_hidden_states_tuple[(- 1)]
            res_hidden_states_tuple = res_hidden_states_tuple[:(- 1)]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
            hidden_states = resnet(hidden_states, temb)
        # Single attention application after the resnet stack (only attentions[0] is used).
        hidden_states = self.attentions[0](hidden_states)
        if (skip_sample is not None):
            skip_sample = self.upsampler(skip_sample)
        else:
            skip_sample = 0
        if (self.resnet_up is not None):
            skip_sample_states = self.skip_norm(hidden_states)
            skip_sample_states = self.act(skip_sample_states)
            skip_sample_states = self.skip_conv(skip_sample_states)
            skip_sample = (skip_sample + skip_sample_states)
            hidden_states = self.resnet_up(hidden_states, temb)
        return (hidden_states, skip_sample)
(Output('anomaly-attribute-options', 'children'), Output('anomaly_exception_modal', 'is_open'), Output('anomaly_exception_modal_content', 'children'), [Input('anomaly-btn', 'n_clicks'), Input('anomaly_exception_modal_close', 'n_clicks')], [State('log-type-select', 'value'), State('attribute-name-options', 'value'), State('file-select', 'value'), State('parsing-algo-select', 'value'), State('vectorization-algo-select', 'value'), State('categorical-encoder-select', 'value'), State('ad-algo-select', 'value'), State('time-interval', 'value'), State('ad-param-table', 'children'), State('ad-parsing-param-table', 'children')])
def click_run(btn_click, modal_close, log_type, attributes, filename, parsing_algo, vectorization_algo, categorical_encoder, ad_algo, time_interval, ad_param_table, parsing_param_table):
    """Dash callback: run log anomaly detection or close the error modal.

    Returns a 3-tuple: (attribute-options children, modal is_open, modal content).
    Presumably registered through an @app.callback decorator at the call site
    (stripped in this view) — confirm against the original module.
    """
    ctx = dash.callback_context
    if ctx.triggered:
        # Identify which Input fired this callback.
        prop_id = ctx.triggered[0]['prop_id'].split('.')[0]
        if (prop_id == 'anomaly-btn'):
            try:
                # Map the time-interval slider index to a resample frequency string.
                interval_map = {0: '1s', 1: '1min', 2: '1h', 3: '1d'}
                freq = interval_map[time_interval]
                file_path = os.path.join(file_manager.base_directory, filename)
                # Extract {Parameter: Value} pairs from the Dash DataTable props,
                # then parse them against each algorithm's declared parameter info.
                ad_params = log_anomaly_demo.parse_parameters(param_info=log_anomaly_demo.get_parameter_info(ad_algo), params={p['Parameter']: p['Value'] for p in ad_param_table['props']['data'] if p['Parameter']})
                parsing_params = LogPattern().parse_parameters(param_info=LogPattern().get_parameter_info(parsing_algo), params={p['Parameter']: p['Value'] for p in parsing_param_table['props']['data'] if p['Parameter']})
                # Assemble the full anomaly-detection config from the UI selections.
                config = _ad_config_sample()
                config.open_set_data_loader_config.filepath = file_path
                config.open_set_data_loader_config.dataset_name = log_type
                config.feature_extractor_config.group_by_category = attributes
                config.feature_extractor_config.group_by_time = freq
                config.log_parser_config.parsing_algorithm = parsing_algo
                config_class = LogPattern().get_config_class(parsing_algo)
                config.log_parser_config.parsing_algo_params = config_class.from_dict(parsing_params)
                config.log_vectorizer_config.algo_name = vectorization_algo
                config.categorical_encoder_config.algo_name = categorical_encoder
                config.anomaly_detection_config.algo_name = ad_algo
                config_class = log_anomaly_demo.get_config_class(ad_algo)
                config.anomaly_detection_config.algo_params = config_class.from_dict(ad_params)
                log_anomaly_demo.execute_anomaly_detection(config)
                # Success: refresh attribute options and keep the modal closed.
                return (create_attribute_component(log_anomaly_demo.get_attributes()), False, '')
            except Exception as error:
                # Any failure is surfaced to the user via the exception modal.
                return (html.Div(), True, str(error))
        elif (prop_id == 'anomaly_exception_modal_close'):
            return (html.Div(), False, '')
    else:
        return (html.Div(), False, '')
def shift_stats_container(sc, num_of_shifting_factors):
    """Shift a statistics container by random factors.

    Returns (shifted container, the random shifting factors used).
    """
    factors = np.random.random(num_of_shifting_factors)
    return shift_statistics(sc, factors), factors
def update_plot(policy, max_length=np.inf):
    """Enqueue a 'demo' request carrying the policy's current parameters."""
    message = ['demo', policy.get_param_values(), max_length]
    queue.put(message)
def init_params(net):
    """Initialize network parameters in place.

    Conv / ConvTranspose weights get Kaiming-normal (fan_out), BatchNorm gets
    weight=1 / bias=0, and Linear biases are zeroed (Linear weights keep
    PyTorch's default init, as in the original).

    Bug fix: the original tested ``m.bias.data is not None``, which raises
    AttributeError for layers built with bias=False (m.bias is None); test
    ``m.bias`` itself instead.
    """
    for m in net.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            if m.bias is not None:
                init.constant_(m.bias, 0)
class VQVAE(tfk.Model):
    """Vector-Quantized VAE: encoder -> EMA-updated codebook quantizer -> decoder.

    `beta` scales the commitment loss added to the model's losses in call().
    """
    def __init__(self, encoder, decoder, codebook_size, beta=0.25):
        super(VQVAE, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.quantizer = VectorQuantizerEMA(codebook_size)
        self.beta = beta
    def quantize(self, x):
        """Return nearest-codebook-entry indices for the encodings of `x`."""
        if (not self.built):
            # Run a forward pass first so the codebook variables exist.
            self(x)
        z_e = self.encoder(x)
        flat_z_e = tf.reshape(z_e, shape=((- 1), z_e.shape[(- 1)]))
        codebook = self.quantizer.codebook
        # Squared L2 distances via the expansion ||z||^2 - 2 z.e + ||e||^2
        # (codebook entries lie along axis 1 here).
        distances = ((tf.reduce_sum((flat_z_e ** 2), axis=1, keepdims=True) - (2 * tf.matmul(flat_z_e, codebook))) + tf.reduce_sum((codebook ** 2), axis=0, keepdims=True))
        # argmax of negated distances == argmin of distances.
        encoding_indices = tf.argmax((- distances), axis=1)
        encoding_indices = tf.reshape(encoding_indices, tf.concat([tf.shape(z_e)[:(- 1)], [1]], axis=0))
        return encoding_indices
    def dequantize(self, x):
        """Decode codebook indices `x` back into a reconstruction."""
        flat_x = tf.reshape(x, shape=((- 1), x.shape[(- 1)]))
        # Transpose so entries can be gathered row-wise by index.
        codebook = tf.transpose(self.quantizer.codebook)
        z_q = tf.nn.embedding_lookup(codebook, flat_x)
        z_q = tf.reshape(z_q, tf.concat([tf.shape(x)[:(- 1)], [(- 1)]], axis=0))
        x_rec = self.decoder(z_q)
        return x_rec
    def call(self, x, training=False):
        """Autoencode `x`; adds the beta-weighted commitment loss as a side effect."""
        z_e = self.encoder(x)
        z_q = self.quantizer(z_e, training=training)
        # Commitment loss pulls encoder outputs toward their (stop-gradient) codes.
        commitment_loss = tf.reduce_mean(tf.square((tf.stop_gradient(z_q) - z_e)))
        self.add_loss((self.beta * commitment_loss))
        x_rec = self.decoder(z_q)
        return x_rec
class PythonInliner(ast.NodeTransformer):
    """AST transformer that substitutes every Name node whose id equals
    `target_id` with the `target_ast` subtree (copying source locations)."""

    def __init__(self, target_id, target_ast):
        self.target_id = target_id
        self.target_ast = target_ast

    def visit_Name(self, node: ast.AST):
        """Replace matching names; recurse into everything else."""
        if node.id != self.target_id:
            return self.generic_visit(node)
        return ast.copy_location(self.target_ast, node)
def random_hex():
    """Return a random color string of the form '#RRGGBB'.

    Bug fix: np.random.randint's upper bound is exclusive, so the original
    randint(0, 255) could never produce the component value 255 (0xFF);
    use 256 to cover the full byte range.
    """
    r = (lambda: np.random.randint(0, 256))
    return ('#%02X%02X%02X' % (r(), r(), r()))
def get_parent(config: Dict[str, Any]) -> Optional[str]:
    """Return the directory portion of config['load_from'], or None.

    A path without a '/' separator is returned unchanged (rsplit behavior).
    """
    load_from = config['load_from']
    if load_from is None:
        return None
    return load_from.rsplit('/', maxsplit=1)[0]
class BaseTransformersCLICommand(ABC):
    """Base class every transformers CLI subcommand implements.

    Subclasses register their argparse sub-parser via ``register_subcommand``
    and execute via ``run``.
    """

    # Fix: this hook takes the parser as its first argument and never uses an
    # instance, so it must be a @staticmethod; as a plain instance method,
    # ``cmd.register_subcommand(parser)`` would silently bind the instance to
    # ``parser``. (Upstream transformers also marks both methods
    # @abstractmethod; not added here to stay backward-compatible with
    # subclasses relying on NotImplementedError at call time.)
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    def run(self):
        raise NotImplementedError()
def xcorr_slow(x, kernel):
    """Per-sample 2-D cross-correlation: convolve each batch element of `x`
    with its own kernel, then concatenate the results along the batch axis."""
    batch_size = x.size(0)
    outputs = []
    for idx in range(batch_size):
        sample = x[idx].unsqueeze(0)
        filt = kernel[idx]
        filt = filt.view(-1, sample.size(1), filt.size(1), filt.size(2))
        outputs.append(F.conv2d(sample, filt))
    return torch.cat(outputs, 0)
def _map_slice_value_raw(v: Union[None, slice, int, numpy.number, numpy.ndarray, Tensor[T]]) -> Union[None, slice, int, numpy.number, T]:
    """Map a slice component to its raw backend value.

    slices are mapped element-wise; numbers and small arrays pass through;
    Tensors are unwrapped to their raw backend tensor.
    """
    if v is None:
        return None
    if isinstance(v, slice):
        start, stop, step = (_map_slice_value_raw(p) for p in (v.start, v.stop, v.step))
        return slice(start, stop, step)
    if isinstance(v, (int, numpy.number)):
        return v
    if isinstance(v, numpy.ndarray):
        assert v.ndim <= 1, f'strided_slice: expect scalar or vector, got array with shape {v.shape}'
        return v
    if isinstance(v, Tensor):
        assert len(v.dims) <= 1, f'strided_slice: expect scalar or vector, got Tensor with dims {v.dims}'
        return v.raw_tensor
    raise TypeError(f'strided_slice: got unexpected value of type {type(v).__name__}')
class SageNotebookInteractiveShell(SageShellOverride, InteractiveShell):
    """IPython shell variant used inside the Sage notebook."""

    def init_display_formatter(self):
        """Switch rich-output rendering to the notebook backend."""
        from sage.repl.rich_output.backend_ipython import BackendIPythonNotebook
        notebook_backend = BackendIPythonNotebook()
        manager = notebook_backend.get_display_manager()
        manager.switch_backend(notebook_backend, shell=self)
_module()
class VFNet(SingleStageDetector):
    """Implementation of `VarifocalNet (VFNet) <https://arxiv.org/abs/2008.13367>`_."""
    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        # Thin wrapper: all construction is delegated to SingleStageDetector.
        super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg)
def test_get_path_accessible(accessible_path, workspace_root):
    """A path inside the workspace resolves to an absolute path under its root."""
    ws = Workspace(workspace_root, True)
    resolved = ws.get_path(accessible_path)
    assert resolved.is_absolute()
    assert resolved.is_relative_to(workspace_root)
class ModelArguments():
    """Arguments describing which model/config/tokenizer we fine-tune from.

    NOTE(review): every attribute uses dataclasses.field(...), so this class is
    presumably decorated with @dataclass at the definition site — confirm.
    """
    # Model / tokenizer selection.
    model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'})
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
    # Hub access options.
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'})
    resize_position_embeddings: Optional[bool] = field(default=None, metadata={'help': "Whether to automatically resize the position embeddings if `max_source_length` exceeds the model's position embeddings."})
    from_pt: bool = field(default=False, metadata={'help': 'Whether to load the model checkpoint from .pt file'})
    # Multi-encoder configuration.
    multiencoder_type: Optional[str] = field(default=None, metadata={'help': "Currently only 'bart' is supported"})
    multiencoder_max_num_chunks: Optional[int] = field(default=None, metadata={'help': 'Max passages/encoders to use in multiencoder'})
    multiencoder_stride: bool = field(default=False, metadata={'help': 'Whether to stride'})
def test_orthogonal_procrustes_checkfinite_exception():
    """orthogonal_procrustes must reject inf/nan inputs with ValueError."""
    np.random.seed(1234)
    rows, cols = 2, 3
    A_ok = np.random.randn(rows, cols)
    B_ok = np.random.randn(rows, cols)
    for bad_value in (np.inf, -np.inf, np.nan):
        A_nf = A_ok.copy()
        A_nf[1, 2] = bad_value
        B_nf = B_ok.copy()
        B_nf[1, 2] = bad_value
        combos = ((A_ok, B_nf), (A_nf, B_ok), (A_nf, B_nf))
        for A, B in combos:
            assert_raises(ValueError, orthogonal_procrustes, A, B)
def srwl_uti_cryst_ASF(_s, _mat='Si'):
    """Atomic scattering factor f0(s) from a 4-Gaussian analytic fit.

    Only silicon ('Si') coefficients are available; other materials raise.
    At s == 0 the tabulated value fa0 is returned directly.
    """
    if _mat != 'Si':
        raise Exception(strMatDataNotDefined)
    # Interleaved (a_i, b_i) pairs followed by the constant term c.
    fa = [6.2915, 2.4386, 3.0353, 32.3337, 1.9891, 0.6785, 1.541, 81.6937, 1.1407]
    fa0 = 13.985
    s2 = _s * _s
    if s2 == 0:
        return fa0
    total = 0.0
    for i in range(0, 8, 2):
        total += fa[i] * exp(-fa[i + 1] * s2)
    return total + fa[8]
def nparray(named_tensor):
    """Deserialize a NamedTensor protobuf into a float32 numpy array.

    Pops the last transformer-metadata entry; its int_list carries the
    target shape. (Local renamed so the result no longer shadows the
    function's own name.)
    """
    proto = named_tensor.transformer_metadata.pop()
    metadata = {'int_to_float': proto.int_to_float, 'int_list': proto.int_list, 'bool_list': proto.bool_list}
    target_shape = tuple(metadata['int_list'])
    flat = np.frombuffer(named_tensor.data_bytes, dtype=np.float32)
    return np.reshape(flat, newshape=target_shape, order='C')
class Table(object):
    """A dataset table loaded from a pickled DataFrame, with per-column stats.

    Wraps DATA_ROOT/<dataset>/<version>.pkl and builds a Column object per
    DataFrame column; offers normalization, discretization and
    mutual-information utilities for column ordering.
    """
    def __init__(self, dataset, version):
        self.dataset = dataset
        self.version = version
        self.name = f'{self.dataset}_{self.version}'
        L.info(f'start building data {self.name}...')
        # Load the pickled DataFrame for this dataset/version.
        self.data = pd.read_pickle(((DATA_ROOT / self.dataset) / f'{self.version}.pkl'))
        self.data_size_mb = ((self.data.values.nbytes / 1024) / 1024)
        self.row_num = self.data.shape[0]
        self.col_num = len(self.data.columns)
        self.parse_columns()
        L.info(f'build finished: {self}')
    def parse_columns(self):
        """Build an ordered mapping column-name -> Column wrapper."""
        self.columns = OrderedDict([(col, Column(col, self.data[col])) for col in self.data.columns])
    def __repr__(self):
        return f'''Table {self.name} ({self.row_num} rows, {self.data_size_mb:.2f}MB, columns:
{os.linesep.join([repr(c) for c in self.columns.values()])})'''
    def get_minmax_dict(self):
        """Return {column index: (minval, maxval)} for every column."""
        minmax_dict = {}
        for (i, col) in enumerate(self.columns.values()):
            minmax_dict[i] = (col.minval, col.maxval)
        return minmax_dict
    def normalize(self, scale=1):
        """Return a deep-copied DataFrame with each column normalized and scaled."""
        data = copy.deepcopy(self.data)
        for (cname, col) in self.columns.items():
            data[cname] = (col.normalize(data[cname].values) * scale)
        return data
    def digitalize(self):
        """Return a deep-copied DataFrame with categorical columns discretized
        and NaNs in numeric columns filled with 0."""
        data = copy.deepcopy(self.data)
        for (cname, col) in self.columns.items():
            if is_categorical(col.dtype):
                data[cname] = col.discretize(data[cname])
            elif col.has_nan:
                data[cname].fillna(0, inplace=True)
        return data
    def get_max_muteinfo_order(self):
        """Greedy column ordering: start with the max-entropy column, then
        repeatedly append the column with max mutual information against the
        concatenation of already-chosen columns.

        Returns (ordered column names, their positional indices)."""
        order = []
        max_entropy = float('-inf')
        first_col = None
        # Seed with the highest-entropy column.
        for c in self.columns.keys():
            e = entropy(self.data[c].value_counts())
            if (e > max_entropy):
                first_col = c
                max_entropy = e
        assert (first_col is not None), (first_col, max_entropy)
        order.append(first_col)
        sep = '|'
        # Running string key representing the joint value of chosen columns.
        chosen_data = (self.data[first_col].astype(str) + sep)
        while (len(order) < self.col_num):
            max_muinfo = float('-inf')
            next_col = None
            for c in self.columns.keys():
                if (c in order):
                    continue
                m = mutual_info_score(chosen_data, self.data[c])
                if (m > max_muinfo):
                    next_col = c
                    max_muinfo = m
            assert (next_col is not None), (next_col, max_entropy)
            order.append(next_col)
            chosen_data = ((chosen_data + sep) + self.data[next_col].astype(str))
        return (order, [self.data.columns.get_loc(c) for c in order])
    def get_muteinfo(self, digital_data=None):
        """Return the symmetric pairwise mutual-information matrix as a DataFrame.

        Reuses the mirrored entry when already computed to halve the work."""
        data = (digital_data if (digital_data is not None) else self.digitalize())
        muteinfo_dict = {}
        for c1 in self.columns.keys():
            muteinfo_dict[c1] = {}
            for c2 in self.columns.keys():
                if ((c1 != c2) and (c2 in muteinfo_dict)):
                    assert (c1 in muteinfo_dict[c2]), muteinfo_dict.keys()
                    muteinfo_dict[c1][c2] = muteinfo_dict[c2][c1]
                else:
                    muteinfo_dict[c1][c2] = mutual_info_score(data[c1], data[c2])
        return pd.DataFrame().from_dict(muteinfo_dict)
def get_cpp_decl_type(typename, ensure_temp_safe=True):
    """Return the C++ declaration type for `typename`, substituting a
    temporary-safe alias when requested and one exists."""
    if not ensure_temp_safe:
        return typename
    return TEMP_SAFE_CPP_DECL_TYPE.get(typename, typename)
class Fantasizer(GreedyAcquisitionFunctionBuilder[FantasizerModelOrStack]):
    """Greedy batch acquisition via 'fantasized' observations at pending points.

    Pending points receive fantasized observations — either the model mean
    ('KB', kriging believer) or posterior samples ('sample') — the models are
    conditioned on those observations, and the base acquisition function is
    rebuilt on the conditioned models.
    """
    def __init__(self, base_acquisition_function_builder: Optional[(AcquisitionFunctionBuilder[SupportsPredictJoint] | SingleModelAcquisitionBuilder[SupportsPredictJoint])]=None, fantasize_method: str='KB'):
        # Validate the fantasize method inside the TF graph.
        tf.debugging.Assert((fantasize_method in ['KB', 'sample']), [tf.constant([])])
        # Default to Expected Improvement when no base builder is supplied.
        if (base_acquisition_function_builder is None):
            base_acquisition_function_builder = ExpectedImprovement()
        if isinstance(base_acquisition_function_builder, SingleModelAcquisitionBuilder):
            base_acquisition_function_builder = base_acquisition_function_builder.using(OBJECTIVE)
        self._builder = base_acquisition_function_builder
        self._fantasize_method = fantasize_method
        # Lazily created acquisition functions (plain and fantasized variants).
        self._base_acquisition_function: Optional[AcquisitionFunction] = None
        self._fantasized_acquisition: Optional[AcquisitionFunction] = None
        self._fantasized_models: Mapping[(Tag, (_fantasized_model | ModelStack[SupportsPredictJoint]))] = {}
    def _update_base_acquisition_function(self, models: Mapping[(Tag, FantasizerModelOrStack)], datasets: Optional[Mapping[(Tag, Dataset)]]) -> AcquisitionFunction:
        """Create or refresh the acquisition function with no pending points."""
        if (self._base_acquisition_function is not None):
            self._base_acquisition_function = self._builder.update_acquisition_function(self._base_acquisition_function, models, datasets)
        else:
            self._base_acquisition_function = self._builder.prepare_acquisition_function(models, datasets)
        return self._base_acquisition_function
    def _update_fantasized_acquisition_function(self, models: Mapping[(Tag, FantasizerModelOrStack)], datasets: Optional[Mapping[(Tag, Dataset)]], pending_points: TensorType) -> AcquisitionFunction:
        """Create or refresh the acquisition function built on models that are
        conditioned on fantasized observations at `pending_points`."""
        tf.debugging.assert_rank(pending_points, 2)
        # Generate fantasized observations at the pending points per model.
        fantasized_data = {tag: _generate_fantasized_data(fantasize_method=self._fantasize_method, model=model, pending_points=pending_points) for (tag, model) in models.items()}
        if (datasets is None):
            datasets = fantasized_data
        else:
            datasets = {tag: (data + fantasized_data[tag]) for (tag, data) in datasets.items()}
        if (self._fantasized_acquisition is None):
            # First call: build conditioned models and the acquisition function.
            self._fantasized_models = {tag: _generate_fantasized_model(model, fantasized_data[tag]) for (tag, model) in models.items()}
            self._fantasized_acquisition = self._builder.prepare_acquisition_function(cast(Dict[(Tag, SupportsPredictJoint)], self._fantasized_models), datasets)
        else:
            # Subsequent calls: push new fantasized data into existing models.
            for (tag, model) in self._fantasized_models.items():
                if isinstance(model, ModelStack):
                    # Split stacked observations back per sub-model event size.
                    observations = tf.split(fantasized_data[tag].observations, model._event_sizes, axis=(- 1))
                    for (submodel, obs) in zip(model._models, observations):
                        submodel.update_fantasized_data(Dataset(fantasized_data[tag].query_points, obs))
                else:
                    model.update_fantasized_data(fantasized_data[tag])
            self._builder.update_acquisition_function(self._fantasized_acquisition, cast(Dict[(Tag, SupportsPredictJoint)], self._fantasized_models), datasets)
        return self._fantasized_acquisition
    def prepare_acquisition_function(self, models: Mapping[(Tag, FantasizerModelOrStack)], datasets: Optional[Mapping[(Tag, Dataset)]]=None, pending_points: Optional[TensorType]=None) -> AcquisitionFunction:
        """Build the acquisition function; validates that every model (or every
        member of a ModelStack) supports fast updates."""
        for model in models.values():
            if (not (isinstance(model, FantasizerModelType) or (isinstance(model, ModelStack) and all((isinstance(m, FantasizerModelType) for m in model._models))))):
                raise NotImplementedError(f'Fantasizer only works with FastUpdateModel models that also support predict_joint, get_kernel and get_observation_noise, or with ModelStack stacks of such models; received {model.__repr__()}')
        if (pending_points is None):
            return self._update_base_acquisition_function(models, datasets)
        else:
            return self._update_fantasized_acquisition_function(models, datasets, pending_points)
    def update_acquisition_function(self, function: AcquisitionFunction, models: Mapping[(Tag, FantasizerModelOrStack)], datasets: Optional[Mapping[(Tag, Dataset)]]=None, pending_points: Optional[TensorType]=None, new_optimization_step: bool=True) -> AcquisitionFunction:
        """Refresh the appropriate acquisition function; the `function` argument
        itself is not inspected (state is tracked on self)."""
        if (pending_points is None):
            return self._update_base_acquisition_function(models, datasets)
        else:
            return self._update_fantasized_acquisition_function(models, datasets, pending_points)
class State(Borg):
    """Borg-style shared state carrying a logger and a Spark session.

    All instances share state; logger and session are created at most once
    unless an explicit session is supplied.
    """

    def __init__(self, session: Optional[SparkSession]=None):
        Borg.__init__(self)
        if not hasattr(self, 'logger_set'):
            self.logger = logger_with_settings()
            self.logger_set = True
        if session is not None:
            # An explicit session always replaces the shared one.
            self.session = session
        elif not hasattr(self, 'session'):
            self.session = get_spark_session()
class DebugPrompts(Prompts):
    """IPython prompt set displayed while in debug mode."""

    def in_prompt_tokens(self, cli=None):
        """Tokens for the primary input prompt."""
        tokens = [(Token.Prompt, 'debug: ')]
        return tokens

    def continuation_prompt_tokens(self, cli=None, width=None):
        """Tokens for continuation lines of a multi-line input."""
        tokens = [(Token.Prompt, '.....: ')]
        return tokens

    def rewrite_prompt_tokens(self):
        """Tokens shown when IPython rewrites the input line."""
        tokens = [(Token.Prompt, '-----> ')]
        return tokens

    def out_prompt_tokens(self):
        """Tokens for the output prompt (intentionally empty)."""
        tokens = [(Token.OutPrompt, '')]
        return tokens
class IndexVocab(Configurable):
    """Vocabulary over raw integer indices backed by a TF1 int32 placeholder.

    NOTE(review): uses `basestring`/`long` and tf.placeholder — this module
    targets Python 2 / TensorFlow 1.x; confirm before porting.
    """
    # Index reserved for the root token.
    ROOT = 0
    def __init__(self, *args, **kwargs):
        super(IndexVocab, self).__init__(*args, **kwargs)
        self.placeholder = None
    def generate_placeholder(self):
        """Create (once) and return the [batch, seq] int32 placeholder."""
        if (self.placeholder is None):
            self.placeholder = tf.placeholder(tf.int32, shape=[None, None], name=self.name)
        return self.placeholder
    def set_feed_dict(self, data, feed_dict):
        """Bind `data` to this vocab's placeholder in `feed_dict`."""
        feed_dict[self.placeholder] = data
        return
    def setup(self):
        """Reset the placeholder so a fresh graph can recreate it."""
        self.placeholder = None
        return
    def index(self, token):
        """Map a token string to its integer index ('_' maps to 0)."""
        return (0 if (token == '_') else int(token))
    def depth(self):
        return None
    def conll_idx(self):
        return self._conll_idx
    def __getitem__(self, key):
        """String -> int, int -> string, or element-wise over an iterable."""
        if isinstance(key, basestring):
            return int(key)
        elif isinstance(key, (int, long, np.int32, np.int64)):
            return str(key)
        elif hasattr(key, '__iter__'):
            return [self[k] for k in key]
        else:
            raise ValueError('key to BaseVocab.__getitem__ must be (iterable of) string or integer')
        # NOTE(review): unreachable — kept to preserve the original byte-for-byte.
        return
class ResNetABN(nn.Module):
    """ResNet backbone with auxiliary (per-domain) batch normalization.

    Every BN site is a MultiBatchNorm holding `num_bns` parallel BN layers;
    forward() selects among them via `domain_label`.
    """
    def __init__(self, block, layers, num_classes=10, num_bns=2, first_layer_conv=3):
        self.inplanes = 64
        self.num_bns = num_bns
        self.num_classes = num_classes
        super(ResNetABN, self).__init__()
        self.conv1 = conv3x3(3, 64, kernel_size=first_layer_conv)
        self.bn1 = MultiBatchNorm(64, self.num_bns)
        # Standard 4-stage ResNet layout; stages 2-4 halve spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1, num_bns=self.num_bns)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, num_bns=self.num_bns)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, num_bns=self.num_bns)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, num_bns=self.num_bns)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((512 * block.expansion), num_classes)
    def _make_layer(self, block, planes, blocks, stride=1, num_bns=2):
        """Stack `blocks` residual blocks; add a 1x1 downsample path when the
        stride or channel count changes."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = TwoInputSequential(Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), MultiBatchNorm((planes * block.expansion), num_bns))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, num_bns=num_bns))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, num_bns=num_bns))
        return TwoInputSequential(*layers)
    def forward(self, x, rf=False, domain_label=None):
        """Forward pass; returns logits, or (features, logits) when rf=True."""
        if (domain_label is None):
            # Default every sample to domain 0.
            # NOTE(review): .cuda() assumes a CUDA device is available — confirm.
            domain_label = (0 * torch.ones(x.shape[0], dtype=torch.long).cuda())
        x = self.conv1(x)
        (x, _) = self.bn1(x, domain_label)
        x = F.relu(x)
        # Each stage returns (features, domain_label-dependent extra) pairs.
        (x, _) = self.layer1(x, domain_label)
        (x, _) = self.layer2(x, domain_label)
        (x, _) = self.layer3(x, domain_label)
        (x, _) = self.layer4(x, domain_label)
        x = self.avgpool(x)
        feat = x.view(x.size(0), (- 1))
        x = self.fc(feat)
        if rf:
            return (feat, x)
        else:
            return x
.parametrize(['packet_params', 'expected_params'], [({'nu_line': 0.1, 'next_line_id': 0, 'is_last_line': True}, {'tardis_error': None, 'd_line': 1e+99}), ({'nu_line': 0.2, 'next_line_id': 1, 'is_last_line': False}, {'tardis_error': None, 'd_line': 7.e+17}), ({'nu_line': 0.5, 'next_line_id': 1, 'is_last_line': False}, {'tardis_error': utils.MonteCarloException, 'd_line': 0.0}), ({'nu_line': 0.6, 'next_line_id': 0, 'is_last_line': False}, {'tardis_error': utils.MonteCarloException, 'd_line': 0.0})])
def test_calculate_distance_line(packet_params, expected_params, static_packet, model):
    """d_line matches the expectation and MonteCarloException fires only when expected."""
    nu_line = packet_params['nu_line']
    last_line = packet_params['is_last_line']
    t_exp = model.time_explosion
    doppler = frame_transformations.get_doppler_factor(static_packet.r, static_packet.mu, t_exp)
    comoving_nu = static_packet.nu * doppler
    distance = 0
    caught_error = None
    try:
        distance = calculate_distances.calculate_distance_line(static_packet, comoving_nu, last_line, nu_line, t_exp)
    except utils.MonteCarloException:
        caught_error = utils.MonteCarloException
    assert_almost_equal(distance, expected_params['d_line'])
    assert caught_error == expected_params['tardis_error']
def test_trainable_variables():
    """trainable_variables exposes exactly the trainable subset of all variables."""
    layer, direct_vars, _modules, module_vars = setup_layer_modules_variables()
    expected = [v for v in direct_vars + module_vars if v.trainable]
    assert to_tensor_set(layer.trainable_variables) == to_tensor_set(expected)
.skip(reason='Covered more efficiently by test_train.test_run_experiment')
def test_experiment_config_parser(tmp_path):
    """Parse the cifar10/resnet110 experiment config into a temporary data dir."""
    data_dir = tmp_path / 'tmpdata'
    cfg_file = os.path.join(Config.get_dir(), 'experiments.json')
    cfg = memcnn.experiment.factory.load_experiment_config(cfg_file, ['cifar10', 'resnet110'])
    memcnn.experiment.factory.experiment_config_parser(cfg, str(data_dir), workers=None)
class NumberConverter(BaseConverter):
    """Base class for numeric URL rule converters (werkzeug-style).

    Matches a number in the URL path, optionally with a fixed digit count,
    min/max bounds, and an optional leading sign.
    """
    weight = 50

    def __init__(self, map, fixed_digits=0, min=None, max=None, signed=False):
        if signed:
            # Must run before BaseConverter.__init__ compiles the rule; with
            # the @property below this yields the '-?'-prefixed pattern string.
            self.regex = self.signed_regex
        BaseConverter.__init__(self, map)
        self.fixed_digits = fixed_digits
        self.min = min
        self.max = max
        self.signed = signed

    def to_python(self, value):
        """Convert the matched string to a number, validating digit count and bounds."""
        if (self.fixed_digits and (len(value) != self.fixed_digits)):
            raise ValidationError()
        # num_convert is expected to be provided by subclasses (e.g. int).
        value = self.num_convert(value)
        if (((self.min is not None) and (value < self.min)) or ((self.max is not None) and (value > self.max))):
            raise ValidationError()
        return value

    def to_url(self, value):
        """Convert a number back to its URL form, zero-padding fixed digits."""
        value = self.num_convert(value)
        if self.fixed_digits:
            value = (('%%0%sd' % self.fixed_digits) % value)
        return str(value)

    # Fix: without @property, ``self.regex = self.signed_regex`` in __init__
    # stored a *bound method* instead of the regex string, breaking rule
    # compilation for signed converters (matches upstream werkzeug).
    @property
    def signed_regex(self):
        return ('-?' + self.regex)
def spline_basis(pseudo: torch.Tensor, kernel_size: torch.Tensor, is_open_spline: torch.Tensor, degree: int) -> Tuple[(torch.Tensor, torch.Tensor)]:
    """Evaluate the B-spline basis via the registered torch_spline_conv op."""
    op = torch.ops.torch_spline_conv.spline_basis
    return op(pseudo, kernel_size, is_open_spline, degree)
class Options():
    """Command-line options for the GANomaly training/evaluation scripts.

    Fix: the `--display_server` default string was broken/truncated in this
    copy; restored to the standard visdom default 'http://localhost'.
    """

    def __init__(self):
        # Build the argument parser; defaults appear in --help output.
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        # --- data / loader options ---
        self.parser.add_argument('--dataset', default='cifar10', help='folder | cifar10 | mnist ')
        self.parser.add_argument('--dataroot', default='', help='path to dataset')
        self.parser.add_argument('--batchsize', type=int, default=64, help='input batch size')
        self.parser.add_argument('--workers', type=int, help='number of data loading workers', default=8)
        self.parser.add_argument('--droplast', action='store_true', default=True, help='Drop last batch size.')
        self.parser.add_argument('--isize', type=int, default=32, help='input image size.')
        self.parser.add_argument('--nc', type=int, default=3, help='input image channels')
        # --- model architecture ---
        self.parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
        self.parser.add_argument('--ngf', type=int, default=64)
        self.parser.add_argument('--ndf', type=int, default=64)
        self.parser.add_argument('--extralayers', type=int, default=0, help='Number of extra layers on gen and disc')
        # --- device / experiment bookkeeping ---
        self.parser.add_argument('--device', type=str, default='gpu', help='Device: gpu | cpu')
        self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        self.parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
        self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment')
        self.parser.add_argument('--model', type=str, default='ganomaly', help='chooses which model to use. ganomaly')
        # Fix: restored the truncated default value for the visdom server.
        self.parser.add_argument('--display_server', type=str, default='http://localhost', help='visdom server of the web display')
        self.parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
        self.parser.add_argument('--display_id', type=int, default=0, help='window id of the web display')
        self.parser.add_argument('--display', action='store_true', help='Use visdom.')
        self.parser.add_argument('--outf', default='./output', help='folder to output images and model checkpoints')
        self.parser.add_argument('--manualseed', default=(- 1), type=int, help='manual seed')
        self.parser.add_argument('--abnormal_class', default='car', help='Anomaly class idx for mnist and cifar datasets')
        self.parser.add_argument('--proportion', type=float, default=0.1, help='Proportion of anomalies in test set.')
        self.parser.add_argument('--metric', type=str, default='roc', help='Evaluation metric.')
        # --- training options ---
        self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        self.parser.add_argument('--save_image_freq', type=int, default=100, help='frequency of saving real and fake images')
        self.parser.add_argument('--save_test_images', action='store_true', help='Save test images for demo.')
        self.parser.add_argument('--load_weights', action='store_true', help='Load the pretrained weights')
        self.parser.add_argument('--resume', default='', help='path to checkpoints (to continue training)')
        self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        self.parser.add_argument('--iter', type=int, default=0, help='Start from iteration i')
        self.parser.add_argument('--niter', type=int, default=15, help='number of epochs to train for')
        self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        self.parser.add_argument('--w_adv', type=float, default=1, help='Adversarial loss weight')
        self.parser.add_argument('--w_con', type=float, default=50, help='Reconstruction loss weight')
        self.parser.add_argument('--w_enc', type=float, default=1, help='Encoder loss weight.')
        self.isTrain = True
        self.opt = None

    def parse(self):
        """Parse sys.argv, set up GPU ids, create output dirs and dump options.

        Returns the parsed argparse Namespace (also stored on self.opt).
        """
        self.opt = self.parser.parse_args()
        self.opt.isTrain = self.isTrain
        # Turn the comma-separated gpu id string into a list of non-negative ints.
        str_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_ids:
            gpu_id = int(str_id)  # renamed from `id` to stop shadowing the builtin
            if (gpu_id >= 0):
                self.opt.gpu_ids.append(gpu_id)
        if (self.opt.device == 'gpu'):
            torch.cuda.set_device(self.opt.gpu_ids[0])
        args = vars(self.opt)
        if (self.opt.name == 'experiment_name'):
            self.opt.name = ('%s/%s' % (self.opt.model, self.opt.dataset))
        # Create per-experiment output directories.
        expr_dir = os.path.join(self.opt.outf, self.opt.name, 'train')
        test_dir = os.path.join(self.opt.outf, self.opt.name, 'test')
        if (not os.path.isdir(expr_dir)):
            os.makedirs(expr_dir)
        if (not os.path.isdir(test_dir)):
            os.makedirs(test_dir)
        # Persist the resolved options for reproducibility.
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write(' Options \n')
            for (k, v) in sorted(args.items()):
                opt_file.write(('%s: %s\n' % (str(k), str(v))))
            opt_file.write(' End \n')
        return self.opt
def resize_target(target, size):
    """Nearest-neighbour resize of a batch of label maps to (size, size).

    Returns an int64 torch tensor of shape (batch, size, size).
    """
    resized = np.zeros((target.shape[0], size, size), np.int32)
    for idx, plane in enumerate(target.numpy()):
        resized[idx, ...] = cv2.resize(plane, (size, size), interpolation=cv2.INTER_NEAREST)
    return torch.from_numpy(resized).long()
def _get_test_keep_instance_predicate(cfg: CfgNode):
    """Return the instance-keep predicate used at test time (only the general
    predicate applies; no test-specific filtering)."""
    return _maybe_create_general_keep_instance_predicate(cfg)
class ConvTBC(torch.nn.Module):
    """1-D convolution over inputs laid out as (time, batch, channel),
    implemented with the torch.conv_tbc primitive."""

    def __init__(self, in_channels, out_channels, kernel_size, padding=0):
        super(ConvTBC, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _single(kernel_size)
        self.padding = _single(padding)
        weight_shape = (self.kernel_size[0], in_channels, out_channels)
        self.weight = torch.nn.Parameter(torch.Tensor(*weight_shape))
        self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-normal weights, zero bias."""
        nn.init.xavier_normal_(self.weight)
        nn.init.zeros_(self.bias)

    def conv_tbc(self, input: Tensor):
        """Apply the TBC convolution to a (time, batch, channel) tensor."""
        return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding[0])

    def forward(self, input: Tensor):
        return self.conv_tbc(input)

    def __repr__(self):
        template = '{name}({in_channels}, {out_channels}, kernel_size={kernel_size}, padding={padding}'
        if (self.bias is None):
            template += ', bias=False'
        template += ')'
        return template.format(name=self.__class__.__name__, **self.__dict__)
class _TensorMixin(_TensorMixinBase):
def from_tensor(x) -> Tensor:
    """Wrap the given TF tensor `x` (fully defined static shape required) in a Tensor.

    NOTE(review): defined without `self`; presumably used as a static/class-level
    factory — confirm how callers invoke it.
    """
    assert x.get_shape().is_fully_defined()
    x_shape = x.get_shape().as_list()
    # No batch dim is assumed (batch_dim_axis=None); name/dtype come from the TF op.
    return _t.Tensor(name=str(x.op.name), shape=x_shape, batch_dim_axis=None, dtype=x.dtype.name, placeholder=x)
def template_from_constant(x, name, dtype=None, shape=None, with_batch_dim=False, sparse_dim=None, feature_dim=None) -> Tensor:
    """Build a Tensor template describing the constant `x` (no placeholder is attached).

    :param x: bool|int|float|numpy.ndarray constant value
    :param name: name for the new tensor
    :param dtype: inferred from the type of `x` when None
    :param shape: sequence of Dim|int; inferred from `x` when None
    :param with_batch_dim: prepend the global batch dim
    :param sparse_dim: passed through to the Tensor
    :param feature_dim: passed through to the Tensor

    NOTE(review): defined without `self`; presumably a static/class-level factory — confirm usage.
    """
    import numpy

    # Infer dtype from the Python/numpy type of x when not given.
    if (dtype is None):
        if isinstance(x, bool):
            dtype = 'bool'
        elif isinstance(x, int):
            dtype = 'int32'
        elif isinstance(x, float):
            dtype = 'float32'
        elif isinstance(x, numpy.ndarray):
            dtype = str(x.dtype)
        else:
            raise TypeError(('%r: cannot handle value %r of type %r' % (name, x, type(x))))
    # shape_ is the actual shape of the value (empty tuple for scalars).
    shape_ = (x.shape if isinstance(x, numpy.ndarray) else ())
    if (shape is not None):
        # Chained comparison: explicit shape given but value is scalar (len(shape_) == 0)
        # — allowed, the scalar is treated as broadcastable to the given shape.
        if (len(shape) > len(shape_) == 0):
            pass
        else:
            assert (len(shape) == len(shape_)), ('%r: shape does not match in ndim, %r vs %r' % (name, shape, shape_))
    else:
        shape = shape_
    # Turn each entry of `shape` into a Dim tag, validating against the value's real shape.
    dim_tags = []
    for (i, d) in enumerate(shape):
        d_ = (shape_[i] if (len(shape_) > 0) else None)
        if isinstance(d, Dim):
            if (len(shape_) > 0):
                assert (d.dimension == d_)
        elif isinstance(d, int):
            if (len(shape_) > 0):
                assert (d == d_)
            # Last axis becomes the feature dim, all others spatial.
            d = Dim(kind=(Dim.Types.Spatial if (i < (len(shape) - 1)) else Dim.Types.Feature), description=('%s:static:%i' % (name, i)), auto_generated=True, dimension=d)
        else:
            raise TypeError(('%r shape[%i] invalid type %r in shape %r' % (name, i, type(d), shape)))
        dim_tags.append(d)
    if (with_batch_dim and (batch_dim not in dim_tags)):
        dim_tags.insert(0, batch_dim)
    return _t.Tensor(name=name, dim_tags=dim_tags, dtype=dtype, sparse_dim=sparse_dim, feature_dim=feature_dim)
def _handle_extra_kwargs(self, *, shape=None, sparse=None, dim=NotSpecified, batch_dim_axis=NotSpecified, dim_tags=None, placeholder=None, size_placeholder=None, auto_create_placeholders=False, vocab=None, same_dim_tags_as=None, **kwargs):
    """Apply legacy/extra constructor kwargs (vocab, placeholders, dim-tag aliasing) to self.

    Called after the core Tensor construction; mutates self in place and
    finishes with a (non-complete) sanity check.
    """
    assert isinstance(self, _t.Tensor)
    # No-op tuple: marks the legacy args as deliberately consumed (handled elsewhere).
    (shape, sparse, dim, batch_dim_axis, dim_tags)
    if (vocab is not None):
        from returnn.datasets.util.vocabulary import Vocabulary
        # Accept a vocab file path or a kwargs dict, normalize to a Vocabulary.
        if isinstance(vocab, str):
            vocab = Vocabulary(vocab)
        elif isinstance(vocab, dict):
            vocab = Vocabulary.create_vocab(**vocab)
        assert isinstance(vocab, Vocabulary)
        assert self.sparse, ('%s should represent indices of %s' % (self, vocab))
        assert (self.dim == vocab.num_labels), ('%s dims do not match with vocab %s' % (self, vocab))
        self.sparse_dim.vocab = vocab
    if kwargs:
        # Any remaining kwargs go into the lazily-created extra container.
        self._extra = _TensorExtra(tensor=self, **kwargs)
    if size_placeholder:
        self.size_placeholder = size_placeholder
    if same_dim_tags_as:
        # Declare given dims as identical to our dims at the described axes.
        for (_axis, _dim_tag) in sorted(same_dim_tags_as.items()):
            _axis = self.get_axis_from_description(_axis)
            assert isinstance(_dim_tag, Dim)
            base_tag = self._dims[_axis]
            if (base_tag != _dim_tag):
                base_tag.declare_same_as(_dim_tag)
            self._dims = ((self._dims[:_axis] + (_dim_tag,)) + self._dims[(_axis + 1):])
    if (placeholder is not None):
        self.raw_tensor = placeholder
    elif auto_create_placeholders:
        # TF-specific: create the placeholder and the per-dim size placeholders.
        from returnn.tf.frontend_low_level._backend import TFBackend
        self.raw_tensor = TFBackend.create_placeholder_raw(self)
        _auto_create_size_placeholders_on_dim_tags(name=self.name, dim_tags=self._dims)
    self._adapt_batch_consistent_dim_tags()
    self.sanity_check(assume_complete=False)
@property
def _raw_backend(self) -> Optional[Type[Backend]]:
    """Backend class for the current raw tensor type, or None if no raw tensor is set.

    Fix: restored the @property decorator — callers throughout this file access
    `self._raw_backend` as an attribute (e.g. `backend = self._raw_backend`,
    `self._raw_backend.runtime_sanity_checks(self)`), which would otherwise
    yield a bound method instead of the backend.
    """
    import returnn.frontend._backend as _backend_api

    if self._raw_tensor is None:
        return None
    return _backend_api.get_backend_by_raw_tensor_type(type(self._raw_tensor))
@property
def control_flow_ctx(self) -> Optional[ControlFlowContext]:
    """Control-flow context this tensor lives in, or None (stored in the extra container)."""
    if not self._extra:
        return None
    return self._extra.control_flow_ctx

# Fix: the original line `_flow_ctx.setter` was a mangled decorator referencing an
# undefined name (NameError at class creation); restored the intended property pair.
@control_flow_ctx.setter
def control_flow_ctx(self, value: Optional[ControlFlowContext]):
    if value == self.control_flow_ctx:
        return  # unchanged; avoid creating the extra container unnecessarily
    self._make_extra().control_flow_ctx = value
@property
def available_for_inference(self) -> bool:
    """Whether this tensor is available at inference time (default True)."""
    if not self._extra:
        return True
    return self._extra.available_for_inference

# Fix: the original line `_for_inference.setter` was a mangled decorator referencing an
# undefined name (NameError at class creation); restored the intended property pair.
@available_for_inference.setter
def available_for_inference(self, value: bool):
    if value == self.available_for_inference:
        return  # unchanged; avoid creating the extra container unnecessarily
    self._make_extra().available_for_inference = value
def _make_extra(self: Tensor) -> _TensorExtra:
    """Return self._extra, creating it on first use."""
    extra = self._extra
    if not extra:
        extra = _TensorExtra(tensor=self)
        self._extra = extra
    return extra
def sanity_check(self, ignore_placeholder=False, assume_complete=True):
    """Consistency checks on special axes, dim tags, batch info and the raw tensor.

    :param ignore_placeholder: skip checks involving the raw tensor / dyn-size placeholders
    :param assume_complete: additionally require dynamic dims to have defined sizes
    """
    # Special axes must be valid indices and must not collide with the batch axis.
    special_axes_dict = {'time_dim_axis': self.time_dim_axis, 'feature_dim_axis': self.feature_dim_axis}
    batch_dim_axis = self.batch_dim_axis
    batch_ndim = self.batch_ndim
    for (axis_name, axis) in special_axes_dict.items():
        assert ((axis is None) or (0 <= axis < batch_ndim)), ('%s: axis %s (%i) invalid' % (self, axis_name, axis))
    if (batch_dim_axis is not None):
        for (axis_name, axis) in special_axes_dict.items():
            assert (axis != batch_dim_axis), ('%s: axis %s (%i) must be different from batch_dim_axis (%i)' % (self, axis_name, axis, batch_dim_axis))
    if (self.sparse_dim is not None):
        assert (special_axes_dict['feature_dim_axis'] is None), ('%s: If sparse, there cannot be a feature dim axis.' % self)
    # Per-dim-tag checks: batch dim position, batch-info consistency, dyn-size dtypes.
    for (axis, tag) in enumerate(self._dims):
        if tag.is_batch_dim():
            assert (axis == batch_dim_axis), ('%s: invalid %s' % (self, tag))
            continue
        if (tag.batch and self.batch):
            assert ((tag.batch == self.batch) or self.batch.is_broadcast())
        if tag.dyn_size_ext:
            assert (tag.dyn_size_ext.dtype in {'int32', 'int64'})
            if tag.dyn_size_ext.have_batch_axis():
                assert (tag.batch == tag.dyn_size_ext.batch)
    if ((not ignore_placeholder) and (self._raw_tensor is not None)):
        # The raw tensor's known shape and dtype must match our metadata.
        backend = self._raw_backend
        raw_shape = backend.get_known_shape_raw(self._raw_tensor)
        assert (len(raw_shape) == batch_ndim), f'Mismatching shape ndim: Raw tensor {raw_shape} vs Tensor {self}'
        for i in range(batch_ndim):
            if (self._dims[i].dimension is None):
                continue  # dynamic dim: raw size may vary
            if (raw_shape[i] != self._dims[i].dimension):
                raise Exception((f'''Mismatching shape: Raw tensor {raw_shape} vs Tensor {self};
''' + backend.format_graph_output(self._raw_tensor, max_depth=3)))
        backend.set_known_shape_raw(self._raw_tensor, self.batch_shape)
        assert (backend.get_dtype_name_raw(self._raw_tensor) == self.dtype), f'{self} dtype {self.dtype} does not match raw tensor dtype {backend.get_dtype_name_raw(self._raw_tensor)}'
    if assume_complete:
        # Dynamic dims must have (or be able to complete) their size tensors.
        for tag in self._dims:
            if tag.is_batch_dim():
                continue
            if tag.is_dynamic():
                assert tag.dyn_size_ext, ('%s sanity_check: dynamic dim %s undefined' % (self, tag))
                if (not ignore_placeholder):
                    if (tag.dyn_size_ext.placeholder is None):
                        tag.complete_dyn_size()
                    if (self.placeholder is not None):
                        assert (tag.dyn_size_ext.placeholder is not None), ('%s sanity_check: dynamic dim %s value unknown' % (self, tag))
            assert tag.is_dim_known()
def get_runtime_sanity_check_op(self: Tensor):
    """Build and return the backend op that performs runtime sanity checks on this tensor."""
    assert self._raw_tensor is not None
    backend = self._raw_backend
    return backend.runtime_sanity_checks(self)
def verify_out_shape(self, out_shape, allow_missing_implicit_dims=False):
    """Verify that `out_shape` (a set of Dim / ImplicitDim / OptionalDim) matches self's dims.

    :param out_shape: expected set of dims (including implicit dims)
    :param allow_missing_implicit_dims: tolerate implicit dims of self missing in out_shape
    :raises VerifyOutShapeException: on any mismatch
    """
    # Pre-rendered strings for error messages.
    actual_dims_str = ('{%s}' % ', '.join([str(d) for d in (list(self.dim_tags) + sorted(self.dim_tags_set_implicit_only_wrapped))]))
    expected_dims_str = ('{%s}' % ', '.join([str(d) for d in sorted(out_shape)]))
    self_dim_tags = self.dim_tags_set_implicit
    self_dim_tags_implicit_only = self.dim_tags_set_implicit_only_wrapped
    if (not out_shape):
        # Empty out_shape is only valid for a dim-less tensor.
        if self_dim_tags:
            raise VerifyOutShapeException((('%s verify_out_shape:\n' % self) + ('Actual dims: %s\nExpected empty out_shape: %s' % (actual_dims_str, expected_dims_str))))
        return
    if (not isinstance(out_shape, set)):
        raise TypeError(('%s verify_out_shape: expects a set but got %s' % (self, type(out_shape))))
    # Tick off each expected dim against self's dims.
    remaining = set(self_dim_tags)
    for dim in out_shape:
        if isinstance(dim, Dim):
            dim_tag = dim
        elif isinstance(dim, _m.ImplicitDim):
            dim_tag = dim.tag
            if (dim not in self_dim_tags_implicit_only):
                raise VerifyOutShapeException(('%s verify_out_shape:\nActual dims: %s\nExpected out_shape: %s\n%s is not an implicit dim in self' % (self, actual_dims_str, expected_dims_str, dim)))
        elif isinstance(dim, _m.OptionalDim):
            dim_tag = dim.tag
            if (dim_tag not in remaining):
                continue  # optional dim absent in self: fine
        else:
            raise TypeError(('%s verify_out_shape with out_shape %s: expect dim tags but got %s' % (self, out_shape, type(dim))))
        if (dim_tag not in remaining):
            if (dim_tag in self_dim_tags):
                # Already ticked off once -> listed twice in out_shape.
                raise VerifyOutShapeException(((('%s verify_out_shape does not match:\n' % self) + ('Actual dims: %s\nExpected out_shape: %s\n' % (actual_dims_str, expected_dims_str))) + ('Dim %s multiple times in out_shape' % dim)))
            raise VerifyOutShapeException(((('%s verify_out_shape:\n' % self) + ('Actual dims: %s\nExpected out_shape: %s\n' % (actual_dims_str, expected_dims_str))) + ('Dim %s not in self' % dim)))
        remaining.discard(dim_tag)
    if remaining:
        # Dims of self not covered by out_shape; optionally tolerated for implicit-only dims.
        if (allow_missing_implicit_dims and remaining.issubset(self.dim_tags_set_implicit_only)):
            pass
        else:
            raise VerifyOutShapeException(((('%s verify_out_shape missing dims:\n' % self) + ('Actual dims: %s\nExpected out_shape: %s\n' % (actual_dims_str, expected_dims_str))) + ('Missing dims: %s' % ', '.join(map(str, sorted(remaining))))))
def get_placeholder_kwargs(self, with_batch=True):
    """Kwargs (name/dtype/shape) suitable for creating a placeholder for this tensor."""
    shape = self.batch_shape if with_batch else self.shape
    return dict(name=self.name, dtype=self.dtype, shape=shape)
def get_axes_with_size(self):
    """Axes (counted without batch dim) whose static dim is None, i.e. dynamic axes."""
    axes = []
    for axis, static_dim in enumerate(self.shape):
        if static_dim is None:
            axes.append(axis)
    return axes
def get_kwargs(self, *, include_special_axes=True):
    """Return kwargs suitable to reconstruct an equivalent Tensor via the constructor.

    :param include_special_axes: include explicitly-set time/feature axes
    """
    keys = ['name', 'dims', 'dtype']
    if include_special_axes:
        # Only include axes that were explicitly specified by the user.
        if ((self.version <= 1) and (self.time_dim_axis_or_unspecified is not NotSpecified)):
            keys += ['time_dim_axis']
        if (self.feature_dim_axis_or_unspecified is not NotSpecified):
            keys += ['feature_dim_axis']
    if self.sparse_dim:
        keys += ['sparse_dim']
    if (self.version == 1):
        keys += ['version']
    if self._extra:
        # Optional extra state, only present when set.
        if (self.batch is not None):
            keys += ['batch']
        if (self.beam is not None):
            keys += ['beam']
        if self.control_flow_ctx:
            keys += ['control_flow_ctx']
        if (not self.available_for_inference):
            keys += ['available_for_inference']
    return {key: getattr(self, key) for key in keys}
def get_description(self, with_name=True, with_placeholder=False, catch_exceptions=False):
    """Human-readable description string, e.g. Tensor{'name', [B,T,F], ...}.

    :param with_name: include the tensor name
    :param with_placeholder: include the placeholder repr
    :param catch_exceptions: render exceptions inline instead of raising (used by __repr__)
    """
    # Collect which attributes to render besides the shape.
    keys = []
    if self.sparse:
        keys.append('dtype')
        keys.append('sparse_dim')
    elif (self.dtype != 'float32'):
        keys.append('dtype')  # float32 is the default, only show deviations
    if with_placeholder:
        keys.append('placeholder')
    if (not self.available_for_inference):
        keys.append('available_for_inference')
    if (self.beam is not None):
        # Only show the beam when it is not already implied by the batch info.
        if ((not self.batch) or (self.batch.beam != self.beam)):
            keys.append('beam')
    args = []
    if with_name:
        name = getattr(self, 'name', None)
        args += [(repr(name) if name else '<undefined>')]
    try:
        batch_shape_meta = ('[%s]' % ','.join(self.get_batch_axes_short_description()))
    except Exception as exc:
        if catch_exceptions:
            batch_shape_meta = ('<!%s: %s>' % (type(exc).__name__, exc))
        else:
            raise
    args += [batch_shape_meta]
    for key in keys:
        try:
            value_repr = repr(getattr(self, key))
        except Exception as exc:
            if catch_exceptions:
                value_repr = ('<!%s: %s>' % (type(exc).__name__, exc))
            else:
                raise
        args += [('%s=%s' % (key, value_repr))]
    if self.control_flow_ctx:
        try:
            value_repr = self.control_flow_ctx.repr_inner()
        except Exception as exc:
            if catch_exceptions:
                value_repr = ('<!%s: %s>' % (type(exc).__name__, exc))
            else:
                raise
        args += [('ctx=' + value_repr)]
    return ('Tensor{%s}' % ', '.join(args))
def get_batch_axes_short_description(self, special_axes=True):
    """Per-axis short labels, e.g. ['B', 'T|'time'[B]', 'F|64'].

    :param special_axes: also mark time ('T') and feature ('F') axes
    :return: list of strings, one per axis
    """
    res = []
    for (axis, dim_tag) in enumerate(self.dim_tags):
        descriptions = []
        if (axis == self.batch_dim_axis):
            if self.batch:
                descriptions.append(self.batch.short_repr())
            else:
                descriptions.append('B?')  # batch axis with unknown batch info
        if special_axes:
            if (axis == self.time_dim_axis):
                descriptions.append('T')
            if (axis == self.feature_dim_axis):
                descriptions.append('F')
        if (self.batch_shape[axis] is None):
            # Dynamic axis: show the dim tag, unless it is the batch axis itself.
            if (axis == self.batch_dim_axis):
                pass
            else:
                descriptions.append(dim_tag.short_repr())
        elif ((axis != self.batch_dim_axis) or (not self.batch)):
            descriptions.append(dim_tag.short_repr())
        res.append(('|'.join(descriptions) or '?'))
    return res
def get_compare_key(self):
    """Tuple of the attributes relevant for comparing tensors."""
    return (
        self.dtype,
        self.shape,
        self.batch_dim_axis,
        self.feature_dim_axis,
        self.time_dim_axis,
        self.dim_tags,
        self.batch,
        self.beam,
    )
def __repr__(self):
    """Same as get_description, but renders internal errors instead of raising."""
    return self.get_description(catch_exceptions=True)
def __hash__(self):
    # Identity-based hash: tensors are mutable, so value-based hashing would be unsafe.
    return id(self)
def _sis_hash(self):
    """Sisyphus hash: delegate to the raw tensor if it supports it, else hash the kwargs."""
    raw = self.raw_tensor
    if raw is not None and hasattr(raw, '_sis_hash'):
        return raw._sis_hash()
    from sisyphus.hash import sis_hash_helper

    return sis_hash_helper(self.get_kwargs())
def __getstate__(self):
    """Pickle support; the raw tensor itself is not picklable and is dropped."""
    state = {key: getattr(self, key) for key in self.__slots__}
    state['_raw_tensor'] = None
    return state
def __setstate__(self, state):
    """Restore from the dict produced by __getstate__."""
    for key, value in state.items():
        setattr(self, key, value)
def reset(self: Tensor):
    """Drop the raw tensor and batch info, keeping the dims/dtype metadata."""
    self._raw_tensor = None
    self.batch = None
def _adapt_batch_consistent_dim_tags(self):
    """Replace each dim tag by its variant for our current batch info / control-flow ctx.

    No-op when there is no extra container or no batch info set.
    """
    if (not self._extra):
        return
    if (not self.batch):
        return
    dims = tuple((tag.get_for_batch_ctx(batch=self.batch, ctx=self.control_flow_ctx) for tag in self._dims))
    assert all(dims)
    dims: Tuple[(Dim, ...)]
    self._dims = dims
def copy(self, name: Optional[str] = None) -> _t.Tensor:
    """Shallow copy (the raw tensor reference is shared); optionally rename."""
    out = _t.Tensor(**self.get_kwargs())
    out._raw_tensor = self._raw_tensor
    if name:
        out.name = name
    return out
def copy_as_batch_major(self) -> _t.Tensor:
    """Copy with the batch dim moved to axis 0."""
    return self.copy_with_batch_dim_axis(0)
def copy_as_time_major(self) -> _t.Tensor:
    """Copy with the time dim moved to axis 0 (requires a time axis)."""
    assert self.time_dim_axis is not None
    return self.copy_with_time_dim_axis(0)
def copy_with_batch_dim_axis(self, batch_dim_axis) -> _t.Tensor:
    """Copy with the batch dim moved to the given axis (requires a batch axis)."""
    assert self.batch_dim_axis is not None
    return self.copy_move_axis(self.batch_dim_axis, batch_dim_axis)
def copy_with_time_dim_axis(self, time_dim_axis) -> _t.Tensor:
    """Copy with the time dim moved to the given axis (requires a time axis)."""
    assert self.time_dim_axis is not None
    return self.copy_move_axis(self.time_dim_axis, time_dim_axis)
def copy_transpose(self: Tensor, perm: Sequence[Union[(int, Dim)]], *, allow_int: bool=True) -> _t.Tensor:
    """Copy with axes permuted according to `perm` (either all ints or all Dims).

    :param perm: permutation of all axes, as axis indices or dim tags
    :param allow_int: whether int axis indices are accepted
    """
    assert (len(perm) == len(self._dims)), f'{self}: invalid perm {perm!r} length'
    if (not perm):
        return self.copy()  # zero-dim tensor: nothing to permute
    # The type of the first entry decides the interpretation of the whole perm.
    if (allow_int and isinstance(perm[0], int)):
        assert all((isinstance(a, int) for a in perm)), f'{self}: invalid perm {perm!r} types'
        assert (set(perm) == set(range(len(perm)))), f'{self}: invalid perm {perm!r}'
        return self._copy_compatible_to_dims_with_perm([self._dims[i] for i in perm], perm)
    else:
        assert all((isinstance(a, Dim) for a in perm)), f'{self}: invalid perm {perm!r} types'
        return self.copy_compatible_to_dims(perm)
def copy_move_axis(self, old_axis, new_axis) -> _t.Tensor:
    """Copy with the axis at `old_axis` moved to `new_axis` (negative axes allowed)."""
    ndim = self.batch_ndim
    if old_axis < 0:
        old_axis += ndim
        assert old_axis >= 0
    assert 0 <= old_axis < ndim
    if new_axis < 0:
        new_axis += ndim
        assert new_axis >= 0
    assert 0 <= new_axis < ndim
    if old_axis == new_axis:
        return self.copy()
    perm = list(range(ndim))
    perm.insert(new_axis, perm.pop(old_axis))
    return self.copy_transpose(perm)
def copy_swap_axes(self, axis1, axis2) -> _t.Tensor:
    """Copy with the two given axes exchanged (negative axes allowed)."""
    ndim = self.batch_ndim
    if axis1 < 0:
        axis1 += ndim
    assert 0 <= axis1 < ndim
    if axis2 < 0:
        axis2 += ndim
    assert 0 <= axis2 < ndim
    if axis1 == axis2:
        return self.copy()
    perm = list(range(ndim))
    perm[axis1], perm[axis2] = perm[axis2], perm[axis1]
    return self.copy_transpose(perm)
def copy_as_bt_or_tb_major(self) -> _t.Tensor:
    """Copy with batch and time occupying the two leading axes, in either order."""
    assert self.have_batch_axis() and self.have_time_axis()
    if self.batch_dim_axis == 0:
        return self.copy_with_time_dim_axis(1)
    if self.time_dim_axis == 0:
        return self.copy_with_batch_dim_axis(1)
    # Neither is leading yet: bring one to the front, then recurse once.
    if self.batch_dim_axis > self.time_dim_axis:
        return self.copy_as_time_major().copy_as_bt_or_tb_major()
    return self.copy_as_batch_major().copy_as_bt_or_tb_major()
def copy_with_feature_dim_axis(self, feature_dim_axis) -> _t.Tensor:
    """Copy with the feature dim moved to the given axis (requires a feature axis)."""
    assert self.feature_dim_axis is not None
    return self.copy_move_axis(self.feature_dim_axis, feature_dim_axis)
def copy_as_batch_feature_major(self) -> _t.Tensor:
    """Copy with batch as axis 0 and feature as axis 1."""
    assert self.batch_dim_axis is not None
    assert self.feature_dim_axis is not None
    out = self.copy_as_batch_major()
    return out.copy_with_feature_dim_axis(1)
def copy_as_time_batch_major(self) -> _t.Tensor:
    """Copy with time as axis 0 and batch as axis 1."""
    assert self.have_batch_axis() and self.have_time_axis()
    out = self.copy_as_bt_or_tb_major()
    if out.time_dim_axis == 1:
        # Got batch-time order; swap the two leading axes.
        out = out.copy_move_axis(0, 1)
    return out
def copy_as_batch_spatial_major(self) -> _t.Tensor:
    """Copy ordered as (batch, spatial..., feature), with spatial axes in size-placeholder order."""
    data = self.copy_as_batch_major()
    if (data.feature_dim_axis is not None):
        data = data.copy_with_feature_last()
    if data.size_placeholder:
        # Move each dynamic spatial axis right after the batch axis, in key order.
        for (i, (j, size)) in enumerate(sorted(data.size_placeholder.items())):
            data = data.copy_move_axis(data.get_batch_axis(j), (i + 1))
    if (data.feature_dim_axis is not None):
        assert (data.feature_dim_axis == (data.batch_ndim - 1))
        # If the explicit feature axis now equals the derived default, drop the explicit setting.
        if (data.feature_dim_axis_or_unspecified is not NotSpecified):
            if (data._default_feature_dim_axis() == data.feature_dim_axis):
                data.feature_dim_axis = NotSpecified
    return data
def copy_with_feature_last(self) -> _t.Tensor:
    """Copy with the feature axis moved to the last position."""
    assert self.feature_dim_axis is not None
    return self.copy_with_feature_dim_axis(-1)
def copy_add_batch_dim(self, batch_dim_axis, batch=None, dim_tag=None) -> _t.Tensor:
    """Copy with a new batch dim inserted at `batch_dim_axis`.

    :param batch_dim_axis: insertion position (negative counts from the new ndim)
    :param batch: BatchInfo for the new dim (optional)
    :param dim_tag: explicit batch Dim to use (optional)

    Fix: the size computation and `backend.expand_raw` call are now nested inside
    the `placeholder is not None` branch — previously `backend` (defined only in
    that branch) was referenced unconditionally, raising NameError for tensors
    without a placeholder.
    """
    if self.have_batch_axis():
        raise Exception(f'{self} copy_add_batch_dim: already has batch-dim at axis {self.batch_dim_axis}, cannot add tag {dim_tag!r}')
    assert self.batch_dim_axis is None
    if batch_dim_axis < 0:
        assert batch_dim_axis + self.batch_ndim + 1 >= 0
        batch_dim_axis += self.batch_ndim + 1
    assert 0 <= batch_dim_axis <= self.batch_ndim
    data_opts = self.get_kwargs(include_special_axes=False)
    placeholder = self.placeholder
    if placeholder is not None:
        backend = self._raw_backend
        placeholder = backend.expand_dims_raw(placeholder, batch_dim_axis)
        # Determine the runtime batch size for tiling the new axis.
        if batch:
            batch_dim_ = batch.dim
        elif dim_tag:
            if dim_tag.dyn_size_ext:
                assert dim_tag.dyn_size_ext.dims == ()
                assert dim_tag.dyn_size_ext.raw_tensor is not None
                batch_dim_ = dim_tag.dyn_size_ext.raw_tensor
            elif dim_tag.dimension:
                batch_dim_ = dim_tag.dimension
            else:
                raise Exception(f'{self} copy_add_batch_dim: unknown batch dim for {dim_tag!r}')
        else:
            raise Exception(f'{self} copy_add_batch_dim: unknown batch dim ')
        # Size 1 needs no expansion (the expand_dims already gives a size-1 axis).
        if (not isinstance(batch_dim_, int)) or (batch_dim_ != 1):
            placeholder = backend.expand_raw(placeholder, batch_dim_axis, batch_dim_)
    dim_tags = list(self.dim_tags)
    if dim_tag:
        assert dim_tag.is_batch_dim()
        assert dim_tag.batch == batch
        if batch:
            assert (dim_tag.dimension == batch.static_dim) or (dim_tag.dimension is None)
    elif batch:
        dim_tag = batch.batch_dim_tag
    else:
        dim_tag = Dim(kind=Dim.Types.Batch, description='batch', dimension=(batch.static_dim if batch else None), batch=batch)
    dim_tags.insert(batch_dim_axis, dim_tag)
    data_opts['dims'] = dim_tags
    if batch:
        data_opts['batch'] = batch
        data_opts['beam'] = batch.beam
    # Shift the recorded special axes to account for the inserted axis.
    other_special_axes = self.get_special_axes_dict(counted_with_batch_dim=True, only_available=True)
    for k, a in other_special_axes.items():
        data_opts[k] = a if (a < batch_dim_axis) else (a + 1)
    return _t.Tensor(placeholder=placeholder, **data_opts)
def copy_add_spatial_dim(self, spatial_dim_axis=None, dim=1, auto_time_dim_axis=True) -> _t.Tensor:
    """Copy with an added spatial dim of size `dim` (unbroadcast).

    :param spatial_dim_axis: insertion position; derived from the new dim tag when None
    :param dim: static size of the new dim; None only allowed for templates (no placeholder)
    :param auto_time_dim_axis: mark the new axis as time axis if none exists yet
    """
    if (dim is None):
        # Unknown size only makes sense for a template without actual data.
        assert (not self.placeholder)
    dim_tag = Dim(description='added_spatial', dimension=dim, kind=Dim.Types.Spatial)
    if (spatial_dim_axis is None):
        spatial_dim_axis = self.get_default_new_axis_for_dim_tag(dim_tag)
    v = self.copy_add_dim_by_tag(dim_tag, unbroadcast=True, axis=spatial_dim_axis)
    if (auto_time_dim_axis and (self.time_dim_axis is None)):
        v.time_dim_axis = spatial_dim_axis
    return v
def copy_add_feature_dim(self, axis=None) -> _t.Tensor:
    """Copy with an added feature dim of size 1.

    For sparse tensors (which cannot have a feature axis), a spatial dim is added instead.

    :param axis: insertion position; derived from the new dim tag when None
    """
    if self.sparse:
        # Sparse tensors cannot have a feature dim axis; fall back to a spatial dim.
        return self.copy_add_spatial_dim(spatial_dim_axis=axis)
    dim_tag = Dim(description='feature1', dimension=1, kind=Dim.Types.Feature)
    if (axis is None):
        axis = self.get_default_new_axis_for_dim_tag(dim_tag)
    v = self.copy_add_dim_by_tag(dim_tag, axis=axis)
    # Reset an explicitly-set feature axis so the new one can take effect.
    if (v.feature_dim_axis_or_unspecified is not NotSpecified):
        v.feature_dim_axis = NotSpecified
    if (axis < 0):
        axis += v.batch_ndim
        assert (axis >= 0)
    assert (0 <= axis < v.batch_ndim)
    if (v.feature_dim_axis != axis):
        v.feature_dim_axis = axis
    return v
def get_default_new_axis_for_dim_tag(self, dim_tag: Dim) -> int:
    """Default insertion position for a new dim of the given kind.

    Batch dims go first; feature dims after the existing feature (or last);
    dynamic/spatial dims after the last axis of the same kind.
    """
    if dim_tag.is_batch_dim():
        return 0
    if (dim_tag.is_feature_dim() and (not self.sparse)):
        if (self.feature_dim_axis is not None):
            return (self.feature_dim_axis + 1)  # after the existing feature dim
        else:
            return self.batch_ndim  # at the end
    if (dim_tag.is_dynamic() and self.get_dynamic_axes()):
        return (self.get_dynamic_axes()[(- 1)] + 1)
    if (dim_tag.is_spatial_dim() and self.get_spatial_batch_axes()):
        return (self.get_spatial_batch_axes()[(- 1)] + 1)
    elif (dim_tag.is_spatial_dim() and (self.feature_dim_axis is not None)):
        return self.feature_dim_axis  # just before the feature dim
    else:
        return self.batch_ndim
def copy_add_dim_by_tag(self, dim_tag, unbroadcast=False, axis=None) -> _t.Tensor:
    """Copy with a new dim described by `dim_tag` inserted at `axis`.

    :param dim_tag: the dim to add
    :param unbroadcast: expand to the tag's real size instead of adding a size-1 broadcast dim
    :param axis: insertion position; derived from the tag kind when None
    """
    assert dim_tag.can_be_used_as_dim()
    if (axis is None):
        axis = self.get_default_new_axis_for_dim_tag(dim_tag=dim_tag)
    if (axis < 0):
        axis += (self.batch_ndim + 1)
    assert (0 <= axis <= self.batch_ndim)
    # Batch dims are special: delegate to copy_add_batch_dim.
    if dim_tag.is_batch_dim():
        if unbroadcast:
            return self.copy_add_batch_dim(batch_dim_axis=axis, batch=dim_tag.batch, dim_tag=dim_tag)
        else:
            # Broadcast batch dim: use (or create) a size-1 broadcast batch tag.
            if (dim_tag.batch or self.batch):
                from returnn.tf.util.data import BatchInfo
                batch_info = BatchInfo.make_global_broadcast_batch_info()
            else:
                batch_info = None
            if (dim_tag and (dim_tag.dimension == 1) and (dim_tag.batch == batch_info)):
                pass  # given tag is already a suitable broadcast batch dim
            else:
                dim_tag = Dim(kind=Dim.Types.Batch, description='batch-broadcast', dimension=1, batch=batch_info, auto_generated=True)
            return self.copy_add_batch_dim(batch_dim_axis=axis, batch=batch_info, dim_tag=dim_tag)
    data_opts = self.get_kwargs()
    # Sparse tensors cannot get a feature dim; reinterpret the tag as spatial.
    if (self.sparse and dim_tag.is_feature_dim()):
        dim_tag = dim_tag.copy(same_as_self=True, kind=Dim.Types.Spatial)
    if ((not unbroadcast) and (dim_tag.dimension != 1)):
        # Broadcasting: replace by a fresh size-1 tag derived from the given one.
        dim_tag = Dim(kind=dim_tag.kind, description=('%s_dummy_dim1' % (dim_tag.description or 'unnamed')), dimension=1, auto_generated=True)
    data_opts['dims'] = ((self._dims[:axis] + (dim_tag,)) + self._dims[axis:])
    # Shift recorded special axes for the inserted axis.
    other_special_axes = self.get_special_axes_dict(counted_with_batch_dim=True, only_available=True)
    for (k, a) in other_special_axes.items():
        data_opts[k] = (a if (a < axis) else (a + 1))
    # Let the new axis become the derived feature/time axis where applicable.
    if (dim_tag.is_feature_dim() and (self.feature_dim_axis is None)):
        data_opts.pop('feature_dim_axis', None)
    if (dim_tag.is_spatial_dim() and (self.time_dim_axis is None)):
        data_opts.pop('time_dim_axis', None)
    if (self.placeholder is not None):
        backend = self._raw_backend
        placeholder = backend.expand_dims_raw(self.placeholder, axis)
        if ((dim_tag.dimension is None) or (dim_tag.dimension > 1)):
            placeholder = backend.expand_raw(placeholder, axis, dim_tag.get_dim_value())
        data_opts['placeholder'] = placeholder
    return _t.Tensor(**data_opts)
def copy_split_feature_dim(self, new_feature_dim) -> _t.Tensor:
    """Copy with the feature dim F split into (F // new_feature_dim, new_feature_dim).

    :param new_feature_dim: size of the new (inner) feature dim; must divide self.dim
    """
    assert (not self.sparse)
    assert (self.feature_dim_axis is not None)
    assert (self.dim is not None)
    assert ((self.dim % new_feature_dim) == 0), 'must be a multiple of the input feature dim'
    feature_dim_rem = (self.dim // new_feature_dim)
    new_feature_dim_axis = (self.feature_dim_axis + 1)
    data_opts = self.get_kwargs(include_special_axes=False)
    # Outer remainder dim (spatial) followed by the new feature dim.
    dim_tag_split_rem = Dim(kind=Dim.Types.Spatial, description=('feature_split_rem_%i' % feature_dim_rem), auto_generated=True, dimension=feature_dim_rem)
    dim_tag_new = Dim(kind=self.dim_tags[self.feature_dim_axis].kind, description=('feature_split_new_%i' % new_feature_dim), auto_generated=True, dimension=new_feature_dim)
    dim_tags = ((self.dim_tags[:self.feature_dim_axis] + (dim_tag_split_rem, dim_tag_new)) + self.dim_tags[(self.feature_dim_axis + 1):])
    data_opts['dims'] = dim_tags
    # Shift recorded special axes for the inserted axis; feature axis is re-derived.
    other_special_axes = self.get_special_axes_dict(counted_with_batch_dim=True, only_available=True)
    other_special_axes.pop('feature_dim_axis', None)
    for (k, a) in other_special_axes.items():
        data_opts[k] = (a if (a < new_feature_dim_axis) else (a + 1))
    if (self.placeholder is not None):
        backend = self._raw_backend
        backend.set_known_shape_raw(self.placeholder, self.batch_shape)
        old_shape = backend.get_shape_tuple_raw(self.placeholder)
        new_shape = ((old_shape[:self.feature_dim_axis] + (feature_dim_rem, new_feature_dim)) + old_shape[(self.feature_dim_axis + 1):])
        data_opts['placeholder'] = backend.reshape_raw(self.placeholder, new_shape)
    return _t.Tensor(**data_opts)
def copy_extend_batch(self, batch) -> _t.Tensor:
    """Copy with the batch info replaced by the (extended) `batch`, tiling the data as needed.

    :param batch: new BatchInfo; its virtual dims must be a superset of ours

    Fix: `_adapt_batch_consistent_dim_tags()` is now called on `data` (the copy
    whose batch was just replaced) instead of on the unmodified `self`, where it
    was effectively a no-op and left the copy's dim tags inconsistent.
    """
    assert self.have_batch_axis()
    assert self.batch, '%s: batch unset' % self
    data = self.copy()
    batch = batch.copy_set_beam(data.beam)
    if data.batch.beam != data.beam:
        data.batch = data.batch.copy_set_beam(data.beam)
    if data.batch == batch:
        return data
    data.batch = batch
    data._adapt_batch_consistent_dim_tags()
    if self.placeholder is not None:
        assert self._raw_backend.is_tensorflow
        assert set(self.batch.virtual_dims).issubset(batch.virtual_dims)
        import tensorflow as tf
        from returnn.tf.util.basic import get_shape
        from returnn.util.basic import ensure_list_of_type
        from returnn.tf.util.data import BatchInfo
        with tf.name_scope('copy_extend_batch'):
            axis = self.batch_dim_axis
            x = self.placeholder
            shape = get_shape(x)
            # Align old virtual batch dims with the new ones: dims missing in the
            # old batch become broadcast (size 1) and are tiled up afterwards.
            old_dims = ensure_list_of_type(self.batch.virtual_dims, BatchInfo.FixedDim)
            new_dims = ensure_list_of_type(batch.virtual_dims, BatchInfo.FixedDim)
            batch_broadcast_shape = []
            ndim_batch_split = (self.batch_ndim - 1) + len(new_dims)
            tiles = [1] * ndim_batch_split
            old_idx = 0
            for new_idx, new_dim in enumerate(new_dims):
                old_dim = old_dims[old_idx] if old_idx < len(old_dims) else None
                if old_dim == new_dim:
                    batch_broadcast_shape.append(old_dim.size)
                    old_idx += 1
                else:
                    batch_broadcast_shape.append(1)
                    tiles[axis + new_idx] = new_dim.size
            assert old_idx == len(old_dims)
            # Split the batch axis into the virtual dims, tile, then merge back.
            shape_batch_split = shape[:axis] + batch_broadcast_shape + shape[axis + 1:]
            x = tf.reshape(x, shape_batch_split)
            x = tf.tile(x, tiles)
            shape = shape[:axis] + [batch.dim] + shape[axis + 1:]
            x = tf.reshape(x, shape)
            data.placeholder = x
    return data
def copy_compatible_to(self: Tensor, data: Tensor, add_dims=True, unbroadcast=False, except_feature=False, except_axis=None, check_sparse=True, check_dtype=True) -> Tensor:
    """Copy of self, transposed (and with broadcast dims inserted) to match `data`'s axes.

    :param data: target template whose axis layout we match
    :param add_dims: allow inserting broadcast dims missing in self
    :param unbroadcast: expand inserted dims to the real size instead of 1
    :param except_feature: with unbroadcast, keep the feature dim as broadcast dim
    :param except_axis: with unbroadcast, keep this axis as broadcast dim
    :param check_sparse: require matching sparseness
    :param check_dtype: require matching dtype
    """
    assert ((not check_sparse) or (self.sparse == data.sparse))
    assert ((not check_dtype) or (self.dtype == data.dtype))
    v = self.copy()
    if (v.have_batch_axis() and data.have_batch_axis() and v.batch and data.batch and (v.batch != data.batch)):
        v = v.copy_extend_batch(data.batch)
    v.sparse_dim = data.sparse_dim  # temporarily adopt; restored from self below
    if ((v.batch_dim_axis is not None) and (data.batch_dim_axis is None)):
        raise ValueError(('copy_compatible_to: self %r has batch-dim, but target data %r has not' % (self, data)))
    if (data.batch_ndim < v.batch_ndim):
        raise ValueError(('copy_compatible_to: self %r already has more dims than target data %r' % (self, data)))
    is_equal_opts = dict(allow_same_feature_dim=True, allow_same_spatial_dim=True, treat_feature_as_spatial=True, ignore_feature_dim=True)
    # mapped_axes: v axis -> data axis, for all axes of v.
    mapped_axes = data.find_matching_dim_map(v, list(range(v.batch_ndim)), is_equal_opts)
    assert (len(mapped_axes) == v.batch_ndim)
    except_axis_int = (data.get_axis_from_description(except_axis, allow_int=True) if (except_axis is not None) else None)
    # Walk the target axes in order, inserting or swapping v's axes to match.
    for target_axis in range(data.batch_ndim):
        new_v_axis = min(target_axis, v.batch_ndim)
        if (target_axis not in mapped_axes.values()):
            # Axis missing in v: insert a (possibly unbroadcast) dim.
            if (not add_dims):
                raise ValueError(('%s.copy_compatible_to(%s) not allowed, axis %i (%s) not in source' % (self, data, target_axis, data.dim_tags[target_axis])))
            unbroadcast_axis = (unbroadcast and (not (except_feature and (data.feature_dim_axis == target_axis))) and (not ((except_axis_int is not None) and (except_axis_int == target_axis))))
            v = v.copy_add_dim_by_tag(data.get_dim_tag(target_axis), axis=new_v_axis, unbroadcast=unbroadcast_axis)
            # Renumber the mapping for the inserted axis.
            mapped_axes = {(v_ax + (1 if (v_ax >= new_v_axis) else 0)): trg_ax for (v_ax, trg_ax) in mapped_axes.items()}
            mapped_axes[new_v_axis] = target_axis
        else:
            # Axis exists in v: move it into position via a swap.
            matching_v_axes = [v_ax for (v_ax, trg_ax) in mapped_axes.items() if (trg_ax == target_axis)]
            assert (len(matching_v_axes) == 1)
            matching_v_axis = matching_v_axes[0]
            if (target_axis != matching_v_axis):
                v = v.copy_swap_axes(matching_v_axis, new_v_axis)
                (mapped_axes[matching_v_axis], mapped_axes[new_v_axis]) = (mapped_axes[new_v_axis], mapped_axes[matching_v_axis])
    assert (v.batch_ndim == data.batch_ndim)
    assert all(((mapped_axes[ax] == ax) for ax in range(v.batch_ndim)))
    if (self.version == 1):
        # v1 semantics: align the explicit special axes with the target.
        assert (v.batch_dim_axis == data.batch_dim_axis)
        if (v.time_dim_axis != data.time_dim_axis):
            v.time_dim_axis = NotSpecified
            if (v.time_dim_axis != data.time_dim_axis):
                v.time_dim_axis = data.time_dim_axis
        if (v.feature_dim_axis != data.feature_dim_axis):
            v.feature_dim_axis = NotSpecified
            if (v.feature_dim_axis != data.feature_dim_axis):
                v.feature_dim_axis = data.feature_dim_axis
    elif (v.feature_dim_axis != data.feature_dim_axis):
        v.feature_dim_axis = data.feature_dim_axis
    if self.sparse:
        v.feature_dim_axis = NotSpecified  # sparse tensors have no feature axis
    v.sparse_dim = self.sparse_dim  # restore self's sparseness
    v.sanity_check()
    return v
def get_out_permutation_to_dims(self, dims: Sequence[Dim]) -> List[int]:
    """Map each entry of `dims` to an axis of self (-1 for dims not present in self).

    Every axis of self must be matched exactly once; identity matches (`is`)
    take precedence, then equality, with `match_priority` breaking ties.

    :return: list of self-axis indices (or -1), one per entry of `dims`
    """
    out_permutation: List[int] = []
    count = 0
    taken = ([False] * len(self._dims))
    for dim in dims:
        # Collect candidate axes of self for this target dim.
        candidates: List[int] = []
        for j in range(len(self._dims)):
            if taken[j]:
                continue
            if (dim is self._dims[j]):
                candidates = [j]  # identity match wins outright
                break
            if (dim == self._dims[j]):
                candidates.append(j)
        if (not candidates):
            out_permutation.append((- 1))  # dim not in self -> broadcast position
        elif (len(candidates) == 1):
            out_permutation.append(candidates[0])
            taken[candidates[0]] = True
            count += 1
        else:
            # Multiple equal candidates: pick the unique one with highest match_priority.
            max_match_priority_idx = None
            max_match_priority = None
            count_same_match_priority = 0
            for j in range(len(candidates)):
                # NOTE(review): this rebinds the loop var `dim`, so the error message
                # below shows a candidate dim and labels `dims` as "raw_tensor shape" —
                # looks like leftover wording from a different call site; confirm intent.
                dim = self._dims[candidates[j]]
                match_priority = dim.match_priority
                if ((j > 0) and (match_priority == max_match_priority)):
                    count_same_match_priority += 1
                if ((j == 0) or (match_priority > max_match_priority)):
                    max_match_priority = match_priority
                    count_same_match_priority = 1
                    max_match_priority_idx = j
            assert (count_same_match_priority >= 1)
            if (count_same_match_priority > 1):
                raise ValueError(f'{self}: dim {dim} is ambiguous, from tensor dims {self._dims} and raw_tensor shape {dims}')
            out_permutation.append(candidates[max_match_priority_idx])
            taken[candidates[max_match_priority_idx]] = True
            count += 1
    assert (count == len(self._dims))
    assert (len(out_permutation) == len(dims))
    return out_permutation
def copy_compatible_to_dims(self: _t.Tensor, dims: Sequence[Dim]) -> _t.Tensor:
    """Copy transposed (with broadcast dims inserted) so its dims follow the order of `dims`."""
    perm = self.get_out_permutation_to_dims(dims)
    if perm == list(range(len(self._dims))):
        return self.copy()  # already in the requested order
    return self._copy_compatible_to_dims_with_perm(dims, perm)
def _copy_compatible_to_dims_with_perm(self, dims: Sequence[Dim], out_permutation: Sequence[int]):
    """Apply a precomputed permutation (from get_out_permutation_to_dims) to self.

    :param dims: target dim order
    :param out_permutation: per target dim, the self axis index or -1 (broadcast)
    """
    raw_tensor = self._raw_tensor
    if (raw_tensor is not None):
        # Transpose the present axes, then reshape to insert size-1 broadcast axes.
        backend = self._raw_backend
        raw_shape = backend.get_shape_tuple_raw(raw_tensor)
        raw_tensor = backend.transpose_raw(raw_tensor, [p for p in out_permutation if (p >= 0)])
        raw_tensor = backend.reshape_raw(raw_tensor, [(raw_shape[p] if (p >= 0) else 1) for p in out_permutation])
    # For broadcast positions, create fresh size-1 dim tags derived from the target dims.
    out_dims = [(dims[i] if (p >= 0) else Dim(kind=dims[i].kind, description=('%s_bc_dim1' % (dims[i].description or 'unnamed')), dimension=1, auto_generated=True)) for (i, p) in enumerate(out_permutation)]
    kwargs = self.get_kwargs(include_special_axes=False)
    kwargs['dims'] = out_dims
    kwargs['raw_tensor'] = raw_tensor
    res = _t.Tensor(**kwargs)
    if (self.version <= 1):
        # v1 semantics: carry over the explicit time/feature axes through the permutation.
        if (self.time_dim_axis is None):
            if (res.time_dim_axis is not None):
                res.time_dim_axis = None
        else:
            axis = out_permutation.index(self.time_dim_axis)
            assert (axis >= 0)
            if (res.time_dim_axis != axis):
                res.time_dim_axis = axis
        if (self.feature_dim_axis is None):
            if (res.feature_dim_axis is not None):
                res.feature_dim_axis = None
        else:
            axis = out_permutation.index(self.feature_dim_axis)
            assert (axis >= 0)
            if (res.feature_dim_axis != axis):
                res.feature_dim_axis = axis
    return res
def copy_compatible_to_dims_raw(self: _t.Tensor, dims: Sequence[Dim]) -> _t.RawTensorType:
    """Like copy_compatible_to_dims, but returns only the permuted/reshaped raw tensor."""
    raw_tensor = self._raw_tensor
    assert (raw_tensor is not None), f'{self} copy_compatible_to_dims_raw: no raw tensor'
    out_permutation = self.get_out_permutation_to_dims(dims)
    if (out_permutation == list(range(len(self._dims)))):
        # already in target order; nothing to do
        return raw_tensor
    backend = self._raw_backend
    raw_shape = backend.get_shape_tuple_raw(raw_tensor)
    # transpose existing axes (perm entries >= 0), then reshape to insert size-1 broadcast axes
    raw_tensor = backend.transpose_raw(raw_tensor, [p for p in out_permutation if (p >= 0)])
    raw_tensor = backend.reshape_raw(raw_tensor, [(raw_shape[p] if (p >= 0) else 1) for p in out_permutation])
    return raw_tensor
def copy_time_flattened(self) -> _t.Tensor:
    """
    Return a copy where batch and time axes are merged into one dynamic flattened axis.
    Requires both a batch axis and a time axis.
    """
    assert (self.batch_dim_axis is not None)
    assert (self.time_dim_axis is not None)
    data_opts = self.get_kwargs(include_special_axes=False)
    if (self.placeholder is not None):
        data_opts['placeholder'] = self.get_placeholder_time_flattened()
    dim_tag = self.dim_tags[self.time_dim_axis]
    # the merged axis gets a new dynamic (dimension=None) spatial dim tag
    dim_tag = Dim(kind=Dim.Types.Spatial, description=('%s_flattened' % (dim_tag.description or 'unnamed')), auto_generated=True, dimension=None)
    data_opts['dims'] = ((dim_tag,) + tuple((tag for (i, tag) in enumerate(self.dim_tags) if (i not in (self.batch_dim_axis, self.time_dim_axis)))))
    data_opts['time_dim_axis'] = None
    data_opts.pop('feature_dim_axis', None)
    return _t.Tensor(**data_opts)
def copy_extend_with_beam(self, beam) -> Tensor:
    """
    Return a copy whose batch axis is tiled by `beam.beam_size` (search beam expansion).
    No-op if `beam` is None or already set to the same beam.
    """
    data = self.copy()
    if (data.beam and (data.beam == beam)):
        return data
    assert (data.beam is None), ('incompatible beam (%r vs %r)' % (data.beam, beam))
    if (beam is None):
        return data
    data.beam = beam
    assert data.batch
    data.batch = data.batch.copy_set_beam(beam)
    if (data.placeholder is not None):
        # raw-tensor tiling is only implemented for the TF backend here
        assert data._raw_backend.is_tensorflow
        import tensorflow as tf
        from returnn.tf.util.basic import get_valid_scope_name_from_str, same_control_flow_ctx, tile_transposed
        with tf.name_scope(('%s_data_extend_with_beam' % get_valid_scope_name_from_str(self.name))):
            with same_control_flow_ctx(data.placeholder):
                data.placeholder = tile_transposed(data.placeholder, axis=data.batch_dim_axis, multiples=beam.beam_size)
            # remember the pre-expansion data on the raw tensor for later lookup
            setattr(data.placeholder, '_RETURNN_beam_expanded_base_data', self)
    data._adapt_batch_consistent_dim_tags()
    return data
def copy_merge_into_batch(self, axes) -> Tensor:
    """
    Return a copy where the given axes (which must include the batch axis) are merged
    into the batch axis; the batch info is extended with the merged dims.
    """
    assert self.batch
    assert (self.batch_dim_axis in axes)
    assert (sorted(set(axes)) == sorted(axes))
    min_axis = min(axes)
    axes = list(axes)
    data = self.copy()
    if (axes != list(range(min_axis, (min_axis + len(axes))))):
        # make the axes consecutive first by transposing the rest around them
        rem_axes_start = list(range(min_axis))
        rem_axes_end = [a for a in range(min_axis, self.batch_ndim) if (a not in axes)]
        data = data.copy_transpose(((rem_axes_start + axes) + rem_axes_end))
        axes = list(range(min_axis, (min_axis + len(axes))))
        assert (data.batch_dim_axis in axes)
    tensor = data.placeholder
    batch = data.batch
    data = data.copy_template()
    batch_idx = 0
    for axis in axes:
        if (axis == data.batch_dim_axis):
            # virtual dims after the global batch dim get appended at the end
            batch_idx = len(batch.virtual_dims)
            continue
        batch = batch.copy_extend_with_padded_or_fixed_dim_tag(dim_tag=data.dim_tags[axis], new_dim_idx=batch_idx)
        batch_idx += 1
    # remove the merged axes from the template (reverse order keeps indices valid)
    for axis in reversed(sorted(axes)):
        if (axis != data.batch_dim_axis):
            data = data.copy_template_excluding_axis(axis)
    data.batch = batch
    if (tensor is not None):
        assert self._raw_backend.is_tensorflow
        import tensorflow as tf
        from returnn.tf.util.basic import get_shape
        shape = get_shape(tensor)
        tensor = tf.reshape(tensor, ((shape[:min_axis] + [(- 1)]) + shape[(min_axis + len(axes)):]))
        data.placeholder = tensor
    return data
def copy_squeeze_axes(self, axes) -> Tensor:
    """
    Return a copy with the given static size-1 axes removed.

    :param axes: list/tuple of axis ints (counted with batch dim), each of dimension 1
    :return: copy with those axes squeezed; explicitly-set time/feature axes are shifted left
        by the number of removed axes before them
    """
    assert isinstance(axes, (list, tuple))
    # Fix: validate the axis range BEFORE indexing batch_shape. Previously the range
    # assert came second, so an out-of-range axis raised IndexError (or a negative
    # axis silently wrapped around via Python indexing) instead of the intended assertion.
    assert all(((0 <= axis < self.batch_ndim) for axis in axes))
    assert all(((self.batch_shape[axis] == 1) for axis in axes))
    if (not axes):
        return self.copy()
    data_opts = self.get_kwargs(include_special_axes=False)
    if (self._raw_tensor is not None):
        backend = self._raw_backend
        data_opts['raw_tensor'] = backend.squeeze_raw(self._raw_tensor, axes)
    data_opts['dims'] = [tag for (i, tag) in enumerate(self._dims) if (i not in axes)]
    if ((self.time_dim_axis is not None) and (self.time_dim_axis_or_unspecified is not NotSpecified)):
        if (self.time_dim_axis not in axes):
            # shift explicit time axis left by the number of squeezed axes before it
            data_opts['time_dim_axis'] = (self.time_dim_axis - len([axis for axis in axes if (axis < self.time_dim_axis)]))
    if ((self.feature_dim_axis is not None) and (self.feature_dim_axis_or_unspecified is not NotSpecified)):
        if (self.feature_dim_axis not in axes):
            data_opts['feature_dim_axis'] = (self.feature_dim_axis - len([axis for axis in axes if (axis < self.feature_dim_axis)]))
    return _t.Tensor(**data_opts)
def copy_template(self, name=None, *, dtype=None) -> _t.Tensor:
    """Return a new Tensor with the same metadata (optionally overriding name/dtype), without raw data."""
    overrides = {}
    if name:
        overrides["name"] = name
    if dtype:
        overrides["dtype"] = dtype
    kwargs = self.get_kwargs()
    kwargs.update(overrides)
    return _t.Tensor(**kwargs)
def copy_template_dense(self, name=None, dtype=None) -> Tensor:
    """
    Template copy converted to dense: if sparse, the sparse dim is removed and re-added
    as a trailing (feature) axis, with dtype float32 unless `dtype` overrides it.
    """
    out = self.copy_template(name=name)
    if out.sparse:
        feat_dim = out.sparse_dim
        out.sparse = False
        out.dtype = 'float32'
        out = out.copy_add_dim_by_tag(dim_tag=feat_dim, unbroadcast=True, axis=(- 1))
        out.feature_dim_axis = NotSpecified
        assert (out.feature_dim_axis == (out.batch_ndim - 1))
    if dtype:
        out.dtype = dtype
    return out
def copy_template_excluding_axis(self, exclude_axis, name=None) -> _t.Tensor:
    """Template copy without the given axis; special axes after it are shifted left by one."""
    kwargs = self.get_kwargs(include_special_axes=False)
    if (exclude_axis < 0):
        exclude_axis += self.batch_ndim
        assert (exclude_axis >= 0)
    assert (0 <= exclude_axis < self.batch_ndim)
    if (exclude_axis == self.feature_dim_axis):
        # dropping the feature axis invalidates any stored dim
        kwargs.pop('dim', None)
    other_special_axes = self.get_special_axes_dict(counted_with_batch_dim=True, only_available=True)
    for (axis_name, axis) in other_special_axes.items():
        if (axis == exclude_axis):
            continue
        kwargs[axis_name] = (axis if (axis < exclude_axis) else (axis - 1))
    new_dim_tags = list(self.dim_tags)
    del new_dim_tags[exclude_axis]
    kwargs['dims'] = new_dim_tags
    if name:
        kwargs['name'] = name
    return _t.Tensor(**kwargs)
def copy_template_excluding_spatial_dim(self, spatial_axis_num, name=None) -> Tensor:
    """Template copy with the given spatial axis (index into the spatial-axes list, may be negative) removed."""
    spatial_axes = self.get_spatial_batch_axes()
    num = spatial_axis_num + len(spatial_axes) if spatial_axis_num < 0 else spatial_axis_num
    assert 0 <= num < len(spatial_axes)
    return self.copy_template_excluding_axis(exclude_axis=spatial_axes[num], name=name)
def copy_template_excluding_time_dim(self, name=None) -> _t.Tensor:
    """Template copy without the time axis; requires a time axis to exist."""
    time_axis = self.time_dim_axis
    assert time_axis is not None
    return self.copy_template_excluding_axis(exclude_axis=time_axis, name=name)
def copy_template_adding_time_dim(self, name=None, time_dim_axis=0) -> _t.Tensor:
    """Template copy with a new dynamic time dim inserted at `time_dim_axis` (may be negative)."""
    if (time_dim_axis < 0):
        time_dim_axis += (self.batch_ndim + 1)
        assert (time_dim_axis >= 0)
    assert (0 <= time_dim_axis <= self.batch_ndim)
    kwargs = self.get_kwargs(include_special_axes=False)
    dim_tag = Dim(kind=Dim.Types.Time, description='unknown_time', dimension=None, auto_generated=True)
    dim_tags = ((self.dim_tags[:time_dim_axis] + (dim_tag,)) + self.dim_tags[time_dim_axis:])
    kwargs['dims'] = dim_tags
    other_special_axes = self.get_special_axes_dict(counted_with_batch_dim=True, only_available=True)
    other_special_axes.pop('time_dim_axis', None)
    # shift all special axes at/after the insertion point right by one
    for (axis_name, axis) in other_special_axes.items():
        kwargs[axis_name] = (axis if (axis < time_dim_axis) else (axis + 1))
    kwargs['time_dim_axis'] = time_dim_axis
    if name:
        kwargs['name'] = name
    return _t.Tensor(**kwargs)
def copy_template_replace_dim_tag(self, axis, new_dim_tag, name=None) -> _t.Tensor:
    """Template copy where the dim tag at `axis` (may be negative) is replaced by `new_dim_tag`."""
    assert new_dim_tag.can_be_used_as_dim()
    if (axis < 0):
        assert ((axis + self.batch_ndim) >= 0)
        axis += self.batch_ndim
    assert (0 <= axis < self.batch_ndim)
    opts = self.get_kwargs()
    if self.dim_tags[axis].is_batch_dim():
        # replacing the batch dim drops the stored batch info
        opts.pop('batch', None)
    if new_dim_tag.is_batch_dim():
        # a batch dim cannot stay marked as time/feature axis
        if (self.time_dim_axis == axis):
            opts.pop('time_dim_axis', None)
        if (self.feature_dim_axis == axis):
            opts.pop('feature_dim_axis', None)
    dim_tags = ((self.dim_tags[:axis] + (new_dim_tag,)) + self.dim_tags[(axis + 1):])
    opts['dims'] = dim_tags
    if (self.feature_dim_axis_or_unspecified is not NotSpecified):
        if ((self.feature_dim_axis == axis) and self.dim_tags[axis].is_feature_dim() and (not new_dim_tag.is_feature_dim())):
            # explicit feature axis no longer points at a feature dim
            opts['feature_dim_axis'] = None
    if name:
        opts['name'] = name
    return _t.Tensor(**opts)
def copy_template_replace_dim(self, axis, new_dim, new_size=None) -> _t.Tensor:
    """Template copy where axis `axis` gets a fresh dim tag with dimension `new_dim` (and optional dyn size)."""
    dim_tag = self.dim_tags[axis]
    if dim_tag.is_batch_dim():
        # the batch dim itself cannot be replaced; only valid as a no-op
        assert (new_dim is None)
        return self.copy_template()
    dim_tag = Dim(kind=dim_tag.kind, description=('%s_replaced' % (dim_tag.description or 'unnamed')), auto_generated=True, dimension=new_dim, dyn_size=new_size)
    return self.copy_template_replace_dim_tag(axis=axis, new_dim_tag=dim_tag)
def copy_template_new_dim_tags(self, new_dim_tags, name=None, keep_special_axes=False) -> _t.Tensor:
    """Template copy with its dims replaced wholesale by `new_dim_tags`; optionally keep the special-axes markers."""
    if keep_special_axes:
        assert len(new_dim_tags) == self.batch_ndim
    opts = dict(self.get_kwargs(include_special_axes=keep_special_axes), dims=new_dim_tags)
    if name:
        opts["name"] = name
    return _t.Tensor(**opts)
def copy_template_set_ctx(self, ctx) -> Tensor:
    """Template copy with `control_flow_ctx` set to `ctx`."""
    opts = self.get_kwargs()
    opts["control_flow_ctx"] = ctx
    return _t.Tensor(**opts)
def copy_template_unpack_batch(self) -> Tensor:
    """
    Template copy where a packed batch dim (with virtual dims) is expanded back into
    its component dims; the global batch dim is kept as a batch dim tag.
    """
    assert self.have_batch_axis()
    assert self.batch, ('%s: batch unset' % self)
    data = self.copy()
    kwargs = self.get_kwargs(include_special_axes=False)
    from returnn.tf.util.data import BatchInfo
    dim_tags = []
    for dim_tag in data.dim_tags:
        if (dim_tag.is_batch_dim() and dim_tag.batch and (len(dim_tag.batch.virtual_dims) > 0)):
            batch = dim_tag.batch
            new_batch_dim_tag = None
            for virtual_dim in batch.virtual_dims:
                if isinstance(virtual_dim, BatchInfo.PackedDim):
                    # packed dims become regular axes again and are removed from the batch info
                    dim_tags.append(virtual_dim.dim_tag)
                    batch = batch.copy_remove_dim(virtual_dim)
                elif isinstance(virtual_dim, BatchInfo.GlobalBatchDim):
                    assert (not new_batch_dim_tag)
                    if ((batch is None) or batch.is_global_batch()):
                        new_batch_dim_tag = batch_dim
                    else:
                        new_batch_dim_tag = Dim(kind=Dim.Types.Batch, description=dim_tag.description, dimension=None)
                    dim_tags.append(new_batch_dim_tag)
            assert new_batch_dim_tag, ('%s: batch info %r invalid' % (self, batch))
            new_batch_dim_tag.batch = batch
            kwargs['batch'] = batch
        else:
            dim_tags.append(dim_tag)
    kwargs['dims'] = dim_tags
    return _t.Tensor(**kwargs)
def _get_variable_dim_pattern(self):
return tuple([(dim is None) for dim in self.shape])
def _get_var_len_axes(self):
return [i for (i, d) in enumerate(self._get_variable_dim_pattern()) if d]
def matches_var_dim_pattern(self, other: _t.Tensor) -> bool:
    """True iff `other` has its dynamic axes at the same positions and the same time axis (excl. batch)."""
    same_time = self.time_dim_axis_excluding_batch == other.time_dim_axis_excluding_batch
    return same_time and self._get_var_len_axes() == other._get_var_len_axes()
def dim_tags(self) -> Tuple[(Dim, ...)]:
    """Tuple of Dim tags, one per axis. NOTE(review): reads as a property getter; decorator not visible in this chunk."""
    return self._dims
def shape(self) -> Tuple[(Optional[int], ...)]:
    """Static shape excluding the batch dim; None entries are dynamic (property getter)."""
    return tuple((tag.dimension for tag in self._dims if (not tag.is_batch_dim())))
def shape(self, shape):
    """Setter: only a no-op assignment of the identical shape is accepted."""
    if (tuple(shape) == self.shape):
        return
    raise Exception(('%s: setting the shape is not allowed (new shape %s)' % (self, shape)))
def batch_shape(self) -> Tuple[(Optional[int], ...)]:
    """Static shape including the batch dim; None entries are dynamic (property getter)."""
    return tuple((tag.dimension for tag in self.dim_tags))
def get_batch_shape(self, batch_dim):
    """Shape with `batch_dim` inserted at the batch axis; unchanged when there is no batch axis."""
    axis = self.batch_dim_axis
    if axis is None:
        return self.shape
    return self.shape[:axis] + (batch_dim,) + self.shape[axis:]
def get_dynamic_batch_shape(self):
    """Per-axis sizes via get_dim(): static ints or raw scalar tensors for dynamic axes."""
    return list(map(self.get_dim, range(self.batch_ndim)))
def have_varying_shape_in_ctx(self):
    """True if any dim tag carries a control-flow context (i.e. varies inside a loop/cond)."""
    for tag in self.dim_tags:
        if tag.control_flow_ctx:
            return True
    return False
def size_placeholder(self: Tensor):
    """Legacy dict-like view of dynamic sizes per axis (excluding batch), via a proxy object."""
    return _SizePlaceholderProxy(self)
_placeholder.setter  # NOTE(review): looks like a mangled "@size_placeholder.setter" decorator — confirm against the original source
def size_placeholder(self, sizes):
    """Setter: assign each given size tensor as the dynamic size of its axis (axis counted without batch)."""
    if (sizes is None):
        return
    for (axis_wo_b, size) in sizes.items():
        self.set_dynamic_size(axis=self.get_batch_axis(axis_wo_b), sizes=size)
def shape_dense(self):
    """Shape with the (sparse) feature dim appended when sparse; otherwise plain shape."""
    base = self.shape
    return base + (self.dim,) if self.sparse else base
def batch_shape_dense(self):
    """Batch shape with the (sparse) feature dim appended when sparse; otherwise plain batch shape."""
    base = self.batch_shape
    return base + (self.dim,) if self.sparse else base
def dim_tags_sparse(self):
    """Dim tags with the feature axis removed; unchanged when sparse or when there is no feature axis."""
    if self.sparse or not self.have_feature_axis():
        return self.dim_tags
    feat = self.feature_dim_axis
    return self.dim_tags[:feat] + self.dim_tags[feat + 1:]
def dim_tags_set_implicit_only_wrapped(self):
    """
    Set of implicit dims (wrapped in marker types): the sparse dim if not an explicit axis,
    and any dims of dyn-size tensors not present as explicit axes.
    """
    self_dim_tags = set(self.dim_tags)
    dims = set()
    if (self.sparse_dim and (self.sparse_dim not in self_dim_tags)):
        dims.add(_m.ImplicitSparseDim(self.sparse_dim))
    for dim in self.dim_tags:
        if dim.dyn_size_ext:
            for dim_ in dim.dyn_size_ext.dim_tags:
                if (dim_ not in self_dim_tags):
                    dims.add(_m.ImplicitDynSizeDim(dim_))
    return dims
def dim_tags_set_implicit_only(self):
    """Same as dim_tags_set_implicit_only_wrapped, but unwrapped to the plain Dim tags."""
    return set((dim.tag for dim in self.dim_tags_set_implicit_only_wrapped))
def dim_tags_set_implicit(self):
    """Set of all dims: explicit axes plus implicit (sparse / dyn-size) dims."""
    dims = set(self.dim_tags)
    dims.update(self.dim_tags_set_implicit_only)
    return dims
def remaining_dims(self: _t.Tensor, remove: Optional[Union[(Dim, Sequence[Dim])]]=None) -> List[Dim]:
    """List of this tensor's dims with `remove` (a single Dim or a sequence of Dims) taken out."""
    result = list(self._dims)
    if remove:
        to_remove = [remove] if isinstance(remove, Dim) else remove
        for dim in to_remove:
            result.remove(dim)
    return result
def ndim(self):
    """Number of axes excluding the batch dim (property)."""
    shape = self.shape
    return len(shape)
def ndim_dense(self):
    """Like ndim, counting the implicit sparse dim as one extra axis when sparse."""
    return self.ndim + (1 if self.sparse else 0)
def batch_ndim(self):
    """Number of axes including the batch dim (property)."""
    dims = self._dims
    return len(dims)
def batch_ndim_dense(self):
    """Like batch_ndim, counting the implicit sparse dim as one extra axis when sparse."""
    return self.batch_ndim + (1 if self.sparse else 0)
def is_time_major(self):
    """Whether the time axis is the leading axis (property)."""
    time_axis = self.time_dim_axis
    return time_axis == 0
def is_batch_major(self):
    """Whether the batch axis is the leading axis (property)."""
    batch_axis = self.batch_dim_axis
    return batch_axis == 0
def is_batch_feature_major(self):
    """Whether the layout is (batch, feature, ...), i.e. batch axis 0 and feature axis 1 (property)."""
    return self.batch_dim_axis == 0 and self.feature_dim_axis == 1
def batch_dim_axis(self):
    """Index of the batch axis, derived from the dim tags, or None (property getter)."""
    return _batch_dim_axis_from_dim_tags_tuple(self._dims)
_dim_axis.setter  # NOTE(review): looks like a mangled "@batch_dim_axis.setter" decorator — confirm against the original source
def batch_dim_axis(self, axis):
    """Setter: only a no-op assignment of the current batch axis is accepted."""
    if (axis == self.batch_dim_axis):
        return
    raise Exception(('%s: cannot set batch_dim_axis = %s' % (self, axis)))
def _default_feature_dim_axis(self):
    """Default feature axis when none was set explicitly; always None for behavior version >= 2."""
    if (self.version >= 2):
        return None
    return _default_feature_dim_axis(batch_dim_axis=self.batch_dim_axis, time_dim_axis=self.time_dim_axis, batch_shape=self.batch_shape, sparse=self.sparse)
def feature_dim_axis(self):
    """Explicitly-set feature axis if any, else the version-dependent default (property getter)."""
    if (self._feature_dim_axis is not NotSpecified):
        return self._feature_dim_axis
    return self._default_feature_dim_axis()
_dim_axis.setter  # NOTE(review): looks like a mangled "@feature_dim_axis.setter" decorator — confirm against the original source
def feature_dim_axis(self: _t.Tensor, value):
    """Setter: accepts NotSpecified, None or a valid axis int; NotSpecified maps to None for version >= 2."""
    assert ((value is NotSpecified) or (value is None) or isinstance(value, int))
    if (self.feature_dim_axis_or_unspecified == value):
        return
    if ((self.version >= 2) and (value is NotSpecified)):
        value = None
    if isinstance(value, int):
        assert (0 <= value < self.batch_ndim)
    self._feature_dim_axis = value
def feature_dim_axis_or_unspecified(self):
    """Raw stored feature axis: an int, None, or NotSpecified when never set (property getter)."""
    return self._feature_dim_axis
def time_dim_axis(self) -> Optional[int]:
    """Time axis index; always None for behavior version >= 2, else explicit value or derived default."""
    if (self.version >= 2):
        return None
    if (self.time_dim_axis_or_unspecified is not NotSpecified):
        return self.time_dim_axis_or_unspecified
    return _default_time_dim_axis_dim_tags(self.dim_tags)
_dim_axis.setter  # NOTE(review): looks like a mangled "@time_dim_axis.setter" decorator — confirm against the original source
def time_dim_axis(self: _t.Tensor, value):
    """Setter: only meaningful for behavior version 1 (time_dim_axis is deprecated)."""
    assert ((value is NotSpecified) or (value is None) or isinstance(value, int))
    if (self.time_dim_axis_or_unspecified == value):
        return
    if ((self.version >= 2) and (value in (None, NotSpecified))):
        # version >= 2 has no time axis; clearing it is a no-op
        return
    assert (self.version == 1), 'time_dim_axis is deprecated'
    if isinstance(value, int):
        assert (0 <= value < self.batch_ndim)
    self._make_extra().time_dim_axis = value
def time_dim_axis_or_unspecified(self):
    """Raw stored time axis, or NotSpecified when unset or for behavior version >= 2 (property getter)."""
    if (self.version >= 2):
        return NotSpecified
    if (not self._extra):
        return NotSpecified
    return self._extra.time_dim_axis
def time_dim_axis_excluding_batch(self):
    """Time axis counted without the batch axis, or None when there is no time axis."""
    time_axis = self.time_dim_axis
    if time_axis is None:
        return None
    return self.get_batch_axis_excluding_batch(time_axis)
def placeholder(self):
    """Alias for raw_tensor (legacy name; property getter)."""
    return self._raw_tensor
def placeholder(self: _t.Tensor, value: Optional[_t.RawTensorType]):
    """Setter alias for raw_tensor (legacy name)."""
    self.raw_tensor = value
def batch(self):
    """BatchInfo stored on the extra struct, or None when unset (property getter)."""
    extra = self._extra
    return extra.batch if extra else None
def batch(self, batch):
    """Setter: batch beam must match our beam; updates dim tags for the new batch (property setter)."""
    if batch:
        assert (batch.beam == self.beam)
    if (self.batch == batch):
        return
    self._make_extra().batch = batch
    self._adapt_batch_consistent_dim_tags()
def beam(self):
    """SearchBeam: explicit beam if set, else the beam of the batch info, else None (property getter)."""
    extra = self._extra
    if not extra:
        return None
    if extra.beam:
        return extra.beam
    batch_info = extra.batch
    return batch_info.beam if batch_info else None
def beam(self, beam):
    """Setter: stores the beam and propagates it into the batch info if present (property setter)."""
    if (self.beam == beam):
        return
    self._make_extra().beam = beam
    if self._extra.batch:
        self._extra.batch = self.batch.copy_set_beam(beam=beam)
    self._adapt_batch_consistent_dim_tags()
def dim(self):
    """Dimension of the feature-or-sparse dim, or None when there is neither (property getter)."""
    tag = self.feature_dim_or_sparse_dim
    return tag.dimension if tag else None
def dim(self, dim):
    """Setter: only a no-op assignment of the current dim is accepted."""
    assert (dim == self.dim)
def feature_dim_or_sparse_dim(self: Tensor):
    """The sparse dim if sparse, else the dim tag at the feature axis, else None."""
    if self.sparse_dim:
        return self.sparse_dim
    axis = self.feature_dim_axis
    return self._dims[axis] if axis is not None else None
def sparse(self):
    """Whether this tensor is sparse, i.e. a sparse_dim is set (property getter)."""
    sparse_dim = self.sparse_dim
    return sparse_dim is not None
def sparse(self, sparse):
    """Setter: only allows clearing sparseness (sparse=False); setting True is rejected."""
    if (self.sparse == sparse):
        return
    if (not sparse):
        self.sparse_dim = None
        return
    raise Exception(('%s: setting sparse=True not supported anymore. set sparse_dim instead' % self))
def vocab(self):
    """Vocabulary of the sparse dim, else of the feature-axis dim, else None (property getter)."""
    tag = self.sparse_dim
    if not tag and self.have_feature_axis():
        tag = self.dim_tags[self.feature_dim_axis]
    return tag.vocab if tag else None
def vocab(self, vocab):
    """Setter: always rejected; the vocab lives on the dim tag."""
    raise Exception(('%s: setting vocab not supported anymore. set sparse_dim instead' % self))
def time_dimension(self) -> Union[(int, _t.RawTensorType)]:
    """Size of the time axis: a static int or a raw scalar tensor; requires a time axis."""
    axis = self.time_dim_axis
    assert axis is not None
    return self.get_dim(axis)
def get_dim(self, axis: int) -> Union[(int, _t.RawTensorType)]:
    """Size of the given axis: the static int if known, else the runtime size from the raw tensor."""
    if (self.batch_shape[axis] is not None):
        return self.batch_shape[axis]
    assert (self._raw_tensor is not None)
    backend = self._raw_backend
    return backend.get_shape_tuple_raw(self._raw_tensor)[axis]
def get_placeholder_as_time_major(self):
    """Raw tensor transposed to time-major layout; requires a raw tensor."""
    assert self.placeholder is not None
    time_major = self.copy_as_time_major()
    return time_major.placeholder
def get_placeholder_as_batch_major(self):
    """Raw tensor transposed to batch-major layout; requires a raw tensor."""
    assert self.placeholder is not None
    batch_major = self.copy_as_batch_major()
    return batch_major.placeholder
def get_placeholder_with_runtime_sanity_checks(self):
    """Raw tensor wrapped with a control dependency on the runtime sanity-check op."""
    assert (self._raw_tensor is not None)
    backend = self._raw_backend
    return backend.identity_with_control_dependencies_raw(self._raw_tensor, [self.get_runtime_sanity_check_op()])
def get_placeholder_time_flattened(self):
    """
    Raw tensor with batch and time merged into one leading axis; padded frames are dropped
    via the seq-len mask when the time dim needs masking. TF-only.
    """
    assert self._raw_backend.is_tensorflow, 'get_placeholder_time_flattened only implemented for TF yet'
    from returnn.tf.util.basic import flatten_with_seq_len_mask, get_shape
    import tensorflow as tf
    assert (self.placeholder is not None)
    assert self.have_time_axis()
    # requires batch or time to be the leading axis
    assert (0 in [self.time_dim_axis, self.batch_dim_axis])
    time_dim = self.get_time_dim_tag()
    if time_dim.need_masking():
        assert (time_dim.dyn_size_ext.dims == (self.get_batch_dim_tag(),))
        return flatten_with_seq_len_mask(self.placeholder, time_dim.dyn_size, batch_dim_axis=self.batch_dim_axis, time_dim_axis=self.time_dim_axis)
    else:
        # no masking needed: plain transpose to (batch, time, ...) and merge the first two axes
        x = tf.transpose(self.placeholder, ([self.batch_dim_axis, self.time_dim_axis] + [i for i in range(self.batch_ndim) if (i not in [self.batch_dim_axis, self.time_dim_axis])]))
        shape = get_shape(x)
        return tf.reshape(x, ([(shape[0] * shape[1])] + shape[2:]))
def get_placeholder_flattened(self, keepdims=False):
    """
    Raw tensor with all dynamic axes (spatial + batch) merged into one leading axis. TF-only.
    With keepdims, the removed axes are re-added as size-1 axes.
    """
    assert (self.placeholder is not None)
    assert self._raw_backend.is_tensorflow, 'get_placeholder_flattened only implemented for TF yet'
    import tensorflow as tf
    x = self.placeholder
    orig_dyn_axes = (self.get_spatial_batch_axes() + [self.batch_dim_axis])
    dyn_axes = list(orig_dyn_axes)
    if (dyn_axes == [self.batch_dim_axis]):
        # only the batch axis is dynamic; nothing to flatten
        return x
    assert (0 in dyn_axes), 'would need some transpose, not supported at the moment'
    assert (len(dyn_axes) > 1)
    orig_num_dyn_axes = len(dyn_axes)
    ndim = len(self.batch_shape)
    if self.have_time_axis():
        # merge batch+time first (mask-aware), then adjust remaining axis indices
        x = self.get_placeholder_time_flattened()
        removed_axis = max(self.time_dim_axis, self.batch_dim_axis)
        dyn_axes.remove(removed_axis)
        dyn_axes = [(i if (i < removed_axis) else (i - 1)) for i in dyn_axes]
        ndim -= 1
    if (len(dyn_axes) > 1):
        # merge the remaining dynamic axes via a single reshape
        shape = tf.shape(x)
        x = tf.reshape(x, ([tf.reduce_prod([shape[i] for i in dyn_axes])] + [shape[i] for i in range(ndim) if (i not in dyn_axes)]))
        dyn_axes = [0]
    assert (dyn_axes == [0])
    if (keepdims and (orig_num_dyn_axes >= 2)):
        for i in orig_dyn_axes:
            if (i not in dyn_axes):
                x = tf.expand_dims(x, axis=i)
        x.set_shape(([None] * self.batch_ndim))
    return x
def get_axes(self, exclude_time=False, exclude_batch=False, exclude_feature=False):
    """All axis indices (with batch), optionally dropping the time/batch/feature axis when present."""
    axes = list(range(len(self.batch_shape)))
    if exclude_time and self.time_dim_axis is not None:
        axes.remove(self.time_dim_axis)
    if exclude_batch and self.batch_dim_axis is not None:
        axes.remove(self.batch_dim_axis)
    if exclude_feature and self.feature_dim_axis is not None:
        axes.remove(self.feature_dim_axis)
    return axes
def _verify_axis_int_from_description(cls, allow_int=NotSpecified):
    """Guard against int axis specs: enforced via BehaviorVersion 5 by default, or hard error when allow_int is False."""
    msg = 'Do not specify axis as int but as str or Dim instead.'
    if (allow_int is NotSpecified):
        from returnn.util import BehaviorVersion
        BehaviorVersion.require(condition=False, message=msg, version=5)
    if allow_int:
        return
    raise Exception(msg)
def _verify_axis_order_dependent(cls):
    """Guard against axis specs that depend on axis order; enforced via BehaviorVersion 7."""
    from returnn.util import BehaviorVersion
    BehaviorVersion.require(condition=False, message='Do not specify axis or axes in a way that depends on the order of the axes.', version=7)
def _make_valid_int_axis(self, axis):
if (axis < 0):
assert ((axis + self.batch_ndim) >= 0)
axis += self.batch_ndim
assert (axis < self.batch_ndim)
return axis
def get_axes_from_description(self, axes, allow_int=NotSpecified):
    """
    Resolve an axes description into a list of axis indices (counted with batch).

    Accepted forms: None/'' (-> []), a Dim (matched against dim tags, disambiguated via
    match_priority), an int (gated by allow_int / behavior version), a string keyword
    ('b', 't', 'f', 'spatial', 'dyn', 'static', 'except_batch', 'stag:...', 'dim:N', ...),
    or a list/tuple of any of these (flattened, deduplicated, order-preserving).
    """
    if ((axes is None) or (isinstance(axes, str) and (axes == ''))):
        return []
    if isinstance(axes, Dim):
        dims = [i for (i, tag) in enumerate(self.dim_tags) if (tag == axes)]
        if (len(dims) > 1):
            # prefer the highest match_priority among equal tags
            max_match_priority = max((self.dim_tags[i].match_priority for i in dims))
            dims = [i for i in dims if (self.dim_tags[i].match_priority == max_match_priority)]
        assert (len(dims) <= 1), ('%s: matching dim %s must be unique, use `match_priority` to resolve the matching order of ambiguous dimensions' % (self, axes))
        return dims
    if isinstance(axes, int):
        self._verify_axis_int_from_description(allow_int=allow_int)
        return [self._make_valid_int_axis(axes)]
    assert isinstance(axes, (str, int, list, tuple, Sequence))
    if isinstance(axes, str):
        import re
        axes = axes.lower()
        if (axes in ['b', 'batch']):
            assert (self.batch_dim_axis is not None)
            return [self.batch_dim_axis]
        elif (axes == 'spatial'):
            return self.get_spatial_batch_axes()
        elif re.match('(s|spatial):-?\\d+$', axes):
            # indexed spatial axis, e.g. "s:0" / "spatial:-1"
            self._verify_axis_order_dependent()
            s = int(axes.split(':')[1])
            spatial_axes = self.get_spatial_batch_axes()
            if (s < 0):
                s += len(spatial_axes)
            assert (s < len(spatial_axes)), ('%s get_axes_from_description: %r invalid' % (self, axes))
            return [spatial_axes[s]]
        elif (axes in ['dyn', 'dynamic']):
            return self.get_dynamic_axes()
        elif re.match('(d|dyn|dynamic):-?\\d+$', axes):
            # indexed dynamic axis
            self._verify_axis_order_dependent()
            s = int(axes.split(':')[1])
            dyn_axes = self.get_dynamic_axes()
            if (s < 0):
                s += len(dyn_axes)
            assert (0 <= s < len(dyn_axes)), ('%s get_axes_from_description: %r invalid' % (self, axes))
            return [dyn_axes[s]]
        elif (axes == 'spatial_except_time'):
            axes = self.get_spatial_batch_axes()
            assert (self.time_dim_axis is not None)
            axes.remove(self.time_dim_axis)
            return axes
        elif (axes in ['t', 'time']):
            assert (self.time_dim_axis is not None)
            return [self.time_dim_axis]
        elif (axes == 't?'):
            # optional time axis: empty list when there is none
            return ([self.time_dim_axis] if (self.time_dim_axis is not None) else [])
        elif (axes == 'except_time'):
            axes = list(range(self.batch_ndim))
            axes.remove(self.batch_dim_axis)
            if (self.time_dim_axis is not None):
                axes.remove(self.time_dim_axis)
            return axes
        elif (axes == 'except_batch'):
            axes = list(range(self.batch_ndim))
            axes.remove(self.batch_dim_axis)
            return axes
        elif re.match('(except_batch):-?\\d+$', axes):
            # indexed non-batch axis
            self._verify_axis_order_dependent()
            s = int(axes.split(':')[1])
            non_batch_axes = list(range(self.batch_ndim))
            if (self.batch_dim_axis is not None):
                non_batch_axes.remove(self.batch_dim_axis)
            if (s < 0):
                s += len(non_batch_axes)
            assert (0 <= s < len(non_batch_axes)), ('%s get_axes_from_description: %r invalid' % (self, axes))
            return [non_batch_axes[s]]
        elif (axes == '*'):
            return list(range(self.batch_ndim))
        elif (axes == 'static'):
            return self.get_static_axes()
        elif re.match('(static):-?\\d+$', axes):
            # indexed static axis
            self._verify_axis_order_dependent()
            s = int(axes.split(':')[1])
            static_axes = self.get_static_axes()
            if (s < 0):
                s += len(static_axes)
            assert (0 <= s < len(static_axes)), ('%s get_axes_from_description: %r invalid' % (self, axes))
            return [static_axes[s]]
        elif re.match('(dim):\\d+$', axes):
            # unique axis with the given static dimension
            s = int(axes.split(':')[1])
            dims = [a for a in range(self.batch_ndim) if (self.batch_shape[a] == s)]
            assert dims, ("%s get_axes_from_description: 'dim:%i' not found" % (self, s))
            assert (len(dims) == 1), ("%s get_axes_from_description: 'dim:%i' only allowed when unique" % (self, s))
            return dims
        elif (axes in ['f', 'feature', 'non_spatial']):
            return self.get_feature_batch_axes()
        elif all([(a in 'btf') for a in axes]):
            # e.g. "bt" -> recurse per character
            return self.get_axes_from_description(list(axes))
        elif axes.startswith('stag:'):
            return [self.get_axis_by_tag_name(axes[len('stag:'):], spatial_only=True)]
        elif axes.startswith('stag-single:'):
            # "stag-single:<idx>:<name>": pick one of several axes matching the tag name
            (_, idx_s, name) = axes.split(':', 2)
            idx = int(idx_s)
            return [self.get_axes_by_tag_name(name, spatial_only=True)[idx]]
        elif axes.startswith('tag:'):
            return [self.get_axis_by_tag_name(axes[len('tag:'):])]
        raise Exception(('invalid axis mode %r' % axes))
    assert isinstance(axes, (tuple, list, Sequence)), ('invalid axes %r' % axes)
    flat_axes = []
    for i in axes:
        if isinstance(i, int):
            self._verify_axis_int_from_description(allow_int=allow_int)
            flat_axes.append(self._make_valid_int_axis(i))
        else:
            assert isinstance(i, (str, tuple, list, Dim))
            flat_axes += self.get_axes_from_description(i, allow_int=allow_int)
    # deduplicate while preserving order
    res = []
    for i in flat_axes:
        if (i not in res):
            res.append(i)
    return res
def get_dim_tag_from_description(self, axis):
    """The Dim tag at the axis described by `axis` (ints not allowed as description)."""
    idx = self.get_axis_from_description(axis, allow_int=False)
    return self.dim_tags[idx]
def get_axis_from_description(self, axis, allow_int=NotSpecified):
    """
    Resolve a single-axis description into one axis index; raises when the match is
    missing or ambiguous (Dim ambiguity is resolved via match_priority).
    """
    if isinstance(axis, Dim):
        res_idx: Optional[int] = None
        res_tag: Optional[Dim] = None
        for (i, tag) in enumerate(self._dims):
            tag: Dim
            if ((tag is axis) or (tag == axis)):
                if ((res_tag is None) or (res_tag.match_priority < tag.match_priority)):
                    # first match, or strictly better priority: take it
                    res_idx = i
                    res_tag = tag
                    continue
                if (res_tag.match_priority > tag.match_priority):
                    # current best wins; skip this one
                    continue
                raise Exception(f'{self}: get_axis_from_description({axis}) not unique. use match_priority to resolve ambiguity')
        if (res_idx is None):
            raise Exception(f'{self}: get_axis_from_description({axis}) not found')
        return res_idx
    axes = self.get_axes_from_description(axis, allow_int=allow_int)
    assert axes, ('%s: %r axis not found' % (self, axis))
    assert (len(axes) == 1), ('%r: %r is not a unique axis but %r' % (self, axis, axes))
    return axes[0]
def get_description_from_axis(self, axis):
    """
    Inverse of get_axis_from_description: a stable description ('B'/'T'/'F', the Dim tag
    itself when unique, or a 'stag-single:...' string) for the given axis index.
    """
    assert (0 <= axis < self.batch_ndim)
    if (axis == self.batch_dim_axis):
        return 'B'
    dim_tag = self.dim_tags[axis]
    matching_tags = [i for (i, tag) in enumerate(self.dim_tags) if (tag == dim_tag)]
    if (dim_tag.dyn_size_ext and (len(matching_tags) == 1)):
        return dim_tag
    if (axis == self.time_dim_axis):
        return 'T'
    if (axis == self.feature_dim_axis):
        return 'F'
    if (len(matching_tags) == 1):
        return dim_tag
    # ambiguous tag: fall back to an index into the axes matching the tag name
    name = dim_tag.description
    matching_axes = self.get_axes_by_tag_name(name, spatial_only=True)
    assert (axis in matching_axes)
    # negative index keeps the description stable if earlier matching axes are removed
    return ('stag-single:%i:%s' % ((matching_axes.index(axis) - len(matching_axes)), name))
def has_axis(self, axis):
    """Whether the axis description matches at least one axis."""
    matched = self.get_axes_from_description(axis, allow_int=False)
    return len(matched) > 0
def get_axes_by_tag_name(self, name, spatial_only=False):
    """
    Axes whose dim-tag description (or its same-base description) contains `name`
    (case-insensitive substring match); optionally restricted to spatial axes.
    """
    dim_tags = self.get_batch_shape_dim_tags()
    matching_dim_tags = [(axis, tag) for (axis, tag) in enumerate(dim_tags) if ((name.lower() in tag.description.lower()) or (name.lower() in tag.get_same_base().description.lower()))]
    if spatial_only:
        spatial_axes = self.get_spatial_batch_axes()
        matching_dim_tags = [(axis, tag) for (axis, tag) in matching_dim_tags if ((axis in spatial_axes) or tag.is_spatial_dim())]
    return [ax for (ax, _) in matching_dim_tags]
def get_axis_by_tag_name(self, name, spatial_only=False):
    """The unique axis matching the tag name; asserts existence and uniqueness."""
    matching_dim_tags = self.get_axes_by_tag_name(name, spatial_only)
    assert (len(matching_dim_tags) > 0), ('%r: no %stag found with name %r' % (self, ('spatial ' if spatial_only else ''), name))
    assert (len(matching_dim_tags) == 1), ('%r: tag name %r is not unique in dim tags %r' % (self, name, self.get_batch_shape_dim_tags()))
    return matching_dim_tags[0]
def get_batch_axis_excluding_batch(self, axis):
    """Translate an axis counted with batch into one counted without batch (via module helper)."""
    return _get_axis_wo_b(axis, batch_dim_axis=self.batch_dim_axis, batch_ndim=self.batch_ndim)
def have_dim_tag(self, tag, include_implicit=True, unique=False):
    """Whether `tag` occurs among the dims (optionally incl. implicit dims); with unique, exactly once."""
    candidates = list(self.dim_tags)
    if include_implicit:
        candidates.extend(self.dim_tags_set_implicit_only)
    count = sum(1 for dim in candidates if dim == tag)
    return count == 1 if unique else count >= 1
def get_batch_axis(self, axis):
    """Translate an axis counted without batch into one counted with batch (via module helper)."""
    return _get_axis_wb(axis, batch_dim_axis=self.batch_dim_axis)
def have_batch_axis(self):
    """Whether a batch axis exists."""
    axis = self.batch_dim_axis
    return axis is not None
def have_time_axis(self):
    """Whether a time axis exists."""
    axis = self.time_dim_axis
    return axis is not None
def have_feature_axis(self):
    """Whether a feature axis exists."""
    axis = self.feature_dim_axis
    return axis is not None
def is_time_axis_dynamic(self):
    """Whether the time axis has per-sequence (dynamic) lengths; requires a time axis."""
    assert (self.time_dim_axis is not None)
    if (self.placeholder is None):
        # template-only: decide from the static shape
        # NOTE(review): indexes batch_shape with the batch-excluded axis — looks odd but kept as-is; confirm against original source
        return (self.batch_shape[self.time_dim_axis_excluding_batch] is None)
    if (self.time_dim_axis_excluding_batch in self.size_placeholder):
        return True
    assert isinstance(self.shape[self.time_dim_axis_excluding_batch], int), ('%s: dynamic time axis dim (None) (axis %i) but size_placeholder %r misses information' % (self, self.time_dim_axis, self.size_placeholder))
    return False
def is_axis_dynamic(self, axis):
    """Whether the given axis has dynamic size; the batch axis is never counted as dynamic here."""
    return axis != self.batch_dim_axis and self.batch_shape[axis] is None
def has_dynamic_size(self, axis):
    """Whether the dim tag at `axis` carries a dyn-size tensor."""
    tag = self.dim_tags[axis]
    return tag.dyn_size is not None
def get_dynamic_size(self, axis):
    """Dyn-size tensor of the dim tag at `axis`; asserts it exists."""
    tag = self.dim_tags[axis]
    assert (tag.dyn_size is not None), ('%s: axis %i has no dyn size' % (self, axis))
    return tag.dyn_size
def set_dynamic_size(self, axis, sizes):
    """
    Assign `sizes` as the dynamic size of `axis`, resolving/creating the matching dim tag
    (beam- and batch-consistent) and replacing the dim tag in self._dims if needed.
    """
    if (getattr(sizes, '_RETURNN_dyn_size_beam', NotSpecified) is NotSpecified):
        # remember which beam the sizes belong to, directly on the size tensor
        sizes._RETURNN_dyn_size_beam = self.beam
    if (self.beam and (getattr(sizes, '_RETURNN_dyn_size_beam', None) != self.beam)):
        # sizes come from a different beam: translate via the tag for our batch/ctx
        tag = Dim.get_tag_from_size_tensor(sizes)
        assert (tag and self.batch)
        tag = tag.get_for_batch_ctx(batch=self.batch, ctx=self.control_flow_ctx)
        assert (tag.dyn_size is not None)
        sizes = tag.dyn_size
    sizes_tag = Dim.get_tag_from_size_tensor(sizes)
    if sizes_tag:
        assert sizes_tag.is_same_size_tensor(sizes)
    tag = self._dims[axis]
    assert tag.is_dynamic()
    if tag.is_same_size_tensor(sizes):
        pass  # nothing to do
    elif (tag.dyn_size is None):
        if sizes_tag:
            # another tag already owns this size tensor: use it
            assert sizes_tag.is_same_size_tensor(sizes)
            tag = sizes_tag
        else:
            # register the size tensor on our tag
            tag = tag.set_tag_on_size_tensor(sizes, batch=self.batch)
    else:
        assert sizes_tag, ('%s: assign dyn sizes %s without defined dim tag' % (self, sizes))
        tag = sizes_tag
    if self.batch:
        tag = tag.get_for_batch_ctx(batch=self.batch, ctx=self.control_flow_ctx)
    if (tag is not self._dims[axis]):
        self._dims = ((self._dims[:axis] + (tag,)) + self._dims[(axis + 1):])
    if (tag.dyn_size is None):
        tag.dyn_size = sizes
def get_dynamic_axes(self):
    """Axis indices (excluding the batch axis) whose static dim is None (dynamic)."""
    batch_axis = self.batch_dim_axis
    return [axis for axis, size in enumerate(self.batch_shape) if axis != batch_axis and size is None]
def get_static_axes(self):
    """Axis indices (excluding the batch axis) whose static dim is known (not None)."""
    batch_axis = self.batch_dim_axis
    return [axis for axis, size in enumerate(self.batch_shape) if axis != batch_axis and size is not None]
def mark_same_time(self, tags, must_match=False):
    """
    If any of our dim tags is in `tags` (a Dim or a set of Dims), mark that axis as the
    time axis and return True; else return False (or raise when must_match).
    """
    if isinstance(tags, Dim):
        tags = {tags}
    assert all((isinstance(tag, Dim) for tag in tags))
    for (axis, dim_tag) in enumerate(self.dim_tags):
        if (dim_tag in tags):
            self.time_dim_axis = axis
            return True
    if must_match:
        raise Exception(('%s mark_same_time: %s not found' % (self, tags)))
    return False
def is_same_time_dim(self, other: Tensor) -> bool:
    """Whether `other` has a time axis with the same dim tag as ours; requires our time axis."""
    assert self.have_time_axis()
    if not other.have_time_axis():
        return False
    return self.get_dim_tag(self.time_dim_axis) == other.get_dim_tag(other.time_dim_axis)
def get_sequence_lengths(self) -> _t.RawTensorType:
    """Return the raw sequence-length tensor for the time axis.

    Prefers the time dim tag's ``dyn_size_ext``; for a static time dim,
    falls back to filling a batch-sized tensor with the static size.
    Requires :attr:`time_dim_axis` to be set.
    """
    assert (self.time_dim_axis is not None)
    dim = self._dims[self.time_dim_axis]
    assert isinstance(dim, Dim)
    if dim.dyn_size_ext:
        # Dynamic time dim: make sure the raw lengths are materialized.
        if (dim.dyn_size_ext.raw_tensor is None):
            dim.complete_dyn_size()
        assert (dim.dyn_size_ext.raw_tensor is not None)
        return dim.dyn_size_ext.raw_tensor
    # Static time dim: every sequence has length dim.size.
    assert (self.batch_shape[self.time_dim_axis] is not None)
    assert (self.batch_dim_axis is not None)
    batch_dim_ = self._dims[self.batch_dim_axis]
    assert isinstance(batch_dim_, Dim)
    if (batch_dim_.dyn_size_ext and (batch_dim_.dyn_size_ext.raw_tensor is not None)):
        # Use the batch dim's backend to build [batch]-shaped constant lengths.
        backend = batch_dim_.dyn_size_ext._raw_backend
        return backend.fill_raw([batch_dim_.dyn_size_ext.raw_tensor], dim.size)
    # Last resort: TF graph-mode fallback via tf.fill.
    import tensorflow as tf
    return tf.fill([self.get_batch_dim()], dim.size)
def get_sequence_mask(self):
    """Return a boolean raw mask over (time, batch) or (batch, time).

    Layout follows :attr:`is_time_major`; requires both a time and a batch axis,
    and only supports the two standard layouts asserted below.
    """
    from returnn.frontend._backend import get_backend_by_raw_tensor_type
    assert self.time_dim_axis is not None
    assert self.batch_dim_axis is not None
    seq_lens = self.get_sequence_lengths()
    backend = get_backend_by_raw_tensor_type(type(seq_lens))
    if self.is_time_major:
        # (time, batch) layout
        assert self.batch_dim_axis == 1
        return backend.sequence_mask_raw(seq_lens, batch_major=False)
    # (batch, time) layout
    assert self.batch_dim_axis == 0
    assert self.time_dim_axis == 1
    return backend.sequence_mask_raw(seq_lens, batch_major=True)
def get_sequence_mask_broadcast(self: Tensor, axis=None) -> _t.RawTensorType:
    """Return a raw boolean mask for `axis`, reshaped to broadcast against self.

    :param axis: a Dim, an int axis (possibly negative), or None for the time axis
    :return: raw mask with rank == batch_ndim, size 1 on all other axes
    """
    if isinstance(axis, Dim):
        axis = self.get_axis_from_description(axis)
    if (axis is None):
        assert (self.time_dim_axis is not None)
        axis = self.time_dim_axis
    if (axis < 0):
        # Normalize negative axis.
        assert ((axis + self.batch_ndim) > 0)
        axis += self.batch_ndim
    assert (0 <= axis < self.batch_ndim)
    assert (axis != self.batch_dim_axis)
    tag: Dim = self.dim_tags[axis]
    assert (tag.dyn_size_ext and (tag.dyn_size_ext.raw_tensor is not None))
    backend = tag.dyn_size_ext._raw_backend
    # The lengths may only depend on dims we also have, else broadcasting is ill-defined.
    assert set(tag.dyn_size_ext.dim_tags).issubset(self.dim_tags)
    with backend.name_scope_raw('get_sequence_mask_broadcast'):
        if (backend.have_sequence_mask_raw() and tag.dyn_size_ext.have_batch_axis() and (tag.dyn_size_ext.batch_ndim == 1)):
            # Fast path: lengths are a plain [batch] vector; build the 2D mask
            # directly and reshape with size-1 axes everywhere else.
            size = tag.dyn_size
            seq_mask = backend.sequence_mask_raw(size, batch_major=(axis >= self.batch_dim_axis))
            shape = ([1] * self.batch_ndim)
            shape[self.batch_dim_axis] = self.get_batch_dim()
            shape[axis] = tag.get_dim_value()
            seq_mask = backend.reshape_raw(seq_mask, shape)
            assert (seq_mask.get_shape().ndims == self.batch_ndim)
        else:
            # Generic path: build a mask Tensor and make it raw-compatible to our dims.
            seq_mask = self.get_sequence_mask_tensor(axis).copy_compatible_to_dims_raw(self.dims)
    return seq_mask
def get_sequence_mask_tensor(self: Tensor, axis: int) -> Tensor:
    """Return the boolean mask Tensor for the dim tag at `axis`.

    :param axis: axis counted with batch dim; may be negative
    """
    ndim = self.batch_ndim
    if axis < 0:
        assert axis + ndim > 0
        axis += ndim
    assert 0 <= axis < ndim
    assert axis != self.batch_dim_axis
    dim_tag: Dim = self.dim_tags[axis]
    return dim_tag.get_mask(dim_order=self.dims, device=self.device)
def get_sequence_lengths_broadcast(self, axis=None):
    """Return raw seq lengths for `axis`, made broadcast-compatible to self.

    :param axis: int axis (possibly negative) or None for the time axis
    """
    if axis is None:
        assert self.time_dim_axis is not None
        axis = self.time_dim_axis
    ndim = self.batch_ndim
    if axis < 0:
        assert axis + ndim > 0
        axis += ndim
    assert 0 <= axis < ndim
    assert axis != self.batch_dim_axis
    tag = self.dim_tags[axis]
    assert tag.dyn_size_ext
    compat = tag.dyn_size_ext.copy_compatible_to(self, check_dtype=False, check_sparse=False)
    return compat.placeholder
def num_elements(self: Tensor) -> Union[(int, Tensor)]:
    """Return the total number of elements (product over all dim sizes)."""
    from returnn.frontend import num_elements_of_shape
    return num_elements_of_shape(self.dims)
def copy_masked(self: Tensor, mask_value: Union[(Tensor, float, int, _t.RawTensorType)], *, dims: Optional[Sequence[Union[(Dim, int)]]]=None, allow_int: bool=NotSpecified) -> Tensor:
    """Return a copy where positions beyond the dynamic sizes are set to `mask_value`.

    :param mask_value: fill value for the padded positions
    :param dims: which dims to mask; default: all dims that need masking
    :param allow_int: passed through to axis resolution when `dims` is given
    """
    assert (self.raw_tensor is not None)
    if (dims is None):
        axes = range(self.batch_ndim)
    else:
        axes = [self.get_axis_from_description(dim, allow_int=allow_int) for dim in dims]
        assert (len(set(axes)) == len(dims)), f'{self} copy_masked, dims {dims} not unique, axes {axes}'
    # Keep only axes that actually need masking and whose seq lengths
    # depend solely on dims we have ourselves.
    axes_ = []
    for axis in axes:
        tag: Dim = self.dims[axis]
        if (not tag.need_masking()):
            continue
        if set(tag.dyn_size_ext.dim_tags).issubset(self.dim_tags):
            axes_.append(axis)
    axes = axes_
    if (not axes):
        return self.copy()  # nothing to mask
    use_padding_info = False
    tf_util = None
    if self._raw_backend.is_tensorflow:
        # TF keeps per-tensor padding metadata; if everything is already padded
        # with this exact value, we can skip the masking entirely.
        import returnn.tf.util.basic as tf_util
        use_padding_info = isinstance(mask_value, (int, float))
        if use_padding_info:
            d = tf_util.get_padding_info_dict_ref(self.raw_tensor)
            existing_pad_values = [d.get(self.dim_tags[axis]) for axis in axes]
            if (set(existing_pad_values) == {mask_value}):
                return self.copy()  # already masked with mask_value
    import returnn.frontend as rf
    # Combine the per-axis masks with logical AND.
    mask = None
    for axis in axes:
        mask_ = self._dims[axis].get_mask(dim_order=self.dims, device=self.device)
        mask = (rf.logical_and(mask, mask_) if (mask is not None) else mask_)
    assert isinstance(mask, _t.Tensor)
    res = rf.where(mask, self, mask_value)
    if use_padding_info:
        # Record the new padding value in the TF padding metadata.
        d = tf_util.get_padding_info_dict_ref(res.raw_tensor)
        d.clear()
        d.update({self.dim_tags[axis]: mask_value for axis in axes})
    return res
def get_batch_dim(self) -> Union[(_t.RawTensorType, int)]:
    """Return the batch dim size: an int if static, else a raw scalar tensor.

    Falls back to the most recent TF layer's batch info when :attr:`batch`
    is not set. Requires a batch axis.
    """
    assert (self.batch_dim_axis is not None)
    if self.batch:
        if self.beam:
            # Batch info and our own beam must agree.
            assert (self.batch.beam == self.beam)
        dim = self.batch.dim
        if (not isinstance(dim, int)):
            # Dynamic batch dim: attach our batch dim tag to the size tensor.
            batch_dim_ = self.dim_tags[self.batch_dim_axis]
            batch_dim_.set_tag_on_size_tensor(dim, batch=self.batch)
        return dim
    # No batch info set: query it from the current TF layer context.
    from returnn.tf.layers.base import LayerBase
    batch = LayerBase.get_recent_layer().get_batch_info()
    batch = batch.copy_set_beam(self.beam)
    return batch.dim
def get_batch_dim_tag(self):
    """Return the Dim tag at the batch axis. Requires a batch axis."""
    assert self.have_batch_axis()
    batch_axis = self.batch_dim_axis
    return self.dim_tags[batch_axis]
def get_static_batch_dim(self):
    """Return the static batch dim size, or None if dynamic/unknown."""
    batch_info = self.batch
    if batch_info:
        return batch_info.static_dim
    if not self.have_batch_axis():
        return None
    return self.get_batch_dim_tag().dimension
def get_spatial_batch_axes(self):
    """Return all spatial axes (counted with batch dim).

    Excludes the batch axis, and the feature axis unless it is also the
    time axis or dynamically sized.
    """
    result = []
    for axis in range(self.batch_ndim):
        if axis == self.batch_dim_axis:
            continue
        is_static_feature = (
            axis == self.feature_dim_axis
            and axis != self.time_dim_axis
            and self.batch_shape[axis] is not None
        )
        if is_static_feature:
            continue
        result.append(axis)
    return result
def get_spatial_axes(self):
    """Return the spatial axes, counted without the batch dim."""
    convert = self.get_batch_axis_excluding_batch
    spatial = self.get_spatial_batch_axes()
    return [convert(axis) for axis in spatial]
def get_feature_batch_axes(self):
    """Return [feature axis] counted with batch dim, or [] if there is none."""
    feature_axis = self.feature_dim_axis
    if feature_axis is None:
        return []
    return [feature_axis]
def get_feature_axes(self):
    """Return the feature axes, counted without the batch dim."""
    convert = self.get_batch_axis_excluding_batch
    return [convert(axis) for axis in self.get_feature_batch_axes()]
# Names of the "special" axis attributes this class tracks explicitly.
SpecialAxesNames = ('time_dim_axis', 'feature_dim_axis')

def get_special_axes_dict(self, counted_with_batch_dim=True, only_available=False):
    """Return a dict mapping special-axis names to their axis indices.

    :param bool counted_with_batch_dim: if False, convert to axes without batch dim
    :param bool only_available: drop None entries; also drop an unspecified feature axis
    :rtype: dict[str, int|None]
    """
    result = {name: getattr(self, name) for name in list(self.SpecialAxesNames)}
    if not counted_with_batch_dim:
        result = {
            name: (None if axis is None else self.get_batch_axis_excluding_batch(axis))
            for name, axis in result.items()
        }
    if only_available:
        result = {name: axis for name, axis in result.items() if axis is not None}
        if self.feature_dim_axis_or_unspecified is NotSpecified:
            # Feature axis was never explicitly set; don't report it.
            result.pop('feature_dim_axis', None)
    return result
def get_bc_spatial_batch_shape(self):
    """Return the batch shape with spatial and batch axes collapsed to 1 (broadcast form)."""
    broadcast_axes = set(self.get_spatial_batch_axes())
    if self.batch_dim_axis is not None:
        broadcast_axes.add(self.batch_dim_axis)
    shape = []
    for axis, dim in enumerate(self.batch_shape):
        shape.append(1 if axis in broadcast_axes else dim)
    return tuple(shape)
def get_bc_shape(self, opts=None):
    """Return a broadcast shape built from `opts` applied over the spatial-broadcast shape.

    :param opts: dict mapping axis descriptions to one of
        -1/None (keep original dim), 1/'x' (broadcast), with '*' as default key
    :return: tuple of dims, length batch_ndim
    """
    if (opts is None):
        opts = {}
    # Start from the spatial/batch-collapsed shape as the default per axis.
    default_axes_map = dict(enumerate(self.get_bc_spatial_batch_shape()))
    axes_map = {}
    for (key, value) in opts.items():
        assert (value in ((- 1), 1, 'x', None)), ('%r get_bc_shape: invalid value in opts %r' % (self, opts))
        if (value == 'x'):
            value = 1  # 'x' means broadcast
        if (value == (- 1)):
            value = None  # -1 means keep the original dim
        key_axes = self.get_axes_from_description(key)
        for key_axis in key_axes:
            assert (key_axis not in axes_map), ('%r get_bc_shape: axis %i is defined multiple times in opts %r' % (self, key_axis, opts))
            assert (0 <= key_axis < self.batch_ndim), ('%r get_bc_shape: invalid axis %i in opts %r' % (self, key_axis, opts))
            # '*' updates the defaults; any other key pins the axis explicitly.
            (axes_map if (key != '*') else default_axes_map)[key_axis] = (self.batch_shape[key_axis] if (value is None) else value)
    # Fill all axes not explicitly set from the defaults.
    remaining_axes = sorted(set(range(self.batch_ndim)).difference(axes_map.keys()))
    for axis in remaining_axes:
        axes_map[axis] = default_axes_map[axis]
    assert (sorted(axes_map.keys()) == list(range(self.batch_ndim)))
    return tuple([axes_map[i] for i in range(self.batch_ndim)])
def get_scope_name(self):
    """Derive the TF scope name from the placeholder (or any size placeholder) name.

    :rtype: str|None
    """
    if self.placeholder is not None:
        return os.path.dirname(self.placeholder.name)
    if not self.size_placeholder:
        return None
    for _idx, size in sorted(self.size_placeholder.items()):
        if size is None:
            continue
        return os.path.dirname(size.name)
    return None
def get_full_name(self):
    """Return "<scope>/<name>", or just the name if no scope is known."""
    scope = self.get_scope_name()
    if not scope:
        return self.name
    return '%s/%s' % (scope, self.name)
def get_dim_tag(self, axis):
    """Return the Dim tag at `axis` (counted with batch dim)."""
    dims = self._dims
    return dims[axis]
def get_time_dim_tag(self):
    """Return the Dim tag of the time axis. Requires a time axis."""
    time_axis = self.time_dim_axis
    assert time_axis is not None
    return self.get_dim_tag(time_axis)
def get_dyn_size_tags(self):
    """Return all dim tags with dynamic sequence lengths, in axis order."""
    dynamic = []
    for tag in self._dims:
        if tag.is_dynamic_seq_length():
            dynamic.append(tag)
    return dynamic
def get_size_dim_tag(self, number):
    """Return the `number`-th dynamic-seq-length dim tag (axis order)."""
    dynamic_tags = self.get_dyn_size_tags()
    return dynamic_tags[number]
def get_batch_shape_dim_tags(self):
    """Return all dim tags, one per axis (alias for :attr:`dim_tags`)."""
    tags = self.dim_tags
    return tags
def get_common_data(cls, sources: List[Tensor], ignore_feature_dim=False, allow_broadcast_all_sources=NotSpecified, name=None) -> Optional[Tensor]:
    """Return a template Tensor covering the union of all dims of `sources`.

    NOTE(review): takes `cls` — presumably decorated with @classmethod in the
    original file (decorator not visible in this chunk); TODO confirm.

    :param sources: tensors to combine
    :param ignore_feature_dim: relax dim-tag equality w.r.t. feature dims
    :param allow_broadcast_all_sources: behavior when all sources broadcast
    :param name: name for the resulting template
    :return: common template, or None if `sources` is empty
    """
    from returnn.util import BehaviorVersion
    if (not sources):
        return None
    assert sources
    if (len(sources) == 1):
        return sources[0].copy_template()
    max_ndim = max([s.batch_ndim for s in sources])
    # Merge batch info from all sources that have one.
    if any((src.batch for src in sources)):
        from returnn.tf.util.data import BatchInfo
        common_batch = BatchInfo.get_common_batch_info([src.batch for src in sources if src.batch])
    else:
        common_batch = None
    # Start from (a copy of) the source with the most axes.
    common = [s for s in sources if (s.batch_ndim == max_ndim)][0]
    common = common.copy_template(name=name)
    common.beam = None
    if common_batch:
        common.batch = common_batch.copy_set_beam(None)
    if any([s.beam for s in sources]):
        from returnn.tf.util.data import SearchBeam
        common.beam = SearchBeam.get_combined_beam(*[s.beam for s in sources])
    is_equal_opts = dict(ignore_feature_dim=ignore_feature_dim, treat_feature_as_spatial=True, allow_same_spatial_dim=True, undefined_matches=True, derived_matches=True)
    if (BehaviorVersion.get() < 11):
        # Old behavior: size-1 dims may match anything.
        is_equal_opts['broadcast_matches'] = True
    (all_dim_tags, tags_dict) = Dim.get_all_dimension_tags(sources, is_equal_opts=is_equal_opts)
    # Add or replace dims so `common` carries every tag of every source.
    for dim_tag in all_dim_tags:
        common_tag = Dim.get_existing_tag_from_collection(dim_tag, common.dim_tags, is_equal_opts=is_equal_opts)
        if common_tag:
            if (dim_tag != common_tag):
                # Equal under relaxed opts but not identical: prefer the source tag.
                axis = common.dim_tags.index(common_tag)
                common = common.copy_template_replace_dim_tag(axis=axis, new_dim_tag=dim_tag)
        else:
            axis = common.get_default_new_axis_for_dim_tag(dim_tag)
            common = common.copy_add_dim_by_tag(dim_tag, unbroadcast=True, axis=axis)
    if all(((s.batch_ndim < common.batch_ndim) for s in sources)):
        # Every source requires broadcasting: validate that this is allowed.
        from returnn.util.basic import validate_broadcast_all_sources
        validate_broadcast_all_sources(allow_broadcast_all_sources=allow_broadcast_all_sources, inputs=sources, common=common)
    return common
def find_matching_dims(self: Tensor, dim_tag: Dim, is_equal_opts) -> List[int]:
    """Return all axes whose dim tag equals `dim_tag` under `is_equal_opts`."""
    matches = []
    for axis in range(self.batch_ndim):
        if self.get_dim_tag(axis).is_equal(dim_tag, **is_equal_opts):
            matches.append(axis)
    return matches
def find_matching_dim_map(self: Tensor, other: Tensor, other_axes, is_equal_opts=None) -> Dict[(int, int)]:
    """Map each axis in `other_axes` (of `other`) to a unique axis of `self`.

    Tries progressively more permissive dim-tag equality options until every
    axis can be matched; raises (via assert) if an axis has no match.

    :param other: the tensor whose axes should be mapped onto self
    :param other_axes: axes of `other` to map
    :param is_equal_opts: base equality options (defaulted below if None)
    :return: dict other_axis -> self_axis
    """
    if (is_equal_opts is None):
        is_equal_opts = dict(allow_same_feature_dim=True, allow_same_spatial_dim=True, treat_feature_as_spatial=True)

    def map_other_axis_to_self(other_axis: int, taken_self_axes: Set[int]) -> int:
        # Find a not-yet-taken self axis for this other axis, escalating the
        # equality options from strict ({}), to the given opts, to broadcast
        # matching, to unknown-spatial matching.
        other_axis_dim_tag = other.dims[other_axis]
        is_equal_opts_ = None
        matching = None
        for opt in [{}, is_equal_opts, 'broadcast_matches', 'unknown_spatial_matches']:
            if isinstance(opt, dict):
                is_equal_opts_ = opt.copy()
            elif isinstance(opt, str):
                if (opt in is_equal_opts_):
                    continue  # already enabled in a previous round
                is_equal_opts_[opt] = True
            matching = [self_axis for self_axis in self.find_matching_dims(other_axis_dim_tag, is_equal_opts_) if (self_axis not in taken_self_axes)]
            if (opt == 'unknown_spatial_matches'):
                # The most permissive level must not be ambiguous.
                assert (len(matching) <= 1), ('cannot match axes %s from %s to %s, failed at other %s, not unique after %s' % (other_axes, other, self, other_axis, opt))
            if matching:
                break
        assert matching, ('cannot match the axes %s from %s to %s. Failing at axis %s, tag %s' % (other_axes, other, self, other_axis, other.dim_tags[other_axis]))
        if (len(matching) == 1):
            return matching[0]
        # Multiple candidates: prefer identical tag objects, then by match_priority.
        max_match_priority = max((dim.match_priority for dim in self.dims))
        return max(matching, key=(lambda ax: ((max_match_priority + 1) if (self.dims[ax] is other_axis_dim_tag) else self.dims[ax].match_priority)))

    other_to_self_mapping = {}
    for axis in other_axes:
        # Already-mapped self axes may not be reused.
        other_to_self_mapping[axis] = map_other_axis_to_self(axis, set(other_to_self_mapping.values()))
    assert (len(other_to_self_mapping) == len(other_axes)), 'other_axes may not contain duplicates'
    return other_to_self_mapping
def is_valid_in_current_graph(self: _t.Tensor) -> bool:
    """Return whether the raw tensor (if any) belongs to the current graph."""
    if self._raw_tensor is None:
        return True  # no raw tensor attached: trivially valid
    backend = self._raw_backend
    return backend.is_valid_in_current_graph(self)
def mark_as_loss(self: Tensor, name: str, *, scale: Optional[float]=1.0, as_error: bool=False, use_normalized_loss: bool=False, use_flatten_frames: bool=True, custom_inv_norm_factor: Optional[Tensor]=None) -> None:
    """Register this tensor as a named loss (or error) in the current run context."""
    import returnn.frontend as rf
    run_ctx = rf.get_run_ctx()
    run_ctx.mark_as_loss(
        loss=self,
        name=name,
        scale=scale,
        as_error=as_error,
        use_normalized_loss=use_normalized_loss,
        use_flatten_frames=use_flatten_frames,
        custom_inv_norm_factor=custom_inv_norm_factor,
    )
def mark_as_output(self: Tensor, name: str, *, shape: Optional[Sequence[Dim]]=None) -> None:
    """Register this tensor as a named output in the current run context."""
    import returnn.frontend as rf
    run_ctx = rf.get_run_ctx()
    run_ctx.mark_as_output(self, name=name, dims=shape)
def mark_as_default_output(self: Tensor, *, shape: Optional[Sequence[Dim]]=None) -> None:
    """Register this tensor as the default output in the current run context."""
    import returnn.frontend as rf
    run_ctx = rf.get_run_ctx()
    run_ctx.mark_as_default_output(self, shape=shape)
def procedure(xloader, teacher, network, criterion, scheduler, optimizer, mode, config, extra_info, print_freq, logger):
    """Run one epoch of knowledge-distillation training or evaluation.

    :param xloader: data loader yielding (inputs, targets)
    :param teacher: frozen teacher network (always eval mode)
    :param network: student network
    :param criterion: base classification criterion
    :param scheduler: LR scheduler (updated per-batch in train mode)
    :param optimizer: student optimizer
    :param mode: 'train' or 'valid'
    :param config: KD config (KD_alpha, KD_temperature, optional auxiliary)
    :param extra_info: string shown in log lines
    :param print_freq: log every `print_freq` batches
    :param logger: object with a .log(str) method
    :return: (avg loss, avg student top-1, avg student top-5)
    """
    (data_time, batch_time, losses, top1, top5) = (AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter())
    (Ttop1, Ttop5) = (AverageMeter(), AverageMeter())  # teacher accuracy meters
    if (mode == 'train'):
        network.train()
    elif (mode == 'valid'):
        network.eval()
    else:
        raise ValueError('The mode is not right : {:}'.format(mode))
    teacher.eval()  # teacher is frozen in both modes
    logger.log('[{:5s}] config :: auxiliary={:}, KD :: [alpha={:.2f}, temperature={:.2f}]'.format(mode, (config.auxiliary if hasattr(config, 'auxiliary') else (- 1)), config.KD_alpha, config.KD_temperature))
    end = time.time()
    for (i, (inputs, targets)) in enumerate(xloader):
        if (mode == 'train'):
            # Fractional-epoch LR update per batch.
            scheduler.update(None, ((1.0 * i) / len(xloader)))
        data_time.update((time.time() - end))
        targets = targets.cuda(non_blocking=True)
        if (mode == 'train'):
            optimizer.zero_grad()
        (student_f, logits) = network(inputs)
        # Network may return [main_logits, aux_logits] (deep supervision).
        if isinstance(logits, list):
            assert (len(logits) == 2), 'logits must has {:} items instead of {:}'.format(2, len(logits))
            (logits, logits_aux) = logits
        else:
            (logits, logits_aux) = (logits, None)
        with torch.no_grad():
            # Teacher forward: no gradients needed.
            (teacher_f, teacher_logits) = teacher(inputs)
        loss = loss_KD_fn(criterion, logits, teacher_logits, student_f, teacher_f, targets, config.KD_alpha, config.KD_temperature)
        if ((config is not None) and hasattr(config, 'auxiliary') and (config.auxiliary > 0)):
            # Add auxiliary-head loss, weighted by config.auxiliary.
            loss_aux = criterion(logits_aux, targets)
            loss += (config.auxiliary * loss_aux)
        if (mode == 'train'):
            loss.backward()
            optimizer.step()
        # Student metrics.
        (sprec1, sprec5) = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(sprec1.item(), inputs.size(0))
        top5.update(sprec5.item(), inputs.size(0))
        # Teacher metrics (reference only).
        (tprec1, tprec5) = obtain_accuracy(teacher_logits.data, targets.data, topk=(1, 5))
        Ttop1.update(tprec1.item(), inputs.size(0))
        Ttop5.update(tprec5.item(), inputs.size(0))
        batch_time.update((time.time() - end))
        end = time.time()
        if (((i % print_freq) == 0) or ((i + 1) == len(xloader))):
            Sstr = ((' {:5s} '.format(mode.upper()) + time_string()) + ' [{:}][{:03d}/{:03d}]'.format(extra_info, i, len(xloader)))
            if (scheduler is not None):
                Sstr += ' {:}'.format(scheduler.get_min_info())
            Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
            Lstr = 'Loss {loss.val:.3f} ({loss.avg:.3f})  {top1.val:.2f} ({top1.avg:.2f}) {top5.val:.2f} ({top5.avg:.2f})'.format(loss=losses, top1=top1, top5=top5)
            Lstr += ' Teacher : ={:.2f}, ={:.2f}'.format(Ttop1.avg, Ttop5.avg)
            Istr = 'Size={:}'.format(list(inputs.size()))
            logger.log(((((((Sstr + ' ') + Tstr) + ' ') + Lstr) + ' ') + Istr))
    # Epoch summary: student-vs-teacher accuracy gap, then final averages.
    logger.log(' **{:5s}** accuracy drop :: ={:.2f}, ={:.2f}'.format(mode.upper(), (Ttop1.avg - top1.avg), (Ttop5.avg - top5.avg)))
    logger.log(' **{mode:5s}** {top1.avg:.2f} {top5.avg:.2f} {error1:.2f} {error5:.2f} Loss:{loss:.3f}'.format(mode=mode.upper(), top1=top1, top5=top5, error1=(100 - top1.avg), error5=(100 - top5.avg), loss=losses.avg))
    return (losses.avg, top1.avg, top5.avg)
def test_encoder():
    """Build dummy image features and batched question tensors.

    NOTE(review): the original has no assertions and no encoder call here —
    it only constructs fixtures; presumably the rest of the test lives
    elsewhere or was truncated — TODO confirm.
    """
    image_features = torch.randn(4, 36, 2048)
    seq_len = 20
    question = torch.randperm(seq_len).view(1, seq_len)
    question = question.unsqueeze(1).repeat(4, 10, 1)
    question_lengths = torch.LongTensor([6, 5, 4, 3]).unsqueeze(1).repeat(1, 10)
def test_unmatched_lengths_2d_np_array():
    """precision() must raise AssertionError when y_true and y_pred differ in row count."""
    truth_rows = [[1, 2, 3], [1, 2, 4], [1, 5, 6], [1, 5, 8]]
    pred_rows = truth_rows[:2]
    with pytest.raises(AssertionError):
        precision(np.array(truth_rows), np.array(pred_rows))
class CarlaEngine():
    """Drives a CARLA simulator instance mirroring actors from a SUMO traffic
    manager (co-simulation bridge).

    Fixes vs. the previous revision:
      * ``subprocess.Popen(cmd_list, shell=True)`` on POSIX passes the list's
        extra items to the shell as positional parameters instead of to CARLA,
        silently dropping every CLI flag (including the RPC port). The process
        is now spawned without ``shell=True``.
      * ``_get_sumo_actor`` referenced an undefined name ``color`` (NameError
        on every call); it is now resolved from the actor state when present.
      * ``open(os.devnull, 'w')`` leaked a file handle; ``subprocess.DEVNULL``
        is used instead.
    """

    def __init__(self, config, traffic_manager, carla_observers):
        """
        :param config: experiment config; `simulation` and `rendering` sections are used.
        :param traffic_manager: SUMO-side manager exposing actor ids and states.
        :param carla_observers: observer specs for sensors attached to the ego vehicle.
        """
        self._running = False
        self.config = config
        self._traffic_manager = traffic_manager
        self._carla_process = None
        self._carla_simulation = None
        self._carla_sensors = []
        self._carla_actors = {}  # maps SUMO actor id -> CARLA actor id
        self._carla_observers = carla_observers
        self.world = None

    def reset(self, scenario):
        """(Re)start CARLA if needed, clear previous actors/sensors, then load
        the scenario's OpenDRIVE map and enable synchronous stepping."""
        if not self._running:
            def is_used(port):
                # True if any local connection already binds this port.
                return port in [conn.laddr.port for conn in psutil.net_connections()]

            # Pick a free port pair (CARLA uses rpc-port and rpc-port+1).
            server_port = random.randint(15000, 32000)
            while is_used(server_port) or is_used(server_port + 1):
                server_port += 2
            carla_executable = self.config.simulation.carla_path
            if self.config.rendering.human_mode:
                carla_start_cmd = [carla_executable, '-windowed', '-ResX={}'.format(600), '-ResY={}'.format(600)]
            else:
                carla_start_cmd = [carla_executable, '-RenderOffScreen']
            carla_start_cmd += [f'--carla-rpc-port={server_port}', '-quality-level=Epic']
            if os.name != 'nt':
                # BUGFIX: with shell=True and an argument *list*, POSIX shells
                # receive the flags as positional params of sh, not of CARLA.
                # Spawn directly; os.setsid creates a process group so close()
                # can kill the whole tree via killpg.
                self._carla_process = subprocess.Popen(
                    carla_start_cmd, preexec_fn=os.setsid, stdout=subprocess.DEVNULL)
            else:
                self._carla_process = subprocess.Popen(carla_start_cmd)
            time.sleep(10)  # give the simulator time to come up before connecting
            self._carla_simulation = CarlaSimulation('127.0.0.1', server_port, self.config.simulation.dt)
            BridgeHelper.blueprint_library = self._carla_simulation.world.get_blueprint_library()
            self._running = True
        # Tear down everything left over from a previous episode.
        for sensor in self._carla_sensors:
            sensor.destroy()
        for _sumo_id, carla_id in self._carla_actors.items():
            self._carla_simulation.destroy_actor(carla_id)
        self._carla_simulation.tick()
        self._carla_actors = {}
        self._carla_sensors = []
        BridgeHelper.offset = scenario.sumo_net.getLocationOffset()
        if os.path.exists(scenario.xodr_path):
            with open(scenario.xodr_path, encoding='utf-8') as od_file:
                try:
                    data = od_file.read()
                except OSError:
                    print('file could not be read.')
                    sys.exit()
            # OpenDRIVE mesh-generation parameters.
            vertex_distance = 2.0
            max_road_length = 500.0
            wall_height = 0.0
            extra_width = 1.0
            self.world = self._carla_simulation.client.generate_opendrive_world(
                data,
                carla.OpendriveGenerationParameters(
                    vertex_distance=vertex_distance,
                    max_road_length=max_road_length,
                    wall_height=wall_height,
                    additional_width=extra_width,
                    smooth_junctions=True,
                    enable_mesh_visibility=True))
        else:
            raise ValueError
        settings = self._carla_simulation.world.get_settings()
        settings.synchronous_mode = True
        settings.fixed_delta_seconds = self.config.simulation.dt
        settings.substepping = False
        self._carla_simulation.world.apply_settings(settings)

    def close(self):
        """Shut down the simulation connection and kill the CARLA process tree."""
        if self._running:
            self._carla_simulation.close()
            del self._carla_simulation
            if os.name != 'nt':
                # Kill the process group created via os.setsid in reset().
                os.killpg(os.getpgid(self._carla_process.pid), signal.SIGTERM)
            else:
                subprocess.call(['taskkill', '/F', '/T', '/PID', str(self._carla_process.pid)])
            self._carla_simulation = None
            self._carla_process = None
            self._carla_actors = {}
            self._carla_sensors = []
            self._running = False
            logging.info('Closed Carla instance...')

    def simulationStep(self):
        """Mirror SUMO spawn/despawn/state into CARLA and advance one tick.

        :return: True if ticking CARLA failed (engine marked stopped), else False.
        """
        spawned_actors = [x for x in self._traffic_manager.actor_ids if x not in self._carla_actors.keys()]
        destroyed_actors = [x for x in self._carla_actors.keys() if x not in self._traffic_manager.actor_ids]
        for sumo_actor_id in spawned_actors:
            sumo_actor = self._get_sumo_actor(sumo_actor_id)
            if sumo_actor_id == self.config.simulation.egoID:
                # The ego vehicle always uses a fixed blueprint.
                carla_blueprint = self._carla_simulation.world.get_blueprint_library().find('vehicle.audi.a2')
            else:
                carla_blueprint = BridgeHelper.get_carla_blueprint(sumo_actor, False)
            if carla_blueprint is not None:
                carla_transform = BridgeHelper.get_carla_transform(sumo_actor.transform, sumo_actor.extent)
                carla_actor_id = self._carla_simulation.spawn_actor(carla_blueprint, carla_transform)
                if carla_actor_id != INVALID_ACTOR_ID:
                    self._carla_actors[sumo_actor_id] = carla_actor_id
            else:
                print('No blueprint')
                continue
        for sumo_actor_id in destroyed_actors:
            if sumo_actor_id in self._carla_actors:
                self._carla_simulation.destroy_actor(self._carla_actors.pop(sumo_actor_id))
        # Synchronize pose and lights of all mirrored actors.
        for sumo_actor_id in self._carla_actors:
            sumo_actor = self._get_sumo_actor(sumo_actor_id)
            carla_actor_id = self._carla_actors[sumo_actor_id]
            carla_actor = self._carla_simulation.get_actor(carla_actor_id)
            carla_transform = BridgeHelper.get_carla_transform(sumo_actor.transform, sumo_actor.extent)
            carla_lights = BridgeHelper.get_carla_lights_state(carla_actor.get_light_state(), sumo_actor.signals)
            self._carla_simulation.synchronize_vehicle(carla_actor_id, carla_transform, carla_lights)
        # Attach/detach ego sensors as the ego (de)spawns.
        if self.config.simulation.egoID in spawned_actors:
            for observer in self._carla_observers:
                sensor = self._setup_observer(observer)
                self._carla_sensors.append(sensor)
        elif self.config.simulation.egoID in destroyed_actors:
            for sensor in self._carla_sensors:
                sensor.destroy()
            self._carla_sensors = []
        try:
            self._carla_simulation.tick()
        except Exception:
            logging.info('Error ticking Carla instance...')
            self._running = False
            return True
        return False

    def _get_sumo_actor(self, actor_id):
        """Build a SumoActor view of the given traffic-manager actor."""
        actor_state = self._traffic_manager.get_actor_state(actor_id).to_sumo()
        type_id = actor_state.veh_type
        vclass = SumoActorClass('passenger')
        location = actor_state.location
        rotation = actor_state.rotation
        transform = carla.Transform(
            carla.Location(location[0], location[1], location[2]),
            carla.Rotation(rotation[0], rotation[1], rotation[2]))
        signals = actor_state.signals
        extent = carla.Vector3D(*actor_state.extent)
        # BUGFIX: `color` was referenced without ever being defined (NameError
        # on every call). Take it from the actor state when available;
        # None otherwise — TODO confirm against SumoActor's expected format.
        color = getattr(actor_state, 'color', None)
        return SumoActor(type_id, vclass, transform, signals, extent, color)

    def _setup_observer(self, observer: CarlaCameraObserver):
        """Spawn a camera sensor per observer spec, rigidly attached to the ego.

        :return: the spawned (and listening) CARLA sensor actor
        """
        if observer.sensor_type == CarlaSensor.RGBCamera:
            bp_name = 'sensor.camera.rgb'
        elif observer.sensor_type == CarlaSensor.DepthCamera:
            bp_name = 'sensor.camera.depth'
        else:
            raise NotImplementedError
        camera_bp = self._carla_simulation.world.get_blueprint_library().find(bp_name)
        camera_bp.set_attribute('image_size_x', str(observer.width))
        camera_bp.set_attribute('image_size_y', str(observer.height))
        camera_bp.set_attribute('sensor_tick', '0.0')  # capture every simulation tick
        for attr_name, attr_value in observer.attributes.items():
            camera_bp.set_attribute(attr_name, str(attr_value))
        carla_actor = self._carla_simulation.get_actor(self._carla_actors[self.config.simulation.egoID])
        loc = observer.location
        rot = observer.rotation
        camera_transform = carla.Transform(
            carla.Location(loc[0], loc[1], loc[2]),
            carla.Rotation(rot[0], rot[1], rot[2]))
        camera = self._carla_simulation.world.spawn_actor(
            camera_bp, camera_transform, attach_to=carla_actor,
            attachment_type=carla.AttachmentType.Rigid)
        camera.listen(observer.listen)
        return camera
class TestSamplerDeterministic(unittest.TestCase):
    """Determinism checks for TrainingSampler-based data loading."""

    def test_to_iterable(self):
        """ToIterableDataset must produce the full sampled set with 0 and 2 workers."""
        sampler = TrainingSampler(100, seed=10)
        dataset = ToIterableDataset(DatasetFromList(list(range(100))), sampler)
        for workers in (0, 2):
            loader_kwargs = dict(num_workers=workers, collate_fn=operator.itemgetter(0))
            if workers:
                loader_kwargs['worker_init_fn'] = worker_init_reset_seed
            loader = data.DataLoader(dataset, **loader_kwargs)
            produced = list(itertools.islice(loader, 100))
            self.assertEqual(set(produced), set(range(100)))

    def test_training_sampler_seed(self):
        """The sampler captures RNG state at construction, not at iteration time."""
        seed_all_rng(42)
        first = list(itertools.islice(TrainingSampler(30), 65))
        seed_all_rng(42)
        sampler = TrainingSampler(30)
        seed_all_rng(999)  # reseeding after construction must have no effect
        second = list(itertools.islice(sampler, 65))
        self.assertEqual(first, second)
class GELU_SENet(nn.Module):
    """SENet-style CIFAR backbone with a GELU activation after the stem BN.

    :param block: block class, called as block(in_planes, planes, stride)
    :param num_blocks: number of blocks per stage (length 4)
    :param num_classes: classifier output size
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(GELU_SENet, self).__init__()
        self.in_planes = 64
        # Stem: 3x3 conv + BN (module creation order preserved for init determinism).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # Four stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Build one stage: the first block applies `stride`, the rest stride 1."""
        stride_sequence = [stride] + [1] * (num_blocks - 1)
        blocks = []
        for block_stride in stride_sequence:
            blocks.append(block(self.in_planes, planes, block_stride))
            self.in_planes = planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        """Stem -> 4 stages -> 4x4 average pool -> linear classifier."""
        out = F.gelu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)
# NOTE(review): the two lines below look like pytest marker decorators whose
# leading "@pytest.mark" was lost in extraction (likely
# "@pytest.mark.openapi_version('3.0')" and "@pytest.mark.operations('success')")
# — TODO confirm against the original test suite.
.openapi_version('3.0')
.operations('success')
def test_process_call_kwargs(testdir, cli, cli_args, mocker, app_type):
    """A `process_call_kwargs` hook must reach the transport layer:
    real apps go through requests ("allow_redirects"), WSGI apps through
    werkzeug ("follow_redirects")."""
    # NOTE(review): the hook source contains a literal "\\n" before the def —
    # looks like an extraction artifact; kept byte-identical.
    module = testdir.make_importable_pyfile(hook='\nimport schemathesis\nimport requests\n\\ndef process_call_kwargs(context, case, kwargs):\n if case.app is not None:\n kwargs["follow_redirects"] = False\n else:\n kwargs["allow_redirects"] = False\n ')
    # Spy on whichever transport the app type uses.
    if (app_type == 'real'):
        spy = mocker.spy(requests.Session, 'request')
    else:
        spy = mocker.spy(werkzeug.Client, 'open')
    result = cli.main('run', *cli_args, hooks=module.purebasename)
    assert (result.exit_code == ExitCode.OK), result.stdout
    # The hook sets the matching redirect kwarg to False for each transport.
    if (app_type == 'real'):
        assert (spy.call_args[1]['allow_redirects'] is False)
    else:
        assert (spy.call_args[1]['follow_redirects'] is False)
# NOTE(review): the line below appears to be the argument of a mangled
# "@mock.patch('%s.visualize_utils.mmcv' % __name__)" decorator lost in
# extraction — TODO confirm.
(('%s.visualize_utils.mmcv' % __name__))
def test_show_pred_gt(mock_mmcv):
    """show_pred_gt must reject non-list preds/gts, and on valid input draw
    exactly once (mmcv.imshow) and save exactly once (mmcv.imwrite)."""
    preds = [[0, 0, 1, 0, 1, 1, 0, 1]]
    gts = [[0, 0, 1, 0, 1, 1, 0, 1]]
    show = True
    win_name = 'test'
    wait_time = 0
    out_file = tempfile.NamedTemporaryFile().name
    # Non-list inputs must be rejected.
    with pytest.raises(AssertionError):
        visualize_utils.show_pred_gt(np.array([]), gts)
    with pytest.raises(AssertionError):
        visualize_utils.show_pred_gt(preds, np.array([]))
    visualize_utils.show_pred_gt(preds, gts, show, win_name, wait_time, out_file)
    mock_mmcv.imshow.assert_called_once()
    mock_mmcv.imwrite.assert_called_once()
class ResNet_context(nn.Module):
    """ResNet-101 backbone with an ASP-OC context head and per-class conv head.

    :param num_classes: number of output classes for the `cls` head
    :param disable_self_attn: passed through to the ASP_OC_Module
    :param pretrained: load ImageNet weights for the backbone from disk
    """

    def __init__(self, num_classes, disable_self_attn, pretrained):
        # Kept before super().__init__() exactly as in the original.
        self.num_ch_enc = np.array([128, 256, 512, 1024, 2048])
        self.disable_self_attn = disable_self_attn
        super(ResNet_context, self).__init__()
        self.basedir = os.path.dirname(os.path.abspath(__file__))
        self.resnet_model = ResNet(Bottleneck, [3, 4, 23, 3])
        if pretrained:
            weights_path = os.path.join(self.basedir, '../splits/resnet101-imagenet.pth')
            pretrained_weights = torch.load(weights_path)
            model_dict = self.resnet_model.state_dict()
            # Keep only weights whose keys exist in our model.
            filtered = {key: value for key, value in pretrained_weights.items() if key in model_dict}
            self.resnet_model.load_state_dict(filtered)
        self.context = nn.Sequential(
            nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1),
            ABN_module(512),
            ASP_OC_Module(512, 256, disable_self_attn=self.disable_self_attn))
        self.cls = nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True)
        self.dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            ABN_module(512),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True))

    def forward(self, x):
        """Run the backbone, then replace the last feature map with the
        context-enhanced per-class logits."""
        features = self.resnet_model(x)
        context_out = self.context(features[-1])
        features[-1] = self.cls(context_out)
        return features
def level__Tornaria(self):
    """Return |disc| / (omega * content^dim), coerced into the base ring.

    Division order is preserved from the original ((|disc|/omega)/content^dim)
    so inexact base rings round identically.
    """
    value = abs(self.disc()) / self.omega()
    value = value / (self.content() ** self.dim())
    return self.base_ring()(value)
def dump_raw_data(contents, file_path):
    """Write the rows of `contents` to `file_path` as tab-separated values.

    :param contents: iterable of rows (each an iterable of fields)
    :param file_path: destination path (overwritten)

    Fix: open with ``newline=''`` as the csv module requires; without it the
    writer's "\\r\\n" terminator is re-translated on Windows, producing
    "\\r\\r\\n" (spurious blank rows).
    """
    with open(file_path, 'w', newline='') as ouf:
        writer = csv.writer(ouf, delimiter='\t', quotechar='"')
        for line in contents:
            writer.writerow(line)
def get_server_partition_dataset(data_path, data_name, part_id):
    """Load one KG partition and build the global->local entity id mapping.

    :param data_path: root data directory
    :param data_name: dataset name
    :param part_id: partition index
    :return: (global_to_local list, PartitionKGDataset)
    """
    part_name = os.path.join(data_name, 'partition_' + str(part_id))
    path = os.path.join(data_path, part_name)
    if not os.path.exists(path):
        print('Partition file not found.')
        exit()
    train_path = os.path.join(path, 'train.txt')
    local2global_path = os.path.join(path, 'local_to_global.txt')
    relation_path = os.path.join(path, 'relation_count.txt')
    dataset = PartitionKGDataset(relation_path, train_path, local2global_path, read_triple=False)
    n_entities = _file_line(os.path.join(path, 'partition_book.txt'))
    # One global entity id per line, in local-id order.
    with open(local2global_path) as mapping_file:
        local_to_global = [int(line) for line in mapping_file]
    # Invert the mapping into a dense list indexed by global id.
    global_to_local = [0] * n_entities
    for local_id, global_id in enumerate(local_to_global):
        global_to_local[global_id] = local_id
    local_to_global = None
    return (global_to_local, dataset)
def advect():
    """Advect all tracer particles one time step with a 3-stage Runge-Kutta
    scheme (weights 2/9, 1/3, 4/9 — Ralston's RK3).

    NOTE(review): relies on globals `n_tracer`, `tracer`, `dt` and the helper
    `compute_u_full`; presumably a Taichi kernel whose `@ti.kernel` decorator
    was lost in extraction — TODO confirm. Left byte-identical since Taichi
    restricts which Python constructs compile inside kernels.
    """
    for i in range(n_tracer):
        p = tracer[i]
        # Velocity samples: at the particle, and at two intermediate positions.
        v1 = compute_u_full(p)
        v2 = compute_u_full((p + ((v1 * dt) * 0.5)))
        v3 = compute_u_full((p + ((v2 * dt) * 0.75)))
        tracer[i] += (((((2 / 9) * v1) + ((1 / 3) * v2)) + ((4 / 9) * v3)) * dt)
def test_superb_er():
    """End-to-end smoke test of the SuperbER problem: fabricate a tiny
    emotion-recognition dataset from pseudo audio and run a few train steps."""
    with tempfile.TemporaryDirectory() as tempdir:
        with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):

            class TestER(SuperbER):
                def default_config(self) -> dict:
                    # Disable the real data-preparation options; our override below
                    # fabricates the CSVs directly.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
                    # Reuse the same 5 utterances for train/valid/test.
                    # NOTE(review): `target_dir` is used with the `/` operator,
                    # so despite the `str` annotation it is presumably a
                    # pathlib.Path at runtime — TODO confirm.
                    ids = [Path(path).stem for path in wav_paths]
                    labels = ['a', 'b', 'a', 'c', 'd']
                    # None start/end means "use the whole file".
                    start_secs = [0.0, 0.1, 0.2, None, 0.0]
                    end_secs = [5.2, 1.0, 0.3, None, 4.9]
                    df = pd.DataFrame(data={'id': ids, 'wav_path': wav_paths, 'label': labels, 'start_sec': start_secs, 'end_sec': end_secs})
                    train_csv = (target_dir / 'train.csv')
                    valid_csv = (target_dir / 'valid.csv')
                    test_csv = (target_dir / 'test.csv')
                    df.to_csv(train_csv)
                    df.to_csv(valid_csv)
                    df.to_csv(test_csv)
                    return (train_csv, valid_csv, [test_csv])

            problem = TestER()
            # Minimal, CPU-only run: 4 steps with fbank features.
            config = problem.default_config()
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            config['train']['eval_step'] = 2
            config['train']['save_step'] = 2
            config['eval_batch'] = 2
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)
class DenseLayer(nn.Module):
    """DenseNet dense layer: norm1 -> 1x1 bottleneck conv -> norm2 -> 3x3 conv,
    with optional gradient checkpointing of the bottleneck (memory_efficient)."""

    def __init__(self, num_input_features, growth_rate, bn_size, norm_layer=BatchNormAct2d, drop_rate=0.0, memory_efficient=False):
        super(DenseLayer, self).__init__()
        # NOTE(review): each add_module call is wrapped in a one-element tuple;
        # this is a no-op (likely leftover commas from a reformat) — the
        # modules are still registered. Kept byte-identical.
        (self.add_module('norm1', norm_layer(num_input_features)),)
        (self.add_module('conv1', nn.Conv2d(num_input_features, (bn_size * growth_rate), kernel_size=1, stride=1, bias=False)),)
        (self.add_module('norm2', norm_layer((bn_size * growth_rate))),)
        (self.add_module('conv2', nn.Conv2d((bn_size * growth_rate), growth_rate, kernel_size=3, stride=1, padding=1, bias=False)),)
        self.drop_rate = float(drop_rate)
        self.memory_efficient = memory_efficient

    def bottleneck_fn(self, xs):
        # Concatenate all previous feature maps on the channel axis,
        # then apply the 1x1 bottleneck.
        concated_features = torch.cat(xs, 1)
        bottleneck_output = self.conv1(self.norm1(concated_features))
        return bottleneck_output

    def any_requires_grad(self, x):
        # True if any input tensor tracks gradients (checkpointing is useless otherwise).
        for tensor in x:
            if tensor.requires_grad:
                return True
        return False

    # NOTE(review): mangled decorator — presumably "@torch.jit.unused"
    # (excludes this method from TorchScript) — TODO confirm.
    .unused
    def call_checkpoint_bottleneck(self, x):
        # Recompute the bottleneck in backward instead of storing activations.
        def closure(*xs):
            return self.bottleneck_fn(xs)
        return cp.checkpoint(closure, *x)

    # NOTE(review): mangled decorator — presumably "@torch.jit._overload_method";
    # the two empty defs below are TorchScript overload stubs (Tensor vs
    # List[Tensor] input) — TODO confirm.
    ._overload_method
    def forward(self, x):
        pass

    ._overload_method
    def forward(self, x):
        pass

    def forward(self, x):
        # Accept either a single tensor or a list of previous feature maps.
        if isinstance(x, torch.Tensor):
            prev_features = [x]
        else:
            prev_features = x
        if (self.memory_efficient and self.any_requires_grad(prev_features)):
            if torch.jit.is_scripting():
                raise Exception('Memory Efficient not supported in JIT')
            bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
        else:
            bottleneck_output = self.bottleneck_fn(prev_features)
        new_features = self.conv2(self.norm2(bottleneck_output))
        if (self.drop_rate > 0):
            new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
        return new_features
# BUGFIX: the decorator line was mangled to the bare token `.gpu`;
# restore `@pytest.mark.gpu` so the test is only selected on GPU runners.
@pytest.mark.gpu
def test_pythonmode():
    """Run a trivial element-wise kernel on the GPU via DaCe's Python frontend."""

    # BUGFIX: the `@` storage/schedule annotation operators were stripped from
    # the annotations, and the program decorator was lost entirely.
    # NOTE(review): restored as `@dace.program` (the documented public API) —
    # confirm against the original test.
    @dace.program
    def runs_on_gpu(a: dace.float64[20] @ StorageType.GPU_Global,
                    b: dace.float64[20] @ StorageType.GPU_Global):
        # This map is scheduled as a GPU kernel.
        for i in dace.map[0:20] @ ScheduleType.GPU_Device:
            b[i] = a[i] + 1.0

    gpu_a = cupy.random.rand(20)
    gpu_b = cupy.random.rand(20)
    runs_on_gpu(gpu_a, gpu_b)
    assert cupy.allclose(gpu_b, gpu_a + 1)
def extend_phyche_index(original_index, extend_index):
    """Merge `extend_index` into `original_index` in place and return it.

    For every key of `original_index`, the matching value list from
    `extend_index` is appended (keys present only in `extend_index` are
    ignored; a key missing from `extend_index` raises KeyError).  An empty
    `extend_index` leaves `original_index` untouched.
    """
    if len(extend_index) == 0:
        return original_index
    # Snapshot the items so in-place growth cannot disturb iteration.
    for key, values in list(original_index.items()):
        values.extend(extend_index[key])
    return original_index
class ResnetBlock(nn.Module):
    """Residual block computing ``x + conv_block(x)``.

    ``conv_block`` is two pad/conv/norm stages; the first stage may be
    followed by an activation and dropout.
    """

    def __init__(self, dim, kernel_size=1, padding_type='zero', norm_layer=nn.BatchNorm2d,
                 use_dropout=False, use_bias=True, act=None):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(
            dim, kernel_size, padding_type, norm_layer, use_dropout, use_bias, act)

    def build_conv_block(self, dim, kernel_size, padding_type, norm_layer,
                         use_dropout, use_bias, act=nn.GELU()):
        """Assemble the two-stage convolutional sub-network as an nn.Sequential.

        Note: the ``act`` default is a single module instance created at
        definition time; ``__init__`` always passes its own ``act`` (default
        None), so the shared default is only used by direct callers.
        """
        def pad_stage():
            # Map the padding mode to an explicit padding module; 'zero'
            # contributes nothing because the convs below use padding=0.
            if padding_type == 'reflect':
                return [nn.ReflectionPad2d(1)]
            if padding_type == 'replicate':
                return [nn.ReplicationPad2d(1)]
            if padding_type == 'zero':
                return []
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        layers = []
        layers += pad_stage()
        layers.append(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=0, bias=use_bias))
        if norm_layer:
            layers.append(norm_layer(dim, momentum=0.1))
        if act:
            layers.append(act)
        if use_dropout:
            layers.append(nn.Dropout(0.5))
        layers += pad_stage()
        layers.append(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=0, bias=use_bias))
        if norm_layer:
            layers.append(norm_layer(dim, momentum=0.1))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Skip connection around the conv stack.
        return x + self.conv_block(x)
def bert_large_uncased_whole_word_maskings_384_4p_bw12_async_pipedream():
    """Return the static run configuration for the BERT-large SQuAD setup
    (whole-word masking, async PipeDream schedule variant)."""
    return {
        'model_type': 'bert_squad',
        'model_name_or_path': 'bert-large-uncased-whole-word-masking',
        'do_lower_case': True,
        'output_past': False,
        'stateless_tied': False,
        # Options forced directly onto the model config.
        'explicitly_set_dict': {'precompute_attention_mask': True, 'return_dict': False},
        'do_resize_token_embedding': False,
    }
class FrameStack(gym.Wrapper):
    """Observation wrapper that stacks the last `n_frames` observations
    along a new leading axis, so the agent sees short-term history."""

    def __init__(self, env, n_frames):
        super().__init__(env)
        self.n_frames = n_frames
        # deque with maxlen silently drops the oldest frame on append.
        self.frames = deque([], maxlen=n_frames)
        stacked_shape = (n_frames,) + env.observation_space.shape
        self.observation_space = gym.spaces.Box(
            low=np.min(env.observation_space.low),
            high=np.max(env.observation_space.high),
            shape=stacked_shape,
            dtype=env.observation_space.dtype,
        )

    def reset(self):
        # Seed the whole stack with copies of the first observation.
        first_obs = self.env.reset()
        self.frames.extend(first_obs for _ in range(self.n_frames))
        return self._get_ob()

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.frames.append(obs)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        # Stack oldest-to-newest into shape (n_frames, *obs_shape).
        return np.stack(self.frames, axis=0)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.