def set_memory_limit_in_bytes(parser, args, component):
param = (component + '_memory_limit')
limit = getattr(args, param)
if (limit is not None):
setattr(args, param, _get_memory_limit_in_bytes(limit, parser)) |
def setup(handler):
handler._logger.print('Force loading the entire cache: images, depths and cameras')
loader = handler._data_loader
adapter = loader.adapter
elements = [*adapter.split['train'][:loader.split_limits['train']], *adapter.split['test'][:loader.split_limits['test']]]
nr_views = adapter.nr_views
with Timer(message='Cache loading', logger=handler._logger):
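# Warm the cache up front: per-element cameras, then every view's image and both depth-map variants.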
for (el_idx, element) in enumerate(elements):
loader.cache.get(adapter.get_element_cameras, (element,))
if ((el_idx % max(1, (len(elements) // 100))) == 0):
handler._logger.print(('Element %d/%d' % (el_idx, len(elements))))
for view in range(nr_views):
loader.cache.get(adapter.get_single_image, (element, view))
loader.cache.get(adapter.get_single_depth_map, (element, view, False))
loader.cache.get(adapter.get_single_depth_map, (element, view, True)) |
def test_issue78():
def _get_identifier(sql):
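# For these queries the parsed token stream is [DML 'select', whitespace, identifier, ...]; return the identifier.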
p = sqlparse.parse(sql)[0]
return p.tokens[2]
results = (('get_name', 'z'), ('get_real_name', 'y'), ('get_parent_name', 'x'), ('get_alias', 'z'), ('get_typecast', 'text'))
variants = ('select x.y::text as z from foo', 'select x.y::text as "z" from foo', 'select x."y"::text as z from foo', 'select x."y"::text as "z" from foo', 'select "x".y::text as z from foo', 'select "x".y::text as "z" from foo', 'select "x"."y"::text as z from foo', 'select "x"."y"::text as "z" from foo')
for variant in variants:
i = _get_identifier(variant)
assert isinstance(i, sql.Identifier)
for (func_name, result) in results:
func = getattr(i, func_name)
assert (func() == result) |
class TrafficSigns(torch.utils.data.Dataset):
def __init__(self, root, train=True, transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.filename = 'traffic_signs_dataset.zip'
self.url = ''
fpath = os.path.join(root, self.filename)
if (not os.path.isfile(fpath)):
if (not download):
raise RuntimeError('Dataset not found. You can use download=True to download it')
else:
print(('Downloading from ' + self.url))
self.download()
training_file = 'lab 2 data/train.p'
testing_file = 'lab 2 data/test.p'
if train:
with open(os.path.join(root, training_file), mode='rb') as f:
train = pickle.load(f)
self.data = train['features']
self.labels = train['labels']
else:
with open(os.path.join(root, testing_file), mode='rb') as f:
test = pickle.load(f)
self.data = test['features']
self.labels = test['labels']
self.data = np.transpose(self.data, (0, 3, 1, 2))
def __getitem__(self, index):
(img, target) = (self.data[index], self.labels[index])
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
if (self.transform is not None):
img = self.transform(img)
return (img, target)
def __len__(self):
return len(self.data)
def download(self):
import errno
root = os.path.expanduser(self.root)
fpath = os.path.join(root, self.filename)
try:
os.makedirs(root)
except OSError as e:
if (e.errno == errno.EEXIST):
pass
else:
raise
urllib.request.urlretrieve(self.url, fpath)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close() |
def assert_requests_call(case: Case):
with pytest.raises((requests.exceptions.ConnectionError, urllib3.exceptions.NewConnectionError, CheckFailed)):
case.call(base_url='', timeout=0.001) |
class Test_Link_Regression(object):
d = 100
d_out = 10
clip_limits = (0, 1)
def test_ip(self):
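# The 'ip' (inner product) score should be ~0 for orthonormal vectors and ~1 for a unit vector with itself.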
(x_src, x_dst) = make_orthonormal_vectors(self.d)
expected = np.dot(x_src, x_dst)
x_src = tf.constant(x_src, shape=(1, self.d), dtype='float64')
x_dst = tf.constant(x_dst, shape=(1, self.d), dtype='float64')
li = link_regression(edge_embedding_method='ip')([x_src, x_dst])
print("link regression with 'ip' operator on orthonormal vectors: {}, expected: {}".format(li, expected))
assert (li.numpy() == pytest.approx(0, abs=1.5e-07))
li = link_regression(edge_embedding_method='ip')([x_src, x_src])
print("link regression with 'ip' operator on unit vector: ", li)
assert (li.numpy() == pytest.approx(1, abs=1.5e-07))
def test_mul_l1_l2_avg(self):
(x_src, x_dst) = make_orthonormal_vectors(self.d)
x_src = x_src.reshape(1, 1, self.d)
x_dst = x_dst.reshape(1, 1, self.d)
inp_src = keras.Input(shape=(1, self.d))
inp_dst = keras.Input(shape=(1, self.d))
for op in ['mul', 'l1', 'l2', 'avg', 'concat']:
out = link_regression(output_dim=self.d_out, edge_embedding_method=op)([inp_src, inp_dst])
li = keras.Model(inputs=[inp_src, inp_dst], outputs=out)
res = li.predict(x=[x_src, x_dst])
print("link regression with '{}' operator: {}".format(op, res.flatten()))
assert (res.shape == (1, self.d_out))
assert isinstance(res.flatten()[0], np.float32)
def test_clip_limits(self):
print('\n Testing clip limits...')
(x_src, x_dst) = make_orthonormal_vectors(self.d)
x_src = x_src.reshape(1, 1, self.d)
x_dst = x_dst.reshape(1, 1, self.d)
inp_src = keras.Input(shape=(1, self.d))
inp_dst = keras.Input(shape=(1, self.d))
for op in ['mul', 'l1', 'l2', 'avg', 'concat']:
out = link_regression(output_dim=self.d_out, edge_embedding_method=op, clip_limits=self.clip_limits)([inp_src, inp_dst])
li = keras.Model(inputs=[inp_src, inp_dst], outputs=out)
res = li.predict(x=[x_src, x_dst])
print("link regression with '{}' operator: {}".format(op, res.flatten()))
assert (res.shape == (1, self.d_out))
assert isinstance(res.flatten()[0], np.float32) |
class Sequential(Model):
def __init__(self, layers=None, name=None):
self.layers = []
self.model = None
self.inputs = []
self.outputs = []
self._trainable = True
self._initial_weights = None
self.inbound_nodes = []
self.outbound_nodes = []
self.built = False
if (not name):
prefix = 'sequential_'
name = (prefix + str(K.get_uid(prefix)))
self.name = name
if layers:
for layer in layers:
self.add(layer)
def add(self, layer):
if (not isinstance(layer, Layer)):
raise TypeError(('The added layer must be an instance of class Layer. Found: ' + str(layer)))
if (not self.outputs):
if (not layer.inbound_nodes):
if (not hasattr(layer, 'batch_input_shape')):
raise ValueError('The first layer in a Sequential model must get an `input_shape` or `batch_input_shape` argument.')
x = Input(batch_shape=layer.batch_input_shape, dtype=layer.dtype, name=(layer.name + '_input'))
layer(x)
if (len(layer.inbound_nodes) != 1):
raise ValueError((((('A layer added to a Sequential model must not already be connected somewhere else. Model received layer ' + layer.name) + ' which has ') + str(len(layer.inbound_nodes))) + ' pre-existing inbound connections.'))
if (len(layer.inbound_nodes[0].output_tensors) != 1):
raise ValueError('All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.')
self.outputs = [layer.inbound_nodes[0].output_tensors[0]]
self.inputs = topology.get_source_inputs(self.outputs[0])
topology.Node(outbound_layer=self, inbound_layers=[], node_indices=[], tensor_indices=[], input_tensors=self.inputs, output_tensors=self.outputs, input_masks=[None for _ in self.inputs], output_masks=[None], input_shapes=[x._keras_shape for x in self.inputs], output_shapes=[self.outputs[0]._keras_shape])
else:
output_tensor = layer(self.outputs[0])
if isinstance(output_tensor, list):
raise TypeError('All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.')
self.outputs = [output_tensor]
self.inbound_nodes[0].output_tensors = self.outputs
self.inbound_nodes[0].output_shapes = [self.outputs[0]._keras_shape]
self.layers.append(layer)
self.built = False
def pop(self):
if (not self.layers):
raise TypeError('There are no layers in the model.')
self.layers.pop()
if (not self.layers):
self.outputs = []
self.inbound_nodes = []
self.outbound_nodes = []
else:
self.layers[(- 1)].outbound_nodes = []
self.outputs = [self.layers[(- 1)].output]
self.inbound_nodes[0].output_tensors = self.outputs
self.inbound_nodes[0].output_shapes = [self.outputs[0]._keras_shape]
self.built = False
def get_layer(self, name=None, index=None):
if (not self.built):
self.build()
return self.model.get_layer(name, index)
def call(self, inputs, mask=None):
if (not self.built):
self.build()
return self.model.call(inputs, mask)
def build(self, input_shape=None):
if ((not self.inputs) or (not self.outputs)):
raise TypeError('Sequential model cannot be built: model is empty. Add some layers first.')
self.model = Model(self.inputs, self.outputs[0], name=(self.name + '_model'))
self.model.trainable = self.trainable
self.supports_masking = self.model.supports_masking
self._output_mask_cache = self.model._output_mask_cache
self._output_tensor_cache = self.model._output_tensor_cache
self._output_shape_cache = self.model._output_shape_cache
self.input_layers = self.model.input_layers
self.input_layers_node_indices = self.model.input_layers_node_indices
self.input_layers_tensor_indices = self.model.input_layers_tensor_indices
self.output_layers = self.model.output_layers
self.output_layers_node_indices = self.model.output_layers_node_indices
self.output_layers_tensor_indices = self.model.output_layers_tensor_indices
self.nodes_by_depth = self.model.nodes_by_depth
self.container_nodes = self.model.container_nodes
self.output_names = self.model.output_names
self.input_names = self.model.input_names
self._feed_input_names = self.model._feed_input_names
self._feed_inputs = self.model._feed_inputs
self.model.callback_model = self
self.built = True
@property
def uses_learning_phase(self):
if (not self.built):
self.build()
return self.model.uses_learning_phase
@property
def _flattened_layers(self):
layers = []
if self.layers:
if isinstance(self.layers[0], legacy_layers.Merge):
merge = self.layers[0]
for layer in merge.layers:
if hasattr(layer, '_flattened_layers'):
for sublayer in layer._flattened_layers:
if (sublayer not in layers):
layers.append(sublayer)
elif hasattr(layer, 'layers'):
for sublayer in layer.layers:
if (sublayer not in layers):
layers.append(sublayer)
elif (layer not in layers):
layers.append(layer)
elif (self.layers[0] not in layers):
layers.append(self.layers[0])
for layer in self.layers[1:]:
if (layer not in layers):
layers.append(layer)
return layers
def _gather_list_attr(self, attr):
all_attrs = []
for layer in self._flattened_layers:
all_attrs += getattr(layer, attr, [])
return all_attrs
@property
def trainable(self):
return self._trainable
@trainable.setter
def trainable(self, value):
if self.model:
self.model.trainable = value
self._trainable = value
@property
def trainable_weights(self):
if (not self.trainable):
return []
return self._gather_list_attr('trainable_weights')
@property
def non_trainable_weights(self):
weights = self._gather_list_attr('non_trainable_weights')
if (not self.trainable):
trainable_weights = self._gather_list_attr('trainable_weights')
return (trainable_weights + weights)
return weights
@property
def updates(self):
if (not self.built):
self.build()
return self.model.updates
@property
def state_updates(self):
if (not self.built):
self.build()
return self.model.state_updates
def get_updates_for(self, inputs):
if (not self.built):
self.build()
return self.model.get_updates_for(inputs)
@property
def losses(self):
if (not self.built):
self.build()
return self.model.losses
def get_losses_for(self, inputs):
if (not self.built):
self.build()
return self.model.get_losses_for(inputs)
@property
def regularizers(self):
if (not self.built):
self.build()
return self.model.regularizers
def get_weights(self):
if legacy_models.needs_legacy_support(self):
layers = legacy_models.legacy_sequential_layers(self)
weights = []
for layer in layers:
weights.append(layer.get_weights())
return weights
if (not self.built):
self.build()
return self.model.get_weights()
def set_weights(self, weights):
if legacy_models.needs_legacy_support(self):
layers = legacy_models.legacy_sequential_layers(self)
for layer in layers:
nb_param = len(layer.weights)
layer.set_weights(weights[:nb_param])
weights = weights[nb_param:]
if (not self.built):
self.build()
self.model.set_weights(weights)
def load_weights(self, filepath, by_name=False):
if (h5py is None):
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if (('layer_names' not in f.attrs) and ('model_weights' in f)):
f = f['model_weights']
if legacy_models.needs_legacy_support(self):
layers = legacy_models.legacy_sequential_layers(self)
else:
layers = self.layers
if by_name:
topology.load_weights_from_hdf5_group_by_name(f, layers)
else:
topology.load_weights_from_hdf5_group(f, layers)
if hasattr(f, 'close'):
f.close()
def save_weights(self, filepath, overwrite=True):
if (h5py is None):
raise ImportError('`save_weights` requires h5py.')
if ((not overwrite) and os.path.isfile(filepath)):
proceed = ask_to_proceed_with_overwrite(filepath)
if (not proceed):
return
if legacy_models.needs_legacy_support(self):
layers = legacy_models.legacy_sequential_layers(self)
else:
layers = self.layers
f = h5py.File(filepath, 'w')
topology.save_weights_to_hdf5_group(f, layers)
f.flush()
f.close()
def compile(self, optimizer, loss, metrics=None, sample_weight_mode=None, weighted_metrics=None, **kwargs):
self.build()
self.model.compile(optimizer, loss, metrics=metrics, sample_weight_mode=sample_weight_mode, weighted_metrics=weighted_metrics, **kwargs)
self.optimizer = self.model.optimizer
self.loss = self.model.loss
self.total_loss = self.model.total_loss
self.loss_weights = self.model.loss_weights
self.metrics = self.model.metrics
self.weighted_metrics = self.model.weighted_metrics
self.metrics_tensors = self.model.metrics_tensors
self.metrics_names = self.model.metrics_names
self.sample_weight_mode = self.model.sample_weight_mode
self.sample_weights = self.model.sample_weights
self.targets = self.model.targets
def fit(self, x, y, batch_size=32, epochs=10, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, **kwargs):
if ('nb_epoch' in kwargs):
warnings.warn('The `nb_epoch` argument in `fit` has been renamed `epochs`.')
epochs = kwargs.pop('nb_epoch')
if kwargs:
raise TypeError(('Unrecognized keyword arguments: ' + str(kwargs)))
if (not self.built):
raise RuntimeError('The model needs to be compiled before being used.')
return self.model.fit(x, y, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks=callbacks, validation_split=validation_split, validation_data=validation_data, shuffle=shuffle, class_weight=class_weight, sample_weight=sample_weight, initial_epoch=initial_epoch)
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
if (not self.built):
raise RuntimeError('The model needs to be compiled before being used.')
return self.model.evaluate(x, y, batch_size=batch_size, verbose=verbose, sample_weight=sample_weight)
def predict(self, x, batch_size=32, verbose=0):
if (not self.built):
self.build()
return self.model.predict(x, batch_size=batch_size, verbose=verbose)
def predict_on_batch(self, x):
if (not self.built):
self.build()
return self.model.predict_on_batch(x)
def train_on_batch(self, x, y, class_weight=None, sample_weight=None):
if (not self.built):
raise RuntimeError('The model needs to be compiled before being used.')
return self.model.train_on_batch(x, y, sample_weight=sample_weight, class_weight=class_weight)
def test_on_batch(self, x, y, sample_weight=None):
if (not self.built):
raise RuntimeError('The model needs to be compiled before being used.')
return self.model.test_on_batch(x, y, sample_weight=sample_weight)
def predict_proba(self, x, batch_size=32, verbose=1):
preds = self.predict(x, batch_size, verbose)
if ((preds.min() < 0.0) or (preds.max() > 1.0)):
warnings.warn('Network returning invalid probability values. The last layer might not normalize predictions into probabilities (like softmax or sigmoid would).')
return preds
def predict_classes(self, x, batch_size=32, verbose=1):
proba = self.predict(x, batch_size=batch_size, verbose=verbose)
if (proba.shape[(- 1)] > 1):
return proba.argmax(axis=(- 1))
else:
return (proba > 0.5).astype('int32')
@interfaces.legacy_generator_methods_support
def fit_generator(self, generator, steps_per_epoch, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=False, initial_epoch=0):
if (not self.built):
raise RuntimeError('The model needs to be compiled before being used.')
return self.model.fit_generator(generator, steps_per_epoch, epochs, verbose=verbose, callbacks=callbacks, validation_data=validation_data, validation_steps=validation_steps, class_weight=class_weight, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, shuffle=shuffle, initial_epoch=initial_epoch)
@interfaces.legacy_generator_methods_support
def evaluate_generator(self, generator, steps, max_queue_size=10, workers=1, use_multiprocessing=False):
if (not self.built):
raise RuntimeError('The model needs to be compiled before being used.')
return self.model.evaluate_generator(generator, steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing)
@interfaces.legacy_generator_methods_support
def predict_generator(self, generator, steps, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0):
if (not self.built):
self.build()
return self.model.predict_generator(generator, steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose)
def get_config(self):
if isinstance(self.layers[0], legacy_layers.Merge):
return self.legacy_get_config()
config = []
for layer in self.layers:
config.append({'class_name': layer.__class__.__name__, 'config': layer.get_config()})
return copy.deepcopy(config)
@classmethod
def from_config(cls, config, custom_objects=None):
if (('class_name' not in config[0]) or (config[0]['class_name'] == 'Merge')):
return cls.legacy_from_config(config)
model = cls()
for conf in config:
layer = layer_module.deserialize(conf, custom_objects=custom_objects)
model.add(layer)
return model
def legacy_get_config(self):
config = []
if isinstance(self.layers[0], legacy_layers.Merge):
assert hasattr(self.layers[0], 'layers')
layers = []
for layer in self.layers[0].layers:
layer_config = {'class_name': layer.__class__.__name__, 'config': layer.get_config()}
layers.append(layer_config)
merge_config = self.layers[0].get_config()
merge_config['layers'] = layers
config.append({'class_name': 'Merge', 'config': merge_config})
else:
config.append({'class_name': self.layers[0].__class__.__name__, 'config': self.layers[0].get_config()})
for layer in self.layers[1:]:
config.append({'class_name': layer.__class__.__name__, 'config': layer.get_config()})
return copy.deepcopy(config)
@classmethod
def legacy_from_config(cls, config, layer_cache=None):
if (not layer_cache):
layer_cache = {}
def normalize_legacy_config(conf):
if ('class_name' not in conf):
class_name = conf['name']
name = conf.get('custom_name')
conf['name'] = name
return {'class_name': class_name, 'config': conf}
return conf
model = cls()
def get_or_create_layer(layer_data):
name = layer_data['config'].get('name')
if (name in layer_cache):
return layer_cache[name]
layer = layer_module.deserialize(layer_data)
layer_cache[name] = layer
return layer
first_layer = config[0]
first_layer = normalize_legacy_config(first_layer)
if (first_layer['class_name'] == 'Merge'):
merge_inputs = []
first_layer_config = first_layer['config']
for merge_input_config in first_layer_config.pop('layers'):
merge_input = layer_module.deserialize(merge_input_config)
merge_inputs.append(merge_input)
first_layer_config['layers'] = merge_inputs
merge = legacy_layers.Merge.from_config(first_layer_config)
model.add(merge)
else:
layer = get_or_create_layer(first_layer)
model.add(layer)
for conf in config[1:]:
conf = normalize_legacy_config(conf)
layer = get_or_create_layer(conf)
model.add(layer)
return model |
def normalize_speaker(speaker):
speaker = speaker.replace('-', '_')
speaker = speaker.replace('#', '_')
speaker = speaker.replace('__', '_1_')
speaker = speaker.replace('speaker1', 'speaker')
speaker = speaker.replace('108730', '1_08730')
speaker = speaker.replace('', '08730_1123')
speaker = speaker.replace('', '08730_1457')
speaker = speaker.replace('.', '')
speaker = speaker.replace('speaker_08730_1394', 'speaker_1_08730_1394')
speaker = speaker.replace('speaker_08730_1399', 'speaker_1_08730_1399')
speaker = speaker.replace('speaker_08730_37', 'speaker_1_08730_37')
speaker = speaker.replace('speaker_08730_400', 'speaker_1_08730_400')
speaker = speaker.replace('_8730', '_08730')
speaker = speaker.replace('_0873', '_08730')
speaker = speaker.replace('_08737', '_08730')
speaker = speaker.replace('21_08730', '1_08730')
speaker = speaker.replace('058730', '08730')
speaker = speaker.replace('2_08730', '1_08730')
speaker = speaker.replace('speaker_08730_846', 'speaker_1_08730_846')
speaker = speaker.replace('speaker_8730_270', 'speaker_1_08730_270')
return speaker |
class DynamicNet(object):
def __init__(self, c0, lr):
self.models = []
self.c0 = c0
self.lr = lr
self.boost_rate = nn.Parameter(torch.tensor(lr, requires_grad=True, device='cuda'))
def add(self, model):
self.models.append(model)
def parameters(self):
params = []
for m in self.models:
params.extend(m.parameters())
params.append(self.boost_rate)
return params
def zero_grad(self):
for m in self.models:
m.zero_grad()
def to_cuda(self):
for m in self.models:
m.cuda()
def to_eval(self):
for m in self.models:
m.eval()
def to_train(self):
for m in self.models:
m.train(True)
def forward(self, x):
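# Inference pass: accumulate each sub-model's prediction under no_grad, then return c0 + boost_rate * summed prediction.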
if (len(self.models) == 0):
return (None, self.c0)
middle_feat_cum = None
prediction = None
with torch.no_grad():
for m in self.models:
if (middle_feat_cum is None):
(middle_feat_cum, prediction) = m(x, middle_feat_cum)
else:
(middle_feat_cum, pred) = m(x, middle_feat_cum)
prediction += pred
return (middle_feat_cum, (self.c0 + (self.boost_rate * prediction)))
def forward_grad(self, x):
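# Same as forward(), but without torch.no_grad() so gradients can flow through all sub-models.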
if (len(self.models) == 0):
return (None, self.c0)
middle_feat_cum = None
prediction = None
for m in self.models:
if (middle_feat_cum is None):
(middle_feat_cum, prediction) = m(x, middle_feat_cum)
else:
(middle_feat_cum, pred) = m(x, middle_feat_cum)
prediction += pred
return (middle_feat_cum, (self.c0 + (self.boost_rate * prediction)))
@classmethod
def from_file(cls, path, builder):
d = torch.load(path)
net = DynamicNet(d['c0'], d['lr'])
net.boost_rate = d['boost_rate']
for (stage, m) in enumerate(d['models']):
submod = builder(stage)
submod.load_state_dict(m)
net.add(submod)
return net
def to_file(self, path):
models = [m.state_dict() for m in self.models]
d = {'models': models, 'c0': self.c0, 'lr': self.lr, 'boost_rate': self.boost_rate}
torch.save(d, path) |
class Decoder_SPEC2MIDI(nn.Module):
def __init__(self, n_frame, n_bin, n_note, n_velocity, hid_dim, n_layers, n_heads, pf_dim, dropout, device):
super().__init__()
self.device = device
self.n_note = n_note
self.n_frame = n_frame
self.n_velocity = n_velocity
self.n_bin = n_bin
self.hid_dim = hid_dim
self.sigmoid = nn.Sigmoid()
self.dropout = nn.Dropout(dropout)
self.pos_embedding_freq = nn.Embedding(n_note, hid_dim)
self.layer_zero_freq = DecoderLayer_Zero(hid_dim, n_heads, pf_dim, dropout, device)
self.layers_freq = nn.ModuleList([DecoderLayer(hid_dim, n_heads, pf_dim, dropout, device) for _ in range((n_layers - 1))])
self.fc_onset_freq = nn.Linear(hid_dim, 1)
self.fc_offset_freq = nn.Linear(hid_dim, 1)
self.fc_mpe_freq = nn.Linear(hid_dim, 1)
self.fc_velocity_freq = nn.Linear(hid_dim, self.n_velocity)
self.scale_time = torch.sqrt(torch.FloatTensor([hid_dim])).to(device)
self.pos_embedding_time = nn.Embedding(n_frame, hid_dim)
self.layers_time = nn.ModuleList([EncoderLayer(hid_dim, n_heads, pf_dim, dropout, device) for _ in range(n_layers)])
self.fc_onset_time = nn.Linear(hid_dim, 1)
self.fc_offset_time = nn.Linear(hid_dim, 1)
self.fc_mpe_time = nn.Linear(hid_dim, 1)
self.fc_velocity_time = nn.Linear(hid_dim, self.n_velocity)
def forward(self, enc_spec):
batch_size = enc_spec.shape[0]
enc_spec = enc_spec.reshape([(batch_size * self.n_frame), self.n_bin, self.hid_dim])
pos_freq = torch.arange(0, self.n_note).unsqueeze(0).repeat((batch_size * self.n_frame), 1).to(self.device)
midi_freq = self.pos_embedding_freq(pos_freq)
(midi_freq, attention_freq) = self.layer_zero_freq(enc_spec, midi_freq)
for layer_freq in self.layers_freq:
(midi_freq, attention_freq) = layer_freq(enc_spec, midi_freq)
dim = attention_freq.shape
attention_freq = attention_freq.reshape([batch_size, self.n_frame, dim[1], dim[2], dim[3]])
output_onset_freq = self.sigmoid(self.fc_onset_freq(midi_freq).reshape([batch_size, self.n_frame, self.n_note]))
output_offset_freq = self.sigmoid(self.fc_offset_freq(midi_freq).reshape([batch_size, self.n_frame, self.n_note]))
output_mpe_freq = self.sigmoid(self.fc_mpe_freq(midi_freq).reshape([batch_size, self.n_frame, self.n_note]))
output_velocity_freq = self.fc_velocity_freq(midi_freq).reshape([batch_size, self.n_frame, self.n_note, self.n_velocity])
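# Re-arrange the per-frame note features so that the second stack of layers attends along the time axis for each note.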
midi_time = midi_freq.reshape([batch_size, self.n_frame, self.n_note, self.hid_dim]).permute(0, 2, 1, 3).contiguous().reshape([(batch_size * self.n_note), self.n_frame, self.hid_dim])
pos_time = torch.arange(0, self.n_frame).unsqueeze(0).repeat((batch_size * self.n_note), 1).to(self.device)
midi_time = self.dropout(((midi_time * self.scale_time) + self.pos_embedding_time(pos_time)))
for layer_time in self.layers_time:
midi_time = layer_time(midi_time)
output_onset_time = self.sigmoid(self.fc_onset_time(midi_time).reshape([batch_size, self.n_note, self.n_frame]).permute(0, 2, 1).contiguous())
output_offset_time = self.sigmoid(self.fc_offset_time(midi_time).reshape([batch_size, self.n_note, self.n_frame]).permute(0, 2, 1).contiguous())
output_mpe_time = self.sigmoid(self.fc_mpe_time(midi_time).reshape([batch_size, self.n_note, self.n_frame]).permute(0, 2, 1).contiguous())
output_velocity_time = self.fc_velocity_time(midi_time).reshape([batch_size, self.n_note, self.n_frame, self.n_velocity]).permute(0, 2, 1, 3).contiguous()
return (output_onset_freq, output_offset_freq, output_mpe_freq, output_velocity_freq, attention_freq, output_onset_time, output_offset_time, output_mpe_time, output_velocity_time) |
def prediction_stat(outputs, labels, n_classes):
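# For each output and each class m: count correctly classified pixels, ground-truth pixels, and predicted pixels (over valid labels).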
lbl = labels.data
valid = (lbl < n_classes)
classwise_pixel_acc = []
classwise_gtpixels = []
classwise_predpixels = []
for output in outputs:
(_, pred) = output.data.max(dim=1)
for m in range(n_classes):
mask1 = (lbl == m)
mask2 = (pred[valid] == m)
diff = (pred[mask1] - lbl[mask1])
classwise_pixel_acc += [torch.sum((diff == 0))]
classwise_gtpixels += [torch.sum(mask1)]
classwise_predpixels += [torch.sum(mask2)]
return (classwise_pixel_acc, classwise_gtpixels, classwise_predpixels) |
def test_bad_wires():
dh = 1.0
nx = 12
ny = 12
hx = [(dh, nx)]
hy = [(dh, ny)]
mesh = TensorMesh([hx, hy], 'CN')
actv = np.ones(len(mesh), dtype=bool)
wires = maps.Wires(('m1', mesh.nC), ('m2', (mesh.nC - 2)), ('m3', (mesh.nC - 3)))
with pytest.raises(ValueError):
regularization.JointTotalVariation(mesh, wire_map=wires, indActive=actv) |
def sqrt_poly(f):
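# Square root of a monic perfect-square polynomial: halve every factor's exponent; an odd exponent raises TypeError, reported as ValueError.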
if (not f.is_monic()):
raise ValueError('f must be monic')
try:
return prod([(g ** Integer((e / Integer(2)))) for (g, e) in f.factor()])
except TypeError:
raise ValueError('f must be a perfect square') |
def find_benchmark(benchmark: str, path: str):
benchmarks_dir = os.path.join(PROJECT_DIR, path)
benchmark_path = find(benchmark, benchmarks_dir)
return benchmark_path |
def find_text_idx(sentence):
for (idx, line) in enumerate(sentence):
if line.startswith('# text'):
return idx
return (- 1) |
def get_optimizer(params, name, **kwargs):
if (name == 'adam'):
from torch.optim import Adam
return Adam(params, **kwargs)
elif (name == 'adamw'):
from torch.optim import AdamW
return AdamW(params, **kwargs)
else:
raise NotImplementedError(name) |
class TranslationDataset(CachedDataset2):
source_file_prefix = 'source'
target_file_prefix = 'target'
main_source_data_key = 'data'
main_target_data_key = 'classes'
def __init__(self, path, file_postfix, source_postfix='', target_postfix='', source_only=False, search_without_reference=False, unknown_label=None, seq_list_file=None, use_cache_manager=False, **kwargs):
super(TranslationDataset, self).__init__(**kwargs)
assert os.path.isdir(path)
self.path = path
self.file_postfix = file_postfix
self.source_only = source_only
self.search_without_reference = search_without_reference
self._source_postfix = source_postfix
self._target_postfix = target_postfix
self._seq_list_file = seq_list_file
self.seq_list = ([int(n) for n in open(seq_list_file).read().splitlines()] if seq_list_file else None)
self._add_postfix = {self.source_file_prefix: source_postfix, self.target_file_prefix: target_postfix}
self._use_cache_manager = use_cache_manager
from threading import Lock, Thread
self._lock = Lock()
self._main_data_key_map = {self.source_file_prefix: self.main_source_data_key}
if (not source_only):
self._main_data_key_map[self.target_file_prefix] = self.main_target_data_key
self._files_to_read = [prefix for prefix in self._main_data_key_map.keys() if (not ((prefix == self.target_file_prefix) and search_without_reference))]
self._data_files = {prefix: self._get_data_file(prefix) for prefix in self._files_to_read}
self._data_keys = (self._source_data_keys + self._target_data_keys)
self._data = {data_key: [] for data_key in self._data_keys}
self._data_len = None
self._vocabs = self._get_vocabs()
self.num_outputs = {k: [(max(self._vocabs[k].values()) + 1), 1] for k in self._vocabs.keys()}
assert all([(v1 <= (2 ** 31)) for (k, (v1, v2)) in self.num_outputs.items()])
self.num_inputs = self.num_outputs[self.main_source_data_key][0]
self._reversed_vocabs = {k: self._reverse_vocab(k) for k in self._vocabs.keys()}
self.labels = {k: self._get_label_list(k) for k in self._vocabs.keys()}
if (not isinstance(unknown_label, dict)):
assert isinstance(unknown_label, (str, type(None)))
unknown_label = {data_key: unknown_label for data_key in self._data_keys}
for data_key in self._data_keys:
unknown_label.setdefault(data_key, None)
self._unknown_label = unknown_label
self._seq_order = None
self._tag_prefix = 'line-'
self._thread = Thread(name=('%r reader' % self), target=self._thread_main)
self._thread.daemon = True
self._thread.start()
@property
def _source_data_keys(self):
return [self.main_source_data_key]
@property
def _target_data_keys(self):
if self.source_only:
return []
else:
return [self.main_target_data_key]
def _extend_data(self, file_prefix, data_strs):
data_key = (self.main_source_data_key if (file_prefix == self.source_file_prefix) else self.main_target_data_key)
data = [self._words_to_numpy(data_key, (s.decode('utf8').strip() + self._add_postfix[file_prefix]).split()) for s in data_strs]
with self._lock:
self._data[data_key].extend(data)
def _thread_main(self):
from returnn.util.basic import interrupt_main
try:
import returnn.util.better_exchook
returnn.util.better_exchook.install()
from returnn.util.basic import AsyncThreadRun
data_len = 0
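# First pass: count the lines of the source file to determine the total data length, then rewind.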
while True:
ls = self._data_files[self.source_file_prefix].readlines((10 ** 4))
data_len += len(ls)
if (not ls):
break
with self._lock:
self._data_len = data_len
self._data_files[self.source_file_prefix].seek(0, os.SEEK_SET)
files_to_read = list(self._files_to_read)
while True:
for file_prefix in files_to_read:
data_strs = self._data_files[file_prefix].readlines((10 ** 6))
if (not data_strs):
assert (len(self._data[self._main_data_key_map[file_prefix]]) == self._data_len)
files_to_read.remove(file_prefix)
continue
assert ((len(self._data[self._main_data_key_map[file_prefix]]) + len(data_strs)) <= self._data_len)
self._extend_data(file_prefix, data_strs)
if (not files_to_read):
break
for (file_prefix, file_handle) in list(self._data_files.items()):
file_handle.close()
self._data_files[file_prefix] = None
except Exception:
sys.excepthook(*sys.exc_info())
interrupt_main()
def _transform_filename(self, filename):
if self._use_cache_manager:
from returnn.util.basic import cf
filename = cf(filename)
return filename
def _get_data_file(self, prefix):
import os
filename = ('%s/%s.%s' % (self.path, prefix, self.file_postfix))
if os.path.exists(filename):
return open(self._transform_filename(filename), 'rb')
if os.path.exists((filename + '.gz')):
import gzip
return gzip.GzipFile(self._transform_filename((filename + '.gz')), 'rb')
raise Exception(('Data file not found: %r (.gz)?' % filename))
def _get_vocabs(self):
return {data_key: self._get_vocab(prefix) for (prefix, data_key) in self._main_data_key_map.items()}
def _get_vocab(self, prefix):
import os
filename = ('%s/%s.vocab.pkl' % (self.path, prefix))
if (not os.path.exists(filename)):
raise Exception(('Vocab file not found: %r' % filename))
import pickle
vocab = pickle.load(open(self._transform_filename(filename), 'rb'))
assert isinstance(vocab, dict)
return vocab
def _reverse_vocab(self, data_key):
return {v: k for (k, v) in sorted(self._vocabs[data_key].items())}
def _get_label_list(self, data_key):
reversed_vocab = self._reversed_vocabs[data_key]
assert isinstance(reversed_vocab, dict)
num_labels = self.num_outputs[data_key][0]
return list(map(reversed_vocab.__getitem__, range(num_labels)))
def _words_to_numpy(self, data_key, words):
vocab = self._vocabs[data_key]
if (self._unknown_label[data_key] is None):
try:
words_idxs = list(map(vocab.__getitem__, words))
except KeyError as e:
raise Exception(('Can not handle unknown token without unknown_label: %s (%s)' % (str(e), bytes(str(e), 'utf-8'))))
else:
unknown_label_id = vocab[self._unknown_label[data_key]]
words_idxs = [vocab.get(w, unknown_label_id) for w in words]
return numpy.array(words_idxs, dtype=numpy.int32)
def _get_data(self, key, line_nr):
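# Block until the background reader thread has loaded line `line_nr`, printing a waiting message at most every 10 seconds.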
import time
last_print_time = 0
last_print_len = None
while True:
with self._lock:
if (self._data_len is not None):
assert (line_nr <= self._data_len)
cur_len = len(self._data[key])
if (line_nr < cur_len):
return self._data[key][line_nr]
if ((cur_len != last_print_len) and ((time.time() - last_print_time) > 10)):
print(('%r: waiting for %r, line %i (%i loaded so far)...' % (self, key, line_nr, cur_len)), file=log.v3)
last_print_len = cur_len
last_print_time = time.time()
time.sleep(1)
def _get_data_len(self):
import time
t = 0
while True:
with self._lock:
if (self._data_len is not None):
return self._data_len
if (t == 0):
print(('%r: waiting for data length info...' % (self,)), file=log.v3)
time.sleep(1)
t += 1
def have_corpus_seq_idx(self):
return True
def get_all_tags(self):
return [(self._tag_prefix + str(line_nr)) for line_nr in range(len(self._data[self.main_source_data_key]))]
def get_corpus_seq_idx(self, seq_idx):
if (self._seq_order is None):
return None
return self._seq_order[seq_idx]
def is_data_sparse(self, key):
return True
def get_data_dtype(self, key):
return 'int32'
def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
super(TranslationDataset, self).init_seq_order(epoch=epoch, seq_list=seq_list, seq_order=seq_order)
if ((seq_list is None) and self.seq_list):
seq_list = self.seq_list
if (seq_order is not None):
self._seq_order = seq_order
elif (seq_list is not None):
self._seq_order = [int(s[len(self._tag_prefix):]) for s in seq_list]
else:
num_seqs = self._get_data_len()
self._seq_order = self.get_seq_order_for_epoch(epoch=epoch, num_seqs=num_seqs, get_seq_len=(lambda i: len(self._get_data(key=self.main_source_data_key, line_nr=i))))
self._num_seqs = len(self._seq_order)
return True
def supports_seq_order_sorting(self) -> bool:
return True
def get_estimated_seq_length(self, seq_idx):
corpus_seq_idx = self.get_corpus_seq_idx(seq_idx)
assert (corpus_seq_idx is not None)
return len(self._get_data(key=self.main_source_data_key, line_nr=corpus_seq_idx))
def _collect_single_seq(self, seq_idx):
if (seq_idx >= self._num_seqs):
return None
line_nr = self._seq_order[seq_idx]
data_keys = (self._source_data_keys if self.search_without_reference else self._data_keys)
features = {data_key: self._get_data(key=data_key, line_nr=line_nr) for data_key in data_keys}
assert all([(data is not None) for data in features.values()])
return DatasetSeq(seq_idx=seq_idx, seq_tag=(self._tag_prefix + str(line_nr)), features=features) |
def NMTCriterion(vocabSize):
weight = torch.ones(vocabSize)
weight[onmt.Constants.PAD] = 0
crit = nn.NLLLoss(weight, size_average=False)
if opt.gpus:
crit.cuda()
return crit |
@pytest.mark.parametrize('hidden_size,sparse_feature_num', [((8,), 2)])
def test_ONN(hidden_size, sparse_feature_num):
model_name = 'ONN'
sample_size = SAMPLE_SIZE
(x, y, feature_columns) = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num, hash_flag=True)
model = ONN(feature_columns, feature_columns, dnn_hidden_units=[32, 32], dnn_dropout=0.5, device=get_device())
check_model(model, model_name, x, y) |
def test_control_bfgs(ocp):
ocp.solve(algorithm='bfgs', rtol=0.01, atol=0.0, max_iter=7)
assert (ocp.solver.relative_norm <= ocp.solver.rtol) |
class Dataset():
def __init__(self, name, path, min_length=None, max_length=None, args=None):
self.name = name
if ((args is not None) and hasattr(args, 'data_dir')):
path = os.path.join(args.data_dir, path)
self.vec = pickle.load(open(path, 'rb'))
(X, Xt) = (self.vec.seq_text['train'], self.vec.seq_text['test'])
(y, yt) = (self.vec.label['train'], self.vec.label['test'])
(X, y) = filterbylength(X, y, min_length=min_length, max_length=max_length)
(Xt, yt) = filterbylength(Xt, yt, min_length=min_length, max_length=max_length)
(Xt, yt) = sortbylength(Xt, yt)
if (args.pre_loaded_attn or args.adversarial):
y_attn = json.load(open(os.path.join(args.gold_label_dir, 'train_attentions_best_epoch.json'), 'r'))
yt_attn = json.load(open(os.path.join(args.gold_label_dir, 'test_attentions_best_epoch.json'), 'r'))
true_pred = json.load(open(os.path.join(args.gold_label_dir, 'train_predictions_best_epoch.json'), 'r'))
true_pred_t = json.load(open(os.path.join(args.gold_label_dir, 'test_predictions_best_epoch.json'), 'r'))
true_pred = [e[0] for e in true_pred]
true_pred_t = [e[0] for e in true_pred_t]
new_attns = []
for (e, a) in zip(X, y_attn):
tmp = (([0] + [el for el in a if (el != 0)]) + [0])
assert (len(tmp) == len(e))
new_attns.append(tmp)
y_attn = new_attns
new_attns = []
for (e, a) in zip(Xt, yt_attn):
tmp = (([0] + [el for el in a if (el != 0)]) + [0])
assert (len(tmp) == len(e))
new_attns.append(tmp)
yt_attn = new_attns
self.train_data = DataHolder(X, y, y_attn, true_pred)
self.test_data = DataHolder(Xt, yt, yt_attn, true_pred_t)
else:
self.train_data = DataHolder(X, y)
self.test_data = DataHolder(Xt, yt)
if ((args is not None) and hasattr(args, 'hidden_size')):
self.hidden_size = args.hidden_size
self.output_size = 1
self.save_on_metric = 'roc_auc'
self.keys_to_use = {'roc_auc': 'roc_auc', 'pr_auc': 'pr_auc'}
self.bsize = 32
if ((args is not None) and hasattr(args, 'output_dir')):
self.basepath = args.output_dir |
class ConvModule(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, padding=0):
super(ConvModule, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, bias=False)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.01)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x |
class MeatOffGrill(Task):
def init_task(self) -> None:
self._steak = Shape('steak')
self._chicken = Shape('chicken')
self._success_sensor = ProximitySensor('success')
self.register_graspable_objects([self._chicken, self._steak])
self._w1 = Dummy('waypoint1')
self._w1z = self._w1.get_position()[2]
def init_episode(self, index: int) -> List[str]:
conditions = [NothingGrasped(self.robot.gripper)]
if (index == 0):
(x, y, _) = self._chicken.get_position()
self._w1.set_position([x, y, self._w1z])
conditions.append(DetectedCondition(self._chicken, self._success_sensor))
else:
(x, y, _) = self._steak.get_position()
self._w1.set_position([x, y, self._w1z])
conditions.append(DetectedCondition(self._steak, self._success_sensor))
self.register_success_conditions(conditions)
return [('take the %s off the grill' % MEAT[index]), ('pick up the %s and place it next to the grill' % MEAT[index]), ('remove the %s from the grill and set it down to the side' % MEAT[index])]
def variation_count(self) -> int:
return 2 |
def alexnet():
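# AlexNet built with Caffe2 brew helpers in inference mode; returns the model and its expected NCHW input shape.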
model = ModelHelper(name='r', arg_scope={'order': 'NCHW', 'is_test': True})
conv1 = brew.conv(model, 'data', 'conv1', 3, 64, 11, ('XavierFill', {}), ('ConstantFill', {}), stride=4, pad=2)
relu1 = brew.relu(model, conv1, 'conv1')
pool1 = brew.max_pool(model, relu1, 'pool1', kernel=3, stride=2, pad=0, legacy_pad=3)
lrn1 = brew.lrn(model, pool1, 'pool1_lrn', size=5, alpha=0.0001, beta=0.75, bias=1.0)
conv2 = brew.conv(model, lrn1, 'conv2', 64, 192, 5, ('XavierFill', {}), ('ConstantFill', {}), pad=2)
relu2 = brew.relu(model, conv2, 'conv2')
pool2 = brew.max_pool(model, relu2, 'pool2', kernel=3, stride=2)
lrn2 = brew.lrn(model, pool2, 'pool2_lrn', size=5, alpha=0.0001, beta=0.75, bias=1.0)
conv3 = brew.conv(model, lrn2, 'conv3', 192, 384, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1)
relu3 = brew.relu(model, conv3, 'conv3')
conv4 = brew.conv(model, relu3, 'conv4', 384, 256, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1)
relu4 = brew.relu(model, conv4, 'conv4')
conv5 = brew.conv(model, relu4, 'conv5', 256, 256, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1)
relu5 = brew.relu(model, conv5, 'conv5')
pool5 = brew.max_pool(model, relu5, 'pool5', kernel=3, stride=2)
fc6 = brew.fc(model, pool5, 'fc6', ((256 * 6) * 6), 4096, ('XavierFill', {}), ('ConstantFill', {}))
relu6 = brew.relu(model, fc6, 'fc6')
fc7 = brew.fc(model, relu6, 'fc7', 4096, 4096, ('XavierFill', {}), ('ConstantFill', {}))
relu7 = brew.relu(model, fc7, 'fc7')
drop7 = brew.dropout(model, relu7, 'fc7_dropout', is_test=1, ratio=0.5)
fc8 = brew.fc(model, drop7, 'fc8', 4096, 1000, ('XavierFill', {}), ('ConstantFill', {}))
relu8 = brew.relu(model, fc8, 'fc8')
brew.dropout(model, relu8, 'fc8_dropout', is_test=1, ratio=0.5)
return (model, [(1, 3, 224, 224)]) |
def find_use(arg: Any, node: Node) -> bool:
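# Recursively search tuples, lists, dicts and slices for an identity match with `node`.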
if isinstance(arg, (tuple, list)):
return any((find_use(elem, node) for elem in arg))
elif isinstance(arg, dict):
return any((find_use(v, node) for (k, v) in arg.items()))
elif isinstance(arg, slice):
return any([find_use(arg.start, node), find_use(arg.stop, node), find_use(arg.step, node)])
elif isinstance(arg, Node):
return (arg is node)
else:
return False |
def test_CE():
reset_seed(0, check_cudnn=False)
for weighted in [True, False]:
instance = CE()
announce_msg('Testing {}'.format(instance))
announce_msg('weighted: {}'.format(weighted))
cuda = 0
DEVICE = torch.device(('cuda:{}'.format(cuda) if torch.cuda.is_available() else 'cpu'))
if torch.cuda.is_available():
torch.cuda.set_device(int(cuda))
instance.to(DEVICE)
num_classes = 5
b = 16
scores = torch.rand(b, num_classes).to(DEVICE)
labels = torch.randint(low=0, high=num_classes, size=(b,), dtype=torch.long).to(DEVICE)
tags = torch.randint(low=0, high=3, size=(b,), dtype=torch.long).to(DEVICE)
weights = None
if weighted:
weights = torch.rand(size=(b,), dtype=torch.float32).to(DEVICE)
cen = instance(scores=scores, labels=labels, weights=weights, tags=tags)
print('H(p, q): {}'.format(cen)) |
class ParamSpec(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PARAMSPEC |
def wrap_array(array, stride):
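# Offsets mark every `stride`-th element and end with the total length; wrap them with the array as a ListOffsetArray.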
offsets = []
for x in range(int((array.__len__() / stride))):
offsets.append((x * stride))
offsets.append(array.__len__())
return ListOffsetArray(offsets, array) |
def deepfashion_name_parse(f, mode='train'):
if (mode == 'train'):
data_type = 'train'
elif (mode == 'val'):
data_type = 'gallery'
elif (mode == 'test'):
data_type = 'query'
lines = txt_parse(f)
num_train = 0
result = []
for line in lines:
if (line[0:4] != 'img/'):
continue
if ((len(line.split()) > 0) and (line.split()[2] == data_type)):
num_train += 1
name = line.split()[0]
img_class = name.split('/')[2]
result.append(((img_class + ' ') + name))
print('The first image class and name are {}'.format(result[0]))
print('The number of images is {}'.format(len(result)))
return result |
def test_download_none():
with tempfile.TemporaryDirectory(dir=TEST_WORKING_DIR) as test_dir:
stanza.download('it', model_dir=test_dir, processors='tokenize', package='combined')
stanza.download('it', model_dir=test_dir, processors='tokenize', package='vit')
it_dir = os.path.join(test_dir, 'it')
it_dir_listing = sorted(os.listdir(it_dir))
assert (sorted(it_dir_listing) == ['mwt', 'tokenize'])
combined_path = os.path.join(it_dir, 'tokenize', 'combined.pt')
vit_path = os.path.join(it_dir, 'tokenize', 'vit.pt')
assert os.path.exists(combined_path)
assert os.path.exists(vit_path)
combined_md5 = get_md5(combined_path)
vit_md5 = get_md5(vit_path)
assert (combined_md5 != vit_md5)
shutil.copyfile(vit_path, combined_path)
assert (get_md5(combined_path) == vit_md5)
pipe = stanza.Pipeline('it', model_dir=test_dir, processors='tokenize', package={'tokenize': 'combined'}, download_method=None)
assert (get_md5(combined_path) == vit_md5)
pipe = stanza.Pipeline('it', model_dir=test_dir, processors='tokenize', package={'tokenize': 'combined'})
assert (get_md5(combined_path) != vit_md5) |
@parse_args('v', 'i')
def contiguous(g, input, memory_format):
if (memory_format > 2):
raise RuntimeError('onnx memory_format support is not implemented')
return input |
@numba.extending.register_model(TupleType)
class TupleModel(numba.extending.models.StructModel):
def __init__(self, dmm, fe_type):
members = [('contents', fe_type.contents)]
super().__init__(dmm, fe_type, members) |
class GumbelQuantizer(nn.Module):
def __init__(self, codebook_size, emb_dim, num_hiddens, straight_through=False, kl_weight=0.0005, temp_init=1.0):
super().__init__()
self.codebook_size = codebook_size
self.emb_dim = emb_dim
self.straight_through = straight_through
self.temperature = temp_init
self.kl_weight = kl_weight
self.proj = nn.Conv2d(num_hiddens, codebook_size, 1)
self.embed = nn.Embedding(codebook_size, emb_dim)
def forward(self, z):
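# Hard (straight-through) sampling only if configured during training; always hard at evaluation time.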
hard = (self.straight_through if self.training else True)
logits = self.proj(z)
soft_one_hot = F.gumbel_softmax(logits, tau=self.temperature, dim=1, hard=hard)
z_q = torch.einsum('b n h w, n d -> b d h w', soft_one_hot, self.embed.weight)
qy = F.softmax(logits, dim=1)
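# KL of the soft code assignments against a uniform prior over the codebook, scaled by kl_weight.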
diff = (self.kl_weight * torch.sum((qy * torch.log(((qy * self.codebook_size) + 1e-10))), dim=1).mean())
min_encoding_indices = soft_one_hot.argmax(dim=1)
return (z_q, diff, {'min_encoding_indices': min_encoding_indices}) |
@register_task('text_to_table_task')
class TextToDataTranslationTask(TranslationTask):
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args, src_dict, tgt_dict)
start_split_token = args.split_token.strip()
space_split_token = (' ' + start_split_token)
newline_token = args.newline_token
bpe = build_bpe(Namespace(bpe='gpt2'))
start_split_token = tgt_dict.indices[bpe.encode(start_split_token)]
space_split_token = tgt_dict.indices[bpe.encode(space_split_token)]
newline_token = tgt_dict.indices[bpe.encode(newline_token)]
other_split_tokens = [k.replace('Ġ', ' ') for k in bpe.bpe.encoder if (args.split_token.strip() in k)]
other_split_tokens.remove((' ' + args.split_token.strip()))
other_split_tokens.remove(args.split_token.strip())
other_split_tokens.remove('<|endoftext|>')
other_split_tokens = [tgt_dict.indices[bpe.encode(k)] for k in other_split_tokens]
self.special_tokens = SpecialTokens(eos=self.target_dictionary.eos(), newline_token=newline_token, start_split_token=start_split_token, space_split_token=space_split_token, other_split_tokens=other_split_tokens)
self.table_max_columns = args.table_max_columns
@classmethod
def add_args(cls, parser):
super(TextToDataTranslationTask, TextToDataTranslationTask).add_args(parser)
parser.add_argument('--split-token', default='|')
parser.add_argument('--newline-token', default='\n')
parser.add_argument('--table-max-columns', type=int, required=True)
parser.add_argument('--unconstrained-decoding', default=False, action='store_true')
parser.add_argument('--return-relative-column-strs', default=None, nargs='+', choices=RELATIVE_COLUMN_STR_CHOICES)
def build_model(self, args):
model = super().build_model(args)
if (hasattr(model, 'set_special_tokens') and callable(model.set_special_tokens)):
model.set_special_tokens(self.special_tokens)
return model
def build_generator(self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None):
if self.args.unconstrained_decoding:
return super().build_generator(models, args, seq_gen_cls=seq_gen_cls, extra_gen_cls_kwargs=extra_gen_cls_kwargs)
if getattr(args, 'score_reference', False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(self.target_dictionary, compute_alignment=getattr(args, 'print_alignment', False))
from fairseq.sequence_generator import SequenceGenerator, SequenceGeneratorWithAlignment
sampling = getattr(args, 'sampling', False)
sampling_topk = getattr(args, 'sampling_topk', (- 1))
sampling_topp = getattr(args, 'sampling_topp', (- 1.0))
diverse_beam_groups = getattr(args, 'diverse_beam_groups', (- 1))
diverse_beam_strength = getattr(args, 'diverse_beam_strength', 0.5)
match_source_len = getattr(args, 'match_source_len', False)
diversity_rate = getattr(args, 'diversity_rate', (- 1))
constrained = getattr(args, 'constraints', False)
prefix_allowed_tokens_fn = getattr(args, 'prefix_allowed_tokens_fn', None)
if (sum((int(cond) for cond in [sampling, (diverse_beam_groups > 0), match_source_len, (diversity_rate > 0)])) > 1):
raise ValueError('Provided Search parameters are mutually exclusive.')
assert ((sampling_topk < 0) or sampling), '--sampling-topk requires --sampling'
assert ((sampling_topp < 0) or sampling), '--sampling-topp requires --sampling'
assert (self.table_max_columns is not None)
search_strategy = ConstrainedTableBeamSearch(self.target_dictionary, self.special_tokens, self.table_max_columns)
if (seq_gen_cls is None):
if getattr(args, 'print_alignment', False):
seq_gen_cls = SequenceGeneratorWithAlignment
else:
seq_gen_cls = SequenceGenerator
extra_gen_cls_kwargs = (extra_gen_cls_kwargs or {})
return seq_gen_cls(models, self.target_dictionary, beam_size=getattr(args, 'beam', 5), max_len_a=getattr(args, 'max_len_a', 0), max_len_b=getattr(args, 'max_len_b', 200), min_len=getattr(args, 'min_len', 1), normalize_scores=(not getattr(args, 'unnormalized', False)), len_penalty=getattr(args, 'lenpen', 1), unk_penalty=getattr(args, 'unkpen', 0), temperature=getattr(args, 'temperature', 1.0), match_source_len=getattr(args, 'match_source_len', False), no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0), search_strategy=search_strategy, **extra_gen_cls_kwargs)
def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
if (not self.args.unconstrained_decoding):
assert (constraints is None)
bsz = sample['net_input']['src_tokens'].shape[0]
constraints = torch.zeros((bsz, 0), dtype=torch.float)
return super(TextToDataTranslationTask, self).inference_step(generator, models, sample, prefix_tokens=prefix_tokens, constraints=constraints)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
paths = utils.split_paths(self.args.data)
assert (len(paths) > 0)
if (split != getattr(self.args, 'train_subset', None)):
paths = paths[:1]
data_path = paths[((epoch - 1) % len(paths))]
(src, tgt) = (self.args.source_lang, self.args.target_lang)
self.datasets[split] = load_langpair_dataset(data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions, load_alignments=self.args.load_alignments, truncate_source=self.args.truncate_source, num_buckets=self.args.num_batch_buckets, shuffle=(split != 'test'), pad_to_multiple=self.args.required_seq_len_multiple, special_tokens=self.special_tokens, return_relative_column_strs=self.args.return_relative_column_strs) |
def make_is_save(options):
class IsSave(Struct):
def __init__(self, save_times):
if is_sequence(save_times):
save_times = nm.asarray(save_times)
self.save_times0 = save_times
self.reset()
def reset(self, ts=None):
self.ilast = 0
self.save_times = self.save_times0
if (ts is not None):
if is_integer(self.save_times0):
self.save_times = nm.linspace(ts.t0, ts.t1, self.save_times0)
def __call__(self, ts):
if (is_string(self.save_times) and (self.save_times == 'all')):
return True
elif isinstance(self.save_times, nm.ndarray):
if ((self.ilast < len(self.save_times)) and ((ts.time + (1e-14 * ts.dt)) >= self.save_times[self.ilast])):
self.ilast += 1
return True
elif callable(self.save_times):
return self.save_times(ts)
return False
save_times = options.get('save_times', 'all')
is_save = IsSave(save_times)
return is_save |
def pretrained_model_config_and_tokenizer(model_type: str, model_name_or_path: str, config_name: str='', tokenizer_name: str='', do_lower_case: bool=False, cache_dir: str='', stateless_tied=False, do_resize_token_embedding=True, explicitly_set_dict={}, **config_kw):
(config_class, model_class, tokenizer_class) = MODEL_TYPES[model_type]
config = config_class.from_pretrained((config_name if config_name else model_name_or_path), cache_dir=(cache_dir if cache_dir else None), **config_kw)
for (k, v) in explicitly_set_dict.items():
setattr(config, k, v)
tokenizer = tokenizer_class.from_pretrained((tokenizer_name if tokenizer_name else model_name_or_path), do_lower_case=do_lower_case, cache_dir=(cache_dir if cache_dir else None))
extra_kwargs = {}
if (model_name_or_path in {'t5-11b'}):
extra_kwargs['use_cdn'] = False
model = model_class.from_pretrained(model_name_or_path, from_tf=bool(('.ckpt' in model_name_or_path)), config=config, cache_dir=(cache_dir if cache_dir else None), **extra_kwargs)
if do_resize_token_embedding:
resize_token_embeddings(model, tokenizer)
if stateless_tied:
model_to_resize = (model.module if hasattr(model, 'module') else model)
if hasattr(model_to_resize, 'make_stateless_after_loaded_tied_and_resized'):
model_to_resize.make_stateless_after_loaded_tied_and_resized()
elif hasattr(model_to_resize, 'make_stateless'):
model_to_resize.make_stateless()
else:
raise ValueError(f'Problem making model stateless. model_type: {model_type}')
return (model, tokenizer, config) |
class DeepmindMathDataset(torch.utils.data.Dataset):
VERSION = 8
vocabulary: framework.data_structures.CharVocabulary = None
raw_data = {}
index = {}
DIFFICULTIES = ['easy', 'medium', 'hard']
def lock(self) -> framework.utils.LockFile:
return framework.utils.LockFile(os.path.join(self.cache_dir, 'dm_math_lock'))
def download(self):
with self.lock():
os.makedirs(self.cache_dir, exist_ok=True)
if (not os.path.isdir(os.path.join(self.cache_dir, 'mathematics_dataset-v1.0'))):
if (not os.path.isfile(os.path.join(self.cache_dir, 'mathematics_dataset-v1.0.tar.gz'))):
assert False, f'Please download mathematics_dataset-v1.0.tar.gz and place it in the {os.path.abspath(self.cache_dir)} folder.'
with tarfile.open(os.path.join(self.cache_dir, 'mathematics_dataset-v1.0.tar.gz'), 'r') as tf:
tf.extractall(path=self.cache_dir)
def load_file(self, path: str) -> Tuple[(List[str], List[str])]:
print(f'Loading {path}')
with open(path, 'r') as f:
lines = [l.strip() for l in f.readlines()]
q = lines[::2]
a = lines[1::2]
assert (len(q) == len(a))
return (q, a)
def verify_cache_version(self):
with self.lock():
if os.path.isfile(self.version_cache):
verfile = pickle.load(open(self.version_cache, 'rb'))
if (verfile['version'] == self.VERSION):
return
print('Cache version changed. Invalidating the cache...')
shutil.rmtree(self.task_cache, ignore_errors=True)
if os.path.exists(self.vocab_cache):
os.remove(self.vocab_cache)
pickle.dump({'version': self.VERSION}, open(self.version_cache, 'wb'))
def get_cached(self, fname: str, construct: Callable[([], Any)]) -> Any:
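# Build-or-load helper: construct and pickle the data if the cache file is missing; the GC is disabled around the pickle calls.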
with self.lock():
if (not os.path.isfile(fname)):
data = construct()
os.makedirs(os.path.dirname(fname), exist_ok=True)
gc.disable()
pickle.dump(data, open(fname, 'wb'), protocol=(- 1))
gc.enable()
return data
gc.disable()
data = pickle.load(open(fname, 'rb'))
gc.enable()
return data
def create_vocab(self) -> set:
print('Constructing vocabulary...')
flist = []
extracted_dir = os.path.join(self.cache_dir, 'mathematics_dataset-v1.0')
for s in os.listdir(extracted_dir):
if ('readme' in s):
continue
set_dir = os.path.join(self.cache_dir, 'mathematics_dataset-v1.0', s)
for task in os.listdir(set_dir):
flist.append(os.path.join(set_dir, task))
def process(fname: str):
vocabulary = set()
(questions, answers) = self.load_file(fname)
for q in questions:
vocabulary.update(set(q))
for a in answers:
vocabulary.update(set(a))
return vocabulary
vlist = framework.utils.parallel_map(flist, process)
return set().union(*vlist)
def translate_file(self, fname: str, file, known: set):
print(f'Translating {fname}')
(questions, answers) = self.load_file(fname)
index = []
cache = []
offset = file.tell()
skipped = 0
def sync():
np.asarray(list(itertools.chain.from_iterable(cache)), dtype=np.int8).astype('int8').tofile(file)
assert (offset == file.tell())
cache.clear()
for (q, a) in zip(questions, answers):
h = hash(q)
if (h in known):
skipped += 1
continue
else:
known.add(h)
cache.append(self.vocabulary(q))
cache.append(self.vocabulary(a))
len_total = (len(q) + len(a))
index.append([offset, len_total, len(q)])
offset += len_total
if (len(cache) > 10000):
sync()
if (skipped > 0):
print(f'WARNING: removed {skipped} entries from {fname} because of repeats.')
if (len(cache) > 0):
sync()
return index
def get_task_name_list(self) -> List[str]:
extracted_dir = os.path.join(self.cache_dir, 'mathematics_dataset-v1.0')
res = set()
for s in os.listdir(extracted_dir):
if ('readme' in s):
continue
is_train = s.startswith('train-')
tname = (s[6:] if is_train else s)
for f in os.listdir(os.path.join(extracted_dir, s)):
assert f.endswith('.txt')
f = f[:(- 4)]
if (tname == 'extrapolate'):
for e in ['_big', '_more', '_longer']:
i = f.find(e)
if (i > 0):
f = f[:i]
break
if is_train:
res.add(f'{f}_train_{tname}')
res.add(f'{f}_test_{tname}')
else:
res.add(f'{f}_{tname}')
return list(sorted(res))
def split_test(self, data: List) -> Tuple[(List, List)]:
def copy_filtered(data: List, filter) -> List:
return [data[i] for i in range(len(data)) if filter(i)]
seed = np.random.RandomState(1234)
test_indices = set(seed.choice(len(data), 10000, replace=False).tolist())
return (copy_filtered(data, (lambda i: (i not in test_indices))), copy_filtered(data, (lambda i: (i in test_indices))))
def write_index_list(self, fname: str, ilist: List[List]):
f = open(os.path.join(self.task_cache, fname), 'wb')
for l in ilist:
np.asarray(l, dtype=np.uint32).astype('uint32').tofile(f)
def collect_missing_tasks(self, tasks: List[str]) -> List[str]:
return [t for t in tasks if (not os.path.isfile(os.path.join(self.task_cache, (t + '.raw'))))]
def find_extrapolation_set(self, task: str) -> Optional[str]:
extrapolate_dir = os.path.join(self.cache_dir, 'mathematics_dataset-v1.0', f'extrapolate')
found = None
for f in os.listdir(extrapolate_dir):
if f.startswith(task):
assert (found is None), f'Multiple extrapolation sets found for task {task}'
found = os.path.join(extrapolate_dir, f)
return found
def create_task_cache(self, task: str):
raw_file = open(os.path.join(self.task_cache, (task + '.raw')), 'wb')
data = [os.path.join(self.cache_dir, 'mathematics_dataset-v1.0', f'train-{k}', (task + '.txt')) for k in self.DIFFICULTIES]
data.append(os.path.join(self.cache_dir, 'mathematics_dataset-v1.0', f'interpolate', (task + '.txt')))
extrapolation = self.find_extrapolation_set(task)
if (extrapolation is not None):
print(f'Found extrapolation set {extrapolation}')
data.append(extrapolation)
known = set()
data = [self.translate_file(d, raw_file, known) for d in data]
data = [(self.split_test(d) if (i < len(self.DIFFICULTIES)) else d) for (i, d) in enumerate(data)]
for (i, (n, d)) in enumerate(zip(self.DIFFICULTIES, data)):
self.write_index_list(f'{task}_train_{n}.idx', d[0])
self.write_index_list(f'{task}_test_{n}.idx', d[1])
self.write_index_list(f'{task}_interpolate.idx', data[len(self.DIFFICULTIES)])
if (extrapolation is not None):
self.write_index_list(f'{task}_extrapolate.idx', data[(len(self.DIFFICULTIES) + 1)])
def list_task_indices(self, task: str):
return [file[(len(task) + 1):(- 4)] for file in os.listdir(self.task_cache) if (file.endswith('.idx') and file.startswith(task))]
def load_task(self, task: str):
if (task in DeepmindMathDataset.raw_data):
return
DeepmindMathDataset.raw_data[task] = np.memmap(os.path.join(self.task_cache, (task + '.raw')), dtype='uint8', mode='r')
DeepmindMathDataset.index[task] = {n: np.memmap(os.path.join(self.task_cache, (task + f'_{n}.idx')), dtype='uint32', mode='r') for n in self.list_task_indices(task)}
def load_vocab(self):
if (DeepmindMathDataset.vocabulary is not None):
return
vocabulary = self.get_cached(self.vocab_cache, self.create_vocab)
DeepmindMathDataset.vocabulary = framework.data_structures.CharVocabulary(vocabulary)
DeepmindMathDataset.in_vocabulary = DeepmindMathDataset.vocabulary
DeepmindMathDataset.out_vocabulary = DeepmindMathDataset.vocabulary
print(f'Vocabulary: {len(vocabulary)}')
print(vocabulary)
def __len__(self) -> int:
return self.count
def __init__(self, tasks: List[str], sets: List[str]=['train_easy', 'train_medium', 'train_hard'], cache_dir: str='./cache/dm_math/'):
super().__init__()
self.cache_dir = cache_dir
self.vocab_cache = os.path.join(self.cache_dir, 'vocabulary.dat')
self.version_cache = os.path.join(self.cache_dir, 'version.dat')
self.task_cache = os.path.join(self.cache_dir, 'cached_tasks')
os.makedirs(self.cache_dir, exist_ok=True)
self.download()
self.verify_cache_version()
os.makedirs(self.task_cache, exist_ok=True)
self.data_tables = []
self.index_tables = []
self.offset_table = []
self.table_type = []
self.count = 0
self.task_list = self.get_task_name_list()
print('Loading vocabulary')
self.load_vocab()
with self.lock():
missing_tasks = self.collect_missing_tasks(tasks)
framework.utils.parallel_map(missing_tasks, self.create_task_cache, max_parallel=16)
for t in tasks:
self.load_task(t)
for set in sets:
print(f'Loading task {t}, set {set}')
if ((set == 'extrapolate') and (set not in DeepmindMathDataset.index[t])):
print(f'No extrapolation set for {t}')
continue
self.table_type.append(self.task_list.index(f'{t}_{set}'))
self.index_tables.append(DeepmindMathDataset.index[t][set])
self.data_tables.append(DeepmindMathDataset.raw_data[t])
self.offset_table.append(self.count)
self.count += (self.index_tables[(- 1)].shape[0] // 3)
print(f'Loaded {len(self)} samples')
def get_index(self, item: int) -> Tuple[(np.ndarray, np.ndarray, int)]:
table_index = (bisect.bisect(self.offset_table, item) - 1)
relative_index = (item - self.offset_table[table_index])
(offset, t_len, q_len) = self.index_tables[table_index][(relative_index * 3):((relative_index + 1) * 3)]
return (self.data_tables[table_index][offset:(offset + q_len)], self.data_tables[table_index][(offset + q_len):(offset + t_len)], self.table_type[table_index])
def __getitem__(self, item: int) -> Dict[(str, Any)]:
(q, a, tid) = self.get_index(item)
return {'in': q, 'out': a, 'in_len': q.shape[0], 'out_len': a.shape[0], 'type': tid}
def start_test(self) -> TypedTextSequenceTestState:
return TypedTextSequenceTestState(self.in_vocabulary.ind_to_str, self.out_vocabulary.ind_to_str, self.task_list) |
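A hedged usage sketch for the dataset class above. It assumes mathematics_dataset-v1.0.tar.gz has already been placed in the cache directory (otherwise the download() assertion fires) and that 'numbers__place_value' is a valid task name; both are illustrative, not taken from the snippet.

import torch

train_set = DeepmindMathDataset(tasks=['numbers__place_value'],
                                sets=['train_easy', 'train_medium'],
                                cache_dir='./cache/dm_math/')
loader = torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=True)
sample = next(iter(loader))
# Each item is a dict of integer-encoded question/answer plus their lengths and task id.
print(sample['in_len'], sample['out_len'], sample['type'])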
class MSRVTTQADataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def dataset_cls(self):
return MSRVTTQADataset
def dataset_name(self):
return 'msrvttqa'
def setup(self, stage):
super().setup(stage)
self.answer2id = self.train_dataset.ans_lab_dict
sorted_a2i = sorted(self.answer2id.items(), key=(lambda x: x[1]))
self.num_class = (max(self.answer2id.values()) + 1)
self.id2answer = defaultdict((lambda : 'unknown'))
for (k, v) in sorted_a2i:
self.id2answer[v] = k |
@ti.kernel
def substep_p2g(x: ti.types.ndarray(ndim=1), v: ti.types.ndarray(ndim=1), C: ti.types.ndarray(ndim=1), J: ti.types.ndarray(ndim=1), grid_v: ti.types.ndarray(ndim=2), grid_m: ti.types.ndarray(ndim=2)):
for p in x:
Xp = (x[p] / dx)
base = int((Xp - 0.5))
fx = (Xp - base)
w = [(0.5 * ((1.5 - fx) ** 2)), (0.75 - ((fx - 1) ** 2)), (0.5 * ((fx - 0.5) ** 2))]
stress = ((((((- dt) * 4) * E) * p_vol) * (J[p] - 1)) / (dx ** 2))
affine = (ti.Matrix([[stress, 0], [0, stress]]) + (p_mass * C[p]))
for (i, j) in ti.static(ti.ndrange(3, 3)):
offset = ti.Vector([i, j])
dpos = ((offset - fx) * dx)
weight = (w[i].x * w[j].y)
grid_v[(base + offset)] += (weight * ((p_mass * v[p]) + (affine @ dpos)))
grid_m[(base + offset)] += (weight * p_mass) |
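A rough driver sketch for the particle-to-grid kernel above. The simulation constants (dx, dt, E, p_vol, p_mass) are referenced as globals inside the kernel, so this sketch assumes the kernel lives in the same module; the MPM88-style values below are assumptions standing in for the real ones, and the ndarray layouts mirror the argument annotations.

import taichi as ti

ti.init(arch=ti.cpu)

# Assumed MPM88-style constants (not taken from the snippet); they must exist
# in the kernel's module before its first call compiles it.
n_particles, n_grid = 8192, 128
dx = 1.0 / n_grid
dt = 2e-4
p_rho = 1.0
p_vol = (dx * 0.5) ** 2
p_mass = p_vol * p_rho
E = 400.0

x = ti.Vector.ndarray(2, ti.f32, shape=n_particles)         # particle positions
v = ti.Vector.ndarray(2, ti.f32, shape=n_particles)         # particle velocities
C = ti.Matrix.ndarray(2, 2, ti.f32, shape=n_particles)      # affine velocity field
J = ti.ndarray(ti.f32, shape=n_particles)                    # volume ratio
grid_v = ti.Vector.ndarray(2, ti.f32, shape=(n_grid, n_grid))
grid_m = ti.ndarray(ti.f32, shape=(n_grid, n_grid))

substep_p2g(x, v, C, J, grid_v, grid_m)  # scatter particle mass/momentum to the grid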
def test_add_loss():
tl = Timeline()
photon = Photon('', tl, encoding_type={'name': 'single_atom'})
assert (photon.loss == 0)
photon.add_loss(0.5)
assert (photon.loss == 0.5)
photon.add_loss(0.5)
assert (photon.loss == 0.75) |
class DDIMSampler(object):
def __init__(self, model, schedule='linear', **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
if (type(attr) == torch.Tensor):
if (attr.device != torch.device('cuda')):
attr = attr.to(torch.device('cuda'))
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize='uniform', ddim_eta=0.0, verbose=True):
self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
alphas_cumprod = self.model.alphas_cumprod
assert (alphas_cumprod.shape[0] == self.ddpm_num_timesteps), 'alphas have to be defined for each timestep'
to_torch = (lambda x: x.clone().detach().to(torch.float32).to(self.model.device))
self.register_buffer('betas', to_torch(self.model.betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt((1.0 - alphas_cumprod.cpu()))))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log((1.0 - alphas_cumprod.cpu()))))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt((1.0 / alphas_cumprod.cpu()))))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(((1.0 / alphas_cumprod.cpu()) - 1))))
(ddim_sigmas, ddim_alphas, ddim_alphas_prev) = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta, verbose=verbose)
self.register_buffer('ddim_sigmas', ddim_sigmas)
self.register_buffer('ddim_alphas', ddim_alphas)
self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt((1.0 - ddim_alphas)))
sigmas_for_original_sampling_steps = (ddim_eta * torch.sqrt((((1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod)) * (1 - (self.alphas_cumprod / self.alphas_cumprod_prev)))))
self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
@torch.no_grad()
def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0.0, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None, **kwargs):
if (conditioning is not None):
if isinstance(conditioning, dict):
cbs = conditioning[list(conditioning.keys())[0]].shape[0]
if (cbs != batch_size):
print(f'Warning: Got {cbs} conditionings but batch-size is {batch_size}')
elif (conditioning.shape[0] != batch_size):
print(f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}')
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
(C, H, W) = shape
size = (batch_size, C, H, W)
(samples, intermediates) = self.ddim_sampling(conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning)
return (samples, intermediates)
@torch.no_grad()
def ddim_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None):
device = self.model.betas.device
b = shape[0]
if (x_T is None):
img = torch.randn(shape, device=device)
else:
img = x_T
if (timesteps is None):
timesteps = (self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps)
elif ((timesteps is not None) and (not ddim_use_original_steps)):
subset_end = (int((min((timesteps / self.ddim_timesteps.shape[0]), 1) * self.ddim_timesteps.shape[0])) - 1)
timesteps = self.ddim_timesteps[:subset_end]
intermediates = {'x_inter': [img], 'pred_x0': [img]}
time_range = (reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps))
total_steps = (timesteps if ddim_use_original_steps else timesteps.shape[0])
print(f'Running DDIM Sampling with {total_steps} timesteps')
iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
for (i, step) in enumerate(iterator):
index = ((total_steps - i) - 1)
ts = torch.full((b,), step, device=device, dtype=torch.long)
if (mask is not None):
assert (x0 is not None)
img_orig = self.model.q_sample(x0, ts)
img = ((img_orig * mask) + ((1.0 - mask) * img))
outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning)
(img, pred_x0) = outs
if callback:
callback(i)
if img_callback:
img_callback(pred_x0, i)
if (((index % log_every_t) == 0) or (index == (total_steps - 1))):
intermediates['x_inter'].append(img)
intermediates['pred_x0'].append(pred_x0)
return (img, intermediates)
@torch.no_grad()
def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None):
(b, *_, device) = (*x.shape, x.device)
if ((unconditional_conditioning is None) or (unconditional_guidance_scale == 1.0)):
e_t = self.model.apply_model(x, t, c)
else:
x_in = torch.cat(([x] * 2))
t_in = torch.cat(([t] * 2))
c_in = torch.cat([unconditional_conditioning, c])
(e_t_uncond, e_t) = self.model.apply_model(x_in, t_in, c_in).chunk(2)
e_t = (e_t_uncond + (unconditional_guidance_scale * (e_t - e_t_uncond)))
if (score_corrector is not None):
assert (self.model.parameterization == 'eps')
e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
alphas = (self.model.alphas_cumprod if use_original_steps else self.ddim_alphas)
alphas_prev = (self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev)
sqrt_one_minus_alphas = (self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas)
sigmas = (self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas)
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
pred_x0 = ((x - (sqrt_one_minus_at * e_t)) / a_t.sqrt())
if quantize_denoised:
(pred_x0, _, *_) = self.model.first_stage_model.quantize(pred_x0)
dir_xt = (((1.0 - a_prev) - (sigma_t ** 2)).sqrt() * e_t)
noise = ((sigma_t * noise_like(x.shape, device, repeat_noise)) * temperature)
if (noise_dropout > 0.0):
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
x_prev = (((a_prev.sqrt() * pred_x0) + dir_xt) + noise)
return (x_prev, pred_x0) |
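A hedged sketch of driving the sampler above. `model` is assumed to be a latent-diffusion style model exposing apply_model, betas, alphas_cumprod and (here) get_learned_conditioning / decode_first_stage; the latent shape and prompt are illustrative.

import torch

sampler = DDIMSampler(model)  # `model` assumed loaded elsewhere
cond = model.get_learned_conditioning(['a photo of a cat'])  # assumed conditioning API
uncond = model.get_learned_conditioning([''])
samples, intermediates = sampler.sample(
    S=50,                      # number of DDIM steps
    batch_size=1,
    shape=(4, 32, 32),         # latent (C, H, W), batch dim excluded
    conditioning=cond,
    eta=0.0,                   # deterministic DDIM
    unconditional_guidance_scale=5.0,
    unconditional_conditioning=uncond,
    verbose=False,
)
images = model.decode_first_stage(samples)  # assumed decoder back to pixel space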
def register_Ns3LteUeCcmRrcSapProviderLcsConfig_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::LteUeCcmRrcSapProvider::LcsConfig const &', 'arg0')])
cls.add_instance_attribute('componentCarrierId', 'uint8_t', is_const=False)
cls.add_instance_attribute('lcConfig', 'ns3::LteUeCmacSapProvider::LogicalChannelConfig', is_const=False)
cls.add_instance_attribute('msu', 'ns3::LteMacSapUser *', is_const=False)
return |
class DiscreteEncoder(EncoderBase):
def __init__(self):
super(DiscreteEncoder, self).__init__()
def fit(self, df, targets, configurations):
self.reset()
for target in targets:
for (method, parameter) in configurations:
nbins = parameter['nbins']
self._fit_one(df, target, method, nbins)
def _fit_one(self, df, target, method, nbins):
if (method == 'uniform'):
intervals = self._get_uniform_intervals(df, target, nbins)
name = (((('discrete_' + remove_continuous_discrete_prefix(target)) + '_nbins_') + str(nbins)) + '_uniform_dis_encoder')
self.trans_ls.append((target, name, intervals))
elif (method == 'quantile'):
intervals = self._get_quantile_intervals(df, target, nbins)
name = (((('discrete_' + remove_continuous_discrete_prefix(target)) + '_nbins_') + str(nbins)) + '_quantile_dis_encoder')
self.trans_ls.append((target, name, intervals))
else:
raise Exception('Not Implemented Yet')
def transform(self, df):
result = df.copy(deep=True)
for (target, _, _) in self.trans_ls:
if (target not in df.columns):
raise Exception('The columns to be transformed are not in the dataframe.')
for (target, name, intervals) in self.trans_ls:
result[name] = encode_label(result[target].map((lambda x: get_interval(x, intervals))))
return result
def _get_uniform_intervals(self, df, target, nbins):
target_var = df[target]
minimum = target_var[(target_var != (- np.inf))].min()
maximum = target_var[(target_var != np.inf)].max()
intervals = get_uniform_interval(minimum, maximum, nbins)
return intervals
def _get_quantile_intervals(self, df, target, nbins):
return get_quantile_interval(df[target], nbins) |
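A small usage sketch for the encoder above, assuming its helper functions (get_uniform_interval, get_quantile_interval, get_interval, encode_label, remove_continuous_discrete_prefix) are importable from the same module; the column names and bin counts are illustrative.

import numpy as np
import pandas as pd

df = pd.DataFrame({'age': np.random.uniform(18, 90, size=100),
                   'income': np.random.lognormal(10, 1, size=100)})

enc = DiscreteEncoder()
enc.fit(df, targets=['age', 'income'],
        configurations=[('uniform', {'nbins': 4}), ('quantile', {'nbins': 4})])
out = enc.transform(df)
# transform() keeps the original columns and adds one label-encoded bin column
# per (target, configuration) pair, named by _fit_one().
print([c for c in out.columns if c.startswith('discrete_')])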
class DegenerateMetricParal(DegenerateMetric, TensorFieldParal):
def __init__(self, vector_field_module, name, signature=None, latex_name=None):
TensorFieldParal.__init__(self, vector_field_module, (0, 2), name=name, latex_name=latex_name, sym=(0, 1))
ndim = self._ambient_domain.dimension()
if (signature is None):
signature = ((ndim - 1), 0, 1)
else:
try:
for elt in signature:
if ((elt < 0) or (not isinstance(elt, (int, Integer)))):
raise ValueError('{} must be a positive integer'.format(elt))
sign = ((signature[0] + signature[1]) + signature[2])
if (sign != ndim):
raise ValueError('{} is different from {}'.format(sign, ndim))
except TypeError:
raise TypeError('signature must be an iterable')
self._signature = (signature[0], signature[1], signature[2])
self._signature_pm = self._signature
def set(self, symbiform):
if (not isinstance(symbiform, TensorFieldParal)):
raise TypeError(('the argument must be a tensor field with ' + 'values on a parallelizable domain'))
if (symbiform._tensor_type != (0, 2)):
raise TypeError('the argument must be of tensor type (0,2)')
if (symbiform._sym != ((0, 1),)):
raise TypeError('the argument must be symmetric')
if (symbiform._vmodule is not self._vmodule):
raise TypeError(('the symmetric bilinear form and the metric are ' + 'not defined on the same vector field module'))
self._components.clear()
for frame in symbiform._components:
self._components[frame] = symbiform._components[frame].copy()
for (dom, symbiform_rst) in symbiform._restrictions.items():
rst = self.restrict(dom)
rst.set(symbiform_rst)
def restrict(self, subdomain, dest_map=None):
if (subdomain == self._domain):
return self
if (subdomain not in self._restrictions):
resu = TensorFieldParal.restrict(self, subdomain, dest_map=dest_map)
resu._signature = self._signature
resu._signature_pm = self._signature_pm
self._restrictions[subdomain] = resu
return self._restrictions[subdomain] |
def test_hash_collisions():
X = [list('Thequickbrownfoxjumped')]
Xt = FeatureHasher(alternate_sign=True, n_features=1, input_type='string').fit_transform(X)
assert (abs(Xt.data[0]) < len(X[0]))
Xt = FeatureHasher(alternate_sign=False, n_features=1, input_type='string').fit_transform(X)
assert (Xt.data[0] == len(X[0])) |
class ContrastCLIPBottleneckEnt(AbstractCLIPBottleneck):
def __init__(self, feature_dim, num_classes, num_domains, hparams, pretrained, idx2class):
super(ContrastCLIPBottleneckEnt, self).__init__(feature_dim, num_classes, num_domains, hparams, pretrained, idx2class, DiscreteEntropyBottleneck, use_clip_contrast=True) |
class Batch():
def __init__(self, src=None, trg=None, dec=None):
(self.src, self.trg, self.dec) = (src, trg, dec) |
def get_dependent_dists(dists, dist):
if (dist not in dists):
raise DistlibException(('given distribution %r is not a member of the list' % dist.name))
graph = make_graph(dists)
dep = [dist]
todo = graph.reverse_list[dist]
while todo:
d = todo.pop()
dep.append(d)
for succ in graph.reverse_list[d]:
if (succ not in dep):
todo.append(succ)
dep.pop(0)
return dep |
class ChoiceStateVarLayer(LayerBase):
layer_class = 'choice_state_var'
def __init__(self, beam_size, search=NotSpecified, input_type='prob', prob_scale=1.0, base_beam_score_scale=1.0, random_sample_scale=0.0, length_normalization=True, custom_score_combine=None, source_beam_sizes=None, scheduled_sampling=False, cheating=False, explicit_search_sources=None, score_dependent=True, **kwargs):
super(ChoiceStateVarLayer, self).__init__(**kwargs)
rec_layer = self.network.parent_layer
assert isinstance(rec_layer, RecStepByStepLayer)
self.stochastic_var = rec_layer.stochastic_vars[self.name]
cell = rec_layer.cell
assert isinstance(cell, SubnetworkRecCellSingleStep)
assert (self.network in {cell.net, cell.net_delayed_update})
if (self.network == cell.net):
rec_layer.stochastic_var_order.append(self.name)
self.score_dependent = score_dependent
if self.score_dependent:
assert (len(self.sources) == 1)
(source,) = self.sources
self.stochastic_var.assign_score(source)
self.output.placeholder = self.stochastic_var.get_choice()
@classmethod
def transform_config_dict(cls, d, network, get_layer):
assert (d.get('from', NotSpecified) is not NotSpecified), "specify 'from' explicitly for choice layer"
if (not isinstance(d['from'], (tuple, list))):
d['from'] = [d['from']]
if (d.get('target', NotSpecified) is not None):
assert ('target' in d), ("%s: specify 'target' explicitly" % (cls.__name__,))
if isinstance(d['target'], str):
d['target'] = [d['target']]
assert isinstance(d['target'], list)
assert (len(d['target']) == len(d['from']))
if d.get('explicit_search_source'):
assert ('explicit_search_sources' not in d)
d['explicit_search_sources'] = [get_layer(d.pop('explicit_search_source'))]
elif d.get('explicit_search_sources'):
assert isinstance(d['explicit_search_sources'], (list, tuple))
d['explicit_search_sources'] = [get_layer(name) for name in d['explicit_search_sources']]
parent_rec_layer = network.parent_layer
if parent_rec_layer:
assert isinstance(parent_rec_layer, RecStepByStepLayer)
cell = parent_rec_layer.cell
assert isinstance(cell, SubnetworkRecCellSingleStep)
assert (network in {cell.net, cell.net_delayed_update})
if (network == cell.net_delayed_update):
d['from'] = ()
d['score_dependent'] = False
super(ChoiceStateVarLayer, cls).transform_config_dict(d, network=network, get_layer=get_layer)
@classmethod
def get_out_data_from_opts(cls, name, sources, target, network, **kwargs):
target = (target[0] if isinstance(target, list) else target)
if target:
out_data = cls._static_get_target_value(target=target, network=network, mark_data_key_as_used=False).copy_template(name=('%s_output' % name))
out_data.available_for_inference = True
else:
out_data = sources[0].output.copy_template().copy_as_batch_major()
dim_tags = list(out_data.dim_tags)
del dim_tags[out_data.feature_dim_axis]
out_data = Data(name=('%s_output' % name), dim_tags=dim_tags, sparse=True, dim=out_data.dim, batch=(out_data.batch.copy_set_beam(None) if out_data.batch else network.get_global_batch_info()))
return out_data |
@random_testing
def test_random_arith(level=MAX_LEVEL, trials=1):
i = 0
for x in random_rings(level):
print(('survived %s tests' % i))
i += 1
print(x)
a = x.random_element()
b = x.random_element()
print(a, b)
print(((((a * b) + a) - b) + 1))
if (i >= trials):
return |
class TokenizerUtilsTest(unittest.TestCase):
def check_tokenizer_from_pretrained(self, tokenizer_class):
s3_models = list(tokenizer_class.max_model_input_sizes.keys())
for model_name in s3_models[:1]:
tokenizer = tokenizer_class.from_pretrained(model_name)
self.assertIsNotNone(tokenizer)
self.assertIsInstance(tokenizer, tokenizer_class)
self.assertIsInstance(tokenizer, PreTrainedTokenizer)
for special_tok in tokenizer.all_special_tokens:
self.assertIsInstance(special_tok, str)
special_tok_id = tokenizer.convert_tokens_to_ids(special_tok)
self.assertIsInstance(special_tok_id, int)
def assert_dump_and_restore(self, be_original: BatchEncoding, equal_op: Optional[Callable]=None):
batch_encoding_str = pickle.dumps(be_original)
self.assertIsNotNone(batch_encoding_str)
be_restored = pickle.loads(batch_encoding_str)
self.assertEqual(be_restored.is_fast, be_original.is_fast)
if be_original.is_fast:
self.assertIsNotNone(be_restored.encodings)
else:
self.assertIsNone(be_restored.encodings)
for (original_v, restored_v) in zip(be_original.values(), be_restored.values()):
if equal_op:
self.assertTrue(equal_op(restored_v, original_v))
else:
self.assertEqual(restored_v, original_v)
def test_pretrained_tokenizers(self):
self.check_tokenizer_from_pretrained(GPT2Tokenizer)
def test_tensor_type_from_str(self):
self.assertEqual(TensorType('tf'), TensorType.TENSORFLOW)
self.assertEqual(TensorType('pt'), TensorType.PYTORCH)
self.assertEqual(TensorType('np'), TensorType.NUMPY)
@require_tokenizers
def test_batch_encoding_pickle(self):
import numpy as np
tokenizer_p = BertTokenizer.from_pretrained('bert-base-cased')
tokenizer_r = BertTokenizerFast.from_pretrained('bert-base-cased')
with self.subTest('BatchEncoding (Python, return_tensors=None)'):
self.assert_dump_and_restore(tokenizer_p('Small example to encode'))
with self.subTest('BatchEncoding (Python, return_tensors=NUMPY)'):
self.assert_dump_and_restore(tokenizer_p('Small example to encode', return_tensors=TensorType.NUMPY), np.array_equal)
with self.subTest('BatchEncoding (Rust, return_tensors=None)'):
self.assert_dump_and_restore(tokenizer_r('Small example to encode'))
with self.subTest('BatchEncoding (Rust, return_tensors=NUMPY)'):
self.assert_dump_and_restore(tokenizer_r('Small example to encode', return_tensors=TensorType.NUMPY), np.array_equal)
@require_tf
@require_tokenizers
def test_batch_encoding_pickle_tf(self):
import tensorflow as tf
def tf_array_equals(t1, t2):
return tf.reduce_all(tf.equal(t1, t2))
tokenizer_p = BertTokenizer.from_pretrained('bert-base-cased')
tokenizer_r = BertTokenizerFast.from_pretrained('bert-base-cased')
with self.subTest('BatchEncoding (Python, return_tensors=TENSORFLOW)'):
self.assert_dump_and_restore(tokenizer_p('Small example to encode', return_tensors=TensorType.TENSORFLOW), tf_array_equals)
with self.subTest('BatchEncoding (Rust, return_tensors=TENSORFLOW)'):
self.assert_dump_and_restore(tokenizer_r('Small example to encode', return_tensors=TensorType.TENSORFLOW), tf_array_equals)
@require_torch
@require_tokenizers
def test_batch_encoding_pickle_pt(self):
import torch
tokenizer_p = BertTokenizer.from_pretrained('bert-base-cased')
tokenizer_r = BertTokenizerFast.from_pretrained('bert-base-cased')
with self.subTest('BatchEncoding (Python, return_tensors=PYTORCH)'):
self.assert_dump_and_restore(tokenizer_p('Small example to encode', return_tensors=TensorType.PYTORCH), torch.equal)
with self.subTest('BatchEncoding (Rust, return_tensors=PYTORCH)'):
self.assert_dump_and_restore(tokenizer_r('Small example to encode', return_tensors=TensorType.PYTORCH), torch.equal)
@require_tokenizers
def test_batch_encoding_is_fast(self):
tokenizer_p = BertTokenizer.from_pretrained('bert-base-cased')
tokenizer_r = BertTokenizerFast.from_pretrained('bert-base-cased')
with self.subTest('Python Tokenizer'):
self.assertFalse(tokenizer_p('Small example to_encode').is_fast)
with self.subTest('Rust Tokenizer'):
self.assertTrue(tokenizer_r('Small example to_encode').is_fast)
@require_tokenizers
def test_batch_encoding_word_to_tokens(self):
tokenizer_r = BertTokenizerFast.from_pretrained('bert-base-cased')
encoded = tokenizer_r(['Test', '\xad', 'test'], is_split_into_words=True)
self.assertEqual(encoded.word_to_tokens(0), TokenSpan(start=1, end=2))
self.assertEqual(encoded.word_to_tokens(1), None)
self.assertEqual(encoded.word_to_tokens(2), TokenSpan(start=2, end=3))
def test_batch_encoding_with_labels(self):
batch = BatchEncoding({'inputs': [[1, 2, 3], [4, 5, 6]], 'labels': [0, 1]})
tensor_batch = batch.convert_to_tensors(tensor_type='np')
self.assertEqual(tensor_batch['inputs'].shape, (2, 3))
self.assertEqual(tensor_batch['labels'].shape, (2,))
with CaptureStderr() as cs:
tensor_batch = batch.convert_to_tensors(tensor_type='np')
self.assertFalse(len(cs.err), msg=f'should have no warning, but got {cs.err}')
batch = BatchEncoding({'inputs': [1, 2, 3], 'labels': 0})
tensor_batch = batch.convert_to_tensors(tensor_type='np', prepend_batch_axis=True)
self.assertEqual(tensor_batch['inputs'].shape, (1, 3))
self.assertEqual(tensor_batch['labels'].shape, (1,))
@require_torch
def test_batch_encoding_with_labels_pt(self):
batch = BatchEncoding({'inputs': [[1, 2, 3], [4, 5, 6]], 'labels': [0, 1]})
tensor_batch = batch.convert_to_tensors(tensor_type='pt')
self.assertEqual(tensor_batch['inputs'].shape, (2, 3))
self.assertEqual(tensor_batch['labels'].shape, (2,))
with CaptureStderr() as cs:
tensor_batch = batch.convert_to_tensors(tensor_type='pt')
self.assertFalse(len(cs.err), msg=f'should have no warning, but got {cs.err}')
batch = BatchEncoding({'inputs': [1, 2, 3], 'labels': 0})
tensor_batch = batch.convert_to_tensors(tensor_type='pt', prepend_batch_axis=True)
self.assertEqual(tensor_batch['inputs'].shape, (1, 3))
self.assertEqual(tensor_batch['labels'].shape, (1,))
@require_tf
def test_batch_encoding_with_labels_tf(self):
batch = BatchEncoding({'inputs': [[1, 2, 3], [4, 5, 6]], 'labels': [0, 1]})
tensor_batch = batch.convert_to_tensors(tensor_type='tf')
self.assertEqual(tensor_batch['inputs'].shape, (2, 3))
self.assertEqual(tensor_batch['labels'].shape, (2,))
with CaptureStderr() as cs:
tensor_batch = batch.convert_to_tensors(tensor_type='tf')
self.assertFalse(len(cs.err), msg=f'should have no warning, but got {cs.err}')
batch = BatchEncoding({'inputs': [1, 2, 3], 'labels': 0})
tensor_batch = batch.convert_to_tensors(tensor_type='tf', prepend_batch_axis=True)
self.assertEqual(tensor_batch['inputs'].shape, (1, 3))
self.assertEqual(tensor_batch['labels'].shape, (1,))
@require_flax
def test_batch_encoding_with_labels_jax(self):
batch = BatchEncoding({'inputs': [[1, 2, 3], [4, 5, 6]], 'labels': [0, 1]})
tensor_batch = batch.convert_to_tensors(tensor_type='jax')
self.assertEqual(tensor_batch['inputs'].shape, (2, 3))
self.assertEqual(tensor_batch['labels'].shape, (2,))
with CaptureStderr() as cs:
tensor_batch = batch.convert_to_tensors(tensor_type='jax')
self.assertFalse(len(cs.err), msg=f'should have no warning, but got {cs.err}')
batch = BatchEncoding({'inputs': [1, 2, 3], 'labels': 0})
tensor_batch = batch.convert_to_tensors(tensor_type='jax', prepend_batch_axis=True)
self.assertEqual(tensor_batch['inputs'].shape, (1, 3))
self.assertEqual(tensor_batch['labels'].shape, (1,))
def test_padding_accepts_tensors(self):
features = [{'input_ids': np.array([0, 1, 2])}, {'input_ids': np.array([0, 1, 2, 3])}]
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
batch = tokenizer.pad(features, padding=True)
self.assertTrue(isinstance(batch['input_ids'], np.ndarray))
self.assertEqual(batch['input_ids'].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
batch = tokenizer.pad(features, padding=True, return_tensors='np')
self.assertTrue(isinstance(batch['input_ids'], np.ndarray))
self.assertEqual(batch['input_ids'].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
@require_torch
def test_padding_accepts_tensors_pt(self):
import torch
features = [{'input_ids': torch.tensor([0, 1, 2])}, {'input_ids': torch.tensor([0, 1, 2, 3])}]
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
batch = tokenizer.pad(features, padding=True)
self.assertTrue(isinstance(batch['input_ids'], torch.Tensor))
self.assertEqual(batch['input_ids'].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
batch = tokenizer.pad(features, padding=True, return_tensors='pt')
self.assertTrue(isinstance(batch['input_ids'], torch.Tensor))
self.assertEqual(batch['input_ids'].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
@require_tf
def test_padding_accepts_tensors_tf(self):
import tensorflow as tf
features = [{'input_ids': tf.constant([0, 1, 2])}, {'input_ids': tf.constant([0, 1, 2, 3])}]
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
batch = tokenizer.pad(features, padding=True)
self.assertTrue(isinstance(batch['input_ids'], tf.Tensor))
self.assertEqual(batch['input_ids'].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
batch = tokenizer.pad(features, padding=True, return_tensors='tf')
self.assertTrue(isinstance(batch['input_ids'], tf.Tensor))
self.assertEqual(batch['input_ids'].numpy().tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
@require_tokenizers
def test_instantiation_from_tokenizers(self):
bert_tokenizer = Tokenizer(WordPiece(unk_token='[UNK]'))
PreTrainedTokenizerFast(tokenizer_object=bert_tokenizer)
@require_tokenizers
def test_instantiation_from_tokenizers_json_file(self):
bert_tokenizer = Tokenizer(WordPiece(unk_token='[UNK]'))
with tempfile.TemporaryDirectory() as tmpdirname:
bert_tokenizer.save(os.path.join(tmpdirname, 'tokenizer.json'))
PreTrainedTokenizerFast(tokenizer_file=os.path.join(tmpdirname, 'tokenizer.json')) |
def drop_not_type_specific_keywords(schema: Schema, new_type: str) -> None:
keywords = TYPE_SPECIFIC_KEYS.get(new_type, ())
for keyword in tuple(schema):
if ((keyword not in keywords) and (keyword not in ANY_TYPE_KEYS)):
schema.pop(keyword, None) |
def retry_on_connect_failures(func=None, connect_errors=ADDRESS_IN_USE):
if (func is None):
return partial(retry_on_connect_failures, connect_errors=connect_errors)
@wraps(func)
def wrapper(*args, **kwargs):
tries_remaining = 10
while True:
try:
return func(*args, **kwargs)
except RuntimeError as error:
if (str(error) in connect_errors):
tries_remaining -= 1
if (tries_remaining == 0):
raise
time.sleep(random.random())
continue
raise
return wrapper |
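A minimal usage sketch for the retry decorator above. It assumes the raised error message matches the connect_errors default (ADDRESS_IN_USE, which is referenced but not defined in this snippet); the flaky function is purely illustrative.

attempts = []

@retry_on_connect_failures
def flaky_bind():
    # Illustrative: fail twice with a retryable message, then succeed.
    # 'Address already in use' is assumed to match ADDRESS_IN_USE above.
    attempts.append(1)
    if len(attempts) < 3:
        raise RuntimeError('Address already in use')
    return 'bound'

print(flaky_bind())  # retried transparently by the wrapper; prints 'bound'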
def test_valarray(doc):
lst = m.cast_valarray()
assert (lst == [1, 4, 9])
assert m.load_valarray(lst)
assert (doc(m.cast_valarray) == 'cast_valarray() -> List[int]')
assert (doc(m.load_valarray) == 'load_valarray(arg0: List[int]) -> bool') |
@pytest.mark.parametrize('dtype', ((np.float64, np.float32), np.float64, None, 'numeric'))
@pytest.mark.parametrize('bool_dtype', ('bool', 'boolean'))
def test_check_dataframe_mixed_float_dtypes(dtype, bool_dtype):
if (bool_dtype == 'boolean'):
pd = importorskip('pandas', minversion='1.0')
else:
pd = importorskip('pandas')
df = pd.DataFrame({'int': [1, 2, 3], 'float': [0, 0.1, 2.1], 'bool': pd.Series([True, False, True], dtype=bool_dtype)}, columns=['int', 'float', 'bool'])
array = check_array(df, dtype=dtype)
assert (array.dtype == np.float64)
expected_array = np.array([[1.0, 0.0, 1.0], [2.0, 0.1, 0.0], [3.0, 2.1, 1.0]], dtype=float)
assert_allclose_dense_sparse(array, expected_array) |
def pad_vocabulary(math):
if (math == 'fp16'):
pad_vocab = 8
elif (math == 'fp32'):
pad_vocab = 1
return pad_vocab |
class TestGIL():
def setup_method(self):
self.messages = []
def log(self, message):
self.messages.append(message)
def make_worker_thread(self, target, args):
log = self.log
class WorkerThread(threading.Thread):
def run(self):
log('interpolation started')
target(*args)
log('interpolation complete')
return WorkerThread()
@pytest.mark.slow
@pytest.mark.xfail(reason='race conditions, may depend on system load')
def test_rectbivariatespline(self):
def generate_params(n_points):
x = y = np.linspace(0, 1000, n_points)
(x_grid, y_grid) = np.meshgrid(x, y)
z = (x_grid * y_grid)
return (x, y, z)
def calibrate_delay(requested_time):
for n_points in itertools.count(5000, 1000):
args = generate_params(n_points)
time_started = time.time()
interpolate(*args)
if ((time.time() - time_started) > requested_time):
return args
def interpolate(x, y, z):
scipy.interpolate.RectBivariateSpline(x, y, z)
args = calibrate_delay(requested_time=3)
worker_thread = self.make_worker_thread(interpolate, args)
worker_thread.start()
for i in range(3):
time.sleep(0.5)
self.log('working')
worker_thread.join()
assert_equal(self.messages, ['interpolation started', 'working', 'working', 'working', 'interpolation complete']) |
class GPTQAccuracyTest(GPTQBaseTest):
def get_gptq_config(self):
return GradientPTQConfig(5, optimizer=torch.optim.Adam([torch.Tensor([])], lr=0.0001), optimizer_rest=torch.optim.Adam([torch.Tensor([])], lr=0.0001), loss=multiple_tensors_mse_loss, train_bias=True, rounding_type=self.rounding_type, use_hessian_based_weights=self.hessian_weights, optimizer_bias=torch.optim.Adam([torch.Tensor([])], lr=0.4), hessian_weights_config=GPTQHessianScoresConfig(log_norm=self.log_norm_weights, scale_log_norm=self.scaled_log_norm), gptq_quantizer_params_override=self.override_params)
def get_gptq_configv2(self):
return GradientPTQConfigV2(5, optimizer=torch.optim.Adam([torch.Tensor([])], lr=0.0001), optimizer_rest=torch.optim.Adam([torch.Tensor([])], lr=0.0001), loss=multiple_tensors_mse_loss, train_bias=True, rounding_type=self.rounding_type, use_hessian_based_weights=self.hessian_weights, optimizer_bias=torch.optim.Adam([torch.Tensor([])], lr=0.4), hessian_weights_config=GPTQHessianScoresConfig(log_norm=self.log_norm_weights, scale_log_norm=self.scaled_log_norm), gptq_quantizer_params_override=self.override_params)
def gptq_compare(self, ptq_model, gptq_model, input_x=None):
ptq_weights = torch_tensor_to_numpy(list(ptq_model.parameters()))
gptq_weights = torch_tensor_to_numpy(list(gptq_model.parameters()))
self.unit_test.assertTrue((len(ptq_weights) == len(gptq_weights)), msg='PTQ model number of weights different from GPTQ model!') |
class SquadExample(object):
def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None, is_impossible=None):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ''
s += ('qas_id: %s' % self.qas_id)
s += (', question_text: %s' % self.question_text)
s += (', doc_tokens: [%s]' % ' '.join(self.doc_tokens))
if self.start_position:
s += (', start_position: %d' % self.start_position)
if self.end_position:
s += (', end_position: %d' % self.end_position)
if self.is_impossible:
s += (', is_impossible: %r' % self.is_impossible)
return s |
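A small construction example for the container above; all field values are made up for illustration.

example = SquadExample(
    qas_id='example-0001',
    question_text='Where was the game played?',
    doc_tokens=['The', 'game', 'was', 'played', 'in', 'Santa', 'Clara', '.'],
    orig_answer_text='Santa Clara',
    start_position=5,
    end_position=6,
    is_impossible=False,
)
print(example)  # __repr__ lists qas_id, question text, tokens and the answer span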
class NotANumber(Constant):
def __init__(self, name='NaN'):
conversions = dict(matlab='NaN')
Constant.__init__(self, name, conversions=conversions)
def __float__(self):
return float('nan')
def _mpfr_(self, R):
return R('NaN')
def _real_double_(self, R):
return R.NaN()
def _sympy_(self):
import sympy
return sympy.nan |
def _format(val: Any, output_format: str='standard', errors: str='coerce') -> Any:
val = str(val)
result: Any = []
if (val in NULL_VALUES):
return [np.nan]
if (not validate_pe_ruc(val)):
if (errors == 'raise'):
raise ValueError(f'Unable to parse value {val}')
error_result = (val if (errors == 'ignore') else np.nan)
return [error_result]
if (output_format in {'compact', 'standard'}):
result = ([ruc.compact(val)] + result)
if (output_format == 'dni'):
try:
result = ([ruc.to_dni(val)] + result)
except Exception:
result = [np.nan]
return result |
def get_labeled_dataloader(dataset_name: str, augmentation: str, batch_size: int, image_size: int=None, siamese=False, unlabeled_ratio: int=20, num_workers=2, shuffle=True, drop_last=False, split_seed=1):
(unlabeled, labeled) = get_split_dataloader(dataset_name, augmentation, batch_size, image_size, siamese=siamese, unlabeled_ratio=unlabeled_ratio, num_workers=num_workers, shuffle=shuffle, drop_last=drop_last, seed=split_seed)
return labeled |
class PDAG(nx.DiGraph):
def __init__(self, directed_ebunch=[], undirected_ebunch=[], latents=[]):
super(PDAG, self).__init__(((directed_ebunch + undirected_ebunch) + [(Y, X) for (X, Y) in undirected_ebunch]))
self.latents = set(latents)
self.directed_edges = set(directed_ebunch)
self.undirected_edges = set(undirected_ebunch)
def copy(self):
return PDAG(directed_ebunch=list(self.directed_edges.copy()), undirected_ebunch=list(self.undirected_edges.copy()), latents=self.latents)
def to_dag(self, required_edges=[]):
dag = DAG()
dag.add_nodes_from(self.nodes())
dag.add_edges_from(self.directed_edges)
dag.latents = self.latents
pdag = self.copy()
while (pdag.number_of_nodes() > 0):
found = False
for X in pdag.nodes():
directed_outgoing_edges = (set(pdag.successors(X)) - set(pdag.predecessors(X)))
undirected_neighbors = (set(pdag.successors(X)) & set(pdag.predecessors(X)))
neighbors_are_clique = all((pdag.has_edge(Y, Z) for Z in pdag.predecessors(X) for Y in undirected_neighbors if (not (Y == Z))))
if ((not directed_outgoing_edges) and ((not undirected_neighbors) or neighbors_are_clique)):
found = True
for Y in pdag.predecessors(X):
dag.add_edge(Y, X)
pdag.remove_node(X)
break
if (not found):
warn((('PDAG has no faithful extension (= no oriented DAG with the ' + 'same v-structures as PDAG). Remaining undirected PDAG edges ') + 'oriented arbitrarily.'))
for (X, Y) in pdag.edges():
if (not dag.has_edge(Y, X)):
try:
dag.add_edge(X, Y)
except ValueError:
pass
break
return dag |
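A short usage sketch for the class above, assuming DAG is the pgmpy-style directed-graph class the method instantiates; the tiny graph here has one directed and one undirected edge.

# A -> B directed, B - C undirected; to_dag() orients B - C without adding v-structures.
pdag = PDAG(directed_ebunch=[('A', 'B')], undirected_ebunch=[('B', 'C')])
dag = pdag.to_dag()
print(sorted(dag.edges()))  # [('A', 'B'), ('B', 'C')]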
def symbol2number48(symbol):
order = 0
char_list = list(symbol)
if (('o' in char_list) or ('' in char_list)):
order = 4
elif (('#' in char_list) and ('5' in char_list)):
order = 3
elif (('m' in char_list) and ('j' not in char_list)):
order = 2
else:
order = 1
index = 0
if (len(char_list) == 1):
if ((char_list[0] == 'A') or (char_list[0] == 'a')):
index = 9
if ((char_list[0] == 'B') or (char_list[0] == 'b')):
index = 11
if ((char_list[0] == 'C') or (char_list[0] == 'c')):
index = 0
if ((char_list[0] == 'D') or (char_list[0] == 'd')):
index = 2
if ((char_list[0] == 'E') or (char_list[0] == 'e')):
index = 4
if ((char_list[0] == 'F') or (char_list[0] == 'f')):
index = 5
if ((char_list[0] == 'G') or (char_list[0] == 'g')):
index = 7
elif (char_list[1] == 'b'):
if ((char_list[0] == 'A') or (char_list[0] == 'a')):
index = 8
if ((char_list[0] == 'B') or (char_list[0] == 'b')):
index = 10
if ((char_list[0] == 'D') or (char_list[0] == 'd')):
index = 1
if ((char_list[0] == 'E') or (char_list[0] == 'e')):
index = 3
if ((char_list[0] == 'G') or (char_list[0] == 'g')):
index = 6
else:
if ((char_list[0] == 'A') or (char_list[0] == 'a')):
index = 9
if ((char_list[0] == 'B') or (char_list[0] == 'b')):
index = 11
if ((char_list[0] == 'C') or (char_list[0] == 'c')):
index = 0
if ((char_list[0] == 'D') or (char_list[0] == 'd')):
index = 2
if ((char_list[0] == 'E') or (char_list[0] == 'e')):
index = 4
if ((char_list[0] == 'F') or (char_list[0] == 'f')):
index = 5
if ((char_list[0] == 'G') or (char_list[0] == 'g')):
index = 7
return np.asarray([((index * 4) + (order - 1))]) |
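Two quick worked examples for the chord-symbol encoder above; the returned value is index * 4 + (order - 1), so the numbers below follow directly from the branches in the function (the reading of the order groups as chord families is an assumption).

print(symbol2number48('C'))   # order 1, index 0 -> array([0])
print(symbol2number48('Am'))  # order 2 ('m' without 'j'), index 9 -> array([37])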
def _nb_nodes(feedback: _Feedback, is_chunked) -> int:
for inp in feedback.features.inputs:
if (inp.location in [_Location.NODE, _Location.EDGE]):
if is_chunked:
return inp.data.shape[2]
else:
return inp.data.shape[1]
assert False |
class FrameSelectionStrategy(Enum):
RANDOM_K = 'random_k'
FIRST_K = 'first_k'
LAST_K = 'last_k'
ALL = 'all' |
class MNASNet(TorchVisionModel):
def __init__(self, tasks, model_args):
super(MNASNet, self).__init__(models.mnasnet1_0, tasks, model_args) |
def enable_hooks(args: argparse.Namespace) -> List[int]:
registered = []
if args.sequential:
def make_sequential(sdfg: dace.SDFG):
for sd in sdfg.all_sdfgs_recursive():
sd.openmp_sections = False
for (n, _) in sdfg.all_nodes_recursive():
if isinstance(n, dace.nodes.EntryNode):
sched = getattr(n, 'schedule', False)
if (sched in (dace.ScheduleType.CPU_Multicore, dace.ScheduleType.CPU_Persistent, dace.ScheduleType.Default)):
n.schedule = dace.ScheduleType.Sequential
registered.append(dace.hooks.register_sdfg_call_hook(before_hook=make_sequential))
return registered |
class PreconditionFailed(HTTPException):
code = 412
description = 'The precondition on the request for the URL failed positive evaluation.' |
class MissingOverleafCredentials(OverleafException):
def __init__(self, **kwargs):
message = 'Overleaf credentials `OVERLEAF_EMAIL` and/or `OVERLEAF_PASSWORD` not found. These should be set as both environment variables and GitHub repository secrets.'
super().__init__(message, level=kwargs.get('level', 'warn')) |
def LF_contact_covid(s):
rgx = '\\b(known\\s)*(contact(s)*)(\\s)*(with|w\\/)*(\\s)*(known|confirmed)*(\\s)*(coronavirus|covid|covid\\s19|covid-19|covid(\\s)*\\+)(\\scontact)*\\b'
trigger = match_regex(rgx, s)
if (not trigger):
return ABSTAIN
return (EXPOSURE if (not is_negated(trigger)) else NO_EXPOSURE) |
class ProjectivePlaneCurve_field(ProjectivePlaneCurve, ProjectiveCurve_field):
_point = ProjectivePlaneCurvePoint_field
def arithmetic_genus(self):
if (not self.is_irreducible()):
raise TypeError('this curve must be irreducible')
d = self.defining_polynomial().total_degree()
return Integer((d - 1)).binomial(2)
def fundamental_group(self):
from sage.schemes.curves.zariski_vankampen import fundamental_group
F = self.base_ring()
from sage.rings.qqbar import QQbar
if (QQbar.coerce_map_from(F) is None):
raise NotImplementedError('the base field must have an embedding to the algebraic field')
f = self.affine_patch(2).defining_polynomial()
if (f.degree() == self.degree()):
return fundamental_group(f, projective=True)
else:
return fundamental_group(f, projective=False)
def rational_parameterization(self):
if self.genus():
raise TypeError('this curve must have geometric genus zero')
if (not is_RationalField(self.base_ring())):
raise TypeError('this curve must be defined over the rational field')
singular.lib('paraplanecurves.lib')
R = singular.paraPlaneCurve(self.defining_polynomial())
singular.setring(R)
param = singular('PARA').sage().gens()
R = R.sage()
C = self.change_ring(R.base_ring())
H = Hom(ProjectiveSpace(R.base_ring(), 1, R.gens()), C)
return H(param)
def riemann_surface(self, **kwargs):
return self.affine_patch(2).riemann_surface(**kwargs) |
class NumberFieldStructure(UniqueRepresentation):
def __init__(self, other):
self.other = other
def create_structure(self, field):
raise NotImplementedError |
def memlet_check_parameters(memlet, volume, dynamic, subsets):
if (memlet.volume != volume):
raise RuntimeError('Expected volume of {}, got {}'.format(volume, memlet.volume))
elif (dynamic and (not memlet.dynamic)):
raise RuntimeError('Expected dynamic volume, got static')
elif (memlet.dynamic and (not dynamic)):
raise RuntimeError('Expected static volume, got dynamic')
if (len(subsets) != memlet.subset.dims()):
raise RuntimeError('Expected subset of dim {}, got {}'.format(len(subsets), memlet.subset.dims()))
for i in range(len(subsets)):
if (subsets[i] != memlet.subset.ranges[i]):
raise RuntimeError('Expected subset {} at dim {}, got {}'.format(subsets[i], i, memlet.subset.ranges[i])) |
def semantic_unit_segment(tag_seq):
(tag_item_lists, seg_pointers) = ([], [])
for (idx, tag_item) in enumerate(tag_seq):
if (tag_item[0] != OUTSIDE):
tag_item_lists.append(tag_item)
seg_pointers.append(idx)
return (tag_item_lists, seg_pointers) |
def load_checkpoint(model, optimizer, model_dir, map_location=None, step=None):
path = os.path.join(model_dir, 'model_checkpoint')
if (step is not None):
path += '-{:08d}'.format(step)
if os.path.exists(path):
print(('Loading model from %s' % path))
checkpoint = torch.load(path, map_location=map_location)
old_state_dict = model.state_dict()
for key in old_state_dict.keys():
if (key not in checkpoint['model']):
checkpoint['model'][key] = old_state_dict[key]
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
return checkpoint.get('step', 0)
return 0 |
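A resume-style usage sketch for the loader above; the model, optimizer, and directory are placeholders and not taken from the snippet.

import torch

model = torch.nn.Linear(10, 2)                             # illustrative model
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)  # illustrative optimizer
start_step = load_checkpoint(model, optimizer, model_dir='./runs/exp1', map_location='cpu')
for step in range(start_step, 1000):
    pass  # training loop would resume here from the restored step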
class EntityMention(Mention):
def __init__(self, doc_id, sent_id, tokens_numbers, tokens, mention_str, head_text, head_lemma, is_singleton, is_continuous, coref_chain, mention_type):
super(EntityMention, self).__init__(doc_id, sent_id, tokens_numbers, tokens, mention_str, head_text, head_lemma, is_singleton, is_continuous, coref_chain)
self.predicates = {}
self.mention_type = mention_type
def add_predicate(self, predicate_id, relation_to_predicate):
self.predicates[predicate_id] = relation_to_predicate
def __str__(self):
a0_pred = '-'
a1_pred = '-'
aloc_pred = '-'
atmp_pred = '-'
for (pred, rel) in self.predicates.items():
if (rel == 'A0'):
a0_pred += (pred[0] + '-')
elif (rel == 'A1'):
a1_pred += (pred[0] + '-')
elif (rel == 'AM-TMP'):
atmp_pred += (pred[0] + '-')
elif (rel == 'AM-LOC'):
aloc_pred += (pred[0] + '-')
return '{}_a0-pred: {}_a1-pred: {}_loc-pred: {}_tmp-pred: {}_{}'.format(super(EntityMention, self).__str__(), a0_pred, a1_pred, aloc_pred, atmp_pred, self.mention_id) |
def _trim_arity(func, maxargs=2):
if (func in singleArgBuiltins):
return (lambda s, l, t: func(t))
limit = [0]
foundArity = [False]
def extract_stack(limit=0):
offset = (- 2)
frame_summary = traceback.extract_stack(limit=(((- offset) + limit) - 1))[offset]
return [(frame_summary.filename, frame_summary.lineno)]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[(- 1)]
return [(frame_summary.filename, frame_summary.lineno)]
LINE_DIFF = 6
this_line = extract_stack(limit=2)[(- 1)]
pa_call_line_synth = (this_line[0], (this_line[1] + LINE_DIFF))
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0]:])
foundArity[0] = True
return ret
except TypeError:
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[(- 1)]
if (not (extract_tb(tb, limit=2)[(- 1)][:2] == pa_call_line_synth)):
raise
finally:
del tb
if (limit[0] <= maxargs):
limit[0] += 1
continue
raise
func_name = '<parse action>'
try:
func_name = getattr(func, '__name__', getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
wrapper.__name__ = func_name
return wrapper |
def create_reverse_dependency_tree():
cache = {}
all_modules = (list(PATH_TO_TRANFORMERS.glob('**/*.py')) + list(PATH_TO_TESTS.glob('**/*.py')))
all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules]
edges = [(dep, mod) for mod in all_modules for dep in get_module_dependencies(mod, cache=cache)]
return list(set(edges)) |
def normalize(a):
ma = np.max(a)
mi = np.min(a)
assert (ma > mi)
a = ((a - mi) / (ma - mi))
return a |
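A quick sanity check of the min-max scaler above (note the assert rejects constant inputs).

import numpy as np

print(normalize(np.array([2.0, 4.0, 6.0])))  # [0.  0.5 1. ]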
def create_temp_with_dir():
tmpdir = tempfile.mkdtemp()
(yield tmpdir)
rmtree(tmpdir, ignore_errors=True) |
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
config = BertConfig.from_json_file(bert_config_file)
print('Building PyTorch model from configuration: {}'.format(str(config)))
model = BertForPreTraining(config)
load_tf_weights_in_bert(model, config, tf_checkpoint_path)
print('Save PyTorch model to {}'.format(pytorch_dump_path))
torch.save(model.state_dict(), pytorch_dump_path) |
class Func_legendre_P(GinacFunction):
def __init__(self):
BuiltinFunction.__init__(self, 'legendre_P', nargs=2, latex_name='P', conversions={'maxima': 'legendre_p', 'mathematica': 'LegendreP', 'maple': 'LegendreP', 'giac': 'legendre'}) |
def make_spec(vnnlib_filename, onnx_filename):
(num_inputs, num_outputs, inp_dtype) = get_num_inputs_outputs(onnx_filename)
vnnlib_spec = read_vnnlib_simple(vnnlib_filename, num_inputs, num_outputs)
rv = []
for (box, spec_list) in vnnlib_spec:
if (len(spec_list) == 1):
(mat, rhs) = spec_list[0]
spec = Specification(mat, rhs)
else:
spec_obj_list = [Specification(mat, rhs) for (mat, rhs) in spec_list]
spec = DisjunctiveSpec(spec_obj_list)
rv.append((box, spec))
return (rv, inp_dtype) |
def test_unique_objects_after_inlining(empty_open_api_3_schema):
empty_open_api_3_schema['paths'] = {'/test': {'post': {'requestBody': {'content': {'application/json': {'schema': {'$ref': '#/components/schemas/step5'}}}}, 'responses': {'default': {'description': 'Success'}}}}}
empty_open_api_3_schema['components'] = {'schemas': {'final': {'type': 'object'}, 'step1': {'$ref': '#/components/schemas/final'}, 'step2': {'$ref': '#/components/schemas/step1'}, 'step3': {'$ref': '#/components/schemas/step2'}, 'step4': {'$ref': '#/components/schemas/step3'}, 'step5': {'properties': {'first': {'$ref': '#/components/schemas/step4'}, 'second': {'$ref': '#/components/schemas/step4'}}}}}
schema = schemathesis.from_dict(empty_open_api_3_schema)
assert_unique_objects(schema['/test']['post'].body[0].definition) |
class FastestDetNeck(nn.Module):
def __init__(self, in_channels=[512, 1024, 2048], out_channels=96):
super(FastestDetNeck, self).__init__()
self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.avg_pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
self.SPP = SPP(sum(in_channels), out_channels)
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, a=1)
if (m.bias is not None):
nn.init.constant_(m.bias, 0)
def forward(self, x):
(C3, C4, C5) = x
P5 = self.upsample(C5)
P3 = self.avg_pool(C3)
P_cat = torch.cat((P3, C4, P5), dim=1)
return self.SPP(P_cat) |
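A shape-level sketch of running the neck above, assuming the SPP block it instantiates is defined in the same module and that the inputs follow the usual stride-8/16/32 backbone layout; all sizes are illustrative.

import torch

neck = FastestDetNeck(in_channels=[512, 1024, 2048], out_channels=96)
C3 = torch.randn(1, 512, 80, 80)    # stride-8 feature map
C4 = torch.randn(1, 1024, 40, 40)   # stride-16
C5 = torch.randn(1, 2048, 20, 20)   # stride-32
out = neck((C3, C4, C5))
# C3 is average-pooled and C5 upsampled to C4's 40x40 grid, concatenated to 3584
# channels, then reduced by SPP; assuming SPP emits out_channels feature maps:
print(out.shape)  # expected torch.Size([1, 96, 40, 40])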
@pytest.mark.parametrize('sql', ['select verrrylongcolumn from foo', 'select "verrrylongcolumn" from "foo"'])
def test_truncate_strings_doesnt_truncate_identifiers(sql):
formatted = sqlparse.format(sql, truncate_strings=2)
assert (formatted == sql) |
@pytest.mark.parametrize('observation_shape', [(4,), ((4,), (8,)), (3, 84, 84)])
def test_get_shape_from_observation(observation_shape: Shape) -> None:
observation = create_observation(observation_shape)
assert (tuple(get_shape_from_observation(observation)) == observation_shape) |
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
__traceback_hide__ = True
try:
__import__(module_name)
except ImportError:
if sys.exc_info()[(- 1)].tb_next:
raise NoAppException('While importing "{name}", an ImportError was raised:\n\n{tb}'.format(name=module_name, tb=traceback.format_exc()))
elif raise_if_not_found:
raise NoAppException('Could not import "{name}".'.format(name=module_name))
else:
return
module = sys.modules[module_name]
if (app_name is None):
return find_best_app(script_info, module)
else:
return find_app_by_string(script_info, module, app_name) |
def data_iterator_csv_dataset(uri, batch_size, shuffle=False, rng=None, use_thread=True, normalize=True, with_memory_cache=True, with_file_cache=True, cache_dir=None, epoch_begin_callbacks=[], epoch_end_callbacks=[], stop_exhausted=False):
ds = CsvDataSource(uri, shuffle=shuffle, rng=rng, normalize=normalize)
return data_iterator(ds, use_thread=use_thread, batch_size=batch_size, with_memory_cache=with_memory_cache, with_file_cache=with_file_cache, cache_dir=cache_dir, epoch_begin_callbacks=epoch_begin_callbacks, epoch_end_callbacks=epoch_end_callbacks, stop_exhausted=stop_exhausted) |
def TorchGELUPattern(patterns: list):
gelu_input = OuterNode()
div_tensor = OuterNode(is_tensor=True)
add_tensor = OuterNode(tensor_value=1)
mul_tensor = OuterNode(tensor_value=0.5)
_div = PatternNode('Div', [gelu_input, div_tensor])
_erf = PatternNode('Erf', [_div])
_add = PatternNode('Add', [_erf, add_tensor])
_mu_0 = PatternNode('Mul', [gelu_input, _add])
_mul_1 = PatternNode('Mul', [_mu_0, mul_tensor])
gelu = PatternNode('GELU', [gelu_input])
patterns.append(ReformInfo(name='GELU', src_nodes=[_div, _erf, _add, _mu_0, _mul_1], dst_nodes=[gelu])) |
def register_Ns3PhyTxStatsCalculator_methods(root_module, cls):
cls.add_constructor([param('ns3::PhyTxStatsCalculator const &', 'arg0')])
cls.add_constructor([])
cls.add_method('DlPhyTransmission', 'void', [param('ns3::PhyTransmissionStatParameters', 'params')])
cls.add_method('DlPhyTransmissionCallback', 'void', [param('ns3::Ptr< ns3::PhyTxStatsCalculator >', 'phyTxStats'), param('std::string', 'path'), param('ns3::PhyTransmissionStatParameters', 'params')], is_static=True)
cls.add_method('GetDlTxOutputFilename', 'std::string', [])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('GetUlTxOutputFilename', 'std::string', [])
cls.add_method('SetDlTxOutputFilename', 'void', [param('std::string', 'outputFilename')])
cls.add_method('SetUlTxOutputFilename', 'void', [param('std::string', 'outputFilename')])
cls.add_method('UlPhyTransmission', 'void', [param('ns3::PhyTransmissionStatParameters', 'params')])
cls.add_method('UlPhyTransmissionCallback', 'void', [param('ns3::Ptr< ns3::PhyTxStatsCalculator >', 'phyTxStats'), param('std::string', 'path'), param('ns3::PhyTransmissionStatParameters', 'params')], is_static=True)
return |
def plot_spk_cur_mem_spk(spk_in, syn_rec, mem_rec, spk_rec, title):
(fig, ax) = plt.subplots(4, figsize=(8, 7), sharex=True, gridspec_kw={'height_ratios': [0.4, 1, 1, 0.4]})
splt.raster(spk_in, ax[0], s=400, c='black', marker='|')
ax[0].set_ylabel('Input Spikes')
ax[0].set_title(title)
ax[0].set_yticks([])
ax[1].plot(syn_rec.detach().numpy())
ax[1].set_ylim([0, 0.5])
ax[1].set_ylabel('Synaptic Current ($I_{syn}$)')
ax[2].plot(mem_rec.detach().numpy())
ax[2].set_ylim([0, 1.5])
ax[2].set_ylabel('Membrane Potential ($U_{mem}$)')
ax[2].axhline(y=1, alpha=0.25, linestyle='dashed', c='black', linewidth=2)
splt.raster(spk_rec, ax[3], s=400, c='black', marker='|')
ax[3].set_ylabel('Output spikes')
ax[3].set_xlabel('Time step')
ax[3].set_yticks([])
plt.show() |
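# Illustrative call with synthetic traces (placeholders, not from the source); assumes
# matplotlib.pyplot and snntorch.spikeplot are imported as plt and splt, as the function
# above expects.
import torch
num_steps = 200
spk_in = (torch.rand(num_steps, 1) < 0.1).float()  # sparse random input spike train
syn_rec = torch.rand(num_steps, 1) * 0.4  # placeholder synaptic current trace
mem_rec = torch.rand(num_steps, 1) * 1.2  # placeholder membrane potential trace
spk_rec = (mem_rec > 1.0).float()  # output spikes where the membrane crosses threshold
plot_spk_cur_mem_spk(spk_in, syn_rec, mem_rec, spk_rec, 'Synaptic Conductance-based Neuron Model') |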
class IRBlock(nn.Module):
expansion: int = 1
def __init__(self, in_ch: int, out_ch: int, s: int=1, downsample: Optional[nn.Module]=None) -> None:
super().__init__()
self.bn0 = nn.BatchNorm2d(in_ch)
self.conv1 = nn.Conv2d(in_ch, out_ch, 3, s, 1, bias=False)
self.bn1 = nn.BatchNorm2d(out_ch)
self.prelu = nn.PReLU()
self.conv2 = nn.Conv2d(out_ch, out_ch, 3, 1, 1, bias=False)
self.bn2 = nn.BatchNorm2d(out_ch)
self.downsample = downsample
self.se = SEBlock(out_ch)
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.prelu(self.bn1(self.conv1(self.bn0(x))))
out = self.bn2(self.conv2(out))
out = self.se(out)
if (self.downsample is not None):
identity = self.downsample(x)
out += identity
out = self.prelu(out)
return out |
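# Minimal usage sketch (illustrative, not from the source). SEBlock is assumed to be a
# standard squeeze-and-excitation module; a simple stand-in is defined here so the
# example is self-contained.
import torch
import torch.nn as nn

class SEBlock(nn.Module):
    def __init__(self, ch: int, reduction: int = 16) -> None:
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Conv2d(ch, ch // reduction, 1), nn.ReLU(inplace=True),
            nn.Conv2d(ch // reduction, ch, 1), nn.Sigmoid())

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * self.fc(self.pool(x))

# When the stride or channel count changes, the identity path needs a matching projection.
downsample = nn.Sequential(nn.Conv2d(64, 128, 1, stride=2, bias=False), nn.BatchNorm2d(128))
block = IRBlock(64, 128, s=2, downsample=downsample)
out = block(torch.randn(2, 64, 56, 56))  # out.shape == torch.Size([2, 128, 28, 28]) |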
def sample_machine_instructions(machine_instructions, n):
return random.sample(machine_instructions, min(n, len(machine_instructions))) |
class CrossAttention(nn.Module):
def __init__(self, dim: int, nhead: int, dropout: float=0.0, batch_first: bool=True, add_pe_to_qkv: List[bool]=[True, True, False], residual: bool=True, norm: bool=True):
super().__init__()
self.cross_attn = nn.MultiheadAttention(dim, nhead, dropout=dropout, batch_first=batch_first)
if norm:
self.norm = nn.LayerNorm(dim)
else:
self.norm = nn.Identity()
self.dropout = nn.Dropout(dropout)
self.add_pe_to_qkv = add_pe_to_qkv
self.residual = residual
def forward(self, x: torch.Tensor, mem: torch.Tensor, x_pe: torch.Tensor, mem_pe: torch.Tensor, attn_mask: Optional[torch.Tensor]=None, *, need_weights: bool=False) -> Tuple[torch.Tensor, torch.Tensor]:
x = self.norm(x)
if self.add_pe_to_qkv[0]:
q = (x + x_pe)
else:
q = x
if any(self.add_pe_to_qkv[1:]):
mem_with_pe = (mem + mem_pe)
k = (mem_with_pe if self.add_pe_to_qkv[1] else mem)
v = (mem_with_pe if self.add_pe_to_qkv[2] else mem)
else:
k = v = mem
r = x
(x, weights) = self.cross_attn(q, k, v, attn_mask=attn_mask, need_weights=need_weights, average_attn_weights=False)
if self.residual:
return ((r + self.dropout(x)), weights)
else:
return (self.dropout(x), weights) |
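# Brief usage sketch (illustrative, not from the source): batch-first tensors, with the
# positional encoding added to the query and key only, as the default add_pe_to_qkv does;
# average_attn_weights=False in forward() requires PyTorch >= 1.11.
import torch
attn = CrossAttention(dim=256, nhead=8)
x = torch.randn(2, 10, 256)  # queries
mem = torch.randn(2, 50, 256)  # memory being attended over
x_pe = torch.randn(2, 10, 256)  # positional encoding for x
mem_pe = torch.randn(2, 50, 256)  # positional encoding for mem
out, weights = attn(x, mem, x_pe, mem_pe, need_weights=True)
# out: (2, 10, 256); weights: (2, 8, 10, 50) per-head attention maps |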
def test_zero_crop():
arr = np.arange(45).reshape(9, 5)
out = crop(arr, 0)
assert (out.shape == (9, 5)) |
def from_pretrained(model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', archive_map=None, **kwargs):
from fairseq import checkpoint_utils, file_utils
if (archive_map is not None):
if (model_name_or_path in archive_map):
model_name_or_path = archive_map[model_name_or_path]
if ((data_name_or_path is not None) and (data_name_or_path in archive_map)):
data_name_or_path = archive_map[data_name_or_path]
model_path = file_utils.load_archive_file(model_name_or_path)
if data_name_or_path.startswith('.'):
kwargs['data'] = os.path.abspath(os.path.join(model_path, data_name_or_path))
else:
kwargs['data'] = file_utils.load_archive_file(data_name_or_path)
for (file, arg) in {'code': 'bpe_codes', 'bpecodes': 'bpe_codes', 'sentencepiece.bpe.model': 'sentencepiece_vocab'}.items():
path = os.path.join(model_path, file)
if os.path.exists(path):
kwargs[arg] = path
if ('user_dir' in kwargs):
utils.import_user_module(argparse.Namespace(user_dir=kwargs['user_dir']))
(models, args, task) = checkpoint_utils.load_model_ensemble_and_task([os.path.join(model_path, cpt) for cpt in checkpoint_file.split(':')], arg_overrides=kwargs)
return {'args': args, 'task': task, 'models': models} |
def log(spark):
date = datetime(2019, 1, 1)
return spark.createDataFrame(data=[[0, 0, date, 1.0], [1, 0, date, 1.0], [2, 1, date, 2.0], [2, 1, date, 2.0], [1, 1, date, 2.0], [2, 2, date, 2.0], [0, 2, date, 2.0]], schema=get_schema('user_idx', 'item_idx', 'timestamp', 'relevance')) |
def test_lstm_tree_forward(pretrain_file):
model = build_model(pretrain_file, '--num_tree_lstm_layers', '1', '--constituency_composition', 'tree_lstm')
run_forward_checks(model)
model = build_model(pretrain_file, '--num_tree_lstm_layers', '2', '--constituency_composition', 'tree_lstm')
run_forward_checks(model)
model = build_model(pretrain_file, '--num_tree_lstm_layers', '3', '--constituency_composition', 'tree_lstm')
run_forward_checks(model) |