code stringlengths 101 5.91M |
|---|
def detect(cfgfile, weightfile, imgfolder):
    """Run Darknet detection on every .jpg in `imgfolder`, save annotated
    copies under result/ and show each in an OpenCV window for 1 second.

    Args:
        cfgfile: path to the Darknet .cfg model definition.
        weightfile: path to the trained .weights file.
        imgfolder: directory scanned (non-recursively) for *.jpg images.
    """
    m = Darknet(cfgfile)
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % weightfile)
    use_cuda = True
    if use_cuda:
        m.cuda()
    # Hoisted out of the loop: class names are invariant across images.
    # NOTE(review): `namesfile` must exist at module scope — confirm.
    class_names = load_class_names(namesfile)
    window_name = '{}'.format(os.path.basename(imgfolder))
    imgfiles = sorted(x for x in os.listdir(imgfolder) if x[-4:] == '.jpg')
    for imgname in imgfiles:
        imgfile = os.path.join(imgfolder, imgname)
        img = Image.open(imgfile).convert('RGB')
        # Network expects its configured input resolution.
        sized = img.resize((m.width, m.height))
        start = time.time()
        boxes = do_detect(m, sized, 0.5, 0.4, use_cuda)
        finish = time.time()
        print('%s: Predicted in %f seconds.' % (imgfile, finish - start))
        img = plot_boxes(img, boxes, 'result/{}'.format(os.path.basename(imgfile)), class_names)
        img = np.array(img)
        cv2.imshow(window_name, img)
        cv2.resizeWindow(window_name, 1000, 800)
        cv2.waitKey(1000)
def mean_stdev_masked(input_tensor, is_valid, items_axis, dimensions_axis, fixed_ref=None):
    """Masked mean and pooled standard deviation of `input_tensor`.

    The mean is taken over `items_axis`, restricted to valid entries, unless
    a precomputed reference `fixed_ref` is supplied, in which case deviations
    are measured from that reference instead. The stdev pools squared
    deviations over both `items_axis` and `dimensions_axis`, divided by the
    count of valid items only.

    NOTE(review): depends on the project helpers `reduce_mean_masked`,
    `reduce_sum_masked` and `expand_dims`; their keepdims/broadcast
    semantics are assumed here, not visible in this file.
    """
    if (fixed_ref is not None):
        mean = fixed_ref
    else:
        mean = reduce_mean_masked(input_tensor, is_valid, axis=items_axis, keepdims=True)
    centered = (input_tensor - mean)
    # Append trailing singleton axes so the mask broadcasts against the input.
    n_new_dims = (input_tensor.shape.rank - is_valid.shape.rank)
    is_valid = expand_dims(is_valid, ([(- 1)] * n_new_dims))
    n_valid = tf.math.count_nonzero(is_valid, axis=items_axis, keepdims=True, dtype=input_tensor.dtype)
    sum_of_squared_deviations = reduce_sum_masked(tf.square(centered), is_valid, axis=[items_axis, dimensions_axis], keepdims=True)
    # divide_no_nan guards n_valid == 0; the 1e-10 keeps sqrt away from zero.
    stdev = tf.sqrt((tf.math.divide_no_nan(sum_of_squared_deviations, n_valid) + 1e-10))
    return (mean, stdev)
def remove_extra_spaces(s: str) -> str:
    """Normalize whitespace in `s`.

    Drops zero-width spaces, collapses runs of ASCII/ideographic spaces to a
    single space, removes the space left before common punctuation marks,
    and strips leading/trailing whitespace.
    """
    s = s.replace('\u200b', '')
    s = re.sub('[ \u3000]+', ' ', s)
    for mark in ('?', '!', ',', '.', ':'):
        s = s.replace(' ' + mark, mark)
    return s.strip()
def sort_vol_slice(path):
    """Sort key for '<prefix>_<vol>_<slice>...npy' paths.

    Orders by volume id first, then slice id; assumes fewer than 1000
    slices per volume.
    """
    fname = path.split('/')[-1]
    vol_id = re.findall('[a-z]+_([0-9]+)_.+?\\.npy', fname)[0]
    slice_id = re.findall('[a-z]+_[0-9]+_([0-9]+).+', fname)[0]
    return int(vol_id) * 1000 + int(slice_id)
class ImagenetDataModule(LightningDataModule):
    """LightningDataModule for LMDB-backed ImageNet classification.

    Supports optional image dtype casting, mixup collation, repeated
    augmentation sampling, and in-memory caching of the validation set.
    """

    name = 'imagenet'

    def __init__(self, data_dir: str, image_size: int = 224, train_transforms=None, val_transforms=None, test_transforms=None, img_dtype='float32', cache_val_dataset=False, mixup: Optional[Callable] = None, num_aug_repeats: int = 0, num_workers: int = 0, batch_size: int = 32, batch_size_eval: Optional[int] = None, shuffle: bool = True, pin_memory: bool = True, drop_last: bool = False, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.image_size = image_size
        self.train_transforms = train_transforms
        self.val_transforms = val_transforms
        self.test_transforms = test_transforms
        assert img_dtype in ['float32', 'float16', 'bfloat16']
        # getattr() is the idiomatic way to resolve a dtype by name
        # (torch.__getattribute__ happened to work, but is not intended API).
        self.img_dtype = getattr(torch, img_dtype)
        self.cache_val_dataset = cache_val_dataset
        self.mixup = mixup
        self.num_aug_repeats = num_aug_repeats
        self.dims = (3, self.image_size, self.image_size)
        self.data_dir = Path(data_dir).expanduser()
        self.num_workers = num_workers
        self.batch_size = batch_size
        # Evaluation batch size defaults to the training batch size.
        self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
        self.shuffle = shuffle
        self.pin_memory = pin_memory
        self.drop_last = drop_last

    def num_classes(self) -> int:
        """Number of ImageNet classes."""
        return 1000

    def _verify_splits(self, data_dir: str, split: str) -> None:
        """Raise FileNotFoundError if `data_dir` has no `split` subfolder."""
        dirs = os.listdir(data_dir)
        if split not in dirs:
            raise FileNotFoundError(f'a {split} Imagenet split was not found in {data_dir}, make sure the folder contains a subfolder named {split}')

    def prepare_data(self) -> None:
        """No-op: the dataset is expected to already exist at `data_dir`.

        Bug fix: the original `def` had no body at all (a syntax error).
        """

    def setup(self, stage: Optional[str] = None) -> None:
        """Instantiate train/val/test datasets for the requested stage."""
        if (stage == 'fit') or (stage is None):
            train_transforms = self.train_transform() if self.train_transforms is None else self.train_transforms
            val_transforms = self.val_transform() if self.val_transforms is None else self.val_transforms
            if self.img_dtype is not torch.float32:
                # dtype conversion is appended to the transform pipeline.
                assert isinstance(train_transforms, transforms.Compose)
                assert isinstance(val_transforms, transforms.Compose)
                convert_dtype = transforms.Lambda(lambda x: x.to(dtype=self.img_dtype))
                train_transforms.transforms.append(convert_dtype)
                val_transforms.transforms.append(convert_dtype)
            self.dataset_train = imagenet_lmdb_dataset(os.path.join(self.data_dir, 'train'), transform=train_transforms)
            self.dataset_val = imagenet_lmdb_dataset(os.path.join(self.data_dir, 'val'), transform=val_transforms)
        if (stage == 'test') or (stage is None):
            test_transforms = self.val_transform() if self.test_transforms is None else self.test_transforms
            if self.img_dtype is not torch.float32:
                assert isinstance(test_transforms, transforms.Compose)
                convert_dtype = transforms.Lambda(lambda x: x.to(dtype=self.img_dtype))
                test_transforms.transforms.append(convert_dtype)
            # Testing reuses the 'val' split.
            self.dataset_test = imagenet_lmdb_dataset(os.path.join(self.data_dir, 'val'), transform=test_transforms)

    def train_transform(self) -> Callable:
        """Standard ImageNet training augmentation pipeline."""
        return transforms.Compose([transforms.RandomResizedCrop(self.image_size), transforms.RandomHorizontalFlip(), transforms.ToTensor(), imagenet_normalization()])

    def val_transform(self) -> Callable:
        """Resize(+32) then center-crop evaluation pipeline."""
        return transforms.Compose([transforms.Resize(self.image_size + 32), transforms.CenterCrop(self.image_size), transforms.ToTensor(), imagenet_normalization()])

    def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
        if self.num_aug_repeats == 0:
            shuffle = self.shuffle
            sampler = None
        else:
            # RepeatAugSampler performs its own shuffling.
            shuffle = False
            from timm.data.distributed_sampler import RepeatAugSampler
            sampler = RepeatAugSampler(self.dataset_train, num_repeats=self.num_aug_repeats)
        return self._data_loader(self.dataset_train, batch_size=self.batch_size, shuffle=shuffle, mixup=self.mixup, sampler=sampler)

    def val_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
        if not self.cache_val_dataset:
            sampler = DistributedSampler(self.dataset_val, shuffle=False, drop_last=self.drop_last) if self.num_aug_repeats != 0 else None
            return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval, sampler=sampler)
        # Cache path: materialise every validation sample once, then serve
        # cached items from a dict-backed dataset.
        print('Caching val dataset')
        sampler = SequentialSampler(self.dataset_val) if self.trainer.world_size <= 1 else DistributedSampler(self.dataset_val, shuffle=False, drop_last=self.drop_last)
        indices = list(iter(sampler))
        loader = DataLoader(self.dataset_val, batch_size=None, shuffle=False, sampler=sampler, num_workers=self.num_workers, drop_last=self.drop_last)
        batches = list(loader)
        assert len(batches) == len(indices)
        self.dataset_val = DictDataset(dict(zip(indices, batches)), length=len(self.dataset_val))
        sampler = DistributedSampler(self.dataset_val, shuffle=False, drop_last=self.drop_last) if self.num_aug_repeats != 0 else None
        return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval, sampler=sampler)

    def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
        sampler = DistributedSampler(self.dataset_test, shuffle=False, drop_last=self.drop_last) if self.num_aug_repeats != 0 else None
        return self._data_loader(self.dataset_test, batch_size=self.batch_size_eval, sampler=sampler)

    def _data_loader(self, dataset: Dataset, batch_size: int, shuffle: bool = False, mixup: Optional[Callable] = None, sampler=None) -> DataLoader:
        """Build a DataLoader, optionally applying mixup to collated batches."""
        collate_fn = (lambda batch: mixup(*default_collate(batch))) if mixup is not None else default_collate
        # Bug fix: persistent_workers=True raises when num_workers == 0.
        return DataLoader(dataset, collate_fn=collate_fn, batch_size=batch_size, shuffle=shuffle, sampler=sampler, num_workers=self.num_workers, drop_last=self.drop_last, pin_memory=self.pin_memory, persistent_workers=(self.num_workers > 0))
class Trainer:
    """Ignite-based training driver.

    Wires a training Engine to optional validation (early stopping),
    periodic testing, scalar/gradient/LR logging, visualisation and
    checkpointing. Supports multiple named parameter groups, each with its
    own optimizer and LR scheduler (`opts` / `lr_schedulers` are dicts
    keyed by group name).
    """

    # How often (in iterations) each scalar family is written.
    _STEPS_PER_LOSS_WRITE = 10
    _STEPS_PER_GRAD_WRITE = 10
    _STEPS_PER_LR_WRITE = 10

    def __init__(self, module, device, train_metrics, train_loader, opts, lr_schedulers, max_epochs, max_grad_norm, test_metrics, test_loader, epochs_per_test, early_stopping, valid_loss, valid_loader, max_bad_valid_epochs, visualizer, writer, should_checkpoint_latest, should_checkpoint_best_valid, checkpoint_to_load):
        self._module = module
        self._device = device
        self._train_metrics = train_metrics
        self._train_loader = train_loader
        self._opts = opts
        self._lr_schedulers = lr_schedulers
        self._max_epochs = max_epochs
        self._max_grad_norm = max_grad_norm
        self._test_metrics = test_metrics
        self._test_loader = test_loader
        self._epochs_per_test = epochs_per_test
        self._valid_loss = valid_loss
        self._valid_loader = valid_loader
        self._max_bad_valid_epochs = max_bad_valid_epochs
        self._best_valid_loss = float('inf')
        self._num_bad_valid_epochs = 0
        self._visualizer = visualizer
        self._writer = writer
        self._should_checkpoint_best_valid = should_checkpoint_best_valid

        self._trainer = Engine(self._train_batch)
        AverageMetric().attach(self._trainer)
        ProgressBar(persist=True).attach(self._trainer, list(self._opts.keys()))
        self._trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
        self._trainer.add_event_handler(Events.ITERATION_COMPLETED, self._log_training_info)

        if early_stopping:
            self._validator = Engine(self._validate_batch)
            AverageMetric().attach(self._validator)
            ProgressBar(persist=False, desc='Validating').attach(self._validator)
            self._trainer.add_event_handler(Events.EPOCH_COMPLETED, self._validate)

        self._tester = Engine(self._test_batch)
        AverageMetric().attach(self._tester)
        ProgressBar(persist=False, desc='Testing').attach(self._tester)
        self._trainer.add_event_handler(Events.EPOCH_COMPLETED, self._test_and_log)

        if should_checkpoint_latest:
            self._trainer.add_event_handler(Events.EPOCH_COMPLETED, (lambda _: self._save_checkpoint('latest')))

        try:
            self._load_checkpoint(checkpoint_to_load)
        except FileNotFoundError:
            print(f"Did not find `{checkpoint_to_load}' checkpoint.", file=sys.stderr)

    def train(self):
        """Run the training engine for up to `max_epochs`."""
        self._trainer.run(data=self._train_loader, max_epochs=self._max_epochs)

    def test(self):
        """Run the test engine once and return its averaged metrics."""
        self._module.eval()
        return self._tester.run(data=self._test_loader).metrics

    def _train_batch(self, engine, batch):
        """One optimisation step: each parameter group gets its own loss,
        backward pass (with the other groups frozen), clip and step."""
        self._module.train()
        (x, _) = batch
        x = x.to(self._device)
        for (param_name, opt) in self._opts.items():
            self._set_requires_grad(param_name, True)
            opt.zero_grad()
        all_values = self._train_metrics(self._module, x)
        for (param_name, loss) in all_values['losses'].items():
            # Freeze every other group so this loss only fills its own grads.
            self._isolate_params(param_name)
            loss.backward()
            self._clip_grad_norms(param_name)
        for (param_name, opt) in self._opts.items():
            opt.step()
            self._lr_schedulers[param_name].step()
        return {'metrics': all_values['losses']}

    def _isolate_params(self, param_name):
        """Enable grads for `param_name`'s group only."""
        for other_param_name in self._opts:
            self._set_requires_grad(other_param_name, False)
        self._set_requires_grad(param_name, True)

    def _set_requires_grad(self, param_name, requires_grad):
        for param in self._iter_params(param_name):
            param.requires_grad = requires_grad

    def _clip_grad_norms(self, param_name):
        """Clip each parameter's grad norm individually, if configured."""
        if self._max_grad_norm is not None:
            for param in self._iter_params(param_name):
                torch.nn.utils.clip_grad_norm_(param, self._max_grad_norm)

    def _iter_params(self, param_name):
        """Yield every parameter managed by the named optimizer."""
        for group in self._opts[param_name].param_groups:
            for param in group['params']:
                yield param

    # NOTE(review): decorator reconstructed from a mangled bare `_grad()`
    # line in the source — confirm against version control.
    @torch.no_grad()
    def _test_and_log(self, engine):
        """Every `epochs_per_test` epochs: run the test set, log metrics,
        checkpoint on non-finite values, and refresh visualisations."""
        epoch = engine.state.epoch
        if ((epoch - 1) % self._epochs_per_test) == 0:
            for (k, v) in self.test().items():
                self._writer.write_scalar(f'test/{k}', v, global_step=engine.state.epoch)
                if not torch.isfinite(v):
                    self._save_checkpoint(tag='nan_during_test')
            self._visualizer.visualize(self._module, epoch)

    def _test_batch(self, engine, batch):
        (x, _) = batch
        x = x.to(self._device)
        return {'metrics': self._test_metrics(self._module, x)}

    # NOTE(review): decorator reconstructed from a mangled bare `_grad()`
    # line in the source — confirm against version control.
    @torch.no_grad()
    def _validate(self, engine):
        """Early-stopping hook: track best validation loss, checkpoint the
        best model, and terminate after too many non-improving epochs."""
        self._module.eval()
        state = self._validator.run(data=self._valid_loader)
        valid_loss = state.metrics['loss']
        if valid_loss < self._best_valid_loss:
            print(f'Best validation loss {valid_loss} after epoch {engine.state.epoch}')
            self._num_bad_valid_epochs = 0
            self._best_valid_loss = valid_loss
            if self._should_checkpoint_best_valid:
                self._save_checkpoint(tag='best_valid')
        else:
            if not torch.isfinite(valid_loss):
                self._save_checkpoint(tag='nan_during_validation')
            self._num_bad_valid_epochs += 1
            if self._num_bad_valid_epochs > self._max_bad_valid_epochs:
                print(f'No validation improvement after {self._num_bad_valid_epochs} epochs. Terminating.')
                self._trainer.terminate()

    def _validate_batch(self, engine, batch):
        (x, _) = batch
        x = x.to(self._device)
        return {'metrics': {'loss': self._valid_loss(self._module, x)}}

    def _log_training_info(self, engine):
        """Periodically write losses, per-group grad norms and LRs."""
        i = engine.state.iteration
        if (i % self._STEPS_PER_LOSS_WRITE) == 0:
            for (k, v) in engine.state.output['metrics'].items():
                self._writer.write_scalar(f'train/{k}', v, global_step=i)
        if (i % self._STEPS_PER_GRAD_WRITE) == 0:
            for param_name in self._opts:
                self._writer.write_scalar(f'train/grad-norm-{param_name}', self._get_grad_norm(param_name), global_step=i)
        if (i % self._STEPS_PER_LR_WRITE) == 0:
            for param_name in self._opts:
                self._writer.write_scalar(f'train/lr-{param_name}', self._get_lr(param_name), global_step=i)

    def _get_grad_norm(self, param_name):
        """Global L2 norm over all grads of the named group."""
        norm = 0
        for param in self._iter_params(param_name):
            if param.grad is not None:
                norm += param.grad.norm().item() ** 2
        return np.sqrt(norm)

    def _get_lr(self, param_name):
        # Assumes exactly one param group per optimizer (tuple-unpack asserts it).
        (param_group,) = self._opts[param_name].param_groups
        return param_group['lr']

    def _save_checkpoint(self, tag):
        checkpoint = {'epoch': self._trainer.state.epoch, 'iteration': self._trainer.state.iteration, 'module_state_dict': self._module.state_dict(), 'opt_state_dicts': {param_name: opt.state_dict() for (param_name, opt) in self._opts.items()}, 'lr_scheduler_state_dicts': self._get_lr_scheduler_state_dicts(), 'best_valid_loss': self._best_valid_loss, 'num_bad_valid_epochs': self._num_bad_valid_epochs}
        self._writer.write_checkpoint(tag, checkpoint)

    def _get_lr_scheduler_state_dicts(self):
        with warnings.catch_warnings():
            # PyTorch warns about saving scheduler state separately; we save
            # the optimizer state alongside, so the warning is noise here.
            warnings.filterwarnings('ignore', message='Please also save or load the state of the optimizer when saving or loading the scheduler.')
            return {param_name: lr_scheduler.state_dict() for (param_name, lr_scheduler) in self._lr_schedulers.items()}

    def _load_checkpoint(self, tag):
        """Restore module/optimizer/scheduler state from a saved checkpoint."""
        checkpoint = self._writer.load_checkpoint(tag, device=self._device)

        # Restore engine counters once training actually starts.
        # NOTE(review): decorator reconstructed from a mangled bare
        # `_trainer.on(Events.STARTED)` line — confirm against history.
        @self._trainer.on(Events.STARTED)
        def resume_trainer_state(engine):
            engine.state.epoch = checkpoint['epoch']
            engine.state.iteration = checkpoint['iteration']

        self._module.load_state_dict(checkpoint['module_state_dict'])
        for (param_name, state_dict) in checkpoint['opt_state_dicts'].items():
            self._opts[param_name].load_state_dict(state_dict)
        for (param_name, state_dict) in checkpoint['lr_scheduler_state_dicts'].items():
            self._lr_schedulers[param_name].load_state_dict(state_dict)
        self._best_valid_loss = checkpoint['best_valid_loss']
        self._num_bad_valid_epochs = checkpoint['num_bad_valid_epochs']
        print(f"Loaded checkpoint `{tag}' after epoch {checkpoint['epoch']}", file=sys.stderr)
def session(engine):
    """Yield a SQLAlchemy session inside a transaction that is always
    rolled back — useful as a test fixture so the database is untouched.

    Bug fix: teardown now runs in a `finally` block, so the transaction is
    rolled back and the connection closed even if the consumer throws an
    exception into the generator.
    """
    from sqlalchemy.orm import sessionmaker
    connection = engine.connect()
    trans = connection.begin()
    sess = sessionmaker()(bind=connection)
    try:
        yield sess
    finally:
        sess.close()
        trans.rollback()
        connection.close()
class ElvenShortSword(BaseShortSword):
    """Elven variant of the short sword: weight 30, d8 damage, wooden
    material, no inherent to-hit bonus."""

    def __init__(self):
        # NOTE(review): material is M.Wood — presumably intentional for an
        # elven weapon in this ruleset; confirm against the item tables.
        super().__init__('elven short sword', weight=30, damage=D.Dice.from_str('d8'), material=M.Wood, hit=0)
def check_generator(params: Tuple, state: State) -> None:
    """Assert that a generated `state` is shape-consistent with `params`."""
    (num_nodes, _, _, num_agents, num_nodes_per_agent, max_step) = params
    # Node types must span from UTILITY_NODE up to the last agent id.
    assert jnp.min(state.node_types) == UTILITY_NODE
    assert jnp.max(state.node_types) == num_agents - 1
    # One position per agent.
    assert state.positions.shape == (num_agents,)
    # Per-agent trajectory and lookup structures.
    assert state.connected_nodes.shape == (num_agents, max_step)
    assert state.connected_nodes_index.shape == (num_agents, num_nodes)
    assert state.node_edges.shape == (num_agents, num_nodes, num_nodes)
    assert state.nodes_to_connect.shape == (num_agents, num_nodes_per_agent)
def split_tfrecord(cfg, logger):
    """Split the FFHQ source tfrecord (one per resolution level) into
    fixed-size part files under cfg.DATASET.PATH.

    Iterates resolution levels 0..MAX_RESOLUTION_LEVEL, reading records in
    chunks of 1024 and writing `part_size` records per output part until
    the source dataset is exhausted.
    """
    tfrecord_path = cfg.DATASET.FFHQ_SOURCE
    ffhq_size = cfg.DATASET.SIZE
    part_size = ffhq_size // cfg.DATASET.PART_COUNT
    # Bug fix: the original format string '% size parts' consumed '% s' as a
    # conversion and rendered e.g. 'Splitting into 1000ize parts'.
    logger.info('Splitting into parts of size %d' % part_size)
    chunk_size = 1024
    for i in range(0, cfg.DATASET.MAX_RESOLUTION_LEVEL + 1):
        part_num = 0
        with tf.Graph().as_default(), tf.Session() as sess:
            ds = tf.data.TFRecordDataset(tfrecord_path % i)
            batch = ds.batch(chunk_size).make_one_shot_iterator().get_next()
            while True:
                try:
                    part_path = cfg.DATASET.PATH % (i, part_num)
                    os.makedirs(os.path.dirname(part_path), exist_ok=True)
                    with tf.python_io.TFRecordWriter(part_path) as writer:
                        for _ in tqdm.tqdm(range(part_size // chunk_size)):
                            records = sess.run(batch)
                            for record in records:
                                writer.write(record)
                    part_num += 1
                except tf.errors.OutOfRangeError:
                    # Source exhausted: stop producing parts for this level.
                    break
def create_dataset_artifact(opt):
    """Log the dataset described by the YAML file at `opt.data` as a W&B
    dataset artifact.

    NOTE(review): the WandbLogger constructor is invoked purely for its
    side effects (artifact creation/upload); nothing is returned.
    """
    with open(opt.data) as f:
        data = yaml.safe_load(f)
    logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')
# NOTE(review): the bare `_name('new_eval')` call below appears to be the
# remnant of a mangled decorator (e.g. `@<something>_name('new_eval')`).
# As written it would raise NameError at import time; recover the original
# decorator from version control.
_name('new_eval')
def test_new_eval_extreme(benchmark):
    """Stress benchmark: run new_eval with bond_dim=100, seq_len=1000."""
    new_eval_runner(benchmark, bond_dim=100, seq_len=1000)
def test_close_with_paused():
    """VectorEnv must close all workers on context exit, even when some
    environments have been paused (removed from the active set)."""
    (configs, datasets) = _load_test_data()
    num_envs = len(configs)
    env_fn_args = tuple(zip(configs, datasets, range(num_envs)))
    with habitat.VectorEnv(env_fn_args=env_fn_args, multiprocessing_start_method='forkserver') as envs:
        envs.reset()
        # Pause two workers; indices refer to the current active list.
        envs.pause_at(3)
        envs.pause_at(0)
    # Leaving the `with` block should have closed the vector env.
    assert envs._is_closed
def pix2pix_discriminator(net, num_filters, padding=2, pad_mode='REFLECT', activation_fn=tf.nn.leaky_relu, is_training=False):
    """pix2pix-style fully convolutional discriminator (TF1 contrib layers).

    Args:
        net: input image batch, NHWC.
        num_filters: filter count per conv layer; its length sets the depth.
        padding: spatial padding applied before each conv (0 disables).
        pad_mode: tf.pad mode, e.g. 'REFLECT'.
        activation_fn: activation for every conv except the logits layer.
        is_training: unused; kept for interface compatibility.

    Returns:
        (logits, end_points): 1-channel logits map and a dict of
        intermediate layer outputs, including sigmoid 'predictions'.
    """
    del is_training
    end_points = {}
    num_layers = len(num_filters)

    def padded(net, scope):
        # Explicit spatial padding so the convs can use 'valid' padding.
        if padding:
            with tf.variable_scope(scope):
                spatial_pad = tf.constant([[0, 0], [padding, padding], [padding, padding], [0, 0]], dtype=tf.int32)
                return tf.pad(net, spatial_pad, pad_mode)
        else:
            return net

    with tf.contrib.framework.arg_scope([layers.conv2d], kernel_size=[4, 4], stride=2, padding='valid', activation_fn=activation_fn):
        # First conv: no normalizer.
        net = layers.conv2d(padded(net, 'conv0'), num_filters[0], normalizer_fn=None, scope='conv0')
        end_points['conv0'] = net
        for i in range(1, (num_layers - 1)):
            net = layers.conv2d(padded(net, ('conv%d' % i)), num_filters[i], scope=('conv%d' % i))
            end_points[('conv%d' % i)] = net
        # Last feature conv switches to stride 1.
        net = layers.conv2d(padded(net, ('conv%d' % (num_layers - 1))), num_filters[(- 1)], stride=1, scope=('conv%d' % (num_layers - 1)))
        end_points[('conv%d' % (num_layers - 1))] = net
        # 1-channel logits map; no activation/normalizer on the output layer.
        logits = layers.conv2d(padded(net, ('conv%d' % num_layers)), 1, stride=1, activation_fn=None, normalizer_fn=None, scope=('conv%d' % num_layers))
        end_points['logits'] = logits
        end_points['predictions'] = tf.sigmoid(logits)
    return (logits, end_points)
def cleanup_log_dir(log_dir):
    """Ensure `log_dir` exists; if it already did, delete stale monitor CSVs.

    Bug fix: the original caught the over-broad OSError, which silently
    masked real failures (e.g. permission errors) as "directory exists".
    Only FileExistsError now triggers the cleanup path.
    """
    try:
        os.makedirs(log_dir)
    except FileExistsError:
        # Pre-existing run directory: drop leftover *.monitor.csv files.
        files = glob.glob(os.path.join(log_dir, '*.monitor.csv'))
        for f in files:
            os.remove(f)
class BaseDetector(ABC):
    """Interface for object detectors used by the pipeline.

    NOTE(review): the methods are plain `pass` stubs rather than
    @abstractmethod, so subclasses are not actually forced to override
    them — confirm whether that is intentional.
    """

    def __init__(self):
        pass

    def image_preprocess(self, img_name):
        """Load and preprocess a single image for detection (override)."""
        pass

    def images_detection(self, imgs, orig_dim_list):
        """Run batched detection on preprocessed images (override)."""
        pass

    def detect_one_img(self, img_name):
        """Convenience: detect objects in one image by name (override)."""
        pass
def _fitFunc(pars, drim, l, b, dist, ext, e_ext):
amp = numpy.exp(pars[0])
fd = (amp * numpy.exp(pars[1]))
fs = (amp * numpy.exp(pars[2]))
fo = (amp * ((1.0 - fd) - fs))
dist_stretch = numpy.exp(pars[3])
model_ext = drim(l, b, (dist * dist_stretch), _fd=fd, _fs=fs, _fo=fo)
return (0.5 * numpy.sum((((model_ext - ext) ** 2.0) / (e_ext ** 2.0)))) |
class Code2VecModel(Code2VecModelBase):
    """Keras/TF2 implementation of a code2vec-style model.

    Builds three views over shared layers: a training model (target-index
    softmax only), an evaluation model (adds top-k word predictions), and a
    raw predict function that also exposes code vectors and attention.
    """

    def __init__(self, config: Config):
        # Keras artefacts are built lazily by _create_keras_model().
        self.keras_train_model: Optional[keras.Model] = None
        self.keras_eval_model: Optional[keras.Model] = None
        self.keras_model_predict_function: Optional[K.GraphExecutionFunction] = None
        self.training_status: ModelTrainingStatus = ModelTrainingStatus()
        self._checkpoint: Optional[tf.train.Checkpoint] = None
        self._checkpoint_manager: Optional[tf.train.CheckpointManager] = None
        super(Code2VecModel, self).__init__(config)

    def _create_keras_model(self):
        """Assemble the shared layers and the train/eval/predict models."""
        # Each example is MAX_CONTEXTS path-contexts:
        # (source token, path, target token) plus a validity mask.
        path_source_token_input = Input((self.config.MAX_CONTEXTS,), dtype=tf.int32)
        path_input = Input((self.config.MAX_CONTEXTS,), dtype=tf.int32)
        path_target_token_input = Input((self.config.MAX_CONTEXTS,), dtype=tf.int32)
        context_valid_mask = Input((self.config.MAX_CONTEXTS,))
        paths_embedded = Embedding(self.vocabs.path_vocab.size, self.config.PATH_EMBEDDINGS_SIZE, name='path_embedding')(path_input)
        # Source and target tokens share a single embedding matrix.
        token_embedding_shared_layer = Embedding(self.vocabs.token_vocab.size, self.config.TOKEN_EMBEDDINGS_SIZE, name='token_embedding')
        path_source_token_embedded = token_embedding_shared_layer(path_source_token_input)
        path_target_token_embedded = token_embedding_shared_layer(path_target_token_input)
        context_embedded = Concatenate()([path_source_token_embedded, paths_embedded, path_target_token_embedded])
        context_embedded = Dropout((1 - self.config.DROPOUT_KEEP_RATE))(context_embedded)
        context_after_dense = TimeDistributed(Dense(self.config.CODE_VECTOR_SIZE, use_bias=False, activation='tanh'))(context_embedded)
        # Attention pools the per-context vectors into a single code vector.
        (code_vectors, attention_weights) = AttentionLayer(name='attention')([context_after_dense, context_valid_mask])
        target_index = Dense(self.vocabs.target_vocab.size, use_bias=False, activation='softmax', name='target_index')(code_vectors)
        inputs = [path_source_token_input, path_input, path_target_token_input, context_valid_mask]
        self.keras_train_model = keras.Model(inputs=inputs, outputs=target_index)
        (topk_predicted_words, topk_predicted_words_scores) = TopKWordPredictionsLayer(self.config.TOP_K_WORDS_CONSIDERED_DURING_PREDICTION, self.vocabs.target_vocab.get_index_to_word_lookup_table(), name='target_string')(target_index)
        self.keras_eval_model = keras.Model(inputs=inputs, outputs=[target_index, topk_predicted_words], name='code2vec-keras-model')
        # tuple(...) flattens the namedtuple into the plain output sequence
        # K.function expects, while keeping the field order.
        predict_outputs = tuple(KerasPredictionModelOutput(target_index=target_index, code_vectors=code_vectors, attention_weights=attention_weights, topk_predicted_words=topk_predicted_words, topk_predicted_words_scores=topk_predicted_words_scores))
        self.keras_model_predict_function = K.function(inputs=inputs, outputs=predict_outputs)

    def _create_metrics_for_keras_eval_model(self) -> Dict[str, List[Union[Callable, keras.metrics.Metric]]]:
        """Top-1..top-k accuracy for the index head; subtoken P/R/F1 for
        the predicted-word head (OOV and non-word predictions filtered)."""
        top_k_acc_metrics = []
        for k in range(1, (self.config.TOP_K_WORDS_CONSIDERED_DURING_PREDICTION + 1)):
            top_k_acc_metric = partial(sparse_top_k_categorical_accuracy, k=k)
            top_k_acc_metric.__name__ = 'top{k}_acc'.format(k=k)
            top_k_acc_metrics.append(top_k_acc_metric)
        predicted_words_filters = [(lambda word_strings: tf.not_equal(word_strings, self.vocabs.target_vocab.special_words.OOV)), (lambda word_strings: tf.strings.regex_full_match(word_strings, '^[a-zA-Z\\|]+$'))]
        words_subtokens_metrics = [WordsSubtokenPrecisionMetric(predicted_words_filters=predicted_words_filters, name='subtoken_precision'), WordsSubtokenRecallMetric(predicted_words_filters=predicted_words_filters, name='subtoken_recall'), WordsSubtokenF1Metric(predicted_words_filters=predicted_words_filters, name='subtoken_f1')]
        return {'target_index': top_k_acc_metrics, 'target_string': words_subtokens_metrics}

    # NOTE(review): takes `cls` but carries no @classmethod decorator —
    # likely lost in extraction. It still works because it is only called
    # as `self._create_optimizer()`; confirm against the original source.
    def _create_optimizer(cls):
        return tf.optimizers.Adam()

    def _compile_keras_model(self, optimizer=None):
        """Compile train and eval models, reusing an existing optimizer if
        one is already attached to the train model."""
        if (optimizer is None):
            optimizer = self.keras_train_model.optimizer
            if (optimizer is None):
                optimizer = self._create_optimizer()

        def zero_loss(true_word, topk_predictions):
            # The 'target_string' head is metrics-only; give it a null loss.
            return tf.constant(0.0, shape=(), dtype=tf.float32)

        self.keras_train_model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer)
        self.keras_eval_model.compile(loss={'target_index': 'sparse_categorical_crossentropy', 'target_string': zero_loss}, optimizer=optimizer, metrics=self._create_metrics_for_keras_eval_model())

    def _create_data_reader(self, estimator_action: EstimatorAction, repeat_endlessly: bool = False):
        """Build a PathContextReader for the given estimator action."""
        return PathContextReader(vocabs=self.vocabs, config=self.config, model_input_tensors_former=_KerasModelInputTensorsFormer(estimator_action=estimator_action), estimator_action=estimator_action, repeat_endlessly=repeat_endlessly)

    def _create_train_callbacks(self) -> List[Callback]:
        """Status tracking + optional checkpointing / evaluation / TB."""
        keras_callbacks = [ModelTrainingStatusTrackerCallback(self.training_status), ModelTrainingProgressLoggerCallback(self.config, self.training_status)]
        if self.config.is_saving:
            keras_callbacks.append(ModelCheckpointSaverCallback(self, self.config.SAVE_EVERY_EPOCHS, self.logger))
        if self.config.is_testing:
            keras_callbacks.append(ModelEvaluationCallback(self))
        if self.config.USE_TENSORBOARD:
            log_dir = ('logs/scalars/train_' + common.now_str())
            tensorboard_callback = keras.callbacks.TensorBoard(log_dir=log_dir, update_freq=(self.config.NUM_BATCHES_TO_LOG_PROGRESS * self.config.TRAIN_BATCH_SIZE))
            keras_callbacks.append(tensorboard_callback)
        return keras_callbacks

    def train(self):
        """Fit the training model, resuming from the tracked epoch count."""
        train_data_input_reader = self._create_data_reader(estimator_action=EstimatorAction.Train)
        training_history = self.keras_train_model.fit(train_data_input_reader.get_dataset(), steps_per_epoch=self.config.train_steps_per_epoch, epochs=self.config.NUM_TRAIN_EPOCHS, initial_epoch=self.training_status.nr_epochs_trained, verbose=self.config.VERBOSE_MODE, callbacks=self._create_train_callbacks())
        self.log(training_history)

    def evaluate(self) -> Optional[ModelEvaluationResults]:
        """Evaluate on the validation reader and unpack the metric vector."""
        val_data_input_reader = self._create_data_reader(estimator_action=EstimatorAction.Evaluate)
        eval_res = self.keras_eval_model.evaluate(val_data_input_reader.get_dataset(), steps=self.config.test_steps, verbose=self.config.VERBOSE_MODE)
        k = self.config.TOP_K_WORDS_CONSIDERED_DURING_PREDICTION
        # NOTE(review): indices assume eval_res layout
        # [total_loss, index_loss, string_loss, top1..topk acc, P, R, F1] —
        # verify against the compiled metrics order.
        return ModelEvaluationResults(topk_acc=eval_res[3:(k + 3)], subtoken_precision=eval_res[(k + 3)], subtoken_recall=eval_res[(k + 4)], subtoken_f1=eval_res[(k + 5)], loss=eval_res[1])

    def predict(self, predict_data_rows: Iterable[str]) -> List[ModelPredictionResults]:
        """Run the raw predict function row by row and assemble results."""
        predict_input_reader = self._create_data_reader(estimator_action=EstimatorAction.Predict)
        input_iterator = predict_input_reader.process_and_iterate_input_from_data_lines(predict_data_rows)
        all_model_prediction_results = []
        for input_row in input_iterator:
            # First 4 tensors are the model inputs; the rest is metadata.
            input_for_predict = input_row[0][:4]
            prediction_results = self.keras_model_predict_function(input_for_predict)
            # Strip the singleton batch dimension from every output array.
            prediction_results = KerasPredictionModelOutput(*common.squeeze_single_batch_dimension_for_np_arrays(prediction_results))
            input_row = _KerasModelInputTensorsFormer(estimator_action=EstimatorAction.Predict).from_model_input_form(input_row)
            input_row = ReaderInputTensors(*common.squeeze_single_batch_dimension_for_np_arrays(input_row))
            attention_per_context = self._get_attention_weight_per_context(path_source_strings=input_row.path_source_token_strings, path_strings=input_row.path_strings, path_target_strings=input_row.path_target_token_strings, attention_weights=prediction_results.attention_weights)
            model_prediction_results = ModelPredictionResults(original_name=common.binary_to_string(input_row.target_string.item()), topk_predicted_words=common.binary_to_string_list(prediction_results.topk_predicted_words), topk_predicted_words_scores=prediction_results.topk_predicted_words_scores, attention_per_context=attention_per_context, code_vector=prediction_results.code_vectors)
            all_model_prediction_results.append(model_prediction_results)
        return all_model_prediction_results

    def _save_inner_model(self, path):
        # Release builds save only the weights; otherwise a full checkpoint
        # (model + optimizer) is written via the checkpoint manager.
        if self.config.RELEASE:
            self.keras_train_model.save_weights(self.config.get_model_weights_path(path))
        else:
            self._get_checkpoint_manager().save(checkpoint_number=self.training_status.nr_epochs_trained)

    def _create_inner_model(self):
        self._create_keras_model()
        self._compile_keras_model()
        self.keras_train_model.summary(print_fn=self.log)

    def _load_inner_model(self):
        """Build the model, then restore either a full checkpoint (required
        when resuming training) or just the saved weights."""
        self._create_keras_model()
        self._compile_keras_model()
        must_use_entire_model = self.config.is_training
        entire_model_exists = os.path.exists(self.config.entire_model_load_path)
        model_weights_exist = os.path.exists(self.config.model_weights_load_path)
        use_full_model = (must_use_entire_model or (not model_weights_exist))
        if (must_use_entire_model and (not entire_model_exists)):
            raise ValueError('There is no model at path `{model_file_path}`. When loading the model for further training, we must use an entire saved model file (not just weights).'.format(model_file_path=self.config.entire_model_load_path))
        if ((not entire_model_exists) and (not model_weights_exist)):
            raise ValueError('There is no entire model to load at path `{entire_model_path}`, and there is no model weights file to load at path `{model_weights_path}`.'.format(entire_model_path=self.config.entire_model_load_path, model_weights_path=self.config.model_weights_load_path))
        if use_full_model:
            self.log('Loading entire model from path `{}`.'.format(self.config.entire_model_load_path))
            latest_checkpoint = tf.train.latest_checkpoint(self.config.entire_model_load_path)
            if (latest_checkpoint is None):
                raise ValueError('Failed to load model: Model latest checkpoint is not found.')
            self.log('Loading latest checkpoint `{}`.'.format(latest_checkpoint))
            status = self._get_checkpoint().restore(latest_checkpoint)
            status.initialize_or_restore()
            # Epoch count is encoded in the checkpoint filename suffix.
            self.training_status.nr_epochs_trained = int(latest_checkpoint.split('-')[(- 1)])
        else:
            self.log('Loading model weights from path `{}`.'.format(self.config.model_weights_load_path))
            self.keras_train_model.load_weights(self.config.model_weights_load_path)
        self.keras_train_model.summary(print_fn=self.log)

    def _get_checkpoint(self):
        """Lazily build the tf.train.Checkpoint over model + optimizer."""
        assert ((self.keras_train_model is not None) and (self.keras_train_model.optimizer is not None))
        if (self._checkpoint is None):
            self._checkpoint = tf.train.Checkpoint(optimizer=self.keras_train_model.optimizer, model=self.keras_train_model)
        return self._checkpoint

    def _get_checkpoint_manager(self):
        """Lazily build the CheckpointManager for the save path."""
        if (self._checkpoint_manager is None):
            self._checkpoint_manager = tf.train.CheckpointManager(self._get_checkpoint(), self.config.entire_model_save_path, max_to_keep=self.config.MAX_TO_KEEP)
        return self._checkpoint_manager

    def _get_vocab_embedding_as_np_array(self, vocab_type: VocabType) -> np.ndarray:
        """Return the (vocab_size, embedding_dim) matrix for a vocab type,
        transposing if the layer stores it the other way around."""
        assert (vocab_type in VocabType)
        vocab_type_to_embedding_layer_mapping = {VocabType.Target: 'target_index', VocabType.Token: 'token_embedding', VocabType.Path: 'path_embedding'}
        embedding_layer_name = vocab_type_to_embedding_layer_mapping[vocab_type]
        weight = np.array(self.keras_train_model.get_layer(embedding_layer_name).get_weights()[0])
        assert (len(weight.shape) == 2)
        assert (self.vocabs.get(vocab_type).size in weight.shape)
        if (self.vocabs.get(vocab_type).size != weight.shape[0]):
            weight = np.transpose(weight)
        return weight

    def _create_lookup_tables(self):
        PathContextReader.create_needed_vocabs_lookup_tables(self.vocabs)
        self.log('Lookup tables created.')

    def _initialize(self):
        self._create_lookup_tables()
class Img2Tensor(object):
    """Convert a PIL image to a tensor of grey and/or RGB channels.

    With both flags set, the output has 4 channels (grey first, then RGB);
    otherwise only the selected representation is returned. Grey-scale
    input images are only accepted when include_grey is enabled.
    """

    def __init__(self, include_rgb: bool = False, include_grey: bool = True) -> None:
        super().__init__()
        assert (include_rgb or include_grey), f'Options must be True for at least one option, given {include_rgb}, {include_grey}'
        self.include_rgb = include_rgb
        self.include_grey = include_grey

    def __call__(self, rgb_img: Image.Image) -> torch.Tensor:
        # Hoisted: the original converted the PIL image to a numpy array
        # four separate times just to inspect its shape.
        shape = np.array(rgb_img).shape
        assert len(shape) in (2, 3), f'Check data dimension: {shape}'
        if len(shape) == 3:
            assert shape[2] == 3
        isrgb: bool = (len(shape) == 3)
        grey_img = tf.to_grayscale(rgb_img, num_output_channels=1)
        grey_img_tensor = tf.to_tensor(grey_img)
        assert grey_img_tensor.shape[0] == 1
        if not isrgb:
            # Spurious f-prefix removed: the message has no placeholders.
            assert self.include_grey, 'Input grey image, you must set include_grey to be True'
            return grey_img_tensor
        rgb_img_tensor = tf.to_tensor(rgb_img)
        assert rgb_img_tensor.shape[0] == 3
        if self.include_rgb and self.include_grey:
            return torch.cat((grey_img_tensor, rgb_img_tensor), dim=0)
        if self.include_grey and (not self.include_rgb):
            return grey_img_tensor
        if (not self.include_grey) and self.include_rgb:
            return rgb_img_tensor
        raise AttributeError('Something wrong here with img, or options.')

    def __repr__(self):
        return f'Image2Tensor(include_rgb={self.include_rgb}, include_grey={self.include_grey})'
def to_numpy(X):
    """Coerce ndarray / pandas / sequence / torch input to a numpy array.

    Raises TypeError for anything that is not one of those.
    """
    if isinstance(X, np.ndarray):
        return X
    # pandas objects expose `.iloc`; `.values` is their ndarray view.
    if hasattr(X, 'iloc'):
        return X.values
    if isinstance(X, (tuple, list)):
        return np.array(X)
    if not isinstance(X, (torch.Tensor, PackedSequence)):
        raise TypeError(f'Cannot convert {type(X)} to a numpy array.')
    tensor = X
    if tensor.is_cuda:
        tensor = tensor.cpu()
    if tensor.requires_grad:
        tensor = tensor.detach()
    return tensor.numpy()
def apply_random_motion_blur(img, chance, mb_max_size, mask=None, rnd_state=None):
    """With probability ``chance`` percent, apply LinearMotionBlur to ``img``.

    ``mb_max_size`` bounds the blur kernel size; ``mask`` (if given) blends
    the blurred result back into the original where the mask is nonzero.
    """
    rng = np.random if rnd_state is None else rnd_state
    # Kernel size and angle are drawn unconditionally so the RNG stream is
    # consumed identically whether or not the blur actually fires.
    kernel_size = rng.randint(mb_max_size) + 1
    angle_deg = rng.randint(360)
    out = img
    if rng.randint(100) < np.clip(chance, 0, 100):
        out = LinearMotionBlur(out, kernel_size, angle_deg)
    if mask is not None:
        out = (img * (1 - mask)) + (out * mask)
    return out
def setup_estimator(hub_module, hub_module_signature, work_dir, tpu_name, save_checkpoints_steps, optimization_params, data_params):
    """Build a TF1 Estimator for fine-tuning a hub module.

    Returns a TPUEstimator when ``tpu_name`` is given, otherwise a plain
    Estimator; both use ``model.model_fn`` and checkpoint into ``work_dir``.
    """
    num_classes = data_params['dataset'].get_num_classes()
    # Merge the parameter dicts; later dicts win on key collisions.
    params = {k: v for d in [optimization_params, data_params, {'hub_module': hub_module, 'hub_module_signature': hub_module_signature, 'num_classes': num_classes}] for (k, v) in d.items()}
    if (tpu_name is not None):
        cluster = tf.contrib.cluster_resolver.TPUClusterResolver(tpu=tpu_name)
        config = tf.contrib.tpu.RunConfig(model_dir=work_dir, cluster=cluster, keep_checkpoint_max=None, save_checkpoints_steps=save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=TPU_ITERATION_PER_LOOP))
    else:
        config = tf.estimator.RunConfig(model_dir=work_dir, keep_checkpoint_max=None, save_checkpoints_steps=save_checkpoints_steps)
    if (tpu_name is not None):
        # TPUEstimator takes batch sizes as constructor args, not params.
        batch_size = params.pop('batch_size')
        batch_size_eval = params.pop('batch_size_eval')
        estimator = tf.contrib.tpu.TPUEstimator(model_fn=model.model_fn, model_dir=work_dir, params=params, config=config, use_tpu=True, train_batch_size=batch_size, eval_batch_size=batch_size_eval)
    else:
        estimator = tf.estimator.Estimator(model_fn=model.model_fn, model_dir=work_dir, params=params, config=config)
    return estimator
def build_non_MSE_yaml():
    """Write a fixture quantization config ('non_MSE.yaml') for tests.

    The config uses an MSE metric with compare_label disabled and pins the
    Gather ops to fp32 via op_wise overrides.
    """
    fake_yaml = "\n model:\n name: imagenet\n framework: onnxrt_qlinearops\n\n quantization:\n approach: post_training_static_quant\n calibration:\n sampling_size: 50\n op_wise: {\n 'Gather_*': {\n 'activation': {'dtype': ['fp32'], 'scheme':['sym']},\n 'weight': {'dtype': ['fp32'], 'scheme':['sym']}\n }\n }\n\n evaluation:\n accuracy:\n metric:\n MSE:\n compare_label: False\n performance:\n warmup: 5\n iteration: 10\n\n tuning:\n accuracy_criterion:\n relative: 0.1\n exit_policy:\n timeout: 0\n random_seed: 9527\n workspace:\n path: ./nc_workspace/recover/\n\n "
    with open('non_MSE.yaml', 'w', encoding='utf-8') as f:
        f.write(fake_yaml)
class TanhNormal(torch.distributions.Distribution):
    """Distribution of ``tanh(z)`` where ``z ~ Normal(loc, scale)``.

    The underlying normal is wrapped in ``Independent(..., 1)``, so the last
    dimension is treated as the event dimension.
    """

    def __init__(self, loc, scale):
        self._normal = Independent(Normal(loc, scale), 1)
        super().__init__(batch_shape=self._normal.batch_shape, event_shape=self._normal.event_shape)

    def log_prob(self, value, pre_tanh_value=None, epsilon=1e-06):
        """Log density of ``value`` in (-1, 1).

        ``pre_tanh_value`` (i.e. atanh(value)) may be supplied to avoid the
        numerically delicate inversion; ``epsilon`` guards the logs near |1|.
        """
        if (pre_tanh_value is None):
            # atanh(value) written out explicitly for numerical control.
            pre_tanh_value = (torch.log((((1 + epsilon) + value) / ((1 + epsilon) - value))) / 2)
        norm_lp = self._normal.log_prob(pre_tanh_value)
        # Change-of-variables correction: -sum log(1 - tanh(z)^2).
        ret = (norm_lp - torch.sum(torch.log((self._clip_but_pass_gradient((1.0 - (value ** 2))) + epsilon)), axis=(- 1)))
        return ret

    def sample(self, sample_shape=torch.Size()):
        """Draw a sample without gradient tracking."""
        with torch.no_grad():
            return self.rsample(sample_shape=sample_shape)

    def rsample(self, sample_shape=torch.Size()):
        """Reparameterized sample: tanh of a normal rsample."""
        z = self._normal.rsample(sample_shape)
        return torch.tanh(z)

    def rsample_with_pre_tanh_value(self, sample_shape=torch.Size()):
        """Return ``(z, tanh(z))`` so callers can feed z back into log_prob."""
        z = self._normal.rsample(sample_shape)
        return (z, torch.tanh(z))

    def cdf(self, value):
        # NOTE(review): delegates to the pre-tanh normal, i.e. `value` is
        # interpreted in z-space, not tanh-space — confirm callers expect this.
        return self._normal.cdf(value)

    def icdf(self, value):
        # NOTE(review): same caveat as cdf — operates in pre-tanh space.
        return self._normal.icdf(value)

    @classmethod
    def _from_distribution(cls, new_normal):
        # BUG FIX: this was a plain instance method, so `cls` was bound to the
        # calling instance and `cls(...)` tried to *call a distribution
        # object*, raising TypeError from expand(). @classmethod restores the
        # intended alternate-constructor semantics.
        # NOTE(review): Normal(0, 0) violates the positive-scale constraint
        # when torch argument validation is enabled — confirm upstream intent.
        new = cls(torch.zeros(1), torch.zeros(1))
        new._normal = new_normal
        return new

    def expand(self, batch_shape, _instance=None):
        new_normal = self._normal.expand(batch_shape, _instance)
        new = self._from_distribution(new_normal)
        return new

    def enumerate_support(self, expand=True):
        return self._normal.enumerate_support(expand)

    # NOTE: mean/variance/entropy shadow Distribution's *properties* with
    # methods; callers must invoke them as d.mean(), d.variance(), d.entropy().
    def mean(self):
        return torch.tanh(self._normal.mean)

    def variance(self):
        return self._normal.variance

    def entropy(self):
        return self._normal.entropy()

    @staticmethod
    def _clip_but_pass_gradient(x, lower=0.0, upper=1.0):
        # BUG FIX: without @staticmethod, the bound call in log_prob passed
        # `self` as `x` and the actual argument as `lower`, crashing on the
        # tensor comparison. Clips x to [lower, upper] in the forward pass
        # while letting gradients flow through unchanged.
        clip_up = (x > upper).float()
        clip_low = (x < lower).float()
        with torch.no_grad():
            clip = (((upper - x) * clip_up) + ((lower - x) * clip_low))
        return (x + clip)

    def __repr__(self):
        return self.__class__.__name__
(version='2.0')
def process_config(config):
    """Load, schema-validate and normalize a quantization config.

    ``config`` is either a path to a yaml file or an already-built DotDict.
    Returns the result of process_and_check_config(); raises RuntimeError on
    missing or malformed yaml.
    """
    if isinstance(config, str):
        try:
            with open(config, 'r') as f:
                content = f.read()
            # Schema lives in different modules depending on install layout.
            try:
                from .schema_check import schema
            except ImportError:
                from ...conf.config import schema
            val = yaml.safe_load(content)
            schema.validate(val)
        except FileNotFoundError as f:
            logger.error('{}.'.format(f))
            raise RuntimeError('The yaml file is not exist. Please check the file name or path.')
        except Exception as e:
            # Covers both yaml parse errors and schema violations.
            logger.error('{}.'.format(e))
            raise RuntimeError('The yaml file format is not correct. Please refer to document.')
    elif isinstance(config, DotDict):
        val = config
    else:
        assert False, f'not supported type {config}'
    return process_and_check_config(val)
class TestAspectRatioGrouping(unittest.TestCase):
    def test_reiter_leak(self):
        """Abandoned iterators over the grouped dataset must not leave full buckets behind."""
        shapes = [(1, 0), (0, 1), (1, 0), (0, 1)]
        data = [{'width': w, 'height': h} for (w, h) in shapes]
        batchsize = 2
        dataset = AspectRatioGroupedDataset(data, batchsize)
        for _ in range(5):
            # Consume exactly two batches, then abandon the iterator mid-way.
            for batch_idx, _batch in enumerate(dataset):
                if batch_idx == 1:
                    break
        # No bucket may have accumulated a full batch worth of leftovers.
        for bucket in dataset._buckets:
            self.assertLess(len(bucket), batchsize)
def get_preds(model, span, inference_vectorizer):
    """Score a single span with ``model``; returns 0 for an empty span.

    ``inference_vectorizer`` is accepted for interface compatibility but not
    used here.
    """
    if len(span) == 0:
        return 0
    batch = torch.FloatTensor([span])
    if USE_CUDA:
        batch = batch.cuda()
    scores = model(batch, batch_size=1)
    # First (only) prediction of the single-element batch, as a python float.
    return scores[0].data.tolist()[0]
def get_final_report(text):
    """Extract the 'FINAL REPORT' section of a radiology note.

    Returns None when no such section exists; otherwise truncates everything
    before the marker and splices out each '(Over)'...'(Cont)' continuation
    block.
    """
    marker = 'FINAL REPORT'
    if marker not in text:
        return None
    text = text[text.index(marker):]
    while ('(Over)' in text) and ('(Cont)' in text):
        over_at = text.index('(Over)')
        cont_at = text.index('(Cont)')
        text = text[:over_at] + text[cont_at + len('(Cont)'):]
    return text
def set_seed(args: argparse.Namespace):
    """Seed python, numpy and torch RNGs from ``args.seed`` for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        # Also seed every visible CUDA device.
        torch.cuda.manual_seed_all(args.seed)
def _is_valid_glassbox_explainer(proposed_explainer):
    """Return True when the explainer is valid AND has fit/predict methods.

    Each missing capability is logged as a warning; any exception during
    validation is logged and treated as invalid.
    """
    try:
        valid = _is_valid_explainer(proposed_explainer, 'model')
        fit_ok = hasattr(proposed_explainer, 'fit')
        predict_ok = hasattr(proposed_explainer, 'predict')
        if not valid:
            _log.warning('Explainer not valid due to missing explain_local or global function.')
        if not fit_ok:
            _log.warning('Explainer not valid due to missing fit function.')
        if not predict_ok:
            _log.warning('Explainer not valid due to missing predict function.')
        return valid and fit_ok and predict_ok
    except Exception as e:
        _log.warning('Validate function threw exception {}'.format(e))
        return False
class PSP(BaseDecodeHead):
    """Pyramid Scene Parsing decode head.

    Pools the last input feature map at multiple scales (PPM), concatenates
    the pooled maps with the original, and fuses them with a 3x3 conv.
    """
    def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs):
        super(PSP, self).__init__(input_transform='multiple_select', **kwargs)
        self.psp_modules = PPM(pool_scales, self.in_channels[(- 1)], self.channels, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, align_corners=self.align_corners)
        # Fuse original features + one pooled map per scale back to `channels`.
        self.bottleneck = ConvModule((self.in_channels[(- 1)] + (len(pool_scales) * self.channels)), self.channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
    def psp_forward(self, inputs):
        """Run PPM on the last feature map and fuse with the bottleneck conv."""
        x = inputs[(- 1)]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output
    def forward(self, inputs):
        inputs = self._transform_inputs(inputs)
        return self.psp_forward(inputs)
def fof_paths(G, i):
    """Count two-hop paths from node ``i`` to each friend-of-friend.

    Returns a dict mapping each node that is exactly two hops from ``i``
    (not ``i`` itself, not a direct neighbor) to the number of distinct
    intermediate neighbors connecting them.
    """
    path_counts = {}
    direct = list(nx.neighbors(G, i))
    for middle in direct:
        for endpoint in nx.neighbors(G, middle):
            # Skip the start node and anyone already adjacent to it.
            if (endpoint == i) or (endpoint in direct):
                continue
            path_counts[endpoint] = path_counts.get(endpoint, 0) + 1
    return path_counts
def test_update_move_metadata_fn():
    """MoveMetadata accumulates acceptance stats and adjusts std_move every N moves."""
    nmoves_per_update = 5
    original_std_move = 0.9
    def multiplicative_adjustment(val, accept_avg):
        # New std is old std scaled by the mean acceptance rate.
        return (val * accept_avg)
    # Five per-walker accept masks; row i is the mask applied on move i.
    move_masks = jnp.array([[1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0, 1.0]])
    # Expected running means of the masks after each of the first four moves.
    accept_sums = jnp.array([0.5, 0.5, 1.5, 2.25, 3.0])
    # 0.9 * (3.0 / 5) = 0.54 after the fifth move triggers the update.
    std_move_after_update = 0.54
    update_metadata_fn = dwpa.make_update_move_metadata_fn(nmoves_per_update, multiplicative_adjustment)
    metadata = dwpa.MoveMetadata(std_move=original_std_move, move_acceptance_sum=0.0, moves_since_update=0)
    for i in range(0, 4):
        # Before the update threshold: counters advance, std_move unchanged.
        metadata = update_metadata_fn(metadata, move_masks[i])
        np.testing.assert_allclose(metadata['moves_since_update'], (i + 1))
        np.testing.assert_allclose(metadata['move_acceptance_sum'], accept_sums[i])
        np.testing.assert_allclose(metadata['std_move'], original_std_move)
    # Fifth move: counters reset and std_move is adjusted.
    metadata = update_metadata_fn(metadata, move_masks[4])
    np.testing.assert_allclose(metadata['moves_since_update'], 0)
    np.testing.assert_allclose(metadata['move_acceptance_sum'], 0)
    np.testing.assert_allclose(metadata['std_move'], std_move_after_update)
def main(test_files, pretrained_file, labeldict, output_dir, batch_size=32):
    """Predict MNLI matched/mismatched labels with a pretrained ESIM model.

    Args:
        test_files: dict with 'matched'/'mismatched' paths to pickled data.
        pretrained_file: checkpoint containing model weights.
        labeldict: index -> label-string mapping used by predict().
        output_dir: where the two prediction CSVs are written.
        batch_size: evaluation batch size.
    """
    device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
    print((20 * '='), ' Preparing for testing ', (20 * '='))
    output_dir = os.path.normpath(output_dir)
    if (not os.path.exists(output_dir)):
        os.makedirs(output_dir)
    checkpoint = torch.load(pretrained_file)
    # Model hyper-parameters are recovered from checkpoint weight shapes.
    vocab_size = checkpoint['model']['_word_embedding.weight'].size(0)
    embedding_dim = checkpoint['model']['_word_embedding.weight'].size(1)
    hidden_size = checkpoint['model']['_projection.0.weight'].size(0)
    num_classes = checkpoint['model']['_classification.4.weight'].size(0)
    print('\t* Loading test data...')
    with open(os.path.normpath(test_files['matched']), 'rb') as pkl:
        matched_test_data = NLIDataset(pickle.load(pkl))
    with open(os.path.normpath(test_files['mismatched']), 'rb') as pkl:
        mismatched_test_data = NLIDataset(pickle.load(pkl))
    matched_test_loader = DataLoader(matched_test_data, shuffle=False, batch_size=batch_size)
    mismatched_test_loader = DataLoader(mismatched_test_data, shuffle=False, batch_size=batch_size)
    print('\t* Building model...')
    model = ESIM(vocab_size, embedding_dim, hidden_size, num_classes=num_classes, device=device).to(device)
    model.load_state_dict(checkpoint['model'])
    print((20 * '='), ' Prediction on MNLI with ESIM model on device: {} '.format(device), (20 * '='))
    print('\t* Prediction for matched test set...')
    predictions = predict(model, matched_test_loader, labeldict)
    with open(os.path.join(output_dir, 'matched_predictions.csv'), 'w') as output_f:
        output_f.write('pairID,gold_label\n')
        for pair_id in predictions:
            output_f.write((((pair_id + ',') + predictions[pair_id]) + '\n'))
    print('\t* Prediction for mismatched test set...')
    predictions = predict(model, mismatched_test_loader, labeldict)
    with open(os.path.join(output_dir, 'mismatched_predictions.csv'), 'w') as output_f:
        output_f.write('pairID,gold_label\n')
        for pair_id in predictions:
            output_f.write((((pair_id + ',') + predictions[pair_id]) + '\n'))
class DeprecateAction(argparse.Action):
    """Argparse action that rejects a deprecated flag with a helpful message.

    Register a flag with ``action=DeprecateAction``; any use of the flag then
    raises argparse.ArgumentTypeError whose message includes the ``help``
    text (typically pointing at the replacement flag).
    """

    def __init__(self, option_strings, dest, help=None, **kwargs):
        # nargs=0: the flag consumes no values; it only triggers __call__.
        super(DeprecateAction, self).__init__(option_strings, dest, nargs=0, help=help, **kwargs)

    def __call__(self, parser, namespace, values, flag_name):
        # BUG FIX: the original read `self.mdhelp`, an attribute that never
        # exists on argparse.Action, so every deprecated-flag use crashed
        # with AttributeError instead of showing the deprecation message.
        help = (self.help if (self.help is not None) else '')
        msg = ("Flag '%s' is deprecated. %s" % (flag_name, help))
        raise argparse.ArgumentTypeError(msg)
class TestEasy_post_processing(TestCase):
    def test_easy_post_processing(self):
        """Smoke-test easy_post_processing on representative tokenized sentences."""
        samples = ["In two years ' time , the Scandinavian nation is slated to become the first in the world to phase out radio entirely .", 'Digitally , there are four times that number .', 'Frum : Ukrainians want to enter EU and lessen dependence on Russia ; Putin fighting to stop it .', "-LRB- CNN -RRB- He might have just won one of sport 's most prestigious events , but it was n't long before Jordan Spieth 's thoughts turned to his autistic sister in the glow of victory . "]
        # Only checks that processing runs; output is printed for inspection.
        for sample in samples:
            processed = easy_post_processing(sample)
            print(processed)
class NiceRepr(object):
    """Mixin providing readable __repr__/__str__ from a __nice__ hook.

    Subclasses either define __nice__ themselves or define __len__, in which
    case the default __nice__ reports the length. If neither is available the
    mixin warns and falls back to object.__repr__.
    """

    def __nice__(self):
        """Default summary string: the object's length, if it has one."""
        if not hasattr(self, '__len__'):
            raise NotImplementedError('Define the __nice__ method for {!r}'.format(self.__class__))
        return str(len(self))

    def __repr__(self):
        try:
            summary = self.__nice__()
            return '<{0}({1}) at {2}>'.format(self.__class__.__name__, summary, hex(id(self)))
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)

    def __str__(self):
        try:
            summary = self.__nice__()
            return '<{0}({1})>'.format(self.__class__.__name__, summary)
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)
def test_scale():
    """Scale() defaults to 1.0, accepts a custom factor, and preserves shape."""
    for init_value in (None, 10.0):
        layer = Scale() if init_value is None else Scale(init_value)
        expected = 1.0 if init_value is None else init_value
        assert (layer.scale.data == expected)
        assert (layer.scale.dtype == torch.float)
        inp = torch.rand(1, 3, 64, 64)
        out = layer(inp)
        assert (out.shape == (1, 3, 64, 64))
class MultiCameraImageDataset(Dataset):
    """Dataset yielding synchronized frames from multiple cameras (WildTrack).

    Each item is a tuple of ``num_camera`` tensors, one per camera, produced
    by channel-concatenating the camera images and chunking after transform.
    """
    def __init__(self, ds_type='train', ds_name='wildtrack', root='/home/xzhangga/datasets/WildTrack/', crop_size=(256, 256), num_camera=7, **kwargs):
        super().__init__()
        self.path = Path(f'{root}')
        self.ds_name = ds_name
        self.ds_type = ds_type
        # Train: random crop + flips for augmentation; otherwise plain tensor.
        if (ds_type == 'train'):
            self.transform = transforms.Compose([transforms.ToTensor(), transforms.RandomCrop(crop_size), transforms.RandomHorizontalFlip(p=0.5), transforms.RandomVerticalFlip(p=0.5)])
        else:
            self.transform = transforms.Compose([transforms.ToTensor()])
        self.num_camera = num_camera
        self.image_lists = self.get_files()
        # Test images are cropped to dimensions divisible by 64.
        if (ds_type == 'test'):
            self.crop = MinimalCrop(min_div=64)
        else:
            self.crop = None
        print(f'Loaded dataset {ds_name} from {self.path}. Found {len(self.image_lists[0])} files.')
    def __len__(self):
        return len(self.image_lists[0])
    def __getitem__(self, index):
        # Load the same frame index from every camera.
        image_list = [Image.open(self.image_lists[i][index]).convert('RGB') for i in range(self.num_camera)]
        if (self.crop is not None):
            image_list = [self.crop(image) for image in image_list]
        # Stack along the channel axis so one transform is applied jointly,
        # then split back into per-camera tensors.
        frames = np.concatenate([np.asarray(image) for image in image_list], axis=(- 1))
        frames = torch.chunk(self.transform(frames), self.num_camera)
        return frames
    def set_stage(self, stage):
        """Switch to smaller random crops for staged/progressive training."""
        if (stage == 0):
            print('Using (32, 32) crops')
            self.crop = transforms.RandomCrop((32, 32))
        elif (stage == 1):
            print('Using (28, 28) crops')
            self.crop = transforms.RandomCrop((28, 28))
    def get_files(self):
        """Collect per-camera file lists; frames <= 2000 are train, > 2000 test."""
        if (self.ds_name == 'wildtrack'):
            image_lists = [[] for i in range(self.num_camera)]
            # Camera C1 drives the listing; other cameras mirror its paths.
            for image_path in self.path.glob(f'images/C1/*.png'):
                if ((self.ds_type == 'train') and (int(image_path.stem) <= 2000)):
                    image_lists[0].append(str(image_path))
                    for idx in range(1, self.num_camera):
                        image_lists[idx].append(str(image_path).replace('C1', ('C' + str((idx + 1)))))
                elif ((self.ds_type == 'test') and (int(image_path.stem) > 2000)):
                    image_lists[0].append(str(image_path))
                    for idx in range(1, self.num_camera):
                        image_lists[idx].append(str(image_path).replace('C1', ('C' + str((idx + 1)))))
                else:
                    # NOTE(review): this `else` pairs with the per-image split
                    # check, so e.g. a 'train' run hitting a frame > 2000
                    # raises instead of skipping it. Presumably this was meant
                    # as the unknown-ds_name fallback — confirm intent.
                    raise NotImplementedError
        return image_lists
class InceptionV3Aux(nn.Module):
    """Inception-V3 backbone with an auxiliary classifier head.

    forward() returns ``(logits, aux_logits)``; the aux head is only computed
    in training mode (``aux`` is None during eval).
    """
    def __init__(self, inception_blocks=None, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'):
        super(InceptionV3Aux, self).__init__()
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        # The seven building-block classes can be overridden (e.g. for
        # quantization-friendly variants); order is fixed.
        if (inception_blocks is None):
            inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]
        assert (len(inception_blocks) == 7)
        conv_block = inception_blocks[0]
        inception_a = inception_blocks[1]
        inception_b = inception_blocks[2]
        inception_c = inception_blocks[3]
        inception_d = inception_blocks[4]
        inception_e = inception_blocks[5]
        inception_aux = inception_blocks[6]
        # Stem convolutions.
        self.Conv2d_1a_3x3 = conv_block(in_chans, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3)
        # Inception stages 5-7; the aux head taps the 768-channel features.
        self.Mixed_5b = inception_a(192, pool_features=32)
        self.Mixed_5c = inception_a(256, pool_features=64)
        self.Mixed_5d = inception_a(288, pool_features=64)
        self.Mixed_6a = inception_b(288)
        self.Mixed_6b = inception_c(768, channels_7x7=128)
        self.Mixed_6c = inception_c(768, channels_7x7=160)
        self.Mixed_6d = inception_c(768, channels_7x7=160)
        self.Mixed_6e = inception_c(768, channels_7x7=192)
        self.AuxLogits = inception_aux(768, num_classes)
        self.Mixed_7a = inception_d(768)
        self.Mixed_7b = inception_e(1280)
        self.Mixed_7c = inception_e(2048)
        self.num_features = 2048
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        self.fc = nn.Linear((self.num_features * self.global_pool.feat_mult()), num_classes)
        # Weight init: truncated normal for conv/linear (per-module stddev if
        # provided), constant for batch-norm.
        for m in self.modules():
            if (isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear)):
                stddev = (m.stddev if hasattr(m, 'stddev') else 0.1)
                trunc_normal_(m.weight, std=stddev)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward_features(self, x):
        """Run the backbone; returns (features, aux_logits-or-None)."""
        x = self.Conv2d_1a_3x3(x)
        x = self.Conv2d_2a_3x3(x)
        x = self.Conv2d_2b_3x3(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        x = self.Conv2d_3b_1x1(x)
        x = self.Conv2d_4a_3x3(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        x = self.Mixed_5b(x)
        x = self.Mixed_5c(x)
        x = self.Mixed_5d(x)
        x = self.Mixed_6a(x)
        x = self.Mixed_6b(x)
        x = self.Mixed_6c(x)
        x = self.Mixed_6d(x)
        x = self.Mixed_6e(x)
        # Aux head only contributes during training.
        aux = (self.AuxLogits(x) if self.training else None)
        x = self.Mixed_7a(x)
        x = self.Mixed_7b(x)
        x = self.Mixed_7c(x)
        return (x, aux)
    def get_classifier(self):
        return self.fc
    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace pooling + final linear layer for a new class count (0 -> Identity)."""
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        self.num_classes = num_classes
        if (self.num_classes > 0):
            self.fc = nn.Linear((self.num_features * self.global_pool.feat_mult()), num_classes)
        else:
            self.fc = nn.Identity()
    def forward(self, x):
        (x, aux) = self.forward_features(x)
        x = self.global_pool(x).flatten(1)
        if (self.drop_rate > 0):
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        x = self.fc(x)
        return (x, aux)
def parse_args():
    """Parse CLI args, fold them into the global config, and set up SLURM env.

    Returns ``(args, config)``. When --slurm is given, MASTER_ADDR/PORT and
    rank environment variables are derived from SLURM variables for
    distributed init.
    """
    parser = argparse.ArgumentParser('Train Cognition Network')
    parser.add_argument('--cfg', type=str, help='path to config file')
    parser.add_argument('--model-dir', type=str, help='root path to store checkpoint')
    parser.add_argument('--log-dir', type=str, help='tensorboard log dir')
    parser.add_argument('--dist', help='whether to use distributed training', default=False, action='store_true')
    parser.add_argument('--slurm', help='whether this is a slurm job', default=False, action='store_true')
    parser.add_argument('--do-test', help='whether to generate csv result on test set', default=False, action='store_true')
    parser.add_argument('--cudnn-off', help='disable cudnn', default=False, action='store_true')
    parser.add_argument('--partial-pretrain', type=str)
    args = parser.parse_args()
    # CLI values override the yaml-loaded global config.
    if (args.cfg is not None):
        update_config(args.cfg)
    if (args.model_dir is not None):
        config.OUTPUT_PATH = os.path.join(args.model_dir, config.OUTPUT_PATH)
    if (args.partial_pretrain is not None):
        config.NETWORK.PARTIAL_PRETRAIN = args.partial_pretrain
    if args.slurm:
        # Translate SLURM job info into torch.distributed env variables.
        proc_id = int(os.environ['SLURM_PROCID'])
        ntasks = int(os.environ['SLURM_NTASKS'])
        node_list = os.environ['SLURM_NODELIST']
        num_gpus = torch.cuda.device_count()
        # First hostname in the node list becomes the rendezvous master.
        addr = subprocess.getoutput('scontrol show hostname {} | head -n1'.format(node_list))
        os.environ['MASTER_PORT'] = str(29500)
        os.environ['MASTER_ADDR'] = addr
        os.environ['WORLD_SIZE'] = str(ntasks)
        os.environ['RANK'] = str(proc_id)
        os.environ['LOCAL_RANK'] = str((proc_id % num_gpus))
    return (args, config)
class SNLIClassifier(nn.Module):
    """MLP classifier over sentence-pair features for SNLI.

    The input to the MLP is [pre; hyp; |pre - hyp|; pre * hyp], optionally
    batch-normalized, passed through ``num_layers`` Linear+ReLU blocks and a
    final linear projection to ``num_classes`` logits.
    """

    def __init__(self, num_classes, input_dim, hidden_dim, num_layers, use_batchnorm, dropout_prob):
        super(SNLIClassifier, self).__init__()
        self.num_classes = num_classes
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.use_batchnorm = use_batchnorm
        self.dropout_prob = dropout_prob
        if use_batchnorm:
            # BN on the concatenated features and on the MLP output.
            self.bn_mlp_input = nn.BatchNorm1d(num_features=(4 * input_dim))
            self.bn_mlp_output = nn.BatchNorm1d(num_features=hidden_dim)
        self.dropout = nn.Dropout(dropout_prob)
        # First layer consumes the 4-way feature concat; the rest are
        # hidden-to-hidden.
        self.mlp = nn.Sequential(*[
            nn.Sequential(
                nn.Linear(in_features=((4 * input_dim) if layer_idx == 0 else hidden_dim), out_features=hidden_dim),
                nn.ReLU())
            for layer_idx in range(num_layers)])
        self.clf_linear = nn.Linear(in_features=hidden_dim, out_features=num_classes)
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming init for hidden layers, small uniform init for the classifier."""
        if self.use_batchnorm:
            self.bn_mlp_input.reset_parameters()
            self.bn_mlp_output.reset_parameters()
        for layer_idx in range(self.num_layers):
            hidden_linear = self.mlp[layer_idx][0]
            init.kaiming_normal_(hidden_linear.weight.data)
            init.constant_(hidden_linear.bias.data, val=0)
        init.uniform_(self.clf_linear.weight.data, (- 0.005), 0.005)
        init.constant_(self.clf_linear.bias.data, val=0)

    def forward(self, pre, hyp):
        """Return logits of shape (batch, num_classes)."""
        features = torch.cat([pre, hyp, torch.abs((pre - hyp)), (pre * hyp)], dim=1)
        if self.use_batchnorm:
            features = self.bn_mlp_input(features)
        features = self.dropout(features)
        hidden = self.mlp(features)
        if self.use_batchnorm:
            hidden = self.bn_mlp_output(hidden)
        hidden = self.dropout(hidden)
        return self.clf_linear(hidden)
class Conv1dWithInitialization(BaseModule):
    """1-D convolution whose weight is orthogonally initialized (gain=1)."""

    def __init__(self, **conv_kwargs):
        super(Conv1dWithInitialization, self).__init__()
        conv = torch.nn.Conv1d(**conv_kwargs)
        torch.nn.init.orthogonal_(conv.weight.data, gain=1)
        self.conv1d = conv

    def forward(self, x):
        """Apply the convolution to ``x``."""
        return self.conv1d(x)
def main(_):
    """Export a frozen ReID graph (pb) as a TF SavedModel for serving.

    Loads ``pb_path`` into a fresh graph, looks up the image input and
    embedding output tensors, and writes a SavedModel with a single
    'reid_mgn_serving' signature to ``export_path`` (module-level globals).
    """
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        # Deserialize the frozen GraphDef and import it unprefixed.
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(pb_path, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    with detection_graph.as_default():
        with tf.Session() as sess:
            input_image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            result_embedding = tf.get_default_graph().get_tensor_by_name('person_embedding:0')
            print('', input_image_tensor)
            if os.path.exists(export_path):
                # NOTE(review): os.rmdir only removes an *empty* directory; a
                # previously written SavedModel here would make this fail.
                os.rmdir(export_path)
            # NOTE(review): log.info is called logging-style with a second
            # positional arg, not %-formatting — confirm the logger supports it.
            log.info('Exporting trained model to', export_path)
            builder = tf.saved_model.builder.SavedModelBuilder(export_path)
            input_image = tf.saved_model.utils.build_tensor_info(input_image_tensor)
            reid_embedding = tf.saved_model.utils.build_tensor_info(result_embedding)
            reid_mgn_signature = tf.saved_model.signature_def_utils.build_signature_def(inputs={'input_image': input_image}, outputs={'reid_embedding': reid_embedding}, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
            legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
            builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING], signature_def_map={'reid_mgn_serving': reid_mgn_signature}, legacy_init_op=legacy_init_op)
            builder.save()
            log.info('Build Done')
def run_seq_group_alignments(seq_groups, alignment_runner, args):
    """Run alignments once per unique sequence and fan results out to duplicates.

    Args:
        seq_groups: iterable of ``(sequence, [names...])`` — all names share
            the sequence, so alignments are computed only for ``names[0]`` and
            copied into directories for the remaining names.
        alignment_runner: object with ``run(fasta_path, output_dir)``.
        args: must provide ``output_dir``.
    """
    dirs = set(os.listdir(args.output_dir))
    for (seq, names) in seq_groups:
        first_name = names[0]
        alignment_dir = os.path.join(args.output_dir, first_name)
        try:
            os.makedirs(alignment_dir)
        except Exception as e:
            # Directory already present (or unwritable): assume processed.
            logging.warning(f'Failed to create directory for {first_name} with exception {e}...')
            continue
        (fd, fasta_path) = tempfile.mkstemp(suffix='.fasta')
        with os.fdopen(fd, 'w') as fp:
            # BUG FIX: the original triple-quoted literal embedded the source
            # file's indentation before the sequence line, producing an
            # invalid FASTA record.
            fp.write(f'>query\n{seq}')
        try:
            alignment_runner.run(fasta_path, alignment_dir)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            logging.warning(f'Failed to run alignments for {first_name}. Skipping...')
            os.remove(fasta_path)
            # NOTE(review): rmdir fails if the runner left partial output
            # behind — confirm the runner cleans up on failure.
            os.rmdir(alignment_dir)
            continue
        os.remove(fasta_path)
        for name in names[1:]:
            if (name in dirs):
                logging.warning(f'{name} has already been processed. Skipping...')
                continue
            cp_dir = os.path.join(args.output_dir, name)
            os.makedirs(cp_dir, exist_ok=True)
            for f in os.listdir(alignment_dir):
                copyfile(os.path.join(alignment_dir, f), os.path.join(cp_dir, f))
def main():
    """Render the musdb18hq validation split through a custom limiter.

    Writes each limited mix to ``save_path/valid/<name>.wav`` and records the
    per-track loudness and limiter attack/release parameters as json files.
    Paths are hard-coded placeholders and must be edited before use.
    """
    data_path = '/path/to/musdb18hq'
    save_path = '/path/to/musdb18hq_custom_limiter_fixed_attack'
    batch_size = 1
    num_workers = 1
    sr = 44100  # musdb18hq sample rate used when writing wavs
    # Fixed 2ms attack: the custom attack range collapses to a single value.
    dataset = DelimitValidDataset(root=data_path, use_custom_limiter=True, custom_limiter_attack_range=[2.0, 2.0])
    data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False)
    dict_valid_loudness = {}
    dict_limiter_params = {}
    for (limited_audio, orig_audio, audio_name, loudness, custom_attack, custom_release) in tqdm.tqdm(data_loader):
        # batch_size is 1, so unwrap the singleton batch dimension.
        audio_name = audio_name[0]
        limited_audio = limited_audio[0].numpy()
        loudness = float(loudness[0].numpy())
        dict_valid_loudness[audio_name] = loudness
        dict_limiter_params[audio_name] = {'attack_ms': float(custom_attack[0].numpy()), 'release_ms': float(custom_release[0].numpy())}
        os.makedirs(os.path.join(save_path, 'valid'), exist_ok=True)
        audio_path = os.path.join(save_path, 'valid', audio_name)
        # soundfile expects (frames, channels); the tensor is (channels, frames).
        sf.write(f'{audio_path}.wav', limited_audio.T, sr)
    with open(os.path.join(save_path, 'valid_loudness.json'), 'w') as f:
        json.dump(dict_valid_loudness, f, indent=4)
    with open(os.path.join(save_path, 'valid_limiter_params.json'), 'w') as f:
        json.dump(dict_limiter_params, f, indent=4)
def find_crop_x_boundaries(img):
    """Find the x-extent of the first run of non-white columns in ``img``.

    Scans columns left to right; a column is "white" when every pixel equals
    (255, 255, 255). Returns ``(leftmost_x, rightmost_x)`` of the first
    contiguous non-white run, or ``(None, None)`` for an all-white image.
    Later non-white runs are ignored (original behavior, kept).

    NOTE(review): assumes pixels are 3-tuples (RGB mode) — an RGBA or
    grayscale image would never compare equal to white; confirm callers
    convert first.
    """
    (width, height) = img.size
    pixels = img.load()
    white_color = (255, 255, 255)
    leftmost_x = None
    rightmost_x = None
    for x in range(width):
        all_white = True
        for y in range(height):
            if (pixels[(x, y)] != white_color):
                all_white = False
                break
        if ((not all_white) and (leftmost_x is None)):
            leftmost_x = x
        elif (all_white and (leftmost_x is not None) and (rightmost_x is None)):
            rightmost_x = (x - 1)
    # BUG FIX: when content extends to the right edge there is no trailing
    # white column, so the original returned (leftmost_x, None); close the
    # run at the last column instead.
    if (leftmost_x is not None) and (rightmost_x is None):
        rightmost_x = (width - 1)
    return (leftmost_x, rightmost_x)
class Render():
def __init__(self, width=1600, height=1200, name='GL Renderer', program_files=['simple.fs', 'simple.vs'], color_size=1, ms_rate=1):
    """Set up an offscreen GL renderer: GLUT window, shaders, and FBOs.

    ``program_files`` are compiled by extension (.vs/.fs/.gs); ``color_size``
    is the number of color attachments; ``ms_rate`` > 1 enables multisampled
    rendering with a resolve FBO.
    """
    self.width = width
    self.height = height
    self.name = name
    self.display_mode = ((GLUT_DOUBLE | GLUT_RGB) | GLUT_DEPTH)
    self.use_inverse_depth = False
    # GLUT context is created once and shared across Render instances.
    global _glut_window
    if (_glut_window is None):
        glutInit()
        glutInitDisplayMode(self.display_mode)
        glutInitWindowSize(self.width, self.height)
        glutInitWindowPosition(0, 0)
        _glut_window = glutCreateWindow('My Render.')
    glEnable(GL_DEPTH_TEST)
    # Disable clamping so float render targets keep HDR values.
    glClampColor(GL_CLAMP_READ_COLOR, GL_FALSE)
    glClampColor(GL_CLAMP_FRAGMENT_COLOR, GL_FALSE)
    glClampColor(GL_CLAMP_VERTEX_COLOR, GL_FALSE)
    # Compile and link the user-supplied shader program.
    shader_list = []
    for program_file in program_files:
        (_, ext) = os.path.splitext(program_file)
        if (ext == '.vs'):
            shader_list.append(loadShader(GL_VERTEX_SHADER, program_file))
        elif (ext == '.fs'):
            shader_list.append(loadShader(GL_FRAGMENT_SHADER, program_file))
        elif (ext == '.gs'):
            shader_list.append(loadShader(GL_GEOMETRY_SHADER, program_file))
    self.program = createProgram(shader_list)
    for shader in shader_list:
        glDeleteShader(shader)
    self.model_mat_unif = glGetUniformLocation(self.program, 'ModelMat')
    self.persp_mat_unif = glGetUniformLocation(self.program, 'PerspMat')
    self.vertex_buffer = glGenBuffers(1)
    # Fullscreen-quad program used to blit/display results.
    (self.quad_program, self.quad_buffer) = self.init_quad_program()
    self.frame_buffer = glGenFramebuffers(1)
    glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer)
    self.intermediate_fbo = None
    if (ms_rate > 1):
        # Multisampled path: MS color textures + MS depth renderbuffer, plus
        # a second (resolve) FBO with plain textures to read results from.
        self.color_buffer = []
        for i in range(color_size):
            color_buffer = glGenTextures(1)
            multi_sample_rate = ms_rate
            glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, color_buffer)
            # NOTE(review): these parameters target GL_TEXTURE_2D while a
            # GL_TEXTURE_2D_MULTISAMPLE texture is bound — confirm intended.
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
            glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, multi_sample_rate, GL_RGBA32F, self.width, self.height, GL_TRUE)
            glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, 0)
            glFramebufferTexture2D(GL_FRAMEBUFFER, (GL_COLOR_ATTACHMENT0 + i), GL_TEXTURE_2D_MULTISAMPLE, color_buffer, 0)
            self.color_buffer.append(color_buffer)
        self.render_buffer = glGenRenderbuffers(1)
        glBindRenderbuffer(GL_RENDERBUFFER, self.render_buffer)
        glRenderbufferStorageMultisample(GL_RENDERBUFFER, multi_sample_rate, GL_DEPTH24_STENCIL8, self.width, self.height)
        glBindRenderbuffer(GL_RENDERBUFFER, 0)
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, self.render_buffer)
        attachments = []
        for i in range(color_size):
            attachments.append((GL_COLOR_ATTACHMENT0 + i))
        glDrawBuffers(color_size, attachments)
        glBindFramebuffer(GL_FRAMEBUFFER, 0)
        # Resolve FBO: plain 2D float textures to blit the MS buffers into.
        self.intermediate_fbo = glGenFramebuffers(1)
        glBindFramebuffer(GL_FRAMEBUFFER, self.intermediate_fbo)
        self.screen_texture = []
        for i in range(color_size):
            screen_texture = glGenTextures(1)
            glBindTexture(GL_TEXTURE_2D, screen_texture)
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, self.width, self.height, 0, GL_RGBA, GL_FLOAT, None)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
            glFramebufferTexture2D(GL_FRAMEBUFFER, (GL_COLOR_ATTACHMENT0 + i), GL_TEXTURE_2D, screen_texture, 0)
            self.screen_texture.append(screen_texture)
        glDrawBuffers(color_size, attachments)
        glBindFramebuffer(GL_FRAMEBUFFER, 0)
    else:
        # Single-sample path: plain float color textures + depth texture.
        self.color_buffer = []
        for i in range(color_size):
            color_buffer = glGenTextures(1)
            glBindTexture(GL_TEXTURE_2D, color_buffer)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, self.width, self.height, 0, GL_RGBA, GL_FLOAT, None)
            glFramebufferTexture2D(GL_FRAMEBUFFER, (GL_COLOR_ATTACHMENT0 + i), GL_TEXTURE_2D, color_buffer, 0)
            self.color_buffer.append(color_buffer)
        self.depth_buffer = glGenTextures(1)
        glBindTexture(GL_TEXTURE_2D, self.depth_buffer)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_DEPTH_TEXTURE_MODE, GL_INTENSITY)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, self.width, self.height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, None)
        glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, self.depth_buffer, 0)
        attachments = []
        for i in range(color_size):
            attachments.append((GL_COLOR_ATTACHMENT0 + i))
        glDrawBuffers(color_size, attachments)
        self.screen_texture = self.color_buffer
        glBindFramebuffer(GL_FRAMEBUFFER, 0)
    # Per-instance render state, filled by set_mesh/set_viewpoint.
    self.render_texture = None
    self.render_texture_v2 = {}
    self.vertex_data = None
    self.vertex_dim = None
    self.n_vertices = None
    self.model_view_matrix = None
    self.projection_matrix = None
    glutDisplayFunc(self.display)
def init_quad_program(self):
    """Compile the fullscreen-quad shader program and upload its vertex buffer.

    Returns (program, vertex_buffer). The quad is two triangles covering NDC,
    with interleaved (x, y, u, v) attributes.
    """
    shader_list = []
    shader_list.append(loadShader(GL_VERTEX_SHADER, 'quad.vs'))
    shader_list.append(loadShader(GL_FRAGMENT_SHADER, 'quad.fs'))
    the_program = createProgram(shader_list)
    # Shaders can be deleted once linked into the program.
    for shader in shader_list:
        glDeleteShader(shader)
    quad_vertices = np.array([(- 1.0), 1.0, 0.0, 1.0, (- 1.0), (- 1.0), 0.0, 0.0, 1.0, (- 1.0), 1.0, 0.0, (- 1.0), 1.0, 0.0, 1.0, 1.0, (- 1.0), 1.0, 0.0, 1.0, 1.0, 1.0, 1.0])
    quad_buffer = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, quad_buffer)
    glBufferData(GL_ARRAY_BUFFER, quad_vertices, GL_STATIC_DRAW)
    glBindBuffer(GL_ARRAY_BUFFER, 0)
    return (the_program, quad_buffer)
def set_mesh(self, vertices, faces):
    """Upload a triangle mesh, expanded to a flat per-face vertex array."""
    # De-index: duplicate vertices per face so glDrawArrays can be used.
    self.vertex_data = vertices[faces.reshape([(- 1)])]
    self.vertex_dim = self.vertex_data.shape[1]
    self.n_vertices = self.vertex_data.shape[0]
    glBindBuffer(GL_ARRAY_BUFFER, self.vertex_buffer)
    glBufferData(GL_ARRAY_BUFFER, self.vertex_data, GL_STATIC_DRAW)
    glBindBuffer(GL_ARRAY_BUFFER, 0)
def set_viewpoint(self, projection, model_view):
    """Store the projection and model-view matrices used by draw()."""
    self.projection_matrix = projection
    self.model_view_matrix = model_view
def draw_init(self):
    """Bind the offscreen framebuffer and reset color/depth state for a frame."""
    glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer)
    glEnable(GL_DEPTH_TEST)
    glClearColor(1.0, 1.0, 1.0, 0.0)
    # Inverse-depth mode flips the depth convention: clear to 0 and keep
    # the GREATER fragment instead of clearing to 1 and keeping LESS.
    if self.use_inverse_depth:
        glDepthFunc(GL_GREATER)
        glClearDepth(0.0)
    else:
        glDepthFunc(GL_LESS)
        glClearDepth(1.0)
    glClear((GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT))
def draw_end(self):
    """Finish a frame: blit color attachments and restore default depth state."""
    # When an intermediate FBO exists, copy each color attachment into it
    # (presumably a multisample resolve so glReadPixels can read it — confirm
    # against where intermediate_fbo is created).
    if (self.intermediate_fbo is not None):
        for i in range(len(self.color_buffer)):
            glBindFramebuffer(GL_READ_FRAMEBUFFER, self.frame_buffer)
            glReadBuffer((GL_COLOR_ATTACHMENT0 + i))
            glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self.intermediate_fbo)
            glDrawBuffer((GL_COLOR_ATTACHMENT0 + i))
            glBlitFramebuffer(0, 0, self.width, self.height, 0, 0, self.width, self.height, GL_COLOR_BUFFER_BIT, GL_NEAREST)
    glBindFramebuffer(GL_FRAMEBUFFER, 0)
    # Undo any inverse-depth configuration applied in draw_init().
    glDepthFunc(GL_LESS)
    glClearDepth(1.0)
def draw(self):
    """Render the currently set mesh with the current matrices into the FBO."""
    self.draw_init()
    glUseProgram(self.program)
    # transpose=GL_FALSE, so the row-major numpy matrices are transposed
    # here to match OpenGL's column-major layout.
    glUniformMatrix4fv(self.model_mat_unif, 1, GL_FALSE, self.model_view_matrix.transpose())
    glUniformMatrix4fv(self.persp_mat_unif, 1, GL_FALSE, self.projection_matrix.transpose())
    glBindBuffer(GL_ARRAY_BUFFER, self.vertex_buffer)
    glEnableVertexAttribArray(0)
    # Attribute 0: vertex positions, tightly packed doubles (stride 0).
    glVertexAttribPointer(0, self.vertex_dim, GL_DOUBLE, GL_FALSE, 0, None)
    glDrawArrays(GL_TRIANGLES, 0, self.n_vertices)
    glDisableVertexAttribArray(0)
    glBindBuffer(GL_ARRAY_BUFFER, 0)
    glUseProgram(0)
    self.draw_end()
def get_color(self, color_id=0):
    """Read back one color attachment as a float image.

    Args:
        color_id: index of the color attachment to read.

    Returns:
        (height, width, channels) float array, flipped vertically so row 0
        is the top of the image.
    """
    source_fbo = self.frame_buffer if self.intermediate_fbo is None else self.intermediate_fbo
    glBindFramebuffer(GL_FRAMEBUFFER, source_fbo)
    glReadBuffer((GL_COLOR_ATTACHMENT0 + color_id))
    pixels = glReadPixels(0, 0, self.width, self.height, GL_RGBA, GL_FLOAT, outputType=None)
    glBindFramebuffer(GL_FRAMEBUFFER, 0)
    image = np.flip(pixels.reshape(self.height, self.width, (- 1)), 0)
    return image
def get_z_value(self):
    """Read the depth buffer back as a (height, width) float array, flipped
    vertically so row 0 is the top of the image."""
    glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer)
    raw_depth = glReadPixels(0, 0, self.width, self.height, GL_DEPTH_COMPONENT, GL_FLOAT, outputType=None)
    glBindFramebuffer(GL_FRAMEBUFFER, 0)
    depth_image = np.flip(raw_depth.reshape(self.height, self.width), 0)
    return depth_image
def display(self):
    """GLUT display callback: render offscreen, then draw the first color
    attachment to the window through the full-screen quad program."""
    self.draw()
    glBindFramebuffer(GL_FRAMEBUFFER, 0)
    glClearColor(1.0, 1.0, 1.0, 0.0)
    glClear(GL_COLOR_BUFFER_BIT)
    glUseProgram(self.quad_program)
    glBindBuffer(GL_ARRAY_BUFFER, self.quad_buffer)
    size_of_double = 8
    # Quad VBO layout: interleaved (x, y, u, v) doubles -> stride of 4 doubles,
    # texcoords offset by 2 doubles.
    glEnableVertexAttribArray(0)
    glVertexAttribPointer(0, 2, GL_DOUBLE, GL_FALSE, (4 * size_of_double), None)
    glEnableVertexAttribArray(1)
    glVertexAttribPointer(1, 2, GL_DOUBLE, GL_FALSE, (4 * size_of_double), c_void_p((2 * size_of_double)))
    glDisable(GL_DEPTH_TEST)
    glActiveTexture(GL_TEXTURE0)
    # screen_texture aliases the color buffers; element 0 is shown on screen.
    glBindTexture(GL_TEXTURE_2D, self.screen_texture[0])
    glUniform1i(glGetUniformLocation(self.quad_program, 'screenTexture'), 0)
    glDrawArrays(GL_TRIANGLES, 0, 6)
    glDisableVertexAttribArray(1)
    glDisableVertexAttribArray(0)
    glEnable(GL_DEPTH_TEST)
    glBindBuffer(GL_ARRAY_BUFFER, 0)
    glUseProgram(0)
    glutSwapBuffers()
    # Request continuous redisplay.
    glutPostRedisplay()
def show(self):
    """Enter the GLUT main loop (blocks; rendering happens in display())."""
    glutMainLoop()
class UploadCommand(BaseUserCommand):
    """Deprecated CLI command: model uploads moved to a git-based workflow."""

    def run(self):
        """Print the deprecation notice in red and abort with a non-zero status."""
        message = 'Deprecated: used to be the way to upload a model to S3. We now use a git-based system for storing models and other artifacts. Use the `repo create` command instead.'
        print(ANSI.red(message))
        exit(1)
# NOTE(review): the bare `_model` line below looks like a truncated decorator
# (probably `@register_model`) — confirm against the original source.
_model
def gluon_resnet152_v1c(pretrained=False, **kwargs):
    """Constructor for the Gluon ResNet-152 v1c variant (deep stem, stem_width 32).

    Args:
        pretrained: load pretrained weights if True.
        **kwargs: forwarded into the model args.
    """
    model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', **kwargs)
    return _create_resnet('gluon_resnet152_v1c', pretrained, **model_args)
class TestSnapshot(TfGraphTestCase):
    """Run a fixture experiment that snapshots every epoch, then verify that
    Snapshotter can load the saved state back."""

    def setup_method(self):
        """Run the fixture experiment into a fresh temp dir before each test."""
        super().setup_method()
        self.temp_dir = tempfile.TemporaryDirectory()
        # snapshot_mode='all' with gap 1 -> one snapshot file per epoch.
        snapshot_config = SnapshotConfig(snapshot_dir=self.temp_dir.name, snapshot_mode='all', snapshot_gap=1)
        fixture_exp(snapshot_config, self.sess)
        # Clear TF graph collections so loading below starts from a clean graph.
        for c in self.graph.collections:
            self.graph.clear_collection(c)

    def teardown_method(self):
        """Delete the temp snapshot directory."""
        self.temp_dir.cleanup()
        super().teardown_method()

    # NOTE(review): the bare '.parametrize(...)' line below is not valid Python
    # on its own — it looks like a truncated '@pytest.mark.parametrize'
    # decorator; confirm against the original source.
    .parametrize('load_mode, last_epoch', [*configurations])
    def test_load(self, load_mode, last_epoch):
        """Loading a snapshot restores algo, env, policy and the epoch counter."""
        snapshotter = Snapshotter()
        saved = snapshotter.load(self.temp_dir.name, load_mode)
        assert isinstance(saved['algo'], VPG)
        assert isinstance(saved['env'], GarageEnv)
        assert isinstance(saved['algo'].policy, CategoricalMLPPolicy)
        assert (saved['stats'].total_epoch == last_epoch)

    def test_load_with_invalid_load_mode(self):
        """An unknown load_mode raises ValueError."""
        snapshotter = Snapshotter()
        with pytest.raises(ValueError):
            snapshotter.load(self.temp_dir.name, 'foo')
class nnUNetTrainerV2_insaneDA(nnUNetTrainerV2):
    """nnU-Net v2 trainer variant with much stronger ("insane") data
    augmentation: wider scale range, elastic deformation enabled, and a
    broader gamma range than the defaults."""

    def setup_DA_params(self):
        """Configure self.data_aug_params and the generator patch sizes based
        on 2D/3D mode, patch anisotropy, and the plan's pooling kernels."""
        # Deep supervision: one output scale per pooling stage; the coarsest
        # scale is dropped ([:-1]).
        self.deep_supervision_scales = ([[1, 1, 1]] + list((list(i) for i in (1 / np.cumprod(np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))))[:(- 1)])
        if self.threeD:
            self.data_aug_params = default_3D_augmentation_params
            # +/- 30 degrees around each axis, expressed in radians.
            self.data_aug_params['rotation_x'] = (((((- 30.0) / 360) * 2.0) * np.pi), (((30.0 / 360) * 2.0) * np.pi))
            self.data_aug_params['rotation_y'] = (((((- 30.0) / 360) * 2.0) * np.pi), (((30.0 / 360) * 2.0) * np.pi))
            self.data_aug_params['rotation_z'] = (((((- 30.0) / 360) * 2.0) * np.pi), (((30.0 / 360) * 2.0) * np.pi))
            if self.do_dummy_2D_aug:
                # Anisotropic data: do in-plane (2D) spatial augmentation only.
                self.data_aug_params['dummy_2D'] = True
                self.print_to_log_file('Using dummy2d data augmentation')
                self.data_aug_params['elastic_deform_alpha'] = default_2D_augmentation_params['elastic_deform_alpha']
                self.data_aug_params['elastic_deform_sigma'] = default_2D_augmentation_params['elastic_deform_sigma']
                self.data_aug_params['rotation_x'] = default_2D_augmentation_params['rotation_x']
        else:
            self.do_dummy_2D_aug = False
            # Elongated 2D patches: allow full +/-180 degree in-plane rotation.
            if ((max(self.patch_size) / min(self.patch_size)) > 1.5):
                default_2D_augmentation_params['rotation_x'] = (((((- 180.0) / 360) * 2.0) * np.pi), (((180.0 / 360) * 2.0) * np.pi))
            self.data_aug_params = default_2D_augmentation_params
        self.data_aug_params['mask_was_used_for_normalization'] = self.use_mask_for_norm
        if self.do_dummy_2D_aug:
            # Generator patch: enlarge in-plane dims for rotation/scaling,
            # keep the through-plane dimension unchanged.
            self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range'])
            self.basic_generator_patch_size = np.array(([self.patch_size[0]] + list(self.basic_generator_patch_size)))
        else:
            self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range'])
        # The "insane" overrides: wider scaling, elastic deformation on,
        # wide gamma range.
        self.data_aug_params['scale_range'] = (0.65, 1.6)
        self.data_aug_params['do_elastic'] = True
        self.data_aug_params['elastic_deform_alpha'] = (0.0, 1300.0)
        self.data_aug_params['elastic_deform_sigma'] = (9.0, 15.0)
        self.data_aug_params['p_eldef'] = 0.2
        self.data_aug_params['selected_seg_channels'] = [0]
        self.data_aug_params['gamma_range'] = (0.6, 2)
        self.data_aug_params['patch_size_for_spatialtransform'] = self.patch_size

    def initialize(self, training=True, force_load_plans=False):
        """Set up loss, data loaders/augmentation, network and optimizer.

        Args:
            training: also build the (insane-DA) train/val generators.
            force_load_plans: reload the plans file even if already loaded.
        """
        if (not self.was_initialized):
            maybe_mkdir_p(self.output_folder)
            if (force_load_plans or (self.plans is None)):
                self.load_plans_file()
            self.process_plans(self.plans)
            self.setup_DA_params()
            # Deep-supervision loss weights: halve per resolution level and
            # zero out the coarsest one, then normalize to sum 1.
            net_numpool = len(self.net_num_pool_op_kernel_sizes)
            weights = np.array([(1 / (2 ** i)) for i in range(net_numpool)])
            mask = np.array([(True if (i < (net_numpool - 1)) else False) for i in range(net_numpool)])
            weights[(~ mask)] = 0
            weights = (weights / weights.sum())
            self.loss = MultipleOutputLoss2(self.loss, weights)
            self.folder_with_preprocessed_data = join(self.dataset_directory, (self.plans['data_identifier'] + ('_stage%d' % self.stage)))
            if training:
                (self.dl_tr, self.dl_val) = self.get_basic_generators()
                if self.unpack_data:
                    print('unpacking dataset')
                    unpack_dataset(self.folder_with_preprocessed_data)
                    print('done')
                else:
                    print('INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you will wait all winter for your model to finish!')
                # This is the variant-specific bit: insane-DA augmentation.
                (self.tr_gen, self.val_gen) = get_insaneDA_augmentation(self.dl_tr, self.dl_val, self.data_aug_params['patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, pin_memory=self.pin_memory)
                self.print_to_log_file(('TRAINING KEYS:\n %s' % str(self.dataset_tr.keys())), also_print_to_console=False)
                self.print_to_log_file(('VALIDATION KEYS:\n %s' % str(self.dataset_val.keys())), also_print_to_console=False)
            else:
                pass
            self.initialize_network()
            self.initialize_optimizer_and_scheduler()
            assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True
def main(args):
    """Entry point: build the config, then either evaluate a checkpoint
    (args.eval_only) or train, printing per-child trainable-parameter counts."""
    cfg = setup(args)
    print(cfg)
    if args.eval_only:
        model = Trainer.build_model(cfg)
        checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        return Trainer.test(cfg, model)
    trainer = Trainer(cfg)
    print('# of layers require gradient:')
    # For each top-level child module, report how many parameters require grad.
    for child_name, _child in trainer.checkpointer.model.named_children():
        grad_flags = np.array([param.requires_grad for param in getattr(trainer.checkpointer.model, child_name).parameters()])
        print(child_name, grad_flags.sum())
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
def get_env(env_str, api_key=None, initialtags=None, poslabels=None, user=None, device=None, threshold=0.6):
    """Environment factory: build an OpenImage or Flickr environment by name.

    Raises:
        NotImplementedError: for any unrecognized env_str.
    """
    if env_str == 'OpenImage':
        return OpenImage(poslabels, initialtags)
    if env_str == 'Flickr':
        return Flicker(api_key, initialtags, user, device, threshold)
    raise NotImplementedError
# NOTE(review): the bare call below looks like a truncated decorator, probably
# `@register_model_architecture(...)` — confirm against the original source.
_model_architecture(model_name='s2spect2_conformer', arch_name='s2spect_conformer_translatotron2')
def s2spect2_conformer_architecture_base_legacy(args):
    """Legacy alias: apply the base s2spect2 conformer architecture defaults to args."""
    s2spect2_conformer_architecture_base(args)
def ndcg(correct_duplicates: List, retrieved_duplicates: List) -> float:
    """Normalized Discounted Cumulative Gain for one retrieval result.

    A retrieved item is relevant (gain 1) iff it appears in
    correct_duplicates; otherwise its gain is 0. Both lists empty -> 1.0;
    exactly one empty -> 0.0.
    """
    if not retrieved_duplicates and not correct_duplicates:
        return 1.0
    if not retrieved_duplicates or not correct_duplicates:
        return 0.0

    def _dcg(gains):
        # Standard DCG: sum_k (2^g_k - 1) / log2(k + 2), with k 0-based.
        terms = [((2 ** g) - 1) / np.log2(pos + 2) for pos, g in enumerate(gains)]
        return np.sum(terms)

    relevance = np.array([1 if item in correct_duplicates else 0 for item in retrieved_duplicates])
    dcg_at_k = _dcg(relevance)
    if dcg_at_k == 0:
        return 0.0
    # Ideal DCG: same gains sorted best-first.
    ideal_dcg = _dcg(sorted(relevance, reverse=True))
    return dcg_at_k / ideal_dcg
# NOTE(review): the bare ('word') expression below looks like a truncated
# decorator (probably `@Tokenizer.register('word')`) — confirm against the
# original source.
('word')
class WordTokenizer(Tokenizer):
    """Tokenizer that splits text into words, then filters and stems them,
    optionally wrapping the result with start/end sentinel tokens."""

    def __init__(self, word_splitter: WordSplitter=None, word_filter: WordFilter=PassThroughWordFilter(), word_stemmer: WordStemmer=PassThroughWordStemmer(), start_tokens: List[str]=None, end_tokens: List[str]=None) -> None:
        self._word_splitter = (word_splitter or SpacyWordSplitter())
        self._word_filter = word_filter
        self._word_stemmer = word_stemmer
        self._start_tokens = (start_tokens or [])
        # Reversed so that repeated insert(0, ...) in _filter_and_stem
        # preserves the caller's original ordering.
        self._start_tokens.reverse()
        self._end_tokens = (end_tokens or [])

    def tokenize(self, text: str) -> List[Token]:
        """Split, filter and stem a single string into Tokens."""
        words = self._word_splitter.split_words(text)
        return self._filter_and_stem(words)

    def batch_tokenize(self, texts: List[str]) -> List[List[Token]]:
        """Tokenize many strings at once via the splitter's batch API."""
        batched_words = self._word_splitter.batch_split_words(texts)
        return [self._filter_and_stem(words) for words in batched_words]

    def _filter_and_stem(self, words: List[Token]) -> List[Token]:
        """Apply the filter and stemmer, then add the start/end tokens."""
        filtered_words = self._word_filter.filter_words(words)
        stemmed_words = [self._word_stemmer.stem_word(word) for word in filtered_words]
        for start_token in self._start_tokens:
            stemmed_words.insert(0, Token(start_token, 0))
        for end_token in self._end_tokens:
            stemmed_words.append(Token(end_token, (- 1)))
        return stemmed_words
def createModel(input_data, input_size, sequence_length, slots, slot_size, intent_size, layer_size=128, isTraining=True):
    """Build an SF-ID-style joint slot-filling / intent-detection graph (TF1).

    A BiLSTM encodes the input; slot and intent attention vectors are
    computed, then refined for arg.iteration_num rounds in the order given
    by arg.priority_order ('intent_first' or slot-first). Relies on the
    module-level globals `arg` and `remove_slot_attn`.

    Returns:
        [slot_logits, intent_logits]
    """
    # Bidirectional LSTM encoder; dropout only while training.
    cell_fw = tf.contrib.rnn.BasicLSTMCell(layer_size)
    cell_bw = tf.contrib.rnn.BasicLSTMCell(layer_size)
    if (isTraining == True):
        cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, input_keep_prob=0.5, output_keep_prob=0.5)
        cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, input_keep_prob=0.5, output_keep_prob=0.5)
    if arg.embedding_path:
        # Pre-trained embedding matrix loaded from disk.
        embedding_weight = np.load(arg.embedding_path)
        embedding = tf.Variable(embedding_weight, name='embedding', dtype=tf.float32)
    else:
        embedding = tf.get_variable('embedding', [input_size, layer_size])
    inputs = tf.nn.embedding_lookup(embedding, input_data)
    (state_outputs, final_state) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=sequence_length, dtype=tf.float32)
    # Concatenate fw/bw final (c, h) states for the intent branch, and
    # fw/bw per-step outputs for the slot branch.
    final_state = tf.concat([final_state[0][0], final_state[0][1], final_state[1][0], final_state[1][1]], 1)
    state_outputs = tf.concat([state_outputs[0], state_outputs[1]], 2)
    state_shape = state_outputs.get_shape()
    with tf.variable_scope('attention'):
        slot_inputs = state_outputs
        if (not remove_slot_attn):
            # Per-position self-attention over encoder outputs for slots.
            with tf.variable_scope('slot_attn'):
                attn_size = state_shape[2].value
                origin_shape = tf.shape(state_outputs)
                hidden = tf.expand_dims(state_outputs, 1)
                hidden_conv = tf.expand_dims(state_outputs, 2)
                # 1x1 conv == position-wise linear map of the keys.
                k = tf.get_variable('AttnW', [1, 1, attn_size, attn_size])
                hidden_features = tf.nn.conv2d(hidden_conv, k, [1, 1, 1, 1], 'SAME')
                hidden_features = tf.reshape(hidden_features, origin_shape)
                hidden_features = tf.expand_dims(hidden_features, 1)
                v = tf.get_variable('AttnV', [attn_size])
                slot_inputs_shape = tf.shape(slot_inputs)
                slot_inputs = tf.reshape(slot_inputs, [(- 1), attn_size])
                y = core_rnn_cell._linear(slot_inputs, attn_size, True)
                y = tf.reshape(y, slot_inputs_shape)
                y = tf.expand_dims(y, 2)
                # Additive (Bahdanau-style) attention scores.
                s = tf.reduce_sum((v * tf.tanh((hidden_features + y))), [3])
                a = tf.nn.softmax(s)
                a = tf.expand_dims(a, (- 1))
                slot_d = tf.reduce_sum((a * hidden), [2])
                slot_reinforce_state = tf.expand_dims(slot_d, 2)
        else:
            # Slot attention disabled: use encoder outputs directly.
            attn_size = state_shape[2].value
            slot_d = slot_inputs
            slot_reinforce_state = tf.expand_dims(slot_inputs, 2)
            slot_inputs = tf.reshape(slot_inputs, [(- 1), attn_size])
        intent_input = final_state
        # Intent attention: one context vector over all time steps.
        with tf.variable_scope('intent_attn'):
            attn_size = state_shape[2].value
            hidden = tf.expand_dims(state_outputs, 2)
            k = tf.get_variable('AttnW', [1, 1, attn_size, attn_size])
            hidden_features = tf.nn.conv2d(hidden, k, [1, 1, 1, 1], 'SAME')
            v = tf.get_variable('AttnV', [attn_size])
            y = core_rnn_cell._linear(intent_input, attn_size, True)
            y = tf.reshape(y, [(- 1), 1, 1, attn_size])
            s = tf.reduce_sum((v * tf.tanh((hidden_features + y))), [2, 3])
            a = tf.nn.softmax(s)
            a = tf.expand_dims(a, (- 1))
            a = tf.expand_dims(a, (- 1))
            d = tf.reduce_sum((a * hidden), [1, 2])
            r_intent = d
            intent_context_states = d
    if (arg.priority_order == 'intent_first'):
        # Iterative refinement: update the intent with slot information first,
        # then gate the slot representation with the refined intent.
        for n in range(arg.iteration_num):
            with tf.variable_scope(('intent_subnet' + str((n - 1)))):
                attn_size = state_shape[2].value
                hidden = tf.expand_dims(state_outputs, 2)
                k1 = tf.get_variable('W1', [1, 1, attn_size, attn_size])
                k2 = tf.get_variable('W2', [1, 1, attn_size, attn_size])
                slot_reinforce_features = tf.nn.conv2d(slot_reinforce_state, k1, [1, 1, 1, 1], 'SAME')
                hidden_features = tf.nn.conv2d(hidden, k2, [1, 1, 1, 1], 'SAME')
                v1 = tf.get_variable('AttnV', [attn_size])
                bias = tf.get_variable('Bias', [attn_size])
                s = tf.reduce_sum((v1 * tf.tanh(((hidden_features + slot_reinforce_features) + bias))), [2, 3])
                a = tf.nn.softmax(s)
                a = tf.expand_dims(a, (- 1))
                a = tf.expand_dims(a, (- 1))
                r = tf.reduce_sum((a * slot_reinforce_state), [1, 2])
                r_intent = (r + intent_context_states)
                intent_output = tf.concat([r_intent, intent_input], 1)
            with tf.variable_scope(('slot_subnet' + str((n - 1)))):
                # Gate slot features by the refined intent vector.
                intent_gate = core_rnn_cell._linear(r_intent, attn_size, True)
                intent_gate = tf.reshape(intent_gate, [(- 1), 1, intent_gate.get_shape()[1].value])
                v1 = tf.get_variable('gateV', [attn_size])
                relation_factor = (v1 * tf.tanh((slot_d + intent_gate)))
                relation_factor = tf.reduce_sum(relation_factor, [2])
                relation_factor = tf.expand_dims(relation_factor, (- 1))
                slot_reinforce_state1 = (slot_d * relation_factor)
                slot_reinforce_state = tf.expand_dims(slot_reinforce_state1, 2)
                slot_reinforce_vector = tf.reshape(slot_reinforce_state1, [(- 1), attn_size])
                slot_output = tf.concat([slot_reinforce_vector, slot_inputs], 1)
    else:
        # Slot-first refinement order.
        for n in range(arg.iteration_num):
            with tf.variable_scope(('slot_subnet' + str((n - 1)))):
                intent_gate = core_rnn_cell._linear(r_intent, attn_size, True)
                intent_gate = tf.reshape(intent_gate, [(- 1), 1, intent_gate.get_shape()[1].value])
                v1 = tf.get_variable('gateV', [attn_size])
                relation_factor = (v1 * tf.tanh((slot_d + intent_gate)))
                relation_factor = tf.reduce_sum(relation_factor, [2])
                relation_factor = tf.expand_dims(relation_factor, (- 1))
                slot_reinforce_state = (slot_d * relation_factor)
                slot_reinforce_vector = tf.reshape(slot_reinforce_state, [(- 1), attn_size])
                slot_output = tf.concat([slot_reinforce_vector, slot_inputs], 1)
            with tf.variable_scope(('intent_subnet' + str((n - 1)))):
                attn_size = state_shape[2].value
                hidden = tf.expand_dims(state_outputs, 2)
                slot_reinforce_output = tf.expand_dims(slot_reinforce_state, 2)
                k1 = tf.get_variable('W1', [1, 1, attn_size, attn_size])
                k2 = tf.get_variable('W2', [1, 1, attn_size, attn_size])
                slot_features = tf.nn.conv2d(slot_reinforce_output, k1, [1, 1, 1, 1], 'SAME')
                hidden_features = tf.nn.conv2d(hidden, k2, [1, 1, 1, 1], 'SAME')
                v1 = tf.get_variable('AttnV', [attn_size])
                bias = tf.get_variable('Bias', [attn_size])
                s = tf.reduce_sum((v1 * tf.tanh(((hidden_features + slot_features) + bias))), [2, 3])
                a = tf.nn.softmax(s)
                a = tf.expand_dims(a, (- 1))
                a = tf.expand_dims(a, (- 1))
                r = tf.reduce_sum((a * slot_reinforce_output), [1, 2])
                r_intent = (r + intent_context_states)
                intent_output = tf.concat([r_intent, intent_input], 1)
    # Final linear projections to label spaces.
    with tf.variable_scope('intent_proj'):
        intent = core_rnn_cell._linear(intent_output, intent_size, True)
    with tf.variable_scope('slot_proj'):
        slot = core_rnn_cell._linear(slot_output, slot_size, True)
    if arg.use_crf:
        # CRF decoding needs [batch, time, slot_size] logits.
        nstep = tf.shape(state_outputs)[1]
        slot = tf.reshape(slot, [(- 1), nstep, slot_size])
    outputs = [slot, intent]
    return outputs
def fuse_depth_map(frame, prev_keyframe):
    """Fuse the frame's per-pixel depth (D) and uncertainty (U) maps with the
    previous keyframe, in place, and return the updated maps.

    `actual_fuse` is applied per pixel via np.vectorize over the module-level
    `index_matrix` (frame and prev_keyframe are excluded from vectorization).
    NOTE(review): `index_matrix`, `im_size` and `actual_fuse` are globals
    defined elsewhere — presumably pixel coordinates, the image shape, and the
    per-pixel fusion rule; confirm at their definitions.
    """
    actual_fuse_v = np.vectorize(actual_fuse, signature='(1)->(),()', excluded=[1, 2])
    (D, U) = actual_fuse_v(index_matrix, frame, prev_keyframe)
    frame.D = np.reshape(D, im_size)
    frame.U = np.reshape(U, im_size)
    return (frame.D, frame.U)
# NOTE(review): the bare `_pipeline_test` line below looks like a truncated
# decorator (probably `@is_pipeline_test`) — confirm against the original source.
_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    """Tests for the text2text-generation pipeline (PyTorch and TF backends)."""
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build the pipeline plus sample inputs for the generic pipeline test."""
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return (generator, ['Something to write', 'Something else'])

    def run_pipeline_test(self, generator, _):
        """Generic checks: output typing, multi-sequence/batched calls, bad input."""
        outputs = generator('Something there')
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        # text2text models generate fresh text; they do not echo the prompt.
        self.assertFalse(outputs[0]['generated_text'].startswith('Something there'))
        outputs = generator(['This is great !', 'Something else'], num_return_sequences=2, do_sample=True)
        self.assertEqual(outputs, [[{'generated_text': ANY(str)}, {'generated_text': ANY(str)}], [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}]])
        outputs = generator(['This is great !', 'Something else'], num_return_sequences=2, batch_size=2, do_sample=True)
        self.assertEqual(outputs, [[{'generated_text': ANY(str)}, {'generated_text': ANY(str)}], [{'generated_text': ANY(str)}, {'generated_text': ANY(str)}]])
        # Non-string input must be rejected.
        with self.assertRaises(ValueError):
            generator(4)

    # NOTE(review): `_torch` below looks like a truncated `@require_torch`
    # decorator — confirm against the original source.
    _torch
    def test_small_model_pt(self):
        """Deterministic outputs of a tiny random T5 under the PyTorch backend."""
        generator = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='pt')
        outputs = generator('Something there', do_sample=False)
        self.assertEqual(outputs, [{'generated_text': ''}])
        num_return_sequences = 3
        outputs = generator('Something there', num_return_sequences=num_return_sequences, num_beams=num_return_sequences)
        target_outputs = [{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': ''}]
        self.assertEqual(outputs, target_outputs)
        outputs = generator('This is a test', do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(outputs, [{'generated_token_ids': ANY(torch.Tensor)}, {'generated_token_ids': ANY(torch.Tensor)}])
        # Set a pad token so batched sampling below can pad the inputs.
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = '<pad>'
        outputs = generator(['This is a test', 'This is a second test'], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True)
        self.assertEqual(outputs, [[{'generated_token_ids': ANY(torch.Tensor)}, {'generated_token_ids': ANY(torch.Tensor)}], [{'generated_token_ids': ANY(torch.Tensor)}, {'generated_token_ids': ANY(torch.Tensor)}]])

    # NOTE(review): `_tf` below looks like a truncated `@require_tf` decorator
    # — confirm against the original source.
    _tf
    def test_small_model_tf(self):
        """Same tiny model under the TensorFlow backend."""
        generator = pipeline('text2text-generation', model='patrickvonplaten/t5-tiny-random', framework='tf')
        outputs = generator('Something there', do_sample=False)
        self.assertEqual(outputs, [{'generated_text': ''}])
class ExponentialScheduler(LinearScheduler):
    """Schedule a value that moves exponentially from start_value to end_value
    by interpolating linearly in log-space (base `base`)."""

    def __init__(self, start_value, end_value, n_iterations, start_iteration=0, base=10):
        self.base = base
        # Interpolate between the logarithms; __call__ exponentiates back.
        log_start = math.log(start_value, base)
        log_end = math.log(end_value, base)
        super(ExponentialScheduler, self).__init__(start_value=log_start, end_value=log_end, n_iterations=n_iterations, start_iteration=start_iteration)

    def __call__(self, iteration):
        """Return base ** (linearly interpolated exponent at `iteration`)."""
        exponent = super(ExponentialScheduler, self).__call__(iteration)
        return (self.base ** exponent)
class SSIterator(object):
    """Threaded iterator over a pickled dialogue corpus.

    NOTE(review): this is Python 2 code (cPickle, Queue, next() protocol,
    text-mode pickle read). A background SSFetcher thread (defined elsewhere)
    fills self.queue with batches; iteration ends when a falsy sentinel is
    dequeued.
    """

    def __init__(self, dialogue_file, batch_size, seed, max_len=(- 1), use_infinite_loop=True, dtype='int32'):
        self.dialogue_file = dialogue_file
        self.batch_size = batch_size
        # Store every constructor argument as an attribute in one shot.
        args = locals()
        args.pop('self')
        self.__dict__.update(args)
        self.load_files()
        self.exit_flag = False

    def load_files(self):
        """Load the pickled dialogue data into memory."""
        self.data = cPickle.load(open(self.dialogue_file, 'r'))
        self.data_len = len(self.data)
        logger.debug(('Data len is %d' % self.data_len))

    def start(self):
        """Start the daemon fetcher thread that feeds self.queue."""
        self.exit_flag = False
        self.queue = Queue.Queue(maxsize=1000)
        self.gather = SSFetcher(self)
        self.gather.daemon = True
        self.gather.start()

    def __del__(self):
        # On garbage collection, signal the fetcher to stop and wait for it.
        if hasattr(self, 'gather'):
            self.gather.exitFlag = True
            self.gather.join()

    def __iter__(self):
        return self

    def next(self):
        """Return the next batch; a falsy batch marks the end of iteration
        and subsequent calls return None."""
        if self.exit_flag:
            return None
        batch = self.queue.get()
        if (not batch):
            self.exit_flag = True
        return batch
def _mel_to_linear_matrix(sr, n_fft, n_mels):
    """Build a matrix that approximately inverts the mel filterbank
    (maps mel-spectrogram bins back to linear frequency bins)."""
    mel_basis = librosa.filters.mel(sr, n_fft, n_mels)
    mel_basis_t = np.transpose(mel_basis)
    gram = np.matmul(mel_basis, mel_basis_t)
    # Normalize columns of the Gram matrix; near-zero sums are left untouched
    # to avoid division blow-up.
    column_sums = np.sum(gram, axis=0)
    scale = [x if (np.abs(x) <= 1e-08) else (1.0 / x) for x in column_sums]
    return np.matmul(mel_basis_t, np.diag(scale))
class ExampleModel(nn.Module):
    """Minimal stand-in model: registers a single 3x3 conv so the module has
    parameters, but forward() simply echoes its input batch."""

    def __init__(self):
        super().__init__()
        self.test_cfg = None
        self.conv = nn.Conv2d(3, 3, 3)

    def forward(self, img, img_metas, return_loss=False, **kwargs):
        # Identity pass-through; img_metas / return_loss only exist for API parity.
        return img
class MultiHeadAttention(nn.Module):
    """Multi-head attention pooling with a single learned query per head.

    Collapses a sequence of shape (B, T, C) into one vector of shape (B, C):
    each of the n_head learned query vectors attends over the T timesteps,
    and the per-head pooled values are concatenated back to C channels.

    Args:
        n_head: number of attention heads (must divide the input channels C).
        d_k:    per-head key/query dimension.
        d_in:   input channel dimension C of the sequences fed to forward().
    """

    def __init__(self, n_head, d_k, d_in):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_in = d_in
        # Keys are projected from the input; the query is a free parameter.
        self.key = nn.Linear(d_in, (n_head * d_k))
        self.query = nn.Parameter(torch.zeros(n_head, d_k)).requires_grad_(True)
        nn.init.normal_(self.query, mean=0, std=np.sqrt((2.0 / d_k)))
        self.temperature = np.power(d_k, 0.5)  # sqrt(d_k) attention scaling
        self.dropout = nn.Dropout(0.1)
        self.softmax = nn.Softmax(dim=(- 1))

    def forward(self, x):
        """Pool the sequence x of shape (B, T, C).

        Returns:
            y:   (B, C) pooled representation.
            att: (B, n_head, 1, T) attention weights (post-dropout).
        """
        (B, T, C) = x.size()
        q = self.query.repeat(B, 1, 1, 1).transpose(1, 2)                       # (B, n_head, 1, d_k)
        k = self.key(x).view(B, T, self.n_head, self.d_k).transpose(1, 2)       # (B, n_head, T, d_k)
        v = x.view(B, T, self.n_head, (C // self.n_head)).transpose(1, 2)       # (B, n_head, T, C/n_head)
        # BUG FIX: the matrix-multiplication operators had been stripped
        # ("q k.transpose(...)" and "(att v)" were syntax errors); restore `@`.
        att = (q @ k.transpose((- 2), (- 1))) / self.temperature
        att = self.softmax(att)
        att = self.dropout(att)
        y = att @ v                                                             # (B, n_head, 1, C/n_head)
        y = y.transpose(1, 2).contiguous().view(B, C)
        return (y, att)
def _trim(image):
    """Crop away any uniform border whose color matches the top-left pixel.

    Returns the cropped image, or the original image unchanged when no
    non-background content is found.
    """
    corner_color = image.getpixel((0, 0))
    background = PIL.Image.new(image.mode, image.size, corner_color)
    # Boost the difference (scale 2.0, offset -100) so near-background noise
    # is ignored by getbbox().
    delta = PIL.ImageChops.difference(image, background)
    delta = PIL.ImageChops.add(delta, delta, 2.0, (- 100))
    content_box = delta.getbbox()
    return image.crop(content_box) if content_box else image
# NOTE(review): the bare call below looks like a truncated decorator line
# (probably `@<something>_registry(operator_type='PositionIds')`) — confirm
# against the original source.
_registry(operator_type='PositionIds')
class PositionIds(Operator):
    """Operator implementation for 'PositionIds' nodes; no state beyond the
    Operator base class."""

    def __init__(self):
        super().__init__()
class Dictionary(object):
    """Word <-> id mapping with word counts, used for indexing corpora.

    Layout invariant: ids 0..3 are BOS/EOS/PAD/UNK, ids 4..4+SPECIAL_WORDS-1
    are the SPECIAL_WORD placeholders, and the remaining words are ordered by
    non-increasing frequency.
    """

    def __init__(self, id2word, word2id, counts):
        """id2word: dict id->word; word2id: dict word->id; counts: word->freq."""
        assert (len(id2word) == len(word2id) == len(counts))
        self.id2word = id2word
        self.word2id = word2id
        self.counts = counts
        self.bos_index = word2id[BOS_WORD]
        self.eos_index = word2id[EOS_WORD]
        self.pad_index = word2id[PAD_WORD]
        self.unk_index = word2id[UNK_WORD]
        self.check_valid()

    def __len__(self):
        """Number of words in the dictionary."""
        return len(self.id2word)

    def __getitem__(self, i):
        """Return the word with id i."""
        return self.id2word[i]

    def __contains__(self, w):
        """True iff word w is in the vocabulary."""
        return (w in self.word2id)

    def __eq__(self, y):
        """Dictionaries are equal iff they map the same ids to the same words."""
        self.check_valid()
        y.check_valid()
        if (len(self.id2word) != len(y)):
            return False
        return all(((self.id2word[i] == y[i]) for i in range(len(y))))

    def check_valid(self):
        """Assert every layout invariant (special ids, bijection, sorted counts)."""
        assert (self.bos_index == 0)
        assert (self.eos_index == 1)
        assert (self.pad_index == 2)
        assert (self.unk_index == 3)
        assert all(((self.id2word[(4 + i)] == (SPECIAL_WORD % i)) for i in range(SPECIAL_WORDS)))
        assert (len(self.id2word) == len(self.word2id) == len(self.counts))
        assert (set(self.word2id.keys()) == set(self.counts.keys()))
        # word2id must be the exact inverse of id2word.
        for i in range(len(self.id2word)):
            assert (self.word2id[self.id2word[i]] == i)
        # Non-special words must appear in non-increasing frequency order.
        last_count = 1e+18
        for i in range((4 + SPECIAL_WORDS), (len(self.id2word) - 1)):
            count = self.counts[self.id2word[i]]
            assert (count <= last_count)
            last_count = count

    def index(self, word, no_unk=False):
        """Return the id of `word`; OOV maps to unk_index unless no_unk
        (in which case a KeyError propagates)."""
        if no_unk:
            return self.word2id[word]
        else:
            return self.word2id.get(word, self.unk_index)

    def max_vocab(self, max_vocab):
        """Truncate in place to the words with id < max_vocab (i.e. the most
        frequent ones, given the sorted layout)."""
        assert (max_vocab >= 1)
        init_size = len(self)
        self.id2word = {k: v for (k, v) in self.id2word.items() if (k < max_vocab)}
        self.word2id = {v: k for (k, v) in self.id2word.items()}
        self.counts = {k: v for (k, v) in self.counts.items() if (k in self.word2id)}
        self.check_valid()
        logger.info(('Maximum vocabulary size: %i. Dictionary size: %i -> %i (removed %i words).' % (max_vocab, init_size, len(self), (init_size - len(self)))))

    def min_count(self, min_count):
        """Drop in place every non-special word rarer than min_count."""
        assert (min_count >= 0)
        init_size = len(self)
        self.id2word = {k: v for (k, v) in self.id2word.items() if ((self.counts[self.id2word[k]] >= min_count) or (k < (4 + SPECIAL_WORDS)))}
        self.word2id = {v: k for (k, v) in self.id2word.items()}
        self.counts = {k: v for (k, v) in self.counts.items() if (k in self.word2id)}
        self.check_valid()
        logger.info(('Minimum frequency count: %i. Dictionary size: %i -> %i (removed %i words).' % (min_count, init_size, len(self), (init_size - len(self)))))
def read_vocab(vocab_path):
    """Build a Dictionary from a vocabulary file.

    Each line must be "word count", with words sorted by decreasing count.
    Special tokens (BOS/EOS/PAD/UNK + SPECIAL_WORDS placeholders) take the
    first ids. Malformed lines (wrong field count, U+2028 separators,
    duplicate words, non-numeric counts) are skipped and tallied.

    Fix: the original asserted `len(line) == 2` and `line[1].isdigit()`
    immediately before the graceful skip branches, so any non-numeric count
    crashed instead of being skipped; the redundant asserts are removed so
    the skip logic is actually reachable.

    Returns:
        Dictionary built from the file contents.
    """
    skipped = 0
    assert os.path.isfile(vocab_path), vocab_path
    word2id = {BOS_WORD: 0, EOS_WORD: 1, PAD_WORD: 2, UNK_WORD: 3}
    for i in range(SPECIAL_WORDS):
        word2id[(SPECIAL_WORD % i)] = (4 + i)
    counts = {k: 0 for k in word2id.keys()}
    f = codecs.open(vocab_path, 'r', encoding='utf-8', errors='ignore')
    for (i, line) in enumerate(f):
        # U+2028 (Unicode line separator) breaks downstream line handling: skip.
        if ('\u2028' in line):
            skipped += 1
            continue
        line = line.rstrip().split()
        if (len(line) != 2):
            skipped += 1
            continue
        if (line[0] in word2id):
            skipped += 1
            print(('%s already in vocab' % line[0]))
            continue
        if (not line[1].isdigit()):
            skipped += 1
            print(('Empty word at line %s with count %s' % (i, line)))
            continue
        # Ids continue after the reserved special ids, compacted over skips.
        word2id[line[0]] = (((4 + SPECIAL_WORDS) + i) - skipped)
        counts[line[0]] = int(line[1])
    f.close()
    id2word = {v: k for (k, v) in word2id.items()}
    dico = Dictionary(id2word, word2id, counts)
    logger.info(('Read %i words from the vocabulary file.' % len(dico)))
    if (skipped > 0):
        logger.warning(('Skipped %i empty lines!' % skipped))
    return dico
def index_data(path, bin_path, dico):
    """Index a tokenized text file with dictionary `dico`.

    If bin_path already exists, load and return the cached result (asserting
    the cached dictionary matches). Otherwise build:
      - positions: int64 array of [start, end) offsets per sentence
      - sentences: flat uint16/int32 array of word ids, with EOS (id 1)
        appended after each sentence
      - unk_words: dict word -> number of times it mapped to UNK
    and optionally save the result to bin_path.
    """
    if ((bin_path is not None) and os.path.isfile(bin_path)):
        print(('Loading data from %s ...' % bin_path))
        data = torch.load(bin_path)
        assert (dico == data['dico'])
        return data
    positions = []
    sentences = []
    unk_words = {}
    f = codecs.open(path, 'r', encoding='utf-8', errors='ignore')
    for (i, line) in enumerate(f):
        # Progress indicator every million lines.
        if (((i % 1000000) == 0) and (i > 0)):
            print(i)
        s = line.rstrip().split()
        if (len(s) == 0):
            print(('Empty sentence in line %i.' % i))
        count_unk = 0
        indexed = []
        for w in s:
            word_id = dico.index(w, no_unk=False)
            # Skip unexpected special tokens appearing in raw text
            # (UNK, id 3, is allowed through).
            if ((0 <= word_id < (4 + SPECIAL_WORDS)) and (word_id != 3)):
                logger.warning(('Found unexpected special word "%s" (%i)!!' % (w, word_id)))
                continue
            assert (word_id >= 0)
            indexed.append(word_id)
            if (word_id == dico.unk_index):
                unk_words[w] = (unk_words.get(w, 0) + 1)
                count_unk += 1
        positions.append([len(sentences), (len(sentences) + len(indexed))])
        sentences.extend(indexed)
        # EOS index (1) terminates every sentence.
        sentences.append(1)
    f.close()
    positions = np.int64(positions)
    # Use the smallest integer dtype that can hold every word id.
    if (len(dico) < (1 << 16)):
        sentences = np.uint16(sentences)
    elif (len(dico) < (1 << 31)):
        sentences = np.int32(sentences)
    else:
        raise Exception('Dictionary is too big.')
    assert (sentences.min() >= 0)
    data = {'dico': dico, 'positions': positions, 'sentences': sentences, 'unk_words': unk_words}
    if (bin_path is not None):
        print(('Saving the data to %s ...' % bin_path))
        torch.save(data, bin_path, pickle_protocol=4)
    return data
def video2frames(vid_path, out_dir):
    """Extract every frame of a video into out_dir as frame_%08d.png via ffmpeg.

    A no-op when out_dir already contains PNG frames. Returns out_dir.
    """
    global default_ffmpeg_vcodec, default_ffmpeg_pix_fmt, default_ffmpeg_exe_path
    # The ffmpeg binary path can be overridden through the environment.
    ffmpeg_exc_path = os.environ.get('ffmpeg_exe_path', default_ffmpeg_exe_path)
    existing_frames = glob.glob(os.path.join(out_dir, '*.png'))
    if len(existing_frames) > 0:
        print('Writing frames to file: done!')
        return out_dir
    print('{} Writing frames to file'.format(vid_path))
    cmd = [ffmpeg_exc_path, '-i', vid_path, '-start_number', '0', '{temp_dir}/frame_%08d.png'.format(temp_dir=out_dir)]
    print(' '.join(cmd))
    subprocess.call(cmd)
    return out_dir
def add_mim_extention():
    """Copy or symlink tools/configs/demo/model-index.yml into mmedit/.mim.

    Only acts for `develop` (symlink) or `sdist`/`bdist_wheel` (copy) setup
    commands; otherwise it is a no-op. Falls back from symlink to copy on
    platforms where creating symlinks fails (e.g. Windows without privileges).
    """
    if ('develop' in sys.argv):
        # Editable install: symlink so in-repo edits are picked up live.
        mode = 'symlink'
    elif (('sdist' in sys.argv) or ('bdist_wheel' in sys.argv)):
        mode = 'copy'
    else:
        return
    filenames = ['tools', 'configs', 'demo', 'model-index.yml']
    repo_path = osp.dirname(__file__)
    mim_path = osp.join(repo_path, 'mmedit', '.mim')
    os.makedirs(mim_path, exist_ok=True)
    for filename in filenames:
        if osp.exists(filename):
            src_path = osp.join(repo_path, filename)
            tar_path = osp.join(mim_path, filename)
            # Remove any stale file/link/directory at the target first.
            if (osp.isfile(tar_path) or osp.islink(tar_path)):
                os.remove(tar_path)
            elif osp.isdir(tar_path):
                shutil.rmtree(tar_path)
            if (mode == 'symlink'):
                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
                try:
                    os.symlink(src_relpath, tar_path)
                except OSError:
                    # Symlink unsupported: switch to copy for this and all
                    # remaining entries.
                    mode = 'copy'
                    warnings.warn(f'Failed to create a symbolic link for {src_relpath}, and it will be copied to {tar_path}')
                else:
                    continue
            if (mode == 'copy'):
                if osp.isfile(src_path):
                    shutil.copyfile(src_path, tar_path)
                elif osp.isdir(src_path):
                    shutil.copytree(src_path, tar_path)
                else:
                    warnings.warn(f'Cannot copy file {src_path}.')
            else:
                raise ValueError(f'Invalid mode {mode}')
@torch.no_grad()  # restored: the bare `_grad()` line was a corrupted decorator
def eval_model(interpolation_net, BFrameCompressor: nn.Module, IFrameCompressor: nn.Module, sequence: Path, binpath: Path, **args: Any) -> Dict[(str, Any)]:
    """Compress a raw YUV420 sequence to ``binpath`` and return averaged metrics.

    Key (I) frames — every ``GOP``-th frame — are coded with
    ``IFrameCompressor``; the frames in between are coded with
    ``BFrameCompressor``, conditioned either on an interpolation of the two
    reconstructed key frames (``with_interpolation``) or on their channel
    concatenation.

    Args:
        interpolation_net: frame-interpolation network, used when
            ``with_interpolation`` is set with a fixed GOP structure.
        BFrameCompressor: codec for intermediate (B) frames.
        IFrameCompressor: codec for key (I) frames.
        sequence: path to the raw ``.yuv`` input.
        binpath: output path for the produced bitstream.
        **args: expects ``keep_binaries``, ``vframes``, ``GOP``,
            ``frame_arbitrary``, ``with_interpolation``, ``intra``.

    Returns:
        Dict of per-sequence averaged metrics plus ``bitrate`` (kbps)
        and ``bpp``.
    """
    import time
    org_seq = RawVideoSequence.from_file(str(sequence))
    if (org_seq.format != VideoFormat.YUV420):
        raise NotImplementedError(f'Unsupported video format: {org_seq.format}')
    device = next(BFrameCompressor.parameters()).device
    max_val = ((2 ** org_seq.bitdepth) - 1)
    results = defaultdict(list)
    keep_binaries = args['keep_binaries']
    num_frames = args['vframes']
    num_gop = args['GOP']
    frame_arbitrary = args['frame_arbitrary']
    with_interpolation = args['with_interpolation']
    num_pixels = (org_seq.height * org_seq.width)
    print('frame rate:', org_seq.framerate)
    intra = args['intra']
    if (with_interpolation and (not frame_arbitrary)):
        # Fixed (hierarchical) coding order within each GOP, plus the
        # left/right reference indices of every interpolated frame.
        (frames_idx_list, ref_idx_dict) = specific_frame_structure(num_gop)
    reconstructions = []
    f = binpath.open('wb')
    print(f' encoding {sequence.stem}', file=sys.stderr)
    # Bitstream header: dimensions, bit depth, frame count.
    write_uints(f, (org_seq.height, org_seq.width))
    write_uchars(f, (org_seq.bitdepth,))
    write_uints(f, (num_frames,))
    with tqdm(total=num_frames) as pbar:
        for i in range(num_frames):
            x_cur = convert_yuv420_to_rgb(org_seq[i], device, max_val)
            (x_cur, padding) = pad(x_cur)
            if ((i % num_gop) == 0):
                # --- key (I) frame ---
                start = time.time()
                enc_info = IFrameCompressor.compress(x_cur)
                enc_time = (time.time() - start)
                write_body(f, enc_info['shape'], enc_info['strings'])
                start = time.time()
                x_rec = IFrameCompressor.decompress(enc_info['strings'], enc_info['shape'])['x_hat']
                dec_time = (time.time() - start)
                first_rec = x_rec
                # Also reconstruct the NEXT key frame so B frames of this GOP
                # can reference both ends.
                # NOTE(review): org_seq[i + num_gop] reads past the end on the
                # last GOP unless the caller leaves one extra key frame
                # available — confirm against the caller.
                last_key_frame = convert_yuv420_to_rgb(org_seq[(i + num_gop)], device, max_val)
                (last_key_frame, _) = pad(last_key_frame)
                last_enc_info = IFrameCompressor.compress(last_key_frame)
                last_x_rec = IFrameCompressor.decompress(last_enc_info['strings'], last_enc_info['shape'])['x_hat']
                reconstructions = []
                reconstructions.append(x_rec)
            elif with_interpolation:
                # --- B frame, hierarchical order, interpolated conditioning ---
                cur_interpolation_idx = frames_idx_list[((i % num_gop) - 1)]
                (left_ref_idx, right_ref_idx) = ref_idx_dict[cur_interpolation_idx]
                if (left_ref_idx == 0):
                    left_x_rec = first_rec
                else:
                    cur_pos_in_frame_idx_list = frames_idx_list.index(left_ref_idx)
                    left_x_rec = reconstructions[(cur_pos_in_frame_idx_list + 1)]
                if (right_ref_idx == num_gop):
                    right_x_rec = last_x_rec
                else:
                    cur_pos_in_frame_idx_list = frames_idx_list.index(right_ref_idx)
                    right_x_rec = reconstructions[(cur_pos_in_frame_idx_list + 1)]
                # The frame actually coded at step i is out of display order.
                x_cur = convert_yuv420_to_rgb(org_seq[(cur_interpolation_idx + ((i // num_gop) * num_gop))], device, max_val)
                (x_cur, padding) = pad(x_cur)
                start = time.time()
                (y, enc_info) = BFrameCompressor.compress(x_cur)
                enc_time = (time.time() - start)
                write_body(f, enc_info['shape'], enc_info['strings'])
                start = time.time()
                mid_key = interpolation_net.inference(left_x_rec, right_x_rec, timestep=0.5)
                x_rec = BFrameCompressor.decompress(enc_info['strings'], enc_info['shape'], mid_key)['x_hat']
                dec_time = (time.time() - start)
                reconstructions.append(x_rec)
            else:
                # --- B frame conditioned on the concatenated key frames ---
                start = time.time()
                (y, enc_info) = BFrameCompressor.compress(x_cur)
                enc_time = (time.time() - start)
                write_body(f, enc_info['shape'], enc_info['strings'])
                start = time.time()
                mid_key = torch.cat((first_rec, last_x_rec), 1)
                x_rec = BFrameCompressor.decompress(enc_info['strings'], enc_info['shape'], mid_key)['x_hat']
                dec_time = (time.time() - start)
            x_rec = x_rec.clamp(0, 1)
            # Compare against the frame that was actually coded this step.
            if (with_interpolation and ((i % num_gop) != 0)):
                metrics = compute_metrics_for_frame(org_seq[(cur_interpolation_idx + ((i // num_gop) * num_gop))], crop(x_rec, padding), device, max_val)
            else:
                metrics = compute_metrics_for_frame(org_seq[i], crop(x_rec, padding), device, max_val)
            if (intra or ((i % num_gop) == 0)):
                metrics['key_encoding_time'] = torch.tensor(enc_time)
                metrics['key_decoding_time'] = torch.tensor(dec_time)
            else:
                metrics['inter_encoding_time'] = torch.tensor(enc_time)
                metrics['inter_decoding_time'] = torch.tensor(dec_time)
            for (k, v) in metrics.items():
                results[k].append(v)
            pbar.update(1)
    f.close()
    # Average every collected metric over the sequence.
    seq_results: Dict[(str, Any)] = {k: torch.mean(torch.stack(v)) for (k, v) in results.items()}
    seq_results['bitrate'] = (((float(filesize(binpath)) * 8) * org_seq.framerate) / (num_frames * 1000))
    seq_results['bpp'] = ((float(filesize(binpath)) * 8) / (num_frames * num_pixels))
    if (not keep_binaries):
        binpath.unlink()
    # Convert tensor metrics to plain Python floats for serialization.
    for (k, v) in seq_results.items():
        if isinstance(v, torch.Tensor):
            seq_results[k] = v.item()
    return seq_results
def TrainForceField(SetName_='GoldStd'):
    """Train a Behler-Parrinello style force-field network on the named set.

    Loads the molecule set, writes the training hyper-parameters into the
    library-wide PARAMS dict, builds the digester/dataset pair, and runs
    training through a TFMolManage instance.
    """
    mol_set = MSet(SetName_)
    mol_set.Load()
    treated_atoms = mol_set.AtomTypes()
    # Training hyper-parameters (stored in the global PARAMS dict).
    hyper = {
        'learning_rate': 1e-05,
        'momentum': 0.95,
        'max_steps': 201,
        'batch_size': 100,
        'test_freq': 5,
        'tf_prec': 'tf.float64',
        'GradScalar': 1,
        'NeuronType': 'relu',
        'HiddenLayers': [200, 200, 200],
    }
    for key, value in hyper.items():
        PARAMS[key] = value
    digester = MolDigester(treated_atoms, name_='ANI1_Sym_Direct', OType_='AtomizationEnergy')
    dataset = TensorMolData_BP_Direct_Linear(mol_set, digester, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
    manager = TFMolManage('', dataset, False, 'fc_sqdiff_BP_Direct_Grad_Linear')
    manager.Train(maxstep=101)
def store_multprec_laurent_system(polsys, decimals, **nbvar):
    """Store a multiprecision Laurent polynomial system in the PHCpack container.

    Args:
        polsys: list of string representations of Laurent polynomials.
        decimals: number of decimal places of the working precision.
        **nbvar: optional single keyword argument giving the number of
            variables; when omitted, ``len(polsys)`` is used.

    Returns:
        0 on success, or the nonzero failure code of the first polynomial
        that could not be stored (storing stops at the first failure).
    """
    from phcpy.phcpy2c3 import py2c_syscon_clear_multprec_Laurent_system
    from phcpy.phcpy2c3 import py2c_syscon_initialize_number_of_multprec_Laurentials
    from phcpy.phcpy2c3 import py2c_syscon_store_multprec_Laurential
    py2c_syscon_clear_multprec_Laurent_system()
    dim = len(polsys)
    py2c_syscon_initialize_number_of_multprec_Laurentials(dim)
    # The number of variables is loop-invariant: resolve it once, instead of
    # re-inspecting **nbvar (list(nbvar.values())[0]) on every iteration as
    # the original did.
    nvr = dim if (len(nbvar) == 0) else list(nbvar.values())[0]
    fail = 0
    for cnt, pol in enumerate(polsys):
        fail = py2c_syscon_store_multprec_Laurential(len(pol), nvr, (cnt + 1), decimals, pol)
        if (fail != 0):
            break
    return fail
def define_net_d(opt):
    """Instantiate a discriminator network from an options dict.

    Pops the 'type' key (so ``opt`` is mutated) and uses the remaining
    entries as constructor options for the registered architecture.
    """
    arch_name = opt.pop('type')
    return dynamic_instantiation(_arch_modules, arch_name, opt)
def retrace_graph_with(gm: GraphModule, tracer: Tracer=None, func: Callable[([GraphModule], GraphModule)]=None) -> GraphModule:
    """Retrace ``gm`` with either a tracer or a tracing function.

    Exactly one of ``tracer`` and ``func`` must be given; a ValueError is
    raised otherwise. The module is prepared for retracing, retraced, and
    its saved attributes restored afterwards.
    """
    # Guard clauses: exactly one tracing mechanism must be supplied.
    if (tracer is None) and (func is None):
        raise ValueError('Either a tracer or a function using a tracer must be provided.')
    if (tracer is not None) and (func is not None):
        raise ValueError('Either provide a tracer or a function using a tracer, but not both.')
    gm, attributes = prepare_for_retracing(gm)
    retrace = tracer.trace if tracer else func
    retraced = retrace(gm)
    restore_after_retracing_(retraced, attributes)
    return retraced
class GraphConv(nn.Module):
    """Message-passing graph convolution over atoms and bonds.

    Runs ``args.depth`` rounds of bond-message updates and aggregates the
    final messages per atom into an atom-level hidden representation.
    """

    def __init__(self, args):
        super(GraphConv, self).__init__()
        self.args = args
        hidden_size = args.hidden_size
        self.n_atom_feats = mol_features.N_ATOM_FEATS
        self.n_bond_feats = mol_features.N_BOND_FEATS
        # Input projection of the concatenated (atom, bond) features per bond.
        self.W_message_i = nn.Linear((self.n_atom_feats + self.n_bond_feats), hidden_size, bias=False)
        if args.no_share:
            # One update matrix per message-passing step.
            self.W_message_h = nn.ModuleList([nn.Linear(hidden_size, hidden_size, bias=False) for _ in range((args.depth - 1))])
        else:
            # A single update matrix shared by all steps.
            self.W_message_h = nn.Linear(hidden_size, hidden_size, bias=False)
        # Output projection of (atom features ++ aggregated messages).
        self.W_message_o = nn.Linear((self.n_atom_feats + hidden_size), hidden_size)
        self.dropout = nn.Dropout(args.dropout)

    def index_select_nei(self, input, dim, index):
        """Gather rows of ``input`` for every neighbor index.

        Result shape is ``index.shape + input.shape[1:]``. Note ``dim`` is
        unused; selection is always along dim 0.
        """
        target = torch.index_select(input=input, dim=0, index=index.view(-1))
        return target.view((index.size() + input.size()[1:]))

    def forward(self, graph_inputs):
        """Return per-atom hidden states for one batched molecular graph.

        Args:
            graph_inputs: tuple (fatoms, fbonds, agraph, bgraph) of atom
                features, bond features, and atom/bond neighbor indices.
        """
        (fatoms, fbonds, agraph, bgraph) = graph_inputs
        nei_input_h = self.W_message_i(fbonds)
        # torch.relu instead of constructing a fresh nn.ReLU module on every
        # call — behavior-identical, avoids a per-call allocation.
        message_h = torch.relu(nei_input_h)
        for i in range((self.args.depth - 1)):
            nei_message_h = self.index_select_nei(input=message_h, dim=0, index=bgraph)
            nei_message_h = nei_message_h.sum(dim=1)
            if self.args.no_share:
                nei_message_h = self.W_message_h[i](nei_message_h)
            else:
                nei_message_h = self.W_message_h(nei_message_h)
            # Residual-style combination with the input projection.
            message_h = torch.relu((nei_input_h + nei_message_h))
        # Final aggregation of bond messages onto atoms.
        nei_message_h = self.index_select_nei(input=message_h, dim=0, index=agraph)
        nei_message_h = nei_message_h.sum(dim=1)
        atom_input = torch.cat([fatoms, nei_message_h], dim=1)
        atom_input = self.dropout(atom_input)
        atom_h = torch.relu(self.W_message_o(atom_input))
        return atom_h
def run(data_fn, prop_missing=0.0, max_num_feature=(- 1), feature_selection='random', k=10, data_dir='_data', out_dir='_out'):
    """Compute and summarize DeepLIFT feature-importance scores across k folds.

    For each CV partition, computes per-feature difference sums and
    contribution scores on the fold's test split from the fold's saved model
    file, pickles the per-fold results, then aggregates over folds and writes
    an interpretation summary.

    Args:
        data_fn: dataset file name.
        prop_missing: proportion of data simulated as missing.
        max_num_feature: cap on the number of features (<= 0 keeps all).
        feature_selection: feature-selection method used when capping.
        k: number of cross-validation folds.
        data_dir: directory holding the input data.
        out_dir: root directory for outputs.
    """
    from keras.models import load_model  # NOTE(review): imported but unused in this body — confirm
    from riddle import emr, feature_importance
    from riddle.models import MLP
    start = time.time()
    base_out_dir = get_base_out_dir(out_dir, 'riddle', data_fn, prop_missing, max_num_feature, feature_selection)
    recursive_mkdir(base_out_dir)
    (x_unvec, y, idx_feat_dict, idx_class_dict, icd9_descript_dict, perm_indices) = get_preprocessed_data(data_dir, data_fn, prop_missing=prop_missing)
    num_feature = len(idx_feat_dict)
    num_class = len(idx_class_dict)
    # Per-fold accumulators for difference sums and contribution scores.
    (list_sums_D, list_sums_D2, list_sums_contribs) = ([], [], [])
    for k_idx in range(k):
        full_out_dir = '{}/k_idx={}'.format(base_out_dir, k_idx)
        print('\nPartition k = {}'.format(k_idx))
        (x_train_unvec, y_train, _, _, x_test_unvec, y_test) = emr.get_k_fold_partition(x_unvec, y, k_idx=k_idx, k=k, perm_indices=perm_indices)
        if (max_num_feature > 0):
            # Re-select / re-encode features on this fold's training split.
            (feat_encoding_dict, idx_feat_dict) = select_features(x_train_unvec, y_train, idx_feat_dict, method=feature_selection, num_feature=num_feature, max_num_feature=max_num_feature)
            x_test_unvec = subset_reencode_features(x_test_unvec, feat_encoding_dict)
            num_feature = max_num_feature
        # NOTE(review): this shadows the outer timer above, so the final
        # elapsed-time print covers only the last fold onward — confirm intended.
        start = time.time()
        temp_mlp = MLP(num_feature=num_feature, num_class=num_class)
        # Expects the fold's trained model saved by a previous training run.
        hdf5_path = (full_out_dir + '/model.h5')
        (sums_D, sums_D2, sums_contribs, pairs) = feature_importance.get_diff_sums(hdf5_path, x_test_unvec, process_x_func=temp_mlp.process_x, num_feature=num_feature, num_class=num_class)
        with open((full_out_dir + '/sums_D.pkl'), 'wb') as f:
            pickle.dump(sums_D, f)
        with open((full_out_dir + '/sums_D2.pkl'), 'wb') as f:
            pickle.dump(sums_D2, f)
        with open((full_out_dir + '/sums_contribs.pkl'), 'wb') as f:
            pickle.dump(sums_contribs, f)
        list_sums_D.append(sums_D)
        list_sums_D2.append(sums_D2)
        list_sums_contribs.append(sums_contribs)
    def compute_total_sums(list_sums):
        # Element-wise accumulation across folds (mutates list_sums[0]).
        total_sums = list_sums[0]
        for i in range(1, len(list_sums)):
            for j in range(len(total_sums)):
                total_sums[j] = np.add(total_sums[j], list_sums[i][j])
        return total_sums
    total_sums_D = compute_total_sums(list_sums_D)
    total_sums_D2 = compute_total_sums(list_sums_D2)
    total_sums_contribs = compute_total_sums(list_sums_contribs)
    num_sample = len(x_unvec)
    run_interpretation_summary(x_unvec, y, total_sums_D, total_sums_D2, total_sums_contribs, idx_feat_dict=idx_feat_dict, idx_class_dict=idx_class_dict, icd9_descript_dict=icd9_descript_dict, pairs=pairs, num_sample=num_sample, full_out_dir=base_out_dir)
    print('Computed DeepLIFT scores and analysis in {:.4f} seconds'.format((time.time() - start)))
    print(('-' * 72))
    print()
def get_descriptive_statistics(dict_, labels_):
    """Summarize each listed entry of ``dict_`` as a "mean (+/- std)" string.

    For every label in ``labels_``, ``dict_[label]`` (a sequence of numbers)
    is replaced in place by ``"<mean> (+/- <std>)"``. Labels whose values are
    missing or cannot be averaged are removed from the dict instead.

    Args:
        dict_: mapping from label to a sequence of numeric values; mutated.
        labels_: labels to summarize.

    Returns:
        The same (mutated) ``dict_``.
    """
    for label in labels_:
        # Bug fix: the original referenced a global `labels` instead of the
        # `labels_` parameter, and its except-branch popped without a default,
        # re-raising KeyError for labels absent from the dict.
        try:
            values = np.array(dict_[label])
            dict_[label] = (((str(np.mean(values)) + ' (+/- ') + str(np.std(values))) + ')')
        except Exception:
            # Could not summarize (missing key / non-numeric data): drop it.
            dict_.pop(label, None)
    return dict_
class BlenderbotOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for Blenderbot.

    Describes the dynamic input/output axes and builds dummy inputs for the
    'default'/'seq2seq-lm', 'causal-lm', and sequence-classification/QA
    tasks, including past-key-value tensors when ``use_past`` is enabled.

    NOTE(review): in upstream transformers, ``inputs``/``outputs`` are
    ``@property``-decorated; here they are plain methods — confirm callers
    invoke them with parentheses.
    """
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Map each ONNX input name to its dynamic axes, depending on task."""
        if (self.task in ['default', 'seq2seq-lm']):
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
            if self.use_past:
                # With a KV cache the decoder consumes one new token at a time,
                # so decoder_input_ids has no sequence axis.
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif (self.task == 'causal-lm'):
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
            if self.use_past:
                # Decoder-only: one (key, value) pair per decoder layer.
                (_, num_decoder_layers) = self.num_layers
                for i in range(num_decoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            # Sequence classification / question answering.
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'})])
        return common_inputs
    def outputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Map each ONNX output name to its dynamic axes, adding present-state
        tensors when ``use_past`` is enabled."""
        if (self.task in ['default', 'seq2seq-lm']):
            common_outputs = super().outputs
        else:
            # Skip the seq2seq parent's handling for non-seq2seq tasks.
            common_outputs = super(OnnxConfigWithPast, self).outputs
        if self.use_past:
            (num_encoder_layers, _) = self.num_layers
            for i in range(num_encoder_layers):
                common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Build encoder+decoder dummy inputs (plus past key/values if enabled)."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework)
        # With a cache, the decoder only sees a single new token.
        decoder_seq_length = (seq_length if (not self.use_past) else 1)
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f'decoder_{name}': tensor for (name, tensor) in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if (not is_torch_available()):
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            (batch, encoder_seq_length) = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            (num_encoder_attention_heads, num_decoder_attention_heads) = self.num_attention_heads
            # Past tensors: (batch, heads, seq, head_dim).
            encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, (self._config.hidden_size // num_encoder_attention_heads))
            decoder_past_length = decoder_seq_length
            decoder_shape = (batch, num_decoder_attention_heads, decoder_past_length, (self._config.hidden_size // num_decoder_attention_heads))
            # Extend the decoder mask to cover the past positions.
            common_inputs['decoder_attention_mask'] = torch.cat([common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs['past_key_values'] = []
            (_, num_decoder_layers) = self.num_layers
            for _ in range(num_decoder_layers):
                # Per layer: (self-attn key, self-attn value, cross-attn key, cross-attn value).
                common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Build decoder-only dummy inputs (plus past key/values if enabled)."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if (not is_torch_available()):
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            (batch, seqlen) = common_inputs['input_ids'].shape
            past_key_values_length = seqlen
            (_, num_decoder_layers) = self.num_layers
            (num_encoder_attention_heads, _) = self.num_attention_heads
            past_shape = (batch, num_encoder_attention_heads, past_key_values_length, (self._config.hidden_size // num_encoder_attention_heads))
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_decoder_layers)]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Tokenize a batch of unk-token dummy strings of the requested size."""
        # Resolve dynamic (-1) axes to fixed export dimensions.
        batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        dummy_input = ([(' '.join([tokenizer.unk_token]) * seq_length)] * batch_size)
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Dispatch dummy-input generation based on the configured task."""
        if (self.task in ['default', 'seq2seq-lm']):
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        elif (self.task == 'causal-lm'):
            common_inputs = self._generate_dummy_inputs_for_causal_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """Flatten one past-key-value tensor into `flattened_output` (mutated
        in place; note this method has no explicit return)."""
        if (self.task in ['default', 'seq2seq-lm']):
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(flattened_output, name, idx, t)
    def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[(str, Mapping[(int, str)])], direction: str):
        """Add per-layer decoder/encoder past-state axes to the mapping.

        ``direction`` selects the name prefix: 'past_key_values' for inputs,
        'present' for outputs.
        """
        if (direction not in ['inputs', 'outputs']):
            raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
        name = ('past_key_values' if (direction == 'inputs') else 'present')
        (_, num_decoder_layers) = self.num_layers
        encoder_sequence = 'past_encoder_sequence'
        decoder_sequence = ('past_decoder_sequence' if (direction == 'inputs') else 'past_decoder_sequence + sequence')
        for i in range(num_decoder_layers):
            inputs_or_outputs[f'{name}.{i}.decoder.key'] = {0: 'batch', 2: decoder_sequence}
            inputs_or_outputs[f'{name}.{i}.decoder.value'] = {0: 'batch', 2: decoder_sequence}
            inputs_or_outputs[f'{name}.{i}.encoder.key'] = {0: 'batch', 2: encoder_sequence}
            inputs_or_outputs[f'{name}.{i}.encoder.value'] = {0: 'batch', 2: encoder_sequence}
class Cnn14(nn.Module):
    """PANNs CNN14 audio encoder: log-mel front end plus six conv blocks.

    ``forward`` returns a 2048-dim clip embedding obtained by pooling the
    final feature map (mean over mel bins, then max+mean over time).
    """

    def __init__(self, config):
        super(Cnn14, self).__init__()
        self.bn0 = nn.BatchNorm2d(64)
        wav_cfg = config.wav
        sample_rate = wav_cfg.sr
        window_size = wav_cfg.window_size
        hop_length = wav_cfg.hop_length
        mel_bins = wav_cfg.mel_bins
        self.dropout = config.training.dropout
        # STFT front end (parameters frozen).
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_length, win_length=window_size, window='hann', center=True, pad_mode='reflect', freeze_parameters=True)
        # Log-mel filter bank on top of the spectrogram (also frozen).
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size, n_mels=mel_bins, fmin=50, fmax=14000, ref=1.0, amin=1e-10, top_db=None, freeze_parameters=True)
        self.is_spec_augment = config.training.spec_augmentation
        if self.is_spec_augment:
            self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2, freq_drop_width=8, freq_stripes_num=2)
        # Six conv blocks doubling the channel count: 1 -> 64 -> ... -> 2048.
        channel_plan = [(1, 64), (64, 128), (128, 256), (256, 512), (512, 1024), (1024, 2048)]
        for idx, (c_in, c_out) in enumerate(channel_plan, start=1):
            setattr(self, 'conv_block{}'.format(idx), ConvBlock(in_channels=c_in, out_channels=c_out))
        # Note: fc1 is created and initialized but not applied in forward().
        self.fc1 = nn.Linear(2048, 512, bias=True)
        self.init_weights()

    def init_weights(self):
        init_bn(self.bn0)
        init_layer(self.fc1)

    def forward(self, input):
        # Waveform -> spectrogram -> log-mel features.
        x = self.spectrogram_extractor(input)
        x = self.logmel_extractor(x)
        # BatchNorm over the mel axis: swap it into the channel position.
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        if (self.training and self.is_spec_augment):
            x = self.spec_augmenter(x)
        blocks = (self.conv_block1, self.conv_block2, self.conv_block3,
                  self.conv_block4, self.conv_block5, self.conv_block6)
        for block in blocks:
            x = block(x, pool_size=(2, 2), pool_type='avg')
            x = F.dropout(x, p=0.2, training=self.training)
        # Pool mel bins, then combine max- and mean-pooling over time.
        x = torch.mean(x, dim=3)
        (x1, _) = torch.max(x, dim=2)
        x2 = torch.mean(x, dim=2)
        return (x1 + x2)
def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings):
    """Return a TPUEstimator ``model_fn`` for BERT pre-training (MLM + NSP).

    The closure captures the BERT config, an optional initial checkpoint and
    the optimizer settings; the returned function builds the model, the joint
    masked-LM + next-sentence loss, and TRAIN/EVAL estimator specs.
    """
    def model_fn(features, labels, mode, params):
        """Estimator model_fn. `labels` and `params` are unused here."""
        tf.logging.info('*** Features ***')
        for name in sorted(features.keys()):
            tf.logging.info((' name = %s, shape = %s' % (name, features[name].shape)))
        input_ids = features['input_ids']
        input_mask = features['input_mask']
        segment_ids = features['segment_ids']
        masked_lm_positions = features['masked_lm_positions']
        masked_lm_ids = features['masked_lm_ids']
        masked_lm_weights = features['masked_lm_weights']
        next_sentence_labels = features['next_sentence_labels']
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        model = modeling.BertModel(config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings)
        # Two pre-training heads: masked-LM and next-sentence prediction.
        (masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(bert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids, masked_lm_weights)
        (next_sentence_loss, next_sentence_example_loss, next_sentence_log_probs) = get_next_sentence_output(bert_config, model.get_sequence_output(), next_sentence_labels)
        total_loss = (masked_lm_loss + next_sentence_loss)
        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            # Warm-start matching variables from the checkpoint. On TPU the
            # restore must happen inside a Scaffold factory.
            (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
            if use_tpu:
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                    return tf.train.Scaffold()
                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        tf.logging.info('**** Trainable Variables ****')
        for var in tvars:
            init_string = ''
            if (var.name in initialized_variable_names):
                init_string = ', *INIT_FROM_CKPT*'
            tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string)
        output_spec = None
        if (mode == tf.estimator.ModeKeys.TRAIN):
            train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn)
        elif (mode == tf.estimator.ModeKeys.EVAL):
            def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels):
                """Eval metrics for both heads (runs on the eval host)."""
                masked_lm_log_probs = tf.reshape(masked_lm_log_probs, [(- 1), masked_lm_log_probs.shape[(- 1)]])
                masked_lm_predictions = tf.argmax(masked_lm_log_probs, axis=(- 1), output_type=tf.int32)
                # tf.Print calls are graph-side debug logging (deprecated API).
                masked_lm_predictions = tf.Print(masked_lm_predictions, [masked_lm_predictions], 'masked_lm_predictions')
                masked_lm_ids = tf.Print(masked_lm_ids, [masked_lm_ids], 'masked_lm_ids')
                # NOTE(review): exp_log_prop is computed/printed but never
                # returned; the AUC below feeds exp(-example_loss) as a score
                # against token-id labels — confirm this metric is meaningful.
                exp_log_prop = tf.Print(tf.exp((- masked_lm_example_loss)), [tf.exp((- masked_lm_example_loss))], 'tf.exp(-masked_lm_example_loss)')
                masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [(- 1)])
                masked_lm_ids = tf.reshape(masked_lm_ids, [(- 1)])
                masked_lm_weights = tf.reshape(masked_lm_weights, [(- 1)])
                masked_lm_accuracy = tf.metrics.accuracy(labels=masked_lm_ids, predictions=masked_lm_predictions, weights=masked_lm_weights)
                masked_lm_auc = tf.metrics.auc(labels=masked_lm_ids, predictions=tf.exp((- masked_lm_example_loss)))
                masked_lm_mean_loss = tf.metrics.mean(values=masked_lm_example_loss, weights=masked_lm_weights)
                next_sentence_log_probs = tf.reshape(next_sentence_log_probs, [(- 1), next_sentence_log_probs.shape[(- 1)]])
                next_sentence_predictions = tf.argmax(next_sentence_log_probs, axis=(- 1), output_type=tf.int32)
                next_sentence_labels = tf.reshape(next_sentence_labels, [(- 1)])
                next_sentence_accuracy = tf.metrics.accuracy(labels=next_sentence_labels, predictions=next_sentence_predictions)
                next_sentence_mean_loss = tf.metrics.mean(values=next_sentence_example_loss)
                return {'masked_lm_accuracy': masked_lm_accuracy, 'masked_lm_auc': masked_lm_auc, 'masked_lm_loss': masked_lm_mean_loss, 'next_sentence_accuracy': next_sentence_accuracy, 'next_sentence_loss': next_sentence_mean_loss}
            eval_metrics = (metric_fn, [masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels])
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn)
        else:
            raise ValueError(('Only TRAIN and EVAL modes are supported: %s' % mode))
        return output_spec
    return model_fn
@pytest.mark.parametrize('alpha', [0.001, 0.1, 1, 10, 100, 1000, 1000000.0])
@pytest.mark.parametrize('penalty, lambda_1, lambda_2', [('l1', 1, 0), ('l2', 0, 1)])
def test_elastic_net_l1_l2_equivalence(alpha, penalty, lambda_1, lambda_2):
    """Elastic net with one of (lambda_1, lambda_2) zeroed must match the
    corresponding pure l1/l2 penalty at the same strength.

    Fix: the bare `.parametrize(...)` lines were corrupted decorators
    (a SyntaxError as written); restored to `@pytest.mark.parametrize`,
    which stacks to produce the cartesian product of both parameter sets.
    """
    (X, y) = make_classification(random_state=0)
    lr_enet = LogisticRegression(penalty='elasticnet', lambda_1=(lambda_1 * alpha), lambda_2=(lambda_2 * alpha), solver='qning-miso', random_state=0)
    lr_expected = LogisticRegression(penalty=penalty, lambda_1=alpha, solver='qning-miso', random_state=0)
    lr_enet.fit(X, y)
    lr_expected.fit(X, y)
    assert_array_almost_equal(lr_enet.coef_, lr_expected.coef_)
def rmse(y_true, y_pred):
    """Root-mean-squared error over the last axis, as a Keras backend tensor."""
    from keras import backend as K
    squared_error = K.square(y_pred - y_true)
    return K.sqrt(K.mean(squared_error, axis=-1))
def pattern_to_path(pattern):
    """Expand a ``(layer, (act_keys, weight_keys))`` pattern into two paths.

    Returns a pair of tuples: ``(layer, 'activation', *act_keys)`` and
    ``(layer, 'weight', *weight_keys)``.
    """
    layer, (act_keys, weight_keys) = pattern
    activation_path = (layer, 'activation') + tuple(act_keys)
    weight_path = (layer, 'weight') + tuple(weight_keys)
    return (activation_path, weight_path)
def main_worker(gpu, ngpus_per_node, args):
    """Linear-evaluation worker for one (possibly distributed) process.

    Builds a classifier with the backbone frozen except the final linear
    layer, optionally initializes it from a MoCo-style pretrained checkpoint
    (query encoder weights), then trains/validates on CIFAR-10/100 while
    collecting mutual-information estimates from each training epoch.
    """
    global best_acc1
    args.gpu = gpu
    # In multiprocessing-distributed mode, silence printing on non-zero GPUs.
    if (args.multiprocessing_distributed and (args.gpu != 0)):
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    print("=> creating model '{}'".format(args.arch))
    if (args.dataset == 'cifar10'):
        num_classes = 10
    elif (args.dataset == 'cifar100'):
        num_classes = 100
    else:
        sys.exit((- 1))
    if (args.arch == 'efficientb0'):
        model = efficientnet_b0(pretrained=False, num_classes=num_classes)
    elif (args.arch == 'efficientb1'):
        model = efficientnet_b1(pretrained=False, num_classes=num_classes)
    elif (args.arch == 'mobilenetv3'):
        model = mobilenetv3_large_100(num_classes=num_classes)
    else:
        model = models.__dict__[args.arch](num_classes=num_classes)
    print(model)
    # Freeze everything except the classification head (linear evaluation).
    # NOTE(review): only the arches listed below are frozen; any other arch
    # keeps all parameters trainable — confirm that is intended.
    for (name, param) in model.named_parameters():
        if ((args.arch in ['resnet18', 'resnet34']) and (name not in ['fc.weight', 'fc.bias'])):
            param.requires_grad = False
        if ((args.arch in ['efficientb0', 'efficientb1', 'mobilenetv3']) and (name not in ['classifier.weight', 'classifier.bias'])):
            param.requires_grad = False
    if args.pretrained:
        if os.path.isfile(args.pretrained):
            print("=> loading checkpoint '{}'".format(args.pretrained))
            if (args.gpu is None):
                checkpoint = torch.load(args.pretrained)
            else:
                # Map the checkpoint straight onto this process's GPU.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.pretrained, map_location=loc)
            state_dict = checkpoint['state_dict']
            # Keep only the query-encoder weights (minus its head), strip the
            # 'module.encoder_q.' prefix, then delete every original key.
            for k in list(state_dict.keys()):
                if (args.arch in ['efficientb0', 'mobilenetv3']):
                    if (k.startswith('module.encoder_q') and (not k.startswith('module.encoder_q.classifier'))):
                        state_dict[k[len('module.encoder_q.'):]] = state_dict[k]
                elif (k.startswith('module.encoder_q') and (not k.startswith('module.encoder_q.fc'))):
                    state_dict[k[len('module.encoder_q.'):]] = state_dict[k]
                del state_dict[k]
            msg = model.load_state_dict(state_dict, strict=False)
            print(msg)
            # Only the two freshly initialized head tensors may be missing.
            assert (len(msg.missing_keys) == 2)
            print("=> loaded pre-trained model '{}'".format(args.pretrained))
        else:
            print("=> no checkpoint found at '{}'".format(args.pretrained))
    # Re-initialize the classification head.
    if (args.arch in ['efficientb0', 'efficientb1', 'mobilenetv3']):
        model.classifier.weight.data.normal_(mean=0.0, std=0.01)
        model.classifier.bias.data.zero_()
    else:
        model.fc.weight.data.normal_(mean=0.0, std=0.01)
        model.fc.bias.data.zero_()
    if args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split the global batch/workers across the node's processes.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    elif (args.arch.startswith('alexnet') or args.arch.startswith('vgg')):
        # DataParallel over the feature extractor only, as in the
        # torchvision reference script.
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    # Optimize only the two unfrozen head parameters.
    parameters = list(filter((lambda p: p.requires_grad), model.parameters()))
    assert (len(parameters) == 2)
    optimizer = torch.optim.SGD(parameters, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if args.resume:
        checkpoint_path = get_last_checkpoint(args.resume)
        if os.path.isfile(checkpoint_path):
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            if (args.gpu is None):
                checkpoint = torch.load(checkpoint_path)
            else:
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(checkpoint_path, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if (args.gpu is not None):
                best_acc1 = best_acc1.to(args.gpu)
            out = model.load_state_dict(checkpoint['state_dict'], strict=False)
            print(out)
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # ImageNet normalization; CIFAR images are upscaled to 224 below.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if (args.dataset == 'cifar10'):
        train_dataset = datasets.CIFAR10(root=args.data, train=True, download=False, transform=transforms.Compose([transforms.RandomResizedCrop(224, interpolation=PIL.Image.BICUBIC), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
        val_dataset = datasets.CIFAR10(root=args.data, train=False, download=False, transform=transforms.Compose([transforms.Resize(224, interpolation=PIL.Image.BICUBIC), transforms.CenterCrop(224), transforms.ToTensor(), normalize]))
    elif (args.dataset == 'cifar100'):
        train_dataset = datasets.CIFAR100(root=args.data, train=True, download=False, transform=transforms.Compose([transforms.RandomResizedCrop(224, interpolation=PIL.Image.BICUBIC), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
        val_dataset = datasets.CIFAR100(root=args.data, train=False, download=False, transform=transforms.Compose([transforms.Resize(224, interpolation=PIL.Image.BICUBIC), transforms.CenterCrop(224), transforms.ToTensor(), normalize]))
    else:
        print('No dataset')
        sys.exit((- 1))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    # Mutual-information estimates collected across epochs: I(X;T), I(T;Y).
    xts = []
    tys = []
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        (MI_XTs, MI_TYs) = train(train_loader, model, criterion, optimizer, epoch, args)
        xts.extend(MI_XTs)
        tys.extend(MI_TYs)
        acc1 = validate(val_loader, model, criterion, args)
        is_best = (acc1 > best_acc1)
        best_acc1 = max(acc1, best_acc1)
        # Only rank-0 (per node) saves checkpoints.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'best_acc1': best_acc1, 'optimizer': optimizer.state_dict()}, is_best)
            if (epoch == args.start_epoch):
                # Verify training did not change the frozen backbone weights.
                sanity_check(model.state_dict(), args.pretrained, args)
    # Persist the collected MI curves.
    xts = np.array(xts)
    tys = np.array(tys)
    with open('./ckpt/xts', 'wb') as f:
        np.save(f, xts)
    with open('./ckpt/tys', 'wb') as f:
        np.save(f, tys)
class MixerBlock(nn.Module):
    """One MLP-Mixer block: token mixing across patches, then channel mixing,
    each wrapped in a residual connection (pre-LayerNorm)."""

    def __init__(self, dim, num_patch, token_dim, channel_dim, dropout=0.0):
        super().__init__()
        # Mix across the patch (token) axis: transpose so the feed-forward
        # network operates over patches, then transpose back.
        self.token_mix = nn.Sequential(
            nn.LayerNorm(dim),
            Rearrange('b p d -> b d p'),
            FeedForward(num_patch, token_dim, dropout),
            Rearrange('b d p -> b p d'),
        )
        # Mix across the channel (feature) axis.
        self.channel_mix = nn.Sequential(
            nn.LayerNorm(dim),
            FeedForward(dim, channel_dim, dropout),
        )

    def forward(self, x):
        out = x + self.token_mix(x)
        out = out + self.channel_mix(out)
        return out
def register_all_coco(root):
    """Register every builtin COCO dataset rooted at ``root``:
    instance splits, panoptic splits, and unseen-category panoptic splits."""
    # 1) Plain instance-annotation splits.
    for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
        for key, (image_root, json_file) in splits_per_dataset.items():
            # A json_file containing '://' is a remote URI and is used verbatim;
            # otherwise it is resolved relative to root.
            annotations = json_file if '://' in json_file else os.path.join(root, json_file)
            register_coco_instances(
                key,
                _get_builtin_metadata(dataset_name),
                annotations,
                os.path.join(root, image_root),
            )
    # 2) Panoptic splits ("separated" format). The image root and instance
    #    json are reused from the already-registered instance dataset whose
    #    name is the prefix without the '_panoptic' suffix.
    for prefix, (panoptic_root, panoptic_json, semantic_root) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
        instances_key = prefix[:-len('_panoptic')]
        instances_meta = MetadataCatalog.get(instances_key)
        image_root, instances_json = instances_meta.image_root, instances_meta.json_file
        register_coco_panoptic_separated(
            prefix,
            _get_builtin_metadata('coco_panoptic_separated'),
            image_root,
            os.path.join(root, panoptic_root),
            os.path.join(root, panoptic_json),
            os.path.join(root, semantic_root),
            instances_json,
        )
    # 3) Unseen-category panoptic splits: same as above, but the instance
    #    annotations are swapped for the *_unseen1.json variant.
    for prefix, (panoptic_root, panoptic_json, semantic_root) in _PREDEFINED_SPLITS_COCO_PANOPTIC_UNSEEN.items():
        # NOTE(review): the literal '*' in this suffix looks odd but is kept
        # verbatim — confirm against the actual keys of the UNSEEN split dict.
        instances_key = prefix[:-len('_panoptic_unseen*')]
        instances_meta = MetadataCatalog.get(instances_key)
        image_root, instances_json = instances_meta.image_root, instances_meta.json_file
        instances_json = instances_json.split('.')[0] + '_unseen1' + '.json'
        register_coco_panoptic_separated(
            prefix,
            _get_builtin_metadata('coco_panoptic_separated'),
            image_root,
            os.path.join(root, panoptic_root),
            os.path.join(root, panoptic_json),
            os.path.join(root, semantic_root),
            instances_json,
        )
def basic_cleaners(text):
    """Minimal cleaning pipeline: lowercase the text, then collapse whitespace runs."""
    return collapse_whitespace(lowercase(text))
def dense_model(timesteps, n_class, n_features, classifier_architecture, dropout):
    """Build a dense classifier over (timesteps, n_features) sequences.

    A 128-unit Mish layer plus attention summarizes the input, then one
    Dropout/Dense/LayerNorm block per entry of ``classifier_architecture``
    (with the matching rate from ``dropout``) feeds an ``n_class`` softmax.
    """
    inputs = Input((timesteps, n_features))
    hidden = Dense(128, activation=Mish())(inputs)
    hidden = LayerNormalization()(hidden)
    # attention_simple also returns the attention weights; they are unused here.
    hidden, _attention = attention_simple(hidden, timesteps)
    for units, rate in zip(classifier_architecture, dropout):
        hidden = Dropout(rate)(hidden)
        hidden = Dense(units, activation=Mish())(hidden)
        hidden = LayerNormalization()(hidden)
    outputs = Dense(n_class, activation='softmax')(hidden)
    return Model(inputs, outputs)
def load_nerve():
    """Collect paired test image / centerline-mask paths under args['test_path'].

    Images live in <test_path>/orig/*.tif; each label is the matching
    <test_path>/mask2/<stem>_centerline_overlay.tif file.

    Returns:
        (test_images, test_labels): parallel lists of file path strings.
    """
    test_images, test_labels = [], []
    orig_dir = os.path.join(args['test_path'], 'orig')
    for matched in glob.glob(os.path.join(orig_dir, '*.tif')):
        basename = os.path.basename(matched)
        stem = basename[:-4]  # strip the '.tif' extension
        test_images.append(os.path.join(orig_dir, basename))
        test_labels.append(os.path.join(args['test_path'], 'mask2', stem + '_centerline_overlay.tif'))
    return (test_images, test_labels)
def create_corrupted_utt2uniq(input_dir, output_dir, num_replicas, include_original, prefix):
    """Write an utt2uniq file mapping corrupted (augmented) utterance ids
    back to their original utterance ids.

    Utterance ids are taken from <input_dir>/utt2spk; for each replica index
    a prefixed id is generated via get_new_id and mapped to the original.
    The result is written to <output_dir>/utt2uniq.
    """
    utt2spk = parse_file_to_dict(input_dir + '/utt2spk', value_processor=(lambda x: ' '.join(x)))
    sorted_utts = sorted(utt2spk.keys())
    # Replica index 0 denotes the unmodified original utterance; include it
    # only when requested.
    first_replica = 0 if include_original else 1
    mapping = {}
    for replica in range(first_replica, num_replicas + 1):
        for utt_id in sorted_utts:
            mapping[get_new_id(utt_id, prefix, replica)] = utt_id
    write_dict_to_file(mapping, output_dir + '/utt2uniq')
class CustomTest(CustomBase):
    """Test-split dataset: loads the image paths listed one-per-line in a
    text file and serves them without random cropping."""

    def __init__(self, size, test_images_list_file):
        super().__init__()
        with open(test_images_list_file, 'r') as handle:
            image_paths = handle.read().splitlines()
        self.data = ImagePaths(paths=image_paths, size=size, random_crop=False)
class SentenceMoversMetric(Metric):
    """Sentence Mover's Similarity metric.

    Scores a summary against a reference using sentence-level earth mover's
    distance over word representations (GloVe or ELMo).
    """

    def __init__(self, wordrep='glove', metric='sms', n_workers=24, tokenize=True):
        # Word representation: 'glove' or 'elmo'.
        self.wordrep = wordrep
        # Similarity variant passed through to get_sim (e.g. 'sms').
        self.metric = metric
        # ELMo needs a live embedder; for GloVe no model object is required here.
        self.model = ElmoEmbedder() if wordrep == 'elmo' else None
        self.n_workers = n_workers
        self.tokenize = tokenize

    def evaluate_example(self, summary, reference):
        """Score a single (summary, reference) pair.

        Returns:
            dict: {'sentence_movers_<wordrep>_<metric>': score}
        """
        inLines = [(reference, summary)]
        token_doc_list, text_doc_list = tokenize_texts(inLines, self.wordrep, self.tokenize)
        score = get_sim(token_doc_list[0], text_doc_list[0], self.wordrep, self.model, self.metric)
        return {f'sentence_movers_{self.wordrep}_{self.metric}': score}

    def evaluate_batch(self, summaries, references, aggregate=True):
        """Score parallel lists of summaries and references in parallel.

        Fix: the worker pool is now closed deterministically via a context
        manager — the original never called close()/join() and leaked the
        Pool's worker processes.

        Returns:
            dict with the mean score when ``aggregate`` is True, otherwise a
            list of one score dict per input pair.
        """
        inLines = zip(references, summaries)
        token_doc_list, text_doc_list = tokenize_texts(inLines, self.wordrep, self.tokenize)
        with Pool(processes=self.n_workers) as pool:
            results = pool.starmap(
                get_sim,
                zip(token_doc_list, text_doc_list, repeat(self.wordrep), repeat(self.model), repeat(self.metric)),
            )
        if aggregate:
            return {f'sentence_movers_{self.wordrep}_{self.metric}': sum(results) / len(results)}
        return [{f'sentence_movers_{self.wordrep}_{self.metric}': result} for result in results]

    def supports_multi_ref(self):
        """Multi-reference evaluation is not supported by this metric."""
        return False
class Vocab(defaultdict):
    """Word <-> id vocabulary built on defaultdict.

    In training mode, looking up an unseen word assigns it the next free id
    (the current vocabulary size). Id 0 is reserved for the UNK token.
    """

    def __init__(self, train=True):
        # Unseen keys get the next id (current size) on first lookup.
        super().__init__(lambda: len(self))
        self.train = train
        self.UNK = 'UNK'
        self[self.UNK]  # reserve id 0 for the unknown token
        # Fix: update_idx2w previously returned None, so this assignment
        # clobbered the reverse map it had just built; it now returns the map.
        self.idx2w = self.update_idx2w()

    def set_vocab(self):
        """Freeze the vocabulary: unseen words map to UNK instead of growing it."""
        self.train = False

    def train(self):
        # NOTE(review): this method is shadowed by the ``self.train`` boolean
        # attribute set in __init__ and was never callable on instances; it is
        # kept for source compatibility. Use ``vocab.train = True`` instead.
        self.train = True

    def update_idx2w(self):
        """Rebuild and return the id -> word reverse mapping."""
        self.idx2w = {i: w for w, i in self.items()}
        return self.idx2w

    def ws2ids(self, ws):
        """Map words to ids.

        In training mode returns a LongTensor (growing the vocab on unseen
        words); in frozen mode returns a plain list with unseen words mapped
        to 0, the UNK id.
        """
        if self.train:
            return torch.tensor([self[w] for w in ws], dtype=torch.long)
        return [self[w] if w in self else 0 for w in ws]

    def ids2sent(self, ids):
        """Map a sequence of (int-convertible) ids back to words."""
        return [self.idx2w[int(i)] for i in ids]
def test_DVCCA_methods():
    """Smoke-test DVCCA: one variational encoder and one decoder per view,
    fitted for two epochs on the module-level train_loader."""
    max_epochs = 2
    latent_dims = 2
    view_encoders = [
        architectures.Encoder(latent_dimensions=latent_dims, feature_size=feature_size[0], variational=True),
        architectures.Encoder(latent_dimensions=latent_dims, feature_size=feature_size[1], variational=True),
    ]
    view_decoders = [
        architectures.Decoder(latent_dimensions=latent_dims, feature_size=feature_size[0]),
        architectures.Decoder(latent_dimensions=latent_dims, feature_size=feature_size[1]),
    ]
    dvcca = DVCCA(latent_dimensions=latent_dims, encoders=view_encoders, decoders=view_decoders)
    trainer = pl.Trainer(max_epochs=max_epochs, **trainer_kwargs)
    trainer.fit(dvcca, train_loader)
class RecordProcessor(FewGLUEDataProcessor):
    """Processor for the ReCoRD dataset (cloze-style reading comprehension:
    pick the correct entity from a news passage to fill a query)."""

    def __init__(self):
        super().__init__()
        # ReCoRD is framed as binary classification over entity candidates.
        self.labels = ['0', '1']

    def get_examples(self, data_dir, split, seed=42, max_train_candidates_per_question: int = 10) -> List[InputExample]:
        """Load ReCoRD examples from ``<data_dir>/<split>.jsonl``.

        Fix: the original signature ``get_examples(path, split, ...)`` had no
        ``self`` and its body referenced an undefined ``data_dir``, raising
        NameError on every call; the conventional method signature is restored.

        For the train split, each (question, correct answer) becomes one
        positive example whose metadata carries the answer plus up to
        ``max_train_candidates_per_question - 1`` shuffled distractor
        entities. For other splits, one example per question carries all
        candidate entities and the gold answers.
        """
        examples = []
        path = os.path.join(data_dir, '{}.jsonl'.format(split))
        # Seeded RNG so distractor sampling is reproducible across runs.
        entity_shuffler = random.Random(seed)
        with open(path, encoding='utf8') as f:
            for idx, line in enumerate(f):
                example_json = json.loads(line)
                idx = example_json['idx']
                text = example_json['passage']['text']
                # Collect the unique candidate entities by their character spans.
                entities = set()
                for entity_json in example_json['passage']['entities']:
                    start = entity_json['start']
                    end = entity_json['end']
                    entity = text[start:(end + 1)]  # spans are inclusive of 'end'
                    entities.add(entity)
                entities = list(entities)
                text = text.replace('\n', '- ')  # flatten highlight markers
                questions = example_json['qas']
                for question_json in questions:
                    question = question_json['query']
                    question_idx = question_json['idx']
                    answers = set()
                    for answer_json in question_json.get('answers', []):
                        answers.add(answer_json['text'])
                    answers = list(answers)
                    if split == 'train':
                        # One positive example per gold answer, with distractors.
                        for answer_idx, answer in enumerate(answers):
                            candidates = [ent for ent in entities if ent not in answers]
                            if len(candidates) > (max_train_candidates_per_question - 1):
                                entity_shuffler.shuffle(candidates)
                                candidates = candidates[:(max_train_candidates_per_question - 1)]
                            guid = f'{split}-p{idx}-q{question_idx}-a{answer_idx}'
                            meta = {
                                'passage_idx': idx,
                                'question_idx': question_idx,
                                'candidates': [answer] + candidates,
                                'answers': [answer],
                            }
                            ex_idx = [idx, question_idx, answer_idx]
                            example = InputExample(guid=guid, text_a=text, text_b=question, label='1', meta=meta, idx=ex_idx)
                            examples.append(example)
                    else:
                        # Evaluation: a single example carrying every candidate.
                        guid = f'{split}-p{idx}-q{question_idx}'
                        meta = {
                            'passage_idx': idx,
                            'question_idx': question_idx,
                            'candidates': entities,
                            'answers': answers,
                        }
                        example = InputExample(guid=guid, text_a=text, text_b=question, label='1', meta=meta)
                        examples.append(example)
        question_indices = list(set(example.meta['question_idx'] for example in examples))
        label_distribution = Counter(example.label for example in examples)
        logger.info(f'Returning {len(examples)} examples corresponding to {len(question_indices)} questions with label distribution {list(label_distribution.items())}')
        return examples
class GPRNet(torch.nn.Module):
    """Two-layer MLP followed by K-step GPR propagation and a scalar output head."""

    def __init__(self, K=10):
        super(GPRNet, self).__init__()
        self.lin1 = Linear(1, 32)
        self.lin2 = Linear(32, 64)
        self.prop1 = GPR_prop(K)  # generalized PageRank propagation over K hops
        self.fc2 = torch.nn.Linear(64, 1)

    def reset_parameters(self):
        # Only the propagation coefficients are re-initialized; the linear
        # layers keep their current weights.
        self.prop1.reset_parameters()

    def forward(self, data):
        hidden = F.relu(self.lin1(data.x_tmp))
        hidden = F.relu(self.lin2(hidden))
        hidden = self.prop1(hidden, data.edge_index)
        return self.fc2(hidden)
def run(cfg):
    """Build fragments for every scene folder under cfg.dataset_root,
    writing results beneath cfg.out_root."""
    print('Start making fragments')
    uio.may_create_folder(cfg.out_root)
    scene_list = uio.list_folders(cfg.dataset_root, sort=False)
    print('{} scenes'.format(len(scene_list)))
    for scene_name in scene_list:
        run_scene(cfg, scene_name)
    print('Finished making fragments')
def computerNetParameters(net):
    """Print each parameter tensor's shape and element count, then the total.

    Args:
        net: any torch.nn.Module (anything exposing ``parameters()``).

    Returns:
        int: total number of parameter elements in ``net``.

    Fixes: uses ``Tensor.numel()`` instead of a manual product over
    ``size()``, and corrects the misspelled 'paramenters' in the printed
    diagnostics.
    """
    total = 0
    for index, param in enumerate(net.parameters()):
        count = param.numel()
        print(index + 1, 'layer structure:' + str(list(param.size())))
        print('layer parameters: ' + str(count))
        total += count
    print('network parameters: ' + str(total))
    return total
class Normalizer(TextTransformer):
    """Text transformer that normalizes its input; the actual work is
    delegated to the backend referenced by ``bigdl_type``."""

    def __init__(self, bigdl_type='float'):
        # 'float' selects the default numeric precision of the backend.
        super(Normalizer, self).__init__(bigdl_type)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.