code stringlengths 101 5.91M |
|---|
class Punctuation_Rate(object):
    """Compute the average number of punctuation tokens per word.

    Aggregates punctuation POS-tag counts and word counts over all sentence
    objects and returns their ratio.
    """

    def __init__(self, sentence_objs):
        # sentence_objs: iterable of sentence objects exposing
        # `pos_tag_counter.get_pos_tag_count(...)` and `num_words()`.
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return total punctuation count divided by total word count.

        Returns 0.0 for an empty corpus instead of raising
        ZeroDivisionError (the original divided unconditionally).
        """
        total_punct = 0
        total_words = 0
        for so in self.sentence_objs:
            total_punct += so.pos_tag_counter.get_pos_tag_count(PUNCTUATION)
            total_words += so.num_words()
        # Guard against an empty corpus / zero words.
        if not total_words:
            return 0.0
        return total_punct / total_words
def compute_auxiliary_targets(observations, cell_size, output_size):
    """Compute auxiliary targets for the first three entries of the leading observation.

    Delegates each entry to ``compute_auxiliary_target`` and returns the
    results as a tuple.
    """
    first_obs = observations[0]
    return tuple(
        compute_auxiliary_target(entry, cell_size, output_size)
        for entry in first_obs[:3]
    )
def create_parser():
    """Build the argparse CLI parser for semi-supervised ImageNet training.

    Relies on module-level names: ``datasets`` and ``architectures`` (for
    their ``__all__`` choice lists) and the ``str2bool`` converter.
    """
    parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
    # --- Dataset selection and directory layout ---
    parser.add_argument('--dataset', metavar='DATASET', default='imagenet', choices=datasets.__all__, help=(('dataset: ' + ' | '.join(datasets.__all__)) + ' (default: imagenet)'))
    parser.add_argument('--train-subdir', type=str, default='train', help='the subdirectory inside the data directory that contains the training data')
    parser.add_argument('--eval-subdir', type=str, default='val', help='the subdirectory inside the data directory that contains the evaluation data')
    # --- Labeling options for the semi-supervised setting ---
    parser.add_argument('--labels', default=None, type=str, metavar='FILE', help='list of image labels (default: based on directory structure)')
    parser.add_argument('--exclude-unlabeled', default=False, type=str2bool, metavar='BOOL', help='exclude unlabeled examples from the training set')
    # --- Model architecture and data loading ---
    parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18', choices=architectures.__all__, help=('model architecture: ' + ' | '.join(architectures.__all__)))
    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')
    # --- Optimization schedule ---
    parser.add_argument('--epochs', default=90, type=int, metavar='N', help='number of total epochs to run')
    parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
    parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)')
    parser.add_argument('--labeled-batch-size', default=None, type=int, metavar='N', help='labeled examples per minibatch (default: no constrain)')
    # --- Learning-rate schedule: linear ramp-up, optional cosine ramp-down ---
    parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='max learning rate')
    parser.add_argument('--initial-lr', default=0.0, type=float, metavar='LR', help='initial learning rate when using linear rampup')
    parser.add_argument('--lr-rampup', default=0, type=int, metavar='EPOCHS', help='length of learning rate rampup in the beginning')
    parser.add_argument('--lr-rampdown-epochs', default=None, type=int, metavar='EPOCHS', help='length of learning rate cosine rampdown (>= length of training)')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
    parser.add_argument('--nesterov', default=False, type=str2bool, help='use nesterov momentum', metavar='BOOL')
    parser.add_argument('--weight-decay', '--wd', default=0.0001, type=float, metavar='W', help='weight decay (default: 1e-4)')
    # --- Mean-teacher specific knobs: EMA decay and consistency loss ---
    parser.add_argument('--ema-decay', default=0.999, type=float, metavar='ALPHA', help='ema variable decay rate (default: 0.999)')
    parser.add_argument('--consistency', default=None, type=float, metavar='WEIGHT', help='use consistency loss with given weight (default: None)')
    parser.add_argument('--consistency-type', default='mse', type=str, metavar='TYPE', choices=['mse', 'kl'], help='consistency loss type to use')
    parser.add_argument('--consistency-rampup', default=30, type=int, metavar='EPOCHS', help='length of the consistency loss ramp-up')
    parser.add_argument('--logit-distance-cost', default=(- 1), type=float, metavar='WEIGHT', help='let the student model have two outputs and use an MSE loss between the logits with the given weight (default: only have one output)')
    # --- Checkpointing / evaluation / logging cadence ---
    parser.add_argument('--checkpoint-epochs', default=1, type=int, metavar='EPOCHS', help='checkpoint frequency in epochs, 0 to turn checkpointing off (default: 1)')
    parser.add_argument('--evaluation-epochs', default=1, type=int, metavar='EPOCHS', help='evaluation frequency in epochs, 0 to turn evaluation off (default: 1)')
    parser.add_argument('--print-freq', '-p', default=10, type=int, metavar='N', help='print frequency (default: 10)')
    parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
    parser.add_argument('-e', '--evaluate', type=str2bool, help='evaluate model on evaluation set')
    parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model')
    return parser
class BaseGANRunner(BaseRunner):
    """Base runner for GAN experiments.

    Extends BaseRunner with GAN-specific utilities: exponential moving
    average of model weights, distributed image synthesis, and FID
    evaluation against (optionally cached) real-image features.
    """

    def __init__(self, config, logger):
        super().__init__(config, logger)
        # Inception network for FID features; built lazily in fid().
        self.inception_model = None

    def moving_average_model(self, model, avg_model, beta=0.999):
        """Update `avg_model` in place as an EMA of `model`'s parameters.

        new_avg = beta * avg + (1 - beta) * current, parameter by parameter.
        """
        model_params = dict(self.get_module(model).named_parameters())
        avg_params = dict(self.get_module(avg_model).named_parameters())
        assert (len(model_params) == len(avg_params))
        for param_name in avg_params:
            assert (param_name in model_params)
            avg_params[param_name].data = ((avg_params[param_name].data * beta) + (model_params[param_name].data * (1 - beta)))

    def build_models(self):
        """Build models and cache generator attributes plus per-phase
        (train/val) forward keyword arguments for G and D."""
        super().build_models()
        assert ('generator' in self.models)
        assert ('discriminator' in self.models)
        self.z_space_dim = self.models['generator'].z_space_dim
        self.resolution = self.models['generator'].resolution
        self.G_kwargs_train = self.config.modules['generator'].get('kwargs_train', dict())
        self.G_kwargs_val = self.config.modules['generator'].get('kwargs_val', dict())
        self.D_kwargs_train = self.config.modules['discriminator'].get('kwargs_train', dict())
        self.D_kwargs_val = self.config.modules['discriminator'].get('kwargs_val', dict())

    def train_step(self, data, **train_kwargs):
        """One optimization step; must be implemented by subclasses."""
        raise NotImplementedError('Should be implemented in derived class.')

    def val(self, **val_kwargs):
        """Validation hook: synthesizes sample images."""
        self.synthesize(**val_kwargs)

    def synthesize(self, num, z=None, html_name=None, save_raw_synthesis=False):
        """Generate `num` images across all ranks; optionally build an HTML grid.

        Args:
            num: Number of images to synthesize (capped by len(z) if given).
            z: Optional (num, z_space_dim) numpy array of latent codes;
                random codes are drawn when omitted.
            html_name: If set, rank 0 assembles the images into an HTML page.
            save_raw_synthesis: Keep per-image JPEGs instead of deleting the
                temp directory afterwards.
        """
        if ((not html_name) and (not save_raw_synthesis)):
            return  # nothing requested
        self.set_mode('val')
        temp_dir = os.path.join(self.work_dir, 'synthesize_results')
        os.makedirs(temp_dir, exist_ok=True)
        if (z is not None):
            assert isinstance(z, np.ndarray)
            assert ((z.ndim == 2) and (z.shape[1] == self.z_space_dim))
            num = min(num, z.shape[0])
            z = torch.from_numpy(z).type(torch.FloatTensor)
        if (not num):
            return
        self.logger.init_pbar()
        task1 = self.logger.add_pbar_task('Synthesize', total=num)
        # Round-robin sharding: this rank handles indices rank, rank+ws, ...
        indices = list(range(self.rank, num, self.world_size))
        for batch_idx in range(0, len(indices), self.val_batch_size):
            sub_indices = indices[batch_idx:(batch_idx + self.val_batch_size)]
            batch_size = len(sub_indices)
            if (z is None):
                code = torch.randn(batch_size, self.z_space_dim).cuda()
            else:
                code = z[sub_indices].cuda()
            with torch.no_grad():
                # Prefer the EMA ('smooth') generator when available.
                if ('generator_smooth' in self.models):
                    G = self.models['generator_smooth']
                else:
                    G = self.models['generator']
                images = G(code, **self.G_kwargs_val)['image']
                images = postprocess_image(images.detach().cpu().numpy())
            for (sub_idx, image) in zip(sub_indices, images):
                save_image(os.path.join(temp_dir, f'{sub_idx:06d}.jpg'), image)
            self.logger.update_pbar(task1, (batch_size * self.world_size))
        dist.barrier()
        if (self.rank != 0):
            # NOTE(review): non-zero ranks return before close_pbar() — confirm intended.
            return
        if html_name:
            task2 = self.logger.add_pbar_task('Visualize', total=num)
            html = HtmlPageVisualizer(grid_size=num)
            for image_idx in range(num):
                image = load_image(os.path.join(temp_dir, f'{image_idx:06d}.jpg'))
                (row_idx, col_idx) = divmod(image_idx, html.num_cols)
                html.set_cell(row_idx, col_idx, image=image, text=f'Sample {image_idx:06d}')
                self.logger.update_pbar(task2, 1)
            html.save(os.path.join(self.work_dir, html_name))
        if (not save_raw_synthesis):
            shutil.rmtree(temp_dir)
        self.logger.close_pbar()

    def fid(self, fid_num, z=None, ignore_cache=False, align_tf=True):
        """Compute FID between generated images and the validation set.

        Each rank extracts inception features for its shard of fake (and,
        unless cached, real) images and saves them to disk; rank 0 then
        gathers them, restores global index order, and computes FID.
        Returns -1 on non-zero ranks or when there is nothing to evaluate.

        Args:
            fid_num: Number of samples to use (capped by dataset/z size).
            z: Optional fixed latent codes, as in synthesize().
            ignore_cache: Recompute real-image features even if cached.
            align_tf: Build the TensorFlow-aligned inception model.
        """
        self.set_mode('val')
        if (self.val_loader is None):
            self.build_dataset('val')
        fid_num = min(fid_num, len(self.val_loader.dataset))
        if (self.inception_model is None):
            if align_tf:
                self.logger.info(f'Building inception model (aligned with TensorFlow) ...')
            else:
                self.logger.info(f'Building inception model (using torchvision) ...')
            self.inception_model = build_inception_model(align_tf).cuda()
            self.logger.info(f'Finish building inception model.')
        if (z is not None):
            assert isinstance(z, np.ndarray)
            assert ((z.ndim == 2) and (z.shape[1] == self.z_space_dim))
            fid_num = min(fid_num, z.shape[0])
            z = torch.from_numpy(z).type(torch.FloatTensor)
        if (not fid_num):
            return (- 1)
        # Round-robin sharding of sample indices across ranks.
        indices = list(range(self.rank, fid_num, self.world_size))
        self.logger.init_pbar()
        fake_feature_list = []
        task1 = self.logger.add_pbar_task('Fake', total=fid_num)
        for batch_idx in range(0, len(indices), self.val_batch_size):
            sub_indices = indices[batch_idx:(batch_idx + self.val_batch_size)]
            batch_size = len(sub_indices)
            if (z is None):
                code = torch.randn(batch_size, self.z_space_dim).cuda()
            else:
                code = z[sub_indices].cuda()
            with torch.no_grad():
                if ('generator_smooth' in self.models):
                    G = self.models['generator_smooth']
                else:
                    G = self.models['generator']
                fake_images = G(code)['image']
                fake_feature_list.append(extract_feature(self.inception_model, fake_images))
            self.logger.update_pbar(task1, (batch_size * self.world_size))
        # Persist this rank's features so rank 0 can gather them below.
        np.save(f'{self.work_dir}/fake_fid_features_{self.rank}.npy', np.concatenate(fake_feature_list, axis=0))
        cached_fid_file = f'{self.work_dir}/real_fid{fid_num}.npy'
        do_real_test = ((not os.path.exists(cached_fid_file)) or ignore_cache)
        if do_real_test:
            real_feature_list = []
            task2 = self.logger.add_pbar_task('Real', total=fid_num)
            for batch_idx in range(0, len(indices), self.val_batch_size):
                sub_indices = indices[batch_idx:(batch_idx + self.val_batch_size)]
                batch_size = len(sub_indices)
                data = next(self.val_loader)
                for key in data:
                    data[key] = data[key][:batch_size].cuda(torch.cuda.current_device(), non_blocking=True)
                with torch.no_grad():
                    real_images = data['image']
                    real_feature_list.append(extract_feature(self.inception_model, real_images))
                self.logger.update_pbar(task2, (batch_size * self.world_size))
            np.save(f'{self.work_dir}/real_fid_features_{self.rank}.npy', np.concatenate(real_feature_list, axis=0))
        dist.barrier()
        if (self.rank != 0):
            return (- 1)
        self.logger.close_pbar()
        # Rank 0: gather and delete the per-rank fake feature files.
        fake_feature_list.clear()
        for rank in range(self.world_size):
            fake_feature_list.append(np.load(f'{self.work_dir}/fake_fid_features_{rank}.npy'))
            os.remove(f'{self.work_dir}/fake_fid_features_{rank}.npy')
        fake_features = np.concatenate(fake_feature_list, axis=0)
        assert ((fake_features.ndim == 2) and (fake_features.shape[0] == fid_num))
        feature_dim = fake_features.shape[1]
        # Undo the round-robin sharding: pad to a multiple of world_size,
        # reshape to (world_size, n, dim), then interleave back to global order.
        pad = (fid_num % self.world_size)
        if pad:
            pad = (self.world_size - pad)
        fake_features = np.pad(fake_features, ((0, pad), (0, 0)))
        fake_features = fake_features.reshape(self.world_size, (- 1), feature_dim)
        fake_features = fake_features.transpose(1, 0, 2)
        fake_features = fake_features.reshape((- 1), feature_dim)[:fid_num]
        if do_real_test:
            real_feature_list.clear()
            for rank in range(self.world_size):
                real_feature_list.append(np.load(f'{self.work_dir}/real_fid_features_{rank}.npy'))
                os.remove(f'{self.work_dir}/real_fid_features_{rank}.npy')
            real_features = np.concatenate(real_feature_list, axis=0)
            assert (real_features.shape == (fid_num, feature_dim))
            # Same de-interleave as for the fake features (reuses `pad`).
            real_features = np.pad(real_features, ((0, pad), (0, 0)))
            real_features = real_features.reshape(self.world_size, (- 1), feature_dim)
            real_features = real_features.transpose(1, 0, 2)
            real_features = real_features.reshape((- 1), feature_dim)[:fid_num]
            np.save(cached_fid_file, real_features)
        else:
            real_features = np.load(cached_fid_file)
            assert (real_features.shape == (fid_num, feature_dim))
        fid_value = compute_fid(fake_features, real_features)
        return fid_value
def main(args):
    """Build a quantized direct index from a TILDEv2 HDF5 index, writing JSON lines.

    For every document, keep the maximum impact score per token, quantize
    the scores uniformly, and write one JSON record per line to
    ``args.output_file``.
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', use_fast=True, cache_dir='./cache')
    # NOTE(review): stop_ids is computed but never used below — confirm
    # whether stop-word filtering was intended here.
    stop_ids = get_stop_ids(tokenizer)
    docids = np.load(os.path.join(args.input_path, 'docids.npy'))
    data = h5py.File(os.path.join(args.input_path, 'tildev2_index.hdf5'), 'r')
    doc_file = data['documents'][:]
    assert len(docids) == len(doc_file)
    direct_index = {}
    max_token_impact = 0
    for i, docid in tqdm(enumerate(docids), desc='Creating direct index.....'):
        token_scores, token_ids = doc_file[i]
        assert len(token_scores) == len(token_ids)
        doc_entry = {}
        direct_index[docid] = doc_entry
        for idx, token_id in enumerate(token_ids):
            tok = tokenizer.convert_ids_to_tokens(int(token_id))
            # Keep only the maximum impact score observed for each token.
            if tok not in doc_entry or token_scores[idx] > doc_entry[tok]:
                doc_entry[tok] = token_scores[idx]
            max_token_impact = max(max_token_impact, token_scores[idx])
    del doc_file  # release the in-memory copy of the HDF5 dataset
    quantizer = uniform_quantizer(args.quantize_bits, max_token_impact)
    # Context manager ensures the output file is closed even on error
    # (the original used a bare open()/close() pair).
    with open(args.output_file, 'w', encoding='utf-8') as out_file:
        for i, docid in tqdm(enumerate(docids), desc='Quantizing and writing json file....'):
            for term in direct_index[docid]:
                direct_index[docid][term] = quantizer.quantize(direct_index[docid][term])
            out_file.write(generate_json(docid, direct_index[docid]))
            out_file.write('\n')
class DCGAN_G_nobn(nn.Module):
    """DCGAN generator without batch normalization.

    Upsamples a latent vector of dimension ``nz`` (shape (N, nz, 1, 1)) to
    an ``nc``-channel image of spatial size ``isize`` with transposed
    convolutions, emitting values in [-1, 1] via a final Tanh.
    """

    def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):
        super(DCGAN_G_nobn, self).__init__()
        self.ngpu = ngpu
        assert isize % 16 == 0, 'isize has to be a multiple of 16'
        # Grow the channel count so the 4x4 starting block reaches `isize`
        # after repeated 2x upsampling steps.
        cngf, tisize = ngf // 2, 4
        while tisize != isize:
            cngf = cngf * 2
            tisize = tisize * 2
        main = nn.Sequential()
        # 1x1 latent -> 4x4 feature map.
        main.add_module('initial:{0}-{1}:convt'.format(nz, cngf), nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False))
        main.add_module('initial:{0}:relu'.format(cngf), nn.ReLU(True))
        csize = 4
        # Pyramid of stride-2 transposed convs: double spatial size, halve
        # channels, until one upsampling step remains to reach `isize`.
        while csize < isize // 2:
            main.add_module('pyramid:{0}-{1}:convt'.format(cngf, cngf // 2), nn.ConvTranspose2d(cngf, cngf // 2, 4, 2, 1, bias=False))
            main.add_module('pyramid:{0}:relu'.format(cngf // 2), nn.ReLU(True))
            cngf = cngf // 2
            csize = csize * 2
        # Optional extra 3x3 conv layers at constant resolution and width.
        for t in range(n_extra_layers):
            main.add_module('extra-layers-{0}:{1}:conv'.format(t, cngf), nn.Conv2d(cngf, cngf, 3, 1, 1, bias=False))
            main.add_module('extra-layers-{0}:{1}:relu'.format(t, cngf), nn.ReLU(True))
        main.add_module('final:{0}-{1}:convt'.format(cngf, nc), nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
        # BUG FIX: the final activation was nn.Softmax() although the module
        # is registered as ':tanh'; DCGAN generators emit images in [-1, 1]
        # through Tanh, so use nn.Tanh() as the label indicates.
        main.add_module('final:{0}:tanh'.format(nc), nn.Tanh())
        self.main = main

    def forward(self, input):
        # Split the batch across GPUs only for CUDA inputs with ngpu > 1.
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        return output
class Opinion():
    """A single opinion annotation: who (source) expresses what polarity,
    with what intensity, about what (target)."""

    def __init__(self, source=None, target=None, polar_expression=None, polarity=None, intensity=None):
        self.source = source
        self.target = target
        self.polar_expression = polar_expression
        self.polarity = polarity
        self.intensity = intensity

    def normalize_polarity(self, polarity):
        """Map a raw polarity label onto {'Positive', 'Negative', 'Neutral'}.

        Matching is case-insensitive and substring-based; unknown labels
        return None.  Mixed ('both') polarity collapses to Neutral.
        """
        polarity = polarity.lower()
        if 'positive' in polarity:
            return 'Positive'
        if 'negative' in polarity:
            return 'Negative'
        if 'neutral' in polarity:
            return 'Neutral'
        if 'both' in polarity:
            return 'Neutral'

    def normalize_intensity(self, intensity):
        """Map a raw intensity label onto {'Strong', 'Weak', 'Average'}.

        BUG FIX: the original lower-cased polarity but not intensity, so only
        the literal capitalized 'Standard' matched while all other checks
        were lowercase.  Matching is now uniformly case-insensitive (a strict
        generalization: every previously matching label still matches).
        Unknown labels return None.
        """
        intensity = intensity.lower()
        if 'high' in intensity or 'extreme' in intensity:
            return 'Strong'
        if 'low' in intensity:
            return 'Weak'
        if 'medium' in intensity or 'neutral' in intensity or 'standard' in intensity:
            return 'Average'

    def to_dict(self):
        """Serialize the opinion, normalizing the polarity/intensity labels.

        Requires source, target and polar_expression to expose to_dict().
        """
        return {
            'Source': self.source.to_dict(),
            'Target': self.target.to_dict(),
            'Polar_expression': self.polar_expression.to_dict(),
            'Polarity': self.normalize_polarity(self.polarity),
            'Intensity': self.normalize_intensity(self.intensity),
        }
def validate_data(run_id: int, flow_name: str, data_paths: dict):
    """Run the Great Expectations 'intent_checkpoint' over each parquet asset.

    Each entry of ``data_paths`` maps a data-asset name to a parquet path;
    one checkpoint run is executed per asset, then the data docs are
    rebuilt and opened.
    """
    context = ge.data_context.DataContext()
    for data_name, data_path in data_paths.items():
        frame = pd.read_parquet(data_path, engine='pyarrow')
        batch_request = {
            'datasource_name': 's3_parquet',
            'data_connector_name': 'runtime_data_connector',
            'data_asset_name': data_name,
            'runtime_parameters': {'batch_data': frame},
            'batch_identifiers': {'run_id': run_id, 'data_name': data_name},
        }
        context.run_checkpoint(
            checkpoint_name='intent_checkpoint',
            batch_request=batch_request,
            run_name='-'.join([flow_name, str(run_id), data_name]),
            run_time=datetime.utcnow(),
            expectation_suite_name=data_name,
        )
    context.build_data_docs()
    context.open_data_docs()
def write_schema_locs(schema_loc, name, dataset_loc):
    """Write one copy of ``schema_loc`` per line of the dataset's encode file.

    Reads ``<dataset_loc>/<name>/<name>_encode.txt`` to count its lines,
    then writes ``schema_loc`` that many times (one per line) to
    ``<dataset_loc>/<name>/<name>_schema_locations.txt``.
    """
    encode_path = os.path.join(dataset_loc, '%s/%s_encode.txt' % (name, name))
    with open(encode_path, 'r') as src:
        n_lines = len(src.readlines())
    out_path = os.path.join(dataset_loc, '%s/%s_schema_locations.txt' % (name, name))
    with open(out_path, 'w') as dst:
        dst.write((schema_loc + '\n') * n_lines)
class CWRUSlice(object):
    """CWRU bearing-fault dataset wrapper producing sliced-signal datasets."""

    # Ten fault categories, single-channel vibration signals.
    num_classes = 10
    inputchannel = 1

    def __init__(self, data_dir, normlizetype):
        self.data_dir = data_dir
        self.normlizetype = normlizetype

    def data_preprare(self, test=False):
        """Load (or build and cache) the data and return dataset object(s).

        When ``data_dir`` points at a file (basename contains an extension),
        the pickled data is loaded; otherwise the data is built from raw
        files and cached as 'CWRUSlice.pkl'.  Returns a test dataset, or an
        80/20 stratified (train, val) pair.
        """
        points_to_file = len(os.path.basename(self.data_dir).split('.')) == 2
        if points_to_file:
            with open(self.data_dir, 'rb') as fo:
                list_data = pickle.load(fo, encoding='bytes')
        else:
            list_data = get_files(self.data_dir, test)
            # Cache the freshly built data for subsequent runs.
            with open(os.path.join(self.data_dir, 'CWRUSlice.pkl'), 'wb') as fo:
                pickle.dump(list_data, fo)
        if test:
            return dataset(list_data=list_data, test=True, transform=None)
        data_pd = pd.DataFrame({'data': list_data[0], 'label': list_data[1]})
        train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd['label'])
        train_dataset = dataset(list_data=train_pd, transform=data_transforms('train', self.normlizetype))
        val_dataset = dataset(list_data=val_pd, transform=data_transforms('val', self.normlizetype))
        return (train_dataset, val_dataset)
def _conv_layer(input, weights, bias, pad, stride, i, ops, net):
    """Build an explicitly-padded VALID conv2d + bias layer, registering variables.

    The weight and bias variables are appended to ``ops`` and stored in
    ``net`` under 'weights<i>' / 'b<i>'.  Returns the biased convolution
    output tensor.
    """
    pad = pad[0]
    stride = stride[0]
    # Explicit asymmetric padding followed by a VALID convolution.
    padded = tf.pad(input, [[0, 0], [pad[0], pad[1]], [pad[2], pad[3]], [0, 0]], 'CONSTANT')
    idx = str(i)
    w = tf.Variable(weights, name='w' + idx, dtype='float32')
    b = tf.Variable(bias, name='bias' + idx, dtype='float32')
    ops.extend([w, b])
    net['weights' + idx] = w
    net['b' + idx] = b
    conv = tf.nn.conv2d(padded, w, strides=[1, stride[0], stride[1], 1], padding='VALID', name='conv' + idx)
    return tf.nn.bias_add(conv, b, name='add' + idx)
class MysqlDb(object):
    """Thin MySQL helper bound to the connection settings in global_settings.

    Uses a DictCursor, so fetched rows are dictionaries keyed by column name.
    """

    def __init__(self):
        # Connection settings come from the module-level global_settings.
        self._host = global_settings.mysql_host
        self._port = global_settings.mysql_port
        self._db = global_settings.mysql_db
        self._user = global_settings.mysql_user
        self._passwd = global_settings.mysql_password
        self._charset = 'utf8'
        self._connect()

    def _connect(self):
        # Open the connection and cache a dict-style cursor.
        self._conn = connect(host=self._host, port=self._port, user=self._user, passwd=self._passwd, db=self._db, charset=self._charset, cursorclass=cursors.DictCursor)
        self._cursor = self._conn.cursor()

    def _set_db(self, db):
        # Switch databases by fully closing and reconnecting.
        self._close()
        self._db = db
        self._connect()

    def _close(self):
        self._cursor.close()
        self._conn.close()

    def transaction(self):
        # NOTE(review): the bare `yield` makes this a generator function, so
        # calling transaction() only creates a generator object — it looks
        # intended for @contextlib.contextmanager, which is missing here.
        # Commits on clean exit; rolls back and re-raises on error.
        try:
            (yield)
            self._conn.commit()
        except Exception as e:
            self._conn.rollback()
            raise e

    def fetch_one(self, sql, params=None):
        """Run a query and return the first row (dict) or None."""
        # NOTE(review): escape_string() is applied to the *statement*, not to
        # values; it can corrupt SQL containing quoted literals and does not
        # add injection protection — the parameterized `params` already does.
        escape_sql = escape_string(sql)
        print(f'escape sql: {escape_sql}')
        self._cursor.execute(escape_sql, params)
        return self._cursor.fetchone()

    def fetch_all(self, sql, params=None):
        """Run a query and return all rows as a list of dicts."""
        # NOTE(review): same escape_string() concern as in fetch_one().
        escape_sql = escape_string(sql)
        print(f'escape sql: {escape_sql}')
        self._cursor.execute(escape_sql, params)
        return self._cursor.fetchall()

    def insert(self, sql, params):
        return self._edit(sql, params)

    def update(self, sql, params):
        return self._edit(sql, params)

    def delete(self, sql, params):
        return self._edit(sql, params)

    def _edit(self, sql, params):
        # Execute a write statement; returns the affected row count.
        # NOTE(review): no commit here — callers must commit (e.g. via
        # transaction()) or changes may be lost on close.
        return self._cursor.execute(sql, params)
def get_extensions():
    """Collect the C++/CUDA extension modules for a setuptools build.

    Uses CUDAExtension when CUDA is usable (or FORCE_CUDA=1 is set),
    otherwise a CPU-only CppExtension, and appends the NMS extensions.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, 'hit/csrc')
    main_file = glob.glob(os.path.join(extensions_dir, '*.cpp'))
    source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp'))
    source_cuda = glob.glob(os.path.join(extensions_dir, 'cuda', '*.cu'))
    sources = main_file + source_cpu
    extension = CppExtension
    extra_compile_args = {'cxx': []}
    define_macros = []
    cuda_usable = torch.cuda.is_available() and CUDA_HOME is not None
    if cuda_usable or os.getenv('FORCE_CUDA', '0') == '1':
        extension = CUDAExtension
        sources = sources + source_cuda
        define_macros = define_macros + [('WITH_CUDA', None)]
        # Disable half-precision operator shortcuts for broad GPU support.
        extra_compile_args['nvcc'] = [
            '-O3',
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ]
    sources = [os.path.join(extensions_dir, s) for s in sources]
    include_dirs = [extensions_dir]
    ext_modules = [
        extension('hit._custom_cuda_ext', sources, include_dirs=include_dirs, define_macros=define_macros, extra_compile_args=extra_compile_args),
        make_cython_ext(name='soft_nms_cpu', module='detector.nms', sources=['src/soft_nms_cpu.pyx']),
        make_cuda_ext(name='nms_cpu', module='detector.nms', sources=['src/nms_cpu.cpp']),
        make_cuda_ext(name='nms_cuda', module='detector.nms', sources=['src/nms_cuda.cpp', 'src/nms_kernel.cu']),
    ]
    return ext_modules
class AllreduceAutoScalerTest(unittest.TestCase):
    """Tests for the allreduce-training auto-scaler."""

    def setUp(self) -> None:
        # Replace the real Kubernetes client with a mock for all tests.
        mock_k8s_client()

    def test_execute_job_optimization_plan(self):
        params = MockK8sAllreduceJobArgs()
        params.initilize()  # NOTE(review): method-name typo belongs to the mock's API
        manager = create_job_manager(params, SpeedMonitor())
        manager._init_nodes()
        # Mark every worker as alive so the scaler counts all of them.
        for worker in manager._job_nodes[NodeType.WORKER].values():
            worker.status = NodeStatus.RUNNING
        # Avoid touching any real scaling backend.
        manager._scaler.scale = mock.MagicMock(return_value=True)
        auto_scaler = AllreduceTrainingAutoScaler(manager._job_resource, manager._job_nodes, manager._job_optimizer, manager._speed_monitor, manager._worker_manager, manager._scaler)
        alive_num = auto_scaler._get_alive_worker_num()
        # The mocked job args configure 16 workers, all set RUNNING above.
        self.assertEqual(alive_num, 16)
class SingleFeatureExtractor():
    """Collects the forward-pass output of one named UNet sub-module.

    Registers a forward hook on ``model._<feature_name>`` and exposes the
    captured activations.  Usable as a context manager: bind the hook on
    enter, remove it on exit.
    """

    def __init__(self, model: UNet, feature_name: str) -> None:
        super().__init__()
        self._model = model
        self._feature_name = feature_name
        # The requested feature must be a declared architecture element.
        assert (self._feature_name in model.arch_elements), self._feature_name
        self._feature_extractor: _FeatureCollector = None
        self._hook_handler = None
        self.__bind_done__ = False

    def bind(self):
        """Attach the forward hook that records the feature."""
        logger.opt(depth=3).trace(f'Binding {self.__class__.__name__}{self._feature_name}')
        model = self._model
        extractor = _FeatureCollector()
        # Sub-modules are stored with a leading underscore on the model.
        handler = getattr(model, ('_' + self._feature_name)).register_forward_hook(extractor)
        self._feature_extractor = extractor
        self._hook_handler = handler
        self.__bind_done__ = True

    def remove(self):
        """Detach the forward hook."""
        logger.opt(depth=3).trace(f'Remove {self.__class__.__name__}{self._feature_name}')
        self._hook_handler.remove()
        self.__bind_done__ = False

    def __enter__(self):
        self.bind()
        return self

    def __exit__(self, *args, **kwargs):
        self.remove()

    def clear(self):
        """Drop any recorded features."""
        self._feature_extractor.clear()

    def feature(self):
        """Return all recorded activations concatenated along dim 0.

        Raises:
            RuntimeError: when nothing has been captured yet.
        """
        collected_feature_dict = self._feature_extractor.feature
        if (len(collected_feature_dict) > 0):
            return torch.cat(list(collected_feature_dict.values()), dim=0)
        raise RuntimeError('no feature has been recorded.')

    def set_enable(self, enable=True):
        """Turn feature recording on or off."""
        self._feature_extractor.set_enable(enable=enable)

    def enable_register(self, enable=True):
        # NOTE(review): the bare `yield` makes this a generator function; it
        # looks intended as a context manager that temporarily sets the
        # recording state and restores it afterwards — the
        # @contextlib.contextmanager decorator appears to be missing.
        prev_state = self._feature_extractor.enable
        logger.opt(depth=3).trace(f"{('enable' if enable else 'disable')} recording")
        self.set_enable(enable)
        (yield)
        logger.opt(depth=3).trace(f'restore previous recording status')
        self.set_enable(prev_state)
# NOTE(review): the bare `_config` below looks like a sacred-style config
# decorator that lost its '@' during extraction — confirm against the
# original source.
_config
def model_lifelong_finetune_cifar():
    """Config: lifelong side-tuning (finetune variant) on CIFAR.

    Defines the experiment configuration dict in local scope for the config
    decorator to capture; `n_channels_out` is deleted so it does not leak
    into the captured config.
    """
    n_channels_out = 3
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'GenericSidetuneNetwork', 'base_kwargs': {'n_channels_in': 3, 'n_channels_out': 8, 'base_class': 'ResnetiCifar44NoLinear', 'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', 'base_kwargs': {'eval_only': False}, 'use_baked_encoding': False, 'side_class': 'FCN4Reshaped', 'side_kwargs': {'eval_only': False}, 'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth'}, 'use_baked_encoding': False, 'transfer_class': 'nn.Linear', 'transfer_kwargs': {'in_features': 64, 'out_features': 10}}}}
    del n_channels_out
def initialise_structure_sim():
    """Return the default parameters for the 'structure' simulation.

    Returns:
        Tuple of (positive magnetization, negative magnetization,
        positive-cube width, negative-cube width, simulation name).
    """
    magnetization_pos, magnetization_neg = 1.0, -1.0
    width_pos = width_neg = 200
    return (magnetization_pos, magnetization_neg, width_pos, width_neg, 'structure')
def cosine_distance(a, b):
    """Pairwise cosine distance (1 - cosine similarity) between row vectors."""
    a_unit = tf.nn.l2_normalize(a, axis=1)
    b_unit = tf.nn.l2_normalize(b, axis=1)
    # adjoint_b=True multiplies by the transpose, giving an (N, M) similarity matrix.
    similarity = tf.matmul(a_unit, b_unit, adjoint_b=True)
    return 1 - similarity
def test_freq_in_Gyr():
    """freq_in_Gyr should scale linearly with vo and inversely with ro."""
    vofid, rofid = 200.0, 8.0
    tol = 10.0 ** (-10.0)
    ratio_v = (2.0 * conversion.freq_in_Gyr(vofid, rofid)) / conversion.freq_in_Gyr(2.0 * vofid, rofid)
    assert numpy.fabs(ratio_v - 1.0) < tol, 'freq_in_Gyr did not work as expected'
    ratio_r = (0.5 * conversion.freq_in_Gyr(vofid, rofid)) / conversion.freq_in_Gyr(vofid, 2 * rofid)
    assert numpy.fabs(ratio_r - 1.0) < tol, 'freq_in_Gyr did not work as expected'
    return None
def dedup(lst):
    """Return a copy of ``lst`` with falsy items dropped and duplicates removed.

    Order of first occurrences is preserved.  Items need not be hashable:
    membership is checked against the output list itself.
    """
    result = []
    for element in lst:
        if element and element not in result:
            result.append(element)
    return result
class FaceBox(nn.Module):
    """FaceBox face-detector backbone with a multibox prediction head.

    Produces location and confidence predictions from three feature maps of
    decreasing resolution (after inception3, conv3_2 and conv4_2).
    """
    # Side length of the square input image the network is designed for.
    input_size = 1024

    def __init__(self):
        super(FaceBox, self).__init__()
        # Rapidly Digested Convolutional Layers: large-kernel, large-stride
        # convs shrink the input early.  conv1 outputs 24 channels; the CReLU
        # concat in forward() doubles that to 48, matching conv2's input.
        self.conv1 = nn.Conv2d(3, 24, kernel_size=7, stride=4, padding=3)
        self.conv1 = init_model(self.conv1)
        self.bn1 = nn.BatchNorm2d(24)
        self.bn1 = init_model(self.bn1)
        self.conv2 = nn.Conv2d(48, 64, kernel_size=5, stride=2, padding=2)
        self.conv2 = init_model(self.conv2)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn2 = init_model(self.bn2)
        # Multiple Scale Convolutional Layers.
        self.inception1 = Inception()
        self.inception2 = Inception()
        self.inception3 = Inception()
        self.conv3_1 = conv_bn_relu(128, 128, kernel_size=1)
        self.conv3_2 = conv_bn_relu(128, 256, kernel_size=3, stride=2, padding=1)
        self.conv4_1 = conv_bn_relu(256, 128, kernel_size=1)
        self.conv4_2 = conv_bn_relu(128, 256, kernel_size=3, stride=2, padding=1)
        self.multilbox = MultiBoxLayer()

    def forward(self, x):
        # Feature maps fed to the multibox head.
        hs = []
        x = self.conv1(x)
        x = self.bn1(x)
        # CReLU: concatenate relu(x) and relu(-x) to double the channel count.
        # NOTE(review): the outer F.relu is redundant (its input is already
        # non-negative) but is kept as written.
        x = F.relu(torch.cat((F.relu(x), F.relu((- x))), 1))
        x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
        x = self.conv2(x)
        x = self.bn2(x)
        x = F.relu(torch.cat((F.relu(x), F.relu((- x))), 1))
        x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
        x = self.inception1(x)
        x = self.inception2(x)
        x = self.inception3(x)
        hs.append(x)
        x = self.conv3_1(x)
        x = self.conv3_2(x)
        hs.append(x)
        x = self.conv4_1(x)
        x = self.conv4_2(x)
        hs.append(x)
        (loc_preds, conf_preds) = self.multilbox(hs)
        return (loc_preds, conf_preds)
def parse_args_and_arch(parser: argparse.ArgumentParser, input_args: List[str]=None, parse_known: bool=False, suppress_defaults: bool=False, modify_parser: Optional[Callable[([argparse.ArgumentParser], None)]]=None):
    """Parse fairseq command-line args, adding arch/task/registry-specific options.

    Performs a two-pass parse: a first partial parse discovers the chosen
    architecture, task and registry components, whose ``add_args`` hooks are
    then installed on the parser before the final parse.  Post-processing
    normalizes derived flags (validation batch sizes, fp16/bf16/tpu, seed).
    """
    if suppress_defaults:
        # Parse twice: once with defaults to build the full namespace, then
        # with every default set to None so only explicitly-given values survive.
        args = parse_args_and_arch(parser, input_args=input_args, parse_known=parse_known, suppress_defaults=False)
        suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
        suppressed_parser.set_defaults(**{k: None for (k, v) in vars(args).items()})
        args = suppressed_parser.parse_args(input_args)
        return argparse.Namespace(**{k: v for (k, v) in vars(args).items() if (v is not None)})
    from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
    # Pre-parse --user-dir so user-defined modules can register themselves
    # before the main parser runs.
    usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    usr_parser.add_argument('--user-dir', default=None)
    (usr_args, _) = usr_parser.parse_known_args(input_args)
    utils.import_user_module(usr_args)
    if (modify_parser is not None):
        modify_parser(parser)
    # First pass: discover which model/task/components were selected.
    (args, _) = parser.parse_known_args(input_args)
    if hasattr(args, 'arch'):
        model_specific_group = parser.add_argument_group('Model-specific configuration', argument_default=argparse.SUPPRESS)
        if (args.arch in ARCH_MODEL_REGISTRY):
            ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
        elif (args.arch in MODEL_REGISTRY):
            MODEL_REGISTRY[args.arch].add_args(model_specific_group)
        else:
            raise RuntimeError()
    if hasattr(args, 'task'):
        from fairseq.tasks import TASK_REGISTRY
        TASK_REGISTRY[args.task].add_args(parser)
    if getattr(args, 'use_bmuf', False):
        # BMUF contributes its own optimizer-level arguments.
        from fairseq.optim.bmuf import FairseqBMUF
        FairseqBMUF.add_args(parser)
    # Install args for every selected registry component (optimizer, lr
    # scheduler, criterion, ...), via add_args or its config dataclass.
    from fairseq.registry import REGISTRIES
    for (registry_name, REGISTRY) in REGISTRIES.items():
        choice = getattr(args, registry_name, None)
        if (choice is not None):
            cls = REGISTRY['registry'][choice]
            if hasattr(cls, 'add_args'):
                cls.add_args(parser)
            elif hasattr(cls, '__dataclass'):
                gen_parser_from_dataclass(parser, cls.__dataclass())
    if (modify_parser is not None):
        modify_parser(parser)
    # Second (final) pass with the fully-populated parser.
    if parse_known:
        (args, extra) = parser.parse_known_args(input_args)
    else:
        args = parser.parse_args(input_args)
        extra = None
    # Validation sizes default to their training counterparts.
    if ((hasattr(args, 'batch_size_valid') and (args.batch_size_valid is None)) or (not hasattr(args, 'batch_size_valid'))):
        args.batch_size_valid = args.batch_size
    if (hasattr(args, 'max_tokens_valid') and (args.max_tokens_valid is None)):
        args.max_tokens_valid = args.max_tokens
    # Memory-efficient variants imply the corresponding precision flags.
    if getattr(args, 'memory_efficient_fp16', False):
        args.fp16 = True
    if getattr(args, 'memory_efficient_bf16', False):
        args.bf16 = True
    args.tpu = getattr(args, 'tpu', False)
    args.bf16 = getattr(args, 'bf16', False)
    if args.bf16:
        args.tpu = True
    if (args.tpu and args.fp16):
        raise ValueError('Cannot combine --fp16 and --tpu, use --bf16 on TPUs')
    # Track whether the seed was user-provided (affects reseeding behavior).
    if (getattr(args, 'seed', None) is None):
        args.seed = 1
        args.no_seed_provided = True
    else:
        args.no_seed_provided = False
    if (getattr(args, 'update_epoch_batch_itr', None) is None):
        if hasattr(args, 'grouped_shuffling'):
            args.update_epoch_batch_itr = args.grouped_shuffling
        else:
            args.grouped_shuffling = False
            args.update_epoch_batch_itr = False
    # Apply architecture-level configuration defaults last.
    if (hasattr(args, 'arch') and (args.arch in ARCH_CONFIG_REGISTRY)):
        ARCH_CONFIG_REGISTRY[args.arch](args)
    if parse_known:
        return (args, extra)
    else:
        return args
def validate_onnx_model(platform, model_file, input_file, mace_out_file, input_names, input_shapes, input_data_formats, output_names, output_shapes, output_data_formats, validation_threshold, input_data_types, backend, log_file):
    """Validate MACE outputs against an onnxruntime run of the same model.

    Loads the ONNX model, exposes any requested outputs that are not
    already graph outputs, feeds it the same inputs (transposed NHWC->NCHW
    where needed), and compares each output tensor against the MACE result
    using ``validation_threshold``.
    """
    print('validate on onnxruntime.')
    import onnx
    import onnxruntime as onnxrt
    if (not os.path.isfile(model_file)):
        util.MaceLogger.error(VALIDATION_MODULE, (("Input graph file '" + model_file) + "' does not exist!"))
    model = onnx.load(model_file)
    remove_initializer_from_input(model)
    # Ensure every requested output name is an actual graph output so
    # onnxruntime will return it from sess.run().
    model_outputs = set()
    for output in model.graph.output:
        model_outputs.add(output.name)
    for output_name in output_names:
        if (output_name not in model_outputs):
            layer_value_info = onnx.helper.ValueInfoProto()
            layer_value_info.name = output_name
            model.graph.output.append(layer_value_info)
    # Build the feed dict, converting NHWC inputs to ONNX's NCHW layout.
    input_dict = {}
    for i in range(len(input_names)):
        input_value = load_data(util.formatted_file_name(input_file, input_names[i]), input_data_types[i])
        input_value = input_value.reshape(input_shapes[i])
        if ((input_data_formats[i] == DataFormat.NHWC) and (len(input_shapes[i]) == 4)):
            input_value = input_value.transpose((0, 3, 1, 2))
        input_dict[input_names[i]] = input_value
    sess = onnxrt.InferenceSession(model.SerializeToString())
    output_values = sess.run(output_names, input_dict)
    # Compare each onnxruntime output with the corresponding MACE output.
    for i in range(len(output_names)):
        value = output_values[i].flatten()
        output_file_name = util.formatted_file_name(mace_out_file, output_names[i])
        mace_out_value = load_data(output_file_name)
        (mace_out_value, real_output_shape, real_output_data_format) = get_real_out_value_shape_df(platform, mace_out_value, output_shapes[i], output_data_formats[i])
        compare_output(output_names[i], mace_out_value, value, validation_threshold, log_file, real_output_shape, real_output_data_format)
def LotsOfAgents(random_seed):
    """Flatland config: a 40x60 map with many (20) agents across 6 cities."""
    return FlatlandConfig(
        height=40,
        width=60,
        n_agents=20,
        n_cities=6,
        grid_distribution_of_cities=False,
        max_rails_between_cities=2,
        max_rail_in_cities=4,
        observation_builder_config=None,
        reward_config=None,
        malfunction_rate=1.0 / 200,
        random_seed=random_seed,
        greedy=True,
    )
def kernel_divergence(a, x, b, y, k=('energy', None), heatmaps=None, **params):
    """Kernel (MMD-type) divergence between weighted point clouds (a, x) and (b, y).

    BUG FIX / RECONSTRUCTION: the extracted source was syntactically invalid
    (`def kernel_divergence(, x, , y, ...)` and `conv(k, x, x, )`): the two
    measure-weight parameters — most likely non-ASCII Greek names such as
    alpha/beta — were dropped by extraction.  They are restored here as
    ``a`` and ``b`` following the standard kernel-divergence formula
    0.5*<a, k*a - 2 k*b> + 0.5*<b, k*b>.  TODO: confirm against the
    original source.
    """
    ma_x = conv(k, x, x, a)  # (k * a) evaluated at the points x
    mb_x = conv(k, x, y, b)  # (k * b) evaluated at the points x
    mb_y = conv(k, y, y, b)  # (k * b) evaluated at the points y
    cost = 0.5 * (scal(a, ma_x - (2 * mb_x)) + scal(b, mb_y))
    if heatmaps is None:
        return cost
    elif heatmaps == True:
        global grid
        grid = grid.type_as(x)
        # Difference of the two kernel potentials sampled on the display grid.
        heats = Heatmaps(conv(k, grid, x, a) - conv(k, grid, y, b))
        return (cost, heats)
    else:
        return (cost, None)
def euclidean_distance(a, b):
    """Mean squared difference between every row of ``a`` and every row of ``b``.

    Broadcasts an (N, D) tensor against an (M, D) tensor and returns an
    (N, M) matrix whose (i, j) entry is mean_d (a[i, d] - b[j, d])^2.
    """
    n = tf.shape(a)[0]
    m = tf.shape(b)[0]
    # Expand both tensors to (N, M, D) so they subtract elementwise.
    a_tiled = tf.tile(tf.expand_dims(a, axis=1), (1, m, 1))
    b_tiled = tf.tile(tf.expand_dims(b, axis=0), (n, 1, 1))
    return tf.reduce_mean(tf.square(a_tiled - b_tiled), axis=2)
def syntactic_feature_processor(sentence_objs, feature, **kwArgs):
 """Dispatch a syntactic feature to its handler class by name.

 Looks up the class whose name is ``feature.title()`` in this module's
 globals (e.g. feature 'punctuation_rate' -> class Punctuation_Rate),
 instantiates it with the sentence objects and returns handle()'s result.
 """
 handler_cls = globals()[feature.title()]
 return handler_cls(sentence_objs).handle()
class simulator():
 """Lookup-table simulator: precomputes vlmop2_minus on an 11x11 grid over
 [-2, 2]^2 and returns the cached objective value for a chosen point."""

 def __init__(self):
  axis = np.linspace((- 2), 2, 11)
  # Cartesian product of the axis with itself -> the full 2-D grid.
  self.X = np.array([point for point in product(axis, axis)])
  self.t = vlmop2_minus(self.X)

 def __call__(self, action):
  """Return the precomputed objective for grid index `action`."""
  return self.t[action]
# NOTE(review): restored the registry decorator — the source contained the
# truncated statement `_module()`, which matches MMSegmentation's
# `@DATASETS.register_module()` convention. Confirm 'DATASETS' is the
# registry name imported at the top of this file.
@DATASETS.register_module()
class HRFDataset(CustomDataset):
 """HRF retinal-vessel segmentation dataset.

 Binary segmentation (background vs vessel); both images and annotation
 maps use the '.png' suffix, and label 0 is kept (reduce_zero_label=False).
 """
 CLASSES = ('background', 'vessel')
 PALETTE = [[120, 120, 120], [6, 230, 230]]

 def __init__(self, **kwargs):
  super(HRFDataset, self).__init__(img_suffix='.png', seg_map_suffix='.png', reduce_zero_label=False, **kwargs)
  # Fail fast if the configured image directory does not exist.
  assert self.file_client.exists(self.img_dir)
class FilenameIterator(object):
 """Thread-safe, endlessly cycling batch iterator over the basenames of
 the files in *dirname* (the last 4 characters — e.g. '.jpg' — stripped).

 Each call to next()/__next__() returns up to ``batch_size`` basenames and
 advances the cursor; the cursor wraps to 0 whenever a short batch is hit.
 """

 def __init__(self, dirname, batch_size):
  self.dirname = dirname
  self.batch_size = batch_size
  self.lock = threading.Lock()
  # Set comprehension dedupes basenames; filename[:-4] assumes a
  # 3-character extension preceded by a dot.
  self.files = list({filename[:(- 4)] for filename in os.listdir(dirname)})
  self.i = 0

 def __iter__(self):
  return self

 def next(self):
  """Return the next batch of basenames, wrapping around at the end."""
  with self.lock:
   if (self.i == len(self.files)):
    self.i = 0
   batch = self.files[self.i:(self.i + self.batch_size)]
   if (len(batch) < self.batch_size):
    # Partial batch: restart from the beginning on the next call.
    self.i = 0
   else:
    self.i += self.batch_size
   return batch

 # Bug fix: Python 3's iteration protocol calls __next__, not next();
 # without this alias `for batch in iterator` raised TypeError.
 __next__ = next
class TurtlebotNode(object):
 """ROS driver node for a Create/Roomba-based TurtleBot.

 Opens the serial connection to the robot, exposes velocity-command
 subscribers and odometry/sensor publishers, and runs the main sense →
 publish → drive loop in spin().
 """
 # Consecutive sensor-read failures tolerated before re-raising DriverError.
 _SENSOR_READ_RETRY_COUNT = 5
 def __init__(self, default_port='/dev/ttyUSB0', default_update_rate=30.0):
  """Initialize the ROS node, parameters, pub/sub and robot state.

  Args:
   default_port: serial port used when the ~port param is unset.
   default_update_rate: loop rate (Hz) used when ~update_rate is unset.
  """
  self.default_port = default_port
  self.default_update_rate = default_update_rate
  self.robot = Turtlebot()
  self.sensor_handler = None
  self.sensor_state = TurtlebotSensorState()
  # Latest pending velocity command (wheel-speed tuple), or None.
  self.req_cmd_vel = None
  rospy.init_node('turtlebot')
  self._init_params()
  self._init_pubsub()
  # Integrated 2-D odometry pose (x, y, theta).
  self._pos2d = Pose2D()
  self._diagnostics = TurtlebotDiagnostics()
  if self.has_gyro:
   # Imported lazily so gyro-less robots do not need the module.
   from create_node.gyro import TurtlebotGyro
   self._gyro = TurtlebotGyro()
  else:
   self._gyro = None
  dynamic_reconfigure.server.Server(TurtleBotConfig, self.reconfigure)
 def start(self):
  """Open the serial connection (retrying until shutdown), then create
  the sensor handler and put the robot into safe/control mode."""
  log_once = True
  while (not rospy.is_shutdown()):
   try:
    self.robot.start(self.port, robot_types.ROBOT_TYPES[self.robot_type].baudrate)
    break
   except serial.serialutil.SerialException as ex:
    # NOTE(review): `ex.message` is a Python 2 idiom; under Python 3
    # this attribute does not exist — confirm the target interpreter.
    msg = ('Failed to open port %s. Error: %s Please make sure the Create cable is plugged into the computer. \n' % (self.port, ex.message))
    self._diagnostics.node_status(msg, 'error')
    if log_once:
     # Log loudly once; subsequent retries only write to stderr.
     log_once = False
     rospy.logerr(msg)
    else:
     sys.stderr.write(msg)
    time.sleep(3.0)
  self.sensor_handler = robot_types.ROBOT_TYPES[self.robot_type].sensor_handler(self.robot)
  self.robot.safe = True
  if rospy.get_param('~bonus', False):
   bonus(self.robot)
  self.robot.control()
  # Signal to external tooling that the robot is connected.
  with open(connected_file(), 'w') as f:
   f.write('1')
  s = TurtlebotSensorState()
  try:
   # Prime one sensor read; a failure here is non-fatal best-effort.
   self.sense(s)
  except Exception:
   pass
 def _init_params(self):
  """Read all node parameters from the ROS parameter server."""
  self.port = rospy.get_param('~port', self.default_port)
  self.robot_type = rospy.get_param('~robot_type', 'create')
  self.update_rate = rospy.get_param('~update_rate', self.default_update_rate)
  self.drive_mode = rospy.get_param('~drive_mode', 'twist')
  self.has_gyro = rospy.get_param('~has_gyro', True)
  self.odom_angular_scale_correction = rospy.get_param('~odom_angular_scale_correction', 1.0)
  self.odom_linear_scale_correction = rospy.get_param('~odom_linear_scale_correction', 1.0)
  self.cmd_vel_timeout = rospy.Duration(rospy.get_param('~cmd_vel_timeout', 0.6))
  self.stop_motors_on_bump = rospy.get_param('~stop_motors_on_bump', True)
  self.min_abs_yaw_vel = rospy.get_param('~min_abs_yaw_vel', None)
  self.max_abs_yaw_vel = rospy.get_param('~max_abs_yaw_vel', None)
  self.publish_tf = rospy.get_param('~publish_tf', False)
  self.odom_frame = rospy.get_param('~odom_frame', 'odom')
  self.base_frame = rospy.get_param('~base_frame', 'base_footprint')
  self.operate_mode = rospy.get_param('~operation_mode', 3)
  rospy.loginfo(('serial port: %s' % self.port))
  rospy.loginfo(('update_rate: %s' % self.update_rate))
  rospy.loginfo(('drive mode: %s' % self.drive_mode))
  rospy.loginfo(('has gyro: %s' % self.has_gyro))
 def _init_pubsub(self):
  """Create publishers, services, the drive-mode-specific cmd_vel
  subscriber and (optionally) the TF broadcaster."""
  self.joint_states_pub = rospy.Publisher('joint_states', JointState, queue_size=10)
  self.odom_pub = rospy.Publisher('odom', Odometry, queue_size=10)
  self.sensor_state_pub = rospy.Publisher('~sensor_state', TurtlebotSensorState, queue_size=10)
  self.sensor_state_map_pub = rospy.Publisher('map_time_stamp', TurtlebotSensorState, queue_size=10)
  self.operating_mode_srv = rospy.Service('~set_operation_mode', SetTurtlebotMode, self.set_operation_mode)
  self.digital_output_srv = rospy.Service('~set_digital_outputs', SetDigitalOutputs, self.set_digital_outputs)
  # The message type on 'cmd_vel' and the low-level drive call both
  # depend on the configured drive mode.
  if (self.drive_mode == 'twist'):
   self.cmd_vel_sub = rospy.Subscriber('cmd_vel', Twist, self.cmd_vel)
   self.drive_cmd = self.robot.direct_drive
  elif (self.drive_mode == 'drive'):
   self.cmd_vel_sub = rospy.Subscriber('cmd_vel', Drive, self.cmd_vel)
   self.drive_cmd = self.robot.drive
  elif (self.drive_mode == 'turtle'):
   self.cmd_vel_sub = rospy.Subscriber('cmd_vel', Turtle, self.cmd_vel)
   self.drive_cmd = self.robot.direct_drive
  else:
   rospy.logerr(('unknown drive mode :%s' % self.drive_mode))
  self.transform_broadcaster = None
  if self.publish_tf:
   self.transform_broadcaster = tf.TransformBroadcaster()
 def reconfigure(self, config, level):
  """dynamic_reconfigure callback: apply updated parameters at runtime."""
  self.update_rate = config['update_rate']
  self.drive_mode = config['drive_mode']
  self.has_gyro = config['has_gyro']
  if self.has_gyro:
   self._gyro.reconfigure(config, level)
  self.odom_angular_scale_correction = config['odom_angular_scale_correction']
  self.odom_linear_scale_correction = config['odom_linear_scale_correction']
  self.cmd_vel_timeout = rospy.Duration(config['cmd_vel_timeout'])
  self.stop_motors_on_bump = config['stop_motors_on_bump']
  self.min_abs_yaw_vel = config['min_abs_yaw_vel']
  self.max_abs_yaw_vel = config['max_abs_yaw_vel']
  return config
 def cmd_vel(self, msg):
  """Velocity-command callback: clamp yaw rate and convert the message
  into a wheel-speed request stored in self.req_cmd_vel.

  The *1000 factors convert the message units to the robot's integer
  wheel units — presumably m/s to mm/s; confirm against the Create SCI.
  """
  # Enforce a minimum magnitude on nonzero yaw commands.
  if ((self.min_abs_yaw_vel is not None) and (msg.angular.z != 0.0) and (abs(msg.angular.z) < self.min_abs_yaw_vel)):
   msg.angular.z = (self.min_abs_yaw_vel if (msg.angular.z > 0.0) else (- self.min_abs_yaw_vel))
  # Cap the yaw magnitude when a maximum is configured.
  if ((self.max_abs_yaw_vel is not None) and (self.max_abs_yaw_vel > 0.0) and (msg.angular.z != 0.0) and (abs(msg.angular.z) > self.max_abs_yaw_vel)):
   msg.angular.z = (self.max_abs_yaw_vel if (msg.angular.z > 0.0) else (- self.max_abs_yaw_vel))
  if (self.drive_mode == 'twist'):
   ts = (msg.linear.x * 1000)
   tw = ((msg.angular.z * (robot_types.ROBOT_TYPES[self.robot_type].wheel_separation / 2)) * 1000)
   # Reserve headroom for the turn component within MAX_WHEEL_SPEED.
   if (ts > 0):
    ts = min(ts, (MAX_WHEEL_SPEED - abs(tw)))
   else:
    ts = max(ts, (- (MAX_WHEEL_SPEED - abs(tw))))
   self.req_cmd_vel = (int((ts - tw)), int((ts + tw)))
  elif (self.drive_mode == 'turtle'):
   ts = (msg.linear * 1000)
   tw = ((msg.angular * (robot_types.ROBOT_TYPES[self.robot_type].wheel_separation / 2)) * 1000)
   self.req_cmd_vel = (int((ts - tw)), int((ts + tw)))
  elif (self.drive_mode == 'drive'):
   self.req_cmd_vel = ((msg.velocity * 1000), (msg.radius * 1000))
 def set_operation_mode(self, req):
  """Service handler: switch the robot between passive(1)/safe(2)/full(3)."""
  if (not self.robot.sci):
   rospy.logwarn('Create : robot not connected yet, sci not available')
   return SetTurtlebotModeResponse(False)
  self.operate_mode = req.mode
  if (req.mode == 1):
   self._robot_run_passive()
  elif (req.mode == 2):
   self._robot_run_safe()
  elif (req.mode == 3):
   self._robot_run_full()
  else:
   rospy.logerr('Requested an invalid mode.')
   return SetTurtlebotModeResponse(False)
  return SetTurtlebotModeResponse(True)
 def _robot_run_passive(self):
  """Put the robot in passive mode with all digital outputs cleared."""
  rospy.loginfo('Setting turtlebot to passive mode.')
  self._set_digital_outputs([0, 0, 0])
  self.robot.passive()
 def _robot_reboot(self):
  """Soft-reset the robot (drops it back to passive mode)."""
  msg = 'Soft-rebooting turtlebot to passive mode.'
  rospy.logdebug(msg)
  self._diagnostics.node_status(msg, 'warn')
  self._set_digital_outputs([0, 0, 0])
  self.robot.soft_reset()
  time.sleep(2.0)
 def _robot_run_safe(self):
  """Switch to safe mode, echoing digital inputs 1/2 back to the outputs."""
  rospy.loginfo('Setting turtlebot to safe mode.')
  self.robot.safe = True
  self.robot.control()
  # Extract input bits 1 and 2 (masks 0x2 and 0x4) as 0/1 values.
  b1 = ((self.sensor_state.user_digital_inputs & 2) / 2)
  b2 = ((self.sensor_state.user_digital_inputs & 4) / 4)
  self._set_digital_outputs([1, b1, b2])
 def _robot_run_full(self):
  """Switch to full mode, echoing digital inputs 1/2 back to the outputs."""
  rospy.loginfo('Setting turtlebot to full mode.')
  self.robot.safe = False
  self.robot.control()
  b1 = ((self.sensor_state.user_digital_inputs & 2) / 2)
  b2 = ((self.sensor_state.user_digital_inputs & 4) / 4)
  self._set_digital_outputs([1, b1, b2])
 def _set_digital_outputs(self, outputs):
  """Pack three 0/1 output states into one byte and send it to the robot."""
  assert (len(outputs) == 3), 'Expecting 3 output states.'
  byte = 0
  for (output, state) in enumerate(outputs):
   byte += ((2 ** output) * int(state))
  self.robot.set_digital_outputs(byte)
  self.sensor_state.user_digital_outputs = byte
 def set_digital_outputs(self, req):
  """Service handler: set the three user digital outputs."""
  if (not self.robot.sci):
   raise Exception('Robot not connected, SCI not available')
  outputs = [req.digital_out_0, req.digital_out_1, req.digital_out_2]
  self._set_digital_outputs(outputs)
  return SetDigitalOutputsResponse(True)
 def sense(self, sensor_state):
  """Fill *sensor_state* from the robot; update gyro calibration if present."""
  self.sensor_handler.get_all(sensor_state)
  if self._gyro:
   self._gyro.update_calibration(sensor_state)
 def spin(self):
  """Main loop: read sensors, publish odometry/joint/sensor state and
  issue the current (bump-checked, timeout-guarded) drive command."""
  s = self.sensor_state
  odom = Odometry(header=rospy.Header(frame_id=self.odom_frame), child_frame_id=self.base_frame)
  js = JointState(name=['left_wheel_joint', 'right_wheel_joint', 'front_castor_joint', 'back_castor_joint'], position=[0, 0, 0, 0], velocity=[0, 0, 0, 0], effort=[0, 0, 0, 0])
  r = rospy.Rate(self.update_rate)
  last_cmd_vel = (0, 0)
  last_cmd_vel_time = rospy.get_rostime()
  last_js_time = rospy.Time(0)
  sensor_read_retry_count = 0
  while (not rospy.is_shutdown()):
   last_time = s.header.stamp
   curr_time = rospy.get_rostime()
   try:
    self.sense(s)
    transform = self.compute_odom(s, last_time, odom)
    # NOTE(review): joint-state stamp is set 1s in the future —
    # confirm this offset is intentional.
    js.header.stamp = (curr_time + rospy.Duration(1))
   except select.error:
    # Interrupted system call on the serial fd; just retry.
    continue
   except DriverError:
    if (sensor_read_retry_count > 0):
     rospy.logwarn(('Failed to read sensor package. %d retries left.' % sensor_read_retry_count))
     sensor_read_retry_count -= 1
     continue
    else:
     raise
   # A successful read refills the retry budget.
   sensor_read_retry_count = self._SENSOR_READ_RETRY_COUNT
   # On the charger in passive mode with low charge: reboot and exit
   # so charging can proceed (threshold 93% of capacity).
   if ((s.charging_sources_available > 0) and (s.oi_mode == 1) and (s.charging_state in [0, 5]) and (s.charge < (0.93 * s.capacity))):
    rospy.loginfo('going into soft-reboot and exiting driver')
    self._robot_reboot()
    rospy.loginfo('exiting driver')
    break
   # Same in full mode, but only when charge is critically low (15%).
   if ((s.charging_sources_available > 0) and (s.oi_mode == 3) and (s.charging_state in [0, 5]) and (s.charge < (0.15 * s.capacity))):
    rospy.loginfo('going into soft-reboot and exiting driver')
    self._robot_reboot()
    rospy.loginfo('exiting driver')
    break
   self.sensor_state_pub.publish(s)
   self.sensor_state_map_pub.publish(s)
   self.odom_pub.publish(odom)
   if self.publish_tf:
    self.publish_odometry_transform(odom)
   # Throttle joint-state publication to at most once per second.
   if (curr_time > (last_js_time + rospy.Duration(1))):
    self.joint_states_pub.publish(js)
    last_js_time = curr_time
   self._diagnostics.publish(s, self._gyro)
   if self._gyro:
    self._gyro.publish(s, last_time)
   if (self.req_cmd_vel is not None):
    # Re-assert the desired OI mode if it was lost (e.g. picked up),
    # unless the robot is on the charger.
    if ((s.oi_mode != self.operate_mode) and (s.charging_sources_available != 1)):
     if (self.operate_mode == 2):
      self._robot_run_safe()
     else:
      self._robot_run_full()
    req_cmd_vel = self.check_bumpers(s, self.req_cmd_vel)
    # Consume the pending command so it is sent only once per receipt.
    self.req_cmd_vel = None
    last_cmd_vel_time = last_time
   else:
    # No fresh command: stop after the timeout, else repeat the last one.
    if ((last_time - last_cmd_vel_time) > self.cmd_vel_timeout):
     last_cmd_vel = (0, 0)
    req_cmd_vel = self.check_bumpers(s, last_cmd_vel)
   self.drive_cmd(*req_cmd_vel)
   last_cmd_vel = req_cmd_vel
   r.sleep()
 def check_bumpers(self, s, cmd_vel):
  """Zero out forward motion while a bumper/wheel-drop is triggered."""
  forward = ((cmd_vel[0] + cmd_vel[1]) > 0)
  if (self.stop_motors_on_bump and (s.bumps_wheeldrops > 0) and forward):
   return (0, 0)
  else:
   return cmd_vel
 def compute_odom(self, sensor_state, last_time, odom):
  """Integrate distance/angle deltas into the 2-D pose and fill *odom*.

  Returns the ((x, y, z), quaternion) transform for the TF broadcaster.
  Raises if a single-step displacement is implausibly large (> 1.0).
  """
  current_time = sensor_state.header.stamp
  dt = (current_time - last_time).to_sec()
  if ((abs(sensor_state.distance) > 1.0) or (abs(sensor_state.angle) > 1.0)):
   raise Exception(('Distance, angle displacement too big, invalid readings from robot. Distance: %.2f, Angle: %.2f' % (sensor_state.distance, sensor_state.angle)))
  # Apply user-configured scale corrections before integrating.
  d = (sensor_state.distance * self.odom_linear_scale_correction)
  angle = (sensor_state.angle * self.odom_angular_scale_correction)
  x = (cos(angle) * d)
  y = ((- sin(angle)) * d)
  last_angle = self._pos2d.theta
  # Rotate the local displacement into the odom frame and accumulate.
  self._pos2d.x += ((cos(last_angle) * x) - (sin(last_angle) * y))
  self._pos2d.y += ((sin(last_angle) * x) + (cos(last_angle) * y))
  self._pos2d.theta += angle
  # Yaw-only quaternion from the integrated heading.
  odom_quat = (0.0, 0.0, sin((self._pos2d.theta / 2.0)), cos((self._pos2d.theta / 2.0)))
  transform = ((self._pos2d.x, self._pos2d.y, 0.0), odom_quat)
  odom.header.stamp = current_time
  odom.pose.pose = Pose(Point(self._pos2d.x, self._pos2d.y, 0.0), Quaternion(*odom_quat))
  odom.twist.twist = Twist(Vector3((d / dt), 0, 0), Vector3(0, 0, (angle / dt)))
  # Use the tighter "stationary" covariances when the robot is not moving.
  if ((sensor_state.requested_right_velocity == 0) and (sensor_state.requested_left_velocity == 0) and (sensor_state.distance == 0)):
   odom.pose.covariance = ODOM_POSE_COVARIANCE2
   odom.twist.covariance = ODOM_TWIST_COVARIANCE2
  else:
   odom.pose.covariance = ODOM_POSE_COVARIANCE
   odom.twist.covariance = ODOM_TWIST_COVARIANCE
  return transform
 def publish_odometry_transform(self, odometry):
  """Broadcast the odom -> base frame transform derived from *odometry*."""
  self.transform_broadcaster.sendTransform((odometry.pose.pose.position.x, odometry.pose.pose.position.y, odometry.pose.pose.position.z), (odometry.pose.pose.orientation.x, odometry.pose.pose.orientation.y, odometry.pose.pose.orientation.z, odometry.pose.pose.orientation.w), odometry.header.stamp, odometry.child_frame_id, odometry.header.frame_id)
def find_masks_best_matching_each_gt_mask(masks, n_gt_masks):
 """For every ground-truth mask index, return the best candidate score.

 Args:
  masks: iterable of pairs where entry[0] is the ground-truth index the
   candidate was matched to and entry[1] is its score (e.g. IoU).
  n_gt_masks: number of ground-truth masks.

 Returns:
  List of length n_gt_masks: the highest score among candidates assigned
  to each ground-truth index, or 0 when it has no candidates.
 """
 best_scores = []
 for gt_idx in range(n_gt_masks):
  scores = [entry[1] for entry in masks if (entry[0] == gt_idx)]
  # max(scores) is the value the original argmax+index lookup produced.
  best_scores.append(max(scores) if scores else 0)
 return best_scores
class TFRemBertForMultipleChoice(metaclass=DummyObject):
 """Import-time placeholder for the TensorFlow RemBERT multiple-choice
 model: instantiation raises an informative error when the 'tf'
 (TensorFlow) backend is not installed."""
 _backends = ['tf']
 def __init__(self, *args, **kwargs):
  requires_backends(self, ['tf'])
def fragment_by_mmpa(mol, mol_name, mol_smiles, min_cuts, max_cuts, min_frag_size, min_link_size):
 """Fragment *mol* with RDKit's MMPA algorithm for every cut count in
 [min_cuts, max_cuts], keeping only results whose linker and fragments
 pass the size filters.

 Args:
  mol: RDKit molecule to fragment.
  mol_name, mol_smiles: identifiers copied into each result row.
  min_cuts, max_cuts: inclusive range of simultaneous bond cuts.
  min_frag_size, min_link_size: thresholds forwarded to the filters.

 Returns:
  List of [mol_name, mol_smiles, linker_smiles, fragments_smiles, 'mmpa'].
 """
 mmpa_results = []
 for i in range(min_cuts, (max_cuts + 1)):
  # Bug fix: the cut pattern contained '!!=!#', which is invalid SMARTS
  # ('!' must be followed by a bond primitive). Restored rdMMPA's
  # documented acyclic single-bond pattern '...!@!=!#[*]'.
  mmpa_results += FragmentMol(mol, minCuts=i, maxCuts=i, maxCutBonds=100, pattern='[#6+0;!$(*=,#[!#6])]!@!=!#[*]', resultsAsMols=False)
 filtered_mmpa_results = []
 for (linker_smiles, fragments_smiles) in mmpa_results:
  if (check_mmpa_linker(linker_smiles, min_link_size) and check_mmpa_fragments(fragments_smiles, min_frag_size)):
   filtered_mmpa_results.append([mol_name, mol_smiles, linker_smiles, fragments_smiles, 'mmpa'])
 return filtered_mmpa_results
def test_handle_waiters():
 """After re-running cell 1 (a = 2), the slice of cell 4 must include the
 redefinition and the dependency chain, but count only 3 statements."""
 for cell_source in ('a = 1', 'b = 2 * a', 'a = 2', 'logging.info(b)', 'logging.info(b)'):
  run_cell(cell_source)
 deps = set(compute_unparsed_slice(4).keys())
 assert (deps == {1, 2, 4}), ('got %s' % deps)
 slice_size = num_stmts_in_slice(4)
 assert (slice_size == 3), ('got %d' % slice_size)
class Map(dict):
 """Dict subclass with attribute-style access: m.key is m['key'].

 Items are mirrored into the instance __dict__ so both access styles stay
 in sync; missing attributes resolve to None (via dict.get).
 """

 def __init__(self, *args, **kwargs):
  super(Map, self).__init__(*args, **kwargs)
  # Re-set every item through __setitem__ so __dict__ is populated too.
  for arg in args:
   if isinstance(arg, dict):
    for (k, v) in arg.items():
     self[k] = v
  if kwargs:
   # Bug fix: dict.iteritems() is Python 2 only and raised
   # AttributeError under Python 3; use items().
   for (k, v) in kwargs.items():
    self[k] = v

 def __getattr__(self, attr):
  # Only called when normal attribute lookup fails; returns None for
  # missing keys instead of raising AttributeError.
  return self.get(attr)

 def __setattr__(self, key, value):
  self.__setitem__(key, value)

 def __setitem__(self, key, value):
  super(Map, self).__setitem__(key, value)
  # Keep the attribute view in sync with the mapping.
  self.__dict__.update({key: value})

 def __delattr__(self, item):
  self.__delitem__(item)

 def __delitem__(self, key):
  super(Map, self).__delitem__(key)
  del self.__dict__[key]
class SimpleNetBN2D(SimpleNet2):
 """Batch-norm variant of SimpleNet2: only the normalization type and the
 per-level channel widths differ from the parent."""
 NORM_TYPE = 'BN'
 # Channel widths per level; index 0 is None so levels are 1-based.
 CHANNELS = [None, 32, 64, 128, 256]
 # Transposed-conv (decoder) channel widths, same 1-based indexing.
 TR_CHANNELS = [None, 32, 64, 64, 128]
class MIGTConfig(ModelConfig):
 """Hyper-parameter configuration for the MIGT transformer model."""
 # --- architecture ---
 n_embeddings: int = 1024
 n_head: int = 12
 d_model: int = 768
 dropout: float = 0.1
 n_layer: int = 12
 # --- optimization ---
 weight_decay: float = 0.01
 label_smoothing: float = 0.0
 learning_rate: float = 0.00064
 batch_size: int = 64
 # presumably 0.0 disables gradient clipping — confirm trainer semantics
 gradient_clip_val: float = 0.0
 # --- sequence / training schedule ---
 sequence_size: int = 20
 token_image_size: int = 8
 total_steps: int = 300000
 n_loss_skip: int = 4
 # pose-augmentation strategy applied during training
 augment_poses: Literal[('no', 'relative', 'simple', 'advanced')] = 'relative'
 use_dynamic_pose_loss: bool = False
 # --- loss weighting ---
 localization_weight: Schedule = Schedule.from_str('1')
 image_generation_weight: float = 1.0
 pose_multiplier: float = 1.0
 random_pose_multiplier: float = 1.0
 def model_type(self):
  """Return this config's model-family tag."""
  return 'transformer'
class RandomForestComponentTest(BaseRegressionComponentTest):
 """Regression-component test fixture for the RandomForest wrapper,
 comparing it against sklearn's RandomForestRegressor."""

 __test__ = True
 # Expected benchmark scores / call counts, keyed by dataset variant.
 res = {
  'default_boston': 0.,
  'boston_n_calls': 9,
  'default_boston_iterative': 0.,
  'default_boston_sparse': 0.,
  'default_boston_iterative_sparse': 0.,
  'default_diabetes': 0.,
  'diabetes_n_calls': 9,
  'default_diabetes_iterative': 0.,
  'default_diabetes_sparse': 0.,
  'default_diabetes_iterative_sparse': 0.,
 }
 sk_mod = sklearn.ensemble.RandomForestRegressor
 module = RandomForest
 # Hyper-parameter stepped during the iterative-fit tests.
 step_hyperparameter = {'name': 'n_estimators', 'value': module.get_max_iter()}
def schedule(blocknum, blocksize, totalsize):
 """urllib-style download progress hook: report percent complete.

 Args:
  blocknum: number of blocks transferred so far.
  blocksize: size of each block in bytes.
  totalsize: total download size, or 0 when unknown (reported as 0%).
 """
 if totalsize:
  fraction = min(1.0, ((blocknum * blocksize) / totalsize))
  percent = (fraction * 100)
 else:
  # Unknown total size: avoid division by zero.
  percent = 0
 progressbar(percent)
def query_cot(data: dict, key: str, cot_temperature: float, backbone: str):
 """Query a chat model with a chain-of-thought prompt, retrying on errors.

 Args:
  data: problem record passed to get_cot_prompt.
  key: OpenAI API key.
  cot_temperature: sampling temperature for the completion.
  backbone: 'gpt4' or 'chatgpt'.

 Returns:
  A single-element list with the completion text, or None if the API
  keeps failing for more than ~60 seconds.

 Raises:
  ValueError: for an unsupported backbone name.
 """
 # Bug fix: validate the backbone up front — previously an unknown value
 # left model_name undefined and crashed later with NameError.
 if (backbone == 'gpt4'):
  model_name = 'gpt-4'
 elif (backbone == 'chatgpt'):
  model_name = 'gpt-3.5-turbo'
 else:
  raise ValueError('Unsupported backbone: %s' % backbone)
 query_message = get_cot_prompt(data)
 start_time = time.time()
 completions = []
 while True:
  try:
   cot_solution = openai.ChatCompletion.create(api_key=key, model=model_name, max_tokens=500, stop='\n\n\n', messages=query_message, temperature=cot_temperature, top_p=1.0, n=1)
  except Exception:
   # Deliberate best-effort: API/rate-limit errors fall through to the
   # sleep-and-retry branch below.
   cot_solution = None
  if (cot_solution is not None):
   completions.extend([choice['message']['content'] for choice in cot_solution['choices']])
   completions = completions[:1]  # keep only the first completion
   return completions
  else:
   # Randomized backoff before retrying; give up after ~60 seconds.
   sleep_time = random.uniform(3, 5)
   time.sleep(sleep_time)
   if ((time.time() - start_time) > 60):
    return None
def make_plot(ax, s, color, label, alpha=1.0, lw=1.0, year=2020):
 """Plot a dated series on *ax*, remapping every timestamp onto a single
 reference year so multiple years overlay on one seasonal axis.

 Args:
  ax: matplotlib Axes (anything exposing a ``.plot`` method).
  s: pandas Series indexed by a DatetimeIndex.
  color, label, alpha, lw: forwarded to ``ax.plot``.
  year: reference year the month/day values are mapped onto. Default
   2020 (previously hard-coded) — a leap year, so Feb 29 stays valid.
 """
 # Build YYYYMMDD integers from the reference year + original month/day,
 # then parse them back into datetimes.
 remapped = pd.to_datetime((((10000 * year) + (100 * s.index.month)) + s.index.day).astype(str))
 ax.plot(remapped, s, label=label, color=color, alpha=alpha, lw=lw)
def get_pydot_graph(caffe_net):
 """Build a pydot graph of a Caffe network: one node per layer, one node
 per blob, with edges bottom_blob -> layer -> top_blob (rankdir 'BT').

 In-place layers (single bottom equal to single top) are drawn with
 NEURON_LAYER_STYLE; all other layers use LAYER_STYLE.
 """
 pydot_graph = pydot.Dot(caffe_net.name, graph_type='digraph', rankdir='BT')
 pydot_nodes = {}
 pydot_edges = []
 # Maps numeric layer-type enum values to their readable names.
 d = get_enum_name_by_value()
 for layer in caffe_net.layers:
  name = layer.name
  layertype = d[layer.type]
  if ((len(layer.bottom) == 1) and (len(layer.top) == 1) and (layer.bottom[0] == layer.top[0])):
   # In-place layer (e.g. an activation writing into its input blob).
   pydot_nodes[((name + '_') + layertype)] = pydot.Node(('%s (%s)' % (name, layertype)), **NEURON_LAYER_STYLE)
  else:
   pydot_nodes[((name + '_') + layertype)] = pydot.Node(('%s (%s)' % (name, layertype)), **LAYER_STYLE)
  for bottom_blob in layer.bottom:
   pydot_nodes[(bottom_blob + '_blob')] = pydot.Node(('%s' % bottom_blob), **BLOB_STYLE)
   pydot_edges.append(((bottom_blob + '_blob'), ((name + '_') + layertype)))
  for top_blob in layer.top:
   # NOTE(review): top blobs are created WITHOUT **BLOB_STYLE, unlike
   # bottom blobs above — confirm whether this asymmetry is intended.
   pydot_nodes[(top_blob + '_blob')] = pydot.Node(('%s' % top_blob))
   pydot_edges.append((((name + '_') + layertype), (top_blob + '_blob')))
 for node in pydot_nodes.values():
  pydot_graph.add_node(node)
 for edge in pydot_edges:
  pydot_graph.add_edge(pydot.Edge(pydot_nodes[edge[0]], pydot_nodes[edge[1]]))
 return pydot_graph
def run_infer(batchsize, dtype, x_test, model_file):
 """Load a frozen TF graph and measure inference throughput and latency.

 Args:
  batchsize: images per batch; x_test.shape[0] must be divisible by it.
  dtype: label used only for the progress message (e.g. 'fp32').
  x_test: input images, reshaped to (n_batches, batchsize, 224, 224, 3).
  model_file: path to the frozen GraphDef (.pb) file.

 Returns:
  (throughput, latency): images/second and per-image latency in ms.
 """
 print(('Running %s model...' % dtype))
 with tf.compat.v1.Session() as sess:
  print('load graph')
  with tf.io.gfile.GFile(model_file, 'rb') as f:
   graph_def = tf.compat.v1.GraphDef()
   graph_def.ParseFromString(f.read())
   sess.graph.as_default()
   tf.import_graph_def(graph_def, name='')
   graph_nodes = [n for n in graph_def.node]
   names = []
   # NOTE(review): this iterates a freshly created (empty) graph, so the
   # print loop never executes — was graph_def's graph intended?
   with tf.Graph().as_default() as graph:
    for op in graph.get_operations():
     print(('Operation Name :' + op.name))
     print(('Tensor Stats :' + str(op.values())))
  # Wrap the frozen graph as a callable taking 'input:0' -> 'predict:0'.
  concrete_function = get_concrete_function(graph_def=graph_def, inputs=['input:0'], outputs=['predict:0'], print_graph=True)
  batches = x_test.reshape(((x_test.shape[0] // batchsize), batchsize, 224, 224, 3))
  totaltime = 0
  for batch in batches:
   bt = time.time()
   _frozen_graph_predictions = concrete_function(input=tf.constant(batch))
   et = time.time()
   totaltime = (totaltime + (et - bt))
  # NOTE(review): throughput is computed from the LAST batch's elapsed
  # time only; `totaltime` is accumulated but never used — likely
  # x_test.shape[0] / totaltime was intended. Confirm before relying on
  # this number.
  throughput = (x_test.shape[0] / (et - bt))
  print('max throughput(fps):', throughput)
  # Latency: run single-image inference `times` times, timing only the
  # iterations after the warm-up phase.
  times = 100
  single_test = x_test[:1]
  bt = 0
  warmup = 20
  for i in range(times):
   if (i == warmup):
    bt = time.time()  # start the clock once warm-up completes
   _frozen_graph_predictions = concrete_function(input=tf.constant(single_test))
  et = time.time()
  latency = (((et - bt) * 1000) / (times - warmup))
  print('latency(ms):', latency)
  return (throughput, latency)
def tex_coords(*side, top_only=False, split=False):
 """Assemble the texture coordinates for a cube's faces.

 Without ``split`` the single tile is repeated for all 6 faces (or once
 when ``top_only``). With ``split`` the faces use sub-tiles in the fixed
 order 1, 2, 0, 0, 3, 3; ``split`` + ``top_only`` returns the raw side
 tuple unchanged.
 """
 if not split:
  face = tex_coord(*side)
  repeats = 1 if top_only else 6
  coords = []
  for _ in range(repeats):
   coords.extend(face)
  return coords
 if top_only:
  return side
 coords = []
 # Per-face sub-tile indices: front, back, and doubled side/top tiles.
 for face_index in (1, 2, 0, 0, 3, 3):
  coords += tex_coord(*side, split=split, side_n=face_index)
 return coords
# Bug fix: the decorator line had been truncated to the bare (and invalid)
# expression `(jit, static_argnames=('edges', 'node_idx'))` — a SyntaxError.
# Restored the standard JAX idiom; requires `from functools import partial`
# and `from jax import jit` at the top of this file.
@partial(jit, static_argnames=('edges', 'node_idx'))
def continuous_input_volatility_prediction_error(attributes: Dict, edges: Edges, node_idx: int) -> Dict:
 """Compute the volatility (noise) prediction error of a continuous
 input node and store it under the node's temp dict.

 Args:
  attributes: per-node parameter dicts, indexed by node id.
  edges: static edge structure; edges[node_idx].value_parents[0] is the
   input node's value parent.
  node_idx: index of the continuous input node (static for jit).

 Returns:
  The updated attributes mapping.
 """
 expected_precision_input = attributes[node_idx]['expected_precision']
 value_parent_idx = edges[node_idx].value_parents[0]
 # Precision-weighted squared value prediction error, offset by -1.
 noise_prediction_error = (((expected_precision_input / attributes[value_parent_idx]['precision']) + (expected_precision_input * (attributes[node_idx]['temp']['value_prediction_error'] ** 2))) - 1)
 attributes[node_idx]['temp']['volatility_prediction_error'] = noise_prediction_error
 return attributes
def run_layers_validate(flags, args, original_conf):
 """Validate a converted micro model layer by layer: split the original
 model into per-layer sub-models, regenerate micro code for each, rebuild
 the engine, and run every sub-model's graphs.

 Args:
  flags: CLI flags (model_name, output dir, layers selection, ...).
  args: extra runtime arguments forwarded to run_model_with_conf.
  original_conf: normalized config of the full (unsplit) model.
 """
 model_name = flags.model_name
 original_model_dir = (((flags.output + '/') + original_conf['library_name']) + '/model')
 # Work in a scratch copy so the original artifacts are untouched.
 model_dir = '/tmp/micro_run/model'
 device.execute(('mkdir -p %s' % model_dir))
 device.execute(('cp -p %s/%s.pb %s' % (original_model_dir, model_name, model_dir)))
 params_file_path = ('%s/%s.data' % (original_model_dir, model_name))
 # One output config per layer-truncated sub-model.
 output_configs = layers_validate.get_layers(model_dir, model_name, flags.layers)
 for i in range(len(output_configs)):
  sub_model_conf = gen_sub_model_conf(output_configs[i], flags, original_conf)
  print(output_configs[i]['model_file_path'])
  with open(output_configs[i]['model_file_path'], 'rb') as model_file:
   net_def = mace_pb2.NetDef()
   net_def.ParseFromString(model_file.read())
   with open(params_file_path, 'rb') as params_file:
    weights = bytearray(params_file.read())
    micro_conf = config_parser.normalize_model_config(sub_model_conf)
    # Regenerate code for this sub-model, rebuild, then run its graphs.
    MicroConverter(micro_conf, net_def, weights, model_name).gen_code()
    build_engine(model_name, micro_conf[ModelKeys.data_type])
    for (graph_name, graph_config) in micro_conf[ModelKeys.subgraphs].items():
     run_model_with_conf(flags, args, model_name, graph_config)
class OneWordMLPLabelProbe(Probe):
 """Two-layer MLP probe mapping a single word's representation
 (model_dim) to label-space logits through a 100-unit GELU hidden layer.
 """
 yaml_tag = '!OneWordMLPLabelProbe'

 def __init__(self, args, model_dim, label_space_size, zero_features=False):
  # Bug fix: the construction banner printed the wrong class name
  # ('OneWordLinearLabelProbe').
  print('Constructing OneWordMLPLabelProbe')
  super(OneWordMLPLabelProbe, self).__init__()
  self.args = args
  self.model_dim = model_dim
  self.label_space_size = label_space_size
  self.linear = nn.Linear(self.model_dim, 100)
  self.linear2 = nn.Linear(100, self.label_space_size)
  self.print_param_count()
  dropout = 0.0
  # NOTE(review): self.dropout is created (p=0) but never applied in
  # forward(); confirm whether dropout was meant to be used.
  self.dropout = nn.Dropout(p=dropout)
  print('Applying dropout {}'.format(dropout))
  self.zero_features = zero_features
  self.to(args['device'])

 def forward(self, batch):
  """Return logits of shape (..., label_space_size).

  When zero_features is set the input is replaced by zeros first
  (control probe measuring what is learnable without features).
  """
  if self.zero_features:
   batch = torch.zeros_like(batch)
  hidden = self.linear(batch)
  return self.linear2(torch.nn.functional.gelu(hidden))
# Bug fix: the source had the bare statement `_manager.LOSSES.add_module`,
# which evaluates the attribute and discards it, so SegmentLoss was never
# registered. Restored it as a decorator — confirm the registry name
# matches this project's loss manager.
@_manager.LOSSES.add_module
class SegmentLoss(nn.Module):
 """Per-pixel cross-entropy between a predicted segmentation map and an
 integer target map, both looked up by formatted key in a shared
 ``outputs`` dict.
 """

 def __init__(self, pred_n, target_n, device='cpu'):
  super().__init__()
  # NOTE(review): locals() also captures self/device here; kept as-is
  # since downstream code may rely on init_opts' exact contents.
  self.init_opts = locals()
  self.pred_n = pred_n
  self.target_n = target_n

 def forward(self, outputs, side):
  """Return the unreduced (per-pixel) cross-entropy loss map for *side*."""
  pred = outputs[self.pred_n.format(side)]
  # Targets arrive as a float map with a channel dim; CE needs long + no channel.
  target = outputs[self.target_n.format(side)].to(torch.long).squeeze(1)
  loss_map = F.cross_entropy(pred, target, reduction='none')
  return loss_map
def test_fit_function():
 """Compare a default fit against a multiprocessing-backend fit: the
 final-epoch accuracies must agree within 0.1.

 NOTE(review): the early return skips the multiprocessing comparison on
 TF >= 2.10 — presumably that backend is unsupported there; confirm.
 """
 (num_classes, train_ds, val_ds) = dataset_generation()
 model_default = model_init(num_classes)
 history_default = model_default.fit(train_ds, epochs=3, validation_data=val_ds)
 if (LooseVersion(tf.__version__) >= LooseVersion('2.10.0')):
  return
 model_multiprocess = model_init(num_classes)
 history_multiprocess = model_multiprocess.fit(train_ds, epochs=3, validation_data=val_ds, num_processes=2, backend='multiprocessing')
 # Final-epoch accuracies should closely match across backends.
 assert ((history_default.history['accuracy'][(- 1)] - history_multiprocess.history['accuracy'][(- 1)]) <= 0.1)
class Discriminator(nn.Module):
 """Two-branch (local + global) convolutional discriminator.

 Both branches are stacks of stride-2 5x5 conv blocks; their flattened
 features are projected to 512-d each, concatenated, and reduced to a
 single score by the final classifier.

 NOTE(review): the Linear layers assume flattened features of size
 512*4*4 — this only holds for a specific nf and input resolution
 (e.g. nf=64); confirm against the callers' input sizes.
 """
 def __init__(self, in_nc, nf, norm='bn', activation='lrelu'):
  """Args:
   in_nc: number of input channels.
   nf: base number of filters; deeper blocks use multiples of it.
   norm: normalization type forwarded to B.conv_block.
   activation: activation type forwarded to B.conv_block.
  """
  super(Discriminator, self).__init__()
  # Global branch: six stride-2 conv blocks (nf -> 8*nf).
  global_model = []
  global_model += [B.conv_block(in_nc, nf, 5, stride=2, padding=2, norm=norm, activation=activation), B.conv_block(nf, (2 * nf), 5, stride=2, padding=2, norm=norm, activation=activation), B.conv_block((2 * nf), (4 * nf), 5, stride=2, padding=2, norm=norm, activation=activation), B.conv_block((4 * nf), (8 * nf), 5, stride=2, padding=2, norm=norm, activation=activation), B.conv_block((8 * nf), (8 * nf), 5, stride=2, padding=2, norm=norm, activation=activation), B.conv_block((8 * nf), (8 * nf), 5, stride=2, padding=2, norm=norm, activation=activation)]
  self.global_model = nn.Sequential(*global_model)
  # Local branch: five stride-2 conv blocks kept as separate attributes
  # so forward() can return the intermediate feature maps.
  self.local_fea1 = B.conv_block(in_nc, nf, 5, stride=2, padding=2, norm=norm, activation=activation)
  self.local_fea2 = B.conv_block(nf, (2 * nf), 5, stride=2, padding=2, norm=norm, activation=activation)
  self.local_fea3 = B.conv_block((2 * nf), (4 * nf), 5, stride=2, padding=2, norm=norm, activation=activation)
  self.local_fea4 = B.conv_block((4 * nf), (8 * nf), 5, stride=2, padding=2, norm=norm, activation=activation)
  self.local_fea5 = B.conv_block((8 * nf), (8 * nf), 5, stride=2, padding=2, norm=norm, activation=activation)
  self.global_classifier = nn.Linear(((512 * 4) * 4), 512)
  self.local_classifier = nn.Linear(((512 * 4) * 4), 512)
  # Concatenated 512+512 features -> single discriminator score.
  self.classifier = nn.Sequential(nn.LeakyReLU(0.2), nn.Linear(1024, 1))
 def forward(self, x_local, x_global):
  """Score a (local, global) pair of crops.

  Returns:
   (score, features): the scalar-per-sample discriminator output and
   the list of the five local-branch intermediate feature maps (used
   e.g. for feature-matching losses by callers).
  """
  out_local_fea1 = self.local_fea1(x_local)
  out_local_fea2 = self.local_fea2(out_local_fea1)
  out_local_fea3 = self.local_fea3(out_local_fea2)
  out_local_fea4 = self.local_fea4(out_local_fea3)
  out_local_fea5 = self.local_fea5(out_local_fea4)
  out_local = out_local_fea5.view(out_local_fea5.size(0), (- 1))
  out_local = self.local_classifier(out_local)
  out_global = self.global_model(x_global)
  out_global = out_global.view(out_global.size(0), (- 1))
  out_global = self.global_classifier(out_global)
  out = torch.cat([out_local, out_global], dim=1)
  out = self.classifier(out)
  return (out, [out_local_fea1, out_local_fea2, out_local_fea3, out_local_fea4, out_local_fea5])
class GAProcessor(DataProcessor):
 """DataProcessor for a gendered-pronoun coreference TSV dataset: each
 example labels whether a pronoun refers to span A, span B, or Neither."""
 def get_train_examples(self, path):
  """Read training examples from the TSV file at *path*."""
  tf.logging.info(path)
  return self._create_examples(self._read_tsv(path), 'train')
 def get_dev_examples(self, data_dir):
  """Read dev examples from data_dir/dev.tsv."""
  return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')
 def get_test_examples(self, data_dir):
  """Read test examples from data_dir/test.tsv."""
  return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')
 def get_labels(self):
  """Label set: the pronoun refers to candidate A, candidate B, or Neither."""
  return ['A', 'B', 'Neither']
 def _create_examples(self, lines, set_type):
  """Convert TSV rows (header skipped) into InputExamples carrying the
  pronoun/A/B character offsets and the coreference label."""
  examples = []
  for (i, line) in enumerate(lines):
   if (i == 0):
    continue  # skip the header row
   guid = tokenization.convert_to_unicode(line[0])
   text_a = tokenization.convert_to_unicode(line[1])
   # Columns 6 and 9 hold coreference flags; only their first character
   # ('T'/'F' — presumably TRUE/FALSE) is compared below.
   a_coref = tokenization.convert_to_unicode(line[6][0])
   b_coref = tokenization.convert_to_unicode(line[9][0])
   P_offset = int(tokenization.convert_to_unicode(line[3]))
   A_offset = int(tokenization.convert_to_unicode(line[5]))
   B_offset = int(tokenization.convert_to_unicode(line[8]))
   char_offsets = [P_offset, A_offset, B_offset]
   # Sanity check: the pronoun text (column 2) must occur at P_offset.
   assert (text_a[P_offset:(P_offset + 2)] == tokenization.convert_to_unicode(line[2])[:2])
   label = tokenization.convert_to_unicode('Neither')
   if (a_coref == tokenization.convert_to_unicode('T')):
    label = tokenization.convert_to_unicode('A')
   elif (b_coref == tokenization.convert_to_unicode('T')):
    label = tokenization.convert_to_unicode('B')
   examples.append(InputExample(guid=guid, text_a=text_a, char_offsets=char_offsets, label=label))
  return examples
def init_torch_seeds(seed=0):
 """Seed PyTorch and configure cuDNN accordingly.

 Seed 0 selects deterministic cuDNN (reproducible, potentially slower);
 any other seed selects benchmark mode (faster, non-deterministic).
 """
 torch.manual_seed(seed)
 reproducible = (seed == 0)
 cudnn.benchmark = not reproducible
 cudnn.deterministic = reproducible
def test_numpy_dataset():
 """Smoke-test NumpyDataset: construction, validation, and dataloaders."""
 ds = NumpyDataset([X, Y, Z])
 check_dataset(ds)
 get_dataloaders(ds)
def test_keep_alive_return_value(capture):
 """Verify keep-alive lifetime semantics of returned children.

 Compares instance counts (via ConstructorStats) and captured allocation
 messages for three cases: a plain returnChild() whose child is released
 immediately, returnChildKeepAlive() whose child lives as long as the
 parent, and a static function applying the same keep-alive policy.
 """
 n_inst = ConstructorStats.detail_reg_inst()
 with capture:
  p = m.Parent()
 assert (capture == 'Allocating parent.')
 # Case 1: without keep-alive the returned child is released right away.
 with capture:
  p.returnChild()
  assert (ConstructorStats.detail_reg_inst() == (n_inst + 1))
 assert (capture == '\n Allocating child.\n Releasing child.\n ')
 with capture:
  del p
  assert (ConstructorStats.detail_reg_inst() == n_inst)
 assert (capture == 'Releasing parent.')
 with capture:
  p = m.Parent()
 assert (capture == 'Allocating parent.')
 # Case 2: keep-alive ties the child's lifetime to the parent.
 with capture:
  p.returnChildKeepAlive()
  assert (ConstructorStats.detail_reg_inst() == (n_inst + 2))
 assert (capture == 'Allocating child.')
 with capture:
  del p
  assert (ConstructorStats.detail_reg_inst() == n_inst)
 assert (capture == '\n Releasing parent.\n Releasing child.\n ')
 p = m.Parent()
 assert (ConstructorStats.detail_reg_inst() == (n_inst + 1))
 # Case 3: the same keep-alive policy applied through a static function.
 with capture:
  m.Parent.staticFunction(p)
  assert (ConstructorStats.detail_reg_inst() == (n_inst + 2))
 assert (capture == 'Allocating child.')
 with capture:
  del p
  assert (ConstructorStats.detail_reg_inst() == n_inst)
 assert (capture == '\n Releasing parent.\n Releasing child.\n ')
class UnsupWalkLoss(Loss):
 """Unsupervised random-walk embedding loss: logistic loss that pulls
 positive (co-occurring) pairs together and pushes negative samples away,
 with the negative term scaled by ``neg_weight``."""

 def __init__(self, neg_weight=1.0, weight=None, **kwargs):
  super(UnsupWalkLoss, self).__init__(weight=weight, batch_axis=None, **kwargs)
  self._neg_weight = neg_weight

 def hybrid_forward(self, F, node_emb, pos_emb, neg_emb):
  """Return the scalar loss for node/positive/negative embedding batches."""
  # Similarity scores: rowwise dot products for positives, full
  # node-vs-negative inner-product matrix for negatives.
  pos_score = F.sum(F.broadcast_mul(node_emb, pos_emb), axis=1)
  neg_score = F.dot(node_emb, neg_emb, transpose_b=True)
  # Logistic losses: log(1 + exp(-s)) for positives, log(1 + exp(s))
  # for negatives.
  pos_term = F.log((1.0 + F.exp(((- 1.0) * pos_score))))
  neg_term = F.log((1.0 + F.exp(neg_score)))
  return (F.sum(pos_term) + (self._neg_weight * F.sum(neg_term)))
class Node():
 """Search-tree node wrapping a hypergraph state, with a running mean of
 rollout results and a derived leaf score used for priority ordering."""
 __slots__ = ('hg', 'n', 'graph_key', 'nid_path', 'size', 'local_score', 'forward_score', 'mean', 'count', 'leaf_score')
 def __init__(self, hg, nid_path, size, local_score, forward_score):
  self.hg = hg
  self.n = hg.get_num_nodes()
  # Hash of the node set so equal graphs reached via different paths
  # compare/hash the same (see __hash__).
  self.graph_key = hash(frozenset(hg.nodes))
  self.nid_path = nid_path
  self.size = size
  self.local_score = local_score
  self.forward_score = forward_score
  self.count = 0  # number of results folded into `mean` so far
  # NOTE(review): mean starts at +inf, so the first update() computes
  # inf + (-inf)/1 = nan; running means usually start at 0 or at the
  # first sample — confirm this initialization is intentional.
  self.mean = float('inf')
  self.leaf_score = None
 def update(self, x):
  """Fold result *x* into the running mean and refresh leaf_score."""
  self.count += 1
  delta = (x - self.mean)
  self.mean += (delta / self.count)
  # leaf_score = log2(mean) minus a correction shrinking with count.
  phi = math.log2(self.mean)
  phi -= ((phi / self.count) ** 0.5)
  self.leaf_score = phi
 def __hash__(self):
  # Identity is the reached graph plus partition size, not the path.
  return hash((self.graph_key, self.size))
 def __lt__(self, other):
  # Priority-queue ordering: smaller leaf_score sorts first.
  return (self.leaf_score < other.leaf_score)
 def __repr__(self):
  return f'<Node(|V|={self.n}, fscore={math.log2(self.forward_score)}, lscore={self.leaf_score}, count={self.count}, id={id(self)})>'
def test_has_globals() -> None:
 """Sanity-check module globals (ROOT, today, timestamp, WANDB_PATH)."""
 assert os.path.isdir(ROOT)
 # Bug fix: str.split('') raises ValueError ('empty separator'). The
 # 19-character timestamp asserted below matches 'YYYY-MM-DD HH:MM:SS',
 # whose date part is obtained by splitting on a space.
 # NOTE(review): if `timestamp` is ISO-8601 with a 'T' separator, split
 # on 'T' instead — confirm against the producer of `timestamp`.
 assert (today == timestamp.split(' ')[0])
 assert (len(timestamp) == 19)
 assert (WANDB_PATH.count('/') == 1)
def bloom_detokenize(ctx: c_void_p, token_id: c_int) -> str:
 """Convert a token id back to text via the native detokenize_api call,
 decoding the returned bytes as UTF-8."""
 raw_bytes = _lib.detokenize_api(ctx, token_id)
 return raw_bytes.decode('utf-8')
def reflectance_loss(texture, mask):
 """Penalize deviation of masked texture entries from their masked mean,
 encouraging near-constant reflectance inside the mask.

 Args:
  texture: tensor whose second dimension matches len(mask) — presumably
   (batch, n_vertices, channels); confirm with callers.
  mask: 1-D tensor of per-entry weights.

 Returns:
  Scalar loss, normalized by batch size and total mask weight.
 """
 mask = mask.reshape([1, mask.shape[0], 1])
 mask_total = torch.sum(mask)
 # Weighted mean along dim 1, kept broadcastable against `texture`
 # (keepdim is the canonical spelling of the original's keepdims alias).
 texture_mean = (torch.sum((mask * texture), dim=1, keepdim=True) / mask_total)
 squared_dev = ((((texture - texture_mean) * mask) ** 2))
 return (torch.sum(squared_dev) / (texture.shape[0] * mask_total))
def get_activation_by_name(activation_name, activation_id=None):
 """Map a Keras-style activation name to the corresponding BigDL layer.

 Args:
  activation_name: case-insensitive activation name ('tanh', 'relu', ...).
  activation_id: optional name to assign to the created layer.

 Returns:
  The constructed BigDL activation layer.
 """
 import bigdl.dllib.nn.layer as BLayer
 activation = None
 activation_name = activation_name.lower()
 if (activation_name == 'tanh'):
  activation = BLayer.Tanh()
 elif (activation_name == 'sigmoid'):
  activation = BLayer.Sigmoid()
 elif (activation_name == 'hard_sigmoid'):
  activation = BLayer.HardSigmoid()
 elif (activation_name == 'relu'):
  activation = BLayer.ReLU()
 elif (activation_name == 'softmax'):
  activation = BLayer.SoftMax()
 elif (activation_name == 'softplus'):
  activation = BLayer.SoftPlus(beta=1.0)
 elif (activation_name == 'softsign'):
  activation = BLayer.SoftSign()
 elif (activation_name == 'linear'):
  # 'linear' means no nonlinearity: identity layer.
  activation = BLayer.Identity()
 else:
  invalidInputError(False, ('Unsupported activation type: %s' % activation_name))
 # Bug fix: the condition was inverted ('if not activation_id'), which
 # assigned a name only when NO id was supplied (setting it to None/'').
 if activation_id:
  activation.set_name(activation_id)
 return activation
class Distribution(object):
 """Abstract interface for probability distributions used by policies:
 subclasses provide symbolic and numeric KL, likelihood ratios, entropy,
 log-likelihood, sampling, and a spec of their dist-info entries.

 Bug fix: dist_info_keys iterates ``self.dist_info_specs`` WITHOUT
 calling it, which only works when dist_info_specs is a property —
 previously it raised TypeError (iterating a bound method). The stripped
 @property decorators on dist_info_specs/dist_info_keys are restored.
 NOTE(review): ``dim`` may also have been intended as a property;
 left as a plain method since no in-file evidence settles it.
 """

 def dim(self):
  """Dimensionality of samples from this distribution."""
  raise NotImplementedError

 def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
  """Symbolic KL divergence between two distributions' info variables."""
  raise NotImplementedError

 def kl(self, old_dist_info, new_dist_info):
  """Numeric KL divergence between two distributions' info dicts."""
  raise NotImplementedError

 def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
  """Symbolic likelihood ratio p_new(x)/p_old(x)."""
  raise NotImplementedError

 def likelihood_ratio(self, x_var, old_dist_info, new_dist_info):
  """Numeric likelihood ratio p_new(x)/p_old(x)."""
  raise NotImplementedError

 def entropy_sym(self, dist_info_vars):
  """Symbolic entropy of the distribution."""
  raise NotImplementedError

 def entropy(self, dist_info):
  """Numeric entropy of the distribution."""
  raise NotImplementedError

 def log_likelihood_sym(self, x_var, dist_info_vars):
  """Symbolic log-likelihood of x under the distribution."""
  raise NotImplementedError

 def log_likelihood(self, xs, dist_info):
  """Numeric log-likelihood of samples xs under the distribution."""
  raise NotImplementedError

 def sample(self, dist_info):
  """Draw a sample from the distribution described by dist_info."""
  raise NotImplementedError

 @property
 def dist_info_specs(self):
  """List of (name, shape) pairs describing the dist-info entries."""
  raise NotImplementedError

 @property
 def dist_info_keys(self):
  """Names of the dist-info entries, in spec order."""
  return [k for (k, _) in self.dist_info_specs]
def test_intersection_with_broadcasting_module() -> None:
    """HardIntersection broadcasts a single box against a batch, in either order."""
    batch = BoxTensor(torch.tensor([[[1, 1], [3, 5]], [[1, 1], [3, 3]]]).float())
    single = BoxTensor(torch.tensor([[2, 0], [6, 2]]).float())
    expected = BoxTensor(torch.tensor([[[2, 1], [3, 2]], [[2, 1], [3, 2]]]).float())
    assert expected == HardIntersection()(batch, single)
    # Rebuild the operands exactly as before, then swap the argument order.
    batch = BoxTensor(torch.tensor([[[1, 1], [3, 5]], [[1, 1], [3, 3]]]).float())
    single = BoxTensor(torch.tensor([[2, 0], [6, 2]]).float())
    assert expected == HardIntersection()(single, batch)
class PerceptualCorrectness(nn.Module):
    """Flow-correctness loss: compares warped source VGG features against
    target VGG features via cosine similarity, normalized by the best
    achievable correspondence.
    """

    def __init__(self, layer=None):
        super(PerceptualCorrectness, self).__init__()
        self.add_module('vgg', VGG19())
        # Bug fix: the original used a mutable list as the default argument,
        # shared across all instances. Note 'rel1_1' (sic) is kept verbatim —
        # possibly a typo for 'relu1_1'; TODO(review) confirm against VGG19 keys.
        self.layer = ['rel1_1', 'relu2_1', 'relu3_1', 'relu4_1'] if layer is None else layer
        self.eps = 1e-08
        self.resample = Resample2d(4, 1, sigma=2)

    def __call__(self, target, source, flow_list, used_layers, mask=None, use_bilinear_sampling=True):
        """Accumulate the correctness loss over all flows/layers.

        Args:
            target, source: image batches fed through VGG19.
            flow_list: one flow field per used layer.
            used_layers: indices into self.layer, matched to flow_list
                after sorting in descending order.
            mask: optional spatial mask restricting the loss.
            use_bilinear_sampling: warp with grid_sample instead of Resample2d.
        """
        used_layers = sorted(used_layers, reverse=True)
        (self.target_vgg, self.source_vgg) = (self.vgg(target), self.vgg(source))
        loss = 0
        for i in range(len(flow_list)):
            loss += self.calculate_loss(flow_list[i], self.layer[used_layers[i]], mask, use_bilinear_sampling)
        return loss

    def calculate_loss(self, flow, layer, mask=None, use_bilinear_sampling=False):
        """Loss for a single VGG layer: exp(-cos_sim(warped, target) / best_sim)."""
        target_vgg = self.target_vgg[layer]
        source_vgg = self.source_vgg[layer]
        [b, c, h, w] = target_vgg.shape
        flow = F.interpolate(flow, [h, w])
        target_all = target_vgg.view(b, c, (- 1))
        source_all = source_vgg.view(b, c, (- 1)).transpose(1, 2)
        source_norm = (source_all / (source_all.norm(dim=2, keepdim=True) + self.eps))
        target_norm = (target_all / (target_all.norm(dim=1, keepdim=True) + self.eps))
        try:
            correction = torch.bmm(source_norm, target_norm)
        except RuntimeError:
            # Bug fix: the original bare `except:` swallowed the error and then
            # fell through to use the unbound `correction` (NameError). Log the
            # operand shapes for diagnosis, then re-raise.
            print('An exception occurred')
            print(source_norm.shape)
            print(target_norm.shape)
            raise
        # Best achievable similarity for each target position.
        (correction_max, max_indices) = torch.max(correction, dim=1)
        if use_bilinear_sampling:
            input_sample = self.bilinear_warp(source_vgg, flow).view(b, c, (- 1))
        else:
            input_sample = self.resample(source_vgg, flow).view(b, c, (- 1))
        correction_sample = F.cosine_similarity(input_sample, target_all)
        loss_map = torch.exp(((- correction_sample) / (correction_max + self.eps)))
        if (mask is None):
            # Subtract exp(-1), the value attained at a perfect correspondence.
            loss = (torch.mean(loss_map) - torch.exp(torch.tensor((- 1)).type_as(loss_map)))
        else:
            mask = F.interpolate(mask, size=(target_vgg.size(2), target_vgg.size(3)))
            mask = mask.view((- 1), (target_vgg.size(2) * target_vgg.size(3)))
            loss_map = (loss_map - torch.exp(torch.tensor((- 1)).type_as(loss_map)))
            loss = (torch.sum((mask * loss_map)) / (torch.sum(mask) + self.eps))
        return loss

    def bilinear_warp(self, source, flow):
        """Warp `source` by `flow` using a normalized sampling grid."""
        [b, c, h, w] = source.shape
        x = (torch.arange(w).view(1, (- 1)).expand(h, (- 1)).type_as(source).float() / (w - 1))
        y = (torch.arange(h).view((- 1), 1).expand((- 1), w).type_as(source).float() / (h - 1))
        grid = torch.stack([x, y], dim=0)
        grid = grid.unsqueeze(0).expand(b, (- 1), (- 1), (- 1))
        # Map [0, 1] grid coordinates to grid_sample's [-1, 1] convention.
        grid = ((2 * grid) - 1)
        flow = ((2 * flow) / torch.tensor([w, h]).view(1, 2, 1, 1).expand(b, (- 1), h, w).type_as(flow))
        grid = (grid + flow).permute(0, 2, 3, 1)
        input_sample = F.grid_sample(source, grid).view(b, c, (- 1))
        return input_sample
class FastSpeech2CriterionConfig(FairseqDataclass):
    """Dataclass configuration for the FastSpeech2 training criterion."""
    # Weight applied to the auxiliary CTC loss term (0.0 disables it).
    ctc_weight: float = field(default=0.0, metadata={'help': 'weight for CTC loss'})
def get_opinions(base_file, markable_file):
    """Parse a Darmstadt review basedata/markables XML pair into a dict with
    the reconstructed sentence text and its structured opinion annotations.

    Returns a dict with keys 'sent_id', 'bdir', 'text', 'opinions'; each
    opinion has Source / Target / Polar_expression spans plus Polarity and
    Intensity labels (adjusted by any opinion modifiers).
    """
    # How a 'negation' modifier flips polarity, and how 'increase'/'decrease'
    # modifiers shift the intensity scale.
    polarity_flip_dict = {'positive': 'negative', 'negative': 'positive', 'neutral': 'negative'}
    increase_strength_dict = {'average': 'strong', 'weak': 'average', 'strong': 'strong'}
    decrease_strength_dict = {'average': 'weak', 'weak': 'weak', 'strong': 'average'}
    new = {}
    # Sentence id: file basename minus a fixed 10-character suffix
    # (presumably the basedata filename extension — TODO confirm).
    new['sent_id'] = base_file.split('/')[(- 1)][:(- 10)]
    new['bdir'] = re.findall('DarmstadtServiceReviewCorpus/(.*)/basedata', base_file)[0]
    base_xml = open(base_file).read().encode('utf8')
    mark_xml = open(markable_file).read().encode('utf8')
    base_root = fromstring(base_xml, parser)
    mark_root = fromstring(mark_xml, parser)
    tokens = {}
    spans = {}
    markups = {}
    text = ''
    span_idx = 0
    # Pass 1: collect tokens and their character offsets in the rebuilt text
    # (tokens are joined with single spaces).
    for i in base_root:
        idx = i.get('id')
        token = i.text
        tokens[idx] = token
        text += (token + ' ')
        begin_span = span_idx
        end_span = (span_idx + len(token))
        spans[idx] = (begin_span, end_span)
        span_idx += (len(token) + 1)
    # Pass 2: index every markable element by its id for cross-referencing.
    for i in mark_root:
        idx = i.get('id')
        markups[idx] = i
    opinions = []
    for m in markups.values():
        if (m.get('annotation_type') == 'opinionexpression'):
            idx = m.get('id')
            hspan = m.get('opinionholder')
            exp_span = m.get('span')
            tspan = m.get('opiniontarget')
            label = m.get('polarity')
            modifier = m.get('opinionmodifier')
            intensity = m.get('strength')
            # --- Opinion holder: absent/empty, multi-span (first kept), or single span.
            if (hspan == 'empty'):
                holder = [[], []]
            elif (hspan is None):
                holder = [[], []]
            elif (';' in hspan):
                # Multiple holder references: only the first is used.
                hspan = hspan.split(';')[0]
                holder_span = markups[hspan].get('span')
                holder_span = expand_span(holder_span)
                holder_tokens = ' '.join([tokens[i] for i in holder_span])
                hld_off1 = spans[holder_span[0]][0]
                hld_off2 = spans[holder_span[(- 1)]][1]
                holder = [[holder_tokens], ['{0}:{1}'.format(hld_off1, hld_off2)]]
            else:
                holder_span = markups[hspan].get('span')
                holder_span = expand_span(holder_span)
                holder_tokens = ' '.join([tokens[i] for i in holder_span])
                hld_off1 = spans[holder_span[0]][0]
                hld_off2 = spans[holder_span[(- 1)]][1]
                holder = [[holder_tokens], ['{0}:{1}'.format(hld_off1, hld_off2)]]
            # --- Opinion modifiers: adjust polarity/intensity and collect their spans.
            if ((modifier != 'empty') and (modifier is not None)):
                if (';' in modifier):
                    mod_tokens = ''
                    mod_offs = ''
                    modifiers = modifier.split(';')
                    # NOTE: the loop variable rebinds `modifier` to each markup
                    # element; later checks on `modifier` see the last element.
                    for modifier in modifiers:
                        modifier = markups[modifier]
                        change = modifier.get('modifier')
                        modifier_span = modifier.get('span')
                        modifier_span = expand_span(modifier_span)
                        mod_toks = ' '.join([tokens[i] for i in modifier_span])
                        mod_off1 = spans[modifier_span[0]][0]
                        mod_off2 = spans[modifier_span[(- 1)]][1]
                        mod_tokens += (mod_toks + ';')
                        offs = '{0}:{1}'.format(mod_off1, mod_off2)
                        mod_offs += (offs + ';')
                        if (change == 'negation'):
                            label = polarity_flip_dict[label]
                        elif (change == 'increase'):
                            intensity = increase_strength_dict[intensity]
                        elif (change == 'decrease'):
                            intensity = decrease_strength_dict[intensity]
                        else:
                            pass
                    # Strip the trailing ';' accumulated in the loop.
                    mod_offs = mod_offs[:(- 1)]
                    mod_tokens = mod_tokens[:(- 1)]
                else:
                    modifier = markups[modifier]
                    change = modifier.get('modifier')
                    modifier_span = modifier.get('span')
                    modifier_span = expand_span(modifier_span)
                    mod_tokens = ' '.join([tokens[i] for i in modifier_span])
                    mod_off1 = spans[modifier_span[0]][0]
                    mod_off2 = spans[modifier_span[(- 1)]][1]
                    mod_offs = '{0}:{1}'.format(mod_off1, mod_off2)
                    if (change == 'negation'):
                        label = polarity_flip_dict[label]
                    elif (change == 'increase'):
                        intensity = increase_strength_dict[intensity]
                    elif (change == 'decrease'):
                        intensity = decrease_strength_dict[intensity]
                    else:
                        pass
            # --- Polar expression: its tokens and character offsets.
            exp_span = expand_span(exp_span)
            exp_tokens = ' '.join([tokens[i] for i in exp_span])
            exp_off1 = spans[exp_span[0]][0]
            exp_off2 = spans[exp_span[(- 1)]][1]
            # `modifier` may now be an Element (truthy, != 'empty') after the
            # branch above, so this includes the modifier span when present.
            if ((modifier != 'empty') and (modifier is not None)):
                expression = [[mod_tokens, exp_tokens], ['{0}'.format(mod_offs), '{0}:{1}'.format(exp_off1, exp_off2)]]
            else:
                expression = [[exp_tokens], ['{0}:{1}'.format(exp_off1, exp_off2)]]
            # --- Opinion target(s): one opinion is appended per target span.
            if (tspan == 'empty'):
                target = [[], []]
            elif (tspan is None):
                target = [[], []]
            elif (';' in tspan):
                tspans = tspan.split(';')
                for tsp in tspans:
                    target_span = markups[tsp].get('span')
                    target_span = expand_span(target_span)
                    target_tokens = ' '.join([tokens[i] for i in target_span])
                    trg_off1 = spans[target_span[0]][0]
                    trg_off2 = spans[target_span[(- 1)]][1]
                    target = [[target_tokens], ['{0}:{1}'.format(trg_off1, trg_off2)]]
                    opinions.append({'Source': holder, 'Target': target, 'Polar_expression': expression, 'Polarity': label.title(), 'Intensity': intensity.title()})
            else:
                target_span = markups[tspan].get('span')
                target_span = expand_span(target_span)
                target_tokens = ' '.join([tokens[i] for i in target_span])
                trg_off1 = spans[target_span[0]][0]
                trg_off2 = spans[target_span[(- 1)]][1]
                target = [[target_tokens], ['{0}:{1}'.format(trg_off1, trg_off2)]]
                opinions.append({'Source': holder, 'Target': target, 'Polar_expression': expression, 'Polarity': label.title(), 'Intensity': intensity.title()})
            # NOTE(review): when tspan is 'empty'/None, no opinion is appended —
            # expression-only opinions are dropped. Confirm this is intended.
    new['text'] = text
    new['opinions'] = opinions
    return new
def cityscapes_classes():
    """Return the 19 Cityscapes semantic class names in evaluation order."""
    names = (
        'road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
        'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
        'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
        'bicycle',
    )
    return list(names)
def tok_preprocess(text: List):
    """Normalize each document to alphanumerics and build a stemmed variant.

    Returns:
        (cleaned_docs, stemmed_docs): parallel lists, one entry per input doc.
    """
    cleaned = []
    stemmed = []
    for doc in text:
        normalized = re.sub('[^a-zA-Z0-9]+', ' ', doc)
        words = re.split('\\s+', normalized)
        # Stem only longer words; short tokens pass through unchanged.
        words = [(stemmer.stem(w) if (len(w) > 3) else w) for w in words]
        cleaned.append(normalized)
        stemmed.append(' '.join(words))
    return (cleaned, stemmed)
def parse_args():
    """Build and parse the CLI arguments for result re-evaluation.

    Prints the help text and exits when invoked with no arguments at all.
    """
    arg_parser = argparse.ArgumentParser(description='Re-evaluate results')
    arg_parser.add_argument('detection_file', type=str)
    arg_parser.add_argument('--output_dir', help='results directory', type=str)
    arg_parser.add_argument('--imdb', dest='imdb_name', help='dataset to re-evaluate', default='voc_2007_test', type=str)
    arg_parser.add_argument('--matlab', dest='matlab_eval', help='use matlab for evaluation', action='store_true')
    arg_parser.add_argument('--comp', dest='comp_mode', help='competition mode', action='store_true')
    arg_parser.add_argument('--nms', dest='apply_nms', help='apply nms', action='store_true')
    if len(sys.argv) == 1:
        arg_parser.print_help()
        sys.exit(1)
    return arg_parser.parse_args()
class VGG(nn.Module):
    """VGG-style network: a feature extractor followed by a 3-layer FC head.

    Args:
        features: convolutional feature-extractor module; its flattened output
            must be 512*8*8 elements per sample.
        num_classes: size of the final classification layer.
        init_weights: apply Kaiming/normal initialization when True.
        verification: drop the final Linear so forward() yields the 4096-d
            embedding instead of class logits.
    """

    def __init__(self, features, num_classes=1000, init_weights=True, verification=False):
        super(VGG, self).__init__()
        self.features = features
        head = [
            nn.Linear(512 * 8 * 8, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*head)
        if verification:
            # Strip the last layer to expose the penultimate embedding.
            self.classifier = nn.Sequential(*list(self.classifier.children())[:-1])
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.classifier(x)

    def _initialize_weights(self):
        """Kaiming init for convs, unit/zero for batchnorm, N(0, 0.01) for linears."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
def comp_avg_seg_dur(labs_list):
    """Average segment duration (frames per segment) over label sequences.

    A segment boundary is counted at index 0 of each sequence and wherever a
    label differs from its predecessor.
    """
    total_frames = 0
    total_segments = 0
    for labs in labs_list:
        arr = np.array(labs)
        boundaries = np.zeros(len(arr), dtype=bool)
        boundaries[0] = True
        boundaries[1:] = arr[1:] != arr[:-1]
        total_frames += len(boundaries)
        total_segments += int(boundaries.sum())
    return total_frames / total_segments
def _try_register_habitat_sim():
    """Register the habitat_sim-backed simulator if the package is importable;
    otherwise register a stub that re-raises the original import error."""
    # Probe for the optional habitat_sim dependency without hard-failing.
    try:
        import habitat_sim
        has_habitat_sim = True
    except ImportError as e:
        has_habitat_sim = False
        habitat_sim_import_error = e
    if has_habitat_sim:
        # Importing these modules has the side effect of registering the
        # real simulator implementations.
        from habitat.sims.habitat_simulator.actions import HabitatSimV1ActionSpaceConfiguration
        from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim
    else:
        # NOTE(review): this bare call looks like a decorator that lost its
        # leading '@' (i.e. `@_simulator(name='Sim-v0')` above the class) —
        # confirm against the original source.
        _simulator(name='Sim-v0')
        class HabitatSimImportError(Simulator):
            # Any attempt to construct the simulator surfaces the import error.
            def __init__(self, *args, **kwargs):
                raise habitat_sim_import_error
def train_model(train_critic, opt, model, critic, train_iter, valid_iter, fields, optimR, lr_schedulerR, optimT, lr_schedulerT, optimC, lr_schedulerC, start_epoch_at):
    """Adversarial template/response training loop.

    Alternates updates between the response generator (R), the template
    generator (T), and the critic (C). When `train_critic` is set, every step
    trains only the critic. The critic is checkpointed and the model is
    validated at the end of each epoch.
    """
    train_loss = nmt.NMTLossCompute(model.generator, fields['tgt'].vocab)
    valid_loss = nmt.NMTLossCompute(model.generator, fields['tgt'].vocab)
    if use_cuda:
        train_loss = train_loss.cuda()
        valid_loss = valid_loss.cuda()
    shard_size = opt.train_shard_size
    trainer = nmt.Trainer(opt, model, train_iter, valid_iter, train_loss, valid_loss, optimR, shard_size)
    scorer = nmt.Scorer(model, fields['tgt'].vocab, fields['src'].vocab, train_loss, opt)
    num_train_epochs = opt.num_train_epochs
    print('start training...')
    global_step = 0
    for step_epoch in range((start_epoch_at + 1), num_train_epochs):
        # Step the LR schedulers once the decay epoch has been reached.
        if (step_epoch >= opt.start_decay_at):
            lr_schedulerR.step()
            if (lr_schedulerT is not None):
                lr_schedulerT.step()
            if (lr_schedulerC is not None):
                lr_schedulerC.step()
        total_stats = Statistics()
        report_stats = Statistics()
        for (step_batch, batch) in enumerate(train_iter):
            global_step += 1
            # NOTE(review): for positive global_step, (-1) % global_step ==
            # global_step - 1, so this condition is essentially never true and
            # the T branch is never scheduled here. Possibly intended to be
            # `(global_step % 6) == 5` — confirm the intended cadence.
            if ((global_step % 6) == ((- 1) % global_step)):
                T_turn = True
                C_turn = False
                R_turn = False
            else:
                T_turn = False
                C_turn = False
                R_turn = True
            if train_critic:
                # Critic-only training mode overrides the schedule entirely.
                T_turn = False
                C_turn = True
                R_turn = False
            if C_turn:
                # --- Critic update: both generators frozen. ---
                model.template_generator.eval()
                model.response_generator.eval()
                critic.train()
                optimC.optimizer.zero_grad()
                (src_inputs, src_lengths) = batch.src
                (tgt_inputs, tgt_lengths) = batch.tgt
                (ref_src_inputs, ref_src_lengths) = batch.ref_src
                (ref_tgt_inputs, ref_tgt_lengths) = batch.ref_tgt
                (I_word, I_word_length) = batch.I
                (D_word, D_word_length) = batch.D
                (preds, ev) = model.template_generator(I_word, I_word_length, D_word, D_word_length, ref_tgt_inputs, ref_tgt_lengths, return_ev=True)
                preds = preds.squeeze(2)
                (template, template_lengths) = model.template_generator.do_mask_and_clean(preds, ref_tgt_inputs, ref_tgt_lengths)
                ((response, response_length), logp) = sample(model.response_generator, src_inputs, None, template, src_lengths, None, template_lengths, max_len=20)
                enc_embedding = model.response_generator.enc_embedding
                dec_embedding = model.response_generator.dec_embedding
                # Shuffle targets within the batch to form mismatched
                # (negative) pairs for the critic.
                inds = np.arange(len(tgt_lengths))
                np.random.shuffle(inds)
                inds_tensor = Variable(torch.LongTensor(inds).cuda())
                random_tgt = tgt_inputs.index_select(1, inds_tensor)
                random_tgt_len = [tgt_lengths[i] for i in inds]
                (x, y, z) = critic(enc_embedding(src_inputs), src_lengths, dec_embedding(tgt_inputs), tgt_lengths, dec_embedding(response), response_length, dec_embedding(random_tgt), random_tgt_len)
                loss = torch.mean((- x))
                loss.backward()
                optimC.step()
                stats = Statistics()
            elif T_turn:
                # --- Template-generator update via the scorer (policy step). ---
                model.template_generator.train()
                model.response_generator.eval()
                critic.eval()
                stats = scorer.update(batch, optimT, 'T', sample, critic)
            elif R_turn:
                # Joint models have no separate template generator to freeze.
                if (not (model.__class__.__name__ == 'jointTemplateResponseGenerator')):
                    model.template_generator.eval()
                model.response_generator.train()
                critic.eval()
                # Alternate MLE updates (trainer) with policy updates (scorer).
                if ((global_step % 2) == 0):
                    stats = trainer.update(batch)
                else:
                    stats = scorer.update(batch, optimR, 'R', sample, critic)
            else:
                stats = trainer.update(batch)
            report_stats.update(stats)
            total_stats.update(stats)
            report_func(opt, global_step, step_epoch, step_batch, len(train_iter), total_stats.start_time, optimR.lr, report_stats)
        # Epoch end: checkpoint the critic, report, validate, and step the trainer.
        if (critic is not None):
            critic.save_checkpoint(step_epoch, opt, os.path.join(opt.out_dir, ('checkpoint_epoch_critic%d.pkl' % step_epoch)))
        print(('Train perplexity: %g' % total_stats.ppl()))
        valid_stats = trainer.validate()
        print(('Validation perplexity: %g' % valid_stats.ppl()))
        trainer.epoch_step(step_epoch, out_dir=opt.out_dir)
        # Restore training mode after validation switched modules to eval.
        model.train()
class AttnUpDecoderBlock2D(nn.Module):
    """Decoder up-block: [ResnetBlock2D -> Attention] x num_layers, followed by
    an optional Upsample2D.

    Args:
        in_channels: channels of the incoming hidden states (first resnet only).
        out_channels: channels produced by every resnet/attention pair.
        resolution_idx: index of this block's resolution level (bookkeeping only).
        attention_head_dim: per-head dimension; defaults to out_channels when None.
        add_upsample: append an Upsample2D stage when True.
        temb_channels: channels of the optional time embedding.
    """

    def __init__(self, in_channels: int, out_channels: int, resolution_idx: Optional[int]=None, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, attention_head_dim: int=1, output_scale_factor: float=1.0, add_upsample: bool=True, temb_channels: Optional[int]=None):
        super().__init__()
        resnets = []
        attentions = []
        if (attention_head_dim is None):
            # Bug fix: Logger.warn is a deprecated alias of Logger.warning.
            logger.warning(f'It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `out_channels`: {out_channels}.')
            attention_head_dim = out_channels
        for i in range(num_layers):
            # Only the first resnet consumes in_channels; the rest chain on out_channels.
            input_channels = (in_channels if (i == 0) else out_channels)
            resnets.append(ResnetBlock2D(in_channels=input_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
            attentions.append(Attention(out_channels, heads=(out_channels // attention_head_dim), dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=(resnet_groups if (resnet_time_scale_shift != 'spatial') else None), spatial_norm_dim=(temb_channels if (resnet_time_scale_shift == 'spatial') else None), residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True))
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)
        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None
        self.resolution_idx = resolution_idx

    def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor]=None, scale: float=1.0) -> torch.FloatTensor:
        """Run the resnet/attention pairs, then upsample if configured."""
        for (resnet, attn) in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb=temb, scale=scale)
            cross_attention_kwargs = {'scale': scale}
            hidden_states = attn(hidden_states, temb=temb, **cross_attention_kwargs)
        if (self.upsamplers is not None):
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, scale=scale)
        return hidden_states
def atom_feature_vector_to_dict(atom_feature):
    """Decode a 9-element atom feature index vector into a readable dict,
    looking each index up in the module-level `allowable_features` tables."""
    (atomic_num_idx, chirality_idx, degree_idx, formal_charge_idx, num_h_idx,
     number_radical_e_idx, hybridization_idx, is_aromatic_idx, is_in_ring_idx) = atom_feature
    lookup = allowable_features
    return {
        'atomic_num': lookup['possible_atomic_num_list'][atomic_num_idx],
        'chirality': lookup['possible_chirality_list'][chirality_idx],
        'degree': lookup['possible_degree_list'][degree_idx],
        'formal_charge': lookup['possible_formal_charge_list'][formal_charge_idx],
        'num_h': lookup['possible_numH_list'][num_h_idx],
        'num_rad_e': lookup['possible_number_radical_e_list'][number_radical_e_idx],
        'hybridization': lookup['possible_hybridization_list'][hybridization_idx],
        'is_aromatic': lookup['possible_is_aromatic_list'][is_aromatic_idx],
        'is_in_ring': lookup['possible_is_in_ring_list'][is_in_ring_idx],
    }
def get_models(args):
    """Construct the generator and discriminators selected by `args`.

    Returns:
        (netG, netD, netD_local). netD_local is None for the 'rgb' color
        space, which defines no local discriminator.
    """
    # LSGAN discriminators omit the final sigmoid.
    sigmoid_flag = 1
    if args.gan == 'lsgan':
        sigmoid_flag = 0
    if args.model == 'scribbler':
        netG = scribbler.Scribbler(5, 3, 32)
    elif args.model == 'texturegan':
        netG = texturegan.TextureGAN(5, 3, 32)
    elif args.model == 'pix2pix':
        netG = define_G(5, 3, 32)
    elif args.model == 'scribbler_dilate_128':
        netG = scribbler_dilate_128.ScribblerDilate128(5, 3, 32)
    else:
        print(args.model + ' not support. Using Scribbler model')
        netG = scribbler.Scribbler(5, 3, 32)
    # Bug fix: netD_local was only bound in the 'lab' branch, so the 'rgb'
    # path crashed with NameError when loading weights or returning.
    netD_local = None
    if args.color_space == 'lab':
        netD = discriminator.Discriminator(1, 32, sigmoid_flag)
        netD_local = discriminator.LocalDiscriminator(2, 32, sigmoid_flag)
    elif args.color_space == 'rgb':
        netD = discriminator.Discriminator(3, 32, sigmoid_flag)
    if args.load == -1:
        netG.apply(weights_init)
    else:
        load_network(netG, 'G', args.load_epoch, args.load, args)
    if args.load_D == -1:
        netD.apply(weights_init)
    else:
        load_network(netD, 'D', args.load_epoch, args.load_D, args)
        if netD_local is not None:
            load_network(netD_local, 'D_local', args.load_epoch, args.load_D, args)
    return (netG, netD, netD_local)
def simxGetUISlider(clientID, uiHandle, uiButtonID, operationMode):
    """Query a UI slider's position through the remote API.

    Returns:
        (return_code, position): the API status code and the slider value.
    """
    slider_position = ct.c_int()
    ret_code = c_GetUISlider(clientID, uiHandle, uiButtonID, ct.byref(slider_position), operationMode)
    return (ret_code, slider_position.value)
class AnnoList(MutableSequence):
    """A list of annotations carrying typed, named attribute metadata
    (protobuf-backed descriptors)."""

    # Protobuf wire types for attribute values.
    TYPE_INT32 = 5
    TYPE_FLOAT = 2
    TYPE_STRING = 9

    def __init__(self, data=None):
        super(AnnoList, self).__init__()
        self.attribute_desc = {}
        self.attribute_val_to_str = {}
        if data is not None:
            self._list = list(data)
        else:
            self._list = list()

    def add_attribute(self, name, dtype):
        """Register a new attribute descriptor with an auto-assigned id."""
        _adesc = AnnoList_pb2.AttributeDesc()
        _adesc.name = name
        if self.attribute_desc:
            _adesc.id = max(self.attribute_desc[d].id for d in self.attribute_desc) + 1
        else:
            _adesc.id = 0
        if dtype == int:
            _adesc.dtype = AnnoList.TYPE_INT32
        elif dtype == float or dtype == np.float32:
            _adesc.dtype = AnnoList.TYPE_FLOAT
        elif dtype == str:
            _adesc.dtype = AnnoList.TYPE_STRING
        else:
            print('unknown attribute type: ', dtype)
            assert False
        self.attribute_desc[name] = _adesc

    def add_attribute_val(self, aname, vname, val):
        """Attach a value -> display-string mapping to a registered attribute."""
        assert aname in self.attribute_desc
        # Only add the protobuf entry if this value id is not yet present.
        if all(val_desc.id != val for val_desc in self.attribute_desc[aname].val_to_str):
            val_desc = self.attribute_desc[aname].val_to_str.add()
            val_desc.id = val
            val_desc.s = vname
        if aname not in self.attribute_val_to_str:
            self.attribute_val_to_str[aname] = {}
        assert val not in self.attribute_val_to_str[aname]
        self.attribute_val_to_str[aname][val] = vname

    def attribute_get_value_str(self, aname, val):
        """Human-readable string for an attribute value; falls back to str(val)."""
        if aname in self.attribute_val_to_str and val in self.attribute_val_to_str[aname]:
            return self.attribute_val_to_str[aname][val]
        else:
            return str(val)

    def save(self, fname):
        save(fname, self)

    def __len__(self):
        return len(self._list)

    def __getitem__(self, ii):
        if isinstance(ii, slice):
            # Slices keep the attribute metadata of the parent list.
            res = AnnoList()
            res.attribute_desc = self.attribute_desc
            res._list = self._list[ii]
            return res
        else:
            return self._list[ii]

    def __delitem__(self, ii):
        del self._list[ii]

    def __setitem__(self, ii, val):
        # Bug fix: the original returned self._list[ii] without assigning,
        # so item assignment silently did nothing.
        self._list[ii] = val

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return '<AnnoList %s>' % self._list

    def insert(self, ii, val):
        self._list.insert(ii, val)

    def append(self, val):
        list_idx = len(self._list)
        self.insert(list_idx, val)
class Parents(_Relation):
    """Relation binding symbol matchers in the PARENTS direction."""

    def __init__(self, bind_name: str, matchers: List[SymbolMatcher], exact: bool) -> None:
        # Delegate to _Relation with the relation kind fixed to PARENTS.
        super().__init__(bind_name, MatcherRelation.PARENTS, matchers, exact)
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2):
    """Frechet distance between two Gaussians N(mu1, sigma1) and N(mu2, sigma2).

    d^2 = |mu1 - mu2|^2 + Tr(sigma1 + sigma2 - 2 * sqrt(sigma1 @ sigma2))
    """
    mean_term = np.square(mu1 - mu2).sum()
    # disp=False returns (sqrtm result, error estimate) instead of printing.
    (covmean, _) = sp.linalg.sqrtm(np.dot(sigma1, sigma2), disp=False)
    trace_term = np.trace(sigma1 + sigma2 - 2 * covmean)
    # sqrtm may introduce a tiny imaginary component; keep the real part.
    return np.real(mean_term + trace_term)
def setup(args):
    """Create, validate, and freeze the detectron2 config, then register
    datasets and configure logging.

    Returns:
        The frozen config object.
    """
    cfg = get_cfg()
    add_dataset_config(cfg)
    add_scenegraph_config(cfg)
    # Bug fix: the assert message referenced the nonexistent attribute
    # cfg.MODEL.ROI_SCENEGRaGraph.MODE, so a failing assertion raised
    # AttributeError instead of reporting the unsupported mode.
    # NOTE(review): 'sgls' may be a typo for 'sgcls' — confirm the valid modes.
    mode = cfg.MODEL.ROI_SCENEGRAPH_HEAD.MODE
    assert mode in ['predcls', 'sgls', 'sgdet'], 'Mode {} not supported'.format(mode)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    register_datasets(cfg)
    default_setup(cfg, args)
    setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name='LSDA')
    return cfg
class SalObjDataset(data.Dataset):
    """Salient-object training dataset yielding (image, mask, connectivity)."""

    def __init__(self, image_root, gt_root, trainsize):
        self.trainsize = trainsize
        # Pair up .jpg images with .jpg/.png ground-truth masks by sorted name.
        self.images = sorted(image_root + f for f in os.listdir(image_root) if f.endswith('.jpg'))
        self.gts = sorted(gt_root + f for f in os.listdir(gt_root) if f.endswith('.jpg') or f.endswith('.png'))
        self.filter_files()
        self.size = len(self.images)
        self.img_transform = transforms.Compose([
            transforms.Resize((self.trainsize, self.trainsize)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
        self.gt_transform = transforms.Compose([
            transforms.Resize((self.trainsize, self.trainsize)),
            transforms.ToTensor(),
        ])

    def __getitem__(self, index):
        image = self.img_transform(self.rgb_loader(self.images[index]))
        gt = self.gt_transform(self.binary_loader(self.gts[index]))
        # Binarize the mask, then derive its connectivity representation.
        gt = gt.ge(0.5).float()
        conn = sal2conn(gt.squeeze())
        return (image, gt, conn)

    def filter_files(self):
        """Keep only (image, gt) pairs whose spatial sizes match."""
        assert len(self.images) == len(self.gts)
        kept = [
            (img_path, gt_path)
            for (img_path, gt_path) in zip(self.images, self.gts)
            if Image.open(img_path).size == Image.open(gt_path).size
        ]
        self.images = [pair[0] for pair in kept]
        self.gts = [pair[1] for pair in kept]

    def rgb_loader(self, path):
        with open(path, 'rb') as f:
            return Image.open(f).convert('RGB')

    def binary_loader(self, path):
        with open(path, 'rb') as f:
            return Image.open(f).convert('L')

    def resize(self, img, gt):
        """Upscale both images to at least trainsize on each side, if needed."""
        assert img.size == gt.size
        (w, h) = img.size
        if h < self.trainsize or w < self.trainsize:
            h = max(h, self.trainsize)
            w = max(w, self.trainsize)
            return (img.resize((w, h), Image.BILINEAR), gt.resize((w, h), Image.NEAREST))
        return (img, gt)

    def __len__(self):
        return self.size
def test_prefixsum_idx():
    """find_prefixsum_idx locates the leaf where the running prefix sum
    first exceeds the queried mass."""
    tree = SumSegmentTree(4)
    tree[2] = 1.0
    tree[3] = 3.0
    expectations = [(0.0, 2), (0.5, 2), (0.99, 2), (1.01, 3), (3.0, 3), (4.0, 3)]
    for (prefix, leaf) in expectations:
        assert tree.find_prefixsum_idx(prefix) == leaf
class TestOptions(BaseOptions):
    """Command-line options used at test/inference time (extends BaseOptions)."""

    def initialize(self):
        # Register the shared options first, then the test-only ones.
        BaseOptions.initialize(self)
        self.parser.add_argument('--ntest', type=int, default=float('inf'), help='# of test examples.')
        self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        self.parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        self.parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        self.parser.add_argument('--how_many', type=int, default=3000, help='how many test images to run')
        self.parser.add_argument('--start_frame', type=int, default=0, help='frame index to start inference on')
        self.parser.add_argument('--n_frames_total', type=int, default=30, help='the overall number of frames in a sequence to train with')
        # Test phase never trains.
        self.isTrain = False
# NOTE(review): the bare `_config` below looks like a decorator that lost its
# leading '@' (e.g. sacred-style `@_config`) — confirm against the original.
_config
def imitation_learning():
    """Experiment configuration for imitation learning: builds the learner
    and training settings dicts. NOTE(review): `cfg` is not returned — in
    sacred-style config functions the locals are captured by the decorator;
    confirm that is the intent here."""
    cfg = {}
    # Learner: recurrent actor-critic over a sidetuned perception stack.
    cfg['learner'] = {'model': 'PolicyWithBase', 'lr': 0.0002, 'optimizer_kwargs': {'weight_decay': 3.8e-07}, 'model_kwargs': {'base': None, 'action_space': spaces.Discrete(3), 'base_class': 'NaivelyRecurrentACModule', 'base_kwargs': {'use_gru': False, 'internal_state_size': 512, 'perception_unit': None, 'perception_unit_class': 'RLSidetuneWrapper', 'perception_unit_kwargs': {'n_frames': 4, 'n_map_channels': 3, 'use_target': True, 'blind': False, 'extra_kwargs': {'main_perception_network': 'TaskonomyFeaturesOnlyNet', 'sidetune_kwargs': {'n_channels_in': 3, 'n_channels_out': 8, 'normalize_pre_transfer': False, 'base_class': None, 'base_weights_path': None, 'base_kwargs': {}, 'side_class': 'FCN5', 'side_kwargs': {'eval_only': False, 'normalize_outputs': False}, 'side_weights_path': None}}}}}}
    # Training inputs/targets consumed by the behavior-cloning loop.
    cfg['training'] = {'sources': ['rgb_filled', 'map', 'target', 'taskonomy'], 'sources_as_dict': True, 'targets': ['action']}
def main():
    """End-to-end training entry point: load data, build the model/tokenizer,
    and hand off to auto_accelerate for distributed-strategy selection."""
    args = parse_args()
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    raw_datasets = get_dataset(args)
    config = get_config(args)
    tokenizer = get_tokenizer(args)
    model = get_model(args, config)
    # Tokenize, then group/chunk into LM training examples.
    tokenized_datasets = tokenize_dataset(args, model, raw_datasets, tokenizer)
    lm_datasets = process_dataset(args, tokenized_datasets, tokenizer)
    train_dataset = lm_datasets['train']
    # Log a few random samples to eyeball the preprocessing output.
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
    dataloader_args = {'shuffle': True, 'collate_fn': default_data_collator, 'batch_size': args.total_train_batch_size, 'pin_memory': True, 'num_workers': args.dataloader_num_workers, 'persistent_workers': (args.dataloader_num_workers > 0)}
    # auto_accelerate searches for the best parallelization strategy.
    (status, result, best_strategy) = auto_accelerate(model, torch.optim.AdamW, train_dataset, loss_func=my_loss_func, prepare_input=my_prepare_input, model_input_format='unpack_dict', optim_args={'lr': args.learning_rate}, optim_param_func=partial(optim_param_func, args=args), dataloader_args=dataloader_args, excluded=[], included=[], verbose=True)
    assert status, 'auto_accelerate failed'
    print('Best strategy is:', best_strategy)
def pad_sequence_from_left(sequences: Sequence[Tensor], batch_first: bool=False, padding_value: float=0.0):
    """Pad variable-length sequences on the LEFT instead of the right.

    Each sequence is reversed, right-padded with ``padding_value``, and the
    padded batch is reversed back along the sequence axis.

    Args:
        sequences: tensors of shape (L_i, *); trailing dims must match.
        batch_first: output is (B, L_max, *) when True, else (L_max, B, *).
        padding_value: fill value for the padded positions.

    Returns:
        The left-padded batch tensor.
    """
    reversed_seqs = tuple(seq.flip(0) for seq in sequences)
    # Improvement: use the public pad_sequence API instead of the private
    # torch._C._nn hook, which is not a stable interface.
    padded = torch.nn.utils.rnn.pad_sequence(reversed_seqs, batch_first=batch_first, padding_value=padding_value)
    # The sequence axis is dim 1 when batch_first, else dim 0.
    return padded.flip(int(batch_first))
class Lidar3DWrapper():
def __init__(self, optimizer: Optimizer):
self.cloud_publisher = rospy.Publisher('/transformed_clouds', PointCloud2, queue_size=5)
self.state_index = 1
self.optimizer = optimizer
self.submap_clouds = []
self.correspondence_threshold = 1.0
self.icp_noise_model = gtsam.noiseModel.Diagonal.Sigmas(np.ones((6,)))
self.baseTlidar = gtsam.Pose3()
self.lidar3d_lock = Lock()
def preprocess_measurement(self, laser_msg: PointCloud2, min_range=0, max_range=np.inf):
point_vector_list = point_cloud2.read_points_list(laser_msg)
points = [[point_vec.x, point_vec.y, point_vec.z] for point_vec in point_vector_list if (min_range <= np.linalg.norm([point_vec.x, point_vec.y, point_vec.z]) <= max_range)]
point_cloud = np.array(points).T
return point_cloud
def create_lidar_factor(self, a: int, b: int, cloud_a: np.ndarray, cloud_b: np.ndarray, aTb_estimate=None):
if (not aTb_estimate):
if self.optimizer.results.exists(X(b)):
wTa = self.optimizer.results.atPose3(X(a))
wTb = self.optimizer.results.atPose3(X(b))
aTb_estimate = wTa.between(wTb)
elif ((a == 0) and (b == 1)):
aTb_estimate = gtsam.Pose3()
else:
wTp = self.optimizer.results.atPose3(X((a - 1)))
wTq = self.optimizer.results.atPose3(X((b - 1)))
aTb_estimate = wTp.between(wTq)
aTb_matrix = pygicp.align_points(cloud_a.T, cloud_b.T, max_correspondence_distance=self.correspondence_threshold, initial_guess=aTb_estimate.matrix(), k_correspondences=15, num_threads=2)
aTb = gtsam.Pose3(aTb_matrix)
factor = gtsam.BetweenFactorPose3(X(a), X(b), aTb, self.icp_noise_model)
wTa = self.optimizer.results.atPose3(X(a))
wTb_estimate = wTa.compose(aTb)
return (factor, wTb_estimate)
def lidar_callback(self, msg: PointCloud2, imu=None):
aTb_estimate = None
(index_a, index_b) = ((self.state_index - 1), self.state_index)
if (imu and (len(self.submap_clouds) > 0)):
aTb_estimate = imu.create_and_add_factor(index_a, index_b)
min_range = rospy.get_param('/lidar3d/min_range')
max_range = rospy.get_param('/lidar3d/max_range')
cloud_b = self.preprocess_measurement(msg, min_range=min_range, max_range=max_range)
cloud_b = self.baseTlaser.transformFrom(cloud_b)
if (len(self.submap_clouds) == 0):
self.submap_clouds.append(cloud_b)
return
cloud_a = self.submap_clouds[(- 1)]
(factor, wTb_estimate) = self.create_lidar_factor(index_a, index_b, cloud_a, cloud_b, aTb_estimate)
self.optimizer.add_factor(factor, (X(index_b), wTb_estimate))
self.optimizer.optimize()
self.submap_clouds.append(cloud_b)
with self.lidar3d_lock:
current_state = self.state_index
submap = self.submap_clouds.copy()
if (len(submap) > 2):
self.create_skip_connections(submap, current_state)
self.optimizer.optimize()
if (len(submap) == rospy.get_param('/lidar3d/submap_length')):
self.publish_transformed_cloud(submap[0], ((current_state - len(submap)) + 1))
self.state_index += 1
def create_skip_connections(self, submap, current_index):
submap_length = len(submap)
for i in range((submap_length - 2)):
index_a = ((current_index - i) - 2)
index_b = current_index
self.create_skip_connection(submap, index_a, index_b)
def create_skip_connection(self, clouds, index_a, index_b):
(cloud_a, cloud_b) = (clouds[(- ((index_b - index_a) + 1))], clouds[(- 1)])
(factor, _) = self.create_lidar_factor(index_a, index_b, cloud_a, cloud_b)
self.optimizer.add_factor(factor)
def publish_transformed_cloud(self, bTcloud, index):
    """Publish a body-frame point cloud in the world ('map') frame.

    Looks up the optimized pose of state `index`, maps the cloud through
    it, and publishes a stamped PointCloud2 on `cloud_publisher`.
    """
    world_pose = self.optimizer.results.atPose3(X(index))
    # transformFrom maps body-frame points to the world frame; the
    # transpose yields the N x 3 layout the message builder expects.
    world_points = world_pose.transformFrom(bTcloud).T
    hdr = Header()
    hdr.frame_id = 'map'
    hdr.stamp = rospy.Time.now()
    self.cloud_publisher.publish(point_cloud2.create_cloud_xyz32(hdr, world_points))
def initialize_params(self):
    """Load lidar parameters from the ROS parameter server and cache the
    base_link->laser extrinsic transform from TF.
    """
    # Bounded deque: once full, appending evicts the oldest cloud.
    self.submap_clouds = deque([], rospy.get_param('/lidar3d/submap_length'))
    icp_noise_sigmas = rospy.get_param('/lidar3d/icp_noise')
    # gtsam Pose3 sigma order is (rx, ry, rz, x, y, z); rotation entries
    # are given in degrees and converted to radians.
    # NOTE(review): the translation sigmas reuse entries [0:3] of the same
    # list that feeds the rotation sigmas — confirm whether the config
    # actually provides 6 values and indices [3:6] were intended here.
    self.icp_noise_model = gtsam.noiseModel.Diagonal.Sigmas(np.array([np.deg2rad(icp_noise_sigmas[0]), np.deg2rad(icp_noise_sigmas[1]), np.deg2rad(icp_noise_sigmas[2]), icp_noise_sigmas[0], icp_noise_sigmas[1], icp_noise_sigmas[2]]))
    self.correspondence_threshold = rospy.get_param('/lidar3d/correspondence_threshold')
    tf_buffer = tf2_ros.Buffer()
    tf2_ros.TransformListener(tf_buffer)
    # Give the TF listener time to receive the (static) transform.
    rospy.sleep(1)
    baseTlaser = tf_buffer.lookup_transform('base_link', 'laser', rospy.Time())
    translation = np.array([baseTlaser.transform.translation.x, baseTlaser.transform.translation.y, baseTlaser.transform.translation.z])
    # gtsam.Rot3.Quaternion expects (w, x, y, z) ordering.
    quaternion = np.array([baseTlaser.transform.rotation.w, baseTlaser.transform.rotation.x, baseTlaser.transform.rotation.y, baseTlaser.transform.rotation.z])
    self.baseTlaser = gtsam.Pose3(gtsam.Rot3.Quaternion(*quaternion), translation)
class BoundingBoxHead(nn.Module):
    """MLP head mapping a feature vector to bounding-box parameters.

    Each (in, out) pair in `features` becomes a Linear layer; every layer
    except the last is followed by an instance of `activation`.
    """

    def __init__(self, features: Tuple[Tuple[(int, int)]]=((256, 64), (64, 16), (16, 4)), activation: Type=nn.PReLU) -> None:
        super(BoundingBoxHead, self).__init__()
        modules = []
        last = len(features) - 1
        for idx, (n_in, n_out) in enumerate(features):
            modules.append(nn.Linear(in_features=n_in, out_features=n_out))
            if idx != last:
                # No activation after the final projection.
                modules.append(activation())
        self.layers = nn.Sequential(*modules)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Apply the MLP; expects input of shape (..., features[0][0])."""
        return self.layers(input)
def load_file(filename):
    """Read a whitespace-tokenized corpus, one document per line.

    A standalone '#' token is merged with the following token to form a
    hashtag (e.g. "# tag" -> "#tag"); a trailing lone '#' is dropped.

    Args:
        filename: path to the text file.

    Returns:
        A list of documents, each a list of token strings.
    """
    corpus = []
    # Context manager guarantees the handle is closed even if iteration
    # raises (the original open()/close() pair leaked it on exceptions).
    with open(filename, 'r') as f:
        for line in f:
            doc = []
            hashtag = False
            for word in line.strip().split():
                if word == '#':
                    # Defer: glue '#' onto the next token.
                    hashtag = True
                elif hashtag:
                    doc.append('#' + word)
                    hashtag = False
                else:
                    doc.append(word)
            corpus.append(doc)
    return corpus
# NOTE(review): decorator restored from the garbled residue '_grad()' —
# evaluation must not track gradients.
@torch.no_grad()
def evaluate(model, loader, device):
    """Embed every sample in `loader`, form all pairwise distances, and
    print the best same-identity verification threshold and its accuracy.

    Args:
        model: embedding network; called as model(batch).
        loader: yields (samples, labels) batches.
        device: device the samples are moved to.
    """
    model.eval()
    (embeds, labels) = ([], [])
    (dists, targets) = (None, None)
    for data in loader:
        (samples, _labels) = (data[0].to(device), data[1])
        out = model(samples)
        embeds.append(out)
        labels.append(_labels)
    embeds = torch.cat(embeds, dim=0)
    labels = torch.cat(labels, dim=0)
    # N x N distance matrix; targets[i, j] is True when i and j share a label.
    dists = torch.cdist(embeds, embeds)
    labels = labels.unsqueeze(0)
    targets = (labels == labels.t())
    # Keep only the strict upper triangle: each unordered pair once,
    # no self-pairs.
    mask = (torch.ones(dists.size()).triu() - torch.eye(dists.size(0)))
    dists = dists[(mask == 1)]
    targets = targets[(mask == 1)]
    (threshold, accuracy) = find_best_threshold(dists, targets, device)
    print('accuracy: {:.3f}%, threshold: {:.2f}'.format(accuracy, threshold))
def get_valid_stats(args, trainer, stats):
    """Augment validation `stats` in place with perplexity, the update
    count, and the best-so-far value of the checkpoint metric.

    Returns the same `stats` dict.
    """
    if 'nll_loss' in stats and 'ppl' not in stats:
        stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
    stats['num_updates'] = trainer.get_num_updates()
    # save_checkpoint.best only exists once a checkpoint has been saved.
    if hasattr(checkpoint_utils.save_checkpoint, 'best'):
        if args.maximize_best_checkpoint_metric:
            pick = max
        else:
            pick = min
        metric = args.best_checkpoint_metric
        stats['best_{0}'.format(metric)] = pick(checkpoint_utils.save_checkpoint.best, stats[metric])
    return stats
def make_cand_array(cand_dict):
    """Encode a particle candidate dict as a fixed 7-feature vector:
    [pid-code, charge, pt, eta, sin(phi), cos(phi), energy].

    A missing/empty candidate maps to the zero vector.
    """
    if not cand_dict:
        return np.zeros(7)
    # Unknown PIDs fall back to code 1.
    pid_code = gen_pid_encoding.get(abs(cand_dict['pid']), 1)
    phi = cand_dict['phi']
    features = [
        pid_code,
        cand_dict['charge'],
        cand_dict.get('pt', 0),
        cand_dict['eta'],
        np.sin(phi),
        np.cos(phi),
        cand_dict.get('energy', 0),
    ]
    return np.array(features)
def pixels_to_box_corners(row_pixel: int, column_pixel: int, length_in_pixels: float, width_in_pixels: float, yaw_in_radians: float) -> np.ndarray:
    """Return the four corner points (in pixel coordinates) of a rotated box.

    cv2.boxPoints expects ((cx, cy), (w, h), angle_in_degrees); the yaw is
    negated when converting to degrees because image rows grow downward.
    """
    center = (column_pixel, row_pixel)
    size = (length_in_pixels, width_in_pixels)
    angle_degrees = -yaw_in_radians * 180 / np.pi
    return cv2.boxPoints((center, size, angle_degrees))
# NOTE(review): decorator restored from the garbled residue
# "_task('frm_text_to_speech')" — assumes `register_task` is imported at
# file level (standard fairseq task registration); confirm against the
# original imports.
@register_task('frm_text_to_speech')
class FrmTextToSpeechTask(TextToSpeechTask):
    """Text-to-speech task variant that trains on (optionally chunked)
    frame-level data."""

    # add_args takes only `parser` (no self/cls), so it must be static.
    @staticmethod
    def add_args(parser):
        """Extend the base TTS CLI arguments with chunking/dedup options."""
        TextToSpeechTask.add_args(parser)
        parser.add_argument('--do_chunk', action='store_true', help='train on chunks')
        parser.add_argument('--chunk_bound', default=(- 1), type=int)
        parser.add_argument('--chunk_init', default=50, type=int)
        parser.add_argument('--chunk_incr', default=5, type=int)
        parser.add_argument('--add_eos', action='store_true')
        parser.add_argument('--dedup', action='store_true')
        parser.add_argument('--ref_fpu', default=(- 1), type=float)

    def load_dataset(self, split, **unused_kwargs):
        """Load the TSV-backed dataset for `split` into self.datasets."""
        is_train_split = split.startswith('train')
        pre_tokenizer = self.build_tokenizer(self.args)
        bpe_tokenizer = self.build_bpe(self.args)
        self.datasets[split] = FrmTextToSpeechDatasetCreator.from_tsv(
            self.args.data, self.data_cfg, split, self.src_dict,
            pre_tokenizer, bpe_tokenizer,
            is_train_split=is_train_split,
            n_frames_per_step=self.args.n_frames_per_step,
            speaker_to_id=self.speaker_to_id,
            do_chunk=self.args.do_chunk,
            chunk_bound=self.args.chunk_bound,
            chunk_init=self.args.chunk_init,
            chunk_incr=self.args.chunk_incr,
            add_eos=self.args.add_eos,
            dedup=self.args.dedup,
            ref_fpu=self.args.ref_fpu,
        )
def collect_rules(root_path):
    """Walk `root_path` and gather the rules of every directory containing
    a BUILD.bazel file, labeling each package as '//<dir>'.
    """
    rules = []
    for directory, _dirnames, _filenames in os.walk(root_path):
        if os.path.exists(os.path.join(directory, 'BUILD.bazel')):
            rules.extend(read_build('//' + directory))
    return rules
def make_sparse_convmodule(in_channels, out_channels, kernel_size, indice_key, stride=1, padding=0, conv_type='SubMConv3d', norm_cfg=None, order=('conv', 'norm', 'act')):
    """Assemble a sparse convolution block whose layers follow `order`.

    `order` may be any subset/permutation (up to three entries) of
    'conv', 'norm', 'act'. Inverse sparse convolutions take no
    stride/padding arguments. Returns a spconv.SparseSequential.
    """
    assert isinstance(order, tuple) and len(order) <= 3
    # Union with the full set equals the full set iff every entry of
    # `order` is one of conv/norm/act (i.e. a subset test).
    assert (set(order) | {'conv', 'norm', 'act'}) == {'conv', 'norm', 'act'}

    conv_cfg = dict(type=conv_type, indice_key=indice_key)
    inverse_types = ('SparseInverseConv3d', 'SparseInverseConv2d', 'SparseInverseConv1d')

    modules = []
    for name in order:
        if name == 'conv':
            if conv_type in inverse_types:
                # Inverse convs recover geometry via indice_key alone.
                modules.append(build_conv_layer(conv_cfg, in_channels, out_channels, kernel_size, bias=False))
            else:
                modules.append(build_conv_layer(conv_cfg, in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=False))
        elif name == 'norm':
            modules.append(build_norm_layer(norm_cfg, out_channels)[1])
        elif name == 'act':
            modules.append(nn.ReLU(inplace=True))
    return spconv.SparseSequential(*modules)
def main(args):
    """Decode `args.decode_set` with a pre-trained end-to-end ASR model and
    report WER, top-n (oracle) WER and, for batch size 1, RTF.

    Writes predict.txt (1-best), predict.log (all hypotheses) and RESULT
    into a decode directory named from the decoding configuration.

    Fixes vs. the original:
    - `nbest_min_false_tokens` was initialized to 0.0, so the min over the
      n-best list was always 0 and the reported top-n WER was always 0;
      it now starts at +inf.
    - the bare `except:` around the epoch regex is narrowed to
      AttributeError (re.search returning None).
    """
    checkpoint = torch.load(args.load_model)
    if args.config is not None:
        with open(args.config, 'r') as f:
            params = yaml.load(f, Loader=yaml.FullLoader)
    else:
        params = checkpoint['params']
    params['data']['batch_size'] = args.batch_size
    model_type = params['model']['type']
    model = End2EndModel[model_type](params['model'])
    # Restore each sub-module present in the checkpoint.
    if 'frontend' in checkpoint:
        model.frontend.load_state_dict(checkpoint['frontend'])
        logger.info('[FrontEnd] Load the frontend checkpoint!')
    model.encoder.load_state_dict(checkpoint['encoder'])
    logger.info('[Encoder] Load the encoder checkpoint!')
    if 'decoder' in checkpoint:
        model.decoder.load_state_dict(checkpoint['decoder'])
        logger.info('[Decoder] Load the decoder checkpoint!')
    if 'joint' in checkpoint:
        model.joint.load_state_dict(checkpoint['joint'])
        logger.info('[JointNet] Load the joint net of transducer checkpoint!')
    if 'look_ahead_conv' in checkpoint:
        model.lookahead_conv.load_state_dict(checkpoint['look_ahead_conv'])
        logger.info('[LookAheadConvLayer] Load the external lookaheadconvlayer checkpoint!')
    if 'ctc' in checkpoint:
        model.assistor.load_state_dict(checkpoint['ctc'])
        logger.info('[CTC Assistor] Load the ctc assistor checkpoint!')
    logger.info('Finished! Loaded pre-trained model from %s' % args.load_model)
    model.eval()
    if args.ngpu > 0:
        model.cuda()
    # Optional external language model for shallow fusion / rescoring.
    if args.load_language_model is not None:
        lm_chkpt = torch.load(args.load_language_model)
        lm_parms = lm_chkpt['params']
        lm_type = lm_parms['model']['type']
        lm = LanguageModel[lm_type](lm_parms['model'])
        lm.load_state_dict(lm_chkpt['model'])
        logger.info('Load pre-trained language model from %s' % args.load_language_model)
        lm.eval()
        if args.ngpu > 0:
            lm.cuda()
    else:
        lm = None
        lm_type = None
    data_loader = FeatureLoader(params, args.decode_set, is_eval=True)
    idx2unit = data_loader.dataset.idx2unit
    recognizer = build_recognizer(model_type, model, lm, args, idx2unit)
    totals = len(data_loader.dataset)
    expdir = os.path.join('egs', params['data']['name'], 'exp', params['train']['save_name'])
    # Encode the full decoding configuration into the directory name so
    # different runs never overwrite each other.
    decoder_folder_name = ['decode']
    decoder_folder_name.append(args.decode_set)
    decoder_folder_name.append(args.mode)
    if args.mode != 'greedy':
        decoder_folder_name.append('%d' % args.beam_width)
    if args.load_language_model is not None:
        decoder_folder_name.append('%s_%.2f' % (lm_type, args.lm_weight))
    if args.ctc_weight > 0.0:
        decoder_folder_name.append('ctc_weight_%.3f' % args.ctc_weight)
    if args.ngram_lm is not None:
        decoder_folder_name.append('ngram_alpha%.2f_beta%.2f' % (args.alpha, args.beta))
    if args.apply_rescoring:
        decoder_folder_name.append('rescore')
        decoder_folder_name.append('rw_%.2f' % args.rescore_weight)
    if args.apply_lm_rescoring:
        decoder_folder_name.append('lm_rescore')
        decoder_folder_name.append('rw_%.2f' % args.rescore_weight)
    # Extract the epoch tag from the checkpoint filename; averaged
    # checkpoints look like 'fromXtoY', single ones like 'epoch.N.pt'.
    try:
        ep = re.search(r'from(\d{1,3})to(\d{1,3})', args.load_model).groups()
        decoder_folder_name.append('_'.join(list(ep)))
    except AttributeError:
        ep = re.search(r'epoch.(\d{1,3}).pt', args.load_model).groups()[0]
        decoder_folder_name.append('epoch_%s' % ep)
    if args.debug:
        decoder_folder_name.append('debug_%d_samples' % args.num_sample)
    if args.suffix is not None:
        decoder_folder_name.append(args.suffix)
    decode_dir = os.path.join(expdir, '_'.join(decoder_folder_name))
    if not os.path.exists(decode_dir):
        os.makedirs(decode_dir, exist_ok=True)
    writer = open(os.path.join(decode_dir, 'predict.txt'), 'w')
    detail_writer = open(os.path.join(decode_dir, 'predict.log'), 'w')
    top_n_false_tokens = 0
    false_tokens = 0
    total_tokens = 0
    accu_time = 0
    total_frames = 0
    for step, (utt_id, inputs, targets) in enumerate(data_loader.loader):
        if args.ngpu > 0:
            inputs = map_to_cuda(inputs)
        enc_inputs = inputs['inputs']
        enc_mask = inputs['mask']
        if args.batch_size == 1:
            # RTF is only meaningful without padding, i.e. batch size 1.
            total_frames += enc_inputs.size(1)
        st = time.time()
        preds, scores = recognizer.recognize(enc_inputs, enc_mask)
        et = time.time()
        span = et - st
        accu_time += span
        truths = targets['targets']
        truths_length = targets['targets_length']
        for b in range(len(preds)):
            n = step * args.batch_size + b
            # Skip the leading sos token of the reference.
            truth = [idx2unit[i.item()] for i in truths[b][1:truths_length[b]]]
            if args.piece2word:
                # NOTE(review): '▁' restored — the sentencepiece word
                # delimiter was lost in transit as an empty string
                # (replace('', ' ') would interleave spaces everywhere).
                truth = ''.join(truth).replace('▁', ' ')
            else:
                truth = ' '.join(truth)
            print_info = '[%d / %d ] %s - truth : %s' % (n, totals, utt_id[b], truth)
            logger.info(print_info)
            detail_writer.write(print_info + '\n')
            total_tokens += len(truth.split())
            # BUG FIX: was initialized to 0.0, which made min() always
            # return 0 and the oracle WER trivially zero.
            nbest_min_false_tokens = float('inf')
            for i in range(len(preds[b])):
                pred = preds[b][i]
                if args.piece2word:
                    pred = ''.join(preds[b][i].split()).replace('▁', ' ')
                # Language-tag prefixes are excluded from scoring.
                _truth = truth.replace('<PESN> ', '').replace('<VIET> ', '').replace('<SWAH> ', '')
                _pred = pred.replace('<PESN> ', '').replace('<VIET> ', '').replace('<SWAH> ', '')
                n_diff = editdistance.eval(_truth.split(), _pred.split())
                if i == 0:
                    # 1-best errors feed the standard WER.
                    false_tokens += n_diff
                nbest_min_false_tokens = min(nbest_min_false_tokens, n_diff)
                print_info = '[%d / %d ] %s - pred-%2d (%3.4f) : %s' % (n, totals, utt_id[b], i, float(scores.cpu()[b, i]), pred)
                logger.info(print_info)
                detail_writer.write(print_info + '\n')
            writer.write(utt_id[b] + ' ' + preds[b][0] + '\n')
            if nbest_min_false_tokens != float('inf'):
                top_n_false_tokens += nbest_min_false_tokens
            detail_writer.write('\n')
        if args.debug and (step + 1) * args.batch_size >= args.num_sample:
            break
    writer.close()
    detail_writer.close()
    with open(os.path.join(decode_dir, 'RESULT'), 'w') as w:
        wer = false_tokens / total_tokens * 100
        logger.info('The WER is %.3f.' % wer)
        topn_wer = top_n_false_tokens / total_tokens * 100
        logger.info('The top %d WER is %.3f' % (args.nbest, topn_wer))
        w.write('The Model Chkpt: %s \n' % args.load_model)
        if model_type == 'ctc':
            w.write('Decode Mode: %s \n' % args.mode)
        w.write('The WER is %.3f. \n' % wer)
        if args.batch_size == 1:
            rtf = accu_time / total_frames * 100
            logger.info('The RTF is %.6f' % rtf)
            w.write('The RTF is %.6f' % rtf)
def nasnet_large_arg_scope_for_detection(is_batch_norm_training=False):
    """Return the NASNet-A Large arg_scope with batch-norm's `is_training`
    overridden (the rest of the ImageNet scope is kept unchanged), for use
    when fine-tuning the backbone inside a detector.
    """
    base_scope = nasnet.nasnet_large_arg_scope()
    with arg_scope(base_scope), arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as detection_scope:
        return detection_scope
class double_conv_circular(nn.Module):
    """Two (Conv3x3 -> BatchNorm -> LeakyReLU) stages with circular padding
    along the width axis (e.g. the azimuth dimension of a polar grid).

    Height is zero-padded by the convolution itself (padding=(1, 0));
    width is wrapped explicitly with F.pad(..., mode='circular') before
    each convolution, so the spatial size is preserved.

    Note: `dilation` is accepted for API compatibility but is not used.
    """

    def __init__(self, in_ch, out_ch, group_conv, dilation=1):
        super(double_conv_circular, self).__init__()
        # groups=1 is the plain (non-grouped) convolution.
        first_groups = min(out_ch, in_ch) if group_conv else 1
        second_groups = out_ch if group_conv else 1
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, padding=(1, 0), groups=first_groups),
            nn.BatchNorm2d(out_ch),
            nn.LeakyReLU(inplace=True),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(out_ch, out_ch, 3, padding=(1, 0), groups=second_groups),
            nn.BatchNorm2d(out_ch),
            nn.LeakyReLU(inplace=True),
        )

    def forward(self, x):
        # Wrap one column on each side of the width axis, then convolve;
        # repeated for both stages.
        for stage in (self.conv1, self.conv2):
            x = F.pad(x, (1, 1, 0, 0), mode='circular')
            x = stage(x)
        return x
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.