code stringlengths 101 5.91M |
|---|
class Encoder_Background(nn.Module):
    """Encode a video's background frames into a single spatial feature map.

    Each of the T frames is downsampled ``ds_content`` times by strided conv
    blocks, the per-frame features are fused over time either by average
    pooling ('avg_pool') or by a conv over channel-stacked frames ('conv'),
    and the fused map is refined by a residual stack.
    """

    def __init__(self, num_hiddens, num_residual_layers, num_residual_hiddens, ds_content, T, suf_method='avg_pool'):
        super(Encoder_Background, self).__init__()
        self._ds_m = ds_content
        self._num_hiddens = num_hiddens
        self._num_residual_layers = num_residual_layers
        self._num_residual_hiddens = num_residual_hiddens
        input_channels = 3  # RGB input
        self._layers = nn.ModuleList()
        for i in range(ds_content):
            # Each stage halves spatial resolution (stride-2 conv) and then
            # applies a stride-1 refinement conv.
            curlayer = nn.Sequential(
                nn.Conv2d(input_channels, self._num_hiddens, kernel_size=5, stride=2, padding=2),
                nn.BatchNorm2d(self._num_hiddens),
                nn.GELU(),
                nn.Conv2d(self._num_hiddens, self._num_hiddens, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(self._num_hiddens),
                nn.GELU(),
            )
            input_channels = self._num_hiddens
            self._layers.append(curlayer)
        self._suf_method = suf_method.lower()
        if self._suf_method == 'avg_pool':
            # Averages over the T frames (time axis is arranged last in forward).
            self._suf_layer = nn.AvgPool1d(T)
        elif self._suf_method == 'conv':
            # Fuse frames by stacking them channel-wise and convolving.
            self._suf_layer = nn.Sequential(
                nn.Conv2d((T * self._num_hiddens), self._num_hiddens, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(self._num_hiddens),
                nn.GELU(),
            )
        else:
            # BUG FIX: was `assert ValueError(...)`, which asserts on a truthy
            # exception instance and therefore never fires. Raise instead.
            raise ValueError(f"No Implementation for Encoder_Content's suf_layer: {self._suf_method}!")
        self._residual = ResidualStack(self._num_hiddens, self._num_residual_layers, self._num_residual_hiddens, True)

    def forward(self, x_bg):
        """Encode ``x_bg`` of shape (B, T, C, H, W) into (B, 1, D, H', W')."""
        (B, T, C, H, W) = x_bg.shape
        xs = rearrange(x_bg, 'b t c h w -> (b t) c h w')
        for i in range(self._ds_m):
            h = self._layers[i](xs)
            xs = F.relu(h)
        (_, D, HS, WS) = xs.shape
        if self._suf_method == 'avg_pool':
            # Pool over the time dimension at every spatial location.
            xs = rearrange(xs, '(b t) d hs ws -> (b hs ws) d t', b=B)
            zs = self._suf_layer(xs)
            # BUG FIX: squeeze only the pooled time axis; a bare squeeze()
            # would also drop batch/feature dims that happen to be size 1.
            zs = zs.squeeze(-1)
            z = rearrange(zs, '(b hs ws) d -> b d hs ws', b=B, d=D, hs=HS, ws=WS)
        elif self._suf_method == 'conv':
            xs = rearrange(xs, '(b t) d hs ws -> b (t d) hs ws', b=B)
            z = self._suf_layer(xs)
        else:
            # Unreachable (validated in __init__), but keep a real raise: the
            # original `z = None; assert ValueError(...)` was a silent no-op.
            raise ValueError(f"No Implementation for Encoder_Content's suf_layer: {self._suf_method}!")
        z_ = self._residual(z)
        z_ = z_.unsqueeze(1)
        return z_
def get_launcher(distributed=False):
    """Build the deepspeed launcher command line as an argv list."""
    if distributed:
        num_gpus = min(2, get_gpu_count())
    else:
        num_gpus = 1
    # Port is overridable via the DS_TEST_PORT environment variable.
    port = os.environ.get('DS_TEST_PORT', DEFAULT_MASTER_PORT)
    cmd = f'deepspeed --num_nodes 1 --num_gpus {num_gpus} --master_port {port}'
    return cmd.split()
def load(config):
    """Instantiate the model class named by ``config.model.name``.

    The class is looked up in this module's globals and called with the
    full config.

    Raises:
        Exception: if no class with that name exists in this module.
    """
    cls_name = config.model.name
    # BUG FIX: keep only the lookup inside the try block. Previously
    # `cls(config)` was inside it too, so a KeyError raised by a model's
    # constructor was misreported as "No such model".
    try:
        cls = globals()[cls_name]
    except KeyError:
        raise Exception('No such model: {}'.format(cls_name))
    return cls(config)
def get_lines_from_clustering(img_edges, mask_extract_contour, mask_plane, mask_number, output_directory, ksize=51):
    """Cluster edge pixels within the contour mask, split oversized clusters,
    and fit one line per resulting cluster. Debug images go to
    *output_directory* (created if missing)."""
    if not os.path.exists(output_directory):
        os.mkdir(output_directory)
    # Initial clustering of candidate edge pixels.
    clusters = get_edge_candidate_clusters_from_mask(
        np.copy(img_edges), mask_extract_contour, mask_number,
        ksize=ksize, output_directory=output_directory)
    logger.debug('Found {} edges. Checking if we should split across edges.'.format(len(clusters)))
    # Break up clusters that span multiple physical edges.
    clusters = split_edge_candidate_clusters(np.copy(img_edges), mask_extract_contour, clusters)
    logger.debug('Found {} edges after splitting big edges.'.format(len(clusters)))
    return compute_lines_from_edge_candidate_clusters(
        img_edges.copy(), clusters, mask_plane, mask_extract_contour, mask_number, output_directory)
def split_corpus(path, shard_size):
    """Yield the file at *path* as lists of byte-lines (shards).

    A non-positive *shard_size* yields the whole file as one shard;
    otherwise each shard holds up to *shard_size* lines.
    """
    with open(path, 'rb') as f:
        if shard_size <= 0:
            yield f.readlines()
            return
        # iter() with a sentinel: stop when islice returns an empty list.
        for shard in iter(lambda: list(islice(f, shard_size)), []):
            yield shard
class Dataset():
    """Filesystem layout and metadata for person re-ID datasets.

    Supported datasets: market1501, duke, cuhk03, viper. All path methods
    raise ValueError for unknown dataset names.
    """

    # Datasets sharing the standard bounding-box directory layout.
    _KNOWN = ('market1501', 'duke', 'cuhk03', 'viper')
    # Published per-dataset counts.
    _N_CLASSES = {'market1501': 751, 'duke': 702, 'cuhk03': 767, 'viper': 316}
    _TEST_NUM = {'market1501': 19732, 'duke': 17661, 'cuhk03': 6751, 'viper': 316}
    _QUERY_NUM = {'market1501': 3368, 'duke': 2228, 'cuhk03': 6751, 'viper': 316}

    def __init__(self, root='/home/paul/datasets', dataset='market1501'):
        self.dataset = dataset  # dataset name, e.g. 'market1501'
        self.root = root        # root directory holding all datasets

    def _subdir(self, name, what):
        # Shared helper: root/dataset/name, validating the dataset first.
        if self.dataset not in self._KNOWN:
            raise ValueError('Unknown %s for %s' % (what, self.dataset))
        return os.path.join(self.root, self.dataset, name)

    def train_path(self):
        """Directory of training bounding boxes."""
        return self._subdir('bounding_box_train', 'train set')

    def test_path(self):
        """Directory of gallery/test bounding boxes."""
        return self._subdir('bounding_box_test', 'test set')

    def gallery_path(self):
        """Gallery directory (identical to the test set).

        BUG FIX: previously called the non-existent ``self.testset()``.
        """
        return self.test_path()

    def query_path(self):
        """Directory of query images."""
        return self._subdir('query', 'query set')

    def gan_path(self):
        """Directory of GAN-generated images for this dataset."""
        return os.path.join('/home/paul/generated', self.dataset)

    def dataset_path(self):
        """Root directory of this dataset."""
        return os.path.join(self.root, self.dataset)

    def n_classe(self):
        """Number of training identities."""
        try:
            return self._N_CLASSES[self.dataset]
        except KeyError:
            raise ValueError('Unknown n_classe set for %s' % self.dataset)

    def root_path(self):
        """Root directory holding all datasets."""
        return self.root

    def gt_set(self):
        """Hand-drawn ground-truth bounding boxes (market1501 only)."""
        if self.dataset == 'market1501':
            return os.path.join(self.root, self.dataset, 'gt_bbox')
        raise ValueError('Unknown hand-drawn bounding boxes for %s' % self.dataset)

    def train_list(self):
        """Path to the train.list file; raises FileNotFoundError if absent."""
        if self.dataset not in self._KNOWN:
            raise ValueError('Unknown train bounding boxes for %s' % self.dataset)
        train_list = os.path.join(self.root, self.dataset, 'train.list')
        if not os.path.exists(train_list):
            raise FileNotFoundError('%s not found' % train_list)
        return train_list

    def cluster_path(self):
        """Directory holding clustering results for this dataset."""
        if self.dataset not in self._KNOWN:
            raise ValueError('Unknown cluster path for %s' % self.dataset)
        return os.path.join('/home/paul', 'clustering', self.dataset)

    def n_training_set(self):
        """Number of training images (counted on disk for market1501)."""
        if self.dataset == 'market1501':
            n = len(glob.glob(os.path.join(self.train_path(), '*.jpg')))
            assert n == 12936  # sanity check against the published count
        elif self.dataset == 'duke':
            n = 16522
        else:
            # BUG FIX: 'Unknow' typo corrected in the error message.
            raise ValueError('Unknown training set size for %s' % self.dataset)
        return n

    def n_gan_set(self):
        """Number of generated images on disk (market1501 only)."""
        if self.dataset != 'market1501':
            # BUG FIX: 'Unknow' typo corrected in the error message.
            raise ValueError('Unknown generated set size for %s' % self.dataset)
        return len(glob.glob(os.path.join(self.gan_path(), '*.jpg')))

    def test_num(self):
        """Number of gallery (test) images."""
        try:
            return self._TEST_NUM[self.dataset]
        except KeyError:
            # BUG FIX: original message used '% dataset', a broken %-format
            # that raised TypeError instead of the intended ValueError.
            raise ValueError('Unknown test num for %s dataset' % self.dataset)

    def query_num(self):
        """Number of query images."""
        try:
            return self._QUERY_NUM[self.dataset]
        except KeyError:
            # BUG FIX: same broken '% dataset' format as test_num.
            raise ValueError('Unknown query num for %s dataset' % self.dataset)
def parse_hypothesis(hyp, char_list):
    """Flatten a decoding hypothesis into (text, token, tokenid, score).

    The first id of ``hyp['yseq']`` is skipped (presumably the start/blank
    symbol — confirm with the decoder). Token ids are mapped through
    *char_list* and '<space>' markers become literal spaces in the text.
    """
    ids = [int(t) for t in hyp['yseq'][1:]]
    tokens = [char_list[i] for i in ids]
    return (
        ''.join(tokens).replace('<space>', ' '),
        ' '.join(tokens),
        ' '.join(str(i) for i in ids),
        float(hyp['score']),
    )
def rot_theta(th):
    """Return the 4x4 homogeneous rotation about the y-axis by *th* radians
    (float32)."""
    c, s = np.cos(th), np.sin(th)
    mat = np.eye(4, dtype=np.float32)
    mat[0, 0] = c
    mat[0, 2] = -s
    mat[2, 0] = s
    mat[2, 2] = c
    return mat
def build_lr(input_shape, output_size):
    """Build and compile a softmax classifier (multinomial logistic
    regression): flatten -> dense -> softmax, categorical cross-entropy."""
    model = Sequential()
    model.add(Flatten(input_shape=input_shape))
    model.add(Dense(output_size))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
def train_one(save_path, config, log_file_dir, index, logfile_level, console_level, device):
    """Configure logging (when a log dir is given), then run one training
    session and return its result."""
    if log_file_dir:
        # Program log lives next to the tensorboard dir, renamed.
        program_log = log_file_dir.replace('tensorboard', 'programlog')
        logging.basicConfig(filename=program_log, level=logfile_level)
        handler = logging.StreamHandler()
        handler.setLevel(console_level)
        logging.getLogger().addHandler(handler)
    print('training at %s started' % index)
    trainer = TraderTrainer(config, save_path=save_path, device=device)
    return trainer.train_net(log_file_dir=log_file_dir, index=index)
class BaseGraphMultiLayer(HybridBlock):
    """Multi-layer graph aggregation network (MXNet Gluon).

    Stacks one (aggregator, dropout) pair per entry of
    ``aggregator_args_list`` / ``dropout_rate_list`` and projects the result
    to ``out_units`` with a final Dense layer.
    """

    def __init__(self, out_units, aggregator_args_list, dropout_rate_list, graph_type='homo', in_units=None, first_embed_units=256, dense_connect=False, every_layer_l2_normalization=False, l2_normalization=False, output_inner_result=False, prefix=None, params=None):
        super(BaseGraphMultiLayer, self).__init__(prefix=prefix, params=params)
        self._aggregator_args_list = aggregator_args_list
        self._dropout_rate_list = dropout_rate_list
        self._dense_connect = dense_connect
        self._first_embed_units = first_embed_units
        self._every_layer_l2_normalization = every_layer_l2_normalization
        self._l2_normalization = l2_normalization
        self._graph_type = graph_type
        if self._l2_normalization:
            # Normalize the final embedding along the feature axis.
            self._l2_normalization_layer = L2Normalization(axis=(- 1))
        self._output_inner_result = output_inner_result
        print('graph type', graph_type)
        with self.name_scope():
            if (graph_type == 'homo'):
                # NOTE(review): self.embed is created only for 'homo' graphs;
                # other graph types would hit AttributeError if the forward
                # pass uses it — confirm intended behavior.
                self.embed = nn.Dense(self._first_embed_units, flatten=False)
            self.aggregators = nn.HybridSequential()
            self.dropout_layers = nn.HybridSequential()
            # One aggregator + matching dropout per configured layer.
            for (args, dropout_rate) in zip(aggregator_args_list, dropout_rate_list):
                self.aggregators.add(parse_aggregator_from_desc(args))
                self.dropout_layers.add(nn.Dropout(dropout_rate))
            self.out_layer = nn.Dense(units=out_units, flatten=False)
def floordiv(dividend, divisor, rounding_mode='trunc'):
    """Division with rounding, portable across torch versions.

    NOTE(review): the legacy fallback uses ``//`` (floor division), which
    differs from rounding_mode='trunc' for negative operands — confirm this
    matches the intent on old torch versions.
    """
    if not _torch_version_div_indexing:
        # Old torch: torch.div has no rounding_mode keyword.
        return dividend // divisor
    return torch.div(dividend, divisor, rounding_mode=rounding_mode)
class CamembertForMaskedLM(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable.

    Any instantiation delegates to ``requires_backends``, which presumably
    raises an informative error — confirm against the DummyObject helper.
    """

    # Backends required for the real implementation of this class.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class CategoricalParametricDistribution(ParametricDistribution):
    """Parametric categorical (discrete-action) distribution.

    The raw network output of size ``num_actions`` is consumed directly as
    logits (identity postprocessor, scalar events).
    """

    def __init__(self, num_actions: int):
        # No squashing/bijection is applied to the parameters.
        postprocessor = IdentityBijector()
        super().__init__(param_size=num_actions, postprocessor=postprocessor, event_ndims=0)

    def create_dist(self, parameters: chex.Array) -> CategoricalDistribution:
        """Build a categorical distribution from raw logits."""
        return CategoricalDistribution(logits=parameters)
class DeviceOptions(Enum):
    """Compute-device choices for placement selection."""

    AUTO = auto()  # select the device automatically
    CPU = auto()
    GPU = auto()   # generic GPU option (distinct from CUDA below)
    XPU = auto()   # presumably Intel XPU — confirm with consumers
    HPU = auto()   # presumably Habana HPU — confirm with consumers
    CUDA = auto()
_task('sentence_ranking')
class SentenceRankingTask(FairseqTask):
    """Rank candidate sentences against a shared context sentence.

    The data layout is ``input0`` (context) plus ``input1..inputN``
    (candidates, N = --num-classes); each candidate is concatenated with
    the context and scored by a single-class classification head.

    NOTE(review): the bare ``_task('sentence_ranking')`` call above this
    class looks like a stripped ``@register_task(...)`` decorator — confirm.
    """

    def add_args(parser):
        """Register task-specific CLI arguments.

        NOTE(review): no ``self``/``@staticmethod`` — the decorator appears
        to have been stripped in this copy; confirm.
        """
        parser.add_argument('data', metavar='FILE', help='file prefix for data')
        parser.add_argument('--num-classes', type=int, help='number of sentences to be ranked')
        parser.add_argument('--init-token', type=int, help='add token at the beginning of each batch item')
        parser.add_argument('--separator-token', type=int, help='add separator token between inputs')
        parser.add_argument('--no-shuffle', action='store_true')
        parser.add_argument('--shorten-method', default='none', choices=['none', 'truncate', 'random_crop'], help='if not none, shorten sequences that exceed --tokens-per-sample')
        parser.add_argument('--shorten-data-split-list', default='', help='comma-separated list of dataset splits to apply shortening to, e.g., "train,valid" (default: all dataset splits)')
        parser.add_argument('--max-option-length', type=int, help='max length for each option')

    def __init__(self, args, dictionary):
        super().__init__(args)
        # Single dictionary shared by source and target sides.
        self.dictionary = dictionary

    def load_dictionary(cls, args, filename, source=True):
        """Load a Dictionary and register the <mask> symbol.

        NOTE(review): takes ``cls`` — presumably a stripped @classmethod.
        """
        dictionary = Dictionary.load(filename)
        dictionary.add_symbol('<mask>')
        return dictionary

    def setup_task(cls, args, **kwargs):
        """Construct the task from CLI args (presumably a stripped
        @classmethod). Requires the sentence_ranking criterion."""
        assert (args.criterion == 'sentence_ranking'), 'Must set --criterion=sentence_ranking'
        data_dict = cls.load_dictionary(args, os.path.join(args.data, 'input0', 'dict.txt'), source=True)
        logger.info('[input] dictionary: {} types'.format(len(data_dict)))
        return SentenceRankingTask(args, data_dict)

    def load_dataset(self, split, combine=False, **kwargs):
        """Load a data split: one net_input per candidate, optional labels."""

        def get_path(type, split):
            # data/<type>/<split>, e.g. data/input1/train
            return os.path.join(self.args.data, type, split)

        def make_dataset(type, dictionary):
            split_path = get_path(type, split)
            dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine)
            return dataset

        input0 = make_dataset('input0', self.source_dictionary)
        input_options = [make_dataset('input{idx}'.format(idx=(idx + 1)), self.source_dictionary) for idx in range(self.args.num_classes)]
        if (self.args.separator_token is not None):
            # Separator goes in front of the shared context sentence.
            input0 = PrependTokenDataset(input0, self.args.separator_token)
        src_tokens = []
        for input_option in input_options:
            if (self.args.init_token is not None):
                input_option = PrependTokenDataset(input_option, self.args.init_token)
            if (self.args.max_option_length is not None):
                input_option = TruncateDataset(input_option, self.args.max_option_length)
            # Each training example is <candidate> + <context>.
            src_token = ConcatSentencesDataset(input_option, input0)
            src_token = maybe_shorten_dataset(src_token, split, self.args.shorten_data_split_list, self.args.shorten_method, self.args.max_positions, self.args.seed)
            src_tokens.append(src_token)
        with data_utils.numpy_seed(self.args.seed):
            shuffle = np.random.permutation(len(src_tokens[0]))
        dataset = {'id': IdDataset(), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_tokens[0], reduce=True)}
        # One right-padded net_input{i} per ranked candidate.
        for src_token_idx in range(len(src_tokens)):
            dataset.update({'net_input{idx}'.format(idx=(src_token_idx + 1)): {'src_tokens': RightPadDataset(src_tokens[src_token_idx], pad_idx=self.source_dictionary.pad()), 'src_lengths': NumelDataset(src_tokens[src_token_idx], reduce=False)}})
        label_path = '{}.label'.format(get_path('label', split))
        if os.path.exists(label_path):
            # Labels are optional (e.g. absent for unlabeled test splits).
            with open(label_path) as h:
                dataset.update(target=RawLabelDataset([int(x.strip()) for x in h.readlines()]))
        nested_dataset = NestedDictionaryDataset(dataset, sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])])
        if self.args.no_shuffle:
            dataset = nested_dataset
        else:
            dataset = SortDataset(nested_dataset, sort_order=[shuffle])
        logger.info('Loaded {0} with #samples: {1}'.format(split, len(dataset)))
        self.datasets[split] = dataset
        return self.datasets[split]

    def build_model(self, args):
        """Build the model and attach a single-logit ranking head."""
        from fairseq import models
        model = models.build_model(args, self)
        model.register_classification_head(getattr(args, 'ranking_head_name', 'sentence_classification_head'), num_classes=1)
        return model

    def max_positions(self):
        """Maximum input length supported by the task."""
        return self.args.max_positions

    def source_dictionary(self):
        # NOTE(review): conventionally a @property in fairseq tasks —
        # decorator appears stripped; confirm.
        return self.dictionary

    def target_dictionary(self):
        # NOTE(review): conventionally a @property — see source_dictionary.
        return self.dictionary
_task('sentence_ranking')
class SentenceRankingTask(LegacyFairseqTask):
    """Rank candidate sentences against a shared context sentence
    (legacy-task variant; duplicates the FairseqTask version above).

    Data layout: ``input0`` (context) plus ``input1..inputN`` (candidates,
    N = --num-classes); each candidate is concatenated with the context and
    scored by a single-class classification head.

    NOTE(review): the bare ``_task('sentence_ranking')`` call above this
    class looks like a stripped ``@register_task(...)`` decorator — confirm.
    """

    def add_args(parser):
        """Register task-specific CLI arguments.

        NOTE(review): no ``self``/``@staticmethod`` — decorator appears
        stripped in this copy; confirm.
        """
        parser.add_argument('data', metavar='FILE', help='file prefix for data')
        parser.add_argument('--num-classes', type=int, help='number of sentences to be ranked')
        parser.add_argument('--init-token', type=int, help='add token at the beginning of each batch item')
        parser.add_argument('--separator-token', type=int, help='add separator token between inputs')
        parser.add_argument('--no-shuffle', action='store_true')
        parser.add_argument('--shorten-method', default='none', choices=['none', 'truncate', 'random_crop'], help='if not none, shorten sequences that exceed --tokens-per-sample')
        parser.add_argument('--shorten-data-split-list', default='', help='comma-separated list of dataset splits to apply shortening to, e.g., "train,valid" (default: all dataset splits)')
        parser.add_argument('--max-option-length', type=int, help='max length for each option')

    def __init__(self, args, dictionary):
        super().__init__(args)
        # Single dictionary shared by source and target sides.
        self.dictionary = dictionary

    def load_dictionary(cls, args, filename, source=True):
        """Load a Dictionary and register the <mask> symbol.

        NOTE(review): takes ``cls`` — presumably a stripped @classmethod.
        """
        dictionary = Dictionary.load(filename)
        dictionary.add_symbol('<mask>')
        return dictionary

    def setup_task(cls, args, **kwargs):
        """Construct the task from CLI args (presumably a stripped
        @classmethod). Requires the sentence_ranking criterion."""
        assert (args.criterion == 'sentence_ranking'), 'Must set --criterion=sentence_ranking'
        data_dict = cls.load_dictionary(args, os.path.join(args.data, 'input0', 'dict.txt'), source=True)
        logger.info('[input] dictionary: {} types'.format(len(data_dict)))
        return SentenceRankingTask(args, data_dict)

    def load_dataset(self, split, combine=False, **kwargs):
        """Load a data split: one net_input per candidate, optional labels."""

        def get_path(type, split):
            # data/<type>/<split>, e.g. data/input1/train
            return os.path.join(self.args.data, type, split)

        def make_dataset(type, dictionary):
            split_path = get_path(type, split)
            dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine)
            return dataset

        input0 = make_dataset('input0', self.source_dictionary)
        input_options = [make_dataset('input{idx}'.format(idx=(idx + 1)), self.source_dictionary) for idx in range(self.args.num_classes)]
        if (self.args.separator_token is not None):
            # Separator goes in front of the shared context sentence.
            input0 = PrependTokenDataset(input0, self.args.separator_token)
        src_tokens = []
        for input_option in input_options:
            if (self.args.init_token is not None):
                input_option = PrependTokenDataset(input_option, self.args.init_token)
            if (self.args.max_option_length is not None):
                input_option = TruncateDataset(input_option, self.args.max_option_length)
            # Each training example is <candidate> + <context>.
            src_token = ConcatSentencesDataset(input_option, input0)
            src_token = maybe_shorten_dataset(src_token, split, self.args.shorten_data_split_list, self.args.shorten_method, self.args.max_positions, self.args.seed)
            src_tokens.append(src_token)
        with data_utils.numpy_seed(self.args.seed):
            shuffle = np.random.permutation(len(src_tokens[0]))
        dataset = {'id': IdDataset(), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_tokens[0], reduce=True)}
        # One right-padded net_input{i} per ranked candidate.
        for src_token_idx in range(len(src_tokens)):
            dataset.update({'net_input{idx}'.format(idx=(src_token_idx + 1)): {'src_tokens': RightPadDataset(src_tokens[src_token_idx], pad_idx=self.source_dictionary.pad()), 'src_lengths': NumelDataset(src_tokens[src_token_idx], reduce=False)}})
        label_path = '{}.label'.format(get_path('label', split))
        if os.path.exists(label_path):
            # Labels are optional (e.g. absent for unlabeled test splits).
            with open(label_path) as h:
                dataset.update(target=RawLabelDataset([int(x.strip()) for x in h.readlines()]))
        nested_dataset = NestedDictionaryDataset(dataset, sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])])
        if self.args.no_shuffle:
            dataset = nested_dataset
        else:
            dataset = SortDataset(nested_dataset, sort_order=[shuffle])
        logger.info('Loaded {0} with #samples: {1}'.format(split, len(dataset)))
        self.datasets[split] = dataset
        return self.datasets[split]

    def build_model(self, args):
        """Build the model and attach a single-logit ranking head."""
        from fairseq import models
        model = models.build_model(args, self)
        model.register_classification_head(getattr(args, 'ranking_head_name', 'sentence_classification_head'), num_classes=1)
        return model

    def max_positions(self):
        """Maximum input length supported by the task."""
        return self.args.max_positions

    def source_dictionary(self):
        # NOTE(review): conventionally a @property in fairseq tasks —
        # decorator appears stripped; confirm.
        return self.dictionary

    def target_dictionary(self):
        # NOTE(review): conventionally a @property — see source_dictionary.
        return self.dictionary
class BlockDiagMat():
    """Block-diagonal matrix [[A, 0], [0, B]] over two sub-matrix objects.

    A and B must expose the same interface used here (get, matmul, solve,
    logdet, inv, get_diag, ...). Operations delegate to A and B: right-hand
    sides are split row-wise into the A-part and B-part and the partial
    results are concatenated back together.

    NOTE(review): ``A.shape`` / ``B.shape`` are indexed below without a
    call, implying sub-matrices expose ``shape`` as a property, while this
    class defines it as a method — confirm the intended interface.
    """

    def __init__(self, A, B):
        (self.A, self.B) = (A, B)

    def shape(self):
        # Overall shape = element-wise sum of the diagonal blocks' shapes.
        mats = [self.A, self.B]
        return (sum([m.shape[0] for m in mats]), sum([m.shape[1] for m in mats]))

    def sqrt_dims(self):
        # Column count of the matrix square root (sum over blocks).
        mats = [self.A, self.B]
        return sum([m.sqrt_dims for m in mats])

    def _get_rhs_slices(self, X):
        # Split X row-wise: first A.shape[1] rows for A, remainder for B.
        X1 = tf.slice(X, begin=tf.zeros((2,), tf.int32), size=tf.stack([self.A.shape[1], (- 1)]))
        X2 = tf.slice(X, begin=tf.stack([self.A.shape[1], 0]), size=(- tf.ones((2,), tf.int32)))
        return (X1, X2)

    def get(self):
        """Materialize the dense block-diagonal matrix."""
        tl_shape = tf.stack([self.A.shape[0], self.B.shape[1]])
        br_shape = tf.stack([self.B.shape[0], self.A.shape[1]])
        # Zero-pad each block to the full width, then stack vertically.
        top = tf.concat([self.A.get(), tf.zeros(tl_shape, float_type)], axis=1)
        bottom = tf.concat([tf.zeros(br_shape, float_type), self.B.get()], axis=1)
        return tf.concat([top, bottom], axis=0)

    def logdet(self):
        # log|M| = log|A| + log|B| for block-diagonal M.
        return (self.A.logdet() + self.B.logdet())

    def matmul(self, X):
        """Compute M @ X block-wise."""
        (X1, X2) = self._get_rhs_slices(X)
        top = self.A.matmul(X1)
        bottom = self.B.matmul(X2)
        return tf.concat([top, bottom], axis=0)

    def solve(self, X):
        """Solve M Y = X block-wise."""
        (X1, X2) = self._get_rhs_slices(X)
        top = self.A.solve(X1)
        bottom = self.B.solve(X2)
        return tf.concat([top, bottom], axis=0)

    def inv(self):
        # The inverse of a block-diagonal matrix is block-diagonal.
        return BlockDiagMat(self.A.inv(), self.B.inv())

    def trace_KiX(self, X):
        """Trace of M^{-1} X, using only the diagonal blocks of X."""
        (X1, X2) = (tf.slice(X, [0, 0], self.A.shape), tf.slice(X, self.A.shape, [(- 1), (- 1)]))
        top = self.A.trace_KiX(X1)
        bottom = self.B.trace_KiX(X2)
        return (top + bottom)

    def get_diag(self):
        """Diagonal of M (concatenation of block diagonals)."""
        return tf.concat([self.A.get_diag(), self.B.get_diag()], axis=0)

    def inv_diag(self):
        """Diagonal of M^{-1}."""
        return tf.concat([self.A.inv_diag(), self.B.inv_diag()], axis=0)

    def matmul_sqrt(self, X):
        """Multiply by the matrix square root, block-wise."""
        (X1, X2) = self._get_rhs_slices(X)
        top = self.A.matmul_sqrt(X1)
        bottom = self.B.matmul_sqrt(X2)
        return tf.concat([top, bottom], axis=0)

    def matmul_sqrt_transpose(self, X):
        """Multiply by the transposed square root; rows split by sqrt_dims
        rather than by shape."""
        X1 = tf.slice(X, begin=tf.zeros((2,), tf.int32), size=tf.stack([self.A.sqrt_dims, (- 1)]))
        X2 = tf.slice(X, begin=tf.stack([self.A.sqrt_dims, 0]), size=(- tf.ones((2,), tf.int32)))
        top = self.A.matmul_sqrt_transpose(X1)
        bottom = self.B.matmul_sqrt_transpose(X2)
        return tf.concat([top, bottom], axis=0)
class Brightness(object):
    """Callable augmentation: scale image brightness by (1 ± magnitude)."""

    def __call__(self, x, magnitude):
        # Brighten or darken with equal probability.
        sign = random.choice([-1, 1])
        factor = 1 + (magnitude * sign)
        return ImageEnhance.Brightness(x).enhance(factor)
def test_build_global_dist(monkeypatch, tmpdir):
    """Build the pybind11 *global* sdist and verify its contents.

    Checks the member list and the generated setup.py, pyproject.toml,
    pkg-config file and CMake config inside the produced tarball.
    """
    monkeypatch.chdir(MAIN_DIR)
    # Force the "global" (system-install) sdist variant.
    monkeypatch.setenv('PYBIND11_GLOBAL_SDIST', '1')
    subprocess.run([sys.executable, '-m', 'build', '--sdist', '--outdir', str(tmpdir)], check=True)
    # Exactly one tarball must have been produced.
    (sdist,) = tmpdir.visit('*.tar.gz')
    with tarfile.open(str(sdist), 'r:gz') as tar:
        start = (tar.getnames()[0] + '/')
        # Strip the 'pybind11_global-' prefix (16 chars) and trailing '/'.
        version = start[16:(- 1)]
        # Member names with the top-level directory removed.
        simpler = {n.split('/', 1)[(- 1)] for n in tar.getnames()[1:]}
        setup_py = read_tz_file(tar, 'setup.py')
        pyproject_toml = read_tz_file(tar, 'pyproject.toml')
        pkgconfig = read_tz_file(tar, 'pybind11/share/pkgconfig/pybind11.pc')
        cmake_cfg = read_tz_file(tar, 'pybind11/share/cmake/pybind11/pybind11Config.cmake')
        assert ('set(pybind11_INCLUDE_DIR "${PACKAGE_PREFIX_DIR}/include")' in cmake_cfg.decode('utf-8'))
    # Expected member set: packaged headers plus sdist-only files.
    files = {f'pybind11/{n}' for n in all_files}
    files |= sdist_files
    files |= {f'pybind11_global{n}' for n in local_sdist_files}
    assert (simpler == files)
    # setup.py is templated from tools/setup_global.py.in with the version.
    with open(os.path.join(MAIN_DIR, 'tools', 'setup_global.py.in'), 'rb') as f:
        contents = string.Template(f.read().decode()).substitute(version=version, extra_cmd='').encode('utf-8')
    assert (setup_py == contents)
    with open(os.path.join(MAIN_DIR, 'tools', 'pyproject.toml'), 'rb') as f:
        contents = f.read()
    assert (pyproject_toml == contents)
    # pkg-config embeds only the X.Y.Z part of the version string.
    simple_version = '.'.join(version.split('.')[:3])
    pkgconfig_expected = PKGCONFIG.format(VERSION=simple_version).encode('utf-8')
    assert (normalize_line_endings(pkgconfig) == pkgconfig_expected)
def download_dataset_qm9(datadir, dataname, splits=None, calculate_thermo=True, exclude=True, cleanup=True):
    """Download and preprocess the GDB9/QM9 molecular dataset.

    Downloads the xyz tarball, splits it into train/valid/test, optionally
    attaches thermochemical reference energies, and saves one compressed
    .npz file per split under ``datadir/dataname``.
    """
    gdb9dir = join(datadir, dataname)
    os.makedirs(gdb9dir, exist_ok=True)
    logging.info('Downloading and processing GDB9 dataset. Output will be in directory: {}.'.format(gdb9dir))
    logging.info('Beginning download of GDB9 dataset!')
    # BUG FIX: the URL literal was truncated to an unterminated string in
    # this copy. Restored to the QM9 figshare download for
    # dsgdb9nsd.xyz.tar.bz2 — confirm against the upstream source.
    gdb9_url_data = 'https://springernature.figshare.com/ndownloader/files/3195389'
    gdb9_tar_data = join(gdb9dir, 'dsgdb9nsd.xyz.tar.bz2')
    urllib.request.urlretrieve(gdb9_url_data, filename=gdb9_tar_data)
    logging.info('GDB9 dataset downloaded successfully!')
    if splits is None:
        splits = gen_splits_gdb9(gdb9dir, cleanup)
    # Parse the xyz files of each split directly from the tarball.
    gdb9_data = {}
    for (split, split_idx) in splits.items():
        gdb9_data[split] = process_xyz_files(gdb9_tar_data, process_xyz_gdb9, file_idx_list=split_idx, stack=True)
    if calculate_thermo:
        # Attach thermochemical reference energies as extra targets.
        therm_energy = get_thermo_dict(gdb9dir, cleanup)
        for (split_idx, split_data) in gdb9_data.items():
            gdb9_data[split_idx] = add_thermo_targets(split_data, therm_energy)
    logging.info('Saving processed data:')
    for (split, data) in gdb9_data.items():
        savedir = join(gdb9dir, (split + '.npz'))
        np.savez_compressed(savedir, **data)
    logging.info('Processing/saving complete!')
class P2SROManagerStub(object):
    """gRPC client stub for the P2SROManager service.

    Each attribute is a unary-unary callable bound to one RPC method with
    the matching protobuf (de)serializers. Generated-style boilerplate.
    """

    def __init__(self, channel):
        """Bind every RPC method on the given grpc.Channel.

        Args:
            channel: an open grpc.Channel to the P2SRO manager server.
        """
        self.CheckNumPlayers = channel.unary_unary('/P2SROManager/CheckNumPlayers', request_serializer=p2sro__manager__pb2.NumPlayers.SerializeToString, response_deserializer=p2sro__manager__pb2.Confirmation.FromString)
        self.GetManagerMetaData = channel.unary_unary('/P2SROManager/GetManagerMetaData', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, response_deserializer=p2sro__manager__pb2.Metadata.FromString)
        self.GetLogDir = channel.unary_unary('/P2SROManager/GetLogDir', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, response_deserializer=p2sro__manager__pb2.String.FromString)
        self.ClaimNewActivePolicyForPlayer = channel.unary_unary('/P2SROManager/ClaimNewActivePolicyForPlayer', request_serializer=p2sro__manager__pb2.NewActivePolicyRequest.SerializeToString, response_deserializer=p2sro__manager__pb2.PolicySpecJson.FromString)
        self.SubmitNewActivePolicyMetadata = channel.unary_unary('/P2SROManager/SubmitNewActivePolicyMetadata', request_serializer=p2sro__manager__pb2.PolicyMetadataRequest.SerializeToString, response_deserializer=p2sro__manager__pb2.PolicySpecJson.FromString)
        self.CanActivePolicyBeSetAsFixedNow = channel.unary_unary('/P2SROManager/CanActivePolicyBeSetAsFixedNow', request_serializer=p2sro__manager__pb2.PlayerAndPolicyNum.SerializeToString, response_deserializer=p2sro__manager__pb2.Confirmation.FromString)
        self.IsPolicyFixed = channel.unary_unary('/P2SROManager/IsPolicyFixed', request_serializer=p2sro__manager__pb2.PlayerAndPolicyNum.SerializeToString, response_deserializer=p2sro__manager__pb2.Confirmation.FromString)
        self.SetActivePolicyAsFixed = channel.unary_unary('/P2SROManager/SetActivePolicyAsFixed', request_serializer=p2sro__manager__pb2.PolicyMetadataRequest.SerializeToString, response_deserializer=p2sro__manager__pb2.PolicySpecJson.FromString)
        self.GetCopyOfLatestData = channel.unary_unary('/P2SROManager/GetCopyOfLatestData', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, response_deserializer=p2sro__manager__pb2.P2SROStatusResponse.FromString)
        self.SubmitEmpiricalPayoffResult = channel.unary_unary('/P2SROManager/SubmitEmpiricalPayoffResult', request_serializer=p2sro__manager__pb2.PayoffResult.SerializeToString, response_deserializer=p2sro__manager__pb2.Confirmation.FromString)
        self.RequestExternalEval = channel.unary_unary('/P2SROManager/RequestExternalEval', request_serializer=p2sro__manager__pb2.EvalRequest.SerializeToString, response_deserializer=p2sro__manager__pb2.Confirmation.FromString)
def is_tqdm_exists(callbacks):
    """Return True when *callbacks* contains a TqdmCallback instance."""
    return any(isinstance(cb, TqdmCallback) for cb in callbacks)
def optimizer_creator(model, config):
    """Create an SGD optimizer over the model's ``fc`` layer parameters
    using the lr/momentum entries of *config*."""
    lr = config['lr']
    momentum = config['momentum']
    return optim.SGD(model.fc.parameters(), lr=lr, momentum=momentum)
class refNMTModel(nn.Module):
    """NMT model augmented with translation-memory style references.

    Besides the standard source encoder/decoder, each reference
    (source, target) pair is encoded into key/value memories that the
    decoder attends to while generating the target.
    """

    def __init__(self, enc_embedding, dec_embedding, encoder_src, encoder_ref, decoder_ref, decoder, generator, fields):
        super(refNMTModel, self).__init__()
        # Embeddings are shared between the main and reference pipelines.
        self.enc_embedding = enc_embedding
        self.dec_embedding = dec_embedding
        self.encoder_src = encoder_src  # encodes source and reference-source
        self.encoder_ref = encoder_ref  # encodes reference-target (values)
        self.decoder_ref = decoder_ref  # produces reference keys
        self.decoder = decoder
        self.generator = generator
        self.fields = fields

    def forward(self, src_inputs, tgt_inputs, ref_src_inputs, ref_tgt_inputs, src_lengths, ref_src_lengths, ref_tgt_lengths):
        """Encode source + references, then decode the target.

        Returns (decoder outputs, attention, reference keys).
        """
        (ref_values, enc_hidden, ref_keys, ref_mask, src_context, src_mask) = self.encode(src_inputs, ref_src_inputs, ref_tgt_inputs, src_lengths, ref_src_lengths, ref_tgt_lengths, None)
        dec_init_hidden = self.init_decoder_state(enc_hidden, ref_values)
        (dec_outputs, dec_hiddens, attn) = self.decode(tgt_inputs, ref_keys, ref_values, dec_init_hidden, ref_mask, src_context, src_mask)
        return (dec_outputs, attn, ref_keys)

    def encode(self, src_inputs, ref_src_inputs, ref_tgt_inputs, src_lengths, ref_src_lengths, ref_tgt_lengths, hidden=None):
        """Encode the source sentence and all reference pairs.

        Builds, per reference pair, a key memory (decoder_ref over the
        reference target conditioned on the reference source) and a value
        memory (encoder_ref over the reference target), then concatenates
        them over the time axis.
        """
        emb_src = self.enc_embedding(src_inputs)
        embs_ref_src = [self.enc_embedding(ref_src_input) for ref_src_input in ref_src_inputs]
        embs_ref_tgt = [self.dec_embedding(ref_tgt_input) for ref_tgt_input in ref_tgt_inputs]
        (ref_values, ref_keys, ref_mask) = ([], [], [])
        for (emb_ref_src, emb_ref_tgt, ref_src_length, ref_tgt_length) in zip(embs_ref_src, embs_ref_tgt, ref_src_lengths, ref_tgt_lengths):
            (ref_src_context, enc_ref_hidden) = self.encoder_src(emb_ref_src, ref_src_length, None)
            ref_src_mask = sequence_mask(ref_src_length)
            (ref_key, _, _) = self.decoder_ref(emb_ref_tgt, ref_src_context, enc_ref_hidden, ref_src_mask)
            (ref_value, _) = self.encoder_ref(emb_ref_tgt, ref_tgt_length, None)
            # Masks cover length-1 positions since one step is dropped below.
            ref_msk = sequence_mask([(x - 1) for x in ref_tgt_length])
            # Shift by one step: values drop the first position, keys drop
            # the last, so key at t pairs with value at t+1 — presumably a
            # next-token alignment; confirm against the decoder's attention.
            ref_values.append(ref_value[1:])
            ref_keys.append(ref_key[:(- 1)])
            ref_mask.append(ref_msk)
        # Concatenate all references along the time axis (dim 0 for
        # keys/values, dim 1 for the masks).
        ref_values = torch.cat(ref_values, 0)
        ref_keys = torch.cat(ref_keys, 0)
        ref_mask = torch.cat(ref_mask, 1)
        (src_context, enc_hidden) = self.encoder_src(emb_src, src_lengths, None)
        src_mask = sequence_mask(src_lengths)
        return (ref_values, enc_hidden, ref_keys, ref_mask, src_context, src_mask)

    def init_decoder_state(self, enc_hidden, context):
        """Initial decoder state is the source encoder's final hidden state
        (the reference context is currently unused here)."""
        return enc_hidden

    def decode(self, input, context_key, context_value, state, context_mask, src_context, src_mask):
        """Run the decoder over the (embedded) target with reference
        key/value memories and the source context."""
        emb = self.dec_embedding(input)
        (dec_outputs, dec_hiddens, attn) = self.decoder(emb, context_key, context_value, state, context_mask, src_context, src_mask)
        return (dec_outputs, dec_hiddens, attn)

    def save_checkpoint(self, epoch, opt, filename):
        """Serialize all sub-module state dicts plus epoch/options."""
        torch.save({'encoder_src_dict': self.encoder_src.state_dict(), 'encoder_ref_dict': self.encoder_ref.state_dict(), 'decoder_ref_dict': self.decoder_ref.state_dict(), 'decoder_dict': self.decoder.state_dict(), 'enc_embedding_dict': self.enc_embedding.state_dict(), 'dec_embedding_dict': self.dec_embedding.state_dict(), 'generator_dict': self.generator.state_dict(), 'opt': opt, 'epoch': epoch}, filename)

    def load_checkpoint(self, filename):
        """Restore all sub-module state dicts; returns the stored epoch."""
        ckpt = torch.load(filename)
        self.enc_embedding.load_state_dict(ckpt['enc_embedding_dict'])
        self.dec_embedding.load_state_dict(ckpt['dec_embedding_dict'])
        self.encoder_src.load_state_dict(ckpt['encoder_src_dict'])
        self.encoder_ref.load_state_dict(ckpt['encoder_ref_dict'])
        self.decoder.load_state_dict(ckpt['decoder_dict'])
        self.decoder_ref.load_state_dict(ckpt['decoder_ref_dict'])
        self.generator.load_state_dict(ckpt['generator_dict'])
        epoch = ckpt['epoch']
        return epoch
_config
def ilgsn_side_frozen():
    """Named configuration: freeze the side perception unit (eval-only).

    NOTE(review): sacred-style config function — the bare ``_config`` line
    above looks like a stripped config decorator, which captures local
    variables, so the absent return is presumably intentional; confirm.
    """
    cfg = {}
    cfg['learner'] = {'model_kwargs': {'base_kwargs': {'perception_unit_kwargs': {'extra_kwargs': {'side_kwargs': {'eval_only': True}}}}}}
def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch):
    """Build a mock trainer plus an EpochBatchIterator over a toy
    single-row token dataset of *epoch_size* tokens (one sample per batch)."""
    token_row = torch.LongTensor(list(range(epoch_size))).view(1, -1)
    block_ds = data.TokenBlockDataset(
        token_row, sizes=[token_row.size(-1)], block_size=1, pad=0, eos=1, include_targets=False)
    trainer = mock_trainer(epoch, num_updates, iterations_in_epoch)
    pair_ds = data.LanguagePairDataset(block_ds, block_ds.sizes, mock_dict(), shuffle=False)
    # One example per batch, in order.
    batch_sampler = [[i] for i in range(epoch_size)]
    epoch_itr = data.EpochBatchIterator(dataset=pair_ds, collate_fn=pair_ds.collater, batch_sampler=batch_sampler)
    return (trainer, epoch_itr)
def star_function(summary_pdf, name_string, abundances, cube, elements_to_trace, gas_reservoir, number_of_models_overplotted):
    """Score the final stellar mass of a chemical-evolution run; optionally plot summaries.

    Parameters
    ----------
    summary_pdf : bool
        If True, save 'stars_<name_string>.png' and 'infall_<name_string>.png'.
    name_string : str
        Suffix for the saved figure file names.
    abundances, elements_to_trace :
        Unused here; kept for interface compatibility with sibling *_function callbacks.
    cube : mapping
        Evolution table with 'time', 'gas', 'stars', 'mass_in_remnants', 'sfr',
        'infall', 'feedback' arrays.
    gas_reservoir : mapping
        Corona table with a 'gas' array.
    number_of_models_overplotted : int
        When > 1, histories are accumulated under output/comparison/ and
        overplotted with low alpha.

    Returns
    -------
    list
        Always [0.0]; the computed log-probability is only used for plot titles.
    """
    def _accumulate(path, value):
        # Append `value` to the history stored at `path`.npy (created if absent),
        # re-save it, and return the updated list of arrays.
        if os.path.isfile(path + '.npy'):
            history = list(np.load(path + '.npy'))
        else:
            history = []
        history.append(np.array(value))
        np.save(path, history)
        return history

    stars_at_end = 28.0
    std = 2.0
    dt = (cube['time'][1] - cube['time'][0])
    # Gaussian log-likelihood of the final stellar mass against the target value.
    probability = np.log(float(gaussian(cube['stars'][(- 1)], stars_at_end, std)))
    if (number_of_models_overplotted > 1):
        # The original code assumed output/comparison/ already existed.
        os.makedirs('output/comparison', exist_ok=True)
        old = _accumulate('output/comparison/sfr', (cube['sfr'] * (1.0 / dt)))
        old1 = _accumulate('output/comparison/infall', (cube['infall'] * (1.0 / dt)))
        old2 = _accumulate('output/comparison/gas_mass', cube['gas'])
        old3 = _accumulate('output/comparison/star_mass', cube['stars'])
        old4 = _accumulate('output/comparison/remnant_mass', cube['mass_in_remnants'])
        old5 = _accumulate('output/comparison/corona_mass', gas_reservoir['gas'])
        # NOTE: old6 (feedback) is stored for cross-run comparison but not plotted below.
        old6 = _accumulate('output/comparison/feedback_mass', (cube['feedback'] * (1.0 / dt)))
    if summary_pdf:
        # --- mass budget plot ---
        plt.clf()
        fig = plt.figure(figsize=(11.69, 8.27), dpi=100)
        ax = fig.add_subplot(111)
        plt.plot(cube['time'], cube['gas'], label=('gas %.2f' % cube['gas'][(- 1)]), color='b')
        plt.plot(cube['time'], cube['stars'], label=('stars (only thin disc including remnants) %.2f' % cube['stars'][(- 1)]), color='r')
        plt.plot(cube['time'], cube['mass_in_remnants'], label=('remnants %.2f' % cube['mass_in_remnants'][(- 1)]), color='k')
        plt.plot(cube['time'], gas_reservoir['gas'], label=('corona %.2f' % gas_reservoir['gas'][(- 1)]), color='y')
        if (number_of_models_overplotted > 1):
            # Overplot earlier runs with low alpha for comparison.
            for item in old2:
                plt.plot(cube['time'], np.array(item), linestyle='-', color='b', alpha=0.2)
            for item in old3:
                plt.plot(cube['time'], np.array(item), linestyle='-', color='r', alpha=0.2)
            for item in old4:
                plt.plot(cube['time'], np.array(item), linestyle='-', color='g', alpha=0.2)
            for item in old5:
                plt.plot(cube['time'], np.array(item), linestyle='-', color='y', alpha=0.2)
        plt.grid('on')
        plt.yscale('log')
        plt.ylabel('M$\\odot$')
        plt.xlabel('time in Gyr')
        plt.title(('ln(probability) star content (normed to pmax) = %.2f' % probability))
        plt.legend(loc='best', numpoints=1).get_frame().set_alpha(0.5)
        plt.savefig(('stars_%s.png' % name_string))
        # --- infall / SFR rate plot ---
        plt.clf()
        fig = plt.figure(figsize=(11.69, 8.27), dpi=100)
        ax = fig.add_subplot(111)
        plt.plot(cube['time'], (cube['infall'] * (1.0 / dt)), linestyle='-', color='r', label=('infall %.2f' % sum(cube['infall'])))
        plt.plot(cube['time'], (cube['sfr'] * (1.0 / dt)), linestyle='-', color='b', label=('sfr %.2f' % sum(cube['sfr'])))
        if (number_of_models_overplotted > 1):
            for item in old:
                plt.plot(cube['time'], np.array(item), linestyle='-', color='b', alpha=0.2)
            for item in old1:
                plt.plot(cube['time'], np.array(item), linestyle='-', color='r', alpha=0.2)
        plt.grid('on')
        plt.ylabel('M$\\odot$Gyr$^{-1}$')
        plt.xlabel('time in Gyr')
        plt.title(('ln(probability) star content (normed to pmax) = %.2f' % probability))
        plt.legend(loc='best', numpoints=1).get_frame().set_alpha(0.5)
        plt.savefig(('infall_%s.png' % name_string))
    return [0.0]
def produce_combinations(array):
    """Yield copies of *array* with exactly one element removed, in index order.

    Works on any sequence supporting slicing and concatenation (list, tuple, str).
    """
    for idx in range(len(array)):
        yield array[:idx] + array[idx + 1:]
def exponential_decay(step, rate, decay_steps, start_step=0):
    """Staircase exponential decay: rate ** (elapsed // decay_steps).

    `elapsed` is offset by one full decay period, so the first period already
    yields `rate ** 1`; before `start_step - decay_steps` the factor is 1.
    """
    elapsed = max(step - start_step + decay_steps, 0)
    return rate ** (elapsed // decay_steps)
class LinesMask(Mask):
    """Mask out wavelength ranges around known lines, in observed and rest frames.

    The mask definition is an ascii table with columns
    (type, wave_min, wave_max, frame), where frame is 'RF' (rest frame)
    or 'OBS' (observed frame).
    """

    def __init__(self, config):
        """Initialize from a config section.

        Arguments:
            config: config section providing 'filename', the path to the
                ascii mask-definition table.

        Raises:
            MaskError: if 'filename' is missing or the file cannot be read.
        """
        super().__init__(config)
        mask_file = config.get('filename')
        if (mask_file is None):
            raise MaskError("Missing argument 'filename' required by LinesMask")
        try:
            mask = Table.read(mask_file, names=('type', 'wave_min', 'wave_max', 'frame'), format='ascii')
            # Precompute log10 bounds once; apply_mask works in log-lambda space.
            mask['log_wave_min'] = np.log10(mask['wave_min'])
            mask['log_wave_max'] = np.log10(mask['wave_max'])
            select_rest_frame_mask = (mask['frame'] == 'RF')
            select_obs_mask = (mask['frame'] == 'OBS')
            self.mask_rest_frame = mask[select_rest_frame_mask]
            self.mask_obs_frame = mask[select_obs_mask]
        except (OSError, ValueError) as error:
            # Bug fix: the message previously said "SkyMask" (copy-paste error).
            raise MaskError(f'Error loading LinesMask. Unable to read mask file. File {mask_file}') from error

    def apply_mask(self, forest):
        """Mask forest pixels falling inside any configured wavelength range.

        Builds a boolean keep-array `w` over the forest's log-lambda grid,
        zeroes every [min, max) range in both frames, then applies `w` to all
        fields listed in Forest.mask_fields.
        """
        w = np.ones(forest.log_lambda.size, dtype=bool)
        # Observed frame: searchsorted yields [start, stop) index pairs per range.
        mask_idx_ranges = np.searchsorted(forest.log_lambda, [self.mask_obs_frame['log_wave_min'], self.mask_obs_frame['log_wave_max']]).T
        for (idx1, idx2) in mask_idx_ranges:
            w[idx1:idx2] = 0
        # Rest frame: shift the grid to the object's rest frame first.
        log_lambda_rest_frame = (forest.log_lambda - np.log10((1.0 + forest.z)))
        mask_idx_ranges = np.searchsorted(log_lambda_rest_frame, [self.mask_rest_frame['log_wave_min'], self.mask_rest_frame['log_wave_max']]).T
        for (idx1, idx2) in mask_idx_ranges:
            w[idx1:idx2] = 0
        for param in Forest.mask_fields:
            self._masker(forest, param, w)
# NOTE(review): `_config` looks like a decorator stripped of its '@' (sacred-style
# config function) -- confirm against the original file.
_config
def srl_features():
    """Config variant: use a pretrained SRL (forward/inverse) feature extractor."""
    # NOTE(review): `uuid` appears unused here; presumably captured as a config
    # value by the decorator framework -- verify.
    uuid = 'habitat_alexnet_feature'
    cfg = {}
    cfg['learner'] = {'perception_network': 'BaseModelAutoEncoder', 'perception_network_kwargs': {'n_map_channels': 1, 'use_target': False}}
    # The transform strings are evaluated later by the env factory; whitespace is
    # stripped so they survive serialization as single-line expressions.
    cfg['env'] = {'env_specific_kwargs': {'target_dim': 6}, 'transform_fn_pre_aggregation': "\n        TransformFactory.independent(\n        {\n            'taskonomy': rescale_centercrop_resize((3,224,224)),\n        },\n        keep_unnamed=True)\n        ".translate(remove_whitespace), 'transform_fn_post_aggregation': "\n        TransformFactory.independent(\n        {{\n            'taskonomy':srl_features_transform('{load_path}'),\n            'map':identity_transform(),\n            'global_pos':identity_transform(),\n        }},\n        keep_unnamed=False)\n        ".translate(remove_whitespace).format(load_path='/mnt/share/midlevel_control/baselines/srl_models/HabitatPlanning/forward_inverse/srl_model.pth')}
class _ProjectorHeadBase(nn.Module):
    """Base class for projector heads: validates head/pool configuration and
    builds the pooling module shared by concrete subclasses."""

    def __init__(self, *, input_dim: int, output_dim: int, head_type: str, normalize: bool, pool_name='adaptive_avg', spatial_size=(1, 1)):
        # input_dim/output_dim: feature dimensions of the dense head.
        # head_type: validated by _check_head_type (allowed values defined there).
        # normalize: whether the head output is normalized (used by subclasses).
        # pool_name: validated by _check_pool_name; resolved via get_pool_component.
        # spatial_size: target output size for adaptive pooling; normalized to a pair.
        super().__init__()
        self._input_dim = input_dim
        self._output_dim = output_dim
        # NOTE(review): `assert` is stripped under `python -O`; these look like
        # input validation -- consider raising ValueError instead (would change
        # the exception type callers see).
        assert _check_head_type(head_type=head_type)
        self._head_type = head_type
        self._normalize = normalize
        assert _check_pool_name(pool_name=pool_name)
        self._pool_name = pool_name
        self._spatial_size = _pair(spatial_size)
        self._pooling_module = get_pool_component(self._pool_name, self._spatial_size)

    def _record_message(self):
        # Human-readable one-line summary of the configuration, for logging.
        # The spatial-size fragment only appears for adaptive pool variants.
        return f"Initializing {self.__class__.__name__} with {self._head_type} dense head ({self._input_dim}:{self._output_dim}), {(' normalization ' if self._normalize else '')}{(f'{self._pool_name} with {self._spatial_size}' if ('adaptive' in self._pool_name) else '')} "
def _get_detector_cfg(fname):
    """Load the config module for *fname*, clear its class list, and return a
    deep copy of the model config (so callers can mutate it freely)."""
    cfg = _get_config_module(fname)
    cfg.model.class_list = None
    return copy.deepcopy(cfg.model)
def get_index(num_domain=2):
    """Return all ordered pairs (i, j) with 0 <= i < j <= num_domain.

    Equivalent to itertools.combinations(range(num_domain + 1), 2) as a list.
    """
    return [(first, second)
            for first in range(num_domain)
            for second in range(first + 1, num_domain + 1)]
def test_transformer_decoder(num_layers=2, embed_dims=8, num_heads=2, feedforward_channels=8, num_key=10, num_query=5, batch_size=1):
    """Smoke-test TransformerDecoder output shapes across optional inputs,
    custom layer order, and return_intermediate mode."""
    per_call_shape = (1, num_query, batch_size, embed_dims)
    decoder = TransformerDecoder(num_layers, embed_dims, num_heads, feedforward_channels)
    query = torch.rand(num_query, batch_size, embed_dims)
    memory = torch.rand(num_key, batch_size, embed_dims)
    assert decoder(query, memory).shape == per_call_shape
    query_pos = torch.rand(num_query, batch_size, embed_dims)
    assert decoder(query, memory, query_pos=query_pos).shape == per_call_shape
    memory_pos = torch.rand(num_key, batch_size, embed_dims)
    assert decoder(query, memory, memory_pos, query_pos).shape == per_call_shape
    memory_key_padding_mask = torch.rand(batch_size, num_key) > 0.5
    assert decoder(query, memory, memory_pos, query_pos, memory_key_padding_mask=memory_key_padding_mask).shape == per_call_shape
    target_key_padding_mask = torch.rand(batch_size, num_query) > 0.5
    assert decoder(query, memory, memory_pos, query_pos, memory_key_padding_mask=memory_key_padding_mask, target_key_padding_mask=target_key_padding_mask).shape == per_call_shape
    memory_attn_mask = torch.rand(num_query, num_key) > 0.5
    assert decoder(query, memory, memory_pos, query_pos, memory_attn_mask, None, memory_key_padding_mask, target_key_padding_mask).shape == per_call_shape
    target_attn_mask = torch.rand(num_query, num_query) > 0.5
    assert decoder(query, memory, memory_pos, query_pos, memory_attn_mask, target_attn_mask, memory_key_padding_mask, target_key_padding_mask).shape == per_call_shape
    # Custom operation order inside each decoder layer.
    order = ('norm', 'selfattn', 'norm', 'multiheadattn', 'norm', 'ffn')
    decoder = TransformerDecoder(num_layers, embed_dims, num_heads, feedforward_channels, order=order)
    assert decoder(query, memory, memory_pos, query_pos, memory_attn_mask, target_attn_mask, memory_key_padding_mask, target_key_padding_mask).shape == per_call_shape
    # return_intermediate stacks one output per layer.
    decoder = TransformerDecoder(num_layers, embed_dims, num_heads, feedforward_channels, order=order, return_intermediate=True)
    assert decoder(query, memory, memory_pos, query_pos, memory_attn_mask, target_attn_mask, memory_key_padding_mask, target_key_padding_mask).shape == (num_layers, num_query, batch_size, embed_dims)
def librosa_exists():
    """Return True if the optional `librosa` package can be imported, else False."""
    try:
        __import__('librosa')
        return True
    except ImportError:
        return False
class TestEclipseRetrieval(unittest.TestCase):
    """End-to-end smoke test of eclipse-depth retrieval on HD 209458 b data.

    Runs the multinest retriever with a tiny maxcall budget, so only
    "it runs without crashing" is verified, not the fit quality.
    """

    def test_hd209458b(self):
        """Assemble WFC3 + Spitzer eclipse depths and run two short retrievals."""
        def wfc3():
            # HST/WFC3 eclipse depths: wavelengths in meters (1e-6 scales microns).
            wavelengths = (1e-06 * np.array([1.1279, 1.1467, 1.1655, 1.1843, 1.2031, 1.2218, 1.2406, 1.2594, 1.2782, 1.2969, 1.3157, 1.3345, 1.3533, 1.3721, 1.3908, 1.4096, 1.4284, 1.4472, 1.466, 1.4848, 1.5035, 1.5223, 1.5411, 1.5599, 1.5786, 1.5974, 1.6162, 1.635]))
            # Narrow bins centered on each wavelength (+/- 9.5 nm).
            wavelength_bins = [[(w - 9.5e-09), (w + 9.5e-09)] for w in wavelengths]
            # Depths in ppm, offset by 96 ppm baseline.
            depths = (1e-06 * (96 + np.array([(- 96), (- 18), 28, (- 3), (- 7), (- 45), (- 32), 3, (- 16), 53, 31, 12, (- 21), (- 27), (- 40), 8, (- 2), (- 29), (- 64), 9, 132, 115, 47, 3, 39, (- 16), (- 64), 14])))
            errors = (1e-06 * np.array([47, 50, 45, 44, 43, 50, 42, 42, 42, 41, 41, 41, 41, 41, 42, 42, 42, 42, 43, 43, 43, 43, 44, 45, 45, 46, 46, 74]))
            # Add a systematic noise floor in quadrature.
            errors = np.sqrt(((errors ** 2) + (3.9e-05 ** 2)))
            return (np.array(wavelength_bins), depths, errors)

        def spitzer():
            # Spitzer broadband eclipse depths; bins in microns, depths in percent.
            wave_bins = []
            depths = []
            errors = []
            wave_bins.append([3.2, 4.0])
            wave_bins.append([4.0, 5.0])
            wave_bins.append([5.1, 6.3])
            wave_bins.append([6.6, 9.0])
            wave_bins.append([13.5, 18.5])
            wave_bins.append([20.8, 26.1])
            depths = (np.array([0.1466, 0.1787, 0.31, 0.344, 0.391, 0.598]) * 0.01)
            errors = (np.array([0.004, 0.0038, 0.034, 0.0036, 0.022, 0.038]) * 0.01)
            return ((1e-06 * np.array(wave_bins)), depths, errors)

        (wfc3_bins, wfc3_depths, wfc3_errors) = wfc3()
        (spitzer_bins, spitzer_depths, spitzer_errors) = spitzer()
        bins = np.concatenate([wfc3_bins, spitzer_bins])
        depths = np.concatenate([wfc3_depths, spitzer_depths])
        errors = np.concatenate([wfc3_errors, spitzer_errors])
        R_guess = (1.13 * R_jup)
        Rs = (0.75 * R_sun)
        retriever = CombinedRetriever()
        # Parametric T-P profile with literature-ish starting values for HD 209458 b.
        fit_info = retriever.get_default_fit_info(Rs=Rs, Mp=(1.13 * M_jup), Rp=R_guess, logZ=0, CO_ratio=0.53, log_cloudtop_P=np.inf, log_scatt_factor=0, scatt_slope=4, error_multiple=1, T_star=5052, T=1295, T0=1295, log_P1=2.4, alpha1=2, alpha2=2, log_P3=6, T3=1295, profile_type='parametric')
        fit_info.add_gaussian_fit_param('Rs', (0.01 * R_sun))
        fit_info.add_gaussian_fit_param('Mp', (0.01 * M_jup))
        fit_info.add_uniform_fit_param('Rp', (0.9 * R_guess), (1.1 * R_guess))
        fit_info.add_uniform_fit_param('logZ', (- 1), 3)
        fit_info.add_uniform_fit_param('error_multiple', 0.5, 5)
        fit_info.add_uniform_fit_param('T0', 1000, 3000)
        fit_info.add_uniform_fit_param('log_P1', 1, 4)
        fit_info.add_uniform_fit_param('alpha1', 0.1, 4)
        fit_info.add_uniform_fit_param('alpha2', 0.1, 4)
        fit_info.add_uniform_fit_param('log_P3', 5, 7)
        fit_info.add_uniform_fit_param('T3', 1000, 3000)
        # Eclipse depths only (no transit data), then with a transit point added;
        # maxcall=200 keeps both runs fast.
        result = retriever.run_multinest(None, None, None, bins, depths, errors, fit_info, plot_best=False, maxcall=200)
        result = retriever.run_multinest(np.array([[1.1e-06, 1.6e-06]]), np.array([((R_guess ** 2) / (Rs ** 2))]), np.array([5e-05]), bins, depths, errors, fit_info, plot_best=False, maxcall=200)
_pipeline_test
class TQAPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
# NOTE(review): the bare names below look like decorators stripped of '@'
# (e.g. @require_tf, @require_torch) -- confirm against the original file.
_tensorflow_probability
_pandas
_tf
_torch
def test_small_model_tf(self):
    """TF pipeline smoke test with a tiny randomly-initialized TAPAS WTQ model.

    The random model always predicts empty AVERAGE answers; the test pins that
    deterministic degenerate output and the ValueError input validation.
    """
    model_id = 'lysandre/tiny-tapas-random-wtq'
    model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    self.assertIsInstance(model.config.aggregation_labels, dict)
    self.assertIsInstance(model.config.no_aggregation_label_index, int)
    table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
    # Single query.
    outputs = table_querier(table={'actors': ['brad pitt', 'leonardo di caprio', 'george clooney'], 'age': ['56', '45', '59'], 'number of movies': ['87', '53', '69'], 'date of birth': ['7 february 1967', '10 june 1996', '28 november 1967']}, query='how many movies has george clooney played in?')
    self.assertEqual(outputs, {'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'})
    # Batched queries against the same table.
    outputs = table_querier(table={'actors': ['brad pitt', 'leonardo di caprio', 'george clooney'], 'age': ['56', '45', '59'], 'number of movies': ['87', '53', '69'], 'date of birth': ['7 february 1967', '10 june 1996', '28 november 1967']}, query=['how many movies has george clooney played in?', 'how old is he?', "what's his date of birth?"])
    self.assertEqual(outputs, [{'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}, {'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}, {'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}])
    outputs = table_querier(table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']}, query=['What repository has the largest number of stars?', 'Given that the numbers of stars defines if a repository is active, what repository is the most active?', 'What is the number of repositories?', 'What is the average number of stars?', 'What is the total amount of stars?'])
    self.assertEqual(outputs, [{'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}, {'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}, {'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}, {'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}, {'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}])
    # Missing/empty table or query must raise.
    with self.assertRaises(ValueError):
        table_querier(query='What does it do with empty context ?', table=None)
    with self.assertRaises(ValueError):
        table_querier(query='What does it do with empty context ?', table='')
    with self.assertRaises(ValueError):
        table_querier(query='What does it do with empty context ?', table={})
    with self.assertRaises(ValueError):
        table_querier(table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']})
    with self.assertRaises(ValueError):
        table_querier(query='', table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']})
    with self.assertRaises(ValueError):
        table_querier(query=None, table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']})
# NOTE(review): `_torch` looks like a decorator stripped of '@' (@require_torch) -- confirm.
_torch
def test_small_model_pt(self):
    """PyTorch twin of test_small_model_tf: tiny random TAPAS WTQ model always
    yields empty AVERAGE answers; also checks ValueError input validation."""
    model_id = 'lysandre/tiny-tapas-random-wtq'
    model = AutoModelForTableQuestionAnswering.from_pretrained(model_id)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    self.assertIsInstance(model.config.aggregation_labels, dict)
    self.assertIsInstance(model.config.no_aggregation_label_index, int)
    table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
    # Single query.
    outputs = table_querier(table={'actors': ['brad pitt', 'leonardo di caprio', 'george clooney'], 'age': ['56', '45', '59'], 'number of movies': ['87', '53', '69'], 'date of birth': ['7 february 1967', '10 june 1996', '28 november 1967']}, query='how many movies has george clooney played in?')
    self.assertEqual(outputs, {'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'})
    # Batched queries.
    outputs = table_querier(table={'actors': ['brad pitt', 'leonardo di caprio', 'george clooney'], 'age': ['56', '45', '59'], 'number of movies': ['87', '53', '69'], 'date of birth': ['7 february 1967', '10 june 1996', '28 november 1967']}, query=['how many movies has george clooney played in?', 'how old is he?', "what's his date of birth?"])
    self.assertEqual(outputs, [{'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}, {'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}, {'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}])
    outputs = table_querier(table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']}, query=['What repository has the largest number of stars?', 'Given that the numbers of stars defines if a repository is active, what repository is the most active?', 'What is the number of repositories?', 'What is the average number of stars?', 'What is the total amount of stars?'])
    self.assertEqual(outputs, [{'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}, {'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}, {'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}, {'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}, {'answer': 'AVERAGE > ', 'coordinates': [], 'cells': [], 'aggregator': 'AVERAGE'}])
    # Missing/empty table or query must raise.
    with self.assertRaises(ValueError):
        table_querier(query='What does it do with empty context ?', table=None)
    with self.assertRaises(ValueError):
        table_querier(query='What does it do with empty context ?', table='')
    with self.assertRaises(ValueError):
        table_querier(query='What does it do with empty context ?', table={})
    with self.assertRaises(ValueError):
        table_querier(table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']})
    with self.assertRaises(ValueError):
        table_querier(query='', table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']})
    with self.assertRaises(ValueError):
        table_querier(query=None, table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']})
# NOTE(review): `_torch` looks like a decorator stripped of '@' (@require_torch) -- confirm.
_torch
def test_slow_tokenizer_sqa_pt(self):
    """PyTorch SQA pipeline test with a tiny random TAPAS SQA model.

    Checks that sequential conversational mode differs from batch mode after the
    first answer, pins the model's degenerate outputs, and validates inputs.
    """
    model_id = 'lysandre/tiny-tapas-random-sqa'
    model = AutoModelForTableQuestionAnswering.from_pretrained(model_id)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
    inputs = {'table': {'actors': ['brad pitt', 'leonardo di caprio', 'george clooney'], 'age': ['56', '45', '59'], 'number of movies': ['87', '53', '69'], 'date of birth': ['7 february 1967', '10 june 1996', '28 november 1967']}, 'query': ['how many movies has george clooney played in?', 'how old is he?', "what's his date of birth?"]}
    # Sequential mode feeds previous answers back in, so only the first answer
    # can match batch mode.
    sequential_outputs = table_querier(**inputs, sequential=True)
    batch_outputs = table_querier(**inputs, sequential=False)
    self.assertEqual(len(sequential_outputs), 3)
    self.assertEqual(len(batch_outputs), 3)
    self.assertEqual(sequential_outputs[0], batch_outputs[0])
    self.assertNotEqual(sequential_outputs[1], batch_outputs[1])
    table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
    outputs = table_querier(table={'actors': ['brad pitt', 'leonardo di caprio', 'george clooney'], 'age': ['56', '45', '59'], 'number of movies': ['87', '53', '69'], 'date of birth': ['7 february 1967', '10 june 1996', '28 november 1967']}, query='how many movies has george clooney played in?')
    self.assertEqual(outputs, {'answer': '7 february 1967', 'coordinates': [(0, 3)], 'cells': ['7 february 1967']})
    outputs = table_querier(table={'actors': ['brad pitt', 'leonardo di caprio', 'george clooney'], 'age': ['56', '45', '59'], 'number of movies': ['87', '53', '69'], 'date of birth': ['7 february 1967', '10 june 1996', '28 november 1967']}, query=['how many movies has george clooney played in?', 'how old is he?', "what's his date of birth?"])
    self.assertEqual(outputs, [{'answer': '7 february 1967', 'coordinates': [(0, 3)], 'cells': ['7 february 1967']}, {'answer': '7 february 1967', 'coordinates': [(0, 3)], 'cells': ['7 february 1967']}, {'answer': '7 february 1967', 'coordinates': [(0, 3)], 'cells': ['7 february 1967']}])
    outputs = table_querier(table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']}, query=['What repository has the largest number of stars?', 'Given that the numbers of stars defines if a repository is active, what repository is the most active?', 'What is the number of repositories?', 'What is the average number of stars?', 'What is the total amount of stars?'])
    self.assertEqual(outputs, [{'answer': 'Python, Python', 'coordinates': [(0, 3), (1, 3)], 'cells': ['Python', 'Python']}, {'answer': 'Python, Python', 'coordinates': [(0, 3), (1, 3)], 'cells': ['Python', 'Python']}, {'answer': 'Python, Python', 'coordinates': [(0, 3), (1, 3)], 'cells': ['Python', 'Python']}, {'answer': 'Python, Python', 'coordinates': [(0, 3), (1, 3)], 'cells': ['Python', 'Python']}, {'answer': 'Python, Python', 'coordinates': [(0, 3), (1, 3)], 'cells': ['Python', 'Python']}])
    # Missing/empty table or query must raise.
    with self.assertRaises(ValueError):
        table_querier(query='What does it do with empty context ?', table=None)
    with self.assertRaises(ValueError):
        table_querier(query='What does it do with empty context ?', table='')
    with self.assertRaises(ValueError):
        table_querier(query='What does it do with empty context ?', table={})
    with self.assertRaises(ValueError):
        table_querier(table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']})
    with self.assertRaises(ValueError):
        table_querier(query='', table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']})
    with self.assertRaises(ValueError):
        table_querier(query=None, table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']})
# NOTE(review): the bare names below look like decorators stripped of '@'
# (e.g. @require_tf, @require_torch) -- confirm against the original file.
_tf
_tensorflow_probability
_pandas
_torch
def test_slow_tokenizer_sqa_tf(self):
    """TF twin of test_slow_tokenizer_sqa_pt: sequential vs batch SQA mode plus
    degenerate-output pinning and input validation."""
    model_id = 'lysandre/tiny-tapas-random-sqa'
    model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
    inputs = {'table': {'actors': ['brad pitt', 'leonardo di caprio', 'george clooney'], 'age': ['56', '45', '59'], 'number of movies': ['87', '53', '69'], 'date of birth': ['7 february 1967', '10 june 1996', '28 november 1967']}, 'query': ['how many movies has george clooney played in?', 'how old is he?', "what's his date of birth?"]}
    # Sequential mode feeds previous answers back in, so only the first answer
    # can match batch mode.
    sequential_outputs = table_querier(**inputs, sequential=True)
    batch_outputs = table_querier(**inputs, sequential=False)
    self.assertEqual(len(sequential_outputs), 3)
    self.assertEqual(len(batch_outputs), 3)
    self.assertEqual(sequential_outputs[0], batch_outputs[0])
    self.assertNotEqual(sequential_outputs[1], batch_outputs[1])
    table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
    outputs = table_querier(table={'actors': ['brad pitt', 'leonardo di caprio', 'george clooney'], 'age': ['56', '45', '59'], 'number of movies': ['87', '53', '69'], 'date of birth': ['7 february 1967', '10 june 1996', '28 november 1967']}, query='how many movies has george clooney played in?')
    self.assertEqual(outputs, {'answer': '7 february 1967', 'coordinates': [(0, 3)], 'cells': ['7 february 1967']})
    outputs = table_querier(table={'actors': ['brad pitt', 'leonardo di caprio', 'george clooney'], 'age': ['56', '45', '59'], 'number of movies': ['87', '53', '69'], 'date of birth': ['7 february 1967', '10 june 1996', '28 november 1967']}, query=['how many movies has george clooney played in?', 'how old is he?', "what's his date of birth?"])
    self.assertEqual(outputs, [{'answer': '7 february 1967', 'coordinates': [(0, 3)], 'cells': ['7 february 1967']}, {'answer': '7 february 1967', 'coordinates': [(0, 3)], 'cells': ['7 february 1967']}, {'answer': '7 february 1967', 'coordinates': [(0, 3)], 'cells': ['7 february 1967']}])
    outputs = table_querier(table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']}, query=['What repository has the largest number of stars?', 'Given that the numbers of stars defines if a repository is active, what repository is the most active?', 'What is the number of repositories?', 'What is the average number of stars?', 'What is the total amount of stars?'])
    self.assertEqual(outputs, [{'answer': 'Python, Python', 'coordinates': [(0, 3), (1, 3)], 'cells': ['Python', 'Python']}, {'answer': 'Python, Python', 'coordinates': [(0, 3), (1, 3)], 'cells': ['Python', 'Python']}, {'answer': 'Python, Python', 'coordinates': [(0, 3), (1, 3)], 'cells': ['Python', 'Python']}, {'answer': 'Python, Python', 'coordinates': [(0, 3), (1, 3)], 'cells': ['Python', 'Python']}, {'answer': 'Python, Python', 'coordinates': [(0, 3), (1, 3)], 'cells': ['Python', 'Python']}])
    # Missing/empty table or query must raise.
    with self.assertRaises(ValueError):
        table_querier(query='What does it do with empty context ?', table=None)
    with self.assertRaises(ValueError):
        table_querier(query='What does it do with empty context ?', table='')
    with self.assertRaises(ValueError):
        table_querier(query='What does it do with empty context ?', table={})
    with self.assertRaises(ValueError):
        table_querier(table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']})
    with self.assertRaises(ValueError):
        table_querier(query='', table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']})
    with self.assertRaises(ValueError):
        table_querier(query=None, table={'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']})
# NOTE(review): `_torch` looks like a decorator stripped of '@' (@require_torch) -- confirm.
_torch
def test_integration_wtq_pt(self):
    """Integration test: default (PyTorch) table-QA pipeline on WTQ-style
    queries, pinning real answers including COUNT/AVERAGE/SUM aggregators."""
    table_querier = pipeline('table-question-answering')
    data = {'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']}
    queries = ['What repository has the largest number of stars?', 'Given that the numbers of stars defines if a repository is active, what repository is the most active?', 'What is the number of repositories?', 'What is the average number of stars?', 'What is the total amount of stars?']
    results = table_querier(data, queries)
    expected_results = [{'answer': 'Transformers', 'coordinates': [(0, 0)], 'cells': ['Transformers'], 'aggregator': 'NONE'}, {'answer': 'Transformers', 'coordinates': [(0, 0)], 'cells': ['Transformers'], 'aggregator': 'NONE'}, {'answer': 'COUNT > Transformers, Datasets, Tokenizers', 'coordinates': [(0, 0), (1, 0), (2, 0)], 'cells': ['Transformers', 'Datasets', 'Tokenizers'], 'aggregator': 'COUNT'}, {'answer': 'AVERAGE > 36542, 4512, 3934', 'coordinates': [(0, 1), (1, 1), (2, 1)], 'cells': ['36542', '4512', '3934'], 'aggregator': 'AVERAGE'}, {'answer': 'SUM > 36542, 4512, 3934', 'coordinates': [(0, 1), (1, 1), (2, 1)], 'cells': ['36542', '4512', '3934'], 'aggregator': 'SUM'}]
    self.assertListEqual(results, expected_results)
# NOTE(review): the bare names below look like decorators stripped of '@' -- confirm.
_tensorflow_probability
_pandas
def test_integration_wtq_tf(self):
    """Integration test: TF TAPAS WTQ model on the same repository table,
    pinning real answers including COUNT/AVERAGE/SUM aggregators."""
    model_id = 'google/tapas-base-finetuned-wtq'
    model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    table_querier = pipeline('table-question-answering', model=model, tokenizer=tokenizer)
    data = {'Repository': ['Transformers', 'Datasets', 'Tokenizers'], 'Stars': ['36542', '4512', '3934'], 'Contributors': ['651', '77', '34'], 'Programming language': ['Python', 'Python', 'Rust, Python and NodeJS']}
    queries = ['What repository has the largest number of stars?', 'Given that the numbers of stars defines if a repository is active, what repository is the most active?', 'What is the number of repositories?', 'What is the average number of stars?', 'What is the total amount of stars?']
    results = table_querier(data, queries)
    expected_results = [{'answer': 'Transformers', 'coordinates': [(0, 0)], 'cells': ['Transformers'], 'aggregator': 'NONE'}, {'answer': 'Transformers', 'coordinates': [(0, 0)], 'cells': ['Transformers'], 'aggregator': 'NONE'}, {'answer': 'COUNT > Transformers, Datasets, Tokenizers', 'coordinates': [(0, 0), (1, 0), (2, 0)], 'cells': ['Transformers', 'Datasets', 'Tokenizers'], 'aggregator': 'COUNT'}, {'answer': 'AVERAGE > 36542, 4512, 3934', 'coordinates': [(0, 1), (1, 1), (2, 1)], 'cells': ['36542', '4512', '3934'], 'aggregator': 'AVERAGE'}, {'answer': 'SUM > 36542, 4512, 3934', 'coordinates': [(0, 1), (1, 1), (2, 1)], 'cells': ['36542', '4512', '3934'], 'aggregator': 'SUM'}]
    self.assertListEqual(results, expected_results)
# NOTE(review): `_torch` looks like a decorator stripped of '@' (@require_torch) -- confirm.
_torch
def test_integration_sqa_pt(self):
    """Integration test: PyTorch TAPAS SQA model in sequential (conversational)
    mode; each query builds on the previous answer ('he' refers to Clooney)."""
    table_querier = pipeline('table-question-answering', model='google/tapas-base-finetuned-sqa', tokenizer='google/tapas-base-finetuned-sqa')
    data = {'Actors': ['Brad Pitt', 'Leonardo Di Caprio', 'George Clooney'], 'Age': ['56', '45', '59'], 'Number of movies': ['87', '53', '69'], 'Date of birth': ['7 february 1967', '10 june 1996', '28 november 1967']}
    queries = ['How many movies has George Clooney played in?', 'How old is he?', "What's his date of birth?"]
    results = table_querier(data, queries, sequential=True)
    expected_results = [{'answer': '69', 'coordinates': [(2, 2)], 'cells': ['69']}, {'answer': '59', 'coordinates': [(2, 1)], 'cells': ['59']}, {'answer': '28 november 1967', 'coordinates': [(2, 3)], 'cells': ['28 november 1967']}]
    self.assertListEqual(results, expected_results)
# NOTE(review): the bare names below look like stripped @require_* decorators — confirm.
_tensorflow_probability
_pandas
def test_integration_sqa_tf(self):
    """Sequential SQA table-QA through the pipeline with the TensorFlow TAPAS model."""
    checkpoint = 'google/tapas-base-finetuned-sqa'
    tf_model = TFAutoModelForTableQuestionAnswering.from_pretrained(checkpoint)
    tok = AutoTokenizer.from_pretrained(checkpoint)
    table_querier = pipeline('table-question-answering', model=tf_model, tokenizer=tok)
    data = {'Actors': ['Brad Pitt', 'Leonardo Di Caprio', 'George Clooney'], 'Age': ['56', '45', '59'], 'Number of movies': ['87', '53', '69'], 'Date of birth': ['7 february 1967', '10 june 1996', '28 november 1967']}
    queries = ['How many movies has George Clooney played in?', 'How old is he?', "What's his date of birth?"]
    results = table_querier(data, queries, sequential=True)
    expected_results = [{'answer': '69', 'coordinates': [(2, 2)], 'cells': ['69']}, {'answer': '59', 'coordinates': [(2, 1)], 'cells': ['59']}, {'answer': '28 november 1967', 'coordinates': [(2, 3)], 'cells': ['28 november 1967']}]
    self.assertListEqual(results, expected_results)
# NOTE(review): bare name below looks like a stripped @require_torch decorator — confirm.
_torch
def test_large_model_pt_tapex(self):
    """TAPEX (generative table-QA) produces free-text answers, no coordinates."""
    table_querier = pipeline('table-question-answering', model='microsoft/tapex-large-finetuned-wtq')
    data = {'Actors': ['Brad Pitt', 'Leonardo Di Caprio', 'George Clooney'], 'Age': ['56', '45', '59'], 'Number of movies': ['87', '53', '69'], 'Date of birth': ['7 february 1967', '10 june 1996', '28 november 1967']}
    queries = ['How many movies has George Clooney played in?', 'How old is Mr Clooney ?', "What's the date of birth of Leonardo ?"]
    results = table_querier(data, queries, sequential=True)
    # TAPEX decodes with a leading space.
    expected_results = [{'answer': ' 69'}, {'answer': ' 59'}, {'answer': ' 10 june 1996'}]
    self.assertListEqual(results, expected_results)
class MLPAlgorithm(NNFit):
    """Two-hidden-layer sklearn MLPClassifier wrapped in the AutoML algorithm interface."""

    algorithm_name = 'Neural Network'
    algorithm_short_name = 'Neural Network'

    def __init__(self, params):
        """Build the classifier from the hyper-parameter dict `params`."""
        super(MLPAlgorithm, self).__init__(params)
        logger.debug('MLPAlgorithm.__init__')
        # NOTE(review): a single outer fit iteration; sklearn's max_iter below
        # controls the actual optimization loop.
        self.max_iters = 1
        self.library_version = sklearn.__version__
        hidden_sizes = (params.get('dense_1_size', 32), params.get('dense_2_size', 16))
        self.model = MLPClassifier(
            hidden_layer_sizes=hidden_sizes,
            activation='relu',
            solver='adam',
            learning_rate=params.get('learning_rate_type', 'constant'),
            learning_rate_init=params.get('learning_rate', 0.05),
            alpha=params.get('alpha', 0.0001),
            early_stopping=True,
            n_iter_no_change=50,
            max_iter=500,
            random_state=params.get('seed', 123),
        )

    def get_metric_name(self):
        """Metric optimized/reported for this algorithm."""
        return 'logloss'
class VideoModelCoordLatentNL(nn.Module):
    """Video action classifier over per-frame box coordinates with latent object
    categories and a stack of non-local reasoning layers.

    Pipeline: embed (box coords + category) per frame -> spatial message passing
    between boxes -> temporal fusion per box -> non-local layers across boxes ->
    mean-pool boxes -> linear classifier.
    """

    def __init__(self, opt):
        super(VideoModelCoordLatentNL, self).__init__()
        self.nr_boxes = opt.num_boxes
        self.nr_actions = opt.num_classes
        # NOTE(review): only half the configured frames are used — presumably the
        # loader subsamples every other frame; confirm against the data pipeline.
        self.nr_frames = (opt.num_frames // 2)
        self.img_feature_dim = opt.img_feature_dim
        self.coord_feature_dim = opt.coord_feature_dim
        # 3 category slots; index 0 is padding and contributes no gradient.
        self.category_embed_layer = nn.Embedding(3, (opt.coord_feature_dim // 2), padding_idx=0, scale_grad_by_freq=True)
        # Lift a 4-d box (coordinates) to coord_feature_dim.
        self.coord_to_feature = nn.Sequential(nn.Linear(4, (self.coord_feature_dim // 2), bias=False), nn.BatchNorm1d((self.coord_feature_dim // 2)), nn.ReLU(inplace=True), nn.Linear((self.coord_feature_dim // 2), self.coord_feature_dim, bias=False), nn.BatchNorm1d(self.coord_feature_dim), nn.ReLU())
        # Fuse box feature with its category embedding (concat -> linear).
        self.coord_category_fusion = nn.Sequential(nn.Linear((self.coord_feature_dim + (self.coord_feature_dim // 2)), self.coord_feature_dim, bias=False), nn.BatchNorm1d(self.coord_feature_dim), nn.ReLU(inplace=True))
        # Fuse a box feature with the aggregated message from the other boxes.
        self.spatial_node_fusion = nn.Sequential(nn.Linear((self.coord_feature_dim * 2), self.coord_feature_dim, bias=False), nn.BatchNorm1d(self.coord_feature_dim), nn.ReLU(inplace=True), nn.Linear(self.coord_feature_dim, self.coord_feature_dim, bias=False), nn.BatchNorm1d(self.coord_feature_dim), nn.ReLU())
        self.nr_nonlocal_layers = 3
        self.nonlocal_fusion = []
        for i in range(self.nr_nonlocal_layers):
            self.nonlocal_fusion.append(nn.Sequential(Nonlocal(dim=self.coord_feature_dim, dim_inner=(self.coord_feature_dim // 2)), nn.Conv1d(self.coord_feature_dim, self.coord_feature_dim, kernel_size=1, stride=1, padding=0, bias=False), nn.BatchNorm1d(self.coord_feature_dim), nn.ReLU()))
        self.nonlocal_fusion = nn.ModuleList(self.nonlocal_fusion)
        # Collapse the per-frame features of each box into one vector.
        self.box_feature_fusion = nn.Sequential(nn.Linear((self.nr_frames * self.coord_feature_dim), self.coord_feature_dim, bias=False), nn.BatchNorm1d(self.coord_feature_dim), nn.ReLU(inplace=True), nn.Linear(self.coord_feature_dim, self.coord_feature_dim, bias=False), nn.BatchNorm1d(self.coord_feature_dim), nn.ReLU())
        self.classifier = nn.Sequential(nn.Linear(self.coord_feature_dim, self.coord_feature_dim), nn.ReLU(inplace=True), nn.Linear(self.coord_feature_dim, 512), nn.ReLU(inplace=True), nn.Linear(512, self.nr_actions))
        if opt.fine_tune:
            self.fine_tune(opt.fine_tune)

    def train(self, mode=True):
        """Switch train/eval mode but keep every BatchNorm layer frozen (eval mode,
        affine parameters not trainable)."""
        super(VideoModelCoordLatentNL, self).train(mode)
        for m in self.modules():
            if (isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm3d)):
                m.eval()
                m.weight.requires_grad = False
                m.bias.requires_grad = False

    def fine_tune(self, restore_path, parameters_to_train=['classifier']):
        """Load weights from `restore_path` (except the final classifier layer) and
        freeze everything but that final layer.

        NOTE(review): `parameters_to_train` is never read — the trainable set is
        hard-coded to 'classifier.4'; also a mutable default argument. Confirm intent.
        """
        weights = torch.load(restore_path)['state_dict']
        new_weights = {}
        # Drop the final classifier layer and the DataParallel 'module.' prefix.
        for (k, v) in weights.items():
            if (not ('classifier.4' in k)):
                new_weights[k.replace('module.', '')] = v
        self.load_state_dict(new_weights, strict=False)
        print('Num of weights in restore dict {}'.format(len(new_weights.keys())))
        frozen_weights = 0
        for (name, param) in self.named_parameters():
            if (not ('classifier.4' in name)):
                param.requires_grad = False
                frozen_weights += 1
            else:
                print('Training : {}'.format(name))
        print('Number of frozen weights {}'.format(frozen_weights))
        assert (frozen_weights != 0), 'You are trying to fine tune, but no weights are frozen!!! Check the naming convention of the parameters'

    def forward(self, global_img_input, box_categories, box_input, video_label, is_inference=False):
        """Classify a clip from its box coordinates and categories.

        NOTE(review): `global_img_input` is only used for the batch size, and
        `video_label`/`is_inference` are unused here — presumably kept for
        interface parity with sibling models; confirm.
        """
        (b, _, _, _h, _w) = global_img_input.size()
        # (b, frames, boxes, 4) -> (b*boxes*frames, 4)
        box_input = box_input.transpose(2, 1).contiguous()
        box_input = box_input.view(((b * self.nr_boxes) * self.nr_frames), 4)
        box_categories = box_categories.long()
        box_categories = box_categories.transpose(2, 1).contiguous()
        box_categories = box_categories.view(((b * self.nr_boxes) * self.nr_frames))
        box_category_embeddings = self.category_embed_layer(box_categories)
        bf = self.coord_to_feature(box_input)
        bf = torch.cat([bf, box_category_embeddings], dim=1)
        bf = self.coord_category_fusion(bf)
        bf = bf.view(b, self.nr_boxes, self.nr_frames, self.coord_feature_dim)
        # Message to each box = mean of the OTHER boxes' features.
        spatial_message = bf.sum(dim=1, keepdim=True)
        spatial_message = ((spatial_message - bf) / (self.nr_boxes - 1))
        bf_and_message = torch.cat([bf, spatial_message], dim=3)
        bf_spatial = self.spatial_node_fusion(bf_and_message.view(((b * self.nr_boxes) * self.nr_frames), (- 1)))
        bf_spatial = bf_spatial.view(b, self.nr_boxes, self.nr_frames, self.coord_feature_dim)
        bf_temporal_input = bf_spatial.view(b, self.nr_boxes, (self.nr_frames * self.coord_feature_dim))
        bf_nonlocal = self.box_feature_fusion(bf_temporal_input.view((b * self.nr_boxes), (- 1)))
        # Non-local layers operate channel-first over the box axis: (b, dim, boxes).
        bf_nonlocal = bf_nonlocal.view(b, self.nr_boxes, self.coord_feature_dim).permute(0, 2, 1).contiguous()
        for i in range(self.nr_nonlocal_layers):
            bf_nonlocal = self.nonlocal_fusion[i](bf_nonlocal)
        # Average over boxes to get one clip-level feature.
        box_features = torch.mean(bf_nonlocal, dim=2)
        video_features = box_features
        cls_output = self.classifier(video_features)
        return cls_output
def get_git_hash(fallback='unknown', digits=None):
    """Return the current git HEAD sha.

    Args:
        fallback: value returned when git cannot be invoked (OSError).
        digits: if given, truncate the sha to this many leading characters.

    Raises:
        TypeError: if `digits` is neither None nor an int.
    """
    if digits is not None and not isinstance(digits, int):
        raise TypeError('digits must be None or an integer')
    try:
        raw = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
    except OSError:
        # git missing or not runnable — the fallback is returned untruncated.
        return fallback
    sha = raw.strip().decode('ascii')
    return sha if digits is None else sha[:digits]
def get_condensenet(num_layers, groups=4, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Create a CondenseNet model, optionally loading pretrained weights.

    Args:
        num_layers: architecture depth; only 74 is supported.
        groups: number of groups in the grouped convolutions.
        model_name: weight-file key, required when `pretrained` is True.
        pretrained: load pretrained weights from the model store.
        root: local directory for downloaded weights.

    Raises:
        ValueError: for unsupported `num_layers`, or `pretrained` without `model_name`.
    """
    if num_layers == 74:
        init_block_channels = 16
        layers = [4, 6, 8, 10, 8]
        growth_rates = [8, 16, 32, 64, 128]
    else:
        raise ValueError('Unsupported CondenseNet version with number of layers {}'.format(num_layers))
    # Per-stage channel schedule: each unit in a stage adds the stage's growth
    # rate on top of the previous unit's channel count (dense connectivity).
    channels = []
    last_channels = init_block_channels
    for stage_units, growth in zip(layers, growth_rates):
        stage = [last_channels + growth * (unit + 1) for unit in range(stage_units)]
        last_channels = stage[-1]
        channels.append(stage)
    net = CondenseNet(channels=channels, init_block_channels=init_block_channels, groups=groups, **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
class BeamNodeEz(BeamNode):
    """Beam-search lattice node with helpers for path reconstruction/visualization.

    Nodes form a DAG via their `prev` links (a node may gain extra predecessors
    through recombination). The "canonical path" always follows prev[0].
    """

    def __init__(self, prob: float, token_idx: int, prev: List, prev_score: List, min_len: int=10, finished: bool=False) -> None:
        super().__init__(prob, token_idx, prev, prev_score, min_len, finished)
        # Materialize all_token_idx / all_score / length from the canonical path.
        self.get_canonical_path()
        assert self.all_token_idx
        assert self.all_score
        assert self.length
        self.has_finished()

    def get_repr(self):
        return self

    def get_antecedent(self):
        """Return every transitive predecessor (level-wise walk, deduplicated)."""
        antecedents = []
        prev = self.prev
        while prev:
            antecedents += prev
            new_prev = []
            for p in prev:
                new_prev += p.prev
            new_prev = list(set(new_prev))
            # Skip nodes already collected to avoid revisiting shared ancestors.
            new_prev = [x for x in new_prev if (x not in antecedents)]
            prev = new_prev
        return antecedents

    def add_prev_node(self, node, score):
        """Attach `node` as an extra predecessor unless doing so would create a cycle."""
        if ((self in node.get_antecedent()) or (self == node)):
            return
        self.prev.append(node)
        self.prev_score.append(score)

    def visualization(self):
        """Collect the reachable sub-DAG as ({uid: node_info}, {edge_key: edge_info})."""
        (nodes, edges) = ({}, {})
        seen = {}

        def dfs(node: BeamNodeEz):
            if (not node):
                return
            if (node.uid in seen):
                return
            seen[node.uid] = True
            (my_prev, my_prev_score) = (node.prev, node.prev_score)
            for (p, ps) in zip(my_prev, my_prev_score):
                edge_info = {'src': p.uid, 'tgt': node.uid, 'score': ps}
                edges[f'{p.uid}_{node.uid}'] = edge_info
            nodes[node.uid] = {'uid': node.uid, 'text': node.token_str, 'tok_idx': node.token_idx}
            prevs = node.prev
            for p in prevs:
                dfs(p)
        dfs(self)
        return (nodes, edges)

    def print_lattice(self):
        """Log the places where two paths recombine (merge) in the lattice."""
        seen = {}
        recomb_units = []

        def dfs(node, par_nodes):
            if (not node):
                return
            if (node.uid in seen):
                # Reached for a second time via a different path: log the two
                # diverging suffixes past the shared prefix.
                last_path = seen[node.uid]
                cur_path = par_nodes
                last_path_tokens = [x.token_idx for x in last_path]
                cur_path_tokens = [x.token_idx for x in cur_path]
                (shared_prefix_len, _) = find_prefix(last_path_tokens, cur_path_tokens)
                last = last_path_tokens[shared_prefix_len:][::(- 1)]
                newer = cur_path_tokens[shared_prefix_len:][::(- 1)]
                shared_tokens = last_path_tokens[:shared_prefix_len][::(- 1)]
                logging.info(f'''
======{tokenizer.decode(last)} || {tokenizer.decode(shared_tokens)}
-----{tokenizer.decode(newer)} || {tokenizer.decode(shared_tokens)} ''')
                recomb_units.append([last, newer])
                seen[node.uid] = par_nodes
                return
            seen[node.uid] = par_nodes
            prevs = node.prev
            for p in prevs:
                dfs(p, (par_nodes + [node]))
        dfs(self, [])
        logging.info(f'There are {len(recomb_units)} recomb phrases in this case.')

    def get_canonical_path(self):
        """Walk prev[0] links to the root, filling all_score/all_token_idx/length
        in root-to-self order."""
        tokens = [self.token_idx]
        scores = [self.score]
        prev = self.prev
        while prev:
            prev = prev[0]
            tokens.append(prev.token_idx)
            scores.append(prev.score)
            prev = prev.prev
        self.all_score = scores[::(- 1)]
        self.all_token_idx = tokens[::(- 1)]
        self.length = len(tokens)

    def get_tokens_str(self):
        """Human-readable canonical path, root-first, joined with '<-'."""
        out = [self.token_str]
        prev = self.prev
        while prev:
            prev = prev[0]
            out.append(prev.token_str)
            prev = prev.prev
        out = out[::(- 1)]
        return '<-'.join(out)

    def get_token_idx_as_input(self):
        """Canonical token ids as a (1, length) LongTensor decoder prefix."""
        tokens = self.all_token_idx
        dec_prefix = torch.tensor([tokens], dtype=torch.long)
        return dec_prefix

    def _get_length(self):
        # Length of the canonical path (prev[0] chain), counting self.
        l = 1
        prev = self.prev
        while prev:
            prev = prev[0]
            l += 1
            prev = prev.prev
        return l

    def get_score_sum(self):
        """Sum of scores along the canonical path."""
        all_score = self.all_score
        return sum(all_score)

    def __repr__(self) -> str:
        return self.get_tokens_str()

    def get_tokens_match_suffix(self, suffix_tokens: List[int]):
        """Return a full root-to-self token sequence whose tail matches `suffix_tokens`,
        searching across all predecessor branches.

        NOTE(review): this mutates the caller's `suffix_tokens` list (pop) — confirm
        callers do not reuse it.

        Raises:
            Exception: when no branch matches the requested suffix.
        """
        reversed_tokens = []
        prev = [self]
        while (prev and suffix_tokens):
            last_target_token_idx = suffix_tokens.pop((- 1))
            new_prev = []
            for p in prev:
                token = p.token_idx
                if (token == last_target_token_idx):
                    new_prev += p.prev
            reversed_tokens.append(last_target_token_idx)
            prev = new_prev
            if (not prev):
                raise Exception('Not found!')
        # Suffix fully matched; extend with the canonical path of the frontier.
        while prev:
            prev = prev[0]
            reversed_tokens.append(prev.token_idx)
            prev = prev.prev
        return reversed_tokens[::(- 1)]
class Gym(object):
    """Keras training harness that cycles through a list of (optimizer, patience)
    stages, evaluating and checkpointing every `eval_interval` batches.

    `train_data` / `test_data` / `dev_data` are lists of arrays whose last
    element is the label array; the rest are model inputs.
    """

    def __init__(self, model, train_data, test_data, dev_data, optimizers, logger, models_save_dir):
        self.model = model
        self.logger = logger
        self.train_data = train_data
        self.test_data = test_data
        self.dev_data = dev_data
        self.model_save_dir = models_save_dir
        if (not os.path.exists(self.model_save_dir)):
            os.mkdir(self.model_save_dir)
        # Optimizers: ordered list of (optimizer, patience_in_eval_steps) pairs.
        self.optimizers = optimizers
        self.optimizer_id = (- 1)
        self.current_optimizer = None
        self.current_switch_step = (- 1)

    def switch_optimizer(self):
        """Advance to the next optimizer stage and recompile; exit when exhausted."""
        self.optimizer_id += 1
        if (self.optimizer_id >= len(self.optimizers)):
            print('Finished training...')
            exit(0)
        (self.current_optimizer, self.current_switch_step) = self.optimizers[self.optimizer_id]
        self.model.compile(optimizer=self.current_optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
        self.logger.set_model(self.model)
        print('Switching to number {} optimizer'.format(self.current_optimizer))

    def _shuffled_train_data(self):
        """Return self.train_data with all arrays shuffled in unison.

        BUGFIX: the original `random.shuffle(list(zip(train_data)))` shuffled a
        throwaway temporary list, i.e. it never actually shuffled the data.
        """
        order = list(range(len(self.train_data[0])))
        random.shuffle(order)
        return [d[order] for d in self.train_data]

    def train(self, batch_size=70, eval_interval=500, shuffle=True):
        """Run evaluate/train cycles forever (until the optimizer stages run out).

        BUGFIX: the original referenced bare globals `train_data` and `model`;
        all accesses now go through `self`.
        """
        print('train:\t', [d.shape for d in self.train_data])
        print('test:\t', [d.shape for d in self.test_data])
        print('dev:\t', [d.shape for d in self.dev_data])
        self.switch_optimizer()
        self.model.summary()
        (train_step, eval_step, no_progress_steps) = (0, 0, 0)
        train_batch_start = 0
        best_loss = 1000.0
        while True:
            data = self._shuffled_train_data() if shuffle else self.train_data
            train_inputs = data[:(- 1)]
            train_labels = data[(- 1)]
            (test_loss, dev_loss) = self.evaluate(eval_step=eval_step, batch_size=batch_size)
            eval_step += 1
            no_progress_steps += 1
            if (dev_loss < best_loss):
                best_loss = dev_loss
                no_progress_steps = 0
            # No dev improvement for the stage's patience window -> next optimizer.
            if (no_progress_steps >= self.current_switch_step):
                self.switch_optimizer()
                no_progress_steps = 0
            for _ in tqdm(range(eval_interval)):
                batch = slice(train_batch_start, (train_batch_start + batch_size))
                [loss, acc] = self.model.train_on_batch([train_input[batch] for train_input in train_inputs], train_labels[batch])
                self.logger.on_epoch_end(epoch=train_step, logs={'train_acc': acc, 'train_loss': loss})
                train_step += 1
                train_batch_start += batch_size
                if (train_batch_start > len(train_inputs[0])):
                    # Wrapped past the end of the data: restart (reshuffled).
                    train_batch_start = 0
                    if shuffle:
                        data = self._shuffled_train_data()
                        train_inputs = data[:(- 1)]
                        train_labels = data[(- 1)]

    def evaluate(self, eval_step, batch_size=None):
        """Evaluate on test and dev sets, log both, checkpoint the model.

        Returns:
            (test_loss, dev_loss)
        """
        [test_loss, test_acc] = self.model.evaluate(self.test_data[:(- 1)], self.test_data[(- 1)], batch_size=batch_size)
        [dev_loss, dev_acc] = self.model.evaluate(self.dev_data[:(- 1)], self.dev_data[(- 1)], batch_size=batch_size)
        self.logger.on_epoch_end(epoch=eval_step, logs={'test_acc': test_acc, 'test_loss': test_loss})
        self.logger.on_epoch_end(epoch=eval_step, logs={'dev_acc': dev_acc, 'dev_loss': dev_loss})
        self.model.save((self.model_save_dir + 'epoch={}-tloss={}-tacc={}.model'.format(eval_step, test_loss, test_acc)))
        return (test_loss, dev_loss)
def train(model, train_config):
    """Train the detector with TF1 graph-mode: build graph, restore/initialize,
    then loop running `train_op`, writing summaries and saving checkpoints.

    Args:
        model: detection model exposing build()/loss()/create_feed_dict() and
            a `model_config` attribute.
        train_config: protobuf-style config with iteration counts, intervals,
            optimizer settings and checkpoint/pretrained options.
    """
    model_config = model.model_config
    global_step_tensor = tf.Variable(0, trainable=False, name='global_step')
    max_iterations = train_config.max_iterations
    summary_interval = train_config.summary_interval
    checkpoint_interval = train_config.checkpoint_interval
    max_checkpoints = train_config.max_checkpoints_to_keep
    paths_config = model_config.paths_config
    logdir = paths_config.logdir
    if (not os.path.exists(logdir)):
        os.makedirs(logdir)
    checkpoint_dir = paths_config.checkpoint_dir
    if (not os.path.exists(checkpoint_dir)):
        os.makedirs(checkpoint_dir)
    checkpoint_path = ((checkpoint_dir + '/') + model_config.checkpoint_name)
    global_summaries = set([])
    prediction_dict = model.build()
    summary_histograms = train_config.summary_histograms
    summary_img_images = train_config.summary_img_images
    summary_bev_images = train_config.summary_bev_images
    (loss_dict, total_loss, rpn_score_2d_loss, rpn_acc_score_neg, rpn_acc_score_pos, rpn_class_loss, rpn_reg_loss, rpn_acc_all, rpn_acc_pos, refine_class_loss, refine_reg_loss, avod_acc_all, avod_acc_pos) = model.loss(prediction_dict)
    training_optimizer = optimizer_builder.build(train_config.optimizer, global_summaries, global_step_tensor)
    with tf.variable_scope('train_op'):
        train_op = slim.learning.create_train_op(total_loss, training_optimizer, clip_gradient_norm=1.0, global_step=global_step_tensor)
    saver = tf.train.Saver(max_to_keep=max_checkpoints, pad_step_number=True)
    tf.summary.scalar('training_loss', train_op)
    # Memory-usage summary is unavailable on Travis CI workers.
    is_travis = ('TRAVIS' in os.environ)
    if (not is_travis):
        tf.summary.scalar('max_bytes', tf.contrib.memory_stats.MaxBytesInUse())
    summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
    summary_merged = summary_utils.summaries_to_keep(summaries, global_summaries, histograms=summary_histograms, input_imgs=summary_img_images, input_bevs=summary_bev_images)
    allow_gpu_mem_growth = train_config.allow_gpu_mem_growth
    if allow_gpu_mem_growth:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = allow_gpu_mem_growth
        sess = tf.Session(config=config)
    else:
        sess = tf.Session()
    datetime_str = str(datetime.datetime.now())
    logdir = (logdir + '/train')
    train_writer = tf.summary.FileWriter(((logdir + '/') + datetime_str), sess.graph)
    init = tf.global_variables_initializer()
    # BUGFIX: step_base was only assigned on the pretrained-restore path; every
    # other branch left it unbound and crashed at the global_step computation.
    step_base = 0
    if (not train_config.overwrite_checkpoints):
        trainer_utils.load_checkpoints(checkpoint_dir, saver)
        all_variables = tf.get_collection_ref(tf.GraphKeys.GLOBAL_VARIABLES)
        sess.run(tf.variables_initializer(all_variables))
        # Restore everything except optimizer state and moving averages.
        var_list = [var for var in all_variables if (('beta' not in var.name) and ('Adam' not in var.name) and ('Average' not in var.name))]
        saver_part = tf.train.Saver(var_list=var_list)
        if train_config.use_pretrained:
            saver_part.restore(sess, train_config.pretrained)
            print('Model loaded from: {}'.format(train_config.pretrained))
            # Offset so training restarts from step 0 relative to the pretrain.
            step_base = tf.train.global_step(sess, global_step_tensor)
        elif (len(saver.last_checkpoints) > 0):
            checkpoint_to_restore = saver.last_checkpoints[(- 1)]
            saver_part.restore(sess, checkpoint_to_restore)
        else:
            sess.run(init)
    else:
        sess.run(init)
    global_step = (tf.train.global_step(sess, global_step_tensor) - step_base)
    print('Starting from step {} / {}'.format(global_step, max_iterations))
    last_time = time.time()
    for step in range(global_step, (max_iterations + 1)):
        if ((step % checkpoint_interval) == 0):
            global_step = (tf.train.global_step(sess, global_step_tensor) - step_base)
            saver.save(sess, save_path=checkpoint_path, global_step=global_step)
            print('Step {} / {}, Checkpoint saved to {}-{:08d}'.format(step, max_iterations, checkpoint_path, global_step))
        feed_dict = model.create_feed_dict()
        if ((step % summary_interval) == 0):
            current_time = time.time()
            time_elapsed = (current_time - last_time)
            last_time = current_time
            (train_op_loss, summary_out, rpn_score_2d_loss_np, rpn_acc_score_neg_np, rpn_acc_score_pos_np, rpn_class_loss_np, rpn_reg_loss_np, rpn_acc_all_np, rpn_acc_pos_np, refine_class_loss_np, refine_reg_loss_np, avod_acc_all_np, avod_acc_pos_np) = sess.run([train_op, summary_merged, rpn_score_2d_loss, rpn_acc_score_neg, rpn_acc_score_pos, rpn_class_loss, rpn_reg_loss, rpn_acc_all, rpn_acc_pos, refine_class_loss, refine_reg_loss, avod_acc_all, avod_acc_pos], feed_dict=feed_dict)
            print('Step {}, Total Loss {:0.3f} | Score {:0.3f}, Acc {:0.2f} {:0.2f} | RPN Class {:0.3f}, Reg {:0.3f}, Acc {:0.2f} {:0.2f} | Final Class {:0.3f}, Reg {:0.3f}, Acc {:0.2f} {:0.2f}'.format(step, train_op_loss, rpn_score_2d_loss_np, (rpn_acc_score_neg_np * 100), (rpn_acc_score_pos_np * 100), rpn_class_loss_np, rpn_reg_loss_np, (rpn_acc_all_np * 100), (rpn_acc_pos_np * 100), refine_class_loss_np, refine_reg_loss_np, (avod_acc_all_np * 100), (avod_acc_pos_np * 100)))
            train_writer.add_summary(summary_out, step)
        else:
            sess.run(train_op, feed_dict)
    train_writer.close()
def replace(input_dict, pop_key, new_key, new_value):
    """Return a deep copy of `input_dict` with `pop_key` removed and
    `new_key` mapped to `new_value`; the input dict is left untouched.

    Raises:
        KeyError: if `pop_key` is not present.
    """
    result = deepcopy(input_dict)
    del result[pop_key]
    result[new_key] = new_value
    return result
def drn_d_107(pretrained=False, **kwargs):
    """Construct a DRN-D-107 dilated residual network.

    Args:
        pretrained: if True, load the published 'drn-d-107' weights.
        **kwargs: forwarded to the DRN constructor.
    """
    layer_config = [1, 1, 3, 4, 23, 3, 2, 2]
    model = DRN(Bottleneck, layer_config, arch='D', **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['drn-d-107'])
        model.load_state_dict(state)
    return model
def test_creation_from_zZ():
    """from_zZ should preserve the shape of the corner tensors in box.z."""
    shape = (3, 1, 5)
    lower = torch.tensor(np.random.rand(*shape))
    # Upper corner is lower plus a non-negative offset, so z <= Z elementwise.
    upper = lower + torch.tensor(np.random.rand(*shape))
    box = SigmoidBoxTensor.from_zZ(lower, upper)
    assert box.z.shape == (3, 1, 5)
class Factory():
    """Applies a named latent distribution over flattened tensors.

    Tensors of shape [batch, ...] are reshaped so that the distribution's
    param_num() parameters per latent unit become contiguous before being
    handed to the underlying distribution object.
    """

    def __init__(self, latent_dist_name, *args, **kwargs):
        self.output_dist = get_net_factory('distribution', latent_dist_name, *args, **kwargs)
        assert (self.output_dist is not None), 'Cannot get the distribution'

    def __call__(self, input_tensor, gt_tensor):
        """Negative log-likelihood of `gt_tensor` under the distribution whose
        parameters are encoded in `input_tensor`, reshaped back to input layout."""
        (input_tensor, input_shape) = self.flatten_dist_tensor(input_tensor)
        gt_s = tmf.get_shape(gt_tensor)
        gt_tensor = tf.reshape(gt_tensor, [gt_s[0], np.prod(gt_s[1:])])
        (dist_param, _) = self.visible_dist(input_tensor)
        (nll, _) = self.output_dist.nll(dist_param, gt_tensor)
        nll = tf.reshape(nll, input_shape)
        return nll

    def flatten_dist_tensor(self, dist_tensor):
        """Flatten to [batch, hidden] with the param axis made outermost.

        Returns the flattened tensor plus the per-sample shape (last dim divided
        by param_num()) used to reshape results back.
        """
        s = tmf.get_shape(dist_tensor)
        total_hidden = np.prod(s[1:])
        # [b, units, params] -> [b, params, units] -> [b, hidden]
        input_tensor = tf.reshape(dist_tensor, [s[0], (total_hidden // self.param_num()), self.param_num()])
        input_tensor = tf.transpose(input_tensor, [0, 2, 1])
        input_tensor = tf.reshape(input_tensor, [s[0], total_hidden])
        s[(- 1)] //= self.param_num()
        return (input_tensor, s)

    def self_entropy(self, input_tensor):
        """Entropy of the encoded distribution, reshaped to input layout."""
        (input_tensor, input_shape) = self.flatten_dist_tensor(input_tensor)
        (dist_param, _) = self.visible_dist(input_tensor)
        se = self.output_dist.self_entropy(dist_param)
        se = tf.reshape(se, input_shape)
        return se

    def mean(self, input_tensor):
        """Distribution mean (used as the visualization), plus the raw parameter
        tensor reshaped to [batch, param_num, ...]."""
        (input_tensor, input_shape) = self.flatten_dist_tensor(input_tensor)
        (dist_param, param_tensor) = self.visible_dist(input_tensor)
        vis = self.output_dist.mean(dist_param)
        param_tensor = tf.reshape(param_tensor, ([input_shape[0], self.param_num()] + input_shape[1:]))
        vis = tf.reshape(vis, input_shape)
        return (vis, param_tensor)

    # Visualizing a distribution is defined as taking its mean.
    visualize = mean

    def visible_dist(self, input_tensor):
        """Transform a flat tensor into (distribution parameters, raw parameter tensor)."""
        s = tmf.get_shape(input_tensor)
        latent_dim = (np.prod(s[1:]) // self.param_num())
        param_tensor = self.output_dist.transform2param(input_tensor, latent_dim)
        dist_param = self.output_dist.parametrize(param_tensor, latent_dim)
        return (dist_param, param_tensor)

    def param_num(self):
        """Number of parameters the wrapped distribution needs per latent unit."""
        return self.output_dist.param_num()
def instantiate_multigpu_model_if_multiple_gpus(training_model):
    """Wrap the model for data-parallel training when more than one GPU is configured;
    otherwise return it unchanged."""
    gpu_count = len(cfg.gpus)
    if gpu_count > 1:
        return multi_gpu_model(training_model, gpu_count)
    return training_model
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
    """SessionRunHook that restores default SIGINT behavior while the session runs.

    TF may install its own SIGINT handler; this hook makes Ctrl-C terminate the
    process during run steps, then puts the original handler back at the end.
    """

    def __init__(self):
        # Remember whatever handler was installed before the session started.
        self._signal_fn = signal.getsignal(signal.SIGINT)

    def before_run(self, run_context):
        # Default disposition: Ctrl-C kills the process immediately.
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    def end(self, session):
        # Restore the previously installed handler.
        signal.signal(signal.SIGINT, self._signal_fn)
def main(opt):
    """Build a translator from the parsed options and run translation."""
    engine = make_translator(opt, report_score=True)
    engine.translate(opt.src_dir, opt.src, opt.tgt, opt.doc, opt.batch_size, opt.attn_debug)
class ScaleLROnPlateau(NamedTuple):
    """Immutable state for a reduce-LR-on-plateau schedule (JAX arrays so it can
    live inside jitted update functions)."""
    step_size: jnp.ndarray                    # current learning-rate scale
    minimum_loss: jnp.ndarray                 # best loss observed so far
    steps_without_reduction: jnp.ndarray      # steps since the loss last improved
    max_steps_without_reduction: jnp.ndarray  # patience before scaling down
    reduction_factor: jnp.ndarray             # multiplier applied on plateau
# NOTE(review): this bare call looks like a stripped decorator (perhaps
# originally `@register_function('unsqueeze')`) — confirm against the source.
_function('unsqueeze')
class AutogradUnsqueeze(AutogradFunction):
    """Autograd rule for unsqueeze: the backward pass squeezes the same dim."""

    def forward(ctx, input, dim):
        # Save the dim so backward can undo the unsqueeze.
        ctx.save_for_backward(dim)
        return input.unsqueeze(dim)

    def backward(ctx, grad_output):
        (dim,) = ctx.saved_tensors
        return grad_output.squeeze(dim)
class CompressedStatsTrackerPeak(CompressedStatsTracker):
    """Stats tracker scored primarily by peak size, with flops as a small tie-breaker."""

    __slots__ = CompressedStatsTracker.__slots__ + ('secondary_weight',)

    def __init__(self, hg, chi, secondary_weight=0.001):
        # Set the weight before the parent init in case it computes a score.
        self.secondary_weight = secondary_weight
        super().__init__(hg, chi)

    def score(self):
        """log2(peak size) plus a lightly weighted log2(flops) penalty."""
        primary = math.log2(self.peak_size)
        tie_break = math.log2(self.flops + 1) * self.secondary_weight
        return primary + tie_break
def main(args, trainqpath, trainrpath, trainlpath, devqpath, devrpath, devlpath, testqpath, testrpath, testlpath, weight_decay=0.0001, lr=0.001):
    """Train the RUBER unreferenced-metric model with early stopping, then evaluate.

    Loads source/target vocabularies and pretrained embeddings from ./data,
    optionally restores a checkpoint, trains up to 100 epochs with
    accuracy-driven early stopping (patience 5), checkpointing every epoch,
    and finally runs evaluation on the test split.
    """
    with open(f'data/src-vocab.pkl', 'rb') as f:
        srcv = pickle.load(f)
    with open(f'data/tgt-vocab.pkl', 'rb') as f:
        tgtv = pickle.load(f)
    src_embed = pickle.load(open('./data/src-embed.pkl', 'rb'))
    tgt_embed = pickle.load(open('./data/tgt-embed.pkl', 'rb'))
    net = RUBER_unrefer(srcv.get_vocab_size(), tgtv.get_vocab_size(), 300, args.hidden_size, SOURCE=srcv, TARGET=tgtv, src_embed=src_embed, tgt_embed=tgt_embed, num_layers=args.num_layers)
    if torch.cuda.is_available():
        net.cuda()
    print('[!] Finish init the vocab and net')
    loc = ('cuda' if torch.cuda.is_available() else 'cpu')
    load_best_model(net, args.exp_dir, load_file=args.init_checkpoint, map_location=loc)
    optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=weight_decay)
    epochs = 100
    # NOTE(review): grad_clip is defined but never used here — confirm whether
    # gradient clipping belongs inside train().
    grad_clip = 10
    early_stop_patience = 5
    pbar = range(1, (epochs + 1))
    (training_losses, validation_losses, validation_metrices) = ([], [], [])
    min_loss = np.inf
    best_metric = (- 1)
    patience = 0
    begin_time = time.time()
    idxx = 1
    for epoch in pbar:
        # Rebuild iterators each epoch; the dev iterator has a fixed order.
        train_iter = get_batch(trainqpath, trainrpath, trainlpath, args.batch_size, seed=args.seed)
        dev_iter = get_batch(devqpath, devrpath, devlpath, args.batch_size, args.seed, shuffle=False)
        training_loss = train(train_iter, net, optimizer, epoch=epoch)
        (validation_loss, validation_metric) = validation(dev_iter, net)
        training_losses.append(training_loss)
        validation_losses.append(validation_loss)
        validation_metrices.append(validation_metric)
        # Early stopping is driven by validation accuracy, not loss.
        if (best_metric < validation_metric):
            patience = 0
            best_metric = validation_metric
            min_loss = validation_loss
        else:
            patience += 1
        state = {'net': net.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch}
        torch.save(state, f'{args.exp_dir}/Acc_{validation_metric}_vloss_{validation_loss}_epoch_{epoch}.pt')
        print(f'Epoch: {epoch}, loss(train-dev): {training_loss}-{validation_loss}, Acc: {validation_metric}, patience: {patience}')
        idxx += 1
        sys.stdout.flush()
        if (patience > early_stop_patience):
            print('[!] early stop')
            break
    end_time = time.time()
    hour = math.floor(((end_time - begin_time) / 3600))
    minute = math.floor((((end_time - begin_time) - (3600 * hour)) / 60))
    second = (((end_time - begin_time) - (hour * 3600)) - (minute * 60))
    print(f'Cost {hour}h, {minute}m, {round(second, 2)}s')
    evaluate(args, testqpath, testrpath, testlpath)
def read_files(subdirs, module_file):
    """Concatenate the lines of `module_file` read from every subdir under
    DATA_SOURCE_DIR, in the order given; logs a line count per subdir."""
    all_lines = []
    for subdir in subdirs:
        path = os.path.join(DATA_SOURCE_DIR, subdir, module_file)
        with open(path, 'r') as f:
            lines = f.readlines()
        print('... read {} lines from {}'.format(len(lines), subdir))
        all_lines.extend(lines)
    return all_lines
class ShuffledResults(BaseResults):
    """Results wrapper pairing every pose with its left/right-flipped counterpart
    along a new second axis; scores and skeletons are left unset."""

    def __init__(self, random_theta: np.ndarray):
        flipped = flip_theta_series(random_theta)
        paired = np.stack([random_theta, flipped], axis=1)
        super().__init__(theta=paired, scores=None, skeletons=None)
class XGBoostOptuna(object):
    """XGBoost model tuned with Optuna (TPE sampler + median pruner)."""

    def __init__(self, task: str=BINARY_CLASSIFICATION, metric: str='accuracy', random_state=42):
        self.task = task
        self.seed = random_state
        if (metric is None):
            self.metric = default_task_metric[task]
        else:
            self.metric = metric
        assert (self.task in support_ml_task), ('Only Support ML Tasks: %s' % support_ml_task)
        # Regression -> XGBRegressor; both classification tasks -> XGBClassifier.
        if (self.task == REGRESSION):
            self.estimator = XGBRegressor
        else:
            self.estimator = XGBClassifier

    def fit(self, X_train, y_train, X_val=None, y_val=None, split_ratio=0.2, max_evals: int=100, timeout=3600):
        """Run the Optuna search, then retrain the estimator with the best params.

        Returns:
            The best parameter dict, or None if the search raised.
        """
        if (X_val is not None):
            (X_train, X_val) = self._validate_fit_data(train_data=X_train, tuning_data=X_val)
        else:
            logger.info(('Tuning data is None, the original train_data will be split: train vs val = %2s vs %2s' % ((1 - split_ratio), split_ratio)))
            (X_train, X_val, y_train, y_val) = train_test_split(X_train, y_train, test_size=split_ratio)
        objective = self.get_objective(X_train, y_train, X_val, y_val)
        # BUGFIX: log messages said "RandomForest" (copy/paste); this is XGBoost.
        logger.info('===== Beginning XGBoost Hpo training ======')
        logger.info(('Max Hpo trials: %s' % max_evals))
        logger.info(('Time Out: %s s ' % timeout))
        optimizer_direction = self.get_optimizer_direction(self.task, self.metric)
        self.n_warmup_steps = 20
        try:
            study = optuna.create_study(direction=optimizer_direction, sampler=optuna.samplers.TPESampler(seed=self.seed), pruner=optuna.pruners.MedianPruner(n_warmup_steps=self.n_warmup_steps))
            study.optimize(objective, n_trials=max_evals, timeout=timeout)
            trial = study.best_trial
            best_param = trial.params
            logger.info('====== Finished XGBoost Hpo training ======')
            logger.info('Get the best model params ...')
            logger.info('parms: %s', best_param)
            logger.info('Retraining on the whole dataset.')
            self.model = self.estimator(**best_param).fit(X_train, y_train)
        except optuna.exceptions.TrialPruned as e:
            raise e
        except Exception as e:
            print('Exception in XGBoostObjective', str(e))
            return None
        return best_param

    def predict(self, X_test):
        """Predict with the retrained best model."""
        return self.model.predict(X_test)

    def predict_proba(self, X_test):
        """Class probabilities from the retrained best model."""
        return self.model.predict_proba(X_test)

    def get_score_fn(self, task, metric):
        """Resolve a metric name (task default when None) to its scoring function."""
        if (metric is None):
            metric = default_task_metric[task]
        score_fn = get_metric_fn[metric]
        return score_fn

    def get_optimizer_direction(self, task, metric):
        """Map the metric to an Optuna study direction ('minimize'/'maximize').

        BUGFIX: the original condition was inverted (`metric is not None`),
        which overwrote — i.e. discarded — any caller-supplied metric.
        """
        if (metric is None):
            metric = default_task_metric[task]
        direction = default_optimizer_direction[metric]
        return direction

    def xgboost_objective(self, ml_task):
        """XGBoost objective string for the given ML task."""
        objective = 'reg:squarederror'
        if (ml_task == BINARY_CLASSIFICATION):
            objective = 'binary:logistic'
        elif (ml_task == MULTICLASS_CLASSIFICATION):
            objective = 'multi:softprob'
        else:
            objective = 'reg:squarederror'
        return objective

    def get_objective(self, X_train, y_train, X_val=None, y_val=None, **kwargs):
        """Build the Optuna objective closure over the train/validation split."""
        def objective(trial):
            obj = self.xgboost_objective(self.task)
            param = {'verbosity': 0, 'objective': obj, 'use_label_encoder': False, 'booster': trial.suggest_categorical('booster', ['gbtree', 'gblinear', 'dart']), 'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.3), 'max_depth': trial.suggest_int('max_depth', 2, 32, step=1), 'n_estimators': trial.suggest_int('n_estimators', 100, 1000, step=100), 'subsample': trial.suggest_float('subsample', 0.2, 1.0), 'colsample_bytree': trial.suggest_float('colsample_bytree', 0.2, 1.0), 'lambda': trial.suggest_float('lambda', 1e-08, 1.0, log=True), 'alpha': trial.suggest_float('alpha', 1e-08, 1.0, log=True)}
            # Tree boosters get extra structural hyper-parameters.
            if ((param['booster'] == 'gbtree') or (param['booster'] == 'dart')):
                param['max_depth'] = trial.suggest_int('max_depth', 1, 9)
                param['eta'] = trial.suggest_float('eta', 1e-08, 1.0, log=True)
                param['min_child_weight'] = trial.suggest_int('min_child_weight', 2, 10)
                param['gamma'] = trial.suggest_float('gamma', 1e-08, 1.0, log=True)
                param['grow_policy'] = trial.suggest_categorical('grow_policy', ['depthwise', 'lossguide'])
            if (param['booster'] == 'dart'):
                param['sample_type'] = trial.suggest_categorical('sample_type', ['uniform', 'weighted'])
                param['normalize_type'] = trial.suggest_categorical('normalize_type', ['tree', 'forest'])
                param['rate_drop'] = trial.suggest_float('rate_drop', 1e-08, 1.0, log=True)
                param['skip_drop'] = trial.suggest_float('skip_drop', 1e-08, 1.0, log=True)
            model = self.estimator(**param).fit(X_train, y_train)
            preds = model.predict(X_val)
            score_fn = self.get_score_fn(self.task, self.metric)
            score = score_fn(y_val, preds)
            return score
        return objective

    def _validate_fit_data(self, train_data, tuning_data=None):
        """Check train/tuning frames are DataFrames with matching, unique columns.

        BUGFIX: the duplicate-column check dereferenced `tuning_data.columns`
        before the None guard; it now only runs when tuning data is provided.
        """
        if (not isinstance(train_data, pd.DataFrame)):
            raise AssertionError(f'train_data is required to be a pandas DataFrame, but was instead: {type(train_data)}')
        if (tuning_data is not None):
            if (not isinstance(tuning_data, pd.DataFrame)):
                raise AssertionError(f'tuning_data is required to be a pandas DataFrame, but was instead: {type(tuning_data)}')
            if (len(set(tuning_data.columns)) < len(train_data.columns)):
                raise ValueError("Column names are not unique, please change duplicated column names (in pandas: train_data.rename(columns={'current_name':'new_name'})")
            train_features = np.array(train_data.columns)
            tuning_features = np.array(tuning_data.columns)
            if np.any((train_features != tuning_features)):
                raise ValueError('Column names must match between training and tuning data')
        return (train_data, tuning_data)
class BackgroundGenerator(threading.Thread):
    """Iterate *generator* on a background daemon thread.

    Up to *max_prefetch* items are produced ahead of the consumer and held in
    a bounded queue; a trailing None sentinel marks exhaustion.
    """

    def __init__(self, generator, max_prefetch=1):
        threading.Thread.__init__(self)
        self.queue = Queue.Queue(max_prefetch)
        self.generator = generator
        self.daemon = True
        # Start producing immediately; the queue bound applies backpressure.
        self.start()

    def run(self):
        # Producer: drain the wrapped generator, then signal completion.
        for item in self.generator:
            self.queue.put(item)
        self.queue.put(None)

    def next(self):
        item = self.queue.get()
        if item is None:
            raise StopIteration
        return item

    def __next__(self):
        return self.next()

    def __iter__(self):
        return self

    def __len__(self):
        # Only valid when the wrapped object itself supports len().
        return len(self.generator)
def _parse_main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('decls_file')
parser.add_argument('dest_dir')
parser.add_argument('n_workers', type=int)
parser.add_argument('rec_limit', type=int)
parser.add_argument('depth_limit', type=int)
parser.add_argument('weight_limit', type=int)
parser.add_argument('decls_per_shard', type=int)
return parser.parse_args() |
def test(flags, num_episodes: int=10):
    """Roll out a trained policy for *num_episodes* episodes and log returns.

    Loads the checkpoint named by flags (or ./latest/model.tar when no xpid
    is set), steps the environment until enough episodes finish, and logs the
    per-episode and average returns.
    """
    if flags.xpid is None:
        checkpointpath = './latest/model.tar'
    else:
        checkpointpath = os.path.expandvars(
            os.path.expanduser('%s/%s/%s' % (flags.savedir, flags.xpid, 'model.tar')))
    gym_env = create_env(flags, flags.level_name, 1)
    env = environment.Environment(gym_env)
    model = Net(gym_env._observation().shape, len(environment.DEFAULT_ACTION_SET), flags.use_lstm)
    model.eval()
    checkpoint = torch.load(checkpointpath, map_location='cpu')
    model.load_state_dict(checkpoint['model_state_dict'])
    observation = env.initial()
    returns = []
    while len(returns) < num_episodes:
        policy_outputs, _ = model(observation)
        observation = env.step(policy_outputs['action'])
        if observation['done'].item():
            episode_return = observation['episode_return'].item()
            returns.append(episode_return)
            logging.info('Episode ended after %d steps. Return: %.1f', observation['episode_step'].item(), episode_return)
    env.close()
    logging.info('Average returns over %i steps: %.1f', num_episodes, (sum(returns) / len(returns)))
class PanopticEvaluator(object):
    """Accumulate panoptic-segmentation predictions and score them.

    PNG payloads are written to *output_dir* as they arrive; metadata is kept
    in memory, merged across processes, and finally scored with pq_compute.
    """

    def __init__(self, ann_file, ann_folder, output_dir='panoptic_eval'):
        self.gt_json = ann_file
        self.gt_folder = ann_folder
        # Only the main process is responsible for creating the directory.
        if utils.is_main_process():
            if not os.path.exists(output_dir):
                os.mkdir(output_dir)
        self.output_dir = output_dir
        self.predictions = []

    def update(self, predictions):
        """Write each prediction's PNG payload to disk and keep its metadata."""
        for pred in predictions:
            with open(os.path.join(self.output_dir, pred['file_name']), 'wb') as fh:
                fh.write(pred.pop('png_string'))
        self.predictions += predictions

    def synchronize_between_processes(self):
        """Gather predictions from all distributed processes into one flat list."""
        gathered = utils.all_gather(self.predictions)
        merged = []
        for part in gathered:
            merged += part
        self.predictions = merged

    def summarize(self):
        """On the main process, dump predictions.json and run pq_compute.

        Returns the pq_compute result on the main process, None elsewhere.
        """
        if not utils.is_main_process():
            return None
        json_data = {'annotations': self.predictions}
        predictions_json = os.path.join(self.output_dir, 'predictions.json')
        with open(predictions_json, 'w') as fh:
            fh.write(json.dumps(json_data))
        return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir)
def get_igcv3(width_scale, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Build an IGCV3 network, optionally width-scaled and pretrained.

    width_scale scales every channel count (rounded up to an even number);
    model_name/root are used only when pretrained weights are requested.
    """
    init_block_channels = 32
    final_block_channels = 1280
    layers = [1, 4, 6, 8, 6, 6, 1]
    downsample = [0, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 32, 64, 96, 160, 320]
    # Group per-unit channel counts into stages: every downsampling unit
    # starts a new stage, otherwise the units extend the current stage.
    channels = [[]]
    for ch, count, ds in zip(channels_per_layers, layers, downsample):
        if ds != 0:
            channels.append([ch] * count)
        else:
            channels[-1] = channels[-1] + ([ch] * count)
    if (width_scale != 1.0):
        def make_even(x):
            return x if (x % 2) == 0 else (x + 1)
        channels = [[make_even(int(cij * width_scale)) for cij in ci] for ci in channels]
        init_block_channels = make_even(int(init_block_channels * width_scale))
        # The head only grows with upscaling; it is never shrunk.
        if (width_scale > 1.0):
            final_block_channels = make_even(int(final_block_channels * width_scale))
    net = IGCV3(channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
def _gen_mobilenet_v2(variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs):
    """Create a MobileNet-V2 model from its block-string architecture spec.

    channel_multiplier/depth_multiplier scale widths and repeats; when
    fix_stem_head is True the stem and the 1280-feature head are not scaled.
    """
    # One inner list per stage; each string encodes block type, repeats,
    # kernel, stride, expansion ratio and output channels.
    arch_def = [
        ['ds_r1_k3_s1_c16'],
        ['ir_r2_k3_s2_e6_c24'],
        ['ir_r3_k3_s2_e6_c32'],
        ['ir_r4_k3_s2_e6_c64'],
        ['ir_r3_k3_s1_e6_c96'],
        ['ir_r3_k3_s2_e6_c160'],
        ['ir_r1_k3_s1_e6_c320'],
    ]
    num_features = 1280 if fix_stem_head else round_channels(1280, channel_multiplier, 8, None)
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head),
        num_features=num_features,
        stem_size=32,
        fix_stem=fix_stem_head,
        channel_multiplier=channel_multiplier,
        norm_kwargs=resolve_bn_args(kwargs),
        act_layer=resolve_act_layer(kwargs, 'relu6'),
        **kwargs,
    )
    return _create_model(model_kwargs, default_cfgs[variant], pretrained)
class Seq2SeqLoggingCallback(pl.Callback):
    """PyTorch-Lightning callback for seq2seq runs: logs learning rates,
    writes metric/result files per step, and saves generated predictions."""
    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every param group of the first optimizer.
        lrs = {f'lr_group_{i}': param['lr'] for (i, param) in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    # NOTE(review): the bare '_zero_only' lines in this class look like
    # stripped '@rank_zero_only' decorators -- confirm against the original.
    _zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        """Append scalar metrics to a results file; optionally dump 'preds'."""
        logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****')
        metrics = trainer.callback_metrics
        # 'log'/'progress_bar'/'preds' hold containers, not scalars -- skip them.
        trainer.logger.log_metrics({k: v for (k, v) in metrics.items() if (k not in ['log', 'progress_bar', 'preds'])})
        od = Path(pl_module.hparams.output_dir)
        if (type_path == 'test'):
            results_file = (od / 'test_results.txt')
            generations_file = (od / 'test_generations.txt')
        else:
            # Per-step files so successive validation checks don't overwrite each other.
            results_file = (od / f'{type_path}_results/{trainer.global_step:05d}.txt')
            generations_file = (od / f'{type_path}_generations/{trainer.global_step:05d}.txt')
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, 'a+') as writer:
            for key in sorted(metrics):
                if (key in ['log', 'progress_bar', 'preds']):
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f'''{key}: {val:.6f}
'''
                writer.write(msg)
        if (not save_generations):
            return
        if ('preds' in metrics):
            content = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(content)
    _zero_only
    def on_train_start(self, trainer, pl_module):
        """Log total and trainable parameter counts (raw and in millions)."""
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            # Model may not be wrapped; fall back to the outer module's count.
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        trainer.logger.log_metrics({'n_params': npars, 'mp': (npars / 1000000.0), 'grad_mp': (n_trainable_pars / 1000000.0)})
    _zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        """Persist the metrics JSON, then write test result/generation files."""
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, 'test')
    _zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        """Persist the running metrics JSON after each validation epoch."""
        save_json(pl_module.metrics, pl_module.metrics_save_path)
class DropColumns(JuTransformer):
    """Transformer that removes the columns selected by *apply_to*.

    Fitting records which columns to drop (and a boolean keep-mask over all
    input columns); transform then drops them from any incoming frame.
    """

    def __init__(self, apply_to: ColumnTypesLike, row_select_col_type: Optional[ColumnTypesLike]=None, row_select_vals: Optional[Union[(str, int, list, bool)]]=None):
        super().__init__(apply_to=apply_to, needed_types=None, row_select_col_type=row_select_col_type, row_select_vals=row_select_vals)

    def _fit(self, X: pd.DataFrame, y: Optional[DataLike]=None) -> 'DropColumns':
        """Record the columns matched by the filter; keep-mask covers the rest."""
        mask = pd.Series(True, index=X.columns, dtype=bool)
        self.support_mask_ = mask
        try:
            self.drop_columns_ = self.filter_columns(X).columns
            mask[self.drop_columns_] = False
        except ValueError:
            # Nothing matched the filter: drop nothing, keep everything.
            self.drop_columns_ = []
        self.support_mask_ = mask.values
        return self

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """Return X without the columns recorded at fit time."""
        logger.debug(f'Dropping columns: {self.drop_columns_}')
        return X.drop(columns=self.drop_columns_)

    def get_support(self, indices: bool=False) -> Union[(ArrayLike, pd.Series)]:
        """Return the keep-mask, or the integer indices of kept columns."""
        if not indices:
            return self.support_mask_
        return np.arange(len(self.support_mask_))[self.support_mask_]
def predict(X):
    """Embed the token sequence X, run it through the instruction RNN, and
    return the linear read-out of the final hidden state."""
    embeddings = [get_embedding(token) for token in X]
    # Shape the chain as (seq_len, batch=1, embed_dim) for the RNN.
    instr_chain = torch.stack(embeddings).unsqueeze(1)
    _, (final_state, _) = model.instr_rnn(instr_chain, model.get_instr_init())
    return model.linear(final_state.squeeze()).squeeze()
class FSMTForConditionalGeneration(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable.

    Any attempt to instantiate it raises via requires_backends, telling the
    user which backend must be installed.
    """
    # Backends required by the real implementation.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_assassination_result(message: str, answer: str):
    """Extract the assassinated player's id from the model's output.

    The id is taken to be the LAST number appearing in message followed by
    answer (both coerced to str).

    Raises:
        IndexError: if no number occurs in either string (unchanged from the
        original behavior).
    """
    # Raw string for the regex; the old code also had a dead
    # 'player_id = []' assignment that was immediately overwritten.
    numbers = re.findall(r'\d+', str(message) + str(answer))
    return int(numbers[-1])
class ResultsManager():
    """Singleton that collects per-task results and renders summaries.

    Raw results live in a pandas DataFrame (method/task/value/scenario);
    repeated-run values are additionally accumulated in multi_run_res so
    mean/variance across runs can be reported.
    """
    _instance = None
    log = logging.getLogger('MAIN.RESULTS')
    # Class-level (shared) store: method -> scenario -> task -> [values].
    # Shared deliberately: the class is a singleton.
    multi_run_res = {}
    def __new__(cls, _=None):
        # Classic singleton: every ResultsManager(...) returns one instance.
        if (cls._instance is None):
            cls._instance = super(ResultsManager, cls).__new__(cls)
        return cls._instance
    def __init__(self, metric=''):
        # __init__ runs on every ResultsManager() call; only initialize once.
        if hasattr(self, 'results'):
            return
        columns = ['method', 'task', 'value', 'scenario']
        self.results = pd.DataFrame(columns=columns)
        self.metric = metric
    def has_results(self):
        """True once at least one result has been added."""
        return (not self.results.empty)
    def save_to_file(self, file_name=None):
        """Pickle the raw results DataFrame under results/."""
        if (not file_name):
            path = 'results/raw_results_df.pkl'
        else:
            path = ('results/' + file_name)
        self.results.to_pickle(path)
    def load_from_file(self, file_name=None):
        """Load a previously pickled results DataFrame from results/."""
        if (not file_name):
            path = 'results/raw_results_df.pkl'
        else:
            path = ('results/' + file_name)
        if (not exists(path)):
            raise Exception('Results file not found')
        self.results = pd.read_pickle(path)
    def add_result(self, method, task, value, scenario):
        """Append one result row and record it for multi-run statistics."""
        entry = pd.DataFrame([{'method': method, 'task': task, 'value': value, 'scenario': scenario}])
        self.results = pd.concat([self.results, entry], ignore_index=True)
        if (method not in self.multi_run_res):
            self.multi_run_res[method] = {}
        if (scenario not in self.multi_run_res[method]):
            self.multi_run_res[method][scenario] = {}
        if (task not in self.multi_run_res[method][scenario]):
            self.multi_run_res[method][scenario][task] = []
        self.multi_run_res[method][scenario][task].append(value)
    def print_multiple_runs_results(self):
        """Log mean/variance/stdev per method/scenario/task across runs."""
        if (not self.multi_run_res):
            return
        from statistics import mean, variance, stdev
        self.log.info(' Multi run results ')
        for (method, v2) in self.multi_run_res.items():
            self.log.info(f'''
Method: {method}''')
            for (scenario, v1) in v2.items():
                self.log.info(f' Scenario: {scenario}')
                for (task, v) in v1.items():
                    self.log.info(f' Task: {task}')
                    self.log.info(f' MEAN: {mean(v):.3f}, VAR: {variance(v):.3f}, STDEV {stdev(v):.3f}')
        self.log.info('')
    def reset_results(self):
        """Drop the raw results (and any cached summary)."""
        if hasattr(self, 'summary'):
            delattr(self, 'summary')
        columns = ['method', 'task', 'value', 'scenario']
        self.results = pd.DataFrame(columns=columns)
    def generate_summary(self):
        """Pivot the raw results into one method-by-task table per scenario."""
        self.summary = {}
        tasks = self.results.task.unique()
        methods = self.results.method.unique()
        self.summary['online'] = pd.DataFrame(columns=tasks)
        self.summary['offline'] = pd.DataFrame(columns=tasks)
        for method in methods:
            for scenario in ['online', 'offline']:
                df = self.results[((self.results['method'] == method) & self.results['scenario'].isin([scenario, None]))]
                if (not len(df)):
                    continue
                self.summary[scenario].loc[method] = list(df['value'])
    def print_summary(self):
        """Log the per-scenario summary tables."""
        if (not hasattr(self, 'summary')):
            self.generate_summary()
        self.log.info('Results summary:')
        pd.set_option('display.max_columns', None)
        for (scenario, scenario_summary) in self.summary.items():
            # Fix: the old calls passed ':' / '\n' as lazy %-args with no
            # placeholder in the message, which made the logging module raise
            # a formatting TypeError instead of printing the summary.
            self.log.info('%s:', scenario.upper())
            self.log.info('%s\n', scenario_summary)
    def print_summary_latex(self, max_cols=8):
        """Log the summary tables as LaTeX, splitting wide tables into chunks
        of at most *max_cols* columns."""
        self.log.info(f'''
{self.results}''')
        import warnings
        from math import ceil
        warnings.simplefilter(action='ignore', category=FutureWarning)
        if (not hasattr(self, 'summary')):
            self.generate_summary()
        res = ((('-' * 30) + 'START LATEX') + ('-' * 30))
        # NOTE(review): max_cols is clamped inside the loop, so a clamp from
        # the first scenario carries over to later ones -- confirm intended.
        for scenario in self.summary.keys():
            hdrs = self.summary[scenario].columns.values
            short_hdrs = [x for x in hdrs]
            length = len(hdrs)
            if ((max_cols == 0) or (max_cols > length)):
                max_cols = length
            start = 0
            end = min(max_cols, length)
            num_splits = ceil((length / max_cols))
            res += (('\n\\begin{table}\n\\centering\n\\caption{' + scenario.capitalize()) + '}\n')
            for x in range(num_splits):
                res += self.summary[scenario].to_latex(float_format='%.1f', columns=hdrs[start:end], header=short_hdrs[start:end])
                if (x < (num_splits - 1)):
                    res += '\\vspace{-.6mm}\\\\\n'
                start += max_cols
                # The final chunk absorbs any remainder columns.
                if (x == (num_splits - 2)):
                    end = length
                else:
                    end += max_cols
            res += '\\end{table}\n'
        res += ((('-' * 30) + 'END LATEX') + ('-' * 30))
        self.log.info(res)
    def plot_summary(self, file_name=None):
        """Plot value-vs-task curves per method, one facet per scenario."""
        import matplotlib.pyplot as plt
        import matplotlib.ticker as mticker
        import seaborn as sns
        sns.set_style('whitegrid')
        g = sns.FacetGrid(data=self.results, col='scenario', hue='method', legend_out=True, height=4, aspect=1.33)
        g.map(sns.lineplot, 'task', 'value', marker='o')
        g.add_legend()
        for axes in g.axes.flat:
            # Pin tick positions before relabeling to avoid matplotlib warnings.
            ticks_loc = axes.get_xticks()
            axes.xaxis.set_major_locator(mticker.FixedLocator(ticks_loc))
            axes.set_xticklabels(axes.get_xticklabels(), rotation=90)
            axes.tick_params(labelleft=True)
            axes.set_xlabel('Task')
            axes.set_ylabel(self.metric)
        path = (f'results/{file_name}' if file_name else 'results/plot_results.png')
        g.tight_layout()
        plt.savefig(path)
def build_fake_yaml():
    """Write a minimal quantization-tuning config to ./fake_yaml.yaml.

    The config is round-tripped through the YAML parser (load then dump) so
    the file on disk is normalized YAML rather than the raw literal.
    """
    fake_yaml = '\n model:\n name: fake_yaml\n framework: tensorflow\n inputs: x\n outputs: op_to_store\n device: cpu\n evaluation:\n accuracy:\n metric:\n topk: 1\n quantization:\n model_wise:\n weight:\n granularity: per_tensor\n scheme: sym\n dtype: int8\n algorithm: minmax\n tuning:\n strategy:\n name: basic\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n performance_only: True\n workspace:\n path: saved\n '
    y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
    # Fix: the 'with' block already closes the file on exit; the old explicit
    # f.close() inside the block was redundant.
    with open('fake_yaml.yaml', 'w', encoding='utf-8') as f:
        yaml.dump(y, f)
class PromptExtractor(nn.Module):
    """Abstract base for CLIP prompt extractors.

    Subclasses implement forward(); init_buffer() lets them prepare buffers
    from the CLIP model before the first forward pass.
    """

    def __init__(self):
        super().__init__()
        # No trainable prompt parameters by default; buffers start uninitialized.
        self.with_trainable_params = False
        self._buffer_init = False

    def init_buffer(self, clip_model):
        """Mark the buffers as initialized (subclasses fill them from clip_model)."""
        self._buffer_init = True

    def forward(self, noun_list: List[str], clip_model: nn.Module):
        raise NotImplementedError()
def levenshtein_similarity(string1, string2):
    """Similarity in [0, 1]: 1 minus the edit distance normalized by the
    longer string's length (the 1.0 floor avoids division by zero)."""
    longest = float(max(len(string1), len(string2), 1.0))
    return 1 - (levenshtein(string1, string2) / longest)
def randomRotation(imgs, label):
    """With probability 0.2, rotate all images and the label in place by one
    shared random angle in [-15, 15) degrees (bicubic resampling)."""
    mode = Image.BICUBIC
    if random.random() > 0.8:
        angle = np.random.randint(-15, 15)
        for idx, img in enumerate(imgs):
            imgs[idx] = img.rotate(angle, mode)
        label = label.rotate(angle, mode)
    return imgs, label
def test_space__volume(space: Space) -> None:
    """The space fixture must report a volume of exactly 1.0, as a float."""
    vol = space.volume()
    chex.assert_type(vol, float)
    assert vol == 1.0
def setup_seed(seed):
    """Seed every RNG in use (random, numpy, torch CPU and all CUDA devices)
    and force deterministic cuDNN behavior for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    # Deterministic kernels; disable the auto-tuner, which is nondeterministic.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
class Checkpointer(object):
    """Save and restore model/optimizer/scheduler state.

    The latest checkpoint path is tracked in a 'last_checkpoint' file inside
    save_dir so load() can resume without being told a filename.
    """
    def __init__(self, model, optimizer=None, scheduler=None, save_dir='', ckpt_path=None, save_to_disk=None, logger=None):
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.pretrained_path = ckpt_path
        self.finetune = False
        self.save_dir = save_dir
        self.save_to_disk = save_to_disk
        if (logger is None):
            logger = logging.getLogger(__name__)
        self.logger = logger
    def save(self, name, **kwargs):
        """Serialize state to <save_dir>/<name>.pth and tag it as latest.

        No-op when save_dir is empty or save_to_disk is falsy. Extra kwargs
        are stored alongside the state dicts (e.g. iteration counters).
        """
        self.logger.info(name)
        if (not self.save_dir):
            return
        if (not self.save_to_disk):
            return
        data = {}
        data['model'] = self.model.state_dict()
        if (self.optimizer is not None):
            data['optimizer'] = self.optimizer.state_dict()
        if (self.scheduler is not None):
            # Fix: removed leftover debug statement 'print(dir(self.scheduler))'.
            data['scheduler'] = self.scheduler.state_dict()
        data.update(kwargs)
        save_file = os.path.join(self.save_dir, '{}.pth'.format(name))
        self.logger.info('Saving checkpoint to {}'.format(save_file))
        torch.save(data, save_file)
        self.tag_last_checkpoint(save_file)
    def load(self, f=None):
        """Load a checkpoint (explicit dir f, or the latest in save_dir).

        Returns the remaining checkpoint dict after popping the state dicts;
        returns {} when no checkpoint exists.
        """
        if (f is not None):
            f = self.get_checkpoint_file(f)
        elif self.has_checkpoint(self.save_dir):
            f = self.get_checkpoint_file(self.save_dir)
        if (not f):
            self.logger.info('No checkpoint found. Initializing model from scratch')
            return {}
        self.logger.info('Loading checkpoint from {}'.format(f))
        checkpoint = self._load_file(f)
        self._load_model(checkpoint)
        if (('optimizer' in checkpoint) and self.optimizer):
            self.logger.info('Loading optimizer from {}'.format(f))
            self.optimizer.load_state_dict(checkpoint.pop('optimizer'))
        if (('scheduler' in checkpoint) and self.scheduler):
            self.logger.info('Loading scheduler from {}'.format(f))
            self.scheduler.load_state_dict(checkpoint.pop('scheduler'))
        return checkpoint
    def finetune_load(self, ckpt_path=None, f=None):
        """Load only model weights from a pretrained checkpoint for finetuning."""
        if (ckpt_path is not None):
            self.pretrained_path = ckpt_path
        self.finetune = True
        f = self.get_checkpoint_file(ckpt_path)
        assert (f is not None), 'Finetune should provide a valid ckpt path'
        self.logger.info('Loading pretrained model from {}'.format(f))
        checkpoint = self._load_file(f)
        self._load_model(checkpoint)
    def has_checkpoint(self, save_dir):
        """True if save_dir contains a 'last_checkpoint' tag file."""
        save_file = os.path.join(save_dir, 'last_checkpoint')
        return os.path.exists(save_file)
    def get_checkpoint_file(self, save_dir):
        """Return the tagged checkpoint path, or '' when the tag is missing."""
        save_file = os.path.join(save_dir, 'last_checkpoint')
        try:
            with open(save_file, 'r') as f:
                last_saved = f.read()
                last_saved = last_saved.strip()
        except IOError:
            # Missing/unreadable tag file: report 'no checkpoint'.
            last_saved = ''
        return last_saved
    def tag_last_checkpoint(self, last_filename):
        """Record last_filename as the most recent checkpoint."""
        save_file = os.path.join(self.save_dir, 'last_checkpoint')
        with open(save_file, 'w') as f:
            f.write(last_filename)
    def _load_file(self, f):
        # Always deserialize onto CPU; callers move tensors as needed.
        return torch.load(f, map_location=torch.device('cpu'))
    def _load_model(self, checkpoint):
        # Finetuning tolerates partial/mismatched weights; normal loads do not.
        if self.finetune:
            finetune_load_state_dict(self.model, checkpoint.pop('model'), logger=self.logger)
        else:
            load_state_dict(self.model, checkpoint.pop('model'), logger=self.logger)
def main():
    """Compress every PNG under valid/ into images/<name>.npz.

    Creates ./images if needed, then calls compress() per file with fixed
    filter count and checkpoint directory.
    """
    if not os.path.isdir('./images'):
        os.makedirs('./images')
    # Loop-invariant compression settings, hoisted out of the loop.
    num_filters = 128
    checkpoint_dir = 'models'
    for image_file in glob('valid/*.png'):
        print(image_file[6:])
        # Fix: renamed 'input' -> 'src_path'; the old name shadowed the builtin.
        src_path = image_file
        # 'valid/<name>.png' -> 'images/<name>.npz'
        out_path = ('images/' + image_file[6:(- 4)]) + '.npz'
        compress(src_path, out_path, num_filters, checkpoint_dir)
class SiameseBaseModel(EztorchBaseModule, ABC):
    """Base Lightning module for siamese/self-supervised models composed of a
    trunk, an optional projector and an optional predictor head.

    NOTE(review): several bare lines below look like stripped decorators --
    '_grad()' before local_split is presumably '@torch.no_grad()', and
    'learnable_params' / 'training_steps_per_epoch' / 'num_layers' read like
    former '@property' methods (configure_optimizers references
    self.training_steps_per_epoch without calling it). Confirm upstream.
    """
    def __init__(self, trunk: DictConfig, optimizer: DictConfig, projector: Optional[DictConfig]=None, predictor: Optional[DictConfig]=None, train_transform: Optional[DictConfig]=None, val_transform: Optional[DictConfig]=None, test_transform: Optional[DictConfig]=None, normalize_outputs: bool=True, num_global_crops: int=2, num_local_crops: int=0, num_splits: int=0, num_splits_per_combination: int=2, mutual_pass: bool=False) -> None:
        super().__init__()
        assert (not (mutual_pass and (num_splits > 0))), 'mutual_pass is not supported with num_splits > 0.'
        self.save_hyperparameters()
        # All submodules and transforms are instantiated from Hydra configs.
        self.trunk = hydra.utils.instantiate(trunk)
        self.projector = (hydra.utils.instantiate(projector) if (projector is not None) else None)
        # A predictor is only created when a projector exists to feed it.
        self.predictor = (hydra.utils.instantiate(predictor) if ((predictor is not None) and (self.projector is not None)) else None)
        self.train_transform = (hydra.utils.instantiate(train_transform) if (train_transform is not None) else None)
        self.val_transform = (hydra.utils.instantiate(val_transform) if (val_transform is not None) else None)
        self.test_transform = (hydra.utils.instantiate(test_transform) if (test_transform is not None) else None)
        self.optimizer_cfg = optimizer
        self.normalize_outputs = normalize_outputs
        self.num_global_crops = num_global_crops
        self.num_local_crops = num_local_crops
        self.num_crops = (self.num_global_crops + self.num_local_crops)
        self.num_splits = num_splits
        self.num_splits_per_combination = num_splits_per_combination
        self.use_split = (num_splits > 0)
        self.mutual_pass = mutual_pass
    def learnable_params(self) -> List[Parameter]:
        """Parameters of trunk, projector and predictor (those that exist)."""
        params = []
        params.extend(self.trunk.parameters())
        if (self.projector is not None):
            params.extend(self.projector.parameters())
        if (self.predictor is not None):
            params.extend(self.predictor.parameters())
        return params
    def training_steps_per_epoch(self) -> Optional[int]:
        """Steps per epoch from the datamodule, or None when unknown."""
        if ((self.trainer.datamodule is not None) and (self.trainer.datamodule.train_num_samples > 0)):
            return (self.trainer.datamodule.train_num_samples // self.trainer.datamodule.train_global_batch_size)
        else:
            return None
    def configure_optimizers(self) -> Dict[(Any, Any)]:
        """Instantiate optimizer (and optional scheduler) from the Hydra config."""
        (optimizer, scheduler) = hydra.utils.instantiate(self.optimizer_cfg, num_steps_per_epoch=self.optimizer_cfg.get('num_steps_per_epoch', self.training_steps_per_epoch), model=self)
        if (scheduler is None):
            return optimizer
        return {'optimizer': optimizer, 'lr_scheduler': scheduler}
    def forward(self, x: Tensor) -> Tensor:
        """trunk -> projector -> predictor, skipping heads that are None."""
        h = self.trunk(x)
        z = (self.projector(h) if (self.projector is not None) else h)
        q = (self.predictor(z) if (self.predictor is not None) else z)
        return q
    _grad()
    def local_split(self, x):
        """Tile each image into num_splits x num_splits patches, stacked on
        the batch dimension (column-major over the tile grid)."""
        side_indent = ((x.size((- 2)) // self.num_splits), (x.size((- 1)) // self.num_splits))
        col_splits = x.split(side_indent[1], dim=(- 1))
        x = [split for col_split in col_splits for split in col_split.split(side_indent[0], dim=(- 2))]
        x = torch.cat(x, dim=0)
        return x
    def multi_crop_shared_step(self, x: List[Tensor]) -> List[Dict[(str, Tensor)]]:
        """Run shared_step over all crops, batching them when mutual_pass."""
        if self.mutual_pass:
            if (self.num_local_crops > 0):
                return self.multi_crop_with_local_shared_step(x)
            else:
                return self.multi_crop_global_shared_step(x)
        else:
            return [self.shared_step(x_i) for x_i in x]
    def multi_crop_global_shared_step(self, x: List[Tensor]) -> List[Dict[(str, Tensor)]]:
        """Single forward over all (same-sized) crops, then un-chunk per crop."""
        outputs = [{} for _ in range(len(x))]
        global_tensor = torch.cat(x)
        global_output = self.shared_step(global_tensor)
        dict_keys = global_output.keys()
        for key in dict_keys:
            # Skip alias keys; they are filled from their source below.
            if (((key == 'z') and (self.projector is None)) or ((key == 'q') and (self.predictor is None))):
                continue
            global_key_output = global_output[key]
            chunked_global_key_output = global_key_output.chunk(len(x))
            for i in range(len(x)):
                outputs[i][key] = chunked_global_key_output[i]
        if (self.projector is None):
            for i in range(len(x)):
                outputs[i]['z'] = outputs[i]['h']
        if (self.predictor is None):
            for i in range(len(x)):
                outputs[i]['q'] = outputs[i]['z']
        return outputs
    def multi_crop_with_local_shared_step(self, x: List[Tensor]) -> List[Dict[(str, Tensor)]]:
        """Forward crops grouped by resolution (one trunk pass per group),
        then project/predict/normalize and un-chunk per crop."""
        # Boundaries between runs of equal-resolution crops.
        idx_crops = torch.cumsum(torch.unique_consecutive(torch.tensor([inp.shape[(- 1)] for inp in x]), return_counts=True)[1], 0)
        (start_idx, h) = (0, torch.empty(0, device=x[0].device))
        for (_, end_idx) in enumerate(idx_crops):
            h_output = self.trunk(torch.cat(x[start_idx:end_idx]))
            start_idx = end_idx
            h = torch.cat((h, h_output))
        z = (self.projector(h) if (self.projector is not None) else h)
        if (self.predictor is not None):
            q = self.predictor(z)
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
                q = nn.functional.normalize(q, dim=1)
        else:
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
            q = z
        outputs = [{} for _ in range(len(x))]
        global_output = {'h': h, 'z': z, 'q': q}
        dict_keys = global_output.keys()
        for key in dict_keys:
            if (((key == 'z') and (self.projector is None)) or ((key == 'q') and (self.predictor is None))):
                continue
            global_key_output = global_output[key]
            chunked_global_key_output = global_key_output.chunk(len(x))
            for i in range(len(x)):
                outputs[i][key] = chunked_global_key_output[i]
        if (self.projector is None):
            for i in range(len(x)):
                outputs[i]['z'] = outputs[i]['h']
        if (self.predictor is None):
            for i in range(len(x)):
                outputs[i]['q'] = outputs[i]['z']
        return outputs
    def shared_step(self, x: Tensor) -> Dict[(str, Tensor)]:
        """Forward pass used in training; handles the local-split averaging
        variant when use_split is on."""
        if self.use_split:
            batch_size = x.size(0)
            x = self.local_split(x)
        h = self.trunk(x)
        if self.use_split:
            # Average trunk features over every combination of patch groups.
            h_splits = list(h.split((h.size(0) // (self.num_splits ** 2)), dim=0))
            h = torch.cat(list(map((lambda x: (sum(x) / self.num_splits_per_combination)), list(combinations(h_splits, r=self.num_splits_per_combination)))), dim=0)
        z = (self.projector(h) if (self.projector is not None) else h)
        if (self.predictor is not None):
            q = self.predictor(z)
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
                q = nn.functional.normalize(q, dim=1)
            if self.use_split:
                q_split = q.split(batch_size, dim=0)
        else:
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
            if self.use_split:
                q_split = z.split(batch_size, dim=0)
            q = z
        if self.use_split:
            return {'h': h, 'z': z, 'q': q, 'q_split': q_split}
        return {'h': h, 'z': z, 'q': q}
    def val_shared_step(self, x: Tensor) -> Dict[(str, Tensor)]:
        """Plain forward (no splitting) used for validation."""
        h = self.trunk(x)
        z = (self.projector(h) if (self.projector is not None) else h)
        if (self.predictor is not None):
            q = self.predictor(z)
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
                q = nn.functional.normalize(q, dim=1)
        else:
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
            q = z
        return {'h': h, 'z': z, 'q': q}
    def test_shared_step(self, x: Tensor) -> Dict[(str, Tensor)]:
        """Plain forward (no splitting) used for testing."""
        h = self.trunk(x)
        z = (self.projector(h) if (self.projector is not None) else h)
        if (self.predictor is not None):
            q = self.predictor(z)
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
                q = nn.functional.normalize(q, dim=1)
        else:
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
            q = z
        return {'h': h, 'z': z, 'q': q}
    def up_to_projector_shared_step(self, x: Tensor) -> Dict[(str, Tensor)]:
        """Forward through trunk and projector only (no predictor)."""
        h = self.trunk(x)
        z = (self.projector(h) if (self.projector is not None) else h)
        if self.hparams.normalize_outputs:
            z = nn.functional.normalize(z, dim=1)
        return {'h': h, 'z': z}
    def compute_loss(self):
        # Abstract hook: subclasses define the training loss.
        pass
    def training_step(self, batch: Iterable[Any], batch_idx: int):
        # Abstract hook: subclasses define the training step.
        pass
    def validation_step(self, batch: Iterable[Any], batch_idx: int):
        """Apply the (optional) val transform in fp32, then forward."""
        x = batch['input']
        if (self.val_transform is not None):
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    x = self.val_transform(x)
        return self.val_shared_step(x)
    def test_step(self, batch: Iterable[Any], batch_idx: int):
        """Apply the (optional) test transform in fp32, then forward."""
        x = batch['input']
        if (self.test_transform is not None):
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    x = self.test_transform(x)
        return self.test_shared_step(x)
    def num_layers(self) -> int:
        """Total layer count across trunk, projector and (optional) predictor."""
        return ((self.trunk.num_layers + self.projector.num_layers) + (self.predictor.num_layers if (self.predictor is not None) else 0))
    def get_param_layer_id(self, name: str) -> int:
        """Map a parameter name to a global layer id (for layer-wise LR decay)."""
        if name.startswith('trunk.'):
            return self.trunk.get_param_layer_id(name[len('trunk.'):])
        elif name.startswith('projector.'):
            # Projector layers are offset by the trunk's layer count.
            return (self.trunk.num_layers + self.projector.get_param_layer_id(name[len('projector.'):]))
        elif name.startswith('predictor.'):
            return ((self.trunk.num_layers + self.projector.num_layers) + self.predictor.get_param_layer_id(name[len('predictor.'):]))
        else:
            raise NotImplementedError(f'{name} not found.')
(version='2.0')
class SequentialSampler(Sampler):
    """Yield dataset indices in order, optionally sharded across Horovod ranks.

    In distributed mode each process visits indices rank, rank+size,
    rank+2*size, ...; running distributed with a single process is rejected.
    """

    def __init__(self, dataset, distributed):
        self.whole_dataset = dataset
        self.distributed = distributed

    def __iter__(self):
        self.process_rank = 0
        self.process_size = 1
        if self.distributed:
            import horovod.tensorflow as hvd
            hvd.init()
            self.process_rank = hvd.rank()
            self.process_size = hvd.size()
            if self.process_size < 2:
                raise EnvironmentError("The program is now trying to traverse the distributed TensorFlow DefaultDataLoader in only one process. If you do not want to use distributed DataLoader, please set 'distributed: False'. Or If you want to use distributed DataLoader, please set 'distributed: True' and launch multiple processes.")
        return iter(range(self.process_rank, len(self.whole_dataset), self.process_size))

    def __len__(self):
        return len(self.whole_dataset)
def train_baseline(mlp, data, train_batches, test_batches, num_epochs, learning_rate_mlp, device_id=0):
    """Train *mlp* with MSE loss and evaluate each epoch.

    Returns:
        (train_rmse, test_rmse) of the FINAL epoch (sqrt of mean batch MSE).
    """
    optim = OPTIMIZER(mlp.parameters(), lr=learning_rate_mlp)
    criterion = nn.MSELoss()
    if torch.cuda.is_available():
        criterion = criterion.cuda(device_id)
    for epoch in range(num_epochs):
        train_losses = []
        print('training')
        for (iteration, batch_idxs) in enumerate(train_batches):
            mlp.train()
            optim.zero_grad()
            loss = eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id)
            loss.backward()
            optim.step()
            train_losses.append(loss.item())
            print('\riteration {}/{}'.format((iteration + 1), train_batches.num_iterations()), end='')
        print()
        test_losses = []
        print('evaluating')
        for (iteration, batch_idxs) in enumerate(test_batches):
            mlp.eval()
            # Fix: pass device_id here too -- the evaluation call previously
            # fell back to the default device, mismatching the CUDA-placed
            # criterion when a non-default device_id was used for training.
            loss = eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id)
            test_losses.append(loss.item())
            print('\riteration {}/{}'.format((iteration + 1), test_batches.num_iterations()), end='')
        print()
        train_loss = np.sqrt(np.mean(train_losses))
        test_loss = np.sqrt(np.mean(test_losses))
        print('epoch {}/{} - avg train loss: {}, test loss: {}'.format((epoch + 1), num_epochs, train_loss, test_loss))
    return (train_loss, test_loss)
_torch
class BenchmarkTest(unittest.TestCase):
def check_results_dict_not_empty(self, results):
for model_result in results.values():
for (batch_size, sequence_length) in zip(model_result['bs'], model_result['ss']):
result = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(result)
def test_inference_no_configs(self):
MODEL_ID = 'sshleifer/tiny-gpt2'
benchmark_args = PyTorchBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_configs_only_pretrain(self):
MODEL_ID = 'sgugger/tiny-distilbert-classification'
benchmark_args = PyTorchBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_torchscript(self):
MODEL_ID = 'sshleifer/tiny-gpt2'
benchmark_args = PyTorchBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
((torch_device == 'cpu'), 'Cant do half precision')
def test_inference_fp16(self):
MODEL_ID = 'sshleifer/tiny-gpt2'
benchmark_args = PyTorchBenchmarkArguments(models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_model_no_architectures(self):
MODEL_ID = 'sshleifer/tiny-gpt2'
config = AutoConfig.from_pretrained(MODEL_ID)
config.architectures = None
benchmark_args = PyTorchBenchmarkArguments(models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_train_no_configs(self):
    """Training benchmark loading the model purely from its identifier."""
    model_id = 'sshleifer/tiny-gpt2'
    args = PyTorchBenchmarkArguments(
        models=[model_id],
        training=True,
        inference=False,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    outcome = PyTorchBenchmark(args).run()
    self.check_results_dict_not_empty(outcome.time_train_result)
    self.check_results_dict_not_empty(outcome.memory_train_result)
# NOTE(review): restored from a bare tuple expression `((torch_device == 'cpu'), "Can't do half precision")`
# — a no-op that looks like a stripped `@unittest.skipIf` decorator. With the decorator back,
# the fp16 training test is skipped on CPU as intended (assumes `unittest` and `torch_device`
# are imported at module top — TODO confirm).
@unittest.skipIf(torch_device == 'cpu', "Can't do half precision")
def test_train_no_configs_fp16(self):
    """fp16 training benchmark; requires a device with half-precision support."""
    MODEL_ID = 'sshleifer/tiny-gpt2'
    benchmark_args = PyTorchBenchmarkArguments(models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False)
    benchmark = PyTorchBenchmark(benchmark_args)
    results = benchmark.run()
    self.check_results_dict_not_empty(results.time_train_result)
    self.check_results_dict_not_empty(results.memory_train_result)
def test_inference_with_configs(self):
    """Inference benchmark with an explicitly supplied model config."""
    model_id = 'sshleifer/tiny-gpt2'
    config = AutoConfig.from_pretrained(model_id)
    args = PyTorchBenchmarkArguments(
        models=[model_id],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    outcome = PyTorchBenchmark(args, configs=[config]).run()
    self.check_results_dict_not_empty(outcome.time_inference_result)
    self.check_results_dict_not_empty(outcome.memory_inference_result)
def test_inference_encoder_decoder_with_configs(self):
    """Inference benchmark on an encoder-decoder model with an explicit config."""
    model_id = 'sshleifer/tinier_bart'
    config = AutoConfig.from_pretrained(model_id)
    args = PyTorchBenchmarkArguments(
        models=[model_id],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    outcome = PyTorchBenchmark(args, configs=[config]).run()
    self.check_results_dict_not_empty(outcome.time_inference_result)
    self.check_results_dict_not_empty(outcome.memory_inference_result)
def test_train_with_configs(self):
    """Training benchmark with an explicitly supplied model config."""
    model_id = 'sshleifer/tiny-gpt2'
    config = AutoConfig.from_pretrained(model_id)
    args = PyTorchBenchmarkArguments(
        models=[model_id],
        training=True,
        inference=False,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    outcome = PyTorchBenchmark(args, configs=[config]).run()
    self.check_results_dict_not_empty(outcome.time_train_result)
    self.check_results_dict_not_empty(outcome.memory_train_result)
def test_train_encoder_decoder_with_configs(self):
    """Training benchmark on an encoder-decoder model with an explicit config."""
    model_id = 'sshleifer/tinier_bart'
    config = AutoConfig.from_pretrained(model_id)
    args = PyTorchBenchmarkArguments(
        models=[model_id],
        training=True,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    outcome = PyTorchBenchmark(args, configs=[config]).run()
    self.check_results_dict_not_empty(outcome.time_train_result)
    self.check_results_dict_not_empty(outcome.memory_train_result)
def test_save_csv_files(self):
    """run() with save_to_csv=True must write every configured CSV file."""
    model_id = 'sshleifer/tiny-gpt2'
    with tempfile.TemporaryDirectory() as tmp_dir:
        def csv_path(name):
            return os.path.join(tmp_dir, name)
        args = PyTorchBenchmarkArguments(
            models=[model_id],
            training=True,
            inference=True,
            save_to_csv=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            inference_time_csv_file=csv_path('inf_time.csv'),
            train_memory_csv_file=csv_path('train_mem.csv'),
            inference_memory_csv_file=csv_path('inf_mem.csv'),
            train_time_csv_file=csv_path('train_time.csv'),
            env_info_csv_file=csv_path('env.csv'),
            multi_process=False,
        )
        PyTorchBenchmark(args).run()
        # Every requested output must exist after the run.
        for name in ('inf_time.csv', 'train_time.csv', 'inf_mem.csv', 'train_mem.csv', 'env.csv'):
            self.assertTrue(Path(csv_path(name)).exists())
def test_trace_memory(self):
    """Line-by-line memory tracing must yield filled summaries and a log file."""
    model_id = 'sshleifer/tiny-gpt2'

    def _assert_summary_filled(summary):
        # A usable summary exposes all four aggregate views.
        for field in ('sequential', 'cumulative', 'current', 'total'):
            self.assertTrue(hasattr(summary, field))

    with tempfile.TemporaryDirectory() as tmp_dir:
        args = PyTorchBenchmarkArguments(
            models=[model_id],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            log_filename=os.path.join(tmp_dir, 'log.txt'),
            log_print=True,
            trace_memory_line_by_line=True,
            multi_process=False,
        )
        outcome = PyTorchBenchmark(args).run()
        _assert_summary_filled(outcome.inference_summary)
        _assert_summary_filled(outcome.train_summary)
        self.assertTrue(Path(os.path.join(tmp_dir, 'log.txt')).exists())
class uniform(BaseInitializer):
    """Initializer drawing from a uniform distribution on [a, b).

    The lowercase class name is kept intentionally — it is part of the
    public API (callers reference it by this exact name).
    """

    def __init__(self, a=-0.0, b=1.0):
        # Forward the bounds to the base initializer, and keep local copies.
        super().__init__(a=a, b=b)
        self.a = a
        self.b = b
class Seq2SeqEncoder(_EncoderBase):
    """Abstract interface for encoders that map a sequence of vectors to a
    sequence of vectors. Subclasses must report their input/output feature
    sizes and whether they run bidirectionally.
    """
    def get_input_dim(self) -> int:
        """Return the feature dimension each input vector is expected to have."""
        raise NotImplementedError
    def get_output_dim(self) -> int:
        """Return the feature dimension of each encoded output vector."""
        raise NotImplementedError
    def is_bidirectional(self) -> bool:
        """Return True if the encoder processes the sequence in both directions."""
        raise NotImplementedError
class ConcatFuse(HybridBlock):
    """Fuse two feature maps: channel-wise concat, then 3x3 conv + BN + ReLU."""

    def __init__(self, channels=64):
        super(ConcatFuse, self).__init__()
        self.channels = channels
        # Post-fusion refinement stack applied to the concatenated maps.
        self.post = nn.HybridSequential(prefix='post')
        for layer in (
            nn.Conv2D(channels, kernel_size=3, strides=1, padding=1, dilation=1),
            nn.BatchNorm(),
            nn.Activation('relu'),
        ):
            self.post.add(layer)

    def hybrid_forward(self, F, xh, xl):
        # Concatenate high- and low-level features along the channel axis.
        fused = F.concat(xh, xl, dim=1)
        return self.post(fused)
def has_key(x, y):
    """Membership test that also honors Python 2-style objects exposing `has_key`.

    Delegates to `x.has_key(y)` when available; otherwise falls back to the
    standard `in` operator.
    """
    return x.has_key(y) if hasattr(x, 'has_key') else (y in x)
class FlaxUpsample2D(nn.Module):
    """2x nearest-neighbor upsampling followed by a 3x3 same-padding conv (NHWC)."""
    in_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.in_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Double the spatial resolution, then refine with the convolution.
        upsampled = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method='nearest',
        )
        return self.conv(upsampled)
def ema_loss(x, running_mean, alpha):
    """Compute the EMA-corrected loss term and the updated running mean.

    `t_exp` is mean(exp(x)) computed stably via logsumexp and detached from the
    graph; the running mean is seeded with it on the first call (running_mean == 0)
    and EMA-updated afterwards.
    """
    t_exp = torch.exp(torch.logsumexp(x, 0) - math.log(x.shape[0])).detach()
    if running_mean == 0:
        # First call: seed the running mean directly.
        updated_mean = t_exp
    else:
        updated_mean = ema(t_exp, alpha, running_mean.item())
    t_log = EMALoss.apply(x, updated_mean)
    return t_log, updated_mean
def make_predictions(df, model, window):
    """Attach auto-regressive predictions as a 'predicted_deaths' column.

    Runs `get_auto_reg_predictions` on every row; mutates `df` in place and
    returns it for convenience.
    """
    predictions = [
        get_auto_reg_predictions(model, df.iloc[idx], window)
        for idx in range(len(df))
    ]
    df['predicted_deaths'] = predictions
    return df
def catx_network_with_dropout_extras() -> Type[CATXHaikuNetwork]:
    """Provide the CATX network class variant that includes dropout extras.

    NOTE(review): returns the class itself, not an instance; presumably a pytest
    fixture — confirm the decorator sits above this chunk.
    """
    return CatxNetworkWithDropoutExtras
class RLAv1_ResNet(nn.Module):
    """ResNet backbone with Recurrent Layer Aggregation (RLA v1).

    A small recurrent state `h` (with `rla_channel` channels) flows alongside the
    backbone features: each residual block returns an auxiliary map `y` that is
    projected by a per-stage 1x1 conv and accumulated into `h` (BN + tanh + 3x3
    recurrent conv after each block). The final `h` is concatenated with the
    backbone output before pooling, so `fc` consumes 512*expansion + rla_channel
    features.

    Args:
        block: residual block class; must expose `.expansion` and accept the RLA kwargs.
        layers: blocks per stage (4-element sequence).
        num_classes: classifier output size.
        rla_channel: channel width of the recurrent aggregation state.
        SE: enable squeeze-and-excitation inside blocks.
        ECA: None, or a 4-element sequence of per-stage ECA kernel sizes.
        zero_init_last_bn: zero-init each bottleneck's last BN (identity-at-init trick).
        groups, width_per_group, replace_stride_with_dilation, norm_layer:
            same meaning as in torchvision's ResNet.
    """

    def __init__(self, block, layers, num_classes=1000, rla_channel=32, SE=False, ECA=None, zero_init_last_bn=True, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(RLAv1_ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        if ECA is None:
            ECA = [None] * 4
        elif len(ECA) != 4:
            raise ValueError('argument ECA should be a 4-element tuple, got {}'.format(ECA))
        self.rla_channel = rla_channel
        self.flops = False  # presumably toggled externally for FLOP counting — TODO confirm
        self.groups = groups
        self.base_width = width_per_group
        # Standard ResNet stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        conv_outs = [None] * 4
        recurrent_convs = [None] * 4
        stages = [None] * 4
        stage_bns = [None] * 4
        (stages[0], stage_bns[0], conv_outs[0], recurrent_convs[0]) = self._make_layer(block, 64, layers[0], rla_channel=rla_channel, SE=SE, ECA_size=ECA[0])
        (stages[1], stage_bns[1], conv_outs[1], recurrent_convs[1]) = self._make_layer(block, 128, layers[1], rla_channel=rla_channel, SE=SE, ECA_size=ECA[1], stride=2, dilate=replace_stride_with_dilation[0])
        (stages[2], stage_bns[2], conv_outs[2], recurrent_convs[2]) = self._make_layer(block, 256, layers[2], rla_channel=rla_channel, SE=SE, ECA_size=ECA[2], stride=2, dilate=replace_stride_with_dilation[1])
        (stages[3], stage_bns[3], conv_outs[3], recurrent_convs[3]) = self._make_layer(block, 512, layers[3], rla_channel=rla_channel, SE=SE, ECA_size=ECA[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.conv_outs = nn.ModuleList(conv_outs)
        self.recurrent_convs = nn.ModuleList(recurrent_convs)
        self.stages = nn.ModuleList(stages)
        self.stage_bns = nn.ModuleList(stage_bns)
        self.tanh = nn.Tanh()
        self.bn2 = norm_layer(rla_channel)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Classifier sees backbone features concatenated with the RLA state.
        self.fc = nn.Linear(512 * block.expansion + rla_channel, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_last_bn:
            # Zero-init the last BN of each bottleneck so blocks start as identity.
            for m in self.modules():
                if isinstance(m, RLAv1_Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)

    def _make_layer(self, block, planes, blocks, rla_channel, SE, ECA_size, stride=1, dilate=False):
        """Build one stage: its blocks, per-block BNs for the RLA state, the stage's
        1x1 output projection, and its 3x3 recurrent conv.
        """
        conv_out = conv1x1(planes * block.expansion, rla_channel)
        recurrent_conv = conv3x3(rla_channel, rla_channel)
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade striding for dilation across this stage.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(conv1x1(self.inplanes, planes * block.expansion, stride), norm_layer(planes * block.expansion))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=previous_dilation, norm_layer=norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        bns = [norm_layer(rla_channel) for _ in range(blocks)]
        return (nn.ModuleList(layers), nn.ModuleList(bns), conv_out, recurrent_conv)

    def _forward_impl(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        (batch, _, height, width) = x.size()
        # Fix: allocate the recurrent state on the input's device rather than the
        # previous hard-coded cuda-if-available choice, which crashed on CPU inputs
        # when CUDA was present and broke non-default GPU placement.
        h = torch.zeros(batch, self.rla_channel, height, width, device=x.device)
        for (layers, bns, conv_out, recurrent_conv) in zip(self.stages, self.stage_bns, self.conv_outs, self.recurrent_convs):
            for (layer, bn) in zip(layers, bns):
                (x, y, h, identity) = layer(x, h)
                # Accumulate the block's projected features into the RLA state.
                y_out = conv_out(y)
                h = h + y_out
                h = bn(h)
                h = self.tanh(h)
                h = recurrent_conv(h)
        h = self.bn2(h)
        h = self.tanh(h)
        # Concatenate final features with the aggregated state for classification.
        x = torch.cat((x, h), dim=1)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
def test_Eta_e(white_noise):
    """Eta_e computed on white noise should be close to 2 (within [1.9, 2.1])."""
    space = FeatureSpace(featureList=['Eta_e'])
    space = space.calculateFeature(white_noise)
    value = space.result(method='array')
    assert 1.9 <= value <= 2.1
def parse_args():
    """Parse command-line options for launching a training run.

    Returns:
        argparse.Namespace with the parsed options (script/config selection,
        distributed-launch settings, and rank-0 endpoint).
    """
    parser = argparse.ArgumentParser(description='Parse args for training')
    parser.add_argument('--script', type=str, help='training script name')
    parser.add_argument('--config', type=str, default='baseline', help='yaml configure file name')
    parser.add_argument('--save_dir', type=str, help='root directory to save checkpoints, logs, and tensorboard')
    parser.add_argument('--mode', type=str, choices=['single', 'multiple', 'multi_node'], default='multiple', help='train on single gpu or multiple gpus')
    parser.add_argument('--nproc_per_node', type=int, help='number of GPUs per node')
    parser.add_argument('--use_lmdb', type=int, choices=[0, 1], default=0)
    parser.add_argument('--script_prv', type=str, help='training script name')
    parser.add_argument('--config_prv', type=str, default='baseline', help='yaml configure file name')
    parser.add_argument('--use_wandb', type=int, choices=[0, 1], default=0)
    parser.add_argument('--distill', type=int, choices=[0, 1], default=0)
    parser.add_argument('--script_teacher', type=str, help='teacher script name')
    parser.add_argument('--config_teacher', type=str, help='teacher yaml configure file name')
    parser.add_argument('--rank', type=int, help='Rank of the current process.')
    parser.add_argument('--world-size', type=int, help='Number of processes participating in the job.')
    parser.add_argument('--ip', type=str, default='127.0.0.1', help='IP of the current rank 0.')
    # Fix: default was the string '20000' with type=int; it only worked because
    # argparse reparses string defaults. Use the int directly for consistency.
    parser.add_argument('--port', type=int, default=20000, help='Port of the current rank 0.')
    args = parser.parse_args()
    return args
def test_digits_cosine_stochastic():
    """Facility-location selection with the stochastic optimizer must reproduce
    the stored reference ranking, gains, and subset."""
    selector = FacilityLocationSelection(100, 'cosine', optimizer='stochastic', random_state=0)
    selector.fit(X_digits)
    assert_array_equal(selector.ranking, digits_cosine_stochastic_ranking)
    assert_array_almost_equal(selector.gains, digits_cosine_stochastic_gains, 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
_incremental_state
class FairseqIncrementalDecoder(FairseqDecoder):
    """Base class for decoders that cache state across time steps and must have
    that state reordered when beam-search hypotheses are reshuffled."""

    def __init__(self, dictionary):
        super().__init__(dictionary)

    def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
        raise NotImplementedError

    def extract_features(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
        raise NotImplementedError

    def reorder_incremental_state(self, incremental_state, new_order):
        """Propagate a beam reorder to every child module that caches state."""
        visited = set()
        for module in self.modules():
            # Skip ourselves, modules without the hook, and already-handled ones.
            if module == self or module in visited or not hasattr(module, 'reorder_incremental_state'):
                continue
            visited.add(module)
            updated = module.reorder_incremental_state(incremental_state, new_order)
            if updated is not None:
                # A module may return a replacement state for subsequent modules.
                incremental_state = updated

    def set_beam_size(self, beam_size):
        """Broadcast the beam size to child modules; no-op when unchanged."""
        if getattr(self, '_beam_size', -1) == beam_size:
            return
        visited = set()

        def _apply(module):
            if module != self and hasattr(module, 'set_beam_size') and module not in visited:
                visited.add(module)
                module.set_beam_size(beam_size)

        self.apply(_apply)
        self._beam_size = beam_size
def boolean_string(string):
    """Convert a 'true'/'false' string (any case) to a bool.

    Reports an error via invalidInputError for anything else.
    """
    normalized = string.lower()
    if normalized not in {'false', 'true'}:
        invalidInputError(False, 'Not a valid boolean string')
    return normalized == 'true'
def read_heterograph_pyg(raw_dir, add_inverse_edge=False, additional_node_files=None, additional_edge_files=None, binary=False):
    """Load heterogeneous graphs from `raw_dir` and convert them into PyG Data objects.

    Args:
        raw_dir: directory holding the raw (binary or CSV) graph dump.
        add_inverse_edge: also materialize reversed edges when reading.
        additional_node_files: extra per-node-type attribute files to attach
            (None means none; fixed from a mutable-default-list argument).
        additional_edge_files: extra per-edge-type attribute files to attach.
        binary: read the binary dump instead of CSVs.

    Returns:
        list of torch_geometric Data objects with dict-of-type attributes.
    """
    # Fix: the defaults were shared mutable lists (classic Python pitfall);
    # a None sentinel keeps the exact same behavior without the shared state.
    if additional_node_files is None:
        additional_node_files = []
    if additional_edge_files is None:
        additional_edge_files = []
    if binary:
        graph_list = read_binary_heterograph_raw(raw_dir, add_inverse_edge)
    else:
        graph_list = read_csv_heterograph_raw(raw_dir, add_inverse_edge, additional_node_files=additional_node_files, additional_edge_files=additional_edge_files)
    pyg_graph_list = []
    print('Converting graphs into PyG objects...')
    for graph in tqdm(graph_list):
        g = Data()
        g.__num_nodes__ = graph['num_nodes_dict']
        g.num_nodes_dict = graph['num_nodes_dict']
        # Edge indices keyed by (src_type, relation, dst_type) triplets.
        g.edge_index_dict = {}
        for triplet, edge_index in graph['edge_index_dict'].items():
            g.edge_index_dict[triplet] = torch.from_numpy(edge_index)
        del graph['edge_index_dict']  # drop the numpy copy early to reduce peak memory
        if graph['edge_feat_dict'] is not None:
            g.edge_attr_dict = {triplet: torch.from_numpy(feat) for triplet, feat in graph['edge_feat_dict'].items()}
            del graph['edge_feat_dict']
        if graph['node_feat_dict'] is not None:
            g.x_dict = {nodetype: torch.from_numpy(feat) for nodetype, feat in graph['node_feat_dict'].items()}
            del graph['node_feat_dict']
        # Attach any requested extra per-type attributes, then free the source dicts.
        for key in additional_node_files:
            g[key] = {nodetype: torch.from_numpy(val) for nodetype, val in graph[key].items()}
            del graph[key]
        for key in additional_edge_files:
            g[key] = {triplet: torch.from_numpy(val) for triplet, val in graph[key].items()}
            del graph[key]
        pyg_graph_list.append(g)
    return pyg_graph_list
def file_len(fname):
    """Count the number of lines in a file.

    Reads in binary mode and counts line boundaries without loading the whole
    file. Fix: the original left the loop variable unbound for an empty file
    and raised UnboundLocalError; an empty file now returns 0.
    """
    count = 0
    with open(fname, 'rb') as f:
        for count, _ in enumerate(f, start=1):
            pass
    return count
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.