code stringlengths 101 5.91M |
|---|
class ResnetGenerator(nn.Module):
    """Resnet-based generator: conv stem with one 2x downsampling, a residual
    trunk, then one 2x upsampling back to `output_nc` channels with Tanh.

    Args:
        input_nc: number of input channels.
        output_nc: number of output channels.
        ngf: base filter count.
        norm_layer: normalization layer constructor applied after each conv.
        use_dropout: forwarded to the residual blocks.
        n_blocks: asserted non-negative.
            NOTE(review): the trunk below is hard-coded to 3 residual blocks and
            ignores this value — confirm that is intentional upstream.
        gpu_ids: devices for data-parallel execution (None -> single device).
        padding_type: padding mode forwarded to the residual blocks.
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=InstanceNorm2d, use_dropout=False, n_blocks=9, gpu_ids=None, padding_type='reflect'):
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        # Fix: avoid the mutable-default-argument pitfall (gpu_ids=[] was shared
        # across instances); None now means "no GPUs configured".
        self.gpu_ids = gpu_ids if gpu_ids is not None else []
        # Stem: 7x7 conv on reflection-padded input, then two 3x3 convs
        # (the second one downsamples by stride 2).
        model = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, stride=1, bias=True),
            norm_layer(ngf),
            nn.ReLU(True),
            nn.Conv2d(ngf, (2 * ngf), kernel_size=3, padding=1, stride=1, bias=True),
            norm_layer((2 * ngf)),
            nn.ReLU(True),
            nn.Conv2d((2 * ngf), (4 * ngf), kernel_size=3, padding=1, stride=2, bias=True),
            norm_layer((4 * ngf)),
            nn.ReLU(True),
        ]
        # Residual trunk at 4*ngf channels.
        for i in range(3):
            model += [ResnetBlock((4 * ngf), padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=True)]
        # Upsample by 2 and project back down to output_nc, squashed to [-1, 1].
        model += [
            nn.ConvTranspose2d((4 * ngf), (2 * ngf), kernel_size=3, stride=2, padding=1, output_padding=1, bias=True),
            norm_layer((2 * ngf)),
            nn.ReLU(True),
            nn.Conv2d((2 * ngf), ngf, kernel_size=3, padding=1, bias=True),
            norm_layer(ngf),
            nn.ReLU(True),
            nn.Conv2d(ngf, output_nc, kernel_size=7, padding=3),
            nn.Tanh(),
        ]
        self.model = nn.Sequential(*model)

    def forward(self, input):
        """Run the generator; splits the batch across GPUs only for CUDA float
        inputs when more than one device id is configured."""
        if ((len(self.gpu_ids) > 1) and isinstance(input.data, torch.cuda.FloatTensor)):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
class LearningSchedule(object):
    """Linearly interpolates from `initial_p` to `final_p` over
    `schedule_timesteps` steps, starting at step `start_schedule`.

    Before the start the value is `initial_p`; after the schedule completes
    it stays at `final_p`.
    """

    def __init__(self, start_schedule, schedule_timesteps, initial_p=1.0, final_p=0.05):
        self.initial_p = initial_p
        self.final_p = final_p
        self.schedule_timesteps = schedule_timesteps
        self.start_schedule = start_schedule

    def value(self, t):
        """Return the scheduled value at timestep `t`."""
        elapsed = max(0.0, float(t - self.start_schedule))
        progress = min(elapsed / self.schedule_timesteps, 1.0)
        return self.initial_p + progress * (self.final_p - self.initial_p)
class BasicType(ValueType):
    """ValueType backed by a primitive constructor (e.g. int, float, str):
    serialization is plain str(), parsing re-applies the constructor."""

    def __init__(self, cur_type):
        self.type = cur_type

    def to_string(self, value):
        """Render `value` as its string form."""
        return str(value)

    def from_string(self, value):
        """Parse a string back into the wrapped type."""
        constructor = self.type
        return constructor(value)
def patch_graph(graph):
    """Truncate every node's 'label' at the first '-' (mutates in place).

    Returns the same graph object for call chaining.
    """
    for node in graph.nodes():
        label = graph.nodes[node]['label']
        graph.nodes[node]['label'] = label.split('-', 1)[0]
    return graph
class AggregateTransformer(TransformerMixin):
    """Aggregate event-level rows into one feature row per case.

    Categorical columns are one-hot encoded and aggregated per case
    (presence flag when `boolean` is True, frequency count otherwise);
    numeric columns get mean/max/min/sum/std summaries. The column set is
    frozen on the first transform and later outputs are aligned to it.
    """

    def __init__(self, case_id_col, cat_cols, num_cols, boolean=False, fillna=True):
        self.case_id_col = case_id_col  # grouping key identifying a case
        self.cat_cols = cat_cols        # categorical event attributes
        self.num_cols = num_cols        # numeric event attributes
        self.boolean = boolean          # True -> presence (max), False -> counts (sum)
        self.fillna = fillna            # replace NaNs (e.g. std of a single event) with 0
        self.columns = None             # frozen on first transform
        self.fit_time = 0               # kept for API symmetry; fit is a no-op
        self.transform_time = 0

    def fit(self, X, y=None):
        """Stateless no-op fit (all state is derived during transform)."""
        return self

    def transform(self, X, y=None):
        """Return one aggregated feature row per case id."""
        start = time()
        if (len(self.num_cols) > 0):
            # Fix: the old dict-of-funcs spec ({'mean': np.mean, ...}) is the
            # deprecated "nested renaming" form and raises SpecificationError
            # on modern pandas; a list of named reductions is equivalent.
            dt_numeric = X.groupby(self.case_id_col)[self.num_cols].agg(['mean', 'max', 'min', 'sum', 'std'])
            # Flatten the (column, stat) MultiIndex into '<col>_<stat>' names.
            dt_numeric.columns = ['_'.join(col).strip() for col in dt_numeric.columns.values]
        dt_transformed = pd.get_dummies(X[self.cat_cols])
        dt_transformed[self.case_id_col] = X[self.case_id_col]
        del X  # free the raw frame before the groupby
        if self.boolean:
            dt_transformed = dt_transformed.groupby(self.case_id_col).max()
        else:
            dt_transformed = dt_transformed.groupby(self.case_id_col).sum()
        if (len(self.num_cols) > 0):
            dt_transformed = pd.concat([dt_transformed, dt_numeric], axis=1)
            del dt_numeric
        if self.fillna:
            dt_transformed = dt_transformed.fillna(0)
        if (self.columns is None):
            self.columns = dt_transformed.columns
        else:
            # Align to the first-seen schema: add missing columns as zeros,
            # drop/reorder the rest.
            missing_cols = [col for col in self.columns if (col not in dt_transformed.columns)]
            for col in missing_cols:
                dt_transformed[col] = 0
            dt_transformed = dt_transformed[self.columns]
        self.transform_time = (time() - start)
        return dt_transformed

    def get_feature_names(self):
        """Output column names (None until the first transform)."""
        return self.columns
class TFCamembertForCausalLM(metaclass=DummyObject):
    # Import-time placeholder used when TensorFlow is not installed:
    # instantiating it raises an informative error via requires_backends
    # instead of failing on the missing dependency.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class MgpstrModel(metaclass=DummyObject):
    # Import-time placeholder used when PyTorch is not installed:
    # instantiating it raises an informative error via requires_backends.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class ChunkTabPreprocessor(TabPreprocessor):
    """TabPreprocessor fitted incrementally, one dataframe chunk at a time,
    via `partial_fit`; fitting completes after `n_chunks` chunks."""
    # NOTE(review): the four bare tuples below are inert expressions — they
    # look like an old-name/new-name alias table that lost its decorator (or
    # attribute assignment) during a code transform; confirm against upstream.
    ('with_attention', 'for_transformer')
    ('cat_embed_cols', 'embed_cols')
    ('scale', 'scale_cont_cols')
    ('cols_and_bins', 'quantization_setup')
    def __init__(self, n_chunks: int, cat_embed_cols: Optional[Union[(List[str], List[Tuple[(str, int)]])]]=None, continuous_cols: Optional[List[str]]=None, cols_and_bins: Optional[Dict[(str, List[float])]]=None, cols_to_scale: Optional[Union[(List[str], str)]]=None, default_embed_dim: int=16, with_attention: bool=False, with_cls_token: bool=False, shared_embed: bool=False, verbose: int=1, *, scale: bool=False, already_standard: List[str]=None, **kwargs):
        """Configure the preprocessor. Quantization is handled here (via
        `cols_and_bins`) rather than delegated to the parent, which is why
        quantization_setup is forced to None in the super() call."""
        super(ChunkTabPreprocessor, self).__init__(cat_embed_cols=cat_embed_cols, continuous_cols=continuous_cols, quantization_setup=None, cols_to_scale=cols_to_scale, auto_embed_dim=False, embedding_rule='google', default_embed_dim=default_embed_dim, with_attention=with_attention, with_cls_token=with_cls_token, shared_embed=shared_embed, verbose=verbose, scale=scale, already_standard=already_standard, **kwargs)
        self.n_chunks = n_chunks
        self.chunk_counter = 0  # chunks consumed so far
        self.cols_and_bins = cols_and_bins
        if (self.cols_and_bins is not None):
            self.quantizer = Quantizer(self.cols_and_bins, **self.quant_args)
        # Lazily flipped by _prepare_embed / _prepare_continuous on first chunk.
        self.embed_prepared = False
        self.continuous_prepared = False
    def partial_fit(self, chunk: pd.DataFrame) -> 'ChunkTabPreprocessor':
        """Update the label encoder / scaler with one chunk; marks the
        preprocessor as fitted once the n_chunks-th chunk has been seen."""
        self.chunk_counter += 1
        chunk_adj = (self._insert_cls_token(chunk) if self.with_cls_token else chunk.copy())
        # column_idx is rebuilt from scratch on every chunk.
        self.column_idx: Dict[(str, int)] = {}
        if (self.cat_embed_cols is not None):
            if (not self.embed_prepared):
                # First chunk: resolve the embed columns and build the encoder.
                chunk_emb = self._prepare_embed(chunk_adj)
                self.label_encoder = LabelEncoder(columns_to_encode=chunk_emb.columns.tolist(), shared_embed=self.shared_embed, with_attention=self.with_attention)
                self.label_encoder.partial_fit(chunk_emb)
            else:
                chunk_emb = chunk_adj[self.cat_embed_cols]
                self.label_encoder.partial_fit(chunk_emb)
            self.column_idx.update({k: v for (v, k) in enumerate(chunk_emb.columns)})
        if (self.continuous_cols is not None):
            if (not self.continuous_prepared):
                chunk_cont = self._prepare_continuous(chunk_adj)
            else:
                chunk_cont = chunk[self.continuous_cols]
            if (self.standardize_cols is not None):
                self.scaler.partial_fit(chunk_cont[self.standardize_cols].values)
            # Continuous columns are indexed after the categorical ones.
            self.column_idx.update({k: (v + len(self.column_idx)) for (v, k) in enumerate(chunk_cont.columns)})
        if (self.chunk_counter == self.n_chunks):
            # All chunks seen: freeze the embedding input spec.
            # With attention the embedding dim is implicit; otherwise it comes
            # from self.embed_dim resolved in _prepare_embed.
            self.cat_embed_input: List[Union[(Tuple[(str, int)], Tuple[(str, int, int)])]] = []
            for (k, v) in self.label_encoder.encoding_dict.items():
                if self.with_attention:
                    self.cat_embed_input.append((k, len(v)))
                else:
                    self.cat_embed_input.append((k, len(v), self.embed_dim[k]))
            self.is_fitted = True
        return self
    def fit(self, chunk: pd.DataFrame) -> 'ChunkTabPreprocessor':
        """Alias for partial_fit, kept for API compatibility."""
        return self.partial_fit(chunk)
    def _prepare_embed(self, chunk: pd.DataFrame) -> pd.DataFrame:
        """Resolve embedding column names (and per-column dims when not using
        attention); returns the embedding sub-frame of `chunk`."""
        if self.with_attention:
            embed_colname = self.cat_embed_cols
        elif isinstance(self.cat_embed_cols[0], tuple):
            # Explicit (column, dim) pairs were provided.
            self.embed_dim: Dict = dict(self.cat_embed_cols)
            embed_colname = [emb[0] for emb in self.cat_embed_cols]
        else:
            self.embed_dim = {e: self.default_embed_dim for e in self.cat_embed_cols}
            embed_colname = self.cat_embed_cols
        self.embed_prepared = True
        return chunk[embed_colname]
    def _prepare_continuous(self, chunk: pd.DataFrame) -> pd.DataFrame:
        """Decide which continuous columns get standardized and build the
        scaler; returns the continuous sub-frame of `chunk`."""
        if (not hasattr(self, 'standardize_cols')):
            if (self.cols_to_scale is not None):
                # 'all' is a sentinel meaning every continuous column.
                self.standardize_cols = (self.cols_to_scale if (self.cols_to_scale != 'all') else self.continuous_cols)
            elif self.scale:
                if (self.already_standard is not None):
                    self.standardize_cols = [c for c in self.continuous_cols if (c not in self.already_standard)]
                else:
                    self.standardize_cols = self.continuous_cols
            else:
                self.standardize_cols = None
        if (not hasattr(self, 'scaler')):
            self.scaler = StandardScaler(**self.scale_args)
        elif self.verbose:
            warnings.warn('Continuous columns will not be normalised')
        self.continuous_prepared = True
        return chunk[self.continuous_cols]
    def __repr__(self) -> str:
        # Build a constructor-style repr showing only non-default arguments;
        # the placeholders are filled from instance attributes at the end.
        list_of_params: List[str] = []
        if (self.n_chunks is not None):
            list_of_params.append('n_chunks={n_chunks}')
        if (self.cat_embed_cols is not None):
            list_of_params.append('cat_embed_cols={cat_embed_cols}')
        if (self.continuous_cols is not None):
            list_of_params.append('continuous_cols={continuous_cols}')
        if (self.cols_and_bins is not None):
            list_of_params.append('cols_and_bins={cols_and_bins}')
        if (self.cols_to_scale is not None):
            list_of_params.append('cols_to_scale={cols_to_scale}')
        if (self.default_embed_dim != 16):
            list_of_params.append('default_embed_dim={default_embed_dim}')
        if self.with_attention:
            list_of_params.append('with_attention={with_attention}')
        if self.with_cls_token:
            list_of_params.append('with_cls_token={with_cls_token}')
        if self.shared_embed:
            list_of_params.append('shared_embed={shared_embed}')
        if (self.verbose != 1):
            list_of_params.append('verbose={verbose}')
        if self.scale:
            list_of_params.append('scale={scale}')
        if (self.already_standard is not None):
            list_of_params.append('already_standard={already_standard}')
        if (len(self.quant_args) > 0):
            list_of_params.append(', '.join([((f'{k}' + '=') + f'{v}') for (k, v) in self.quant_args.items()]))
        if (len(self.scale_args) > 0):
            list_of_params.append(', '.join([((f'{k}' + '=') + f'{v}') for (k, v) in self.scale_args.items()]))
        all_params = ', '.join(list_of_params)
        return f'ChunkTabPreprocessor({all_params.format(**self.__dict__)})'
class Actor(nn.Module):
    """Abstract policy head.

    Subclasses supply the action distribution for an observation and the
    log-probability of an action under that distribution.
    """

    def _distribution(self, obs):
        raise NotImplementedError

    def _log_prob_from_distribution(self, pi, act):
        raise NotImplementedError

    def forward(self, obs, act=None):
        """Return (pi, logp_a): the distribution for `obs`, plus the log-prob
        of `act` under it when an action is supplied (otherwise None)."""
        pi = self._distribution(obs)
        if act is None:
            return (pi, None)
        return (pi, self._log_prob_from_distribution(pi, act))

    def _d_kl(self, obs, old_mu, old_log_std, device):
        raise NotImplementedError
class MaskedLinear(nn.Linear):
    """Linear layer whose weight is elementwise-multiplied by a binary (or
    relaxed-binary) pruning mask derived from learned scores or magnitudes.

    Supported pruning methods: 'topK', 'threshold', 'sigmoied_threshold',
    'magnitude' (no extra parameters), and 'l0' (hard-concrete relaxation).
    """
    def __init__(self, in_features: int, out_features: int, bias: bool=True, mask_init: str='constant', mask_scale: float=0.0, pruning_method: str='topK'):
        super(MaskedLinear, self).__init__(in_features=in_features, out_features=out_features, bias=bias)
        assert (pruning_method in ['topK', 'threshold', 'sigmoied_threshold', 'magnitude', 'l0'])
        self.pruning_method = pruning_method
        # 'magnitude' prunes directly on |weight|, so it needs no score tensor.
        if (self.pruning_method in ['topK', 'threshold', 'sigmoied_threshold', 'l0']):
            self.mask_scale = mask_scale
            self.mask_init = mask_init
            # One learnable score per weight entry.
            self.mask_scores = nn.Parameter(torch.Tensor(self.weight.size()))
            self.init_mask()
    def init_mask(self):
        """Initialize mask scores per `mask_init` ('constant'/'uniform'/'kaiming')."""
        if (self.mask_init == 'constant'):
            init.constant_(self.mask_scores, val=self.mask_scale)
        elif (self.mask_init == 'uniform'):
            init.uniform_(self.mask_scores, a=(- self.mask_scale), b=self.mask_scale)
        elif (self.mask_init == 'kaiming'):
            init.kaiming_uniform_(self.mask_scores, a=math.sqrt(5))
    def forward(self, input: torch.Tensor, threshold: float):
        """Linear transform with the pruning mask applied to the weight.

        `threshold` parameterizes the binarizer (kept ratio for topK, cutoff
        for threshold-style methods; unused by 'l0').
        """
        if (self.pruning_method == 'topK'):
            mask = TopKBinarizer.apply(self.mask_scores, threshold)
        elif (self.pruning_method in ['threshold', 'sigmoied_threshold']):
            sig = ('sigmoied' in self.pruning_method)
            mask = ThresholdBinarizer.apply(self.mask_scores, threshold, sig)
        elif (self.pruning_method == 'magnitude'):
            mask = MagnitudeBinarizer.apply(self.weight, threshold)
        elif (self.pruning_method == 'l0'):
            # Hard-concrete distribution: stretch limits (l, r) and temperature b.
            (l, r, b) = ((- 0.1), 1.1, (2 / 3))
            if self.training:
                # Sample a concrete gate per weight (clamped to avoid log(0)).
                u = torch.zeros_like(self.mask_scores).uniform_().clamp(0.0001, 0.9999)
                s = torch.sigmoid((((u.log() - (1 - u).log()) + self.mask_scores) / b))
            else:
                # Deterministic gate at eval time.
                s = torch.sigmoid(self.mask_scores)
            # Stretch to (l, r) then clip into [0, 1] to get a hard-ish mask.
            s_bar = ((s * (r - l)) + l)
            mask = s_bar.clamp(min=0.0, max=1.0)
        weight_thresholded = (mask * self.weight)
        return F.linear(input, weight_thresholded, self.bias)
def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs):
    """Build a segmentation model and optionally load released weights.

    When `pretrained`, the aux classifier is forced on (the published
    checkpoints include it) and backbone pre-training is skipped, since the
    full checkpoint overwrites those weights anyway.
    """
    if pretrained:
        aux_loss = True
        kwargs['pretrained_backbone'] = False
    net = _segm_model(arch_type, backbone, num_classes, aux_loss, **kwargs)
    if pretrained:
        _load_weights(net, arch_type, backbone, progress)
    return net
# NOTE(review): a bare `_cache()` call like this has no effect on the function
# below — it almost certainly lost its leading '@' (i.e. `@_cache()`); confirm
# against upstream before relying on response caching.
_cache()
def statcast_pitcher_active_spin(year: int, minP: int=250, _type: str='spin-based') -> pd.DataFrame:
    """Fetch the Statcast active-spin leaderboard for `year` (min `minP` pitches).

    Falls back from the 'spin-based' leaderboard to the legacy 'observed' one
    when the spin-based query returns no rows. Returns an empty DataFrame when
    neither query yields results.
    """
    # NOTE(review): the URL f-string literal was truncated in this copy — the
    # line below is not valid Python as it stands; restore the Baseball Savant
    # leaderboard CSV URL (it interpolates year/minP/_type).
    url = f'
    res = requests.get(url, timeout=None).content
    # Savant serves an HTML page (rather than CSV) when the query has no results.
    if (res and ('<html' in res.decode('utf-8'))):
        if (_type == 'spin-based'):
            warnings.warn(f'Could not get active spin results for year {year} that are "spin-based". Trying to get the older "observed" results.')
            return statcast_pitcher_active_spin(year, minP, 'observed')
        warnings.warn('Statcast did not return any active spin results for the query provided.')
        return pd.DataFrame()
    data = pd.read_csv(io.StringIO(res.decode('utf-8')))
    # An empty (but well-formed) CSV also triggers the legacy fallback.
    if ((_type == 'spin-based') and ((data is None) or data.empty)):
        return statcast_pitcher_active_spin(year, minP, 'observed')
    data = sanitize_statcast_columns(data)
    return data
def setup_agent(cfg: DictConfig, env: Environment) -> Agent:
    """Instantiate the agent named by cfg.agent ('random' or 'a2c') for `env`.

    Raises ValueError for any other agent name.
    """
    if (cfg.agent == 'random'):
        return RandomAgent(env=env, n_steps=cfg.env.training.n_steps, total_batch_size=cfg.env.training.total_batch_size, random_policy=_setup_random_policy(cfg, env))
    if (cfg.agent == 'a2c'):
        a2c_cfg = cfg.env.a2c
        networks = _setup_actor_critic_neworks(cfg, env)
        return A2CAgent(env=env, n_steps=cfg.env.training.n_steps, total_batch_size=cfg.env.training.total_batch_size, actor_critic_networks=networks, optimizer=optax.adam(a2c_cfg.learning_rate), normalize_advantage=a2c_cfg.normalize_advantage, discount_factor=a2c_cfg.discount_factor, bootstrapping_factor=a2c_cfg.bootstrapping_factor, l_pg=a2c_cfg.l_pg, l_td=a2c_cfg.l_td, l_en=a2c_cfg.l_en)
    raise ValueError(f"Expected agent name to be in ['random', 'a2c'], got {cfg.agent}.")
def test_force_in_10m13kms2():
    """Check force_in_10m13kms2 scales as vo^2 and 1/ro at fiducial values."""
    (vofid, rofid) = (200.0, 8.0)
    tol = 10.0 ** (- 10.0)
    # Doubling vo must quadruple the force unit.
    ratio_v = (4.0 * conversion.force_in_10m13kms2(vofid, rofid)) / conversion.force_in_10m13kms2(2.0 * vofid, rofid)
    assert numpy.fabs(ratio_v - 1.0) < tol, 'force_in_10m13kms2 did not work as expected'
    # Doubling ro must halve the force unit.
    ratio_r = (0.5 * conversion.force_in_10m13kms2(vofid, rofid)) / conversion.force_in_10m13kms2(vofid, 2 * rofid)
    assert numpy.fabs(ratio_r - 1.0) < tol, 'force_in_10m13kms2 did not work as expected'
    return None
class AttributeMonitor(BaseMonitor):
    """Records a named attribute of selected sub-modules each time they run.

    For every module in `net` matching `instance`, a forward (or forward-pre)
    hook is registered that appends
    `function_on_attribute(getattr(module, attribute_name))` to the shared
    records list inherited from BaseMonitor.
    """

    def __init__(self, attribute_name: str, pre_forward: bool, net: nn.Module, instance: (Any or tuple)=None, function_on_attribute: Callable=(lambda x: x)):
        super().__init__()
        self.attribute_name = attribute_name
        self.function_on_attribute = function_on_attribute
        # NOTE(review): `instance` defaults to None, but isinstance(m, None)
        # raises TypeError — callers must pass a type or tuple of types.
        for (name, m) in net.named_modules():
            if isinstance(m, instance):
                self.monitored_layers.append(name)
                self.name_records_index[name] = []
                if pre_forward:
                    self.hooks.append(m.register_forward_pre_hook(self.create_hook(name)))
                else:
                    self.hooks.append(m.register_forward_hook(self.create_hook(name)))

    def create_hook(self, name):
        """Build a hook that snapshots the monitored attribute when `name` fires."""
        # Fix: forward-pre hooks are invoked as hook(module, input) — only two
        # positional args — so `y` must be optional or every pre_forward=True
        # monitor raised TypeError at the first forward pass.
        def hook(m, x, y=None):
            if self.is_enable():
                self.name_records_index[name].append(self.records.__len__())
                self.records.append(self.function_on_attribute(m.__getattr__(self.attribute_name)))
        return hook
def create_inception_v3_two_path_mixed_layer(x, id, name='', channel_axis=3, bottleneck_compression=0.5, compression=0.655, has_batch_norm=False, kType=0):
    """Two-path inception-v3 mixed layer.

    Interleaves the input channels, runs two parallel bottlenecked inception
    paths (each followed by its own mixed layer), and concatenates the two
    results along `channel_axis`.
    """
    if (name == ''):
        name = 'mixed'
    # Interleaving reshuffles channel order so the two paths see different groupings.
    interleaved = cai.layers.InterleaveChannels(2, name=(name + '_interleaved'))(x)
    path_a = create_inception_path(last_tensor=interleaved, compression=bottleneck_compression, channel_axis=channel_axis, name=(name + '_ta'), activation=None, has_batch_norm=has_batch_norm, kType=kType)
    path_b = create_inception_path(last_tensor=interleaved, compression=bottleneck_compression, channel_axis=channel_axis, name=(name + '_tb'), activation=None, has_batch_norm=has_batch_norm, kType=kType)
    path_a = create_inception_v3_mixed_layer(path_a, id=id, name=(name + 'a'), bottleneck_compression=bottleneck_compression, compression=compression, kType=kType)
    path_b = create_inception_v3_mixed_layer(path_b, id=id, name=(name + 'b'), bottleneck_compression=bottleneck_compression, compression=compression, kType=kType)
    return keras.layers.Concatenate(axis=channel_axis, name=name)([path_a, path_b])
class TestLoadCheckpoint(unittest.TestCase):
    """Tests for checkpoint_utils.load_checkpoint epoch/iteration resume logic."""
    def setUp(self):
        # Minimal args namespace load_checkpoint reads flags from.
        self.args_mock = MagicMock()
        self.args_mock.optimizer_overrides = '{}'
        self.args_mock.reset_dataloader = False
        self.args_mock.reset_meters = False
        self.args_mock.reset_optimizer = False
        # Stub out filesystem access; isfile=True makes a checkpoint "exist".
        self.patches = {'os.makedirs': MagicMock(), 'os.path.join': MagicMock(), 'os.path.isfile': MagicMock(return_value=True), 'os.path.isabs': MagicMock(return_value=False)}
        self.applied_patches = [patch(p, d) for (p, d) in self.patches.items()]
        [p.start() for p in self.applied_patches]
    def test_load_partial_checkpoint(self):
        """Resuming mid-epoch continues from the stored iteration offset."""
        with contextlib.redirect_stdout(StringIO()):
            # Epoch 2 of 150-iteration epochs, checkpointed at global step 200
            # (i.e. 50 iterations into epoch 2).
            (trainer, epoch_itr) = get_trainer_and_epoch_itr(2, 150, 200, 50)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            (_, epoch_itr) = checkpoint_utils.load_checkpoint(self.args_mock, trainer)
            self.assertEqual(epoch_itr.epoch, 2)
            self.assertEqual(epoch_itr.iterations_in_epoch, 50)
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 2)
            self.assertEqual(epoch_itr.iterations_in_epoch, 50)
            # First batch after resume picks up at offset 50.
            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 50)
            self.assertEqual(epoch_itr.iterations_in_epoch, 51)
            # Drain the remainder of the epoch.
            for _ in range((150 - 52)):
                next(itr)
            self.assertEqual(epoch_itr.iterations_in_epoch, 149)
            self.assertTrue(itr.has_next())
            next(itr)
            self.assertFalse(itr.has_next())
            # Exhausting the epoch rolls over to epoch 3 at offset 0.
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertTrue(itr.has_next())
            self.assertEqual(epoch_itr.epoch, 3)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
    def test_load_full_checkpoint(self):
        """A checkpoint saved exactly at an epoch boundary resumes at the next epoch."""
        with contextlib.redirect_stdout(StringIO()):
            (trainer, epoch_itr) = get_trainer_and_epoch_itr(2, 150, 300, 150)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            (_, epoch_itr) = checkpoint_utils.load_checkpoint(self.args_mock, trainer)
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 3)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0)
    def test_load_no_checkpoint(self):
        """Without a checkpoint file, training starts fresh at epoch 1."""
        with contextlib.redirect_stdout(StringIO()):
            (trainer, epoch_itr) = get_trainer_and_epoch_itr(1, 150, 0, 0)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            # Flip the filesystem stub so the checkpoint path does not exist.
            self.patches['os.path.isfile'].return_value = False
            (_, epoch_itr) = checkpoint_utils.load_checkpoint(self.args_mock, trainer)
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 1)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0)
    def tearDown(self):
        patch.stopall()
class TestRollout():
    """Smoke tests for utils.rollout against a dummy box env and policy."""
    def setup_method(self):
        # 4x4 observations, 2x2 actions.
        self.env = GarageEnv(DummyBoxEnv(obs_dim=(4, 4), action_dim=(2, 2)))
        self.policy = DummyPolicy(self.env.spec)
    def test_max_path_length(self):
        """Every per-step array in the path is truncated to max_path_length."""
        path = utils.rollout(self.env, self.policy, max_path_length=3)
        assert (path['observations'].shape[0] == 3)
        assert (path['actions'].shape[0] == 3)
        assert (path['rewards'].shape[0] == 3)
        agent_info = [path['agent_infos'][k] for k in self.policy.distribution.dist_info_keys]
        assert (agent_info[0].shape[0] == 3)
        assert (path['env_infos']['dummy'].shape[0] == 3)
    def test_deterministic_action(self):
        """deterministic=True makes the dummy policy emit all-zero actions."""
        path = utils.rollout(self.env, self.policy, max_path_length=5, deterministic=True)
        assert (path['actions'] == 0.0).all()
def get_Future3D_text_annotation(cfg: DictConfig, id: str) -> dict:
    """Look up the Future3D text annotation for model `id`.

    Reads the 3D-FUTURE model-info JSON at cfg.data.future3d_json and joins
    the model's style/theme/material/category/super-category fields (skipping
    None values) into a lowercase string. Returns {'text': ..., 'format': ...}
    with text=None when no fields are present.
    """
    form = 'future3d-STMCS'
    with open(cfg.data.future3d_json, 'r') as f:
        model_info = json.load(f)
    fields = ['style', 'theme', 'material', 'category', 'super-category']
    id2annot = {entry['model_id']: [entry[key] for key in fields] for entry in model_info}
    parts = [value for value in id2annot[id] if value is not None]
    text_annot = ' '.join(parts).lower()
    return {'text': (text_annot if len(text_annot) else None), 'format': form}
class FeatureRectifyModule(nn.Module):
    """Cross-rectifies two feature streams with mutual channel and spatial
    weights: each stream is augmented by a weighted copy of the other."""
    def __init__(self, dim, reduction=1, lambda_c=0.5, lambda_s=0.5):
        super(FeatureRectifyModule, self).__init__()
        self.lambda_c = lambda_c  # weight of the channel-rectified term
        self.lambda_s = lambda_s  # weight of the spatial-rectified term
        self.channel_weights = ChannelWeights(dim=dim, reduction=reduction)
        self.spatial_weights = SpatialWeights(dim=dim, reduction=reduction)
    def _init_weights(self, m):
        # NOTE(review): this initializer is never wired up in this class —
        # presumably it is meant to be passed to `self.apply` by the enclosing
        # model; confirm it is actually invoked somewhere.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if (isinstance(m, nn.Linear) and (m.bias is not None)):
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            # Kaiming-style normal init from fan-out.
            fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt((2.0 / fan_out)))
            if (m.bias is not None):
                m.bias.data.zero_()
    def forward(self, x1, x2):
        """Return the rectified pair (out_x1, out_x2); index [1] of each weight
        pair modulates x2's contribution to x1, index [0] the reverse."""
        channel_weights = self.channel_weights(x1, x2)
        spatial_weights = self.spatial_weights(x1, x2)
        out_x1 = ((x1 + ((self.lambda_c * channel_weights[1]) * x2)) + ((self.lambda_s * spatial_weights[1]) * x2))
        out_x2 = ((x2 + ((self.lambda_c * channel_weights[0]) * x1)) + ((self.lambda_s * spatial_weights[0]) * x1))
        return (out_x1, out_x2)
class Cropping1D(ZooKerasLayer):
    """Zoo wrapper for Keras Cropping1D: crops `cropping` units from the start
    and end of the time dimension of a 3D input."""

    def __init__(self, cropping=(1, 1), input_shape=None, **kwargs):
        shape = list(input_shape) if input_shape else None
        super(Cropping1D, self).__init__(None, cropping, shape, **kwargs)
class XFUN(datasets.GeneratorBasedBuilder):
    """XFUN form-understanding dataset builder: documents with token boxes,
    BIO entity labels and question/answer (key-value) relations, tokenized
    with XLM-RoBERTa."""
    BUILDER_CONFIGS = [XFUNConfig(name=f'xfun.{lang}', lang=lang) for lang in _LANG]
    # Shared tokenizer for all configs.
    tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
    def _info(self):
        """Declare the feature schema (ids, boxes, BIO labels, image, entities, relations)."""
        return datasets.DatasetInfo(features=datasets.Features({'id': datasets.Value('string'), 'input_ids': datasets.Sequence(datasets.Value('int64')), 'bbox': datasets.Sequence(datasets.Sequence(datasets.Value('int64'))), 'labels': datasets.Sequence(datasets.ClassLabel(names=['O', 'B-QUESTION', 'B-ANSWER', 'B-HEADER', 'I-ANSWER', 'I-QUESTION', 'I-HEADER'])), 'image': datasets.Array3D(shape=(3, 224, 224), dtype='uint8'), 'entities': datasets.Sequence({'start': datasets.Value('int64'), 'end': datasets.Value('int64'), 'label': datasets.ClassLabel(names=['HEADER', 'QUESTION', 'ANSWER'])}), 'relations': datasets.Sequence({'head': datasets.Value('int64'), 'tail': datasets.Value('int64'), 'start_index': datasets.Value('int64'), 'end_index': datasets.Value('int64')})}), supervised_keys=None)
    def _split_generators(self, dl_manager):
        """Build train/validation splits; training can mix in additional languages."""
        file_dir = 'xfund&funsd/'
        # Each entry is a [annotation-json, image-dir] pair.
        train_files_for_many_langs = [[(file_dir + f'{self.config.lang}.train.json'), (file_dir + f'{self.config.lang}')]]
        val_files_for_many_langs = [[(file_dir + f'{self.config.lang}.val.json'), (file_dir + f'{self.config.lang}')]]
        if self.config.additional_langs:
            additional_langs = self.config.additional_langs.split('+')
            if ('all' in additional_langs):
                additional_langs = [lang for lang in _LANG if (lang != self.config.lang)]
            for lang in additional_langs:
                train_files_for_many_langs.append([(file_dir + f'{lang}.train.json'), (file_dir + f'{lang}')])
        logger.info(f'Training on {self.config.lang} with additional langs({self.config.additional_langs})')
        logger.info(f'Evaluating on {self.config.lang}')
        logger.info(f'Testing on {self.config.lang}')
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepaths': train_files_for_many_langs}), datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepaths': val_files_for_many_langs})]
    def _generate_examples(self, filepaths):
        """Yield (key, example) pairs: tokenize each document line, align OCR
        word boxes to tokens, then emit 512-token chunks with the entities and
        relations that fall entirely inside each chunk."""
        for filepath in filepaths:
            logger.info('Generating examples from = %s', filepath)
            with open(filepath[0], 'r') as f:
                data = json.load(f)
            for doc in data['documents']:
                doc['img']['fpath'] = os.path.join(filepath[1], doc['img']['fname'])
                (image, size) = load_image(doc['img']['fpath'])
                document = doc['document']
                tokenized_doc = {'input_ids': [], 'bbox': [], 'labels': []}
                entities = []
                relations = []
                id2label = {}
                entity_id_to_index_map = {}
                empty_entity = set()
                for line in document:
                    # Lines with no text cannot be entities; remember them so
                    # relations touching them can be dropped below.
                    if (len(line['text']) == 0):
                        empty_entity.add(line['id'])
                        continue
                    id2label[line['id']] = line['label']
                    relations.extend([tuple(sorted(l)) for l in line['linking']])
                    if ('/en' in filepath[0]):
                        # English: join OCR words (stripping a private-use glyph)
                        # so offsets line up with whitespace-joined text.
                        tokenized_inputs = self.tokenizer(' '.join([q['text'].replace(u'\uf703', '') for q in line['words']]), add_special_tokens=False, return_offsets_mapping=True, return_attention_mask=False)
                    else:
                        tokenized_inputs = self.tokenizer(line['text'], add_special_tokens=False, return_offsets_mapping=True, return_attention_mask=False)
                    # Walk tokens and OCR words in parallel, assigning each
                    # token the merged box of the words it covers.
                    text_length = 0
                    ocr_length = 0
                    bbox = []
                    last_box = None
                    for (token_id, offset) in zip(tokenized_inputs['input_ids'], tokenized_inputs['offset_mapping']):
                        if (token_id == 6):
                            # Special marker token: box filled in afterwards.
                            bbox.append(None)
                            continue
                        text_length += (offset[1] - offset[0])
                        tmp_box = []
                        while (ocr_length < text_length):
                            ocr_word = line['words'].pop(0)
                            ocr_length += len(self.tokenizer._tokenizer.normalizer.normalize_str(ocr_word['text'].strip()))
                            tmp_box.append(simplify_bbox(line['box']))
                        if (len(tmp_box) == 0):
                            # Token consumed no new OCR word: reuse the previous box.
                            tmp_box = last_box
                        bbox.append(normalize_bbox(merge_bbox(tmp_box), size))
                        last_box = tmp_box
                    # Fill marker-token boxes from the following token's corner.
                    # NOTE(review): indexes (i + 1) — assumes the last bbox entry
                    # is never None; confirm token id 6 cannot end a line.
                    bbox = [([bbox[(i + 1)][0], bbox[(i + 1)][1], bbox[(i + 1)][0], bbox[(i + 1)][1]] if (b is None) else b) for (i, b) in enumerate(bbox)]
                    # BIO labels: 'other' lines are all O, otherwise B- then I-.
                    if (line['label'] == 'other'):
                        label = (['O'] * len(bbox))
                    else:
                        label = ([f"I-{line['label'].upper()}"] * len(bbox))
                        label[0] = f"B-{line['label'].upper()}"
                    tokenized_inputs.update({'bbox': bbox, 'labels': label})
                    if (label[0] != 'O'):
                        # Record entity span in document-token coordinates.
                        entity_id_to_index_map[line['id']] = len(entities)
                        entities.append({'start': len(tokenized_doc['input_ids']), 'end': (len(tokenized_doc['input_ids']) + len(tokenized_inputs['input_ids'])), 'label': line['label'].upper()})
                    for i in tokenized_doc:
                        tokenized_doc[i] = (tokenized_doc[i] + tokenized_inputs[i])
                # Deduplicate links and drop any touching an empty line.
                relations = list(set(relations))
                relations = [rel for rel in relations if ((rel[0] not in empty_entity) and (rel[1] not in empty_entity))]
                # Keep only question->answer pairs, oriented head=question.
                kvrelations = []
                for rel in relations:
                    pair = [id2label[rel[0]], id2label[rel[1]]]
                    if (pair == ['question', 'answer']):
                        kvrelations.append({'head': entity_id_to_index_map[rel[0]], 'tail': entity_id_to_index_map[rel[1]]})
                    elif (pair == ['answer', 'question']):
                        kvrelations.append({'head': entity_id_to_index_map[rel[1]], 'tail': entity_id_to_index_map[rel[0]]})
                    else:
                        continue
                def get_relation_span(rel):
                    # Token span covering both endpoints of the relation.
                    bound = []
                    for entity_index in [rel['head'], rel['tail']]:
                        bound.append(entities[entity_index]['start'])
                        bound.append(entities[entity_index]['end'])
                    return (min(bound), max(bound))
                relations = sorted([{'head': rel['head'], 'tail': rel['tail'], 'start_index': get_relation_span(rel)[0], 'end_index': get_relation_span(rel)[1]} for rel in kvrelations], key=(lambda x: x['head']))
                # Emit fixed-size chunks; entities/relations are re-indexed to
                # chunk-local coordinates and dropped if they straddle a boundary.
                chunk_size = 512
                for (chunk_id, index) in enumerate(range(0, len(tokenized_doc['input_ids']), chunk_size)):
                    item = {}
                    for k in tokenized_doc:
                        item[k] = tokenized_doc[k][index:(index + chunk_size)]
                    entities_in_this_span = []
                    global_to_local_map = {}
                    for (entity_id, entity) in enumerate(entities):
                        if ((index <= entity['start'] < (index + chunk_size)) and (index <= entity['end'] < (index + chunk_size))):
                            entity['start'] = (entity['start'] - index)
                            entity['end'] = (entity['end'] - index)
                            global_to_local_map[entity_id] = len(entities_in_this_span)
                            entities_in_this_span.append(entity)
                    relations_in_this_span = []
                    for relation in relations:
                        if ((index <= relation['start_index'] < (index + chunk_size)) and (index <= relation['end_index'] < (index + chunk_size))):
                            relations_in_this_span.append({'head': global_to_local_map[relation['head']], 'tail': global_to_local_map[relation['tail']], 'start_index': (relation['start_index'] - index), 'end_index': (relation['end_index'] - index)})
                    item.update({'id': f"{doc['id']}_{chunk_id}", 'image': image, 'entities': entities_in_this_span, 'relations': relations_in_this_span})
                    (yield (f"{doc['id']}_{chunk_id}", item))
class GenDiffPOBase(GenFOBase):
    """Fixture mixin providing a lazily-built, deterministically-seeded
    generative model whose parameters are overwritten with fixture values."""
    def gen_model(self):
        """Build the generative model once (cached on self._gen_model)."""
        if (not hasattr(self, '_gen_model')):
            torch.manual_seed(0)  # reproducible parameter initialization
            # NOTE(review): `upperbound` is a free name here — presumably a
            # module-level constant where this class is defined; confirm.
            self._gen_model = self.model_class(n_obs_neurons=self.n_obs_neurons, n_hidden_neurons=self.n_hidden_neurons, connection_tensor=self.connection_tensor, n_inducing_points=self.kernel_weight.shape[0], activation_kwargs={'upperbound': upperbound}, temperature=self.temperature)
            # Overwrite the random init with the fixture's fixed parameters.
            self._gen_model.params['bias'].data = self.bias
            self._gen_model.params['kernel_weight'].data = self.kernel_weight
            self._gen_model.hard = True
        return self._gen_model
class RepeatedContinuousStratifiedGroupKFold(_RepeatedSplits):
    """Repeats ContinuousStratifiedGroupKFold `n_repeats` times with different
    randomization in each repetition."""
    def __init__(self, n_bins, method='binning', n_splits: int=5, n_repeats: int=10, random_state: Optional[Union[(int, RandomState)]]=None):
        # n_bins/method configure how the continuous target is discretized
        # for stratification; the rest is forwarded to _RepeatedSplits.
        super().__init__(ContinuousStratifiedGroupKFold, n_bins=n_bins, method=method, n_repeats=n_repeats, random_state=random_state, n_splits=n_splits)
class FairseqDataset(torch.utils.data.Dataset, EpochListening):
    """Abstract dataset base: subclasses implement indexing, collation and sizing."""
    def __getitem__(self, index):
        raise NotImplementedError
    def __len__(self):
        raise NotImplementedError
    def collater(self, samples):
        """Merge a list of samples into a mini-batch."""
        raise NotImplementedError
    def num_tokens(self, index):
        """Number of tokens in the index-th example (used to batch by token count)."""
        raise NotImplementedError
    def size(self, index):
        """Size of the index-th example (used when filtering by max positions)."""
        raise NotImplementedError
    def ordered_indices(self):
        """Indices in the order they should be batched (default: natural order)."""
        return np.arange(len(self))
    def supports_prefetch(self):
        # Whether prefetch() can be used; off by default.
        return False
    def attr(self, attr: str, index: int):
        # Per-example attribute lookup; falls back to None when absent.
        return getattr(self, attr, None)
    def prefetch(self, indices):
        """Prefetch data for the given indices (only if supports_prefetch)."""
        raise NotImplementedError
class Self_Attn(nn.Module):
    """SAGAN-style self-attention layer: out = gamma * attend(x) + x."""

    def __init__(self, in_dim, activation):
        super(Self_Attn, self).__init__()
        self.chanel_in = in_dim  # (sic) attribute name kept for checkpoint compatibility
        self.activation = activation
        # 1x1 projections; query/key are reduced to in_dim // 8 channels.
        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=(in_dim // 8), kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=(in_dim // 8), kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        # gamma starts at zero, so the layer is initially the identity.
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=(- 1))

    def forward(self, x):
        """Return (attended map with residual, attention weights of shape (B, N, N))."""
        batch, channels, width, height = x.size()
        positions = width * height
        queries = self.query_conv(x).view(batch, -1, positions).permute(0, 2, 1)
        keys = self.key_conv(x).view(batch, -1, positions)
        attention = self.softmax(torch.bmm(queries, keys))
        values = self.value_conv(x).view(batch, -1, positions)
        attended = torch.bmm(values, attention.permute(0, 2, 1))
        attended = attended.view(batch, channels, width, height)
        out = self.gamma * attended + x
        return (out, attention)
def do_training(tr: Training, callback: tf.keras.callbacks.Callback, verbose=0):
    """Train (or resume training of) the model described by `tr`.

    Progress is reported through `callback`; checkpoints, logs and an
    architecture diagram are written under models_dir/<model_name>/. When
    USE_LAST is set, training resumes from 'cp-last.ckpt' and the epoch
    counter from the last line of log.csv.
    """
    tr.model_name = ((tr.dataset_name + '_') + str(tr.hyperparameters))
    tr.checkpoint_path = os.path.join(models_dir, tr.model_name, 'checkpoints')
    tr.log_path = os.path.join(models_dir, tr.model_name, 'logs')
    tr.custom_objects = {'direction_metric': metrics.direction_metric, 'angle_metric': metrics.angle_metric}
    if tr.hyperparameters.WANDB:
        import wandb
        from wandb.keras import WandbCallback
        wandb.init(project='openbot')
        config = wandb.config
        config.epochs = tr.hyperparameters.NUM_EPOCHS
        config.learning_rate = tr.hyperparameters.LEARNING_RATE
        config.batch_size = tr.hyperparameters.TRAIN_BATCH_SIZE
        config['model_name'] = tr.model_name
    resume_training = False
    model: tf.keras.Model
    if tr.hyperparameters.USE_LAST:
        try:
            # Probe the checkpoint directory first: raises FileNotFoundError
            # (caught below) when no checkpoints have been written yet.
            utils.list_dirs(tr.checkpoint_path)
            last_checkpoint = os.path.join(tr.checkpoint_path, 'cp-last.ckpt')
            model = tf.keras.models.load_model(last_checkpoint, custom_objects=tr.custom_objects, compile=False)
            # Resume the epoch counter from the last line of the CSV log.
            with open(os.path.join(tr.log_path, 'log.csv'), 'r') as log_file:
                tr.INITIAL_EPOCH = (int(log_file.readlines()[(- 1)].split(',')[0]) + 1)
            resume_training = True
            print(f'Resuming from checkpoint: {last_checkpoint}')
        except FileNotFoundError as err:
            # No checkpoint/log yet: fall through and build a fresh model.
            print('No checkpoint or log file found, training new model!')
            print(err)
        except Exception as err:
            print(err)
            raise
    if (not resume_training):
        model = getattr(models, tr.hyperparameters.MODEL)(tr.NETWORK_IMG_WIDTH, tr.NETWORK_IMG_HEIGHT, tr.hyperparameters.BATCH_NORM, tr.hyperparameters.POLICY)
        dot_img_file = os.path.join(models_dir, tr.model_name, 'model.png')
        tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
    callback.broadcast('model', tr.model_name)
    # Loss depends on the driving policy being trained.
    if (tr.hyperparameters.POLICY == 'autopilot'):
        tr.loss_fn = losses.sq_weighted_mse_angle
    elif (tr.hyperparameters.POLICY == 'point_goal_nav'):
        tr.loss_fn = losses.mae_raw_weighted_mse_angle
    tr.metric_list = ['mean_absolute_error', tr.custom_objects['direction_metric'], tr.custom_objects['angle_metric']]
    optimizer = tf.keras.optimizers.Adam(learning_rate=tr.hyperparameters.LEARNING_RATE)
    model.compile(optimizer=optimizer, loss=tr.loss_fn, metrics=tr.metric_list)
    if verbose:
        print(model.summary())
        print(tr.model_name)
    STEPS_PER_EPOCH = np.ceil((tr.image_count_train / tr.hyperparameters.TRAIN_BATCH_SIZE))
    callback.broadcast('message', 'Fit model...')
    callback_list = [callbacks.checkpoint_last_cb(tr.checkpoint_path), callbacks.checkpoint_best_train_cb(tr.checkpoint_path), callbacks.checkpoint_best_val_cb(tr.checkpoint_path), callbacks.tensorboard_cb(tr.log_path), callbacks.logger_cb(tr.log_path, resume_training), callback]
    if tr.hyperparameters.WANDB:
        callback_list += [WandbCallback()]
    tr.history = model.fit(tr.train_ds, epochs=tr.hyperparameters.NUM_EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, initial_epoch=tr.INITIAL_EPOCH, validation_data=tr.test_ds, verbose=verbose, callbacks=callback_list)
    if tr.hyperparameters.WANDB:
        wandb.save(tr.log_path)
        wandb.finish()
    callback.broadcast('message', '...Done')
class XceptionAligned(nn.Module):
    """Modified Aligned Xception backbone.

    Builds a convolutional stem, then a sequence of ``XceptionModule`` blocks
    described by ``block_cfg``, and finally a pooling classifier head.  Once
    the cumulative stride would exceed ``output_stride``, further block strides
    are converted into dilations so the spatial resolution is capped.
    """
    def __init__(self, block_cfg, num_classes=1000, in_chans=3, output_stride=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_rate=0.0, global_pool='avg'):
        super(XceptionAligned, self).__init__()
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        assert (output_stride in (8, 16, 32))
        layer_args = dict(act_layer=act_layer, norm_layer=norm_layer)
        # stem downsamples by 2 (stride-2 conv followed by a stride-1 conv)
        self.stem = nn.Sequential(*[ConvBnAct(in_chans, 32, kernel_size=3, stride=2, **layer_args), ConvBnAct(32, 64, kernel_size=3, stride=1, **layer_args)])
        curr_dilation = 1
        curr_stride = 2
        self.feature_info = []
        self.blocks = nn.Sequential()
        for (i, b) in enumerate(block_cfg):
            b['dilation'] = curr_dilation
            if (b['stride'] > 1):
                # record the feature map just before this block downsamples;
                # num_chs is the middle entry of the block's out_chs triple
                self.feature_info += [dict(num_chs=to_3tuple(b['out_chs'])[(- 2)], reduction=curr_stride, module=f'blocks.{i}.stack.act3')]
                next_stride = (curr_stride * b['stride'])
                if (next_stride > output_stride):
                    # cap the network stride: trade stride for dilation instead
                    curr_dilation *= b['stride']
                    b['stride'] = 1
                else:
                    curr_stride = next_stride
            self.blocks.add_module(str(i), XceptionModule(**b, **layer_args))
            self.num_features = self.blocks[(- 1)].out_channels
        self.feature_info += [dict(num_chs=self.num_features, reduction=curr_stride, module=('blocks.' + str((len(self.blocks) - 1))))]
        self.head = ClassifierHead(in_chs=self.num_features, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate)
    def get_classifier(self):
        """Return the classification layer of the head."""
        return self.head.fc
    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace the classifier head for a new number of classes."""
        self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
    def forward_features(self, x):
        """Backbone only: stem + Xception blocks, no pooling/classifier."""
        x = self.stem(x)
        x = self.blocks(x)
        return x
    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x
def main(args):
    """Run a hyperopt-driven ray.tune search over GPR-meta-SVGD hyperparameters,
    then re-train/evaluate the best configs on the test split across several
    seeds and dump the results to CSV."""
    ray.init(num_cpus=args.num_cpus, memory=(1800 * (1024 ** 2)), object_store_memory=(300 * (1024 ** 2)))
    def train_reg(config, reporter):
        # tune trainable: meta-fit on the train split and report validation
        # metrics every `eval_period` iterations
        sys.path.append(BASE_DIR)
        from experiments.data_sim import provide_data
        (data_train, data_valid, _) = provide_data(dataset=args.dataset, seed=SEED)
        from meta_learn.GPR_meta_svgd import GPRegressionMetaLearnedSVGD
        torch.set_num_threads(N_THREADS_PER_RUN)
        model = GPRegressionMetaLearnedSVGD(data_train, **config)
        with gpytorch.settings.max_cg_iterations(300):
            eval_period = 3000
            train_iter = 0
            for i in range((config['num_iter_fit'] // eval_period)):
                loss = model.meta_fit(verbose=False, log_period=2000, n_iter=eval_period)
                train_iter += eval_period
                (ll, rmse, calib_err) = model.eval_datasets(data_valid)
                reporter(timesteps_total=train_iter, loss=loss, test_rmse=rmse, test_ll=ll, calib_err=calib_err)
    def train_test(config):
        # test worker: train with a selected config and score on held-out test
        # tasks; failures are recorded as NaN rows rather than aborting the sweep.
        # NOTE(review): invoked below as train_test.remote(...) — the original
        # presumably carried an @ray.remote decorator that was stripped; confirm.
        results_dict = config
        try:
            sys.path.append(BASE_DIR)
            from experiments.data_sim import provide_data
            (data_train, _, data_test) = provide_data(dataset=args.dataset, seed=SEED)
            from meta_learn.GPR_meta_svgd import GPRegressionMetaLearnedSVGD
            torch.set_num_threads(N_THREADS_PER_RUN)
            with gpytorch.settings.max_cg_iterations(300):
                model = GPRegressionMetaLearnedSVGD(data_train, **config)
                model.meta_fit(data_test, log_period=5000)
                (ll, rmse, calib_err) = model.eval_datasets(data_test)
                results_dict.update(ll=ll, rmse=rmse, calib_err=calib_err)
        except Exception as e:
            print(e)
            results_dict.update(ll=np.nan, rmse=np.nan, calib_err=np.nan)
        return results_dict
    assert (args.metric in ['test_ll', 'test_rmse'])
    exp_name = ('tune_meta_svgd_%s_kernel_%s' % (args.covar_module, args.dataset))
    if args.load_analysis:
        # reuse a previous tune run instead of searching again
        analysis_dir = os.path.join(HPARAM_EXP_DIR, exp_name)
        assert os.path.isdir(analysis_dir), 'load_analysis_from must be a valid directory'
        print(('Loading existing tune analysis results from %s' % analysis_dir))
        analysis = Analysis(analysis_dir)
    else:
        # hyperopt search space (log-uniform for scale-like parameters)
        space = {'task_kl_weight': hp.loguniform('task_kl_weight', math.log(0.08), math.log(1.0)), 'prior_factor': hp.loguniform('prior_factor', math.log(1e-06), math.log(0.2)), 'lr': hp.loguniform('lr', math.log(0.0005), math.log(0.005)), 'lr_decay': hp.loguniform('lr_decay', math.log(0.8), math.log(1.0)), 'bandwidth': hp.loguniform('bandwidth', math.log(0.001), math.log(500.0)), 'num_particles': hp.choice('num_particles', [10, 50]), 'task_batch_size': hp.choice('task_batch_size', [4, 10])}
        config = {'num_samples': 200, 'config': {'num_iter_fit': 30000, 'kernel_nn_layers': [32, 32, 32, 32], 'mean_nn_layers': [32, 32, 32, 32], 'random_seed': SEED, 'mean_module': 'NN', 'covar_module': args.covar_module, 'normalize_data': True}, 'stop': {'timesteps_total': 30000}}
        algo = HyperOptSearch(space, max_concurrent=args.num_cpus, metric=args.metric, mode=('max' if (args.metric == 'test_ll') else 'min'))
        analysis = tune.run(train_reg, name=exp_name, search_alg=algo, verbose=1, raise_on_failed_trial=False, local_dir=HPARAM_EXP_DIR, **config)
    from experiments.hyperparam_search.util import select_best_configs
    if (args.metric == 'test_ll'):
        best_configs = select_best_configs(analysis, metric='test_ll', mode='max', N=args.n_test_runs)
    elif (args.metric == 'test_rmse'):
        best_configs = select_best_configs(analysis, metric='test_rmse', mode='min', N=args.n_test_runs)
    else:
        raise AssertionError('metric must be test_ll or test_rmse')
    # replicate each selected config across the test seeds
    # (NOTE: the loop variable `config` shadows the tune `config` dict above)
    test_configs = []
    for config in best_configs:
        for seed in TEST_SEEDS:
            test_config = copy.deepcopy(config)
            test_config.update({'random_seed': seed})
            test_configs.append(test_config)
    result_dicts = ray.get([train_test.remote(config) for config in test_configs])
    result_df = pd.DataFrame(result_dicts)
    print(result_df.to_string())
    csv_file_name = os.path.join(HPARAM_EXP_DIR, ('%s_%s.csv' % (exp_name, datetime.now().strftime('%b_%d_%Y_%H:%M:%S'))))
    result_df.to_csv(csv_file_name)
    print(('\nSaved result csv to %s' % csv_file_name))
class GAPNormalizer(object):
    """Expands masked terms (words containing '*') in text by matching them
    against a vocabulary, e.g. 'c*t' -> first vocabulary word fitting the
    pattern ('*' masks exactly one character)."""

    def __init__(self, vocab_file):
        self.vocab_file = vocab_file
        self.init_vocabulary()
        # characters considered part of a word while expanding around a star
        self.letters = set((string.ascii_letters + '*'))

    def init_vocabulary(self):
        """Load candidate replacement terms, skipping short terms and terms
        containing brackets, '#', or digits."""
        self.vocabulary = []
        with open(self.vocab_file) as f:
            for line in f:
                term = line.strip()
                if ((len(term) < 3) or ('[' in term) or ('#' in term) or any((char.isdigit() for char in term))):
                    continue
                self.vocabulary.append(term)

    def normalize(self, text):
        """Replace each starred term in `text` with the first matching
        vocabulary word; terms with no match are left untouched.

        Bug fix: matches are processed right-to-left.  The original walked the
        offsets of `re.finditer` left-to-right while splicing replacements into
        `text`; a replacement whose length differs from the starred term then
        invalidated the offsets of every later match.  Going right-to-left
        keeps all earlier offsets valid.
        """
        for found_star in reversed(list(re.finditer('\\*', text))):
            start_index = found_star.start()
            # expand left to the beginning of the word containing the star
            while ((start_index >= 0) and (text[start_index] in self.letters)):
                start_index -= 1
            start_index += 1
            # expand right to the end of the word
            end_index = found_star.start()
            while ((end_index < len(text)) and (text[end_index] in self.letters)):
                end_index += 1
            if ((end_index - start_index) < 4):
                continue  # too short to disambiguate
            star_term = text[start_index:end_index]
            if (star_term.count('*') >= (len(star_term) - 1)):
                continue  # (almost) fully masked: no usable signal
            # '*' masks exactly one character, so translate to fnmatch's '?'
            replacements = fnmatch.filter(self.vocabulary, star_term.replace('*', '?'))
            if (len(replacements) > 0):
                text = ((text[:start_index] + replacements[0]) + text[end_index:])
        return text
def store_in_memory(mmemory, addr, value):
    """Store a 32-byte word `value` at byte address `addr` in the symbolic
    memory model `mmemory`, patching previously stored words that overlap it.

    `mmemory` maps byte addresses to entries like {'z3': <expr>, ...}; `value`
    is such an entry.  NOTE(review): entries are assumed to hold 256-bit
    bitvector-like values — confirm against the surrounding memory model.
    """
    # words stored at addr+1 .. addr+31 overlap the low end of the new word:
    # keep their low `obytes` bytes and splice in the new value shifted up
    for i in range((addr + 1), (addr + 32)):
        if (i in mmemory):
            if (not is_undefined(mmemory[i])):
                if is_undefined(value):
                    # an undefined store poisons every overlapping word
                    mmemory[i]['type'] = 'undefined'
                    continue
                obytes = (i - addr)
                old_value = mmemory[i]['z3']
                new_value = ((old_value & ((2 ** (8 * obytes)) - 1)) ^ (value['z3'] << (8 * obytes)))
                if (new_value == 0):
                    # all-zero words are represented by absence
                    del mmemory[i]
                else:
                    mmemory[i]['z3'] = new_value
    # words stored at addr-31 .. addr-1 overlap the high end of the new word:
    # keep their high `obytes` bytes and splice in the new value shifted down
    for i in range((addr - 31), addr):
        if (i in mmemory):
            if (not is_undefined(mmemory[i])):
                if is_undefined(value):
                    mmemory[i]['type'] = 'undefined'
                    continue
                obytes = (addr - i)
                old_value = mmemory[i]['z3']
                new_value = ((old_value & (((2 ** (8 * obytes)) - 1) << (8 * (32 - obytes)))) ^ (value['z3'] >> (8 * obytes)))
                if (new_value == 0):
                    del mmemory[i]
                else:
                    mmemory[i]['z3'] = new_value
    mmemory[addr] = value
def test_transformer_head_loss():
    """Smoke-test TransformerHead: losses with empty and single-object GT."""
    img_size = 256
    img_metas = [{
        'img_shape': (img_size, img_size, 3),
        'scale_factor': 1,
        'pad_shape': (img_size, img_size, 3),
        'batch_input_shape': (img_size, img_size),
    }]
    train_cfg = dict(
        assigner=dict(
            type='HungarianAssigner',
            cls_cost=dict(type='ClassificationCost', weight=1.0),
            reg_cost=dict(type='BBoxL1Cost', weight=5.0),
            iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0)))
    transformer_cfg = dict(
        type='Transformer',
        embed_dims=4,
        num_heads=1,
        num_encoder_layers=1,
        num_decoder_layers=1,
        feedforward_channels=1,
        dropout=0.1,
        act_cfg=dict(type='ReLU', inplace=True),
        norm_cfg=dict(type='LN'),
        num_fcs=2,
        pre_norm=False,
        return_intermediate_dec=True)
    positional_encoding_cfg = dict(type='SinePositionalEncoding', num_feats=2, normalize=True)
    head = TransformerHead(
        num_classes=4,
        in_channels=1,
        num_fcs=2,
        train_cfg=train_cfg,
        transformer=transformer_cfg,
        positional_encoding=positional_encoding_cfg)
    head.init_weights()
    feats = [torch.rand(1, 1, img_size // stride, img_size // stride) for stride in [4, 8, 16, 32, 64]]
    (cls_scores, bbox_preds) = head.forward(feats, img_metas)
    # with no ground truth there must be a cls loss but no box/iou loss
    gt_bboxes = [torch.empty((0, 4))]
    gt_labels = [torch.LongTensor([])]
    gt_bboxes_ignore = None
    empty_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
    for key, loss in empty_gt_losses.items():
        if 'cls' in key:
            assert loss.item() > 0, 'cls loss should be non-zero'
        elif 'bbox' in key:
            assert loss.item() == 0, 'there should be no box loss when there are no true boxes'
        elif 'iou' in key:
            assert loss.item() == 0, 'there should be no iou loss when there are no true boxes'
    # with one ground-truth box every loss component must be positive
    gt_bboxes = [torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])]
    gt_labels = [torch.LongTensor([2])]
    one_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
    for loss in one_gt_losses.values():
        assert loss.item() > 0, 'cls loss, or box loss, or iou loss should be non-zero'
    # forward_train and get_bboxes must run without error
    head.forward_train(feats, img_metas, gt_bboxes, gt_labels)
    head.get_bboxes(cls_scores, bbox_preds, img_metas, rescale=True)
def ensure_valid_input(model, tokens, input_names):
    """Reorder `tokens` to match the positional parameter order of
    ``model.forward``.

    Walks the forward signature (skipping ``self``) and collects the matching
    token values; stops at the first parameter that has no entry in
    `input_names`.  Returns ``(ordered_input_names, model_args_tuple)``.
    """
    print('Ensuring inputs are in correct order')
    forward_params = model.forward.__code__.co_varnames
    ordered_input_names = []
    model_args = []
    # index 0 of co_varnames is `self`; later names must appear in signature order
    for arg_name in forward_params[1:]:
        if arg_name not in input_names:
            print(f'{arg_name} is not present in the generated input list.')
            break
        ordered_input_names.append(arg_name)
        model_args.append(tokens[arg_name])
    print(f'Generated inputs order: {ordered_input_names}')
    return ordered_input_names, tuple(model_args)
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLM-RoBERTa.

    Token ids follow the fairseq convention: ids 0-3 are reserved for
    '<s>', '<pad>', '</s>', '<unk>', so every SentencePiece id is shifted by
    ``fairseq_offset`` (1) and '<mask>' is appended after the shifted vocab.
    NOTE(review): several decorators appear to have been stripped from this
    file elsewhere; ``vocab_size`` is restored as a property below because
    ``get_vocab`` iterates ``range(self.vocab_size)``.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', sp_model_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs) -> None:
        # the mask token behaves like a normal word, i.e. includes the space before it
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # fairseq reserves the first four ids; every spm id is shifted by the offset
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids['<mask>'] = (len(self.sp_model) + self.fairseq_offset)
        self.fairseq_ids_to_tokens = {v: k for (k, v) in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # the SentencePiece processor is not picklable: serialize its proto instead
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # backward compatibility with pickles created before sp_model_kwargs existed
        if (not hasattr(self, 'sp_model_kwargs')):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Single sequence: ``<s> X </s>``; pair: ``<s> A </s></s> B </s>``."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """XLM-R does not use token type ids: the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])

    @property
    def vocab_size(self):
        # bug fix: must be a property — get_vocab() iterates range(self.vocab_size);
        # without @property that is range(<bound method>) and raises TypeError.
        # The +1 accounts for the appended <mask> token.
        return ((len(self.sp_model) + self.fairseq_offset) + 1)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, honoring the fairseq id layout."""
        if (token in self.fairseq_tokens_to_ids):
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # spm returns 0 for unknown pieces; map that to the model's unk id
        return ((spm_id + self.fairseq_offset) if spm_id else self.unk_token_id)

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str), honoring the fairseq id layout."""
        if (index in self.fairseq_ids_to_tokens):
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece((index - self.fairseq_offset))

    def convert_tokens_to_string(self, tokens):
        """Join sentencepiece tokens back into text (strip the word marker)."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the sentencepiece model file into `save_directory`."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if ((os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        elif (not os.path.isfile(self.vocab_file)):
            # original file is gone (e.g. loaded from serialized proto): re-dump it
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
def test_action_space():
    """ActionSpace must accept well-formed action dicts and reject everything else."""
    space = ActionSpace({
        'move': gym.spaces.Dict({
            'position': gym.spaces.Discrete(2),
            'velocity': gym.spaces.Discrete(3),
        }),
        'move_forward': EmptySpace(),
    })
    assert space.contains(space.sample())
    valid_samples = [
        {'action': 'move', 'action_args': {'position': 0, 'velocity': 1}},
        {'action': 'move_forward'},
    ]
    for sample in valid_samples:
        assert space.contains(sample)
    invalid_samples = [
        [0, 1, 2],                                                  # wrong container
        {'zero': None},                                             # missing 'action'
        {'action': 'bad'},                                          # unknown action
        {'action': 'move'},                                         # missing required args
        {'action': 'move', 'action_args': {'position': 0}},         # incomplete args
        {'action': 'move_forward', 'action_args': {'position': 0}}, # args for EmptySpace
    ]
    for sample in invalid_samples:
        assert not space.contains(sample)
def get_dep_adj(passage, tag):
    """Build a symmetric 0/1 dependency adjacency matrix over the words of
    `passage`.

    The passage is split into sentences at '.', '!', '?'; each sentence is
    parsed with the module-level Stanford `parser` and edges are added between
    head and dependent words.  Adjacent sentences are bridged via their
    boundary words and the diagonal is set to 1 (self loops).

    Args:
        passage: whitespace-tokenized text.
        tag: whitespace-separated per-word tags aligned with `passage`;
            sentences with no 'I'/'B'/'S' tag are only bridged, not parsed.
    Returns:
        np.ndarray of shape (n_words, n_words).
    """
    map_passage = {}
    word_passage = passage.split()
    tags = tag.split()
    assert (len(word_passage) == len(tags))
    for (position, word) in enumerate(word_passage):
        map_passage[position] = word
    adj = np.zeros([len(word_passage), len(word_passage)])
    str_passage = ' '.join(word_passage)
    sentences = str_passage.replace('.', '#').replace('!', '#').replace('?', '#').split('#')
    start_position = 0
    end_position = 0
    for sent in sentences:
        end_position += len(sent.split())
        if (end_position > len(word_passage)):
            end_position = len(word_passage)
        flag = tags[start_position:end_position]
        if (not (('I' in flag) or ('B' in flag) or ('S' in flag))):
            # sentence contains no tagged entity: just bridge it to the next one
            start_position = (end_position + 1)
            adj[(end_position - 1)][min(start_position, (len(word_passage) - 1))] = adj[min(start_position, (len(word_passage) - 1))][(end_position - 1)] = 1
            end_position = start_position
            continue
        try:
            res = list(parser.raw_parse(sent))
        except Exception:
            # bug fix: the original used a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; only parser failures are tolerated
            print('one error occur')
            continue
        for row in res[0].triples():
            word1 = row[0][0]
            word2 = row[2][0]
            pos1 = 0
            pos2 = 0
            # map head/dependent surface forms back to passage positions
            # NOTE(review): duplicate words resolve to the last occurrence in range
            for i in range(start_position, end_position):
                if (map_passage[i] == word1):
                    pos1 = i
                if (map_passage[i] == word2):
                    pos2 = i
            adj[pos1][pos2] = 1
        start_position = (end_position + 1)
        adj[(end_position - 1)][min(start_position, (len(word_passage) - 1))] = adj[min(start_position, (len(word_passage) - 1))][(end_position - 1)] = 1
        end_position = start_position
    for i in range(adj.shape[0]):
        adj[i][i] = 1
    return adj
def fg_mask2d(img_2d, thresh):
    """Threshold a 2D image into a foreground mask, then clean it up.

    If the thresholded map contains any foreground at all, keep only its
    largest connected component and fill holes; otherwise return the raw
    (all-background) map unchanged.
    """
    raw_mask = np.float32(img_2d > thresh)
    if raw_mask.max() < 0.999:
        # nothing above threshold: no component to keep
        return raw_mask
    labeled = label(raw_mask)
    assert labeled.max() != 0
    # component sizes excluding background label 0; +1 restores the label id
    largest_component = labeled == (np.argmax(np.bincount(labeled.flat)[1:]) + 1)
    return snm.binary_fill_holes(largest_component)
def main():
    """Interactive MineCraft driver: step the environment with keyboard input
    (via cv2.waitKey) or with random actions, until the episode ends or the
    user quits."""
    env = MineCraft()
    env.set_render(True)
    random_play = False
    minecraft_global_setup()
    obs = env.reset()
    # choose the first action before entering the loop
    if random_play:
        action = np.random.randint(0, env.action_space.n)
    else:
        input_key = cv2.waitKey(0)
        action = env.key_map_to_action[input_key]
    while True:
        (obs, reward, done, info) = env.step(action)
        if done:
            if random_play:
                # persist the finished random episode, then stop
                env.saveWorld('./random_policy_savegame_6.sav')
                break
            else:
                # interactive play continues after a reset
                env.reset()
        if random_play:
            action = np.random.randint(0, env.action_space.n)
        else:
            input_key = cv2.waitKey(0)
            if (input_key == key.J):
                # J: quit without saving
                break
            if (input_key == key.K):
                # K: save the world, then quit
                env.saveWorld('./none_action_savegame.sav')
                break
            action = env.key_map_to_action[input_key]
    cv2.destroyAllWindows()
def get_existing_filenames(path_to_file):
    """Read URLs from `path_to_file` (one per line) and return the extracted
    filenames, skipping lines that yield no filename.

    Bug fix: the original used open()/close() without a context manager, so
    the file handle leaked if a line raised; `with` guarantees closure.
    """
    filenames = []
    with open(path_to_file, 'r') as f:
        for line in f:
            line = line.replace('\n', '')
            filename = extract_filename_from_url(line)
            if (not filename):
                print('>>>get_existing_filenames: Empty line extracted.')
                continue
            filenames.append(filename)
    print(((('>>Read ' + str(len(filenames))) + ' filenames from ') + path_to_file))
    return filenames
def write_results(filename, results, data_type):
    """Dump tracking results to `filename` in MOT or KITTI text format.

    `results` is an iterable of (frame_id, tlwhs, track_ids); boxes with a
    negative track id are skipped.  Raises ValueError for unknown formats.
    """
    formats = {
        'mot': '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n',
        'kitti': '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n',
    }
    if data_type not in formats:
        raise ValueError(data_type)
    save_format = formats[data_type]
    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids in results:
            if data_type == 'kitti':
                frame_id -= 1  # KITTI frame ids are 0-based
            for tlwh, track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                x2, y2 = x1 + w, y1 + h
                f.write(save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h))
    logger.info('save results to {}'.format(filename))
def gptneox_sample_repetition_penalty(ctx: gptneox_context_p, candidates, last_tokens_data, last_tokens_size: c_int, penalty: c_float):
    """Thin ctypes wrapper: apply a repetition penalty to candidate token logits.

    Delegates directly to the native `gptneox_sample_repetition_penalty` symbol;
    argument/return marshalling is assumed to be configured on `_lib` elsewhere.
    """
    return _lib.gptneox_sample_repetition_penalty(ctx, candidates, last_tokens_data, last_tokens_size, penalty)
def train_user_pred(optims, generator, bsize, embed_dim, recom_length, trainSample, validSample, testSample, mode='generator with rec', inner_val_acc_best=None, inner_val_preck_best=None, inner_val_rewd_best=None, inner_loss_best=None, only_rewards=False, n_epochs=10):
    """Train the user-behaviour generator (optionally with the recommendation
    action given) and keep the best model by validation accuracy / prec@k.

    Returns (best_model, best_val_acc, best_val_preck, best_val_reward, best_loss).
    NOTE(review): `best_model = generator` aliases the live model rather than a
    deep copy, so "best" tracks the current weights — confirm this is intended.
    """
    outputdir = 'model_output'
    outputmodelname = 'simu.model.pth'
    lrshrink = 5  # divisor applied to the LR on validation plateaus (via adj_optim)
    minlr = 1e-05  # training stops once the LR would fall below this
    generator_only = True
    action_given = True
    loss_fn_target = nn.CrossEntropyLoss()
    loss_fn_reward = nn.BCEWithLogitsLoss()
    loss_fn_target.size_average = True
    loss_fn_target.to(device)
    loss_fn_reward.size_average = True
    loss_fn_reward.to(device)
    (optim_fn, optim_params) = get_optimizer(optims)
    if (mode == 'generator'):
        params = list(generator.parameters())
        action_given = False
    elif (mode == 'generator with rec'):
        params = list(generator.parameters())
        action_given = True
    else:
        print('No such mode! Select from generator/generator with rec!')
    optimizer = optim_fn(filter((lambda p: p.requires_grad), params), **optim_params)
    # initialize "best so far" trackers unless the caller passed previous values
    if (inner_val_acc_best == None):
        inner_val_acc_best = (- .0)
        inner_val_preck_best = (- .0)
        inner_val_rewd_best = (- .0)
        inner_loss_best = .0
    stop_training = False
    times_no_improvement = 0
    epoch = 1
    eval_type = 'valid'
    best_model = generator
    while ((not stop_training) and (epoch <= n_epochs)):
        if (not only_rewards):
            # one epoch of supervised training
            (train_acc, train_preck, _) = train_pred_each(generator, epoch, trainSample, optimizer, bsize, embed_dim, recom_length, loss_fn_target, loss_fn_reward, device, generator_only, action_given, False)
        print('User model evaluation!')
        (eval_acc, eval_preck, eval_rewd, eval_loss) = evaluate_user(generator, epoch, bsize, (recom_length - 1), validSample, testSample, loss_fn_target, loss_fn_reward, device, eval_type)
        if ((eval_type == 'valid') and (epoch <= n_epochs)):
            if ((eval_acc > inner_val_acc_best) or (eval_preck > inner_val_preck_best)):
                # improvement on either metric: checkpoint and reset patience
                best_model = generator
                print('saving model at epoch {0}'.format(epoch))
                if (not os.path.exists(outputdir)):
                    os.makedirs(outputdir)
                torch.save(generator.state_dict(), os.path.join(outputdir, ('irecGan_gen3.' + outputmodelname)))
                inner_val_acc_best = eval_acc
                inner_val_preck_best = eval_preck
                inner_val_rewd_best = eval_rewd
                inner_loss_best = eval_loss
                times_no_improvement = 0
            else:
                times_no_improvement += 1
                # shrink the LR and possibly stop after repeated plateaus
                stop_training = adj_optim(optims, optimizer, minlr, lrshrink, stop_training, times_no_improvement)
        epoch += 1
    return (best_model, inner_val_acc_best, inner_val_preck_best, inner_val_rewd_best, inner_loss_best)
class RuleTrimmer():
    """Shrinks the numeric intervals in rule antecedents to the tightest range
    actually observed among instances correctly covered by each rule."""

    def __init__(self, quantitative_dataframe):
        self.__dataframe = quantitative_dataframe

    def transform(self, rules):
        """Return trimmed copies of `rules`; the passed rules are not modified."""
        copied_rules = [rule.copy() for rule in rules]
        trimmed = [self.__trim(rule) for rule in copied_rules]
        return trimmed

    def __trim(self, rule):
        """Tighten each numeric interval literal of `rule` in place and return it."""
        (covered_by_antecedent_mask, covered_by_consequent_mask) = self.__dataframe.find_covered_by_rule_mask(rule)
        covered_by_rule_mask = (covered_by_antecedent_mask & covered_by_consequent_mask)
        correctly_covered_by_r = self.__dataframe.mask(covered_by_rule_mask)
        antecedent = rule.antecedent
        for (idx, literal) in enumerate(antecedent):
            (attribute, interval) = literal
            if (type(interval) == str):
                continue  # categorical literal: nothing to trim
            current_column = correctly_covered_by_r[[attribute]].values
            if (not current_column.any()):
                continue  # no covered values for this attribute
            # bug fix: np.asscalar was removed in NumPy 1.23 — use ndarray.item()
            minv = min(current_column).item()
            maxv = max(current_column).item()
            new_interval = Interval(minv, maxv, True, True)
            antecedent[idx] = (attribute, new_interval)
        return rule
def squared_euclidean_distance(x: Tensor, y: Tensor) -> Tensor:
    """Pairwise squared Euclidean distances between rows of x (n, d) and y (m, d).

    Uses the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, then clamps
    tiny negative values caused by floating-point cancellation to zero.
    Returns an (n, m) tensor.
    """
    sq_norm_x = x.pow(2).sum(dim=1).view((- 1), 1)
    sq_norm_y = y.pow(2).sum(dim=1).view(1, (- 1))
    cross_terms = torch.mm(x, torch.transpose(y, 0, 1))
    dist = sq_norm_x + sq_norm_y - 2.0 * cross_terms
    return torch.clamp(dist, 0.0, np.inf)
# NOTE(review): the bare `_config` below is almost certainly a stripped
# decorator (e.g. sacred's `@_config`); confirm against the original source.
_config
def model_lifelong_independent_resnet_taskonomy():
    # Config function: `cfg` is assigned but never returned — presumably the
    # config framework captures it from the function's locals; verify.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'side_class': 'TaskonomyEncoder', 'side_kwargs': {'eval_only': False, 'normalize_outputs': False}, 'side_weights_path': '/mnt/models/curvature_encoder.dat', 'normalize_pre_transfer': True}}}
class XLNetTokenizer():
    # Placeholder ("dummy object") class: raises an informative error via
    # requires_sentencepiece when the sentencepiece backend is not installed.
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)
    # NOTE(review): in the upstream dummy-object pattern this is a classmethod;
    # here it only runs when called on an instance — confirm intent.
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)
def get_dataloaders(config: Namespace, train: bool=True) -> Tuple[(DataLoader, DataLoader)]:
    """Build (train, val) CamCAN dataloaders, or (big, small) anomaly test
    dataloaders (BraTS/ATLAS), driven entirely by fields on `config`.

    NOTE(review): assumes `config` carries percentage / seed / normal_split /
    anomal_split / sequence / brats_t1 / shuffle — confirm against the caller.
    """
    if train:
        config.return_volumes = False
        # percentage != 100 -> subsample at the *volume* level, so volumes are needed
        if (config.percentage != 100):
            config.return_volumes = True
        slices = get_camcan_slices(config)
        if (config.percentage != 100):
            if (config.percentage == (- 1)):
                # percentage == -1: train on a single volume, repeated 100x;
                # which volume (first/last) is selected by the seed
                if (config.seed == 10):
                    slices = slices[0]
                else:
                    slices = slices[(- 1)]
                slices = ([slices] * 100)
                slices = np.concatenate(slices, axis=0)
            else:
                # take the requested fraction from the front or back depending on seed
                if (config.seed == 10):
                    slices = slices[:int((len(slices) * (config.percentage / 100)))]
                else:
                    slices = slices[(- int((len(slices) * (config.percentage / 100)))):]
                slices = np.concatenate(slices, axis=0)
        # drop empty (all-zero) slices
        slices = slices[(np.sum(slices, axis=(1, 2, 3)) > 0)]
        split_idx = int((len(slices) * config.normal_split))
        trainset = NormalDataset(slices[:split_idx], config)
        valset = NormalDataset(slices[split_idx:], config)
        train_dl = GenericDataloader(trainset, config)
        val_dl = GenericDataloader(valset, config)
        return (train_dl, val_dl)
    elif (not train):
        config.return_volumes = True
        # pick the anomaly dataset by MR sequence
        if (config.sequence == 't1'):
            if config.brats_t1:
                (slices, segmentations) = get_brats_slices(config)
            else:
                (slices, segmentations) = get_atlas_slices(config)
        elif (config.sequence == 't2'):
            (slices, segmentations) = get_brats_slices(config)
        split_idx = int((len(slices) * config.anomal_split))
        slices_big = np.concatenate(slices[:split_idx], axis=0)
        slices_small = np.concatenate(slices[split_idx:], axis=0)
        seg_big = np.concatenate(segmentations[:split_idx], axis=0)
        seg_small = np.concatenate(segmentations[split_idx:], axis=0)
        # keep only non-empty slices (and their segmentations) in both splits
        non_zero_idx_s = (np.sum(slices_small, axis=(1, 2, 3)) > 0)
        slices_small = slices_small[non_zero_idx_s]
        seg_small = seg_small[non_zero_idx_s]
        non_zero_idx_b = (np.sum(slices_big, axis=(1, 2, 3)) > 0)
        slices_big = slices_big[non_zero_idx_b]
        seg_big = seg_big[non_zero_idx_b]
        # debug print: report nearly-empty slices that slipped through the filter
        for i in slices_big:
            if (np.count_nonzero(i) < 5):
                print(np.count_nonzero(i))
        big = AnomalDataset([slices_big, seg_big], config)
        small = AnomalDataset([slices_small, seg_small], config)
        big_test_dl = GenericDataloader(big, config, shuffle=config.shuffle)
        small_test_dl = GenericDataloader(small, config, shuffle=config.shuffle)
        del slices, segmentations, slices_small, seg_small
        return (big_test_dl, small_test_dl)
class Arcface(nn.Module):
    """ArcFace margin head (additive angular margin softmax).

    Produces ``s * cos(theta + m)`` logits for the target class and
    ``s * cos(theta)`` for the others, with optional label smoothing.

    Args:
        in_features: size of the input embedding.
        out_features: number of identity classes.
        s: feature scale applied to the final logits.
        m: additive angular margin (radians).
        easy_margin: if True, apply the margin only where cos(theta) > 0.
        ls_eps: label-smoothing epsilon mixed into the one-hot targets.
    """
    def __init__(self, in_features, out_features, s=30.0, m=0.3, easy_margin=False, ls_eps=0.0):
        super(Arcface, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.s = s
        self.m = m
        self.ls_eps = ls_eps
        self.weight = Parameter(torch.FloatTensor(out_features, in_features))
        nn.init.xavier_uniform_(self.weight)
        self.easy_margin = easy_margin
        # precomputed constants for cos(theta + m)
        self.cos_m = math.cos(m)
        self.sin_m = math.sin(m)
        # threshold beyond which theta + m would exceed pi; use a linear
        # penalty (cosine - mm) there to keep the loss monotonic
        self.th = math.cos((math.pi - m))
        self.mm = (math.sin((math.pi - m)) * m)
    def forward(self, input, label):
        """input: (N, in_features) embeddings; label: (N,) class indices.
        Returns (N, out_features) margin-adjusted, scaled logits."""
        cosine = F.linear(F.normalize(input), F.normalize(self.weight))
        sine = torch.sqrt((1.0 - torch.pow(cosine, 2)))
        # cos(theta + m) = cos(theta)cos(m) - sin(theta)sin(m)
        phi = ((cosine * self.cos_m) - (sine * self.sin_m))
        phi = phi.type_as(cosine)
        if self.easy_margin:
            phi = torch.where((cosine > 0), phi, cosine)
        else:
            phi = torch.where((cosine > self.th), phi, (cosine - self.mm))
        # bug fix: allocate the one-hot mask on the same device as the logits
        # (was hard-coded to 'cuda', which crashed on CPU inputs)
        one_hot = torch.zeros(cosine.size(), device=cosine.device)
        one_hot.scatter_(1, label.view((- 1), 1).long(), 1)
        if (self.ls_eps > 0):
            one_hot = (((1 - self.ls_eps) * one_hot) + (self.ls_eps / self.out_features))
        output = ((one_hot * phi) + ((1.0 - one_hot) * cosine))
        output *= self.s
        return output
def get_arguments():
    """Parse the nilm-project command line.

    Value options take a string; flag options default to False and become
    True when present.  Returns the parsed argparse Namespace.
    """
    parser = ArgumentParser(description='nilm-project')
    # (option name, is boolean flag) in the original declaration order
    option_spec = [
        ('--settings', False),
        ('--appliance', False),
        ('--path', False),
        ('--train', True),
        ('--tune', True),
        ('--epochs', False),
        ('--disable-plot', True),
        ('--disable-random', True),
    ]
    for name, is_flag in option_spec:
        if is_flag:
            parser.add_argument(name, action='store_true')
        else:
            parser.add_argument(name)
    return parser.parse_args()
class TensorflowSavedModelModel(TensorflowBaseModel):
    """TensorFlow SavedModel wrapper: lazily materializes an AutoTrackable
    object from the session graph and offers weight/sparsity introspection.

    NOTE(review): the two `model` definitions below were plainly meant to be a
    property getter/setter pair — as originally written the second `def model`
    silently shadowed the first; the decorators are restored here.  Confirm
    against the upstream source.
    """
    def __init__(self, model, **kwargs):
        super(TensorflowSavedModelModel, self).__init__(model, **kwargs)
        # cached tf.saved_model.load() result (AutoTrackable), built lazily
        self._auto_trackable = None

    def get_all_weight_names(self):
        """Return the indices of keras layers that own at least one weight."""
        import tensorflow as tf
        names = []
        for (index, layer) in enumerate(tf.keras.models.load_model(self._model).layers):
            if len(layer.weights):
                names.append(index)
        return names

    def update_weights(self, tensor_name, new_tensor):
        # weight mutation is not supported for SavedModel; intentionally a no-op
        pass

    def get_weight(self, tensor_name):
        """Look up a weight tensor by name in the base-class weights mapping."""
        return self.weights[tensor_name]

    @property
    def model(self):
        """Lazily export the session graph to a temporary SavedModel directory,
        reload it as an AutoTrackable, cache and return it."""
        if self._auto_trackable:
            return self._auto_trackable
        # unique scratch dir under the default workspace
        root = os.path.abspath(os.path.expanduser(cfg.default_workspace))
        root += str(time.time())
        if os.path.exists(root):
            shutil.rmtree(root)
        os.makedirs(root, exist_ok=True)
        if (not self._sess):
            self._load_sess(self._model, **self.kwargs)
        (_, builder) = self.build_saved_model(root)
        builder.save()
        model = tf.saved_model.load(root)
        shutil.rmtree(root)
        self._auto_trackable = model
        return model

    @model.setter
    def model(self, input_model):
        # allow callers to inject an already-loaded AutoTrackable
        self._auto_trackable = input_model

    def report_sparsity(self):
        """Tabulate per-layer sparsity of 2D/4D weights.

        Returns (DataFrame, total_sparsity_percent)."""
        import numpy as np
        import pandas as pd
        import tensorflow as tf
        df = pd.DataFrame(columns=['Name', 'Shape', 'NNZ (dense)', 'NNZ (sparse)', 'Sparsity(%)'])
        pd.set_option('display.precision', 2)
        # only dense (2D) and conv (4D) kernels count toward sparsity
        param_dims = [2, 4]
        params_size = 0
        sparse_params_size = 0
        for (index, layer) in enumerate(tf.keras.models.load_model(self._model).layers):
            if (not len(layer.weights)):
                continue
            weights = layer.get_weights()[0]
            if (weights.ndim in param_dims):
                (param_size, sparse_param_size, dense_param_size) = compute_sparsity(weights)
                density = (dense_param_size / param_size)
                params_size += param_size
                sparse_params_size += sparse_param_size
                df.loc[len(df.index)] = [index, list(weights.shape), dense_param_size, sparse_param_size, ((1 - density) * 100)]
        total_sparsity = ((sparse_params_size / params_size) * 100)
        df.loc[len(df.index)] = ['Total sparsity:', '-', params_size, sparse_params_size, total_sparsity]
        return (df, total_sparsity)

    def build_saved_model(self, root=None):
        """Export the current session graph as a SavedModel under `root`.

        Returns (root, builder); the caller is responsible for builder.save()."""
        if (not root):
            root = cfg.default_workspace
        root = os.path.abspath(os.path.expanduser(root))
        if os.path.exists(root):
            import shutil
            shutil.rmtree(root)
        os.makedirs(root, exist_ok=True)
        from tensorflow.python.saved_model import signature_constants, tag_constants
        from neural_compressor.adaptor.tf_utils.util import get_tensor_by_name
        builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(root)
        sigs = {}
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            tf.import_graph_def(self.graph.as_graph_def(), name='')
            g = tf.compat.v1.get_default_graph()
            inp = [get_tensor_by_name(g, x) for x in self._input_tensor_names]
            out = [get_tensor_by_name(g, x) for x in self._output_tensor_names]
            sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = tf.compat.v1.saved_model.signature_def_utils.predict_signature_def({k: v for (k, v) in zip(self._input_tensor_names, inp)}, {k: v for (k, v) in zip(self._output_tensor_names, out)})
            builder.add_meta_graph_and_variables(sess, [tag_constants.SERVING], signature_def_map=sigs)
        return (root, builder)

    def save(self, root=None):
        """Build and persist the SavedModel, logging the destination."""
        (root, builder) = self.build_saved_model(root)
        builder.save()
        logger.info('Save quantized model to {}.'.format(root))
def get_possible_iterations(logdir, population_i):
    """Return the sorted iteration numbers for which a checkpoint of
    population `population_i` exists under `logdir`.

    Note: sorting goes through a numpy array, so the returned list contains
    numpy integer scalars (matching the original behaviour).
    """
    search_prefix = os.path.join(logdir, '{}{}{}-{}'.format(CHECKPOINT_PATH_PREFIX, CHECKPOINT_PATH_POPULATION_PREFIX, population_i, CHECKPOINT_PATH_ITERATION_PREFIX))
    # iteration number is the integer right after the prefix, before the next '-'
    found = [int(path.split(search_prefix)[1].split('-')[0]) for path in glob.glob(search_prefix + '*')]
    iterations = np.asarray(found)
    iterations.sort()
    return list(iterations)
def test_resnet_backbone():
    """Exercise ResNet/ResNetV1d: invalid configs, norm_eval, frozen stages,
    plugins, zero-init residual, and forward feature-map shapes."""

    def _check_outputs(backbone, img_size, channels):
        # Forward a dummy batch and verify one feature map per expected
        # channel count; spatial size starts at img_size/4 and halves per stage.
        feats = backbone(torch.randn(1, 3, img_size, img_size))
        assert len(feats) == len(channels)
        side = img_size // 4
        for feat, num_channels in zip(feats, channels):
            assert feat.shape == torch.Size([1, num_channels, side, side])
            side //= 2

    def _check_frozen_layers(backbone, num_frozen):
        # Frozen residual stages keep BN in eval mode and params grad-free.
        for stage in range(1, num_frozen + 1):
            res_layer = getattr(backbone, 'layer{}'.format(stage))
            for mod in res_layer.modules():
                if isinstance(mod, _BatchNorm):
                    assert mod.training is False
            for param in res_layer.parameters():
                assert param.requires_grad is False

    # Invalid configurations must be rejected at construction time.
    with pytest.raises(KeyError):
        ResNet(20)  # unsupported depth
    with pytest.raises(AssertionError):
        ResNet(50, num_stages=0)
    with pytest.raises(AssertionError):
        # len(stage_with_dcn) must match num_stages
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        ResNet(50, dcn=dcn, stage_with_dcn=(True,))
    with pytest.raises(AssertionError):
        # len(stages) inside a plugin must match num_stages
        plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)),
                        stages=(False, True, True), position='after_conv3')]
        ResNet(50, plugins=plugins)
    with pytest.raises(AssertionError):
        ResNet(18, num_stages=5)
    with pytest.raises(AssertionError):
        ResNet(18, strides=(1,), dilations=(1, 1), num_stages=3)
    with pytest.raises(TypeError):
        model = ResNet(18, pretrained=0)
        model.init_weights()
    with pytest.raises(AssertionError):
        ResNet(50, style='tensorflow')

    # norm_eval=True keeps every norm layer in eval mode even after train().
    model = ResNet(18, norm_eval=True)
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), False)

    # Same check with torchvision pretrained weights.
    model = ResNet(depth=18, norm_eval=True, pretrained='torchvision://resnet18')
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), False)

    # frozen_stages freezes the stem plus the first N residual stages.
    frozen_stages = 1
    model = ResNet(18, frozen_stages=frozen_stages)
    model.init_weights()
    model.train()
    assert model.norm1.training is False
    for frozen_mod in [model.conv1, model.norm1]:
        for param in frozen_mod.parameters():
            assert param.requires_grad is False
    _check_frozen_layers(model, frozen_stages)

    # ResNetV1d uses a 9-module deep stem; it must be frozen as well.
    model = ResNetV1d(depth=18, frozen_stages=frozen_stages)
    assert len(model.stem) == 9
    model.init_weights()
    model.train()
    check_norm_state(model.stem, False)  # return value ignored, as in original
    for param in model.stem.parameters():
        assert param.requires_grad is False
    _check_frozen_layers(model, frozen_stages)

    # Plain ResNet-18 forward.
    model = ResNet(18)
    model.init_weights()
    model.train()
    _check_outputs(model, 224, [64, 128, 256, 512])

    # Default norm layers are BatchNorm.
    model = ResNet(18)
    for mod in model.modules():
        if is_norm(mod):
            assert isinstance(mod, _BatchNorm)
    model.init_weights()
    model.train()
    _check_outputs(model, 224, [64, 128, 256, 512])

    # Restricting out_indices yields fewer feature maps.
    model = ResNet(18, out_indices=(0, 1, 2))
    model.init_weights()
    model.train()
    _check_outputs(model, 112, [64, 128, 256])

    # Gradient checkpointing is propagated to every block (the original test
    # repeats this case twice verbatim, so keep two passes).
    for _ in range(2):
        model = ResNet(18, with_cp=True)
        for mod in model.modules():
            if is_block(mod):
                assert mod.with_cp
        model.init_weights()
        model.train()
        _check_outputs(model, 224, [64, 128, 256, 512])

    # GroupNorm can be swapped in via norm_cfg.
    model = ResNet(18, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
    for mod in model.modules():
        if is_norm(mod):
            assert isinstance(mod, GroupNorm)
    model.init_weights()
    model.train()
    _check_outputs(model, 224, [64, 128, 256, 512])

    # Mixed plugins: attention / non-local / context blocks attached per stage.
    plugins = [
        dict(cfg=dict(type='GeneralizedAttention', spatial_range=(- 1),
                      num_heads=8, attention_type='0010', kv_stride=2),
             stages=(False, True, True, True), position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)),
             stages=(False, True, True, False), position='after_conv3'),
    ]
    model = ResNet(50, plugins=plugins)
    for mod in model.layer1.modules():
        if is_block(mod):
            assert not hasattr(mod, 'context_block')
            assert not hasattr(mod, 'gen_attention_block')
            assert mod.nonlocal_block.in_channels == 64
    for mod in model.layer2.modules():
        if is_block(mod):
            assert mod.nonlocal_block.in_channels == 128
            assert mod.gen_attention_block.in_channels == 128
            assert mod.context_block.in_channels == 512
    for mod in model.layer3.modules():
        if is_block(mod):
            assert mod.nonlocal_block.in_channels == 256
            assert mod.gen_attention_block.in_channels == 256
            assert mod.context_block.in_channels == 1024
    for mod in model.layer4.modules():
        if is_block(mod):
            assert mod.nonlocal_block.in_channels == 512
            assert mod.gen_attention_block.in_channels == 512
            assert not hasattr(mod, 'context_block')
    model.init_weights()
    model.train()
    _check_outputs(model, 224, [256, 512, 1024, 2048])

    # Two ContextBlock plugins at the same position get numeric postfixes.
    plugins = [
        dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=1),
             stages=(False, True, True, False), position='after_conv3'),
        dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=2),
             stages=(False, True, True, False), position='after_conv3'),
    ]
    model = ResNet(50, plugins=plugins)
    for mod in model.layer1.modules():
        if is_block(mod):
            assert not hasattr(mod, 'context_block')
            assert not hasattr(mod, 'context_block1')
            assert not hasattr(mod, 'context_block2')
    for mod in model.layer2.modules():
        if is_block(mod):
            assert not hasattr(mod, 'context_block')
            assert mod.context_block1.in_channels == 512
            assert mod.context_block2.in_channels == 512
    for mod in model.layer3.modules():
        if is_block(mod):
            assert not hasattr(mod, 'context_block')
            assert mod.context_block1.in_channels == 1024
            assert mod.context_block2.in_channels == 1024
    for mod in model.layer4.modules():
        if is_block(mod):
            assert not hasattr(mod, 'context_block')
            assert not hasattr(mod, 'context_block1')
            assert not hasattr(mod, 'context_block2')
    model.init_weights()
    model.train()
    _check_outputs(model, 224, [256, 512, 1024, 2048])

    # zero_init_residual zeroes the last norm layer of every residual block.
    model = ResNet(18, zero_init_residual=True)
    model.init_weights()
    for mod in model.modules():
        if isinstance(mod, Bottleneck):
            assert all_zeros(mod.norm3)
        elif isinstance(mod, BasicBlock):
            assert all_zeros(mod.norm2)
    model.train()
    _check_outputs(model, 224, [64, 128, 256, 512])

    # ResNetV1d forward.
    model = ResNetV1d(depth=18)
    model.init_weights()
    model.train()
    _check_outputs(model, 224, [64, 128, 256, 512])
def make_data_creator(refs):
    """Build a data-creator callable for the trainer API.

    The returned callable matches the ``(config, batch_size)`` creator
    signature but ignores both arguments and always returns the captured
    ``refs`` object.
    """
    return lambda config, batch_size: refs
def read_jsonl(path: str, key: str=None):
    """Load a JSON-lines file.

    Args:
        path: File path; ``~`` is expanded.
        key: Optional field name. When given, records are sorted by that
            field and returned as a dict mapping ``record[key] -> record``
            (later records win on duplicate keys).

    Returns:
        list of parsed records, or a dict keyed by ``key`` when provided.
    """
    data = []
    with open(os.path.expanduser(path)) as f:
        for line in f:
            # Skip blank lines. Lines read from a file keep their trailing
            # newline, so a bare `if not line` never fires mid-file and
            # json.loads('\n') would raise on an empty line.
            if (not line.strip()):
                continue
            data.append(json.loads(line))
    if (key is not None):
        data.sort(key=(lambda x: x[key]))
        data = {item[key]: item for item in data}
    return data
def get_real_epoch_or_iter(config):
    """Return the effective training length of an mmcv config.

    Iteration-based runners report ``max_iters`` directly; epoch-based
    runners report ``max_epochs`` scaled by the repeat factor when the
    training set is a RepeatDataset.
    """
    cfg = mmcv.Config.fromfile('./configs/' + config)
    if cfg.runner.type != 'EpochBasedRunner':
        return cfg.runner.max_iters
    epochs = cfg.runner.max_epochs
    if cfg.data.train.type == 'RepeatDataset':
        epochs *= cfg.data.train.times
    return epochs
def fewShot(paired_sample, n_ways, n_shots, n_unlabel, cnt_query, coco=False, cfg=None, labels=None):
    """Assemble one N-way / K-shot segmentation episode from a flat sample list.

    ``paired_sample`` is laid out per way i as: ``n_shots`` support samples,
    ``n_unlabel`` unlabeled samples, then ``cnt_query[i]`` query samples.

    Args:
        paired_sample: flat list of sample dicts (keys used here include
            'image', 'image_t', 'label', 'scribble', 'inst', 'basic_class_id',
            and optionally 'segment').
        n_ways / n_shots / n_unlabel: episode geometry.
        cnt_query: number of query samples per way.
        coco: if True, 'label' is a dict of per-class label maps.
        cfg: config dict; only ``cfg['segments']`` is read here.
        labels: unused in this function.  # NOTE(review): dead parameter?

    Returns:
        dict with support/query/unlabel images, re-indexed labels and masks.
    """
    # cumsum_idx[i] is the start offset of way i in paired_sample.
    cumsum_idx = np.cumsum(([0] + [((n_shots + n_unlabel) + x) for x in cnt_query]))
    # The first sample of each way carries that way's class id.
    class_ids = [paired_sample[cumsum_idx[i]]['basic_class_id'] for i in range(n_ways)]
    support_images = [[paired_sample[(cumsum_idx[i] + j)]['image'] for j in range(n_shots)] for i in range(n_ways)]
    support_images_t = [[paired_sample[(cumsum_idx[i] + j)]['image_t'] for j in range(n_shots)] for i in range(n_ways)]
    if coco:
        # COCO: pick the per-class label map for this way's class.
        support_labels = [[paired_sample[(cumsum_idx[i] + j)]['label'][class_ids[i]] for j in range(n_shots)] for i in range(n_ways)]
    else:
        support_labels = [[paired_sample[(cumsum_idx[i] + j)]['label'] for j in range(n_shots)] for i in range(n_ways)]
    support_scribbles = [[paired_sample[(cumsum_idx[i] + j)]['scribble'] for j in range(n_shots)] for i in range(n_ways)]
    support_insts = [[paired_sample[(cumsum_idx[i] + j)]['inst'] for j in range(n_shots)] for i in range(n_ways)]
    # Query samples are taken from the tail of each way's span (reverse order).
    query_images = [paired_sample[((cumsum_idx[(i + 1)] - j) - 1)]['image'] for i in range(n_ways) for j in range(cnt_query[i])]
    query_images_t = [paired_sample[((cumsum_idx[(i + 1)] - j) - 1)]['image_t'] for i in range(n_ways) for j in range(cnt_query[i])]
    if coco:
        query_labels = [paired_sample[((cumsum_idx[(i + 1)] - j) - 1)]['label'][class_ids[i]] for i in range(n_ways) for j in range(cnt_query[i])]
    else:
        query_labels = [paired_sample[((cumsum_idx[(i + 1)] - j) - 1)]['label'] for i in range(n_ways) for j in range(cnt_query[i])]
    if cfg['segments']:
        query_segment = [paired_sample[((cumsum_idx[(i + 1)] - j) - 1)]['segment'] for i in range(n_ways) for j in range(cnt_query[i])]
    # Per query image: sorted episode-local class indices present in its label
    # (0 = background, j+1 = way j).
    query_cls_idx = [sorted(([0] + [(class_ids.index(x) + 1) for x in (set(np.unique(query_label)) & set(class_ids))])) for query_label in query_labels]
    support_mask = [[getMask(support_labels[way][shot], support_scribbles[way][shot], class_ids[way], class_ids) for shot in range(n_shots)] for way in range(n_ways)]
    support_labels_base = suppBaseOrder(cfg, support_labels)
    query_labels_base = baseOrder(cfg, query_labels)
    # Re-index query labels to episode-local ids: keep ignore=255, map way j's
    # class id to j+1, everything else stays 0 (background).
    query_labels_tmp = [torch.zeros_like(x) for x in query_labels]
    for (i, query_label_tmp) in enumerate(query_labels_tmp):
        query_label_tmp[(query_labels[i] == 255)] = 255
        for j in range(n_ways):
            query_label_tmp[(query_labels[i] == class_ids[j])] = (j + 1)
    # query_masks[i][0] is the background mask; one foreground mask is appended
    # per class actually present in the query image.
    query_masks = [[torch.where((query_label == 0), torch.ones_like(query_label), torch.zeros_like(query_label))[(None, ...)]] for query_label in query_labels]
    for (i, query_label) in enumerate(query_labels):
        for idx in query_cls_idx[i][1:]:
            mask = torch.where((query_label == class_ids[(idx - 1)]), torch.ones_like(query_label), torch.zeros_like(query_label))[(None, ...)]
            query_masks[i].append(mask)
    if (n_unlabel > 0):
        assert (n_unlabel > 0), 'More unlabel images'
        # Unlabeled samples sit right after the support samples of each way.
        cumsum_unlabel_idx = cumsum_idx.copy()
        cumsum_unlabel_idx[:n_ways] += n_shots
        unlabel_images = [[paired_sample[(cumsum_unlabel_idx[i] + j)]['image'] for j in range(n_unlabel)] for i in range(n_ways)]
        unlabel_images_t = [[paired_sample[(cumsum_unlabel_idx[i] + j)]['image_t'] for j in range(n_unlabel)] for i in range(n_ways)]
        if coco:
            unlabel_labels = [[paired_sample[(cumsum_unlabel_idx[i] + j)]['label'][class_ids[i]] for j in range(n_unlabel)] for i in range(n_ways)]
        else:
            unlabel_labels = [[paired_sample[(cumsum_unlabel_idx[i] + j)]['label'] for j in range(n_unlabel)] for i in range(n_ways)]
        if cfg['segments']:
            unlabel_segment = [[paired_sample[(cumsum_unlabel_idx[i] + j)]['segment'] for j in range(n_unlabel)] for i in range(n_ways)]
        # NOTE(review): when n_unlabel > 0 and cfg['segments'] is falsy,
        # `unlabel_segment` and `query_segment` are never bound but are
        # referenced in the return dict below -- this path appears to assume
        # cfg['segments'] is always set in that configuration; confirm.
        # Same episode-local re-indexing as for query labels.
        unlabel_labels_tmp = [[torch.zeros_like(y) for y in x] for x in unlabel_labels]
        for (i, unlabel_label_tmp) in enumerate(unlabel_labels_tmp):
            for (k, tmp) in enumerate(unlabel_label_tmp):
                tmp[(unlabel_labels[i][k] == 255)] = 255
                for j in range(n_ways):
                    tmp[(unlabel_labels[i][k] == class_ids[j])] = (j + 1)
    else:
        assert (n_unlabel == 0), 'the number of unlabel images must be zero'
        # No unlabeled pool: alias the query data as placeholders.
        unlabel_images = query_images
        unlabel_images_t = query_images_t
        unlabel_labels_tmp = query_labels_tmp
        unlabel_spix = query_labels_tmp  # NOTE(review): assigned but never used
        unlabel_segment = query_labels_tmp
        query_segment = query_labels_tmp
    img_name = str(class_ids)
    return {'class_ids': class_ids, 'support_images_t': support_images_t, 'support_images': support_images, 'support_mask': support_mask, 'support_inst': support_insts, 'support_labels_base': support_labels_base, 'query_images_t': query_images_t, 'query_images': query_images, 'query_labels': query_labels_tmp, 'query_masks': query_masks, 'query_cls_idx': query_cls_idx, 'query_labels_base': query_labels_base, 'query_segment': query_segment, 'img_name': img_name, 'unlabel_images_t': unlabel_images_t, 'unlabel_images': unlabel_images, 'unlabel_labels': unlabel_labels_tmp, 'unlabel_segment': unlabel_segment, 'cnt_query': cnt_query}
class ArithExpNode():
    """Leaf node of an arithmetic expression tree.

    A leaf is either a literal (``type == 'CONSTANT'``) or a symbolic
    argument rendered as ``s<idx>``. The bookkeeping counters
    (``opNum``, ``cntDiv``, ``cntMinMax``, ``cntConst``, ``ArgSet``,
    ``maxarg``) mirror the attributes of composite nodes.
    """

    def __init__(self, type=None, value=None):
        self.type = type
        self.value = value
        self.opNum = 0
        self.cntDiv = 0
        self.cntMinMax = 0
        is_const = (type == 'CONSTANT')
        # Bitmask of referenced argument indices (empty for constants).
        self.ArgSet = 0 if is_const else (1 << value)
        self.cntConst = 1 if is_const else 0
        self.maxarg = -self.value if is_const else self.value

    def is_simple(self):
        """A leaf is always simple."""
        return True

    def display(self):
        """Render as source text: the literal, or ``s<idx>``."""
        if self.type == 'CONSTANT':
            return str(self.value)
        return 's' + str(self.value)

    def evaluate(self, ArgValue) -> int:
        """Evaluate against a positional argument list."""
        if self.type == 'CONSTANT':
            return self.value
        if self.value >= len(ArgValue):
            raise Exception('No argument value')
        return ArgValue[self.value]

    def discrete_evaluate(self, ArgValue, enableCache) -> int:
        """Evaluate against a mapping keyed ``'s<idx>'`` (cache flag unused)."""
        if self.type == 'CONSTANT':
            return self.value
        key = f's{self.value}'
        if key in ArgValue:
            return ArgValue[key]
        raise Exception('No argument value')

    def nnsmith_evaluate(self, ArgValue):
        """Same lookup semantics as discrete_evaluate, minus the cache flag."""
        if self.type == 'CONSTANT':
            return self.value
        key = f's{self.value}'
        if key in ArgValue:
            return ArgValue[key]
        raise Exception('No argument value')
class SupConResNet(nn.Module):
    """Backbone encoder plus a projection head for supervised contrastive
    learning. Forward returns L2-normalized projections."""

    def __init__(self, name='resnet50', head='mlp', feat_dim=128):
        super(SupConResNet, self).__init__()
        model_fun, dim_in = model_dict[name]
        self.encoder = model_fun()
        if head == 'mlp':
            # Two-layer projection head with a ReLU in between.
            self.head = nn.Sequential(
                nn.Linear(dim_in, dim_in),
                nn.ReLU(inplace=True),
                nn.Linear(dim_in, feat_dim),
            )
        elif head == 'linear':
            self.head = nn.Linear(dim_in, feat_dim)
        else:
            raise NotImplementedError('head not supported: {}'.format(head))

    def forward(self, x):
        """Encode ``x`` and return the unit-norm projection (dim=1)."""
        embedding = self.encoder(x)
        return F.normalize(self.head(embedding), dim=1)
def _test():
    """Smoke-test the SENet model zoo: build each model, count parameters,
    and run a forward/backward pass with a dummy batch.

    The reference parameter counts were missing from the source (the
    ``weight_count == `` comparisons were left empty, which does not even
    parse). Entries in ``expected_weight_counts`` stay ``None`` until the
    reference numbers are filled in; ``None`` disables that model's count
    check while keeping the rest of the smoke test working.
    """
    import torch
    pretrained = False
    models = [senet16, senet28, senet40, senet52, senet103, senet154]
    # TODO(review): restore the expected per-model parameter counts here.
    expected_weight_counts = {model: None for model in models}
    for model in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        expected = expected_weight_counts[model]
        assert (expected is None) or (weight_count == expected)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
def proxylessnas_gpu(**kwargs):
    """Build the GPU-optimized ProxylessNAS variant.

    All keyword arguments are forwarded unchanged to ``get_proxylessnas``.

    Returns:
        The constructed network.
    """
    return get_proxylessnas(version='gpu', model_name='proxylessnas_gpu', **kwargs)
# NOTE(review): `_task('speech_to_text')` looks like the residue of a stripped
# `@register_task('speech_to_text')` decorator in this dump -- confirm upstream.
_task('speech_to_text')
class SpeechToTextTask(LegacyFairseqTask):
    """Fairseq task for speech-to-text (ASR / speech translation), with
    optional auxiliary multitask decoders configured via YAML."""

    def add_args(cls, parser):
        # NOTE(review): takes `cls` -- presumably a @classmethod whose
        # decorator was stripped in this dump; confirm.
        """Register the task's command-line arguments."""
        parser.add_argument('data', help='manifest root path')
        parser.add_argument('--config-yaml', type=str, default='config.yaml', help='Configuration YAML filename (under manifest root)')
        parser.add_argument('--multitask-config-yaml', type=str, default=None, help='Configuration YAML filename for the multitasks (under manifest root)')
        parser.add_argument('--max-source-positions', default=6000, type=int, metavar='N', help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence')

    def __init__(self, args, tgt_dict):
        """Load the data config, speaker map and optional multitask setup."""
        super().__init__(args)
        self.tgt_dict = tgt_dict
        self.data_cfg = S2TDataConfig((Path(args.data) / args.config_yaml))
        self.speaker_to_id = self._get_speaker_to_id()
        # The two lang-tag options are mutually exclusive: enabling both
        # would insert the target-language token twice.
        if (self.data_cfg.prepend_tgt_lang_tag and self.data_cfg.prepend_bos_and_append_tgt_lang_tag):
            raise ValueError('Please set only one of the two options to avoid adding target token multiple times')
        self.multitask_tasks = {}
        self.tgt_dict_mt = None
        self.eos_token_mt = None
        if (getattr(args, 'multitask_config_yaml', None) is not None):
            multitask_cfg = MultitaskConfig((Path(args.data) / args.multitask_config_yaml))
            first_pass_task_idx = multitask_cfg.first_pass_decoder_task_index
            for (i, (task_name, task_config)) in enumerate(multitask_cfg.get_all_tasks().items()):
                task_obj = DummyMultiTask(task_config, task_config.tgt_dict, first_pass=(i == first_pass_task_idx))
                self.multitask_tasks[task_name] = task_obj
                if task_obj.is_first_pass_decoder:
                    # Remember the first-pass decoder's dictionary / EOS so the
                    # dual-decoder sequence generator can use them.
                    self.tgt_dict_mt = task_obj.target_dictionary
                    if task_config.prepend_bos_and_append_tgt_lang_tag:
                        self.eos_token_mt = task_config.eos_token
                        assert (not isinstance(self.eos_token_mt, List))
                        if (not self.eos_token_mt):
                            raise Warning('Please provide eos_token in --multitask-config-yaml to replace eos in sequence generator')

    def _get_speaker_to_id(self):
        """Map speaker names to integer ids when a speaker set file is
        configured; otherwise return None."""
        speaker_to_id = None
        speaker_set_filename = self.data_cfg.config.get('speaker_set_filename')
        if (speaker_set_filename is not None):
            speaker_set_path = (Path(self.args.data) / speaker_set_filename)
            with open(speaker_set_path) as f:
                speaker_to_id = {r.strip(): i for (i, r) in enumerate(f)}
        return speaker_to_id

    def setup_task(cls, args, **kwargs):
        # NOTE(review): takes `cls` -- presumably @classmethod, decorator
        # stripped in this dump; confirm.
        """Load the target dictionary and construct the task instance."""
        data_cfg = S2TDataConfig((Path(args.data) / args.config_yaml))
        dict_path = (Path(args.data) / data_cfg.vocab_filename)
        if (not dict_path.is_file()):
            raise FileNotFoundError(f'Dict not found: {dict_path.as_posix()}')
        tgt_dict = Dictionary.load(dict_path.as_posix())
        logger.info(f'dictionary size ({data_cfg.vocab_filename}): {len(tgt_dict):,}')
        if (getattr(args, 'train_subset', None) is not None):
            if (not all((s.startswith('train') for s in args.train_subset.split(',')))):
                raise ValueError('Train splits should be named like "train*".')
        return cls(args, tgt_dict)

    def build_criterion(self, args):
        """Build the criterion; when a target-language tag is prepended it
        acts as BOS, so --ignore-prefix-size must be 1."""
        from fairseq import criterions
        if (self.data_cfg.prepend_tgt_lang_tag and (args.ignore_prefix_size != 1)):
            raise ValueError('Please set "--ignore-prefix-size 1" since target language ID token is prepended as BOS.')
        return criterions.build_criterion(args, self)

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load one data split from TSV manifests into ``self.datasets``."""
        is_train_split = split.startswith('train')
        pre_tokenizer = self.build_tokenizer(self.args)
        bpe_tokenizer = self.build_bpe(self.args)
        self.datasets[split] = SpeechToTextDatasetCreator.from_tsv(root=self.args.data, cfg=self.data_cfg, splits=split, tgt_dict=self.tgt_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, is_train_split=is_train_split, epoch=epoch, seed=self.args.seed, speaker_to_id=self.speaker_to_id, multitask=self.multitask_tasks)

    def target_dictionary(self):
        # NOTE(review): likely a @property upstream (decorator stripped) --
        # build_generator_dual_decoder references it without calling; confirm.
        return self.tgt_dict

    def target_dictionary_mt(self):
        # NOTE(review): likely a @property upstream; see note above.
        return self.tgt_dict_mt

    def source_dictionary(self):
        # Speech input has no source-side token dictionary.
        # NOTE(review): likely a @property upstream.
        return None

    def max_positions(self):
        """Max (source, target) sequence lengths from the CLI args."""
        return (self.args.max_source_positions, self.args.max_target_positions)

    def build_model(self, args, from_checkpoint=False):
        """Inject input-feature/speaker metadata into ``args`` then build."""
        args.input_feat_per_channel = self.data_cfg.input_feat_per_channel
        args.input_channels = self.data_cfg.input_channels
        args.speaker_to_id = self.speaker_to_id
        return super(SpeechToTextTask, self).build_model(args, from_checkpoint)

    def build_generator_dual_decoder(self, models, args, extra_gen_cls_kwargs):
        """Sequence generator for two-pass (dual decoder, UnitY-style) models."""
        from examples.speech_to_speech.unity.sequence_generator_multi_decoder import MultiDecoderSequenceGenerator
        # Also strip the auxiliary (first-pass) language tags from outputs.
        lang_token_ids_aux = {i for (s, i) in self.tgt_dict_mt.indices.items() if TextTargetMultitaskData.is_lang_tag(s)}
        extra_gen_cls_kwargs['symbols_to_strip_from_output'].update(lang_token_ids_aux)
        eos_id_mt = (self.tgt_dict_mt.index(self.eos_token_mt) if self.eos_token_mt else None)
        # index() returns unk for unknown symbols -- guard against a typo'd EOS.
        assert (eos_id_mt != self.tgt_dict_mt.unk())
        extra_gen_cls_kwargs['eos_mt'] = eos_id_mt
        return MultiDecoderSequenceGenerator(models, self.target_dictionary, self.target_dictionary_mt, beam_size=max(1, getattr(args, 'beam', 1)), beam_size_mt=max(1, getattr(args, 'beam_mt', 1)), max_len_a=getattr(args, 'max_len_a', 0), max_len_b=getattr(args, 'max_len_b', 200), max_len_a_mt=getattr(args, 'max_len_a_mt', 0), max_len_b_mt=getattr(args, 'max_len_b_mt', 0), min_len=getattr(args, 'min_len', 1), normalize_scores=(not getattr(args, 'unnormalized', False)), len_penalty=getattr(args, 'lenpen', 1), len_penalty_mt=getattr(args, 'lenpen_mt', 1), unk_penalty=getattr(args, 'unkpen', 0), temperature=getattr(args, 'temperature', 1.0), match_source_len=getattr(args, 'match_source_len', False), no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0), **extra_gen_cls_kwargs)

    def build_generator(self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None):
        """Build a sequence generator; dispatches to the dual-decoder variant
        when the model exposes an ``mt_task_name``."""
        if (self.data_cfg.prepend_tgt_lang_tag and (args.prefix_size != 1)):
            raise ValueError('Please set "--prefix-size 1" since target language ID token is prepended as BOS.')
        lang_token_ids = {i for (s, i) in self.tgt_dict.indices.items() if SpeechToTextDataset.is_lang_tag(s)}
        if (extra_gen_cls_kwargs is None):
            extra_gen_cls_kwargs = {}
        extra_gen_cls_kwargs['symbols_to_strip_from_output'] = lang_token_ids
        # CLI --eos-token takes precedence over the data-config value.
        eos_token = (args.eos_token if (('eos_token' in args) and (args.eos_token is not None)) else self.data_cfg.config.get('eos_token', None))
        if (self.data_cfg.prepend_bos_and_append_tgt_lang_tag and (not eos_token)):
            raise Warning('Please provide --eos_token to replace eos in sequence generator')
        eos_id = (self.tgt_dict.index(eos_token) if eos_token else None)
        extra_gen_cls_kwargs['eos'] = eos_id
        has_dual_decoder = (getattr(models[0], 'mt_task_name', None) is not None)
        if has_dual_decoder:
            return self.build_generator_dual_decoder(models, args, extra_gen_cls_kwargs=extra_gen_cls_kwargs)
        else:
            return super().build_generator(models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs)

    def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
        """One training step; refreshes multitask loss weights first."""
        for (task_name, task_obj) in self.multitask_tasks.items():
            criterion.set_multitask_loss_weight(task_name, task_obj.args.get_loss_weight(update_num))
            if (task_name in model.multitask_decoders):
                model.multitask_decoders[task_name].train()
        (loss, sample_size, logging_output) = super().train_step(sample, model, criterion, optimizer, update_num, ignore_grad)
        return (loss, sample_size, logging_output)

    def valid_step(self, sample, model, criterion):
        """One validation step; puts multitask decoders in eval mode first."""
        for (task_name, task_obj) in self.multitask_tasks.items():
            if (task_name in model.multitask_decoders):
                model.multitask_decoders[task_name].eval()
        (loss, sample_size, logging_output) = super().valid_step(sample, model, criterion)
        return (loss, sample_size, logging_output)

    def build_tokenizer(self, args):
        """Build the pre-tokenizer declared in the data config."""
        logger.info(f'pre-tokenizer: {self.data_cfg.pre_tokenizer}')
        return encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer))

    def build_bpe(self, args):
        """Build the subword (BPE) tokenizer declared in the data config."""
        logger.info(f'tokenizer: {self.data_cfg.bpe_tokenizer}')
        return encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer))

    def get_interactive_tokens_and_lengths(self, lines, encode_fn):
        """For interactive mode, lines are audio paths; lengths are frame
        counts of the loaded features/waveforms."""
        n_frames = [get_features_or_waveform(p).shape[0] for p in lines]
        return (lines, n_frames)

    def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
        """Wrap audio paths and lengths into an inference-only dataset."""
        return SpeechToTextDataset('interactive', False, self.data_cfg, src_tokens, src_lengths)
class DetectionCrop(FeatureTransformer):
    """Feature transformer that crops the image to the region of interest
    stored under ``roi_key``.

    Args:
        roi_key: Key under which the ROI is stored in the feature.
        normalized: Passed through to the backend; presumably whether the
            ROI coordinates are normalized to [0, 1] -- confirm upstream.
        bigdl_type: JVM numeric type tag (default 'float').
    """

    def __init__(self, roi_key, normalized=True, bigdl_type='float'):
        # The positional order (bigdl_type, roi_key, normalized) is the
        # FeatureTransformer backend-marshaling protocol -- do not reorder.
        super(DetectionCrop, self).__init__(bigdl_type, roi_key, normalized)
def load_outcome_not_last_column_dataset():
    """Return a tiny fixture DataFrame whose 'Outcome' column is deliberately
    placed in the middle (not last)."""
    rows = [
        ['a', 0, 10],
        ['a', 0, 10000],
        ['a', 0, 14],
        ['a', 0, 10],
        ['a', 0, 10],
    ]
    return pd.DataFrame(rows, columns=['Categorical', 'Outcome', 'Numerical'])
class ExperimentRunner(tune.Trainable):
    """Ray Tune trainable that builds and steps an RL experiment (MBPO-style
    soft actor-critic stack), with pickle + TF checkpointing."""

    def _setup(self, variant):
        """Create the TF session and seed RNGs; heavy construction is
        deferred to :meth:`_build` on the first train step."""
        set_seed(variant['run_params']['seed'])
        self._variant = variant
        gpu_options = tf.GPUOptions(allow_growth=True)
        session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        tf.keras.backend.set_session(session)
        self._session = tf.keras.backend.get_session()
        self.train_generator = None
        self._built = False

    def _stop(self):
        """Tear down the TF graph/session at trial end."""
        tf.reset_default_graph()
        tf.keras.backend.clear_session()

    def _build(self):
        """Instantiate environments, replay pool, sampler, Q functions,
        policies and the training algorithm from the variant."""
        variant = copy.deepcopy(self._variant)
        environment_params = variant['environment_params']
        training_environment = self.training_environment = get_environment_from_params(environment_params['training'])
        # Fall back to the training env when no separate evaluation env given.
        evaluation_environment = self.evaluation_environment = (get_environment_from_params(environment_params['evaluation']) if ('evaluation' in environment_params) else training_environment)
        replay_pool = self.replay_pool = get_replay_pool_from_variant(variant, training_environment)
        sampler = self.sampler = get_sampler_from_variant(variant)
        Qs = self.Qs = get_Q_function_from_variant(variant, training_environment)
        policy = self.policy = get_policy_from_variant(variant, training_environment, Qs)
        initial_exploration_policy = self.initial_exploration_policy = get_policy('UniformPolicy', training_environment)
        # Domain-specific termination/static functions for model rollouts.
        domain = environment_params['training']['domain']
        static_fns = mbpo.static[domain.lower()]
        self.algorithm = get_algorithm_from_variant(variant=self._variant, training_environment=training_environment, evaluation_environment=evaluation_environment, policy=policy, initial_exploration_policy=initial_exploration_policy, Qs=Qs, pool=replay_pool, static_fns=static_fns, sampler=sampler, session=self._session)
        initialize_tf_variables(self._session, only_uninitialized=True)
        self._built = True

    def _train(self):
        """One Tune iteration: advance the algorithm's train generator and
        return its diagnostics dict."""
        if (not self._built):
            self._build()
        if (self.train_generator is None):
            self.train_generator = self.algorithm.train()
        diagnostics = next(self.train_generator)
        return diagnostics

    def _pickle_path(self, checkpoint_dir):
        """Path of the pickled experiment state inside a checkpoint."""
        return os.path.join(checkpoint_dir, 'checkpoint.pkl')

    def _replay_pool_pickle_path(self, checkpoint_dir):
        """Path of the pickled replay pool inside a checkpoint."""
        return os.path.join(checkpoint_dir, 'replay_pool.pkl')

    def _tf_checkpoint_prefix(self, checkpoint_dir):
        """File prefix used by the TF checkpoint writer."""
        return os.path.join(checkpoint_dir, 'checkpoint')

    def _get_tf_checkpoint(self):
        """TF Checkpoint object wrapping the algorithm's saveable variables."""
        tf_checkpoint = tf.train.Checkpoint(**self.algorithm.tf_saveables)
        return tf_checkpoint

    def picklables(self):
        # NOTE(review): referenced without calling in _save -- likely a
        # @property upstream whose decorator was stripped in this dump; confirm.
        """Everything that goes into the pickle part of a checkpoint."""
        return {'variant': self._variant, 'training_environment': self.training_environment, 'evaluation_environment': self.evaluation_environment, 'sampler': self.sampler, 'algorithm': self.algorithm, 'Qs': self.Qs, 'policy_weights': self.policy.get_weights()}

    def _save(self, checkpoint_dir):
        """Checkpoint: pickle state, optionally the replay pool, plus TF vars."""
        pickle_path = self._pickle_path(checkpoint_dir)
        with open(pickle_path, 'wb') as f:
            # NOTE(review): as plain code this pickles the bound method object,
            # not its return value; see the note on `picklables` above.
            pickle.dump(self.picklables, f)
        if self._variant['run_params'].get('checkpoint_replay_pool', False):
            self._save_replay_pool(checkpoint_dir)
        tf_checkpoint = self._get_tf_checkpoint()
        tf_checkpoint.save(file_prefix=self._tf_checkpoint_prefix(checkpoint_dir), session=self._session)
        return os.path.join(checkpoint_dir, '')

    def _save_replay_pool(self, checkpoint_dir):
        """Persist only the experience added since the last checkpoint."""
        replay_pool_pickle_path = self._replay_pool_pickle_path(checkpoint_dir)
        self.replay_pool.save_latest_experience(replay_pool_pickle_path)

    def _restore_replay_pool(self, current_checkpoint_dir):
        """Rebuild the replay pool by replaying every checkpoint's
        incremental experience dump, in order."""
        experiment_root = os.path.dirname(current_checkpoint_dir)
        experience_paths = [self._replay_pool_pickle_path(checkpoint_dir) for checkpoint_dir in sorted(glob.iglob(os.path.join(experiment_root, 'checkpoint_*')))]
        for experience_path in experience_paths:
            self.replay_pool.load_experience(experience_path)

    def _restore(self, checkpoint_dir):
        """Rebuild the experiment from a checkpoint: unpickle state, rebuild
        the pool/policy, restore TF variables, and re-sync target Qs."""
        assert isinstance(checkpoint_dir, str), checkpoint_dir
        checkpoint_dir = checkpoint_dir.rstrip('/')
        with self._session.as_default():
            pickle_path = self._pickle_path(checkpoint_dir)
            with open(pickle_path, 'rb') as f:
                picklable = pickle.load(f)
        training_environment = self.training_environment = picklable['training_environment']
        evaluation_environment = self.evaluation_environment = picklable['evaluation_environment']
        replay_pool = self.replay_pool = get_replay_pool_from_variant(self._variant, training_environment)
        if self._variant['run_params'].get('checkpoint_replay_pool', False):
            self._restore_replay_pool(checkpoint_dir)
        sampler = self.sampler = picklable['sampler']
        Qs = self.Qs = picklable['Qs']
        policy = self.policy = get_policy_from_variant(self._variant, training_environment, Qs)
        self.policy.set_weights(picklable['policy_weights'])
        initial_exploration_policy = self.initial_exploration_policy = get_policy('UniformPolicy', training_environment)
        self.algorithm = get_algorithm_from_variant(variant=self._variant, training_environment=training_environment, evaluation_environment=evaluation_environment, policy=policy, initial_exploration_policy=initial_exploration_policy, Qs=Qs, pool=replay_pool, sampler=sampler, session=self._session)
        # Restore the algorithm's non-TF state, then its TF variables.
        self.algorithm.__setstate__(picklable['algorithm'].__getstate__())
        tf_checkpoint = self._get_tf_checkpoint()
        status = tf_checkpoint.restore(tf.train.latest_checkpoint(os.path.split(self._tf_checkpoint_prefix(checkpoint_dir))[0]))
        status.assert_consumed().run_restore_ops(self._session)
        initialize_tf_variables(self._session, only_uninitialized=True)
        # Target networks are not checkpointed; copy from the restored Qs.
        for (Q, Q_target) in zip(self.algorithm._Qs, self.algorithm._Q_targets):
            Q_target.set_weights(Q.get_weights())
        self._built = True
class ProjectedAdditiveExactGPModel(ExactGPModel):
    """Exact GP whose kernel must be a (possibly scale-wrapped)
    GeneralizedProjectionKernel."""

    def __init__(self, train_x, train_y, likelihood, kernel):
        # Unwrap an optional ScaleKernel before validating the kernel type.
        base = kernel.base_kernel if isinstance(kernel, gpytorch.kernels.ScaleKernel) else kernel
        if not isinstance(base, GeneralizedProjectionKernel):
            raise ValueError('Not an projected additive kernel.')
        super(ProjectedAdditiveExactGPModel, self).__init__(train_x, train_y, likelihood, kernel)

    def get_corresponding_additive_model(self, return_proj=True):
        """Convert this random-projection model into its plain additive-model
        equivalent."""
        return convert_rp_model_to_additive_model(self, return_proj=return_proj)
def _grouper(iterable: Iterable[Any], n: int, fillvalue=None) -> Iterator[Tuple[Any]]:
it = iter(iterable)
while True:
values = []
for _ in range(n):
try:
value = next(it)
except StopIteration:
values.extend(([fillvalue] * (n - len(values))))
(yield tuple(values))
return
values.append(value)
(yield tuple(values)) |
# NOTE(review): the three bare statements here were mangled class decorators;
# reconstructed as transformers' require_* test-skip markers — confirm upstream.
@require_torch
@require_retrieval
@require_sentencepiece
class RagTestMixin():
    """Shared fixtures and assertion helpers for RAG model tests.

    Concrete subclasses supply ``config_and_inputs``; this mixin writes
    throw-away DPR/BART/T5 tokenizer files to a temp dir, builds a mocked
    dataset-backed retriever, and implements the ``check_*`` assertions the
    ``test_*`` methods dispatch to.
    """

    all_model_classes = ((RagModel, RagTokenForGeneration, RagSequenceForGeneration) if (is_torch_available() and is_datasets_available() and is_faiss_available()) else ())
    retrieval_vector_size = 32
    n_docs = 3
    max_combined_length = 16

    def setUp(self):
        """Create minimal DPR / BART / T5 tokenizer fixtures on disk."""
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        dpr_tokenizer_path = os.path.join(self.tmpdirname, 'dpr_tokenizer')
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'G', 'Gl', 'Gn', 'Glo', 'Glow', 'er', 'Glowest', 'Gnewer', 'Gwider', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'G l', 'Gl o', 'Glo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        bart_tokenizer_path = os.path.join(self.tmpdirname, 'bart_tokenizer')
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(vocab_tokens) + '\n'))
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
        t5_tokenizer = T5Tokenizer(T5_SAMPLE_VOCAB)
        t5_tokenizer_path = os.path.join(self.tmpdirname, 't5_tokenizer')
        t5_tokenizer.save_pretrained(t5_tokenizer_path)

    # Fix: the four tokenizer accessors are used attribute-style elsewhere in
    # this class (e.g. ``self.dpr_tokenizer`` in get_retriever), so they must
    # be properties; the original bare ``_property`` lines were mangled
    # decorators and would raise NameError at class-creation time.
    @property
    def dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'dpr_tokenizer'))

    @property
    def dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'dpr_tokenizer'))

    @property
    def bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'bart_tokenizer'))

    @property
    def t5_tokenizer(self) -> T5Tokenizer:
        # Fix: annotation previously said BartTokenizer.
        return T5Tokenizer.from_pretrained(os.path.join(self.tmpdirname, 't5_tokenizer'))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_retriever(self, config):
        """Build a RagRetriever over a 3-document in-memory FAISS-indexed dataset."""
        dataset = Dataset.from_dict({'id': ['0', '1', '3'], 'text': ['foo', 'bar', 'qux'], 'title': ['Foo', 'Bar', 'Qux'], 'embeddings': [np.ones(self.retrieval_vector_size), (2 * np.ones(self.retrieval_vector_size)), (3 * np.ones(self.retrieval_vector_size))]})
        dataset.add_faiss_index('embeddings', string_factory='Flat', metric_type=faiss.METRIC_INNER_PRODUCT)
        tokenizer = (self.bart_tokenizer if (config.generator.model_type == 'bart') else self.t5_tokenizer)
        with patch('transformers.models.rag.retrieval_rag.load_dataset') as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(config, question_encoder_tokenizer=self.dpr_tokenizer, generator_tokenizer=tokenizer)
        return retriever

    def check_model_with_retriever(self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs):
        """Forward pass with an attached retriever: check output shapes."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        for model_class in self.all_model_classes:
            model = model_class(config, retriever=self.get_retriever(config)).to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            outputs = model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask)
            # Logits are replicated once per retrieved document.
            self.assertEqual(outputs.logits.shape, ((self.n_docs * decoder_input_ids.shape[0]), decoder_input_ids.shape[1], config.generator.vocab_size))
            self.assertEqual(outputs.generator_enc_last_hidden_state.shape, ((self.n_docs * decoder_input_ids.shape[0]), self.max_combined_length, config.generator.hidden_size))
            self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))

    def check_model_with_end2end_retriever(self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs):
        """Same as above, but with a trainable context encoder attached to the retriever."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        context_encoder_tokenizer = self.dpr_ctx_encoder_tokenizer
        dpr_context_encoder = DPRContextEncoder(config.question_encoder)
        retriever = self.get_retriever(config)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)
        for model_class in [RagTokenForGeneration, RagSequenceForGeneration]:
            model = model_class(config, retriever=retriever)
            model.set_context_encoder_for_training(dpr_context_encoder)
            model.to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            outputs = model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask)
            self.assertEqual(outputs.logits.shape, ((self.n_docs * decoder_input_ids.shape[0]), decoder_input_ids.shape[1], config.generator.vocab_size))
            self.assertEqual(outputs.generator_enc_last_hidden_state.shape, ((self.n_docs * decoder_input_ids.shape[0]), self.max_combined_length, config.generator.hidden_size))
            self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))

    def check_model_generate_from_context_input_ids(self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs):
        """Generate from pre-retrieved contexts (retriever called manually)."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        retriever = self.get_retriever(config)
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
            out = retriever(input_ids, question_hidden_states.cpu().detach().to(torch.float32).numpy(), prefix=config.generator.prefix, return_tensors='pt')
            (context_input_ids, context_attention_mask, retrieved_doc_embeds) = (out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'])
            retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
            context_input_ids = context_input_ids.to(input_ids)
            context_attention_mask = context_attention_mask.to(input_ids)
            doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(1)
            outputs = model.generate(context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, do_deduplication=True)
            self.assertIsNotNone(outputs)

    def check_model_generate(self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs):
        """Beam-search generation smoke test for the generation-capable classes."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        # Skip RagModel (index 0), which has no generate().
        for model_class in self.all_model_classes[1:]:
            model = model_class(config, retriever=self.get_retriever(config)).to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            outputs = model.generate(input_ids=input_ids, num_beams=2, num_return_sequences=2, decoder_start_token_id=config.generator.eos_token_id)
            self.assertIsNotNone(outputs)

    def check_model_without_retriever(self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs):
        """Forward pass with manually supplied retrieval outputs."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        retriever = self.get_retriever(config)
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
            out = retriever(input_ids, question_hidden_states.cpu().detach().to(torch.float32).numpy(), prefix=config.generator.prefix, return_tensors='pt')
            (context_input_ids, context_attention_mask, retrieved_doc_embeds) = (out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'])
            retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
            context_input_ids = context_input_ids.to(input_ids)
            context_attention_mask = context_attention_mask.to(input_ids)
            doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(1)
            outputs = model(context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask)
            self.assertEqual(outputs.logits.shape, ((self.n_docs * decoder_input_ids.shape[0]), decoder_input_ids.shape[1], config.generator.vocab_size))
            self.assertEqual(outputs.generator_enc_last_hidden_state.shape, ((self.n_docs * decoder_input_ids.shape[0]), self.max_combined_length, config.generator.hidden_size))
            self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))

    def check_model_custom_n_docs(self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, n_docs, **kwargs):
        """Forward pass with a non-default n_docs shared by retriever and model."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        retriever = self.get_retriever(config)
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
            out = retriever(input_ids, question_hidden_states.cpu().detach().to(torch.float32).numpy(), prefix=config.generator.prefix, return_tensors='pt', n_docs=n_docs)
            (context_input_ids, context_attention_mask, retrieved_doc_embeds) = (out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'])
            retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
            context_input_ids = context_input_ids.to(input_ids)
            context_attention_mask = context_attention_mask.to(input_ids)
            doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(1)
            outputs = model(context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, n_docs=n_docs)
            self.assertEqual(outputs.logits.shape, ((n_docs * decoder_input_ids.shape[0]), decoder_input_ids.shape[1], config.generator.vocab_size))
            self.assertEqual(outputs.generator_enc_last_hidden_state.shape, ((n_docs * decoder_input_ids.shape[0]), self.max_combined_length, config.generator.hidden_size))
            self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], n_docs))

    def check_model_with_mismatch_n_docs_value(self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, retriever_n_docs, generator_n_docs, **kwargs):
        """Mismatched n_docs between retriever and generator must raise."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        retriever = self.get_retriever(config)
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            question_hidden_states = model.question_encoder(input_ids, attention_mask=attention_mask)[0]
            out = retriever(input_ids, question_hidden_states.cpu().detach().to(torch.float32).numpy(), prefix=config.generator.prefix, return_tensors='pt', n_docs=retriever_n_docs)
            (context_input_ids, context_attention_mask, retrieved_doc_embeds) = (out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'])
            retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
            context_input_ids = context_input_ids.to(input_ids)
            context_attention_mask = context_attention_mask.to(input_ids)
            doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(1)
            self.assertRaises(AssertionError, model.__call__, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, n_docs=generator_n_docs)

    def check_model_with_encoder_outputs(self, config, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, **kwargs):
        """Second forward pass fed with cached encoder outputs must match shapes."""
        self.assertIsNotNone(config.question_encoder)
        self.assertIsNotNone(config.generator)
        for model_class in self.all_model_classes:
            model = model_class(config, retriever=self.get_retriever(config)).to(torch_device)
            model.eval()
            self.assertTrue(model.config.is_encoder_decoder)
            outputs = model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask)
            encoder_outputs = BaseModelOutput(outputs.generator_enc_last_hidden_state)
            outputs = model(encoder_outputs=encoder_outputs, doc_scores=outputs.doc_scores, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask)
            self.assertEqual(outputs.logits.shape, ((self.n_docs * decoder_input_ids.shape[0]), decoder_input_ids.shape[1], config.generator.vocab_size))
            self.assertEqual(outputs.generator_enc_last_hidden_state.shape, ((self.n_docs * decoder_input_ids.shape[0]), self.max_combined_length, config.generator.hidden_size))
            self.assertEqual(outputs.doc_scores.shape, (input_ids.shape[0], self.n_docs))

    def test_model_with_retriever(self):
        inputs_dict = self.config_and_inputs
        self.check_model_with_retriever(**inputs_dict)

    def test_model_with_end2end_retriever(self):
        inputs_dict = self.config_and_inputs
        self.check_model_with_end2end_retriever(**inputs_dict)

    def test_model_without_retriever(self):
        inputs_dict = self.config_and_inputs
        self.check_model_without_retriever(**inputs_dict)

    def test_model_with_encoder_outputs(self):
        inputs_dict = self.config_and_inputs
        self.check_model_with_encoder_outputs(**inputs_dict)

    def test_model_generate(self):
        inputs_dict = self.config_and_inputs
        self.check_model_generate(**inputs_dict)

    def test_model_with_custom_n_docs(self):
        inputs_dict = self.config_and_inputs
        inputs_dict['n_docs'] = 1
        self.check_model_custom_n_docs(**inputs_dict)

    def test_model_with_mismatch_n_docs_value(self):
        inputs_dict = self.config_and_inputs
        inputs_dict['retriever_n_docs'] = 3
        inputs_dict['generator_n_docs'] = 2
        self.check_model_with_mismatch_n_docs_value(**inputs_dict)
def test_simple():
    """_replace_reactive_atoms strips the leading '$' marker from every token."""
    cases = (
        ('$foo $bar', 'foo bar'),
        ('$foo bar $baz42', 'foo bar baz42'),
        ('$foo $42bar $_baz42', 'foo 42bar _baz42'),
    )
    for raw, expected in cases:
        assert _replace_reactive_atoms(raw) == expected
def test_unregularized_methods(data):
    """All unregularized CCA variants should agree (to ~1 decimal place) on
    the average pairwise correlations of the same two views."""
    X, Y, _ = data
    latent_dims = 2
    estimator_classes = (rCCA, CCA, KCCA, PCACCA, TCCA, KTCCA)
    scores = []
    for estimator_cls in estimator_classes:
        fitted = estimator_cls(latent_dimensions=latent_dims).fit([X, Y])
        scores.append(fitted.average_pairwise_correlations((X, Y)))
    baseline = scores[0]
    for score in scores[1:]:
        # assert_array_almost_equal returns None on success, raises otherwise.
        assert np.testing.assert_array_almost_equal(baseline, score, decimal=1) is None
class ArgDef():
    """Definition of one API argument (name, position, types, default).

    Instances are normally built from a record dict via :meth:`new`; the
    ``*_similar`` helpers score how well two arguments from different API
    signatures correspond.
    """

    def __init__(self):
        self.name: str = ''
        self.index: int = (- 1)  # position in the signature; -1 = unknown
        self.is_optional: bool = False
        self.type: set = set()  # set of accepted type names
        self.default_value: str = ''
        self.description: str = ''
        self.case: Argument = None  # associated concrete argument, if any
        self.record = {}
        self.ignore: bool = False

    @staticmethod
    def new(record: Dict, index: int) -> 'ArgDef':
        """Build an ArgDef from *record* at signature position *index*.

        Supports both the plain-key schema and the ARG_*_KEY schema.
        Fix: declared @staticmethod — it takes no ``self`` and is invoked on
        the class.
        """
        arg = ArgDef()
        if (ARG_DEFAULT_VALUE_KEY not in record):
            # Plain schema: attribute names are literal keys.
            arg.name = record['name']
            arg.is_optional = record['is_optional']
            arg.type = set(record['type'])
            arg.default_value = record['default_value']
            arg.description = record['description']
            return arg
        arg.name = record[ARG_NAME_KEY]
        arg.index = index
        arg.is_optional = record[ARG_OPTIONAL_KEY]
        if (arg.name in ['*args', '**kwargs']):
            # Var-positional / var-keyword parameters are always optional.
            arg.is_optional = True
        arg.type = (set() if (ARG_TYPE_KEY not in record) else set(record[ARG_TYPE_KEY]))
        arg.default_value = record[ARG_DEFAULT_VALUE_KEY]
        if isinstance(arg.default_value, str):
            # Strip one matching pair of surrounding quotes.
            # Fix: guard against the empty string (previously IndexError on s[0]).
            s = arg.default_value
            if s and (s[0] == s[(- 1)]) and (s[0] in ("'", '"')):
                s = s[1:(- 1)]
            arg.default_value = s
        if (ARG_DESC_KEY in record):
            arg.description = record[ARG_DESC_KEY]
        return arg

    def arg_similar(self, arg: 'ArgDef', max_num_args, w_name=1.0, w_type=1.0, w_pos=1.0):
        """Similarity of this argument to *arg*: name + type + position terms.

        NOTE(review): the w_* weights are accepted but never applied — kept
        for interface compatibility.
        """
        def name_wrapper(name):
            # Normalise a known alias before comparing names.
            if (name == '_input_tensor'):
                return 'input'
            else:
                return name
        name_sim = self.string_similar(name_wrapper(self.name), name_wrapper(arg.name))
        if ((len(self.type) == 0) or (len(arg.type) == 0)):
            # Unknown type on either side: neutral half credit.
            type_sim = 0.5
        else:
            type_sim = (len(self.type.intersection(arg.type)) / len(self.type))
        pos_sim = (1.0 - (abs((self.index - arg.index)) / max_num_args))
        return ((name_sim + type_sim) + pos_sim)

    def args_similar(self, args: List['ArgDef'], max_num_args):
        """Similarity of this argument against each argument in *args*."""
        sims = []
        for arg in args:
            sims.append(self.arg_similar(arg, max_num_args))
        return list(sims)

    def perfect_match(self, arg: 'ArgDef'):
        """True-ish when names are equal and the type sets overlap."""
        return ArgDef.perfect_match_(self, arg)

    @staticmethod
    def similarity(argdefs_a: List['ArgDef'], argdefs_b: List['ArgDef'], verbose=True):
        """Pairwise similarity matrix between two argument lists.

        Fix: declared @staticmethod — it takes no ``self``.
        """
        sim = []
        max_num_args = max(len(argdefs_a), len(argdefs_b))
        for def_a in argdefs_a:
            temp = []
            for def_b in argdefs_b:
                t = def_a.arg_similar(def_b, max_num_args)
                if verbose:
                    print(def_a.name, def_b.name, t)
                temp.append(t)
            sim.append(temp)
        return sim

    @staticmethod
    def perfect_match_(arg1: 'ArgDef', arg2: 'ArgDef'):
        """Name equality AND non-empty type intersection (returns the
        intersection set or False)."""
        flag = ((arg1.name == arg2.name) and arg1.type.intersection(arg2.type))
        return flag

    @staticmethod
    def string_similar(s1, s2):
        """Normalised Levenshtein similarity between two strings.

        Fix: declared @staticmethod — it was called as
        ``self.string_similar(a, b)``, which bound ``self`` to ``s1`` and
        raised a TypeError.
        """
        return textdistance.levenshtein.normalized_similarity(s1, s2)
def get_model_fwk_name(model):
    """Detect which DL framework *model* belongs to.

    Tries registered model classes first, then probes TensorFlow, PyTorch,
    ONNX Runtime and MXNet in turn. Returns one of 'tensorflow', 'pytorch',
    'onnxruntime', 'mxnet' (or a registered model name); raises
    AssertionError when nothing matches.
    """
    def _is_onnxruntime(model):
        # Probe by attempting to create an ORT InferenceSession.
        from importlib.util import find_spec
        try:
            so = ort.SessionOptions()
            # Custom ops are only registered on Python < 3.11 when
            # onnxruntime-extensions is importable — presumably due to wheel
            # availability; TODO confirm.
            if ((sys.version_info < (3, 11)) and find_spec('onnxruntime_extensions')):
                from onnxruntime_extensions import get_library_path
                so.register_custom_ops_library(get_library_path())
            if isinstance(model, str):
                # A path string: let ORT load the file directly.
                ort.InferenceSession(model, so, providers=ort.get_available_providers())
            else:
                ort.InferenceSession(model.SerializeToString(), so, providers=ort.get_available_providers())
        except Exception as e:
            if ('Message onnx.ModelProto exceeds maximum protobuf size of 2GB' in str(e)):
                logger.warning('Please use model path instead of onnx model object to quantize')
            else:
                logger.warning('If you use an onnx model with custom_ops to do quantiztaion, please ensure onnxruntime-extensions is installed')
        else:
            return 'onnxruntime'
        return 'NA'
    def _is_pytorch(model):
        # Eager modules, FX graph modules and TorchScript modules all count.
        try:
            if (isinstance(model, torch.nn.Module) or isinstance(model, torch.fx.GraphModule) or isinstance(model, torch.jit._script.RecursiveScriptModule)):
                return 'pytorch'
            else:
                return 'NA'
        except:
            return 'NA'
    def _is_tensorflow(model):
        # Hide GPUs while probing so TF does not grab device memory.
        try:
            os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
            os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
            model_type = get_model_type(model)
        except:
            # Probe failed: undo the env-var changes made just above.
            os.environ.pop('CUDA_DEVICE_ORDER')
            os.environ.pop('CUDA_VISIBLE_DEVICES')
            return 'NA'
        else:
            return 'tensorflow'
    def _is_mxnet(model):
        try:
            is_mxnet = (isinstance(model, mx.gluon.HybridBlock) or (hasattr(model, '__len__') and (len(model) > 1) and isinstance(model[0], mx.symbol.Symbol)))
        except:
            return 'NA'
        else:
            return ('mxnet' if is_mxnet else 'NA')
    if isinstance(model, str):
        # Path input: validate it exists (possibly with a .pb extension).
        absmodel = os.path.abspath(os.path.expanduser(model))
        assert (os.path.exists(absmodel) or os.path.exists((absmodel + '.pb'))), 'invalid input path, the file does not exist!'
    # Prefer an exact match against the registered model classes.
    for (name, nc_model) in MODELS.items():
        if (nc_model and isinstance(model, nc_model)):
            return ('pytorch' if ((name == 'pytorch_ipex') or (name == 'pytorch_fx')) else name)
    if isinstance(model, TensorflowBaseModel):
        return 'tensorflow'
    # Fall back to the framework probes; the first non-'NA' answer wins.
    checker = [_is_tensorflow, _is_pytorch, _is_onnxruntime, _is_mxnet]
    for handler in checker:
        fwk_name = handler(model)
        if (fwk_name != 'NA'):
            break
    assert (fwk_name != 'NA'), 'Framework is not detected correctly from model format. This could be caused by unsupported model or inappropriate framework installation.'
    return fwk_name
def acquireLock(lock_f='/tmp/lockfile.LOCK'):
    """Open *lock_f* and take an exclusive fcntl lock on it.

    Blocks until the lock becomes available. Returns the open file object;
    the caller must keep it alive to hold the lock and close it to release.
    """
    import fcntl
    lock_handle = open(lock_f, 'w+')
    fcntl.lockf(lock_handle, fcntl.LOCK_EX)
    return lock_handle
class KernelConv2D(nn.Module):
    """Applies a per-pixel convolution kernel to a replication-padded input
    via the custom KernelConv2DFunction autograd op."""

    def __init__(self, kernel_size):
        super(KernelConv2D, self).__init__()
        # Only odd kernel sizes keep the output aligned with the input grid.
        assert kernel_size % 2 == 1
        self.kernel_size = kernel_size
        half = (kernel_size - 1) // 2
        # Same replication padding on all four sides.
        self.pad = torch.nn.ReplicationPad2d([half, half, half, half])

    def forward(self, input, kernel):
        padded = self.pad(input)
        return KernelConv2DFunction.apply(padded, kernel, self.kernel_size)
class MaskedLMOutput(ModelOutput):
    """Output container for masked-language-modeling heads.

    NOTE(review): ModelOutput subclasses are conventionally decorated with
    ``@dataclass`` — confirm the decorator was not lost upstream.
    """
    # Masked-LM loss, when computed by the producing model.
    loss: Optional[torch.FloatTensor] = None
    # Prediction scores (one row per token position).
    logits: torch.FloatTensor = None
    # Per-layer hidden states, when requested.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer attention weights, when requested.
    attentions: Optional[Tuple[torch.FloatTensor]] = None
# NOTE(review): the two bare `_registry(operator_type=...)` calls below look
# like mangled class decorators (presumably the operator-registry decorator,
# e.g. `@operator_registry(...)`) — confirm against upstream; as written they
# are plain calls whose return values are discarded.
_registry(operator_type='InnerProduct')
_registry(operator_type='InnerProductGraph')
class InnerProduct(Operator):
    """Operator node representing an inner-product (fully-connected) op."""
    def __init__(self):
        super().__init__()
def set_seed(seed):
    """Seed Python's, NumPy's and PyTorch's (CPU + all CUDA devices) RNGs
    for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=(2 ** 0.5)):
    """Fused bias-add + leaky ReLU, multiplied by *scale*.

    Fix: the original unconditionally called ``input.cuda()``, which crashes
    on CPU-only machines and can leave ``bias`` on a different device. The
    computation is device-agnostic, so it now runs wherever ``input`` already
    lives (callers keep control over placement).
    """
    # Trailing singleton dims so bias broadcasts over every spatial axis.
    rest_dim = [1] * (input.ndim - bias.ndim - 1)
    biased = input + bias.view(1, bias.shape[0], *rest_dim)
    return F.leaky_relu(biased, negative_slope=negative_slope) * scale
def test_fit_weighted_2ds(X, w):
    """Regression test: fitting a 2-state Exponential DenseHMM with sample
    weights reproduces frozen reference parameters after 1 and 5 EM iterations.

    The numeric expectations below are reference outputs of a previous run.
    """
    # Shift the data by 1 so all observations are valid for an Exponential.
    X = [x for x in torch.tensor((numpy.array(X) + 1))]
    d = [Exponential([2.1, 0.3, 0.1]), Exponential([1.5, 3.1, 2.2])]
    model = DenseHMM(distributions=d, edges=[[0.1, 0.8], [0.3, 0.6]], starts=[0.2, 0.8], ends=[0.1, 0.1], max_iter=1)
    model.fit(X, sample_weight=w)
    d1 = model.distributions[0]
    d2 = model.distributions[1]
    # starts/ends/edges are stored in log space.
    assert_array_almost_equal(model.starts, [(- 15.399), (- 2.0519e-07)], 3)
    assert_array_almost_equal(model.ends, [(- 1.732272), (- 1.609437)])
    assert_array_almost_equal(model.edges, [[(- 23.970318), (- 0.194656)], [(- 11.483337), (- 0.223157)]], 5)
    assert_array_almost_equal(d1.scales, [2.801925, 2.003776, 1.000194])
    # Sufficient-statistic accumulators must be reset after fitting.
    assert_array_almost_equal(d1._w_sum, [0.0, 0.0, 0.0])
    assert_array_almost_equal(d1._xw_sum, [0.0, 0.0, 0.0])
    assert_array_almost_equal(d2.scales, [2.678787, 2.060607, 2.278801])
    assert_array_almost_equal(d2._w_sum, [0.0, 0.0, 0.0])
    assert_array_almost_equal(d2._xw_sum, [0.0, 0.0, 0.0])
    # Same setup, but run EM for 5 iterations.
    d = [Exponential([2.1, 0.3, 0.1]), Exponential([1.5, 3.1, 2.2])]
    model = DenseHMM(distributions=d, edges=[[0.1, 0.8], [0.3, 0.6]], starts=[0.2, 0.8], ends=[0.1, 0.1], max_iter=5)
    model.fit(X, sample_weight=w)
    d1 = model.distributions[0]
    d2 = model.distributions[1]
    assert_array_almost_equal(model.starts, [(- 16.093), (- 1.025e-07)], 3)
    assert_array_almost_equal(model.ends, [(- 1.469704), (- 1.609439)])
    assert_array_almost_equal(model.edges, [[(- 24.481024), (- 0.261356)], [(- 11.632328), (- 0.223154)]], 5)
    assert_array_almost_equal(d1.scales, [2.324057, 2.012569, 1.522347])
    assert_array_almost_equal(d1._w_sum, [0.0, 0.0, 0.0])
    assert_array_almost_equal(d1._xw_sum, [0.0, 0.0, 0.0])
    assert_array_almost_equal(d2.scales, [2.678791, 2.060607, 2.278795])
    assert_array_almost_equal(d2._w_sum, [0.0, 0.0, 0.0])
    assert_array_almost_equal(d2._xw_sum, [0.0, 0.0, 0.0])
class Beam(object):
    """Beam-search bookkeeping: tracks live and completed hypotheses and
    advances the beam one decoding step at a time."""

    def __init__(self, beam_size, min_time_step, max_time_step, hypotheses, device):
        # beam_size: number of hypotheses kept alive at each step.
        # min_time_step / max_time_step: length bounds on generated sequences.
        # hypotheses: initial list of Hypothesis objects.
        self.beam_size = beam_size
        self.min_time_step = min_time_step
        self.max_time_step = max_time_step
        self.completed_hypotheses = []
        self.steps = 0
        self.hypotheses = hypotheses
        self.device = device

    def merge_score(self, prev_hyp, step):
        """Score of extending prev_hyp with one (token, score) step; UNK is
        forbidden (scored -inf)."""
        (token, score) = step
        prefix = prev_hyp.seq
        if (token == UNK):
            return float('-inf')
        new_score = (prev_hyp.score + score)
        return new_score

    def update(self, new_states, last_steps):
        """Advance the beam: score all candidate extensions, keep the best
        ones, and rebuild the live-hypothesis list.

        new_states: dict of decoder state tensors for all live hypotheses.
        last_steps: per-hypothesis list of (token, score) candidate steps.
        """
        candidates = []
        for (prev_hyp_idx, steps) in enumerate(last_steps):
            for step in steps:
                token = step[0]
                score = self.merge_score(self.hypotheses[prev_hyp_idx], step)
                candidates.append((prev_hyp_idx, token, score))
        # Keep only as many candidates as there are free beam slots.
        candidates.sort(key=(lambda x: x[(- 1)]), reverse=True)
        live_nyp_num = (self.beam_size - len(self.completed_hypotheses))
        candidates = candidates[:live_nyp_num]
        new_hyps = []
        # Gather the state slices of the surviving parent hypotheses.
        _prev_hyp_idx = torch.tensor([x[0] for x in candidates]).cuda(self.device)
        _split_state = dict()
        for (k, v) in new_states.items():
            # Batch dimension is dim 1 for >=3-D tensors, otherwise dim 0.
            split_dim = (1 if (len(v.size()) >= 3) else 0)
            _split_state[k] = v.index_select(split_dim, _prev_hyp_idx).split(1, dim=split_dim)
        for (idx, (prev_hyp_idx, token, score)) in enumerate(candidates):
            state = dict()
            for (k, v) in _split_state.items():
                state[k] = _split_state[k][idx]
            seq = (self.hypotheses[prev_hyp_idx].seq + [token])
            new_hyps.append(Hypothesis(state, seq, score))
        # Partition the new hypotheses into completed vs still-live.
        self.hypotheses = []
        for hyp in new_hyps:
            if hyp.is_completed():
                # Only accept completions that satisfy the minimum length
                # (seq includes begin/end markers, hence the -2).
                if ((len(hyp) - 2) >= self.min_time_step):
                    self.completed_hypotheses.append(hyp)
            else:
                self.hypotheses.append(hyp)
        self.steps += 1

    def completed(self):
        """True when the beam is full of finished hypotheses or time is up."""
        if ((len(self.completed_hypotheses) < self.beam_size) and (self.steps < self.max_time_step)):
            return False
        return True

    def get_k_best(self, k, alpha):
        """Top-k hypotheses under length-normalised score (alpha controls the
        length penalty)."""
        if (len(self.completed_hypotheses) == 0):
            # Nothing finished: fall back to the live hypotheses.
            self.completed_hypotheses = self.hypotheses
        self.completed_hypotheses.sort(key=(lambda x: (x.score / ((1 + len(x.seq)) ** alpha))), reverse=True)
        return self.completed_hypotheses[:k]

    def print_everything(self):
        """Debug dump of live and completed hypothesis sequences."""
        print('alive:')
        for x in self.hypotheses:
            print(x.seq)
        print('completed:')
        for x in self.completed_hypotheses:
            print(x.seq)
class FlaxBartDecoderPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error via requires_backends
    unless the flax backend is installed."""
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def to_json(o):
    """Recursively convert *o* into JSON-friendly primitives.

    Strings pass through unchanged, classes become their names, lists and
    tuples are converted element-wise into lists, dicts are converted on both
    keys and values; anything else is returned as-is.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, type):
        return o.__name__
    if isinstance(o, dict):
        return {to_json(key): to_json(val) for key, val in o.items()}
    if isinstance(o, (list, tuple)):
        return [to_json(item) for item in o]
    return o
class DIML_Indoor(Dataset):
    """DIML indoor RGB-D dataset (low-resolution split).

    Pairs every ``LR/*/color/*.png`` image with its filled depth map and
    yields ``{'image', 'depth'}`` samples run through ``ToTensor``.
    """

    def __init__(self, data_dir_root):
        import glob
        color_pattern = os.path.join(data_dir_root, 'LR', '*', 'color', '*.png')
        self.image_files = glob.glob(color_pattern)
        # Depth paths mirror the color paths with a different folder/suffix.
        self.depth_files = [
            p.replace('color', 'depth_filled').replace('_c.png', '_depth_filled.png')
            for p in self.image_files
        ]
        self.transform = ToTensor()

    def __getitem__(self, idx):
        rgb = np.asarray(Image.open(self.image_files[idx]), dtype=np.float32) / 255.0
        # Depth is stored in millimetres as uint16; convert to metres.
        depth = np.asarray(Image.open(self.depth_files[idx]), dtype='uint16') / 1000.0
        depth = depth[(..., None)]  # add trailing channel axis
        sample = self.transform(dict(image=rgb, depth=depth))
        if idx == 0:
            print(sample['image'].shape)
        return sample

    def __len__(self):
        return len(self.image_files)
def load_pretrained_component_from_model(component: Union[(FairseqEncoder, FairseqDecoder)], checkpoint: str):
    """Load the matching encoder or decoder weights from *checkpoint* into
    *component* and return it.

    Raises IOError when the checkpoint file is missing and ValueError when
    *component* is neither a FairseqEncoder nor a FairseqDecoder.
    """
    if not PathManager.exists(checkpoint):
        raise IOError('Model file not found: {}'.format(checkpoint))
    state = load_checkpoint_to_cpu(checkpoint)
    if isinstance(component, FairseqEncoder):
        prefix = 'encoder'
    elif isinstance(component, FairseqDecoder):
        prefix = 'decoder'
    else:
        raise ValueError('component to load must be either a FairseqEncoder or FairseqDecoder. Loading other component types are not supported.')
    # Keep only the keys belonging to this component and strip "<prefix>.".
    component_state_dict = OrderedDict(
        (key[(len(prefix) + 1):], state['model'][key])
        for key in state['model'].keys()
        if key.startswith(prefix)
    )
    component.load_state_dict(component_state_dict, strict=True)
    return component
class WarmupLinearDecaySchedule():
    """Learning-rate schedule: linear warmup from 0 to ``learning_rate`` over
    ``warmup_steps``, then linear decay down to ``min_lr`` at ``total_steps``
    (clamped thereafter)."""

    def __init__(self, warmup_steps, total_steps, learning_rate, min_lr=0.0):
        self.warmup_steps = warmup_steps
        self.total_steps = total_steps
        self.initial_learning_rate = learning_rate
        self.min_lr = min_lr
        # Guard against a zero-length decay phase.
        self.decay_steps = max(1.0, self.total_steps - self.warmup_steps)

    def __call__(self, step):
        """Return the learning rate at *step*."""
        if step < self.warmup_steps:
            # Linear ramp from 0 up to the initial rate.
            return self.initial_learning_rate * float(step) / max(1.0, self.warmup_steps)
        # Linear decay; the max(0, ...) clamps at min_lr past total_steps.
        remaining = max(0, (self.total_steps - step) / self.decay_steps)
        return self.min_lr + (self.initial_learning_rate - self.min_lr) * remaining
def get_dataset(root_dir, use_line_art=True, include_subfolders=False):
    """Build a DatasetFromFolder rooted at *root_dir*.

    use_line_art and include_subfolders are forwarded to the dataset
    constructor unchanged.
    """
    return DatasetFromFolder(root_dir, use_line_art, include_subfolders=include_subfolders)
class InfBallBounded(DualObject):
    """Dual representation of an l-infinity ball of radius epsilon around X,
    clipped elementwise to the box [l, u].

    NOTE(review): appears to implement the input set of an LP-dual robust
    verification bound (Wong & Kolter-style) — confirm against the
    surrounding framework.
    """

    def __init__(self, X, epsilon, l=0, u=1):
        super(InfBallBounded, self).__init__()
        self.epsilon = epsilon
        # Elementwise box bounds of the perturbed input, flattened per example.
        self.l = (X - epsilon).clamp(min=l).view(X.size(0), 1, (- 1))
        self.u = (X + epsilon).clamp(max=u).view(X.size(0), 1, (- 1))
        n = X[0].numel()
        self.nu_x = [X]
        # Identity basis (one row per input coordinate) to be propagated
        # through the network layers.
        self.nu_1 = [X.new(n, n)]
        torch.eye(n, out=self.nu_1[0])
        self.nu_1[0] = self.nu_1[0].view((- 1), *X.size()[1:]).unsqueeze(0)

    def apply(self, dual_layer):
        # Propagate both the center point and the identity basis through the
        # next dual layer.
        self.nu_x.append(dual_layer(*self.nu_x))
        self.nu_1.append(dual_layer(*self.nu_1))

    def bounds(self, network=None):
        """Lower/upper output bounds of the propagated map over the box [l, u]."""
        if (network is None):
            nu = self.nu_1[(- 1)]
        else:
            nu = network(self.nu_1[0])
        nu_pos = nu.clamp(min=0).view(nu.size(0), nu.size(1), (- 1))
        nu_neg = nu.clamp(max=0).view(nu.size(0), nu.size(1), (- 1))
        # Interval arithmetic: positive coefficients take u, negative take l
        # for the upper bound (and vice versa for the lower bound).
        zu = (self.u.matmul(nu_pos) + self.l.matmul(nu_neg)).squeeze(1)
        zl = (self.u.matmul(nu_neg) + self.l.matmul(nu_pos)).squeeze(1)
        return (zl.view(zl.size(0), *nu.size()[2:]), zu.view(zu.size(0), *nu.size()[2:]))

    def objective(self, *nus):
        """Dual-objective contribution of this input set for the final nu."""
        nu = nus[(- 1)]
        nu_pos = nu.clamp(min=0).view(nu.size(0), nu.size(1), (- 1))
        nu_neg = nu.clamp(max=0).view(nu.size(0), nu.size(1), (- 1))
        (u, l) = (self.u.unsqueeze(3).squeeze(1), self.l.unsqueeze(3).squeeze(1))
        return ((- nu_neg.matmul(l)) - nu_pos.matmul(u)).squeeze(2)
class Blip2VisionModel(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error via requires_backends
    unless the torch backend is installed."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class SinkhornDivergence(OptimalTransport):
    """Debiased Sinkhorn divergence S(x, y) = 2*W(x, y) - W(x, x) - W(y, y),
    where W is the entropic optimal-transport cost."""

    thre = 0.001  # convergence threshold on the dual-variable update

    def __init__(self, dist_metric='cosine', eps=0.01, max_iter=5, bp_to_sinkhorn=False):
        """
        Args:
            dist_metric: metric name passed to the parent's distance().
            eps: entropic regularisation strength.
            max_iter: maximum number of Sinkhorn iterations.
            bp_to_sinkhorn: backprop through the Sinkhorn loop when True.
        """
        super().__init__()
        self.dist_metric = dist_metric
        self.eps = eps
        self.max_iter = max_iter
        self.bp_to_sinkhorn = bp_to_sinkhorn

    def forward(self, x, y):
        """Return the Sinkhorn divergence between sample batches x and y."""
        W_xy = self.transport_cost(x, y)
        W_xx = self.transport_cost(x, x)
        W_yy = self.transport_cost(y, y)
        return (((2 * W_xy) - W_xx) - W_yy)

    def transport_cost(self, x, y, return_pi=False):
        """Entropic OT cost between x and y; optionally also return the plan."""
        C = self.distance(x, y, dist_metric=self.dist_metric)
        pi = self.sinkhorn_iterate(C, self.eps, self.max_iter, self.thre)
        if (not self.bp_to_sinkhorn):
            # Treat the transport plan as a constant unless gradients through
            # the Sinkhorn loop were explicitly requested.
            pi = pi.detach()
        cost = torch.sum((pi * C))
        if return_pi:
            return (cost, pi)
        return cost

    @staticmethod
    def sinkhorn_iterate(C, eps, max_iter, thre):
        """Run log-domain Sinkhorn iterations on cost matrix C and return the
        transport plan.

        Fix: this was defined without ``self`` yet called as
        ``self.sinkhorn_iterate(C, eps, max_iter, thre)``, which raised a
        TypeError; it uses no instance state, so it is now a @staticmethod.
        """
        (nx, ny) = C.shape
        # Uniform marginals over the two point sets.
        mu = (torch.ones(nx, dtype=C.dtype, device=C.device) * (1.0 / nx))
        nu = (torch.ones(ny, dtype=C.dtype, device=C.device) * (1.0 / ny))
        u = torch.zeros_like(mu)
        v = torch.zeros_like(nu)

        def M(_C, _u, _v):
            # Modified cost in the log domain: (-C + u ⊕ v) / eps.
            return ((((- _C) + _u.unsqueeze((- 1))) + _v.unsqueeze((- 2))) / eps)

        for _ in range(max_iter):
            u0 = u
            u = ((eps * (torch.log((mu + 1e-08)) - torch.logsumexp(M(C, u, v), dim=1))) + u)
            v = ((eps * (torch.log((nu + 1e-08)) - torch.logsumexp(M(C, u, v).permute(1, 0), dim=1))) + v)
            # Stop once the dual variable u has (approximately) converged.
            if (u - u0).abs().sum().item() < thre:
                break
        return torch.exp(M(C, u, v))
def load_person_names(path):
    """Read one name per line from *path*, normalise spaces to underscores,
    and return the names as a set (duplicates collapse)."""
    with open(path, 'r', encoding='utf8') as f:
        return {line.strip().replace(' ', '_') for line in f}
class SkNetEncoder(ResNet, EncoderMixin):
    """SK-Net/ResNet backbone wrapped as a multi-stage segmentation encoder.

    Produces one feature map per stage (strides 1 through 32); the
    classification head (fc, global_pool) is removed.
    """

    def __init__(self, out_channels, depth=5, **kwargs):
        super().__init__(**kwargs)
        self._depth = depth
        self._out_channels = out_channels
        self._in_channels = 3
        # The classification head is unused in encoder mode.
        del self.fc
        del self.global_pool

    def get_stages(self):
        """Return the encoder stages ordered from shallowest to deepest."""
        return [nn.Identity(), nn.Sequential(self.conv1, self.bn1, self.act1), nn.Sequential(self.maxpool, self.layer1), self.layer2, self.layer3, self.layer4]

    def forward(self, x):
        """Run x through the first depth+1 stages, collecting every feature map."""
        stages = self.get_stages()
        features = []
        for i in range((self._depth + 1)):
            x = stages[i](x)
            features.append(x)
        return features

    def load_state_dict(self, state_dict, **kwargs):
        # Drop classifier weights before loading (the head was deleted).
        # Fix: pop with a default — checkpoints exported without a head
        # previously raised KeyError here.
        state_dict.pop('fc.bias', None)
        state_dict.pop('fc.weight', None)
        super().load_state_dict(state_dict, **kwargs)
class FacesHQTrain(Dataset):
    """CelebA-HQ + FFHQ training splits concatenated into one dataset.

    Optionally random-crops each sample to ``crop_size``; when ``coord`` is
    set, a normalised pixel-coordinate map is cropped in lockstep with the
    image and returned under the 'coord' key.
    """
    def __init__(self, size, keys=None, crop_size=None, coord=False):
        d1 = CelebAHQTrain(size=size, keys=keys)
        d2 = FFHQTrain(size=size, keys=keys)
        self.data = ConcatDatasetWithIndex([d1, d2])
        self.coord = coord
        if (crop_size is not None):
            self.cropper = albumentations.RandomCrop(height=crop_size, width=crop_size)
            if self.coord:
                # Apply the exact same random crop window to the coord map.
                self.cropper = albumentations.Compose([self.cropper], additional_targets={'coord': 'image'})
    def __len__(self):
        return len(self.data)
    def __getitem__(self, i):
        # ConcatDatasetWithIndex yields (example, dataset_index); the index
        # doubles as the class label (0 = CelebA-HQ, 1 = FFHQ).
        (ex, y) = self.data[i]
        if hasattr(self, 'cropper'):
            if (not self.coord):
                out = self.cropper(image=ex['image'])
                ex['image'] = out['image']
            else:
                (h, w, _) = ex['image'].shape
                # Per-pixel scan-order coordinate, normalised to [0, 1).
                coord = (np.arange((h * w)).reshape(h, w, 1) / (h * w))
                out = self.cropper(image=ex['image'], coord=coord)
                ex['image'] = out['image']
                ex['coord'] = out['coord']
        ex['class'] = y
        return ex
def generate_benchmark_table():
    """Print one LaTeX-style table row of metrics per (model, dataset) pair.

    Reads ``<dataset>_result.txt`` files produced by the evaluation script and
    prints "& S & meanEm & wF & MAE" cells; prints dash placeholders when the
    result file is missing, or when a matching line contains NaN.
    """
    res_root = '../eval/EvaluationResults_ablation_script_new_3'
    data_lst = ['CHAMELEON', 'CAMO', 'COD10K']
    model_lst = ['-Network_Res2Net_GRA_NCD_GSize_32_32_32']
    for model_name in model_lst:
        for dataset in data_lst:
            txt_path = os.path.join(res_root, '{}_result.txt'.format(dataset))
            if not os.path.exists(txt_path):
                # No results for this dataset: emit placeholder cells.
                print('& - & - & - & -', end='\n')
                continue
            with open(txt_path) as f:
                result_lines = f.readlines()
            for line in result_lines:
                # Only process lines reporting the model under consideration.
                if line.split('Model:')[1].split(') Smeasure')[0] not in model_name:
                    continue
                if 'NaN' in line:
                    s_measure = w_f = mean_e_m = mae = '-'
                else:
                    s_measure = line.split('Smeasure:')[1].split('; wFmeasure')[0]
                    w_f = line.split('wFmeasure:')[1].split(';MAE')[0]
                    mean_e_m = line.split('meanEm:')[1].split('; maxEm')[0]
                    mae = line.split('MAE:')[1].split('; adpEm')[0]
                print('& {} & {} & {} & {}'.format(s_measure, mean_e_m, w_f, mae), end='\n')
def test(model):
    """Evaluate on the target-domain test set.

    Returns a tuple ``(correct, accuracy, pred_list, label_list)`` where
    ``correct`` is the total number of correct predictions (tensor),
    ``accuracy`` its float ratio over the dataset size, and the lists hold
    per-batch numpy arrays of predictions and labels.

    NOTE(review): relies on module-level globals defined elsewhere in the
    file: ``test_loader``, ``model_TST``, ``len_tar_loader``,
    ``len_tar_dataset``, ``args``. Also, ``model.eval()`` is applied to the
    argument while inference uses the global ``model_TST`` — confirm these
    refer to the same network.
    """
    model.eval()
    loss = 0
    correct = 0
    (pred_list, label_list) = ([], [])
    with torch.no_grad():
        for (data, label) in test_loader:
            (data, label) = (data.cuda(), label.cuda())
            # Presumably labels are stored 1-based; shift to 0-based class ids
            # — TODO confirm against the dataset loader.
            label = (label - 1)
            out = model_TST(data, label)
            # out[0] holds the logits; argmax over classes per sample.
            pred = out[0].data.max(1)[1]
            pred_list.append(pred.cpu().numpy())
            label_list.append(label.cpu().numpy())
            loss += F.nll_loss(F.log_softmax(out[0], dim=1), label.long()).item()
            correct += pred.eq(label.data.view_as(pred)).cpu().sum()
    # Average summed batch losses over the number of batches.
    loss /= len_tar_loader
    print('Testing...')
    print('{} set: Average test loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n, | Test sample number: {:6}'.format(args.target_name, loss, correct, len_tar_dataset, ((100.0 * correct) / len_tar_dataset), len_tar_dataset))
    return (correct, (correct.item() / len_tar_dataset), pred_list, label_list)
def color_transfer(source, target, clip=True, preserve_paper=True, mask=None):
    """Transfer the color statistics of ``source`` onto ``target`` in LAB space.

    Each LAB channel of the target is standardized by the target's mean/std
    and re-expressed with the source's statistics.

    :param source: BGR uint8 image providing the color statistics
    :param target: BGR uint8 image to be recolored
    :param clip: forwarded to ``_scale_array`` for range handling
    :param preserve_paper: if True scale by (target std / source std),
        otherwise by (source std / target std)
    :param mask: optional mask forwarded to ``image_stats``
    :return: recolored BGR uint8 image
    """
    src_lab = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype('float32')
    tar_lab = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype('float32')
    (lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(src_lab, mask)
    (lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(tar_lab, mask)
    src_means = (lMeanSrc, aMeanSrc, bMeanSrc)
    src_stds = (lStdSrc, aStdSrc, bStdSrc)
    tar_means = (lMeanTar, aMeanTar, bMeanTar)
    tar_stds = (lStdTar, aStdTar, bStdTar)
    channels = []
    for chan, m_tar, s_tar, m_src, s_src in zip(
            cv2.split(tar_lab), tar_means, tar_stds, src_means, src_stds):
        # Center on the target statistics, rescale, re-center on the source.
        chan -= m_tar
        chan = ((s_tar / s_src) if preserve_paper else (s_src / s_tar)) * chan
        chan += m_src
        channels.append(_scale_array(chan, clip=clip))
    transfer = cv2.merge(channels)
    return cv2.cvtColor(transfer.astype('uint8'), cv2.COLOR_LAB2BGR)
class ConcatDataset(Dataset):
    """Dataset chaining several datasets end-to-end.

    Global index ``idx`` is mapped to the owning sub-dataset via the
    cumulative-size table and to the local index within it.
    """

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__()
        self.datasets = list(datasets)
        assert (len(datasets) > 0), 'datasets should not be an empty iterable'
        # Cumulative end offsets, e.g. lengths [3, 2] -> [3, 5].
        self.cum_sizes = np.cumsum([len(x) for x in self.datasets])

    def __len__(self):
        return self.cum_sizes[(- 1)]

    def __getitem__(self, idx):
        """Return the item at global index ``idx``.

        Fix: removed the previous ``super().__getitem__(idx)`` call —
        ``torch.utils.data.Dataset`` does not implement ``__getitem__``
        (modern versions raise NotImplementedError), so the call either
        crashed or was a no-op.
        """
        # 'right' so that idx == cum_sizes[k] falls into dataset k+1.
        dataset_index = self.cum_sizes.searchsorted(idx, 'right')
        if dataset_index == 0:
            dataset_idx = idx
        else:
            dataset_idx = idx - self.cum_sizes[dataset_index - 1]
        return self.datasets[dataset_index][dataset_idx]
def set_param_grad_off(module):
    """Freeze *module* by disabling gradient tracking on every parameter."""
    for p in module.parameters():
        p.requires_grad = False
class Test_lpoly(unittest.TestCase):
    """Unit tests for the Laurent-polynomial algebra (``LPoly`` module).

    Covers: building unitaries from phase-angle lists and checking their
    diagonal (identity) component, basic algebra-element products, and
    Laurent polynomial multiplication.

    NOTE(review): expected coefficient lists and ``dmin`` offsets encode the
    LPoly module's conventions (minimum degree as second constructor
    argument) — grounded only by these assertions, not visible here.
    """

    def test_simple_unitary_from_angles1(self):
        # Single zero angle: diagonal part should be the constant polynomial 1.
        phiset = [0]
        ualg = LPoly.LAlg.unitary_from_angles(phiset)
        print(f'For phiset={phiset}, U={ualg}')
        print(f'diagonal poly = {ualg.IPoly}')
        assert (ualg.IPoly == LPoly.LPoly([1]))

    def test_simple_unitary_from_angles2(self):
        # Two zero angles: diagonal part is w^1 expressed with dmin = -1.
        phiset = [0, 0]
        ualg = LPoly.LAlg.unitary_from_angles(phiset)
        print(f'For phiset={phiset}, U={ualg}')
        print(f'diagonal poly = {ualg.IPoly}')
        assert (ualg.IPoly == LPoly.LPoly([0, 1], (- 1)))

    def test_simple_unitary_from_angles3(self):
        # Three zero angles: degree grows by one, dmin shifts to -2.
        phiset = [0, 0, 0]
        ualg = LPoly.LAlg.unitary_from_angles(phiset)
        print(f'For phiset={phiset}, U={ualg}')
        print(f'diagonal poly = {ualg.IPoly}')
        assert (ualg.IPoly == LPoly.LPoly([0, 0, 1], (- 2)))

    def test_simple_unitary_from_angles4(self):
        # Non-trivial +/- pi/4 angles split weight equally between extremes.
        phiset = [((- np.pi) / 4), 0, (np.pi / 4)]
        ualg = LPoly.LAlg.unitary_from_angles(phiset)
        print(f'For phiset={phiset}, U={ualg}')
        print(f'diagonal poly = {ualg.IPoly}')
        assert (ualg.IPoly == LPoly.LPoly([0.5, 0, 0.5], (- 2)))

    def test_simple_unitary_from_angles5(self):
        # Same pattern with extra zero angles in between: wider degree span.
        phiset = [((- np.pi) / 4), 0, 0, 0, (np.pi / 4)]
        ualg = LPoly.LAlg.unitary_from_angles(phiset)
        print(f'For phiset={phiset}, U={ualg}')
        print(f'diagonal poly = {ualg.IPoly}')
        assert (ualg.IPoly == LPoly.LPoly([0.5, 0, 0, 0, 0.5], (- 4)))

    def test_lpoly1(self):
        # Rotation by pi/2 times the generator w: X-part should be w^{-1}.
        w = LPoly.w
        Q0 = LPoly.LAlg.rotation((np.pi / 2))
        Q0.IPoly.round_zeros()
        prod = (Q0 * w)
        print(f'w = {w}')
        print(f'~w = {(~ w)}')
        print(f'Q0 = {Q0}')
        print(f'Q0 * w = {prod}')
        print(f'poly([1,0], -1) = {LPoly.LPoly([1], (- 1))}')
        print(f'prod.XPoly coefs={prod.XPoly.coefs}, dmin={prod.XPoly.dmin}')
        assert (prod.XPoly == LPoly.LPoly([1], (- 1)))

    def test_LPoly_mul1(self):
        # (1/2 w^{-1} + 1/2)^2 -> [0.25, 0.5, 0.25] with dmin = -2.
        lp1 = LPoly.LPoly([(1 / 2), (1 / 2)], (- 1))
        lp2 = (lp1 * lp1)
        print(f'lp1={lp1}')
        print(f'lp1 * lp1 = {lp2}')
        print(f'lp2 coefs={lp2.coefs}')
        assert (lp2.dmin == (- 2))
        # Float comparison via absolute coefficient error, not exact equality.
        assert (abs((lp2.coefs - np.array([0.25, 0.5, 0.25]))).sum() < 0.0001)
class NoisyTopkErrorRate(TopkErrorRate):
    """Top-k error rate computed after perturbing the inputs with ``noise``.

    ``noise`` is any callable mapping an input batch to a perturbed batch;
    when omitted (or falsy), the identity transform is used.
    """

    def __init__(self, model, noise=None, k=1):
        super().__init__(model, k)
        # Fall back to the identity when no noise function is supplied.
        self.noise = noise if noise else (lambda x: x)

    def update(self, inputs, labels):
        """Apply the noise transform, then delegate to the parent metric."""
        return super().update(self.noise(inputs), labels)
def inf_generator(iterable):
    """Yield items from *iterable* forever, restarting it when exhausted.

    The iterable must be re-iterable (e.g. a list or DataLoader) for fresh
    items to appear after each pass.
    """
    while True:
        for item in iterable:
            yield item
def trans(args):
    """Translate (speech-to-text) every utterance in ``args.trans_json`` with a
    trained ST model and write the n-best results to ``args.result_label``.

    With ``args.batchsize == 0`` utterances are decoded one by one; otherwise
    they are grouped into batches (sorted by descending input length when
    batchsize > 1) and decoded with ``translate_batch``.

    NOTE(review): depends on project helpers defined elsewhere
    (set_deterministic_pytorch, load_trained_model, STInterface,
    LoadInputsAndTargets, add_results_to_json).
    """
    set_deterministic_pytorch(args)
    (model, train_args) = load_trained_model(args.model)
    assert isinstance(model, STInterface)
    model.trans_args = args
    # NOTE(review): only the single-GPU case moves the model to CUDA; ngpu > 1
    # appears unsupported on this path — confirm intended.
    if (args.ngpu == 1):
        gpu_id = list(range(args.ngpu))
        logging.info(('gpu id: ' + str(gpu_id)))
        model.cuda()
    # Load utterance metadata: {utt_name: info dict}.
    with open(args.trans_json, 'rb') as f:
        js = json.load(f)['utts']
    new_js = {}
    # Feature loader configured for inference (no targets, no length sort here).
    load_inputs_and_targets = LoadInputsAndTargets(mode='asr', load_output=False, sort_in_input_length=False, preprocess_conf=(train_args.preprocess_conf if (args.preprocess_conf is None) else args.preprocess_conf), preprocess_args={'train': False})
    if (args.batchsize == 0):
        # Utterance-by-utterance decoding.
        with torch.no_grad():
            for (idx, name) in enumerate(js.keys(), 1):
                logging.info(('(%d/%d) decoding ' + name), idx, len(js.keys()))
                batch = [(name, js[name])]
                feat = load_inputs_and_targets(batch)[0][0]
                nbest_hyps = model.translate(feat, args, train_args.char_list)
                new_js[name] = add_results_to_json(js[name], nbest_hyps, train_args.char_list)
    else:
        def grouper(n, iterable, fillvalue=None):
            # Chunk *iterable* into tuples of size n, padding the last chunk.
            kargs = ([iter(iterable)] * n)
            return zip_longest(*kargs, fillvalue=fillvalue)
        keys = list(js.keys())
        if (args.batchsize > 1):
            # Sort keys by descending input length so batches are homogeneous.
            feat_lens = [js[key]['input'][0]['shape'][0] for key in keys]
            sorted_index = sorted(range(len(feat_lens)), key=(lambda i: (- feat_lens[i])))
            keys = [keys[i] for i in sorted_index]
        with torch.no_grad():
            for names in grouper(args.batchsize, keys, None):
                # Drop the fill padding added by grouper for the last batch.
                names = [name for name in names if name]
                batch = [(name, js[name]) for name in names]
                feats = load_inputs_and_targets(batch)[0]
                nbest_hyps = model.translate_batch(feats, args, train_args.char_list)
                for (i, nbest_hyp) in enumerate(nbest_hyps):
                    name = names[i]
                    new_js[name] = add_results_to_json(js[name], nbest_hyp, train_args.char_list)
    # Write results as UTF-8 JSON (binary mode, explicit encode).
    with open(args.result_label, 'wb') as f:
        f.write(json.dumps({'utts': new_js}, indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.