code stringlengths 101 5.91M |
|---|
@register_model
def efficientnet_b2a(pretrained=False, **kwargs):
    """EfficientNet-B2a alias; delegates to efficientnet_b2.

    Args:
        pretrained: load pretrained weights when True.
        **kwargs: forwarded unchanged to efficientnet_b2.
    """
    # The decorator line was mangled to a bare `_model` fragment in the
    # original; restored to the registry decorator so the model is registered.
    return efficientnet_b2(pretrained=pretrained, **kwargs)
class Embedding(nn.Module):
    """Linear bottleneck embedding.

    Projects feature_dim -> embed_dim; when constructed with type='bn' the
    projection is followed by batch normalization, otherwise it is returned
    as-is. (relu/dropout modules are created but not used in forward,
    matching the original contract.)
    """

    def __init__(self, feature_dim, embed_dim=256, type='ori'):
        super(Embedding, self).__init__()
        # Module creation order preserved so state_dict keys stay stable.
        self.bn = nn.BatchNorm1d(embed_dim, affine=True)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(p=0.5)
        self.bottleneck = nn.Linear(feature_dim, embed_dim)
        self.bottleneck.apply(init_weights)
        self.type = type

    def forward(self, x):
        projected = self.bottleneck(x)
        return self.bn(projected) if self.type == 'bn' else projected
class BartTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BART tokenizer using byte-level BPE.

    Mirrors BartTokenizer, additionally reconfiguring the backend
    pre-tokenizer and post-processor so that ``add_prefix_space`` and
    ``trim_offsets`` take effect even when they differ from the values
    serialized in the tokenizer file.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BartTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
        # Rebuild the backend pre-tokenizer if its serialized add_prefix_space
        # disagrees with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if (pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space):
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # Same reconciliation for the post-processor, which also owns
        # trim_offsets.
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # Serialized special tokens come back as lists; the processor
            # constructors expect tuples.
            if ('sep' in state):
                state['sep'] = tuple(state['sep'])
            if ('cls' in state):
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if (state.get('add_prefix_space', add_prefix_space) != add_prefix_space):
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if (state.get('trim_offsets', trim_offsets) != trim_offsets):
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token as a string, or None (with a logged error) if unset."""
        if (self._mask_token is None):
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # lstrip=True so '<mask>' absorbs the preceding space, matching the
        # slow tokenizer's behavior.
        value = (AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value)
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # Pre-tokenized input requires add_prefix_space so word-initial tokens
        # encode consistently.
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if (is_split_into_words and (not self.add_prefix_space)):
            raise ValueError(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if (is_split_into_words and (not self.add_prefix_space)):
            raise ValueError(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files into save_directory."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<s> A </s>`` for one sequence; ``<s> A </s></s> B </s>`` for a pair."""
        output = (([self.bos_token_id] + token_ids_0) + [self.eos_token_id])
        if (token_ids_1 is None):
            return output
        return (((output + [self.eos_token_id]) + token_ids_1) + [self.eos_token_id])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Return all-zero token type ids — BART does not use token types."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])
class GraphDerivations():
    """Bundle a graph with its derivations, re-parsing the graph so its name
    reflects the process derived from it, and registering its depiction."""

    def __init__(self, graph, derivations):
        self.derivations = derivations
        proc = Process.from_modgraph(graph)
        # Round-trip through GML so the stored graph carries the process name.
        self.graph = graphGMLString(graph.getGMLString(), str(proc))
        setImage(self.graph)
def weight_variable_devonc(shape, stddev=0.1, name='weight_devonc'):
    """Create a TF variable for deconvolution weights, initialized from a
    truncated normal distribution with the given stddev."""
    initial = tf.truncated_normal(shape, stddev=stddev)
    return tf.Variable(initial, name=name)
def _setup_random_policy(cfg: DictConfig, env: Environment) -> RandomPolicy:
    """Build the random policy matching cfg.env.name.

    Sanity-checks that the unwrapped environment has the expected class, then
    calls the corresponding factory (some factories take the unwrapped env).
    Raises ValueError for an unknown environment name.
    """
    assert cfg.agent == 'random'
    # name -> (expected unwrapped env class,
    #          factory taking the unwrapped env — ignored by most factories)
    builders = {
        'bin_pack': (BinPack, lambda e: networks.make_random_policy_bin_pack(bin_pack=e)),
        'snake': (Snake, lambda e: networks.make_random_policy_snake()),
        'tsp': (TSP, lambda e: networks.make_random_policy_tsp()),
        'knapsack': (Knapsack, lambda e: networks.make_random_policy_knapsack()),
        'job_shop': (JobShop, lambda e: networks.make_random_policy_job_shop()),
        'cvrp': (CVRP, lambda e: networks.make_random_policy_cvrp()),
        'multi_cvrp': (MultiCVRP, lambda e: networks.make_random_policy_multicvrp()),
        'rubiks_cube': (RubiksCube, lambda e: networks.make_random_policy_rubiks_cube(rubiks_cube=e)),
        'minesweeper': (Minesweeper, lambda e: networks.make_random_policy_minesweeper(minesweeper=e)),
        'game_2048': (Game2048, lambda e: networks.make_random_policy_game_2048()),
        'sudoku': (Sudoku, lambda e: networks.make_random_policy_sudoku(sudoku=e)),
        'cleaner': (Cleaner, lambda e: networks.make_random_policy_cleaner()),
        'maze': (Maze, lambda e: networks.make_random_policy_maze()),
        'connector': (Connector, lambda e: networks.make_random_policy_connector()),
        'tetris': (Tetris, lambda e: networks.make_random_policy_tetris(tetris=e)),
        'mmst': (MMST, lambda e: networks.make_random_policy_mmst()),
        'robot_warehouse': (RobotWarehouse, lambda e: networks.make_random_policy_robot_warehouse()),
        'graph_coloring': (GraphColoring, lambda e: networks.make_random_policy_graph_coloring()),
    }
    if cfg.env.name not in builders:
        raise ValueError(f'Environment name not found. Got {cfg.env.name}.')
    expected_cls, build = builders[cfg.env.name]
    assert isinstance(env.unwrapped, expected_cls)
    return build(env.unwrapped)
class CanonicalHFIndex(HFIndexBase):
    """HFIndexBase backed by a canonical HuggingFace dataset (e.g. wiki_dpr).

    Passages are loaded eagerly in __init__; the FAISS index is attached
    lazily via init_index, either from an explicit file path or by re-loading
    the dataset with a named index.
    """

    def __init__(self, vector_size: int, dataset_name: str='wiki_dpr', dataset_split: str='train', index_name: Optional[str]=None, index_path: Optional[str]=None, use_dummy_dataset=False):
        # Exactly one of index_name / index_path must be given; both-None and
        # both-set are rejected alike.
        if (index_name is None) == (index_path is None):
            raise ValueError('Please provide `index_name` or `index_path`.')
        self.dataset_name = dataset_name
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        logger.info('Loading passages from {}'.format(self.dataset_name))
        passages = load_dataset(self.dataset_name, with_index=False, split=self.dataset_split, dummy=self.use_dummy_dataset)
        super().__init__(vector_size, passages, index_initialized=False)

    def init_index(self):
        """Attach the FAISS index over the 'embeddings' column and mark ready."""
        if self.index_path is not None:
            logger.info('Loading index from {}'.format(self.index_path))
            self.dataset.load_faiss_index('embeddings', file=self.index_path)
        else:
            logger.info('Loading index from {}'.format((self.dataset_name + ' with index name ') + self.index_name))
            self.dataset = load_dataset(self.dataset_name, with_embeddings=True, with_index=True, split=self.dataset_split, index_name=self.index_name, dummy=self.use_dummy_dataset)
        self.dataset.set_format('numpy', columns=['embeddings'], output_all_columns=True)
        self._index_initialized = True
def build_arg_parser2():
    """Assemble the optparse-based CLI parser for the smatch calculator.

    Returns:
        optparse.OptionParser with all smatch options registered and the
        defaults r=4, v/ms/pr=False (significant defaults to 2).
    """
    parser = optparse.OptionParser(usage='Smatch calculator -- arguments')
    parser.add_option('-f', '--files', nargs=2, dest='f', type='string',
                      help='Two files containing AMR pairs. AMRs in each file are separated by a single blank line. This option is required.')
    parser.add_option('-r', '--restart', dest='r', type='int',
                      help='Restart number (Default: 4)')
    parser.add_option('--significant', dest='significant', type='int', default=2,
                      help='significant digits to output (default: 2)')
    parser.add_option('-v', '--verbose', action='store_true', dest='v',
                      help='Verbose output (Default:False)')
    parser.add_option('--vv', '--veryverbose', action='store_true', dest='vv',
                      help='Very Verbose output (Default:False)')
    parser.add_option('--ms', '--multiple_score', action='store_true', dest='ms',
                      help='Output multiple scores (one AMR pair a score) instead of a single document-level smatch score (Default: False)')
    parser.add_option('--pr', '--precision_recall', action='store_true', dest='pr',
                      help='Output precision and recall as well as the f-score. Default: false')
    # The "just*" filters share the same shape; register them in one pass.
    for flag, help_text in (
            ('--justinstance', 'just pay attention to matching instances'),
            ('--justattribute', 'just pay attention to matching attributes'),
            ('--justrelation', 'just pay attention to matching relations')):
        parser.add_option(flag, action='store_true', default=False, help=help_text)
    parser.set_defaults(r=4, v=False, ms=False, pr=False)
    return parser
def raw_npy_reader(path):
    """Read a .npy file, returning both its raw bytes and the parsed array.

    Args:
        path: filesystem path to a .npy file.

    Returns:
        (bin_data, npy_data): the raw file bytes, and the numpy array parsed
        from them — or None when parsing fails (deliberate best-effort: the
        offending path and error are printed, not raised).
    """
    import io  # local import so this fix is self-contained (io.BytesIO is the stdlib equivalent of six.BytesIO)
    with open(path, 'rb') as f:
        bin_data = f.read()
    try:
        npy_data = np.load(io.BytesIO(bin_data))
    except Exception as e:
        # Best-effort: report the bad file and continue with None.
        print(path)
        npy_data = None
        print(e)
    return (bin_data, npy_data)
@register_model('s2t_transformer')
class S2TTransformerModel(FairseqEncoderDecoderModel):
    """Speech-to-text Transformer: Conv1d subsampler + transformer
    encoder/decoder.

    The registration decorator and the staticmethod/classmethod decorators on
    the fairseq builder hooks were mangled in the original (bare ``_model``
    fragment, ``cls``-taking builders without ``@classmethod``); restored here
    — ``build_model`` calls ``cls.build_encoder(args)``, which requires them.
    """

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    @staticmethod
    def add_args(parser):
        """Register model-specific CLI arguments."""
        parser.add_argument('--conv-kernel-sizes', type=str, metavar='N', help='kernel sizes of Conv1d subsampling layers')
        parser.add_argument('--conv-channels', type=int, metavar='N', help='# of channels in Conv1d subsampling layers')
        parser.add_argument('--activation-fn', type=str, default='relu', choices=utils.get_available_activation_fns(), help='activation function to use')
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
        parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads')
        parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
        parser.add_argument('--layernorm-embedding', action='store_true', help='add layernorm to embedding')
        parser.add_argument('--no-scale-embedding', action='store_true', help='if True, dont scale embeddings')
        parser.add_argument('--load-pretrained-encoder-from', type=str, metavar='STR', help='model to take encoder weights from (for initialization)')

    @classmethod
    def build_encoder(cls, args):
        """Build the speech encoder, optionally warm-started from a checkpoint."""
        encoder = S2TTransformerEncoder(args)
        if getattr(args, 'load_pretrained_encoder_from', None):
            encoder = checkpoint_utils.load_pretrained_component_from_model(component=encoder, checkpoint=args.load_pretrained_encoder_from)
            logger.info(f'loaded pretrained encoder from: {args.load_pretrained_encoder_from}')
        return encoder

    @classmethod
    def build_decoder(cls, args, task, embed_tokens):
        """Build the (scriptable) transformer decoder."""
        return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens)

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance for the given task."""
        base_architecture(args)

        def build_embedding(dictionary, embed_dim):
            # One-off helper: embedding table sized/padded from the dictionary.
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            return Embedding(num_embeddings, embed_dim, padding_idx)

        decoder_embed_tokens = build_embedding(task.target_dictionary, args.decoder_embed_dim)
        encoder = cls.build_encoder(args)
        decoder = cls.build_decoder(args, task, decoder_embed_tokens)
        return cls(encoder, decoder)

    def get_normalized_probs(self, net_output: Tuple[(Tensor, Optional[Dict[(str, List[Optional[Tensor]])]])], log_probs: bool, sample: Optional[Dict[(str, Tensor)]]=None):
        """Return (log-)probabilities, marked batch-first for the generator."""
        lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
        lprobs.batch_first = True
        return lprobs

    def forward(self, src_tokens, src_lengths, prev_output_tokens):
        """Encode the source features, then decode with teacher forcing."""
        encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths)
        decoder_out = self.decoder(prev_output_tokens=prev_output_tokens, encoder_out=encoder_out)
        return decoder_out
class BalancedPositiveNegativeSampler(object):
    """Samples a fixed-size, class-balanced subset of matched indices.

    For each image, draws at most batch_size_per_image indices, aiming for
    positive_fraction of them to be positives (label >= 1); negatives
    (label == 0) fill the remainder. Returns boolean masks, one pair per
    image, selected uniformly at random without replacement.
    """

    def __init__(self, batch_size_per_image, positive_fraction):
        self.batch_size_per_image = batch_size_per_image
        self.positive_fraction = positive_fraction

    def __call__(self, matched_idxs):
        pos_masks, neg_masks = [], []
        for labels in matched_idxs:
            positives = torch.nonzero(labels >= 1, as_tuple=False).squeeze(1)
            negatives = torch.nonzero(labels == 0, as_tuple=False).squeeze(1)
            # Target counts: positives capped by availability, negatives fill
            # whatever budget remains.
            n_pos = min(positives.numel(), int(self.batch_size_per_image * self.positive_fraction))
            n_neg = min(negatives.numel(), self.batch_size_per_image - n_pos)
            # Random subsample without replacement via randperm.
            pos_pick = positives[torch.randperm(positives.numel(), device=positives.device)[:n_pos]]
            neg_pick = negatives[torch.randperm(negatives.numel(), device=negatives.device)[:n_neg]]
            pos_mask = torch.zeros_like(labels, dtype=torch.bool)
            neg_mask = torch.zeros_like(labels, dtype=torch.bool)
            pos_mask[pos_pick] = 1
            neg_mask[neg_pick] = 1
            pos_masks.append(pos_mask)
            neg_masks.append(neg_mask)
        return (pos_masks, neg_masks)
def parallel_apply(flows, inputs, kwargs_tup=None, devices=None, backward=False):
    """Apply each flow to its input in parallel threads (one thread per flow).

    Mirrors torch.nn.parallel.parallel_apply, extended with a ``backward``
    switch that calls ``flow.backward`` instead of ``flow.forward``.

    Args:
        flows: sequence of flow modules.
        inputs: per-flow inputs, same length as ``flows``.
        kwargs_tup: optional per-flow keyword-argument dicts.
        devices: optional per-flow CUDA device ids; when None for a flow, the
            device is inferred from its input tensors.
        backward: call ``flow.backward`` instead of ``flow.forward``.

    Returns:
        list of per-flow outputs, in input order.

    Raises:
        Exception: re-raises the first exception captured in a worker thread.
    """
    assert (len(flows) == len(inputs))
    if (kwargs_tup is not None):
        assert (len(flows) == len(kwargs_tup))
    else:
        kwargs_tup = (({},) * len(flows))
    if (devices is not None):
        assert (len(flows) == len(devices))
    else:
        devices = ([None] * len(flows))
    lock = threading.Lock()
    results = {}
    # Grad mode is thread-local in PyTorch; capture it so each worker thread
    # re-applies the caller's setting.
    grad_enabled = torch.is_grad_enabled()

    def _worker(i, flow, input, kwargs, device=None, back=False):
        torch.set_grad_enabled(grad_enabled)
        if (device is None):
            # Infer the device from a tensor found inside the input.
            device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                if (not isinstance(input, (list, tuple))):
                    input = (input,)
                output = (flow.backward(*input, **kwargs) if back else flow.forward(*input, **kwargs))
            with lock:
                results[i] = output
        except Exception as e:
            # Store the exception; it is re-raised in the parent thread below.
            with lock:
                results[i] = e

    if (len(flows) > 1):
        threads = [threading.Thread(target=_worker, args=(i, flow, input, kwargs, device, backward)) for (i, (flow, input, kwargs, device)) in enumerate(zip(flows, inputs, kwargs_tup, devices))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single flow: run inline, skipping thread overhead.
        _worker(0, flows[0], inputs[0], kwargs_tup[0], devices[0], backward)
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, Exception):
            raise output
        outputs.append(output)
    return outputs
class Bottleneck(nn.Module):
    """Bottleneck residual block: 1x1 -> grouped 3x3 -> (SE) -> linear 1x1,
    with the activation applied after the residual add."""

    def __init__(self, in_chs, out_chs, stride=1, dilation=1, bottleneck_ratio=1, group_width=1, se_ratio=0.25, downsample=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_block=None, drop_path=None):
        super(Bottleneck, self).__init__()
        mid_chs = int(round(out_chs * bottleneck_ratio))
        num_groups = mid_chs // group_width
        conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block=drop_block)
        self.conv1 = ConvBnAct(in_chs, mid_chs, kernel_size=1, **conv_kwargs)
        self.conv2 = ConvBnAct(mid_chs, mid_chs, kernel_size=3, stride=stride, dilation=dilation, groups=num_groups, **conv_kwargs)
        if se_ratio:
            # NOTE(review): SE reduction width is derived from the block *input*
            # channels, not mid_chs — presumably intentional (RegNet-style);
            # confirm before changing.
            self.se = SEModule(mid_chs, reduction_channels=int(round(in_chs * se_ratio)))
        else:
            self.se = None
        # Final conv stays linear; the nonlinearity is applied after the add.
        conv_kwargs['act_layer'] = None
        self.conv3 = ConvBnAct(mid_chs, out_chs, kernel_size=1, **conv_kwargs)
        self.act3 = act_layer(inplace=True)
        self.downsample = downsample
        self.drop_path = drop_path

    def zero_init_last_bn(self):
        """Zero the last BN gamma so the block starts as (near) identity."""
        nn.init.zeros_(self.conv3.bn.weight)

    def forward(self, x):
        residual = x
        out = self.conv2(self.conv1(x))
        if self.se is not None:
            out = self.se(out)
        out = self.conv3(out)
        if self.drop_path is not None:
            out = self.drop_path(out)
        if self.downsample is not None:
            residual = self.downsample(residual)
        out += residual
        return self.act3(out)
def get_data(files, opt):
    """Load each file (or list of files) named in *files* into a result dict.

    Args:
        files: mapping from slot name to a path or a list of paths.
        opt: options namespace (unused here; kept for interface parity).

    Returns:
        dict seeded with keys src/tgt/ans/feature/ans_feature (None when
        absent from *files*); each provided slot holds the loaded file, or a
        list of loaded files when a list of paths was given.
    """
    rst = dict.fromkeys(('src', 'tgt', 'ans', 'feature', 'ans_feature'))
    for key, val in files.items():
        rst[key] = [load_file(p) for p in val] if isinstance(val, list) else load_file(val)
    return rst
def get_dataset(opt: dict, data_dir, use_lcc: bool=False) -> InMemoryDataset:
    """Load a PyTorch-Geometric node-classification dataset by name.

    Optionally restricts the graph to its largest connected component (LCC),
    applies rewiring, and guarantees train/val/test masks exist.

    Args:
        opt: options dict; reads opt['dataset'], opt['rewiring'],
            opt['geom_gcn_splits'].
        data_dir: root directory for dataset downloads/caches.
        use_lcc: keep only the largest connected component (forced off for
            ogbn-arxiv, which provides its own canonical split).

    Returns:
        the loaded (and possibly transformed) InMemoryDataset.

    Raises:
        Exception: for an unrecognized dataset name.
    """
    ds = opt['dataset']
    path = os.path.join(data_dir, ds)
    if (ds in ['Cora', 'Citeseer', 'Pubmed']):
        dataset = Planetoid(path, ds)
    elif (ds in ['Computers', 'Photo']):
        dataset = Amazon(path, ds)
    elif (ds == 'CoauthorCS'):
        dataset = Coauthor(path, 'CS')
    elif (ds in ['cornell', 'texas', 'wisconsin']):
        dataset = WebKB(root=path, name=ds, transform=T.NormalizeFeatures())
    elif (ds in ['chameleon', 'squirrel']):
        dataset = WikipediaNetwork(root=path, name=ds, transform=T.NormalizeFeatures())
    elif (ds == 'film'):
        dataset = Actor(root=path, transform=T.NormalizeFeatures())
    elif (ds == 'ogbn-arxiv'):
        dataset = PygNodePropPredDataset(name=ds, root=path, transform=T.ToSparseTensor())
        use_lcc = False  # ogbn-arxiv ships its own split; skip LCC reduction
    else:
        raise Exception('Unknown dataset.')
    if use_lcc:
        # Restrict features/labels/edges to the LCC and renumber node ids.
        lcc = get_largest_connected_component(dataset)
        x_new = dataset.data.x[lcc]
        y_new = dataset.data.y[lcc]
        (row, col) = dataset.data.edge_index.numpy()
        # NOTE(review): `i in lcc` is a linear scan per edge if lcc is an
        # array — fine for small graphs, slow for large ones.
        edges = [[i, j] for (i, j) in zip(row, col) if ((i in lcc) and (j in lcc))]
        edges = remap_edges(edges, get_node_mapper(lcc))
        data = Data(x=x_new, edge_index=torch.LongTensor(edges), y=y_new, train_mask=torch.zeros(y_new.size()[0], dtype=torch.bool), test_mask=torch.zeros(y_new.size()[0], dtype=torch.bool), val_mask=torch.zeros(y_new.size()[0], dtype=torch.bool))
        dataset.data = data
    if (opt['rewiring'] is not None):
        dataset.data = rewire(dataset.data, opt, data_dir)
    # Probe whether the (possibly rebuilt) data object carries split masks.
    train_mask_exists = True
    try:
        dataset.data.train_mask
    except AttributeError:
        train_mask_exists = False
    if (ds == 'ogbn-arxiv'):
        # Rebuild as an undirected graph with the official OGB split indices.
        split_idx = dataset.get_idx_split()
        ei = to_undirected(dataset.data.edge_index)
        data = Data(x=dataset.data.x, edge_index=ei, y=dataset.data.y, train_mask=split_idx['train'], test_mask=split_idx['test'], val_mask=split_idx['valid'])
        dataset.data = data
        train_mask_exists = True
    if ((use_lcc or (not train_mask_exists)) and (not opt['geom_gcn_splits'])):
        # Deterministic random split (seed 12345) when masks are missing or
        # were invalidated by the LCC reduction.
        dataset.data = set_train_val_test_split(12345, dataset.data, num_development=(5000 if (ds == 'CoauthorCS') else 1500))
    return dataset
class BERT_CNN(nn.Module):
    """3-class classifier: a Conv2d + max-pool head over the stack of all 13
    BERT hidden-state layers, finishing with a linear layer and log-softmax."""

    def __init__(self):
        super(BERT_CNN, self).__init__()
        # Module creation order preserved so state_dict keys stay stable.
        self.bert = BertModel.from_pretrained('bert-base-uncased')
        # NOTE(review): padding=True is interpreted as padding=1 by Conv2d —
        # presumably intentional; confirm.
        self.conv = nn.Conv2d(in_channels=13, out_channels=13, kernel_size=(3, 768), padding=True)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(kernel_size=3, stride=1)
        self.dropout = nn.Dropout(0.1)
        self.fc = nn.Linear(442, 3)
        self.flat = nn.Flatten()
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, sent_id, mask):
        (_, _, all_layers) = self.bert(sent_id, attention_mask=mask, output_hidden_states=True)
        # Stack the 13 hidden-state layers -> (batch, 13, seq, 768).
        x = torch.cat([layer.unsqueeze(0) for layer in all_layers], 0).transpose(0, 1)
        # Free the large per-layer tensors before the conv head runs.
        del all_layers
        gc.collect()
        torch.cuda.empty_cache()
        x = self.conv(self.dropout(x))
        x = self.dropout(self.relu(x))
        x = self.pool(x)
        x = self.flat(self.dropout(x))
        x = self.fc(self.dropout(x))
        return self.softmax(x)
def apply_spectral_norm(m):
    """Apply spectral normalization to every Conv2d, Linear, and Embedding
    submodule of *m* (m.modules() includes m itself).

    Args:
        m: an nn.Module to traverse in place.
    """
    # A single isinstance with a tuple replaces the three duplicated
    # elif-branches that all performed the same call.
    for layer in m.modules():
        if isinstance(layer, (nn.Conv2d, nn.Linear, nn.Embedding)):
            spectral_norm(layer)
def ggml_compute_forward_mul_mat_q_fp32(src_0_ne, src_0_data, src_0_qtype, src_1_ne, src_1_data, result) -> None:
    """Thin FFI wrapper: quantized (src0) x fp32 (src1) matrix multiply.

    Delegates directly to the native library; *result* is presumably written
    in place by the C side — confirm against the ggml bindings. The ne/data
    argument pairs mirror ggml's tensor shape + buffer convention.
    """
    return _lib.ggml_compute_forward_mul_mat_q_fp32(src_0_ne, src_0_data, src_0_qtype, src_1_ne, src_1_data, result)
def gdt(p1, p2, mask, cutoffs):
    """GDT-style score: the fraction of masked points whose p1-p2 distance is
    within each cutoff, averaged over all cutoffs.

    Args:
        p1, p2: point tensors of matching shape (..., n_points, coords).
        mask: per-point weights/mask, summed over the last dim for the count.
        cutoffs: iterable of distance thresholds.

    Returns:
        tensor of per-batch scores in [0, 1].
    """
    n = torch.sum(mask, dim=-1)
    deltas = p1.float() - p2.float()
    distances = torch.sqrt(torch.sum(deltas ** 2, dim=-1))
    per_cutoff = [torch.sum((distances <= c) * mask, dim=-1) / n for c in cutoffs]
    return sum(per_cutoff) / len(per_cutoff)
class Net(nn.Module):
    """Batched affine product: returns M + alpha * (batch1 @ batch2) via
    torch.baddbmm."""

    def __init__(self, alpha=1):
        super().__init__()
        # Scalar multiplier applied to the bmm term.
        self.alpha = alpha

    def forward(self, M, batch1, batch2):
        return torch.baddbmm(M, batch1, batch2, alpha=self.alpha)
def dfsCheck(dfsInput, gmlInput):
    """Parse a rule from DFS and from GML and assert they are isomorphic.

    On mismatch, both rules are named, printed, and dumped for mod_post
    inspection before the assertion fires; on success the DFS-parsed rule is
    passed through the common checks.
    """
    print('DFS:', dfsInput)
    ruleFromDfs = Rule.fromDFS(dfsInput)
    ruleFromGml = Rule.fromGMLString('rule [ %s ]' % gmlInput)
    if ruleFromDfs.isomorphism(ruleFromGml) != 1:
        print('DFS Input:', ruleFromDfs)
        print('GML Input: rule [\n%s\n]' % gmlInput)
        ruleFromDfs.name = 'DFS'
        ruleFromGml.name = 'GML'
        ruleFromDfs.print()
        ruleFromGml.print()
        post.enableInvokeMake()
        assert False, 'Run mod_post to see rules.'
    commonChecks(ruleFromDfs)
class SingleClassTpFpWithDifficultBoxesTest(tf.test.TestCase):
    """Checks _compute_tp_fp_for_single_class when some groundtruth boxes are
    flagged difficult: detections matching a difficult box are dropped from
    the scored output entirely."""

    def setUp(self):
        # One class, standard 0.5 IoU matching, NMS effectively disabled
        # (iou 1.0, 10000 boxes).
        self.eval = per_image_evaluation.PerImageEvaluation(1, 0.5, 1.0, 10000)
        self.detected_boxes = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)
        self.detected_scores = np.array([0.6, 0.8, 0.5], dtype=float)
        self.groundtruth_boxes = np.array([[0, 0, 1, 1], [0, 0, 10, 10]], dtype=float)

    def test_match_to_not_difficult_box(self):
        # Second groundtruth is difficult: detection matching the first
        # (easy) box is the only true positive.
        is_difficult = np.array([False, True], dtype=bool)
        (scores, tp_fp_labels) = self.eval._compute_tp_fp_for_single_class(
            self.detected_boxes, self.detected_scores, self.groundtruth_boxes, is_difficult)
        self.assertTrue(np.allclose(np.array([0.8, 0.6, 0.5], dtype=float), scores))
        self.assertTrue(np.allclose(np.array([False, True, False], dtype=bool), tp_fp_labels))

    def test_match_to_difficult_box(self):
        # First groundtruth is difficult: its matching detection is removed
        # from the output, leaving two false positives.
        is_difficult = np.array([True, False], dtype=bool)
        (scores, tp_fp_labels) = self.eval._compute_tp_fp_for_single_class(
            self.detected_boxes, self.detected_scores, self.groundtruth_boxes, is_difficult)
        self.assertTrue(np.allclose(np.array([0.8, 0.5], dtype=float), scores))
        self.assertTrue(np.allclose(np.array([False, False], dtype=bool), tp_fp_labels))
class ConvTranspose_synapse(SynapseModel):
    """Synapse model that registers a transposed 2D convolution operation;
    bias is not supported and is rejected at construction time."""

    def __init__(self, conn, **kwargs):
        super(ConvTranspose_synapse, self).__init__(conn)
        operation = [conn.post_var_name + '[post]', 'conv_trans2d', self.input_name,
                     'weight[link]', 'stride[link]', 'padding[link]',
                     'dilation[link]', 'groups[link]']
        self._syn_operations.append(operation)
        # Only an instance-level bias_flag is honored, matching the original
        # lookup through conn.__dict__.
        if conn.__dict__.get('bias_flag'):
            raise ValueError('bias for conv_transpose is not supported')
def train(net, optimizer, lr_scheduler, train_loader, train_sampler, metrics, begin_epoch, end_epoch, logger, rank=None, batch_end_callbacks=None, epoch_end_callbacks=None, writer=None, validation_monitor=None, fp16=False, clip_grad_norm=(- 1), gradient_accumulate_steps=1):
    """Run the main training loop over epochs [begin_epoch, end_epoch).

    Per batch: move data to GPU, forward, (optionally AMP-scaled) backward,
    and — every ``gradient_accumulate_steps`` batches — LR-scheduler step,
    optional gradient clipping, optimizer step and zero_grad. Phase timings,
    losses and LRs are pushed to ``writer`` (TensorBoard) when provided.

    Args:
        net: model; called as net(*batch), expected to return (outputs, loss).
        optimizer: torch optimizer.
        lr_scheduler: per-step scheduler, or ReduceLROnPlateau (stepped once
            per epoch from the validation metric), or None.
        train_loader: training data loader.
        train_sampler: optional sampler; its epoch is reset each epoch when it
            supports set_epoch (distributed shuffling).
        metrics: aggregator with reset()/update()/get().
        begin_epoch: first epoch index (inclusive).
        end_epoch: last epoch index (exclusive).
        logger: not used directly here — kept for interface compatibility.
        rank: distributed rank, forwarded to batch-end callbacks.
        batch_end_callbacks: optional per-batch callback(s).
        epoch_end_callbacks: optional per-epoch callback(s).
        writer: optional TensorBoard SummaryWriter.
        validation_monitor: callable invoked at each epoch end; also supplies
            the metric value for ReduceLROnPlateau.
        fp16: use NVIDIA apex AMP loss scaling.
        clip_grad_norm: max gradient norm; values <= 0 disable clipping.
        gradient_accumulate_steps: number of batches per optimizer step.
    """
    assert (isinstance(gradient_accumulate_steps, int) and (gradient_accumulate_steps >= 1))
    for epoch in range(begin_epoch, end_epoch):
        print(('PROGRESS: %.2f%%' % ((100.0 * epoch) / end_epoch)))
        # Reshuffle the distributed sampler so each epoch sees a new ordering.
        if ((train_sampler is not None) and hasattr(train_sampler, 'set_epoch')):
            train_sampler.set_epoch(epoch)
        metrics.reset()
        net.train()
        end_time = time.time()
        if isinstance(lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            # Plateau scheduler steps once per epoch on the monitored
            # validation metric.
            (name, value) = validation_monitor.metrics.get()
            val = value[name.index(validation_monitor.host_metric_name)]
            lr_scheduler.step(val, epoch)
        for (nbatch, batch) in enumerate(train_loader):
            global_steps = ((len(train_loader) * epoch) + nbatch)
            os.environ['global_steps'] = str(global_steps)
            data_in_time = (time.time() - end_time)
            data_transfer_time = time.time()
            batch = to_cuda(batch)
            data_transfer_time = (time.time() - data_transfer_time)
            forward_time = time.time()
            (outputs, loss) = net(*batch)
            loss = loss.mean()
            if (gradient_accumulate_steps > 1):
                # Scale so accumulated gradients average rather than sum.
                loss = (loss / gradient_accumulate_steps)
            forward_time = (time.time() - forward_time)
            backward_time = time.time()
            if fp16:
                # apex AMP: scale the loss to avoid fp16 gradient underflow.
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            backward_time = (time.time() - backward_time)
            optimizer_time = time.time()
            if (((global_steps + 1) % gradient_accumulate_steps) == 0):
                # NOTE(review): the scheduler is stepped before
                # optimizer.step(), which recent PyTorch warns about —
                # presumably deliberate here; confirm before reordering.
                if ((lr_scheduler is not None) and (not isinstance(lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau))):
                    lr_scheduler.step()
                if (clip_grad_norm > 0):
                    if fp16:
                        # Clip the fp32 master weights' gradients under AMP.
                        total_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), clip_grad_norm)
                    else:
                        total_norm = torch.nn.utils.clip_grad_norm_(net.parameters(), clip_grad_norm)
                    if (writer is not None):
                        writer.add_scalar(tag='grad-para/Total-Norm', scalar_value=float(total_norm), global_step=global_steps)
                optimizer.step()
                optimizer.zero_grad()
            optimizer_time = (time.time() - optimizer_time)
            metric_time = time.time()
            metrics.update(outputs)
            if (writer is not None):
                with torch.no_grad():
                    for (group_i, param_group) in enumerate(optimizer.param_groups):
                        writer.add_scalar(tag='Initial-LR/Group_{}'.format(group_i), scalar_value=param_group['initial_lr'], global_step=global_steps)
                        writer.add_scalar(tag='LR/Group_{}'.format(group_i), scalar_value=param_group['lr'], global_step=global_steps)
                    writer.add_scalar(tag='Train-Loss', scalar_value=float(loss.item()), global_step=global_steps)
                    (name, value) = metrics.get()
                    for (n, v) in zip(name, value):
                        writer.add_scalar(tag=('Train-' + n), scalar_value=v, global_step=global_steps)
            metric_time = (time.time() - metric_time)
            if (batch_end_callbacks is not None):
                batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch, add_step=True, rank=rank, data_in_time=data_in_time, data_transfer_time=data_transfer_time, forward_time=forward_time, backward_time=backward_time, optimizer_time=optimizer_time, metric_time=metric_time, eval_metric=metrics, locals=locals())
                _multiple_callbacks(batch_end_callbacks, batch_end_params)
            end_time = time.time()
        if (validation_monitor is not None):
            validation_monitor(epoch, net, optimizer, writer)
        if (epoch_end_callbacks is not None):
            _multiple_callbacks(epoch_end_callbacks, epoch, net, optimizer, writer, validation_monitor=validation_monitor)
class ExplainableBoostingRegressor(EBMModel, RegressorMixin, ExplainerMixin):
    """Explainable Boosting Machine (EBM) regressor.

    All fitting logic lives in EBMModel; this subclass fixes the regression
    objective/link defaults and declares the fitted-attribute schema below.
    """
    # --- fitted attributes (populated by fit) ---
    # Input schema
    n_features_in_: int
    term_names_: List[str]
    bins_: List[Union[(List[Dict[(str, int)]], List[np.ndarray])]]
    feature_names_in_: List[str]
    feature_types_in_: List[str]
    feature_bounds_: np.ndarray
    # Terms (mains and interactions) and their learned scores
    term_features_: List[Tuple[(int, ...)]]
    bin_weights_: List[np.ndarray]
    bagged_scores_: List[np.ndarray]
    term_scores_: List[np.ndarray]
    standard_deviations_: List[np.ndarray]
    # Link function mapping additive scores to predictions
    link_: str
    link_param_: float
    # Bagging / boosting bookkeeping
    bag_weights_: np.ndarray
    breakpoint_iteration_: np.ndarray
    # Per-feature histograms of the training data
    histogram_edges_: List[Union[(None, np.ndarray)]]
    histogram_weights_: List[np.ndarray]
    unique_val_counts_: np.ndarray
    # Intercept and observed target range
    intercept_: float
    bagged_intercept_: np.ndarray
    min_target_: float
    max_target_: float
    available_explanations = ['global', 'local']
    explainer_type = 'model'

    def __init__(self, feature_names: Optional[Sequence[Union[(None, str)]]]=None, feature_types: Optional[Sequence[Union[(None, str, Sequence[str], Sequence[float])]]]=None, max_bins: int=256, max_interaction_bins: int=32, interactions: Optional[Union[(int, float, Sequence[Union[(int, str, Sequence[Union[(int, str)]])]])]]=10, exclude: Optional[Sequence[Union[(int, str, Sequence[Union[(int, str)]])]]]=[], validation_size: Optional[Union[(int, float)]]=0.15, outer_bags: int=8, inner_bags: Optional[int]=0, learning_rate: float=0.01, greediness: Optional[float]=0.0, smoothing_rounds: Optional[int]=0, max_rounds: Optional[int]=5000, early_stopping_rounds: Optional[int]=50, early_stopping_tolerance: Optional[float]=0.0001, min_samples_leaf: Optional[int]=2, max_leaves: int=3, objective: str='rmse', n_jobs: Optional[int]=(- 2), random_state: Optional[int]=42):
        """Forward every hyperparameter unchanged to EBMModel.

        NOTE(review): ``exclude=[]`` is a mutable default argument — safe
        only if EBMModel never mutates it; confirm before relying on it.
        """
        super(ExplainableBoostingRegressor, self).__init__(feature_names=feature_names, feature_types=feature_types, max_bins=max_bins, max_interaction_bins=max_interaction_bins, interactions=interactions, exclude=exclude, validation_size=validation_size, outer_bags=outer_bags, inner_bags=inner_bags, learning_rate=learning_rate, greediness=greediness, smoothing_rounds=smoothing_rounds, max_rounds=max_rounds, early_stopping_rounds=early_stopping_rounds, early_stopping_tolerance=early_stopping_tolerance, min_samples_leaf=min_samples_leaf, max_leaves=max_leaves, objective=objective, n_jobs=n_jobs, random_state=random_state)

    def predict(self, X, init_score=None):
        """Predict targets for X: additive scores passed through the inverse
        link (link_/link_param_ are set during fit)."""
        scores = self._predict_score(X, init_score)
        return inv_link(scores, self.link_, self.link_param_)
class E2E(E2ETransformer):
    """Conformer-based E2E ASR model: the transformer E2E model with its
    encoder replaced by a conformer Encoder (macaron FFN + conv module).

    ``add_arguments`` / ``add_conformer_arguments`` are invoked on the class
    itself (``E2ETransformer.add_arguments(parser)``, ``E2E.add_conformer_arguments(parser)``)
    with no instance, so the stripped ``@staticmethod`` decorators are restored.
    """

    @staticmethod
    def add_arguments(parser):
        """Add transformer arguments, then the conformer-specific ones."""
        E2ETransformer.add_arguments(parser)
        E2E.add_conformer_arguments(parser)
        return parser

    @staticmethod
    def add_conformer_arguments(parser):
        """Add conformer-specific arguments in their own argument group."""
        group = parser.add_argument_group('conformer model specific setting')
        group = add_arguments_conformer_common(group)
        return parser

    def __init__(self, idim, odim, args, ignore_id=(- 1)):
        """Construct the model and swap in the conformer encoder.

        Args:
            idim: input feature dimension.
            odim: output (vocabulary) dimension.
            args: parsed argument namespace.
            ignore_id: label id ignored by the loss (padding).
        """
        super().__init__(idim, odim, args, ignore_id)
        if (args.transformer_attn_dropout_rate is None):
            args.transformer_attn_dropout_rate = args.dropout_rate
        # Validate/normalize the relative-position encoding settings.
        args = verify_rel_pos_type(args)
        self.encoder = Encoder(idim=idim, attention_dim=args.adim, attention_heads=args.aheads, linear_units=args.eunits, num_blocks=args.elayers, input_layer=args.transformer_input_layer, dropout_rate=args.dropout_rate, positional_dropout_rate=args.dropout_rate, attention_dropout_rate=args.transformer_attn_dropout_rate, pos_enc_layer_type=args.transformer_encoder_pos_enc_layer_type, selfattention_layer_type=args.transformer_encoder_selfattn_layer_type, activation_type=args.transformer_encoder_activation_type, macaron_style=args.macaron_style, use_cnn_module=args.use_cnn_module, zero_triu=args.zero_triu, cnn_module_kernel=args.cnn_module_kernel)
        self.reset_parameters(args)
def _support_choice_with_dot_py(choice):
if choice.endswith('.py'):
return choice[:(- 3)]
return choice |
class ContinuousConv(tf.keras.layers.Layer):
    """Continuous convolution on point clouds.

    For each output position, features of the input points inside a spatial
    extent are gathered, each neighbor's relative position is mapped into
    the discrete filter grid (``coordinate_mapping`` + ``interpolation``),
    and the filtered features are accumulated (optionally normalized by the
    sum of neighbor importances).

    Key arguments:
        filters: number of output channels.
        kernel_size: spatial resolution of the filter, e.g. ``[3, 3, 3]``.
        offset: optional 3-vector offset applied in the backend op
            (defaults to zeros).
        window_function: optional callable mapping normalized neighbor
            distances to per-neighbor importance weights.
        use_dense_layer_for_center: if True, a Dense transform of each
            point's own features is added to the convolution output.
    """

    def __init__(self, filters, kernel_size, activation=None, use_bias=True, kernel_initializer='uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, align_corners=True, coordinate_mapping='ball_to_cube_radial', interpolation='linear', normalize=True, radius_search_ignore_query_points=False, radius_search_metric='L2', offset=None, window_function=None, use_dense_layer_for_center=False, dense_kernel_initializer='glorot_uniform', dense_kernel_regularizer=None, in_channels=None, **kwargs):
        from tensorflow.keras import activations, initializers, regularizers
        self.filters = filters
        self.kernel_size = kernel_size
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.align_corners = align_corners
        self.coordinate_mapping = coordinate_mapping
        self.interpolation = interpolation
        self.normalize = normalize
        self.radius_search_ignore_query_points = radius_search_ignore_query_points
        self.radius_search_metric = radius_search_metric
        self.dense_kernel_initializer = initializers.get(dense_kernel_initializer)
        self.dense_kernel_regularizer = regularizers.get(dense_kernel_regularizer)
        if offset is None:
            self.offset = tf.zeros(shape=(3,))
        else:
            self.offset = offset
        self.window_function = window_function
        # Distances are only needed when a window function weighs neighbors.
        self.fixed_radius_search = FixedRadiusSearch(metric=self.radius_search_metric, ignore_query_point=self.radius_search_ignore_query_points, return_distances=(not (self.window_function is None)))
        # normalize_distances=True means this search returns distances that
        # are already normalized to the per-query radius (see call()).
        self.radius_search = RadiusSearch(metric=self.radius_search_metric, ignore_query_point=self.radius_search_ignore_query_points, return_distances=(not (self.window_function is None)), normalize_distances=(not (self.window_function is None)))
        self.use_dense_layer_for_center = use_dense_layer_for_center
        if self.use_dense_layer_for_center:
            self.dense = tf.keras.layers.Dense(self.filters, kernel_initializer=dense_kernel_initializer, kernel_regularizer=dense_kernel_regularizer, use_bias=False)
        super().__init__(**kwargs)

    def build(self, inp_features_shape):
        """Create the filter kernel (and bias) once the input channel count is known."""
        self.in_channels = inp_features_shape[(- 1)]
        kernel_shape = tf.TensorShape((*self.kernel_size, self.in_channels, self.filters))
        self.kernel = self.add_weight(name='kernel', shape=kernel_shape, initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, trainable=self.trainable)
        if self.use_bias:
            bias_shape = tf.TensorShape((self.filters,))
            self.bias = self.add_weight(name='bias', shape=bias_shape, initializer=self.bias_initializer, regularizer=self.bias_regularizer, trainable=self.trainable)
        super().build(inp_features_shape)

    def call(self, inp_features, inp_positions, out_positions, extents, inp_importance=None, fixed_radius_search_hash_table=None, user_neighbors_index=None, user_neighbors_row_splits=None, user_neighbors_importance=None):
        """Apply the convolution.

        ``extents`` may be rank 0 (one cubic extent for all points; uses the
        fixed-radius search) or rank 1 (one extent per output point; uses the
        generic radius search). Precomputed neighbor lists may be supplied
        via the ``user_neighbors_*`` arguments to skip the search.
        """
        offset = self.offset
        if inp_importance is None:
            # An empty tensor signals "all ones" to the backend op.
            inp_importance = tf.ones((0,), dtype=tf.float32)
        extents = tf.convert_to_tensor(extents)
        return_distances = (not (self.window_function is None))
        if ((not (user_neighbors_index is None)) and (not (user_neighbors_row_splits is None))):
            # Caller supplied the neighbor lists -- use them verbatim.
            if user_neighbors_importance is None:
                neighbors_importance = tf.ones((0,), dtype=tf.float32)
            else:
                neighbors_importance = user_neighbors_importance
            neighbors_index = user_neighbors_index
            neighbors_row_splits = user_neighbors_row_splits
        else:
            if extents.shape.rank == 0:
                radius = (0.5 * extents)
                self.nns = self.fixed_radius_search(inp_positions, queries=out_positions, radius=radius, hash_table=fixed_radius_search_hash_table)
                if return_distances:
                    # FixedRadiusSearch returns raw distances; normalize to
                    # the search radius (squared distances for metric 'L2').
                    if self.radius_search_metric == 'L2':
                        neighbors_distance_normalized = (self.nns.neighbors_distance / (radius * radius))
                    else:
                        neighbors_distance_normalized = (self.nns.neighbors_distance / radius)
            elif extents.shape.rank == 1:
                radii = (0.5 * extents)
                self.nns = self.radius_search(inp_positions, queries=out_positions, radii=radii)
                if return_distances:
                    # BUG FIX: this branch previously never assigned
                    # `neighbors_distance_normalized`, so combining per-point
                    # extents with a window_function raised a NameError below.
                    # self.radius_search was built with normalize_distances=True,
                    # hence the returned distances are already normalized.
                    neighbors_distance_normalized = self.nns.neighbors_distance
            else:
                raise Exception('extents rank must be 0 or 1')
            if self.window_function is None:
                neighbors_importance = tf.ones((0,), dtype=tf.float32)
            else:
                neighbors_importance = self.window_function(neighbors_distance_normalized)
            neighbors_index = self.nns.neighbors_index
            neighbors_row_splits = self.nns.neighbors_row_splits
        # Average neighbor count per output point, kept for introspection.
        num_pairs = tf.shape(neighbors_index)[0]
        self._avg_neighbors = (tf.dtypes.cast(num_pairs, tf.float32) / tf.dtypes.cast(tf.shape(out_positions)[0], tf.float32))
        # The backend op expects extents of rank 2.
        extents_rank2 = extents
        while extents_rank2.shape.rank < 2:
            extents_rank2 = tf.expand_dims(extents_rank2, axis=(- 1))
        self._conv_values = {'filters': self.kernel, 'out_positions': out_positions, 'extents': extents_rank2, 'offset': offset, 'inp_positions': inp_positions, 'inp_features': inp_features, 'inp_importance': inp_importance, 'neighbors_index': neighbors_index, 'neighbors_row_splits': neighbors_row_splits, 'neighbors_importance': neighbors_importance, 'align_corners': self.align_corners, 'coordinate_mapping': self.coordinate_mapping, 'interpolation': self.interpolation, 'normalize': self.normalize}
        out_features = ops.continuous_conv(**self._conv_values)
        self._conv_output = out_features
        if self.use_dense_layer_for_center:
            # Extra path: a Dense transform of each point's own features.
            self._dense_output = self.dense(inp_features)
            out_features = (out_features + self._dense_output)
        if self.use_bias:
            out_features += self.bias
        out_features = self.activation(out_features)
        return out_features

    def compute_output_shape(self, inp_features_shape):
        """Output shape is (num_output_points, filters)."""
        return tf.TensorShape((None, self.filters))
class SummertimeScisummnet(datasets.GeneratorBasedBuilder):
    """Loader for the ScisummNet 1.1 corpus (papers with gold summaries)."""

    VERSION = datasets.Version('1.1.0')
    BUILDER_CONFIGS = [datasets.BuilderConfig()]

    def _info(self):
        """Describe the per-entry string features exposed by this dataset."""
        feature_spec = {
            'entry_number': datasets.Value('string'),
            'document_xml': datasets.Value('string'),
            'citing_sentences_annotated.json': datasets.Value('string'),
            'summary': datasets.Value('string'),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(feature_spec),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive; all entries go into one train split."""
        extracted = dl_manager.download_and_extract(_URLs)
        train_dir = os.path.join(extracted, 'scisummnet_release1.1__', 'top1000_complete')
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'extraction_path': train_dir, 'split': 'train'})]

    def _generate_examples(self, extraction_path, split):
        """Yield one example per paper folder under ``extraction_path``."""

        def _read(*parts):
            # Read a UTF-8 text file located relative to extraction_path.
            with open(os.path.join(extraction_path, *parts), 'r', encoding='utf-8') as fh:
                return fh.read()

        for folder in os.listdir(extraction_path):
            record = {
                'entry_number': folder,
                'document_xml': _read(folder, 'Documents_xml', (folder + '.xml')),
                'citing_sentences_annotated.json': _read(folder, 'citing_sentences_annotated.json'),
                'summary': _read(folder, 'summary', (folder + '.gold.txt')),
            }
            yield (record['entry_number'], record)
def test_parse_config_with_invalid_flag(mocker):
    """An unknown model type passed via --config must surface as a KeyError."""
    fake_argv = ['vmcnet', '--config.model.type=not_a_real_model']
    mocker.patch('sys.argv', fake_argv)
    flag_values = flags.FlagValues()
    with pytest.raises(KeyError):
        parse_flags(flag_values)
def json_to_text(file_path, data):
    """Write each item of ``data`` as one JSON line (JSON Lines format).

    Args:
        file_path: Destination path (``str`` or ``pathlib.Path``).
        data: Iterable of JSON-serializable objects, one per output line.
    """
    if not isinstance(file_path, Path):
        file_path = Path(file_path)
    # ensure_ascii=False emits raw non-ASCII characters, so the file must be
    # opened as UTF-8 explicitly: the platform default encoding (e.g. cp1252
    # on Windows) would raise UnicodeEncodeError on such content.
    with open(str(file_path), 'w', encoding='utf-8') as fw:
        for line in data:
            line = json.dumps(line, ensure_ascii=False)
            fw.write(line + '\n')
def conv_dw(in_channels, out_channels, kernel_size=3, padding=1, stride=1, dilation=1):
    """Depthwise-separable convolution block.

    A depthwise conv (groups=in_channels) followed by a 1x1 pointwise conv,
    each with BatchNorm and in-place ReLU.
    """
    depthwise = [
        nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding, dilation=dilation, groups=in_channels, bias=False),
        nn.BatchNorm2d(in_channels),
        nn.ReLU(inplace=True),
    ]
    pointwise = [
        nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*depthwise, *pointwise)
def get_parser(additional_commands=None):
    """Create the base CLI parser shared by the training/eval entry points.

    Args:
        additional_commands: optional extra values accepted by ``--cmd``.
    """
    valid_cmds = ['retrain', 'resume', 'eval', 'eval-init', 'slurm']
    if additional_commands:
        valid_cmds += additional_commands
    arg_parser = argparse.ArgumentParser()
    # General experiment control.
    arg_parser.add_argument('--cmd', type=str, default='resume', choices=valid_cmds)
    arg_parser.add_argument('--log-env-info', type=utils.str2bool, default=False)
    arg_parser.add_argument('--iter', type=str, nargs='*', default=[])
    arg_parser.add_argument('--eval-net-root', type=str, default='')
    arg_parser.add_argument('--experiments-root', type=str, default='./experiments')
    # SLURM scheduling options.
    arg_parser.add_argument('--slurm-cmd', type=str, default='resume')
    arg_parser.add_argument('--slurm-queue', type=str, default='gpu')
    arg_parser.add_argument('--slurm-qos', type=str, default='normal')
    arg_parser.add_argument('--slurm-n-gpus', type=int, default=1)
    arg_parser.add_argument('--slurm-n-cpus', type=int, default=(- 1))
    arg_parser.add_argument(
        '--slurm-time',
        type=str,
        default='2-00:00',
        help='Acceptable time formats include "minutes", "minutes:seconds", "hours:minutes:seconds", "days-hours", "days-hours:minutes" and "days-hours:minutes:seconds"',
    )
    return arg_parser
def get_args_parser():
    """Build the parser for dumping GQA predictions.

    Inherits every option from the detection parser and adds the dataset
    split selector.
    """
    parent = detection.get_args_parser()
    gqa_parser = argparse.ArgumentParser(
        'Get predictions for GQA and dump to file',
        parents=[parent],
        add_help=False,
    )
    gqa_parser.add_argument('--split', type=str, default='testdev', choices=('testdev', 'test', 'challenge', 'submission'))
    return gqa_parser
class Polygon():
    """A randomly colored, semi-transparent polygon on a width x height canvas."""

    def __init__(self, vertices, width, height):
        self.vertices = vertices
        self.width = width
        self.height = height
        # Random RGB color (three independent draws) and opacity.
        self.color = tuple(generate_color() for _ in range(3))
        self.alpha = generate_alpha()
        self.coordinates = generate_polygon_coordinates(self)

    def __repr__(self) -> str:
        return '%s , %s ,\n %s' % (self.color, self.alpha, self.coordinates)
def preresnet18_wd4(**kwargs):
    """PreResNet-18 with all channel widths scaled by 0.25.

    Extra keyword arguments are forwarded unchanged to ``get_preresnet``.
    """
    return get_preresnet(
        blocks=18,
        width_scale=0.25,
        model_name='preresnet18_wd4',
        **kwargs,
    )
class Trainer(TrainerAbstract, TrainerLoss, TrainerIteration, TrainerDataset, TrainerModel):
    """Top-level Atlasnet training driver.

    Combines the dataset/model/loss/iteration mixins and owns the
    train/test loops, mesh generation for reports, and the demo path.
    """

    def __init__(self, opt):
        super(Trainer, self).__init__(opt)
        self.dataset_train = None
        # Directory where generated meshes/images are written during training.
        self.opt.training_media_path = os.path.join(self.opt.dir_name, 'training_media')
        if (not os.path.exists(self.opt.training_media_path)):
            os.mkdir(self.opt.training_media_path)
        # Runtime flags toggled over the course of training.
        self.flags = EasyDict()
        self.flags.media_count = 0
        self.flags.add_log = True
        self.flags.build_website = False
        self.flags.get_closer_neighbourg = False
        self.flags.compute_clustering_errors = False
        self.display = EasyDict({'recons': []})
        self.colormap = mesh_processor.ColorMap()

    def train_loop(self):
        """Iterate once over the training dataloader, one train_iteration per batch."""
        iterator = self.datasets.dataloader_train.__iter__()
        for data in iterator:
            self.increment_iteration()
            self.data = EasyDict(data)
            self.data.points = self.data.points.to(self.opt.device)
            # Point-cloud augmentation is only applied in the autoencoder setting.
            if ((self.datasets.data_augmenter is not None) and (not self.opt.SVR)):
                self.datasets.data_augmenter(self.data.points)
            self.train_iteration()

    def train_epoch(self):
        """Run one training epoch (possibly several passes over the dataloader)."""
        self.flags.train = True
        # Build the HTML report on the last epoch (unless using custom data).
        if ((self.epoch == (self.opt.nepoch - 1)) and (not self.opt.custom_data)):
            self.flags.build_website = True
        self.log.reset()
        if (not self.opt.no_learning):
            self.network.train()
        else:
            self.network.eval()
        self.learning_rate_scheduler()
        self.reset_iteration()
        for i in range(self.opt.loop_per_epoch):
            self.train_loop()

    def test_loop(self, pc_recon=None):
        """Iterate over the test dataloader; optionally fill ``pc_recon`` with reconstructions."""
        iterator = self.datasets.dataloader_test.__iter__()
        self.reset_iteration()
        num_iter = 0
        for data in iterator:
            self.increment_iteration()
            self.data = EasyDict(data)
            self.data.points = self.data.points.to(self.opt.device)
            if (pc_recon is None):
                self.test_iteration()
            else:
                # Store this batch's reconstruction in the caller-provided buffer.
                pc_recon_iter = self.test_iteration(return_recon=True)
                pc_recon[(num_iter * self.opt.batch_size_test):((num_iter * self.opt.batch_size_test) + pc_recon_iter.shape[0])] = pc_recon_iter.cpu().detach().numpy()
            num_iter += 1
        return pc_recon

    def test_epoch(self, pc_recon=None):
        """Run a full evaluation pass, update logs/curves and (optionally) the HTML report."""
        self.flags.train = False
        self.sum_loss = 0.0
        self.sum_fscore = 0.0
        self.network.eval()
        pc_recon = self.test_loop(pc_recon)
        self.log.end_epoch()
        try:
            self.log.update_curves(self.visualizer.vis, self.opt.dir_name)
        except Exception:
            # Fixed: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Curve plotting stays best-effort.
            print('could not update curves')
        # NOTE(review): `self.num_val_points` is not set in this class; it is
        # presumably provided by one of the mixins -- confirm.
        print(f'Sampled {self.num_val_points} regular points for evaluation')
        self.metro_results = 0
        if ((self.flags.build_website or self.opt.run_single_eval) and (not self.opt.no_metro)):
            self.metro()
        if self.flags.build_website:
            # Collect sample meshes and (log-scale) curves for the HTML report.
            self.html_report_data = EasyDict()
            self.html_report_data.output_meshes = [self.generate_random_mesh() for i in range(3)]
            log_curves = ['loss_val', 'loss_train_total']
            self.html_report_data.data_curve = {key: [np.log(val) for val in self.log.curves[key]] for key in log_curves}
            self.html_report_data.fscore_curve = {'fscore': self.log.curves['fscore']}
            html_report.main(self, outHtml='index.html')
        return pc_recon

    def generate_random_mesh(self):
        """Generate a mesh for a randomly chosen test sample."""
        index = np.random.randint(self.datasets.len_dataset_test)
        self.data = EasyDict(self.datasets.dataset_test[index])
        self.data.points.unsqueeze_(0)
        if self.opt.SVR:
            self.data.image.unsqueeze_(0)
        return self.generate_mesh()

    def generate_mesh(self):
        """Run the network on the current sample and save the resulting mesh."""
        self.make_network_input()
        mesh = self.network.module.generate_mesh(self.data.network_input)
        path = ('/'.join([self.opt.training_media_path, str(self.flags.media_count)]) + '.obj')
        image_path = '/'.join([self.data.image_path, '00.png'])
        mesh_processor.save(mesh, path, self.colormap)
        self.flags.media_count += 1
        return {'output_path': path, 'image_path': image_path}

    def demo(self, demo_path, input_path_points=None):
        """Reconstruct a mesh from a single input file and save it as .ply/.obj.

        ``input_path_points`` may point to a different file whose
        normalization is used to un-normalize the generated vertices.
        """
        ext = demo_path.split('.')[(- 1)]
        self.data = self.datasets.dataset_train.load(demo_path)
        self.data = EasyDict(self.data)
        if (input_path_points is None):
            input_path_points = demo_path
        get_normalization = self.datasets.dataset_train.load(input_path_points)
        get_normalization = EasyDict(get_normalization)
        self.make_network_input()
        mesh = self.network.module.generate_mesh(self.data.network_input)
        if (get_normalization.operation is not None):
            # Undo the dataset normalization so the mesh lives in input coordinates.
            vertices = torch.from_numpy(mesh.vertices).clone().unsqueeze(0)
            get_normalization.operation.invert()
            unnormalized_vertices = get_normalization.operation.apply(vertices)
            mesh = pymesh.form_mesh(vertices=unnormalized_vertices.squeeze().numpy(), faces=mesh.faces)
        if self.opt.demo:
            # Save next to the input file with a descriptive suffix.
            path = demo_path.split('.')
            path[(- 2)] += 'AtlasnetReconstruction'
            path[(- 1)] = 'ply'
            path = '.'.join(path)
        else:
            path = ('/'.join([self.opt.training_media_path, str(self.flags.media_count)]) + '.ply')
            self.flags.media_count += 1
        print(f'Atlasnet generated mesh at {path}!')
        mesh_processor.save(mesh, path, self.colormap)
        return path
class Meta(Component):
    """Component that records its constructor arguments verbatim in ``self.fields``."""

    def __init__(self, source, pivots, dimension_names=None):
        # Capture the constructor arguments by name (explicit equivalent of
        # the locals()-minus-self trick).
        self.fields = {
            'source': source,
            'pivots': pivots,
            'dimension_names': dimension_names,
        }
def main(args):
    """End-to-end DreamBooth fine-tuning entry point.

    Orchestrates: accelerator/logging setup, optional prior-preservation
    class-image generation, model/tokenizer loading (optionally wrapped with
    LoRA adapters), optimizer/dataloader/LR-scheduler construction, the
    training loop with optional checkpoint resume and periodic validation,
    and the final model save (plus optional push to the Hub).
    """
    # --- Accelerator and logging setup --------------------------------------
    logging_dir = Path(args.output_dir, args.logging_dir)
    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, logging_dir=logging_dir)
    if (args.train_text_encoder and (args.gradient_accumulation_steps > 1) and (accelerator.num_processes > 1)):
        raise ValueError('Gradient accumulation is not supported when training the text encoder in distributed training. Please set gradient_accumulation_steps to 1. This feature will be supported in the future.')
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    logger.info(accelerator.state, main_process_only=False)
    # Only the local main process gets verbose library logging.
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()
    if (args.seed is not None):
        set_seed(args.seed)
    # --- Prior preservation: generate class images if too few exist ---------
    if args.with_prior_preservation:
        class_images_dir = Path(args.class_data_dir)
        if (not class_images_dir.exists()):
            class_images_dir.mkdir(parents=True)
        cur_class_images = len(list(class_images_dir.iterdir()))
        if (cur_class_images < args.num_class_images):
            # Pick the dtype for the sampling pipeline; explicit flags override.
            torch_dtype = (torch.float16 if (accelerator.device.type == 'cuda') else torch.float32)
            if (args.prior_generation_precision == 'fp32'):
                torch_dtype = torch.float32
            elif (args.prior_generation_precision == 'fp16'):
                torch_dtype = torch.float16
            elif (args.prior_generation_precision == 'bf16'):
                torch_dtype = torch.bfloat16
            pipeline = DiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision)
            pipeline.set_progress_bar_config(disable=True)
            num_new_images = (args.num_class_images - cur_class_images)
            logger.info(f'Number of class images to sample: {num_new_images}.')
            sample_dataset = PromptDataset(args.class_prompt, num_new_images)
            sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
            sample_dataloader = accelerator.prepare(sample_dataloader)
            pipeline.to(accelerator.device)
            for example in tqdm(sample_dataloader, desc='Generating class images', disable=(not accelerator.is_local_main_process)):
                images = pipeline(example['prompt']).images
                for (i, image) in enumerate(images):
                    # Content hash in the filename keeps regenerated images unique.
                    hash_image = hashlib.sha1(image.tobytes()).hexdigest()
                    image_filename = (class_images_dir / f"{(example['index'][i] + cur_class_images)}-{hash_image}.jpg")
                    image.save(image_filename)
            # Free the sampling pipeline before training starts.
            del pipeline
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
    # --- Output directory / Hub repository -----------------------------------
    if accelerator.is_main_process:
        if args.push_to_hub:
            if (args.hub_model_id is None):
                repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
            else:
                repo_name = args.hub_model_id
            repo = Repository(args.output_dir, clone_from=repo_name)
            # NOTE(review): 'w+' truncates the file, so these membership checks
            # always see an empty file and both patterns are written
            # unconditionally -- confirm whether 'a+' (plus seek(0)) was intended.
            with open(os.path.join(args.output_dir, '.gitignore'), 'w+') as gitignore:
                if ('step_*' not in gitignore):
                    gitignore.write('step_*\n')
                if ('epoch_*' not in gitignore):
                    gitignore.write('epoch_*\n')
        elif (args.output_dir is not None):
            os.makedirs(args.output_dir, exist_ok=True)
    # --- Load tokenizer, scheduler and models --------------------------------
    if args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
    elif args.pretrained_model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer', revision=args.revision, use_fast=False)
    text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
    noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', num_train_timesteps=1000)
    text_encoder = text_encoder_cls.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder', revision=args.revision)
    vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae', revision=args.revision)
    unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet', revision=args.revision)
    # Optionally wrap the UNet with LoRA adapters (only adapter weights train).
    if args.use_lora:
        config = LoraConfig(r=args.lora_r, lora_alpha=args.lora_alpha, target_modules=UNET_TARGET_MODULES, lora_dropout=args.lora_dropout, bias=args.lora_bias)
        unet = get_peft_model(unet, config)
        unet.print_trainable_parameters()
        print(unet)
    # VAE is always frozen; text encoder only trains when requested.
    vae.requires_grad_(False)
    if (not args.train_text_encoder):
        text_encoder.requires_grad_(False)
    elif (args.train_text_encoder and args.use_lora):
        config = LoraConfig(r=args.lora_text_encoder_r, lora_alpha=args.lora_text_encoder_alpha, target_modules=TEXT_ENCODER_TARGET_MODULES, lora_dropout=args.lora_text_encoder_dropout, bias=args.lora_text_encoder_bias)
        text_encoder = get_peft_model(text_encoder, config)
        text_encoder.print_trainable_parameters()
        print(text_encoder)
    if args.enable_xformers_memory_efficient_attention:
        if is_xformers_available():
            unet.enable_xformers_memory_efficient_attention()
        else:
            raise ValueError('xformers is not available. Make sure it is installed correctly')
    if args.gradient_checkpointing:
        unet.enable_gradient_checkpointing()
        if (args.train_text_encoder and (not args.use_lora)):
            text_encoder.gradient_checkpointing_enable()
    # TF32 matmuls can speed up training on Ampere GPUs.
    if args.allow_tf32:
        torch.backends.cuda.matmul.allow_tf32 = True
    if args.scale_lr:
        args.learning_rate = (((args.learning_rate * args.gradient_accumulation_steps) * args.train_batch_size) * accelerator.num_processes)
    # --- Optimizer ------------------------------------------------------------
    if args.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
            raise ImportError('To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`.')
        optimizer_class = bnb.optim.AdamW8bit
    else:
        optimizer_class = torch.optim.AdamW
    params_to_optimize = (itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters())
    optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon)
    # --- Dataset, dataloader and LR scheduler ---------------------------------
    train_dataset = DreamBoothDataset(instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=(args.class_data_dir if args.with_prior_preservation else None), class_prompt=args.class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop)
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=(lambda examples: collate_fn(examples, args.with_prior_preservation)), num_workers=1)
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
    if (args.max_train_steps is None):
        # Derive the step budget from the epoch count when not given explicitly.
        args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
        overrode_max_train_steps = True
    lr_scheduler = get_scheduler(args.lr_scheduler, optimizer=optimizer, num_warmup_steps=(args.lr_warmup_steps * args.gradient_accumulation_steps), num_training_steps=(args.max_train_steps * args.gradient_accumulation_steps), num_cycles=args.lr_num_cycles, power=args.lr_power)
    # --- Wrap everything with the accelerator ---------------------------------
    if args.train_text_encoder:
        (unet, text_encoder, optimizer, train_dataloader, lr_scheduler) = accelerator.prepare(unet, text_encoder, optimizer, train_dataloader, lr_scheduler)
    else:
        (unet, optimizer, train_dataloader, lr_scheduler) = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)
    # Frozen models are moved to device in the (possibly reduced) precision.
    weight_dtype = torch.float32
    if (accelerator.mixed_precision == 'fp16'):
        weight_dtype = torch.float16
    elif (accelerator.mixed_precision == 'bf16'):
        weight_dtype = torch.bfloat16
    vae.to(accelerator.device, dtype=weight_dtype)
    if (not args.train_text_encoder):
        text_encoder.to(accelerator.device, dtype=weight_dtype)
    # Recompute schedule sizes: accelerator.prepare may change dataloader length.
    num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
    if overrode_max_train_steps:
        args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
    args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
    if accelerator.is_main_process:
        accelerator.init_trackers('dreambooth', config=vars(args))
    # --- Training loop --------------------------------------------------------
    total_batch_size = ((args.train_batch_size * accelerator.num_processes) * args.gradient_accumulation_steps)
    logger.info('***** Running training *****')
    logger.info(f' Num examples = {len(train_dataset)}')
    logger.info(f' Num batches each epoch = {len(train_dataloader)}')
    logger.info(f' Num Epochs = {args.num_train_epochs}')
    logger.info(f' Instantaneous batch size per device = {args.train_batch_size}')
    logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
    logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}')
    logger.info(f' Total optimization steps = {args.max_train_steps}')
    global_step = 0
    first_epoch = 0
    # Optional resume: find the checkpoint directory and restore state.
    if args.resume_from_checkpoint:
        if (args.resume_from_checkpoint != 'latest'):
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Pick the checkpoint with the highest step number.
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith('checkpoint')]
            dirs = sorted(dirs, key=(lambda x: int(x.split('-')[1])))
            path = dirs[(- 1)]
        accelerator.print(f'Resuming from checkpoint {path}')
        accelerator.load_state(os.path.join(args.output_dir, path))
        global_step = int(path.split('-')[1])
        # Translate the optimizer-step counter back into epoch/step offsets.
        resume_global_step = (global_step * args.gradient_accumulation_steps)
        first_epoch = (resume_global_step // num_update_steps_per_epoch)
        resume_step = (resume_global_step % num_update_steps_per_epoch)
    progress_bar = tqdm(range(global_step, args.max_train_steps), disable=(not accelerator.is_local_main_process))
    progress_bar.set_description('Steps')
    for epoch in range(first_epoch, args.num_train_epochs):
        unet.train()
        if args.train_text_encoder:
            text_encoder.train()
        # Track GPU/CPU memory for the per-epoch report printed below.
        with TorchTracemalloc() as tracemalloc:
            for (step, batch) in enumerate(train_dataloader):
                # Skip already-consumed steps when resuming mid-epoch.
                if (args.resume_from_checkpoint and (epoch == first_epoch) and (step < resume_step)):
                    if ((step % args.gradient_accumulation_steps) == 0):
                        progress_bar.update(1)
                    continue
                with accelerator.accumulate(unet):
                    # Encode images to latents (0.18215 is the SD VAE scaling factor).
                    latents = vae.encode(batch['pixel_values'].to(dtype=weight_dtype)).latent_dist.sample()
                    latents = (latents * 0.18215)
                    # Add noise at a random timestep (forward diffusion).
                    noise = torch.randn_like(latents)
                    bsz = latents.shape[0]
                    timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
                    timesteps = timesteps.long()
                    noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
                    encoder_hidden_states = text_encoder(batch['input_ids'])[0]
                    model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
                    # Regression target depends on the scheduler's prediction type.
                    if (noise_scheduler.config.prediction_type == 'epsilon'):
                        target = noise
                    elif (noise_scheduler.config.prediction_type == 'v_prediction'):
                        target = noise_scheduler.get_velocity(latents, noise, timesteps)
                    else:
                        raise ValueError(f'Unknown prediction type {noise_scheduler.config.prediction_type}')
                    if args.with_prior_preservation:
                        # Batch halves: instance examples first, class (prior) examples second.
                        (model_pred, model_pred_prior) = torch.chunk(model_pred, 2, dim=0)
                        (target, target_prior) = torch.chunk(target, 2, dim=0)
                        loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
                        prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction='mean')
                        loss = (loss + (args.prior_loss_weight * prior_loss))
                    else:
                        loss = F.mse_loss(model_pred.float(), target.float(), reduction='mean')
                    accelerator.backward(loss)
                    if accelerator.sync_gradients:
                        params_to_clip = (itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters())
                        accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # An optimizer step completed on this iteration.
                if accelerator.sync_gradients:
                    progress_bar.update(1)
                    global_step += 1
                logs = {'loss': loss.detach().item(), 'lr': lr_scheduler.get_last_lr()[0]}
                progress_bar.set_postfix(**logs)
                accelerator.log(logs, step=global_step)
                # Periodic validation: sample images with the current weights.
                if ((args.validation_prompt is not None) and (((step + (num_update_steps_per_epoch * epoch)) % args.validation_steps) == 0)):
                    logger.info(f'''Running validation...
Generating {args.num_validation_images} images with prompt: {args.validation_prompt}.''')
                    pipeline = DiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, safety_checker=None, revision=args.revision)
                    pipeline.unet = accelerator.unwrap_model(unet, keep_fp32_wrapper=True)
                    pipeline.text_encoder = accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True)
                    pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
                    pipeline = pipeline.to(accelerator.device)
                    pipeline.set_progress_bar_config(disable=True)
                    # Fixed seed keeps validation samples comparable across runs.
                    if (args.seed is not None):
                        generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
                    else:
                        generator = None
                    images = []
                    for _ in range(args.num_validation_images):
                        image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
                        images.append(image)
                    for tracker in accelerator.trackers:
                        if (tracker.name == 'tensorboard'):
                            np_images = np.stack([np.asarray(img) for img in images])
                            tracker.writer.add_images('validation', np_images, epoch, dataformats='NHWC')
                        if (tracker.name == 'wandb'):
                            import wandb
                            tracker.log({'validation': [wandb.Image(image, caption=f'{i}: {args.validation_prompt}') for (i, image) in enumerate(images)]})
                    del pipeline
                    torch.cuda.empty_cache()
                if (global_step >= args.max_train_steps):
                    break
        # Per-epoch memory report from the TorchTracemalloc context.
        accelerator.print('GPU Memory before entering the train : {}'.format(b2mb(tracemalloc.begin)))
        accelerator.print('GPU Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used))
        accelerator.print('GPU Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked))
        accelerator.print('GPU Total Peak Memory consumed during the train (max): {}'.format((tracemalloc.peaked + b2mb(tracemalloc.begin))))
        accelerator.print('CPU Memory before entering the train : {}'.format(b2mb(tracemalloc.cpu_begin)))
        accelerator.print('CPU Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.cpu_used))
        accelerator.print('CPU Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.cpu_peaked))
        accelerator.print('CPU Total Peak Memory consumed during the train (max): {}'.format((tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin))))
    # --- Save the final model (and optionally push to the Hub) ----------------
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        if args.use_lora:
            # LoRA path: save adapter weights for UNet (and text encoder if trained).
            unwarpped_unet = accelerator.unwrap_model(unet)
            unwarpped_unet.save_pretrained(os.path.join(args.output_dir, 'unet'), state_dict=accelerator.get_state_dict(unet))
            if args.train_text_encoder:
                unwarpped_text_encoder = accelerator.unwrap_model(text_encoder)
                unwarpped_text_encoder.save_pretrained(os.path.join(args.output_dir, 'text_encoder'), state_dict=accelerator.get_state_dict(text_encoder))
        else:
            # Full-model path: rebuild the pipeline with the trained components.
            pipeline = DiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), revision=args.revision)
            pipeline.save_pretrained(args.output_dir)
        if args.push_to_hub:
            repo.push_to_hub(commit_message='End of training', blocking=False, auto_lfs_prune=True)
    accelerator.end_training()
class VarRNNBase(nn.Module):
    """Base class for RNNs with variational (per-sequence) dropout.

    Builds one cell per (layer, direction) pair; the time-step recurrence is
    delegated to the autograd functions in ``rnn_F``.
    """

    def __init__(self, Cell, input_size, hidden_size, num_layers=1, bias=True, batch_first=False, dropout=(0, 0), bidirectional=False, **kwargs):
        super(VarRNNBase, self).__init__()
        self.Cell = Cell
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.bidirectional = bidirectional
        self.lstm = False
        num_directions = 2 if bidirectional else 1
        self.all_cells = []
        for layer in range(num_layers):
            for direction in range(num_directions):
                # The first layer consumes the raw input; deeper layers consume
                # the (possibly bidirectional) output of the previous layer.
                if layer == 0:
                    layer_input_size = input_size
                else:
                    layer_input_size = hidden_size * num_directions
                cell = self.Cell(layer_input_size, hidden_size, self.bias, p=dropout, **kwargs)
                self.all_cells.append(cell)
                # Keep the historical 'cell%d' names so state-dict keys match.
                self.add_module('cell%d' % ((layer * num_directions) + direction), cell)

    def reset_parameters(self):
        """Re-initialize the parameters of every cell."""
        for cell in self.all_cells:
            cell.reset_parameters()

    def reset_noise(self, batch_size):
        """Resample the variational dropout noise in every cell."""
        for cell in self.all_cells:
            cell.reset_noise(batch_size)

    def forward(self, input, mask=None, hx=None):
        """Run the full sequence; returns ``(output, final_hidden)``."""
        batch_size = input.size(0) if self.batch_first else input.size(1)
        if hx is None:
            num_directions = 2 if self.bidirectional else 1
            hx = input.new_zeros((self.num_layers * num_directions), batch_size, self.hidden_size)
            if self.lstm:
                hx = (hx, hx)
        func = rnn_F.AutogradVarRNN(num_layers=self.num_layers, batch_first=self.batch_first, bidirectional=self.bidirectional, lstm=self.lstm)
        # Fresh dropout noise for this batch (variational dropout).
        self.reset_noise(batch_size)
        step_mask = None if mask is None else mask.view(mask.size() + (1,))
        (output, hidden) = func(input, self.all_cells, hx, step_mask)
        return (output, hidden)

    def step(self, input, hx=None, mask=None):
        """Advance one time step (unidirectional RNNs only)."""
        assert (not self.bidirectional), 'step only cannot be applied to bidirectional RNN.'
        batch_size = input.size(0)
        if hx is None:
            hx = input.new_zeros(self.num_layers, batch_size, self.hidden_size)
            if self.lstm:
                hx = (hx, hx)
        func = rnn_F.AutogradVarRNNStep(num_layers=self.num_layers, lstm=self.lstm)
        (output, hidden) = func(input, self.all_cells, hx, mask)
        return (output, hidden)
class SupCEResNet(nn.Module):
    """Backbone encoder plus a linear classifier for cross-entropy training."""

    def __init__(self, name='resnet50', num_classes=10):
        super(SupCEResNet, self).__init__()
        # model_dict maps a backbone name to (constructor, feature_dim).
        model_fun, dim_in = model_dict[name]
        self.encoder = model_fun()
        self.fc = nn.Linear(dim_in, num_classes)

    def forward(self, x):
        features = self.encoder(x)
        return self.fc(features)
def make_hashable(x):
    """Recursively convert *x* into a hashable equivalent.

    Lists become tuples of converted elements; sets become frozensets
    (new: sets are unhashable too and previously slipped through); dicts
    become tuples of (key, converted value) pairs sorted by key, so equal
    dicts map to equal results regardless of insertion order.  Any other
    value is returned unchanged and assumed already hashable.
    """
    if isinstance(x, list):
        return tuple(map(make_hashable, x))
    if isinstance(x, set):
        # Generalization: freeze sets after converting their members.
        return frozenset(map(make_hashable, x))
    if isinstance(x, dict):
        return tuple(sorted(((k, make_hashable(v)) for (k, v) in x.items())))
    return x
def seasonality(time, period, amplitude=1, phase=0):
    """Repeat `seasonal_pattern` every `period` steps, shifted by `phase` and scaled by `amplitude`."""
    position_in_cycle = ((time + phase) % period) / period
    return amplitude * seasonal_pattern(position_in_cycle)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Convert an original BertAbs checkpoint into the HF BertAbsSummarizer format.

    Loads the original checkpoint, copies its bert/decoder/generator weights into
    a fresh `BertAbsSummarizer`, verifies both models produce identical outputs on
    a sample input, then saves the converted state dict.

    Raises:
        ValueError: when the two models' outputs differ beyond 1e-3.
    """
    config = BertAbsConfig(temp_dir='.', finetune_bert=False, large=False, share_emb=True, use_bert_emb=False, encoder='bert', max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2)
    # map_location lambda keeps tensors on CPU regardless of where they were saved.
    checkpoints = torch.load(path_to_checkpoints, (lambda storage, loc: storage))
    original = AbsSummarizer(config, torch.device('cpu'), checkpoints)
    original.eval()
    new_model = BertAbsSummarizer(config, torch.device('cpu'))
    new_model.eval()
    logging.info('convert the model')
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    # Pad both sample sequences to the model's 512-token maximum position.
    encoder_input_ids = tokenizer.encode("This is sample eaalj'-.")
    encoder_input_ids.extend(([tokenizer.pad_token_id] * (512 - len(encoder_input_ids))))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 eaalj'-.")
    decoder_input_ids.extend(([tokenizer.pad_token_id] * (512 - len(decoder_input_ids))))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
    # Sanity check: the generator weights were copied verbatim.
    assert (torch.max(torch.abs((original.generator[0].weight - new_model.generator[0].weight))) == 0)
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)
    output_converted_model = new_model(encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask)[0]
    output_converted_generator = new_model.generator(output_converted_model)
    maximum_absolute_difference = torch.max(torch.abs((output_converted_model - output_original_model))).item()
    # Fixed message: typo "beween" and these are output (not weight) differences.
    print('Maximum absolute difference between outputs: {:.2f}'.format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs((output_converted_generator - output_original_generator))).item()
    print('Maximum absolute difference between generator outputs: {:.2f}'.format(maximum_absolute_difference))
    are_identical = torch.allclose(output_converted_model, output_original_model, atol=0.001)
    if are_identical:
        logging.info('all weights are equal up to 1e-3')
    else:
        raise ValueError('the weights are different. The new model is likely different from the original one.')
    logging.info("saving the model's state dictionary")
    torch.save(new_model.state_dict(), './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin')
def compute_precision(guessed_sentences, correct_sentences):
    """Entity-level precision for BIO-tagged sequences.

    Counts every 'B'-initial entity in the guesses; an entity is correct only
    when its full B/I span matches the gold tags and the gold span does not
    continue past it.  Returns correct / guessed (0 when nothing was guessed).
    """
    assert (len(guessed_sentences) == len(correct_sentences))
    correct_entities = 0
    guessed_entities = 0
    for guessed, correct in zip(guessed_sentences, correct_sentences):
        assert (len(guessed) == len(correct))
        i = 0
        n = len(guessed)
        while i < n:
            # Only 'B' tags open a guessed entity.
            if guessed[i][0] != 'B':
                i += 1
                continue
            guessed_entities += 1
            if guessed[i] != correct[i]:
                i += 1
                continue
            # The opening tag matched; walk the guessed 'I' continuation.
            i += 1
            span_ok = True
            while (i < n) and (guessed[i][0] == 'I'):
                if guessed[i] != correct[i]:
                    span_ok = False
                i += 1
            # Gold entity must not extend beyond the guessed span.
            if (i < n) and (correct[i][0] == 'I'):
                span_ok = False
            if span_ok:
                correct_entities += 1
    return (float(correct_entities) / guessed_entities) if guessed_entities > 0 else 0
class Node1(nn.Module):
    """Decoder node fusing top-down, middle and low-level feature maps.

    The top-down map is upsampled to the middle map's size, blended with a
    learnable weight `alpha`, refined, then upsampled again to the low-level
    resolution and concatenated with projected low-level features before the
    final `node1_cls`-way 1x1 classifier.
    """

    def __init__(self, node1_cls):
        super(Node1, self).__init__()
        # 3x3 refinement applied after merging the upsampled top-down path.
        self.conv0 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, padding=1, dilation=1, bias=False),
            BatchNorm2d(512),
            nn.ReLU(inplace=False),
        )
        self.conv1 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1, dilation=1, bias=False),
            BatchNorm2d(256),
            nn.ReLU(inplace=False),
        )
        # 1x1 projection of the low-level (high-resolution) features to 48 channels.
        self.conv2 = nn.Sequential(
            nn.Conv2d(256, 48, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),
            BatchNorm2d(48),
            nn.ReLU(inplace=False),
        )
        # Fuse concatenated (256 + 48 = 304) channels back down to 256.
        self.conv3 = nn.Sequential(
            nn.Conv2d(304, 256, kernel_size=1, padding=0, dilation=1, bias=False),
            BatchNorm2d(256),
            nn.ReLU(inplace=False),
            nn.Conv2d(256, 256, kernel_size=1, padding=0, dilation=1, bias=False),
            BatchNorm2d(256),
            nn.ReLU(inplace=False),
        )
        self.conv4 = nn.Conv2d(256, node1_cls, kernel_size=1, padding=0, dilation=1, bias=True)
        # Learnable scale for the middle (lateral) feature map.
        self.alpha = nn.Parameter(torch.ones(1))

    def forward(self, xt, xm, xl):
        mid_h, mid_w = xm.size()[2:]
        fused = self.conv0(F.interpolate(xt, size=(mid_h, mid_w), mode='bilinear', align_corners=True) + (self.alpha * xm))
        low_h, low_w = xl.size()[2:]
        upsampled = F.interpolate(self.conv1(fused), size=(low_h, low_w), mode='bilinear', align_corners=True)
        lateral = self.conv2(xl)
        merged = self.conv3(torch.cat([upsampled, lateral], dim=1))
        return self.conv4(merged)
def get_fused_adam_class():
    """Return the best available fused Adam optimizer class, or None.

    Preference order:
      1. FusedAdamV1 when the standalone `fused_adam_cuda` extension imports;
      2. FusedAdamV2 when apex's multi-tensor FusedAdam is available;
      3. None otherwise.
    """
    try:
        # The extension module is stashed in a module-level global because
        # FusedAdamV1 is expected to look it up by name at construction time.
        global fused_adam_cuda
        import importlib
        fused_adam_cuda = importlib.import_module('fused_adam_cuda')
        return FusedAdamV1
    except ImportError:
        try:
            # Fall back to apex's implementation; only usable when its
            # multi-tensor applier reports itself available.
            from apex.multi_tensor_apply import multi_tensor_applier
            from apex.optimizers import FusedAdam as _FusedAdam
            if multi_tensor_applier.available:
                return FusedAdamV2
        except ImportError:
            pass
    # Reached when apex imports but the applier is unavailable, or nothing imports.
    return None
class NSGAII(GeneticAlgorithm[(S, R)]):
    """NSGA-II multi-objective genetic algorithm.

    Extends the generic GeneticAlgorithm with non-dominated-sorting plus
    crowding-distance replacement; selection defaults to a binary tournament
    over (rank, crowding distance).
    """

    def __init__(self, problem: Problem, population_size: int, offspring_population_size: int, mutation: Mutation, crossover: Crossover, selection: Selection=BinaryTournamentSelection(MultiComparator([FastNonDominatedRanking.get_comparator(), CrowdingDistance.get_comparator()])), termination_criterion: TerminationCriterion=store.default_termination_criteria, population_generator: Generator=store.default_generator, population_evaluator: Evaluator=store.default_evaluator, dominance_comparator: Comparator=store.default_comparator):
        super(NSGAII, self).__init__(problem=problem, population_size=population_size, offspring_population_size=offspring_population_size, mutation=mutation, crossover=crossover, selection=selection, termination_criterion=termination_criterion, population_evaluator=population_evaluator, population_generator=population_generator)
        # Comparator used by the non-dominated ranking during replacement.
        self.dominance_comparator = dominance_comparator

    def replacement(self, population: List[S], offspring_population: List[S]) -> List[S]:
        """Select the next generation by rank, breaking ties with crowding distance.

        (Return annotation corrected: a flat list of solutions is returned,
        not a list of lists.)
        """
        ranking = FastNonDominatedRanking(self.dominance_comparator)
        density_estimator = CrowdingDistance()
        r = RankingAndDensityEstimatorReplacement(ranking, density_estimator, RemovalPolicyType.ONE_SHOT)
        solutions = r.replace(population, offspring_population)
        return solutions

    def get_result(self) -> R:
        """Return the final population of solutions."""
        return self.solutions

    def get_name(self) -> str:
        return 'NSGAII'
class DecoderBase(nn.Module):
    """Abstract base class for decoder modules.

    Attributes:
        attentional: whether this decoder attends over encoder states.
    """

    def __init__(self, attentional=True):
        super(DecoderBase, self).__init__()
        self.attentional = attentional

    # NOTE(review): takes `cls` but carries no @classmethod decorator —
    # likely lost in a refactor/extraction; confirm against callers before
    # relying on either calling convention.
    def from_opt(cls, opt, embeddings):
        """Alternate constructor from an options namespace (abstract)."""
        raise NotImplementedError
_module()  # NOTE(review): looks like a mangled registration decorator (e.g. "@MODELS.register_module()") for the class below — confirm against upstream
class ICNet(BaseModule):
    """ICNet multi-resolution backbone producing three feature maps.

    Returns features from three branches: a light convolutional branch on the
    full-resolution input, a mid-resolution branch through the backbone's early
    stages, and a low-resolution branch through the full backbone plus a PPM
    (pyramid pooling) head.
    """

    def __init__(self, backbone_cfg, in_channels=3, layer_channels=(512, 2048), light_branch_middle_channels=32, psp_out_channels=512, out_channels=(64, 256, 256), pool_scales=(1, 2, 3, 6), conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='ReLU'), align_corners=False, init_cfg=None):
        if (backbone_cfg is None):
            raise TypeError('backbone_cfg must be passed from config file!')
        if (init_cfg is None):
            # Default init: Kaiming for convs, constant 1 for BN, normal for linears.
            init_cfg = [dict(type='Kaiming', mode='fan_out', layer='Conv2d'), dict(type='Constant', val=1, layer='_BatchNorm'), dict(type='Normal', mean=0.01, layer='Linear')]
        super(ICNet, self).__init__(init_cfg=init_cfg)
        self.align_corners = align_corners
        self.backbone = build_backbone(backbone_cfg)
        # ceil_mode keeps spatial sizes compatible across the resolution branches.
        self.backbone.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)
        self.psp_modules = PPM(pool_scales=pool_scales, in_channels=layer_channels[1], channels=psp_out_channels, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, align_corners=align_corners)
        self.psp_bottleneck = ConvModule((layer_channels[1] + (len(pool_scales) * psp_out_channels)), psp_out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        # Light branch: three stride-2 convs, downsampling the raw input 8x.
        self.conv_sub1 = nn.Sequential(ConvModule(in_channels=in_channels, out_channels=light_branch_middle_channels, kernel_size=3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg), ConvModule(in_channels=light_branch_middle_channels, out_channels=light_branch_middle_channels, kernel_size=3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg), ConvModule(in_channels=light_branch_middle_channels, out_channels=out_channels[0], kernel_size=3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg))
        self.conv_sub2 = ConvModule(layer_channels[0], out_channels[1], 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg)
        self.conv_sub4 = ConvModule(psp_out_channels, out_channels[2], 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg)

    def forward(self, x):
        """Return [light-branch, mid-branch, PPM-branch] feature maps."""
        output = []
        # Branch 1: light convs on the full-resolution input.
        output.append(self.conv_sub1(x))
        # Branch 2: half-resolution input through the backbone's early stages.
        x = resize(x, scale_factor=0.5, mode='bilinear', align_corners=self.align_corners)
        x = self.backbone.stem(x)
        x = self.backbone.maxpool(x)
        x = self.backbone.layer1(x)
        x = self.backbone.layer2(x)
        output.append(self.conv_sub2(x))
        # Branch 3: quarter-resolution through the deep stages + pyramid pooling.
        x = resize(x, scale_factor=0.5, mode='bilinear', align_corners=self.align_corners)
        x = self.backbone.layer3(x)
        x = self.backbone.layer4(x)
        psp_outs = (self.psp_modules(x) + [x])
        psp_outs = torch.cat(psp_outs, dim=1)
        x = self.psp_bottleneck(psp_outs)
        output.append(self.conv_sub4(x))
        return output
def load_tinyimagenet_data(datadir):
    """Load TinyImageNet sample paths and integer labels for the train/val splits."""
    train_ds = ImageFolder_custom((datadir + '/train/'), transform=None)
    test_ds = ImageFolder_custom((datadir + '/val/'), transform=None)
    X_train = np.array([sample for (sample, _) in train_ds.samples])
    y_train = np.array([int(label) for (_, label) in train_ds.samples])
    X_test = np.array([sample for (sample, _) in test_ds.samples])
    y_test = np.array([int(label) for (_, label) in test_ds.samples])
    return (X_train, y_train, X_test, y_test)
def process_all_table_structure_annotations(input_annotation_list):
    """Normalize raw table-structure annotations into fully structured cells.

    Pipeline: flatten nested row/column regions, number rows and columns,
    then (on a deep copy) align borders and materialize per-cell coordinates.
    """
    annotations = resolve_direct_nesting_of_rows_and_columns(input_annotation_list)
    # Numbering mutates the annotations in place; the returned pair is unused.
    (_, _) = assign_numbers_to_rows_and_cols(annotations)
    # Work on a copy so border adjustment can't corrupt the numbered input.
    structured = copy.deepcopy(annotations)
    structured = adjust_and_move_row_and_column_borders(structured)
    structured = create_all_cells_and_assign_coordinates(structured)
    return structured
def get_backtrans_data_dict(pkl_path, train_path):
    """Return {sentence: back-translation} for `train_path`, caching results at `pkl_path`.

    The mapping is computed once and pickled; subsequent calls just load the cache.
    """
    if not pkl_path.exists():
        print(f'creating {pkl_path}')
        (sentences, _) = common.get_sentences_and_labels_from_txt(train_path)
        augmented = {sentence: backtrans_string(sentence) for sentence in tqdm(sentences)}
        common.save_pickle(pkl_path, augmented)
    return common.load_pickle(pkl_path)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse COO tensor.

    Values become float32, indices int64; the tensor keeps the input's shape.
    """
    coo = sparse_mx.tocoo()
    indices = torch.from_numpy(np.vstack((coo.row, coo.col))).long()
    values = torch.from_numpy(coo.data).float()
    shape = torch.Size(coo.shape)
    # torch.sparse.FloatTensor is deprecated; sparse_coo_tensor is the
    # supported constructor and produces the same float32 COO tensor here.
    return torch.sparse_coo_tensor(indices, values, shape)
class ObservationsDecoder(nn.Module):
    """MLP that decodes a concatenated pair of representations into an observation."""

    def __init__(self, representation_size, out_size, width):
        super().__init__()
        mlp = [
            nn.Linear(representation_size * 2, width),
            nn.ELU(),
            nn.Linear(width, width),
            nn.ELU(),
            nn.Linear(width, out_size),
        ]
        self.layers = nn.Sequential(*mlp)

    def forward(self, x, y):
        # Concatenate the two representations along the feature dimension.
        joint = torch.cat((x, y), dim=-1)
        return self.layers(joint)
class DebertaPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder that raises a helpful error when torch is absent."""
    # Backends the real implementation requires.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails fast with an informative message if torch is not installed.
        requires_backends(self, ['torch'])
def stats(matrix):
    """Return [mean, std, max, min, median] of `matrix` as a 1-D numpy array."""
    summary = [
        np.mean(matrix),
        np.std(matrix),
        np.max(matrix),   # np.max is the same function as np.amax
        np.min(matrix),   # np.min is the same function as np.amin
        np.median(matrix),
    ]
    return np.array(summary)
class SubnetResNet(nn.Module):
    """ResNet with per-weight learnable masks (subnetworks) for continual learning.

    Expects 3x32x32 inputs (CIFAR-style; see the hard-coded reshape in forward)
    and one output head per task in `taskcla`.
    """

    def __init__(self, block, num_blocks, taskcla, nf, sparsity):
        super(SubnetResNet, self).__init__()
        self.in_planes = nf
        self.conv1 = subnet_conv3x3(3, (nf * 1), 1, sparsity=sparsity)
        # NOTE(review): `if True:` makes the else branch dead code — only the
        # affine=False BatchNorm is ever used; confirm the toggle is obsolete.
        if True:
            self.bn1 = nn.BatchNorm2d((nf * 1), track_running_stats=False, affine=False)
        else:
            self.bn1 = nn.BatchNorm2d((nf * 1), track_running_stats=False)
        self.layer1 = self._make_layer(block, (nf * 1), num_blocks[0], stride=1, sparsity=sparsity, name='layer1')
        self.layer2 = self._make_layer(block, (nf * 2), num_blocks[1], stride=2, sparsity=sparsity, name='layer2')
        self.layer3 = self._make_layer(block, (nf * 4), num_blocks[2], stride=2, sparsity=sparsity, name='layer3')
        self.layer4 = self._make_layer(block, (nf * 8), num_blocks[3], stride=2, sparsity=sparsity, name='layer4')
        self.taskcla = taskcla
        # One linear classifier head per task.
        self.last = torch.nn.ModuleList()
        for (t, n) in self.taskcla:
            self.last.append(nn.Linear((((nf * 8) * block.expansion) * 4), n, bias=False))
        self.act = OrderedDict()
        # Placeholder (all-None) masks used when no task mask is supplied.
        self.none_masks = {}
        for (name, module) in self.named_modules():
            if (isinstance(module, SubnetLinear) or isinstance(module, SubnetConv2d)):
                self.none_masks[(name + '.weight')] = None
                self.none_masks[(name + '.bias')] = None

    def _make_layer(self, block, planes, num_blocks, stride, sparsity, name):
        """Stack `num_blocks` blocks; only the first uses `stride`, the rest stride 1."""
        strides = ([stride] + ([1] * (num_blocks - 1)))
        layers = []
        name_count = 0
        for stride in strides:
            # Block names mirror the module path (e.g. "layer2.0") so masks line up.
            new_name = ((name + '.') + str(name_count))
            layers.append(block(self.in_planes, planes, stride, sparsity, new_name))
            self.in_planes = (planes * block.expansion)
            name_count += 1
        return mySequential(*layers)

    def forward(self, x, task_id, mask, mode='train', epoch=1):
        """Run the masked network and classify with the head for `task_id`."""
        if (mask is None):
            mask = self.none_masks
        bsz = x.size(0)
        # Hard-coded CIFAR-style input geometry.
        x = x.reshape(bsz, 3, 32, 32)
        out = relu(self.bn1(self.conv1(x, weight_mask=mask['conv1.weight'], bias_mask=mask['conv1.bias'], mode=mode)))
        out = self.layer1(out, mask, mode, epoch)
        out = self.layer2(out, mask, mode, epoch)
        out = self.layer3(out, mask, mode, epoch)
        out = self.layer4(out, mask, mode, epoch)
        out = avg_pool2d(out, 2)
        out = out.view(out.size(0), (- 1))
        y = self.last[task_id](out)
        return y

    def get_masks(self, task_id):
        """Return binary (uint8) weight/bias masks for every subnet layer of `task_id`.

        Heads of other tasks are skipped; a missing bias maps to None.
        """
        task_mask = {}
        for (name, module) in self.named_modules():
            if ('last' in name):
                # Only keep the classifier head belonging to this task.
                if (name != ('last.' + str(task_id))):
                    continue
            if (isinstance(module, SubnetLinear) or isinstance(module, SubnetConv2d)):
                print(name)
                task_mask[(name + '.weight')] = (module.weight_mask.detach().clone() > 0).type(torch.uint8)
                if (getattr(module, 'bias') is not None):
                    task_mask[(name + '.bias')] = (module.bias_mask.detach().clone() > 0).type(torch.uint8)
                else:
                    task_mask[(name + '.bias')] = None
        return task_mask
def render_header(image: np.ndarray, header: str, **kwargs):
    """Render `header` as text and stack it above `image`, returning a numpy array.

    Both the header strip and the image are resized to a common width (the wider
    of the two); heights are scaled proportionally.  Extra kwargs are forwarded
    to `render_text`.
    """
    requires_backends(render_header, 'vision')
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int((image.height * (new_width / image.width)))
    new_header_height = int((header_image.height * (new_width / header_image.width)))
    new_image = Image.new('RGB', (new_width, (new_height + new_header_height)), 'white')
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    new_image = to_numpy_array(new_image)
    # NOTE(review): converting to LAST only when the format already IS LAST
    # looks like a no-op; possibly the condition or target was meant to differ.
    # Confirm against the image-processing utilities before changing.
    if (infer_channel_dimension_format(new_image) == ChannelDimension.LAST):
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class CorotatingRotationWrapperPotential(parentWrapperPotential):
    """Wrapper that rotates a potential with radius-dependent pattern speed.

    The wrapped potential's azimuth is shifted by
    phi -> phi - vpo * R**(beta-1) * (t - to) - pa, i.e. a pattern whose
    rotation rate follows a power law in radius (corotation when beta == 0
    matches a flat rotation curve of amplitude vpo).
    """

    def __init__(self, amp=1.0, pot=None, vpo=1.0, beta=0.0, to=0.0, pa=0.0, ro=None, vo=None):
        # NOTE(review): self._vo / self._ro are read here before being assigned
        # in this class — presumably set up by the parent wrapper machinery
        # before this body runs; confirm against the wrapper base class.
        vpo = conversion.parse_velocity(vpo, vo=self._vo)
        to = conversion.parse_time(to, ro=self._ro, vo=self._vo)
        pa = conversion.parse_angle(pa)
        self._vpo = vpo
        self._beta = beta
        self._pa = pa
        self._to = to
        # C implementations exist for this wrapper.
        self.hasC = True
        self.hasC_dxdv = True

    def _wrap(self, attribute, *args, **kwargs):
        """Evaluate `attribute` of the wrapped potential in the rotated frame."""
        # args[0] is R; shift phi by the accumulated pattern rotation plus pa.
        kwargs['phi'] = ((kwargs.get('phi', 0.0) - ((self._vpo * (args[0] ** (self._beta - 1.0))) * (kwargs.get('t', 0.0) - self._to))) - self._pa)
        return self._wrap_pot_func(attribute)(self._pot, *args, **kwargs)

    def _Rforce(self, *args, **kwargs):
        """Radial force, including the chain-rule term from d(phi-shift)/dR."""
        kwargs['phi'] = ((kwargs.get('phi', 0.0) - ((self._vpo * (args[0] ** (self._beta - 1.0))) * (kwargs.get('t', 0.0) - self._to))) - self._pa)
        return (self._wrap_pot_func('_Rforce')(self._pot, *args, **kwargs) - (self._wrap_pot_func('_phitorque')(self._pot, *args, **kwargs) * (((self._vpo * (self._beta - 1.0)) * (args[0] ** (self._beta - 2.0))) * (kwargs.get('t', 0.0) - self._to))))

    def _R2deriv(self, *args, **kwargs):
        """Second radial derivative with first- and second-order chain-rule terms."""
        kwargs['phi'] = ((kwargs.get('phi', 0.0) - ((self._vpo * (args[0] ** (self._beta - 1.0))) * (kwargs.get('t', 0.0) - self._to))) - self._pa)
        # d(phi-shift)/dR, reused in the cross and squared terms below.
        phiRderiv = ((((- self._vpo) * (self._beta - 1.0)) * (args[0] ** (self._beta - 2.0))) * (kwargs.get('t', 0.0) - self._to))
        return (((self._wrap_pot_func('_R2deriv')(self._pot, *args, **kwargs) + ((2.0 * self._wrap_pot_func('_Rphideriv')(self._pot, *args, **kwargs)) * phiRderiv)) + (self._wrap_pot_func('_phi2deriv')(self._pot, *args, **kwargs) * (phiRderiv ** 2.0))) + (self._wrap_pot_func('_phitorque')(self._pot, *args, **kwargs) * ((((self._vpo * (self._beta - 1.0)) * (self._beta - 2.0)) * (args[0] ** (self._beta - 3.0))) * (kwargs.get('t', 0.0) - self._to))))

    def _Rphideriv(self, *args, **kwargs):
        """Mixed R-phi derivative with the chain-rule correction."""
        kwargs['phi'] = ((kwargs.get('phi', 0.0) - ((self._vpo * (args[0] ** (self._beta - 1.0))) * (kwargs.get('t', 0.0) - self._to))) - self._pa)
        return (self._wrap_pot_func('_Rphideriv')(self._pot, *args, **kwargs) - ((((self._wrap_pot_func('_phi2deriv')(self._pot, *args, **kwargs) * self._vpo) * (self._beta - 1.0)) * (args[0] ** (self._beta - 2.0))) * (kwargs.get('t', 0.0) - self._to)))
class MobileBertPreTrainedModel():
    """Placeholder class that raises a helpful error when PyTorch is absent."""

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # NOTE(review): on the real class this is a classmethod; here it is just
    # another stub that raises when torch is missing.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
class HorovodWorker():
    """Ray worker helper: resolves this node's NIC for Gloo and runs user functions."""

    def ip_addr(self):
        """Return this node's IP address as reported by Ray."""
        import ray
        return ray._private.services.get_node_ip_address()

    def set_gloo_iface(self):
        """Point Horovod's Gloo transport at the NIC that owns this node's IP."""
        node_ip = self.ip_addr()
        import psutil
        import socket
        matched_iface = None
        # Scan every NIC's IPv4 addresses for the one matching our node IP.
        for nic_name, nic_addresses in psutil.net_if_addrs().items():
            for address in nic_addresses:
                if ((address.family == socket.AF_INET) and (address.address == node_ip)):
                    matched_iface = nic_name
        invalidInputError((matched_iface is not None), 'Cannot find network interface with ip {}'.format(node_ip))
        os.environ['HOROVOD_GLOO_IFACE'] = matched_iface
        return matched_iface

    def run(self, env, func):
        """Merge `env` into os.environ, then invoke and return `func()`."""
        import os
        os.environ.update(env)
        return func()
def test_class_member_mutation_does_not_affect_instance_members():
    """Mutating a class attribute must not mark instances' own attributes stale."""
    run_cell('\n class Foo:\n shared = 99\n def __init__(self):\n self.x = 42\n ')
    run_cell('foo = Foo()')
    # Mutate the class-level attribute only; foo.x is untouched.
    run_cell('Foo.shared = 12')
    run_cell('logging.info(foo.x)')
    # Reading foo.x should not be flagged as depending on the mutated class member.
    assert_not_detected()
def get_batch(source, i, args, seq_len=None, evaluation=False):
    """Slice a (data, target) pair out of `source` starting at position `i`.

    `target` is `data` shifted one step forward and flattened (next-token
    prediction).  The window is `seq_len` (or `args.bptt`), clipped so the
    shifted target never runs off the end of `source`.
    """
    window = seq_len if seq_len else args.bptt
    window = min(window, (len(source) - 1) - i)
    data = source[i:i + window]
    target = source[i + 1:i + 1 + window].view(-1)
    return (data, target)
def get_score_from_pos(pos_score_dict, prefix_len, hypo_dict, bpe_symbol, hypo_frac, backwards):
    """Sum per-position scores into per-hypothesis scores, optionally over a prefix.

    Args:
        pos_score_dict: {key: list of per-hypothesis position-score lists}.
        prefix_len: fixed word-prefix length to score (forward decoding only).
        hypo_dict: {key: list of hypothesis strings}, used to map word prefixes
            to BPE-token counts.
        bpe_symbol: the BPE continuation marker used in the hypotheses.
        hypo_frac: alternatively, fraction of each hypothesis to score.
        backwards: whether hypotheses were decoded right-to-left.

    Returns:
        (score_dict, num_bpe_tokens_dict): summed scores and the number of BPE
        tokens included in each sum, keyed like `pos_score_dict`.
    """
    score_dict = {}
    num_bpe_tokens_dict = {}
    # Exactly one of the two truncation modes (fixed prefix / fraction) may be set.
    assert ((prefix_len is None) or (hypo_frac is None))
    for key in pos_score_dict:
        score_dict[key] = []
        num_bpe_tokens_dict[key] = []
        for i in range(len(pos_score_dict[key])):
            if ((prefix_len is not None) and (not backwards)):
                # Fixed word-prefix: convert the word count to a BPE-token count.
                num_bpe_tokens = get_num_bpe_tokens_from_len(hypo_dict[key][i], bpe_symbol, prefix_len)
                score_dict[key].append(sum(pos_score_dict[key][i][:num_bpe_tokens]))
                num_bpe_tokens_dict[key].append(num_bpe_tokens)
            elif (hypo_frac is not None):
                # Fractional prefix: derive the token count from the hypothesis length.
                (num_words, shortened, hypo_prefix_len) = calc_length_from_frac(hypo_dict[key][i], hypo_frac, bpe_symbol)
                score_dict[key].append(sum(pos_score_dict[key][i][:hypo_prefix_len]))
                num_bpe_tokens_dict[key].append(hypo_prefix_len)
            else:
                # No truncation: score the full hypothesis.
                score_dict[key].append(sum(pos_score_dict[key][i]))
                num_bpe_tokens_dict[key].append(len(pos_score_dict[key][i]))
    return (score_dict, num_bpe_tokens_dict)
def differentiable_round(z, training=True):
    """Round `z`, keeping a usable gradient during training.

    In training mode the rounded value is augmented with the cubic residual
    (z - round(z))**3 before being passed through `roundNoGradient`, so the
    forward value stays (near-)integer while gradients flow through the
    residual term.  In eval mode it is a plain hard round.
    """
    if training:
        z_rounded = tf.round(z)
        # Cubic residual is tiny near integers but carries gradient w.r.t. z.
        return roundNoGradient((z_rounded + ((z - z_rounded) ** 3)))
    else:
        return tf.round(z)
def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format: bool):
    """Export a PyTorch pipeline's model to ONNX at `output`.

    Args:
        nlp: pipeline whose model/tokenizer define the graph and sample inputs.
        opset: ONNX opset version to target.
        output: destination path for the ONNX file.
        use_external_format: store weights outside the main file (torch <= 1.10 only).

    Raises:
        Exception: when PyTorch is not installed.
    """
    if (not is_torch_available()):
        raise Exception('Cannot convert because PyTorch is not installed. Please install torch first.')
    import torch
    from torch.onnx import export
    print(f'Using framework PyTorch: {torch.__version__}')
    with torch.no_grad():
        (input_names, output_names, dynamic_axes, tokens) = infer_shapes(nlp, 'pt')
        (ordered_input_names, model_args) = ensure_valid_input(nlp.model, tokens, input_names)
        # torch removed use_external_data_format / enable_onnx_checker from
        # torch.onnx.export after 1.10, hence the version split.
        if (parse(torch.__version__) <= parse('1.10.99')):
            export(nlp.model, model_args, f=output.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_format, enable_onnx_checker=True, opset_version=opset)
        else:
            export(nlp.model, model_args, f=output.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
def process_all():
    """Extract entities from the CConv dialog corpus and attach match results.

    Reads the matched-dialog JSON, runs entity extraction over every utterance,
    matches extracted entities against the translated ATOMIC head phrases, and
    writes the augmented JSON back to the same file.
    """
    build_new_table(config['path']['ATOMIC_Chinese'])
    head = pd.read_csv(config['path']['head_phrase'])
    # Target vocabulary: translated ATOMIC head phrases.
    trg = set(head['head_translated'])
    extract = Extract()
    data = json.load(open(config['path']['Cconv_matched'], 'r', encoding='utf8'))
    # Flatten all utterances so extraction runs in one batch.
    content = []
    for dialog in data['data']:
        content.extend(dialog['content'])
    all_entity_list = extract(content)
    # Slice the flat extraction results back per dialog.
    i = 0
    for dialog in data['data']:
        dialog['entity_extract'] = all_entity_list[i:(i + len(dialog['content']))]
        i += len(dialog['content'])
    assert (i == len(content))
    # Match each utterance's entities against the target vocabulary.
    all_entity_list = [append_tail(match(entity_list, trg)) for entity_list in all_entity_list]
    i = 0
    assert (len(all_entity_list) == len(content))
    for dialog in data['data']:
        dialog['entity_matched_result'] = all_entity_list[i:(i + len(dialog['content']))]
        i += len(dialog['content'])
    assert (i == len(content))
    # Persist the augmented corpus in place.
    with open(config['path']['Cconv_matched'], 'w', encoding='utf8') as f:
        f.write(json.dumps(data, ensure_ascii=False))
_tokenizers  # NOTE(review): bare name — looks like a mangled decorator (e.g. "@require_tokenizers") for the class below; confirm against upstream
class PreTrainedTokenizationFastTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for PreTrainedTokenizerFast loaded directly from tokenizer files.

    Runs the shared TokenizerTesterMixin suite against generic fast tokenizers
    that have no slow counterpart; slow-tokenizer-specific tests are disabled.
    """
    rust_tokenizer_class = PreTrainedTokenizerFast
    test_slow_tokenizer = False
    test_rust_tokenizer = True
    from_pretrained_vocab_key = 'tokenizer_file'

    def setUp(self):
        # The mixin's setUp would try to build a rust tokenizer before the
        # fixture exists; disable it around the call, then restore.
        self.test_rust_tokenizer = False
        super().setUp()
        self.test_rust_tokenizer = True
        model_paths = ['robot-test/dummy-tokenizer-fast', 'robot-test/dummy-tokenizer-wordlevel']
        self.bytelevel_bpe_model_name = 'SaulLu/dummy-tokenizer-bytelevel-bpe'
        self.tokenizers_list = [(PreTrainedTokenizerFast, model_path, {}) for model_path in model_paths]
        tokenizer = PreTrainedTokenizerFast.from_pretrained(model_paths[0])
        tokenizer.save_pretrained(self.tmpdirname)

    # The following mixin tests assume a slow tokenizer / registered model
    # lists, which do not apply to a bare PreTrainedTokenizerFast.
    def test_tokenizer_mismatch_warning(self):
        pass

    def test_pretrained_model_lists(self):
        pass

    def test_prepare_for_model(self):
        pass

    def test_rust_tokenizer_signature(self):
        pass

    def test_training_new_tokenizer(self):
        """Run the mixin's training test once per fixture tokenizer, each in a fresh tmpdir."""
        tmpdirname_orig = self.tmpdirname
        for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                try:
                    self.tmpdirname = tempfile.mkdtemp()
                    tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                    tokenizer.save_pretrained(self.tmpdirname)
                    super().test_training_new_tokenizer()
                finally:
                    # Always clean up and restore the original tmpdir.
                    shutil.rmtree(self.tmpdirname)
                    self.tmpdirname = tmpdirname_orig

    def test_training_new_tokenizer_with_special_tokens_change(self):
        """Same per-fixture wrapping for the special-tokens-change variant."""
        tmpdirname_orig = self.tmpdirname
        for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                try:
                    self.tmpdirname = tempfile.mkdtemp()
                    tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                    tokenizer.save_pretrained(self.tmpdirname)
                    super().test_training_new_tokenizer_with_special_tokens_change()
                finally:
                    shutil.rmtree(self.tmpdirname)
                    self.tmpdirname = tmpdirname_orig

    def test_training_new_tokenizer_with_bytelevel(self):
        """Training from an iterator on a byte-level BPE model yields stable ids."""
        tokenizer = self.rust_tokenizer_class.from_pretrained(self.bytelevel_bpe_model_name)
        toy_text_iterator = ('a' for _ in range(1000))
        new_tokenizer = tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50)
        encoding_ids = new_tokenizer.encode('a')
        self.assertEqual(encoding_ids, [64, 172, 253, 97, 245])
_module()  # NOTE(review): looks like a mangled registration decorator (e.g. "@MODELS.register_module()") for the class below — confirm against upstream
class CrossKDSingleStageDetector(SingleStageDetector):
    """Single-stage detector distilled from a frozen teacher (CrossKD).

    Builds a teacher from `teacher_config` (optionally loading `teacher_ckpt`),
    freezes it, and wires up the classification / regression (and optional
    feature) distillation losses described in `kd_cfg`.
    """

    def __init__(self, backbone: ConfigType, neck: ConfigType, bbox_head: ConfigType, teacher_config: Union[(ConfigType, str, Path)], teacher_ckpt: Optional[str]=None, kd_cfg: OptConfigType=None, train_cfg: OptConfigType=None, test_cfg: OptConfigType=None, data_preprocessor: OptConfigType=None) -> None:
        super().__init__(backbone=backbone, neck=neck, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, data_preprocessor=data_preprocessor)
        if isinstance(teacher_config, (str, Path)):
            teacher_config = Config.fromfile(teacher_config)
        self.teacher = MODELS.build(teacher_config['model'])
        if (teacher_ckpt is not None):
            load_checkpoint(self.teacher, teacher_ckpt, map_location='cpu')
        self.freeze(self.teacher)
        self.loss_cls_kd = MODELS.build(kd_cfg['loss_cls_kd'])
        self.loss_reg_kd = MODELS.build(kd_cfg['loss_reg_kd'])
        self.with_feat_distill = False
        if kd_cfg.get('loss_feat_kd', None):
            self.loss_feat_kd = MODELS.build(kd_cfg['loss_feat_kd'])
            self.with_feat_distill = True
        self.reused_teacher_head_idx = kd_cfg['reused_teacher_head_idx']

    # Bug fix: this was a plain method taking only `model`, yet is invoked as
    # self.freeze(self.teacher) above — that call would raise TypeError
    # (self bound as `model`, teacher an unexpected extra argument).
    @staticmethod
    def freeze(model: nn.Module):
        """Put `model` in eval mode and stop gradients on all its parameters."""
        model.eval()
        for param in model.parameters():
            param.requires_grad = False

    def cuda(self, device: Optional[str]=None) -> nn.Module:
        # Keep the teacher on the same device as the student.
        self.teacher.cuda(device=device)
        return super().cuda(device=device)

    def to(self, device: Optional[str]=None) -> nn.Module:
        self.teacher.to(device=device)
        return super().to(device=device)

    def train(self, mode: bool=True) -> None:
        # The teacher stays frozen in eval mode regardless of student mode.
        self.teacher.train(False)
        super().train(mode)

    def __setattr__(self, name: str, value: Any) -> None:
        # Bypass nn.Module's attribute machinery for the teacher so it is not
        # registered as a submodule (its params must not appear in state_dict/optim).
        if (name == 'teacher'):
            object.__setattr__(self, name, value)
        else:
            super().__setattr__(name, value)
def load_or_encode_corpus(model_args: ModelArguments, data_args: DataArguments, eval_args: EvalArguments):
    """Return (faiss index, corpus ids), encoding the corpus only when not cached.

    When both the serialized index and the id array exist under
    `data_args.out_corpus_dir`, they are loaded directly; otherwise the corpus
    is encoded with the document encoder and — on the main process only —
    written back to disk for reuse.
    """
    out_index_path = os.path.join(data_args.out_corpus_dir, 'index')
    out_corpus_ids_path = os.path.join(data_args.out_corpus_dir, 'corpus_ids.npy')
    if (os.path.exists(out_index_path) and os.path.exists(out_corpus_ids_path)):
        # Cache hit: reuse the previously computed representations.
        index = faiss.read_index(out_index_path)
        corpus_ids = np.load(out_corpus_ids_path)
        logger.info('Load pre-computed corpus representations')
    else:
        doc_tokenizer = TCTTokenizerFast.from_pretrained(model_args.doc_encoder_path)
        doc_encoder = tct_repconc_from_pretrained(model_args.doc_encoder_path, False, None, None)
        # Corpus loading differs by dataset format.
        if (data_args.data_format == 'msmarco'):
            corpus = load_corpus(data_args.corpus_path, doc_tokenizer.sep_token, verbose=is_main_process(eval_args.local_rank))
        elif (data_args.data_format == 'beir'):
            corpus = load_beir_corpus(data_args.corpus_path, doc_tokenizer.sep_token, verbose=is_main_process(eval_args.local_rank))
        else:
            raise NotImplementedError()
        (index, corpus_ids) = encode_corpus(corpus, doc_encoder, doc_tokenizer, model_args.max_seq_length, eval_args)
        # Only the main process persists the cache to avoid write races.
        if is_main_process(eval_args.local_rank):
            os.makedirs(data_args.out_corpus_dir, exist_ok=True)
            faiss.write_index(index, out_index_path)
            np.save(out_corpus_ids_path, corpus_ids)
    return (index, corpus_ids)
def test_caller(path, step_ind, on_val):
    """Run descriptor generation for a trained KPConv model on the ETH dataset.

    Args:
        path: experiment directory containing the config and `snapshots/`.
        step_ind: index into the sorted list of snapshot steps to restore.
        on_val: unused here; kept for interface compatibility with callers.
    """
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
    config = Config()
    config.load(path)
    # (Removed leftover debug breakpoint: `import pdb; pdb.set_trace()` would
    # halt every run waiting for interactive input.)
    # Override a few config fields for this ETH test run.
    config.first_subsampling_dl = 0.05
    config.dataset = 'ETH'
    config.KP_extent = 2
    print()
    print('Dataset Preparation')
    print('')
    dataset = ETHDataset(1, load_test=True)
    dataset.init_test_input_pipeline(config)
    print('Creating Model')
    print('\n')
    t1 = time.time()
    model = KernelPointFCNN(dataset.flat_inputs, config)
    # Pick the snapshot whose step is `step_ind`-th smallest.
    snap_path = os.path.join(path, 'snapshots')
    snap_steps = [int(f[:(- 5)].split('-')[(- 1)]) for f in os.listdir(snap_path) if (f[(- 5):] == '.meta')]
    chosen_step = np.sort(snap_steps)[step_ind]
    chosen_snap = os.path.join(path, 'snapshots', 'snap-{:d}'.format(chosen_step))
    tester = ModelTester(model, restore_snap=chosen_snap)
    t2 = time.time()
    print('\n')
    print('Done in {:.1f} s'.format((t2 - t1)))
    print('\n')
    print('Start Test')
    print('\n')
    tester.generate_descriptor(model, dataset)
class FCBlockVGG(nn.Module):
    """VGG-style fully connected head: dropout -> fc+ReLU x2 -> linear logits."""

    def __init__(self, input_dim, hidden_dims, output_dim=10):
        super(FCBlockVGG, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dims[0])
        self.fc2 = nn.Linear(hidden_dims[0], hidden_dims[1])
        self.fc3 = nn.Linear(hidden_dims[1], output_dim)

    def forward(self, x):
        # Bug fix: functional F.dropout defaults to training=True, which kept
        # dropout active even in eval mode; tie it to the module's mode flag.
        x = F.dropout(x, training=self.training)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
def test_digits_greedi_ln_sparse():
    """GreeDi mixture selection on sparse digits matches precomputed reference results."""
    model1 = FeatureBasedSelection(100, 'sqrt')
    model2 = FeatureBasedSelection(100, 'log')
    # Mixture of sqrt/log feature-based objectives, optimized with GreeDi
    # using a lazy inner and naive outer optimizer.
    model = MixtureSelection(100, [model1, model2], [1.0, 0.3], optimizer='greedi', optimizer_kwds={'optimizer1': 'lazy', 'optimizer2': 'naive'}, random_state=0)
    model.fit(X_digits_sparse)
    # Only the first 85 picks are stable enough to compare against the oracle.
    assert_array_equal(model.ranking[:85], digits_greedi_ranking[:85])
    assert_array_almost_equal(model.gains[:85], digits_greedi_gains[:85], 4)
    assert_array_almost_equal(model.subset, X_digits_sparse[model.ranking].toarray())
def t5_tokenize(texts: List[str], name=DEFAULT_T5_NAME):
    """Tokenize `texts` with the named T5 tokenizer, on GPU when available.

    Returns (input_ids, attention_mask) padded to the longest sequence and
    truncated to MAX_LENGTH, placed on the model's device.
    """
    t5, tokenizer = get_model_and_tokenizer(name)
    if torch.cuda.is_available():
        t5 = t5.cuda()
    # Use whatever device the model's parameters ended up on.
    device = next(t5.parameters()).device
    batch = tokenizer.batch_encode_plus(texts, return_tensors='pt', padding='longest', max_length=MAX_LENGTH, truncation=True)
    return (batch.input_ids.to(device), batch.attention_mask.to(device))
def assert_allclose(actual: Dict[(str, np.ndarray)], desired: Dict[(str, np.ndarray)], actual_name: str, oracle_name: str, equal_nan=False, rtol=0.01, atol=0.001):
    """Assert two {name: ndarray} dicts share keys and have elementwise-close values.

    Raises:
        KeyError: when the key sets differ.
        TypeError: when any entry is not an np.ndarray.
        AssertionError: when numpy's allclose comparison fails for some key.
    """
    akeys = set(actual.keys())
    dkeys = set(desired.keys())
    if akeys != dkeys:
        raise KeyError(f'{actual_name}: {akeys} != {oracle_name}: {dkeys}')
    for key in akeys:
        lhs = actual[key]
        rhs = desired[key]
        # Validate types before comparing so the error names the offending side.
        if not isinstance(lhs, np.ndarray):
            raise TypeError(f'{actual_name}[{key}] is not np.ndarray but {type(lhs)}')
        if not isinstance(rhs, np.ndarray):
            raise TypeError(f'{oracle_name}[{key}] is not np.ndarray but {type(rhs)}')
        testing.assert_allclose(lhs, rhs, equal_nan=equal_nan, rtol=rtol, atol=atol, err_msg=f'{actual_name} != {oracle_name} at {key}')
class ShuffleNetV2(Backbone):
    """ShuffleNet V2 backbone producing a global-average-pooled feature vector.

    Args:
        stages_repeats: number of blocks in each of stages 2-4 (exactly 3 ints).
        stages_out_channels: output channels for conv1, stages 2-4, and conv5
            (exactly 5 ints).
    """

    def __init__(self, stages_repeats, stages_out_channels, **kwargs):
        super().__init__()
        if (len(stages_repeats) != 3):
            raise ValueError('expected stages_repeats as list of 3 positive ints')
        if (len(stages_out_channels) != 5):
            raise ValueError('expected stages_out_channels as list of 5 positive ints')
        self._stage_out_channels = stages_out_channels
        input_channels = 3
        output_channels = self._stage_out_channels[0]
        # Stem: stride-2 conv followed by stride-2 max pool (4x downsampling).
        self.conv1 = nn.Sequential(nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False), nn.BatchNorm2d(output_channels), nn.ReLU(inplace=True))
        input_channels = output_channels
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        stage_names = ['stage{}'.format(i) for i in [2, 3, 4]]
        for (name, repeats, output_channels) in zip(stage_names, stages_repeats, self._stage_out_channels[1:]):
            # Each stage starts with one stride-2 (downsampling) block,
            # followed by stride-1 blocks.
            seq = [InvertedResidual(input_channels, output_channels, 2)]
            for i in range((repeats - 1)):
                seq.append(InvertedResidual(output_channels, output_channels, 1))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels
        output_channels = self._stage_out_channels[(- 1)]
        # Final 1x1 conv before global pooling.
        self.conv5 = nn.Sequential(nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False), nn.BatchNorm2d(output_channels), nn.ReLU(inplace=True))
        self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self._out_features = output_channels

    def featuremaps(self, x):
        """Return the final convolutional feature map before pooling."""
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.conv5(x)
        return x

    def forward(self, x):
        """Return a (batch, out_features) pooled feature vector."""
        f = self.featuremaps(x)
        v = self.global_avgpool(f)
        return v.view(v.size(0), (- 1))
def _load_pretrain_emb(data_loader, en_batch_dev=None, en_batch_test=None):
    """Precompute contextual embeddings (ELMo or BERT) for train/dev/test batches.

    Side effect: mutates the module-global ``args`` (``embedding_dim`` and
    ``hidden_dim``) based on the chosen embedding source before computing
    anything, so calling order matters.

    Returns:
        (pre_compute_dict, embed_loader): a dict of precomputed embeddings keyed
        by split name, and the loader used to produce them (for later reuse).
    """
    # ELMo variants are 1024-d; hidden size is the mean of embedding and tag dims.
    if ((args.embedding_source == 'elmo_1') or (args.embedding_source == 'elmo_2') or (args.embedding_source == 'elmo_0')):
        ext_dim = 1024
        args.embedding_dim = ext_dim
        args.hidden_dim = ((ext_dim + args.tag_dim) // 2)
    elif (args.embedding_source[:4] == 'BERT'):
        # BERT-base hidden size.
        ext_dim = 768
        args.embedding_dim = ext_dim
        args.hidden_dim = ((ext_dim + args.tag_dim) // 2)
    else:
        # NOTE(review): falls through with args unchanged — presumably relies on
        # pre-existing args values; confirm whether this should raise instead.
        print('embedding name is not available, double check.')
    pre_compute_dict = {}
    if (args.lang != 'en'):
        # Non-English: use the foreign-language ELMo model directly.
        embed_loader = Foreign_Elmo(args.elmo_model_path, args.embedding_source, device=args.device)
        (elmo_embeds_train, non_context_embeds_train) = embed_loader._get_embeddings(data_loader.en_batch)
        pre_compute_dict['elmo_embeds_train'] = elmo_embeds_train
        pre_compute_dict['non_context_embeds_train'] = non_context_embeds_train
        if (en_batch_dev is not None):
            (elmo_embeds_dev, non_context_embeds_dev) = embed_loader._get_embeddings(en_batch_dev)
            pre_compute_dict['elmo_embeds_dev'] = elmo_embeds_dev
            pre_compute_dict['non_context_embeds_dev'] = non_context_embeds_dev
        if (en_batch_test is not None):
            (elmo_embeds_test, non_context_embeds_test) = embed_loader._get_embeddings(en_batch_test)
            pre_compute_dict['elmo_embeds_test'] = elmo_embeds_test
            pre_compute_dict['non_context_embeds_test'] = non_context_embeds_test
    else:
        # English: go through Embedding_Weight / load_elmo_batch per split.
        embed_loader = Embedding_Weight(args.embedding_source, data_loader=data_loader, num_sent=args.epoch_sent, device=args.device)
        (elmo_embeds_train, non_context_embeds_train) = load_elmo_batch(data_loader, args, embed_loader, mod='train', processed_sent_dev=None, processed_sent_test=None)
        pre_compute_dict['elmo_embeds_train'] = elmo_embeds_train
        pre_compute_dict['non_context_embeds_train'] = non_context_embeds_train
        if (en_batch_dev is not None):
            (elmo_embeds_dev, non_context_embeds_dev) = load_elmo_batch(None, args, embed_loader, mod='dev', processed_sent_dev=en_batch_dev, processed_sent_test=None)
            pre_compute_dict['elmo_embeds_dev'] = elmo_embeds_dev
            pre_compute_dict['non_context_embeds_dev'] = non_context_embeds_dev
        if (en_batch_test is not None):
            (elmo_embeds_test, non_context_embeds_test) = load_elmo_batch(None, args, embed_loader, mod='test', processed_sent_dev=None, processed_sent_test=en_batch_test)
            pre_compute_dict['elmo_embeds_test'] = elmo_embeds_test
            pre_compute_dict['non_context_embeds_test'] = non_context_embeds_test
    return (pre_compute_dict, embed_loader)
class ImageNet100(ImageFolder):
    """ImageNet-100 laid out as ``<root>/train`` and ``<root>/val`` class folders.

    Bug fix: ``train_folder`` and ``val_folder`` were plain methods but are used
    as path attributes (``os.path.exists(self.train_folder)``), which passed a
    bound method to ``os.stat`` and raised TypeError. They are now properties —
    the stripped ``@property`` decorators restored.
    """

    def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
        # Kept as an attribute (not self.root) because ImageFolder.__init__
        # sets self.root to the split folder below.
        self.parent_dir = root
        if download:
            self.download()
        if not self._check_exists():
            raise FileNotFoundError('Dataset does not exist.')
        data_folder = self.train_folder if train else self.val_folder
        super(ImageNet100, self).__init__(data_folder, transform, target_transform)

    def download(self):
        """Placeholder: downloading is not implemented; only creates the root dir."""
        if self._check_exists():
            print('Data already downloaded and processed.')
            return
        if not os.path.isdir(self.root):
            os.mkdir(self.root)
        raise NotImplementedError('Download not supported')

    def _check_exists(self):
        """Return True when both split folders are present on disk."""
        return os.path.exists(self.train_folder) and os.path.exists(self.val_folder)

    @property
    def train_folder(self):
        """Path of the training split folder."""
        return os.path.join(self.parent_dir, 'train')

    @property
    def val_folder(self):
        """Path of the validation split folder."""
        return os.path.join(self.parent_dir, 'val')
class WarmupMultiStepSchedule(LambdaLR):
    """LR schedule: linear warmup, then multiply by ``decay_ratio`` at each milestone.

    Args:
        optimizer: wrapped optimizer.
        warmup_steps: steps over which the LR scale ramps linearly 0 -> 1.
        decay_steps: iterable of milestone steps; each milestone already passed
            multiplies the scale by ``decay_ratio`` once.
        decay_ratio: per-milestone multiplicative decay factor.
    """

    def __init__(self, optimizer, warmup_steps, decay_steps, decay_ratio=0.1, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.decay_steps = decay_steps
        self.decay_ratio = decay_ratio
        super(WarmupMultiStepSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        """Return the LR multiplier for the given global step."""
        if step < self.warmup_steps:
            # Linear ramp; max(1.0, ...) guards against warmup_steps == 0.
            return float(step) / float(max(1.0, self.warmup_steps))
        scale = 1.0
        crossed = [milestone for milestone in self.decay_steps if step > milestone]
        for _ in crossed:
            scale *= self.decay_ratio
        return scale
def test_digits_cosine_two_stage_object():
    """SumRedundancySelection with a TwoStageGreedy optimizer matches the cached cosine oracle."""
    selector = SumRedundancySelection(100, 'cosine', optimizer=TwoStageGreedy())
    selector.fit(X_digits)
    # Ranking and gains must reproduce the precomputed reference exactly /
    # to 4 decimals; the subset must be the ranked rows of the input.
    assert_array_equal(selector.ranking, digits_cosine_ranking)
    assert_array_almost_equal(selector.gains, digits_cosine_gains, 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
# NOTE(review): this bare call looks like a truncated decorator — likely
# ``@add_end_docstrings(PIPELINE_INIT_ARGS)`` on the class below; the leading
# ``@add`` appears to have been lost. Confirm against the upstream source.
_end_docstrings(PIPELINE_INIT_ARGS)
class AudioClassificationPipeline(Pipeline):
    """Audio classification pipeline: raw audio in, top-k labels out.

    Accepts a file path, raw bytes (decoded with ffmpeg), or a 1-D numpy
    waveform. Defaults to returning the 5 highest-scoring labels.
    """

    def __init__(self, *args, **kwargs):
        # Default to the top-5 labels for this task.
        kwargs['top_k'] = 5
        super().__init__(*args, **kwargs)
        if self.framework != 'pt':
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        self.check_model_type(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING)

    def __call__(self, inputs: Union[np.ndarray, bytes, str], **kwargs):
        """Classify the given audio input(s)."""
        return super().__call__(inputs, **kwargs)

    def _sanitize_parameters(self, top_k=None, **kwargs):
        postprocess_params = {}
        if top_k is not None:
            # Never request more labels than the model defines.
            postprocess_params['top_k'] = min(top_k, self.model.config.num_labels)
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        """Normalize str/bytes/array input to feature-extractor tensors."""
        if isinstance(inputs, str):
            # A string is treated as a path to an audio file.
            with open(inputs, 'rb') as f:
                inputs = f.read()
        if isinstance(inputs, bytes):
            inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)
        if not isinstance(inputs, np.ndarray):
            raise ValueError('We expect a numpy ndarray as input')
        if len(inputs.shape) != 1:
            raise ValueError('We expect a single channel audio input for AutomaticSpeechRecognitionPipeline')
        return self.feature_extractor(inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors='pt')

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, top_k=5):
        """Convert logits to a list of {'score', 'label'} dicts, best first."""
        probs = model_outputs.logits[0].softmax(-1)
        scores, ids = probs.topk(top_k)
        return [
            {'score': score, 'label': self.model.config.id2label[_id]}
            for score, _id in zip(scores.tolist(), ids.tolist())
        ]
def read_in_articles(article_ids=None):
    """Load articles referenced by the annotation CSV.

    Args:
        article_ids: optional collection of ids to keep; when None, all unique
            article ids found in the CSV are loaded.

    Returns:
        list of articles produced by ``get_article``, in CSV discovery order.
    """
    anno_df = pd.read_csv(anno_csv_path)
    return [
        get_article(article_id)
        for article_id in anno_df[STUDY_ID_COL].unique()
        if article_ids is None or article_id in article_ids
    ]
class SummarizationModule(BaseTransformer):
    """Lightning module for seq2seq summarization fine-tuning.

    Bug fixes (stripped decorators restored):
    - ``pad`` is consumed as a value (``batch['input_ids'].ne(self.pad)`` in
      ``training_step``), so it must be a ``@property`` — as a plain method the
      comparison received a bound method and failed.
    - ``add_model_specific_args`` takes ``(parser, root_dir)`` with no ``self``;
      it is a ``@staticmethod``.
    """
    mode = 'summarization'
    loss_names = ['loss']
    metric_names = ROUGE_KEYS
    default_val_metric = 'rouge2'

    def __init__(self, hparams, **kwargs):
        # Sortish sampling and dynamic batching are mutually exclusive, and
        # dynamic batching is single-GPU only.
        if (hparams.sortish_sampler and (hparams.gpus > 1)):
            hparams.replace_sampler_ddp = False
        elif (hparams.max_tokens_per_batch is not None):
            if (hparams.gpus > 1):
                raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
            if hparams.sortish_sampler:
                raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, 'summarization')
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = (Path(self.output_dir) / 'metrics.json')
        self.hparams_save_path = (Path(self.output_dir) / 'hparams.pkl')
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        # FSMT keeps separate source/target vocabularies.
        self.vocab_size = (self.config.tgt_vocab_size if (self.model_type == 'fsmt') else self.config.vocab_size)
        self.dataset_kwargs: dict = dict(data_dir=self.hparams.data_dir, max_source_length=self.hparams.max_source_length, prefix=(self.model.config.prefix or ''))
        n_observations_per_split = {'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test': self.hparams.n_test}
        # -1 (or any negative) means "use the whole split".
        self.n_obs = {k: (v if (v >= 0) else None) for (k, v) in n_observations_per_split.items()}
        self.target_lens = {'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length}
        assert (self.target_lens['train'] <= self.target_lens['val']), f'target_lens: {self.target_lens}'
        assert (self.target_lens['train'] <= self.target_lens['test']), f'target_lens: {self.target_lens}'
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        self.hparams.git_sha = get_git_info()['repo_sha']
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None
        # MBart needs the target-language code as the decoder start token.
        if ((self.model.config.decoder_start_token_id is None) and isinstance(self.tokenizer, MBartTokenizer)):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (Seq2SeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else LegacySeq2SeqDataset)
        self.already_saved_batch = False
        self.eval_beams = (self.model.config.num_beams if (self.hparams.eval_beams is None) else self.hparams.eval_beams)
        if (self.hparams.eval_max_gen_length is not None):
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = (self.default_val_metric if (self.hparams.val_metric is None) else self.hparams.val_metric)

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """Dump one batch as decoded text + raw token ids for debugging."""
        readable_batch = {k: (self.tokenizer.batch_decode(v.tolist()) if ('mask' not in k) else v.shape) for (k, v) in batch.items()}
        save_json(readable_batch, (Path(self.output_dir) / 'text_batch.json'))
        save_json({k: v.tolist() for (k, v) in batch.items()}, (Path(self.output_dir) / 'tok_batch.json'))
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        """Decode token ids to stripped text strings."""
        gen_text = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        """Compute the (possibly label-smoothed) LM loss for one batch."""
        pad_token_id = self.tokenizer.pad_token_id
        (src_ids, src_mask) = (batch['input_ids'], batch['attention_mask'])
        tgt_ids = batch['labels']
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if (not self.already_saved_batch):
            # Save the first batch once for debugging purposes.
            batch['decoder_input_ids'] = decoder_input_ids
            self.save_readable_batch(batch)
        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs['logits']
        if (self.hparams.label_smoothing == 0):
            # Plain cross entropy, padding positions excluded.
            ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert (lm_logits.shape[(- 1)] == self.vocab_size)
            loss = ce_loss_fct(lm_logits.view((- 1), lm_logits.shape[(- 1)]), tgt_ids.view((- 1)))
        else:
            lprobs = torch.nn.functional.log_softmax(lm_logits, dim=(- 1))
            (loss, nll_loss) = label_smoothed_nll_loss(lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id)
        return (loss,)

    @property
    def pad(self) -> int:
        """Pad token id (used as a value, e.g. ``ids.ne(self.pad)``)."""
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = {name: loss for (name, loss) in zip(self.loss_names, loss_tensors)}
        # tokens-per-batch and padding statistics for throughput monitoring.
        logs['tpb'] = (batch['input_ids'].ne(self.pad).sum() + batch['labels'].ne(self.pad).sum())
        logs['bs'] = batch['input_ids'].shape[0]
        logs['src_pad_tok'] = batch['input_ids'].eq(self.pad).sum()
        logs['src_pad_frac'] = batch['input_ids'].eq(self.pad).float().mean()
        return {'loss': loss_tensors[0], 'log': logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix='val') -> Dict:
        """Aggregate step outputs into epoch-level loss and generation metrics."""
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses['loss']
        generative_metrics = {k: np.array([x[k] for x in outputs]).mean() for k in (self.metric_names + ['gen_time', 'gen_len'])}
        metric_val = (generative_metrics[self.val_metric] if (self.val_metric in generative_metrics) else losses[self.val_metric])
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for (k, v) in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f'{prefix}_avg_{k}': x for (k, x) in losses.items()}
        all_metrics['step_count'] = self.step_count
        self.metrics[prefix].append(all_metrics)
        preds = flatten_list([x['preds'] for x in outputs])
        return {'log': all_metrics, 'preds': preds, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor}

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        """Generate summaries for one batch and score them against the targets."""
        t0 = time.time()
        generated_ids = self.model.generate(batch['input_ids'], attention_mask=batch['attention_mask'], use_cache=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length)
        gen_time = ((time.time() - t0) / batch['input_ids'].shape[0])
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch['labels'])
        loss_tensors = self._step(batch)
        base_metrics = {name: loss for (name, loss) in zip(self.loss_names, loss_tensors)}
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix='test')

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        """Build the dataset for one split with its per-split size limits."""
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs)
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool=False) -> DataLoader:
        """Build a dataloader; sortish/dynamic sampling applies to train only."""
        dataset = self.get_dataset(type_path)
        if (self.hparams.sortish_sampler and (type_path != 'test') and (type_path != 'val')):
            sampler = dataset.make_sortish_sampler(batch_size, distributed=(self.hparams.gpus > 1))
            return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler)
        elif ((self.hparams.max_tokens_per_batch is not None) and (type_path != 'test') and (type_path != 'val')):
            batch_sampler = dataset.make_dynamic_sampler(self.hparams.max_tokens_per_batch, distributed=(self.hparams.gpus > 1))
            return DataLoader(dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers)
        else:
            return DataLoader(dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None)

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader('train', batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader('val', batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader('test', batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Register summarization-specific CLI arguments on ``parser``."""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument('--max_source_length', default=1024, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
        parser.add_argument('--max_target_length', default=56, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
        parser.add_argument('--val_max_target_length', default=142, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
        parser.add_argument('--test_max_target_length', default=142, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
        parser.add_argument('--freeze_encoder', action='store_true')
        parser.add_argument('--freeze_embeds', action='store_true')
        parser.add_argument('--sortish_sampler', action='store_true', default=False)
        parser.add_argument('--overwrite_output_dir', action='store_true', default=False)
        parser.add_argument('--max_tokens_per_batch', type=int, default=None)
        parser.add_argument('--logger_name', type=str, choices=['default', 'wandb', 'wandb_shared'], default='default')
        parser.add_argument('--n_train', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
        parser.add_argument('--n_val', type=int, default=500, required=False, help='# examples. -1 means use all.')
        parser.add_argument('--n_test', type=int, default=(- 1), required=False, help='# examples. -1 means use all.')
        parser.add_argument('--task', type=str, default='summarization', required=False, help='# examples. -1 means use all.')
        parser.add_argument('--label_smoothing', type=float, default=0.0, required=False)
        parser.add_argument('--src_lang', type=str, default='', required=False)
        parser.add_argument('--tgt_lang', type=str, default='', required=False)
        parser.add_argument('--eval_beams', type=int, default=None, required=False)
        parser.add_argument('--val_metric', type=str, default=None, required=False, choices=['bleu', 'rouge2', 'loss', None])
        parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        parser.add_argument('--save_top_k', type=int, default=1, required=False, help='How many checkpoints to save')
        parser.add_argument('--early_stopping_patience', type=int, default=(- 1), required=False, help='-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So val_check_interval will effect it.')
        return parser
class ChainExample(torch.utils.data.Dataset):
    """Dataset over a Kaldi-style egs listing.

    Each line of ``egs_file`` is ``<key> <value>``. When ``output_file`` is
    given, it maps keys to language ids and items become ``(key, value,
    lang_id)``; otherwise items are ``(key, value)``.

    Bug fix: the original ``__getitem__`` returned an unbound ``lang_id`` when
    no ``output_file`` was set (guaranteed NameError). Also closes the listing
    files, which the original leaked.
    """

    def __init__(self, egs_file, output_file=None):
        if output_file and egs_file.startswith('scp:'):
            raise ValueError('need egs_file to start to be of type scp when using output_file')
        self.egs_file = egs_file
        with open(egs_file) as f:
            egs_list = [ln.strip().split() for ln in f]
        self.egs_dict = OrderedDict(egs_list)
        self.egs_keys = list(self.egs_dict)
        if output_file:
            self.output_file = output_file
            with open(output_file) as f:
                self.lang_ids = dict(ln.strip().split() for ln in f)
        else:
            self.output_file = None

    def __len__(self):
        return len(self.egs_dict)

    def __getitem__(self, idx):
        key = self.egs_keys[idx]
        value = self.egs_dict[key]
        if self.output_file:
            # -1 marks a key that has no language id entry.
            lang_id = self.lang_ids.get(key, -1)
            return (key, value, lang_id)
        return (key, value)
def translate_opts(parser):
    """Register translation/decoding command-line options on ``parser``.

    Options are grouped into: Model, Data, Random Sampling, Beam, Logging,
    Efficiency and Speech. Only registers arguments; performs no parsing.
    """
    # Model: checkpoint paths and precision/ensembling behavior.
    group = parser.add_argument_group('Model')
    group.add('--model', '-model', dest='models', metavar='MODEL', nargs='+', type=str, default=[], required=True, help='Path to model .pt file(s). Multiple models can be specified, for ensemble decoding.')
    group.add('--fp32', '-fp32', action='store_true', help='Force the model to be in FP32 because FP16 is very slow on GTX1080(ti).')
    group.add('--avg_raw_probs', '-avg_raw_probs', action='store_true', help='If this is set, during ensembling scores from different models will be combined by averaging their raw probabilities and then taking the log. Otherwise, the log probabilities will be averaged directly. Necessary for models whose output layers can assign zero probability.')
    # Data: input/output files and sharding.
    group = parser.add_argument_group('Data')
    group.add('--data_type', '-data_type', default='text', help='Type of the source input. Options: [text|img].')
    group.add('--src', '-src', required=True, help='Source sequence to decode (one line per sequence)')
    group.add('--history', '-history', required=True, help='Source sequence to decode (one line per sequence)')
    group.add('--ans', '-ans', required=False, help='')
    group.add('--src_dir', '-src_dir', default='', help='Source directory for image or audio files')
    group.add('--tgt', '-tgt', help='True target sequence (optional)')
    group.add('--shard_size', '-shard_size', type=int, default=0, help='Divide src and tgt (if applicable) into smaller multiple src and tgt files, then build shards, each shard will have opt.shard_size samples except last shard. shard_size=0 means no segmentation shard_size>0 means segment dataset into multiple shards, each shard has shard_size samples')
    group.add('--output', '-output', default='pred.txt', help='Path to output the predictions (each line will be the decoded sequence')
    group.add('--report_bleu', '-report_bleu', action='store_true', help='Report bleu score after translation, call tools/multi-bleu.perl on command line')
    group.add('--report_rouge', '-report_rouge', action='store_true', help='Report rouge 1/2/3/L/SU4 score after translation call tools/test_rouge.py on command line')
    group.add('--report_time', '-report_time', action='store_true', help='Report some translation time metrics')
    group.add('--dynamic_dict', '-dynamic_dict', action='store_true', help='Create dynamic dictionaries')
    group.add('--share_vocab', '-share_vocab', action='store_true', help='Share source and target vocabulary')
    # Random Sampling: alternative to beam search.
    group = parser.add_argument_group('Random Sampling')
    group.add('--random_sampling_topk', '-random_sampling_topk', default=1, type=int, help='Set this to -1 to do random sampling from full distribution. Set this to value k>1 to do random sampling restricted to the k most likely next tokens. Set this to 1 to use argmax or for doing beam search.')
    group.add('--random_sampling_temp', '-random_sampling_temp', default=1.0, type=float, help='If doing random sampling, divide the logits by this before computing softmax during decoding.')
    group.add('--seed', '-seed', type=int, default=829, help='Random seed')
    # Beam: beam-search size, length/coverage penalties, repetition blocking.
    group = parser.add_argument_group('Beam')
    group.add('--beam_size', '-beam_size', type=int, default=5, help='Beam size')
    group.add('--min_length', '-min_length', type=int, default=0, help='Minimum prediction length')
    group.add('--max_length', '-max_length', type=int, default=100, help='Maximum prediction length.')
    group.add('--max_sent_length', '-max_sent_length', action=DeprecateAction, help='Deprecated, use `-max_length` instead')
    group.add('--stepwise_penalty', '-stepwise_penalty', action='store_true', help='Apply penalty at every decoding step. Helpful for summary penalty.')
    group.add('--length_penalty', '-length_penalty', default='none', choices=['none', 'wu', 'avg'], help='Length Penalty to use.')
    group.add('--coverage_penalty', '-coverage_penalty', default='none', choices=['none', 'wu', 'summary'], help='Coverage Penalty to use.')
    group.add('--alpha', '-alpha', type=float, default=0.0, help='Google NMT length penalty parameter (higher = longer generation)')
    group.add('--beta', '-beta', type=float, default=(- 0.0), help='Coverage penalty parameter')
    group.add('--block_ngram_repeat', '-block_ngram_repeat', type=int, default=0, help='Block repetition of ngrams during decoding.')
    group.add('--ignore_when_blocking', '-ignore_when_blocking', nargs='+', type=str, default=[], help='Ignore these strings when blocking repeats. You want to block sentence delimiters.')
    group.add('--replace_unk', '-replace_unk', action='store_true', help='Replace the generated UNK tokens with the source token that had highest attention weight. If phrase_table is provided, it will lookup the identified source token and give the corresponding target token. If it is not provided(or the identified source token does not exist in the table) then it will copy the source token')
    # Logging: verbosity, log files and beam/attention dumps.
    group = parser.add_argument_group('Logging')
    group.add('--verbose', '-verbose', action='store_true', help='Print scores and predictions for each sentence')
    group.add('--log_file', '-log_file', type=str, default='', help='Output logs to a file under this path.')
    group.add('--log_file_level', '-log_file_level', type=str, action=StoreLoggingLevelAction, choices=StoreLoggingLevelAction.CHOICES, default='0')
    group.add('--attn_debug', '-attn_debug', action='store_true', help='Print best attn for each word')
    group.add('--dump_beam', '-dump_beam', type=str, default='', help='File to dump beam information to.')
    group.add('--n_best', '-n_best', type=int, default=1, help='If verbose is set, will output the n_best decoded sentences')
    # Efficiency: batching and device selection.
    group = parser.add_argument_group('Efficiency')
    group.add('--batch_size', '-batch_size', type=int, default=32, help='Batch size')
    group.add('--gpu', '-gpu', type=int, default=(- 1), help='Device to run on')
    # Speech: spectrogram parameters for audio inputs.
    group = parser.add_argument_group('Speech')
    group.add('--sample_rate', '-sample_rate', type=int, default=16000, help='Sample rate.')
    group.add('--window_size', '-window_size', type=float, default=0.02, help='Window size for spectrogram in seconds')
    group.add('--window_stride', '-window_stride', type=float, default=0.01, help='Window stride for spectrogram in seconds')
    group.add('--window', '-window', default='hamming', help='Window type for spectrogram generation')
    group.add('--image_channel_size', '-image_channel_size', type=int, default=3, choices=[3, 1], help='Using grayscale image can training model faster and smaller')
class Explanation(S):
    """Container of explanation ``Component``s with field-level attribute access.

    Bug fixes (stripped decorators restored): ``_init_explanation``,
    ``from_json`` and ``from_components`` are all written against a ``cls``
    first parameter but were declared as plain methods, so every call path
    (including ``__init__``) received the wrong arguments. They are now
    ``@classmethod``s.
    """

    @classmethod
    def _init_explanation(cls, instance, *args):
        # Shared initializer used by __init__ and the from_components factory.
        super(Explanation, instance).__init__()
        instance.components = {}
        instance._field_components_map = {}
        for value in args:
            if (value is not None):
                instance.append(value)

    def __init__(self, **kwargs):
        self.__class__._init_explanation(self, *list(kwargs.values()))

    def append(self, component):
        """Add a Component, exposing each of its fields as an attribute."""
        if (not isinstance(component, Component)):
            raise Exception(f"Can't append object of type {type(component)} to this object.")
        self.components[type(component)] = component
        for (field_name, field_value) in component.fields.items():
            self.__setattr__(field_name, field_value)
            self._field_components_map[field_name] = type(component)
        return self

    def __contains__(self, item):
        return (item in self.components)

    def __repr__(self):
        # Render shape first, then every field of every component, with long
        # values truncated to keep the listing readable.
        record = self.components.copy()
        fields = []
        shape_str = f'shape: {self.shape}'
        fields.append(shape_str)
        fields.append(('-' * len(shape_str)))
        for (record_key, record_val) in record.items():
            for (field_name, field_val) in record_val.fields.items():
                field_value = str(self.__getattr__(field_name))
                if (field_name in self._dims):
                    field_value_str = f'Dim {field_name} = {field_value}'
                else:
                    if (field_name in self._objects):
                        field_type = 'O'
                        field_dim = ','.join((str(x) for x in self._objects[field_name].dim))
                    else:
                        field_type = 'A'
                        field_dim = ','.join((str(x) for x in self._aliases[field_name].dim))
                    field_value_str = f'{field_type}{{{field_dim}}} {field_name} = {field_value}'
                if (len(field_value_str) > 60):
                    field_value_str = (field_value_str[:57] + '...')
                fields.append(field_value_str)
        fields = '\n'.join(fields)
        return fields

    @classmethod
    def from_json(cls, json_str):
        """Deserialize an Explanation previously produced by ``to_json``."""
        from interpret.newapi.serialization import ExplanationJSONDecoder
        d = json.loads(json_str, cls=ExplanationJSONDecoder)
        instance = d['content']
        return instance

    @classmethod
    def from_components(cls, components):
        """Build an Explanation directly from an iterable of components."""
        instance = cls.__new__(cls)
        cls._init_explanation(instance, *components)
        return instance

    def to_json(self, **kwargs):
        """Serialize this Explanation (with a format version) to a JSON string."""
        from interpret.newapi.serialization import ExplanationJSONEncoder
        version = '0.0.1'
        di = {'version': version, 'content': self}
        return json.dumps(di, cls=ExplanationJSONEncoder, **kwargs)
class CNN(aicnn.CNN):
    """Transfer-learning CNN: frozen ImageNet backbone plus a small dense head.

    NOTE: this class uses ``model`` as the conventional ``self`` parameter name
    throughout, matching the base ``aicnn.CNN`` style.
    """

    def __init__(model, input_shape, nb_classes, n_dense=128, p_dropout=0.5, BN_flag=False, PretrainedModel=VGG16):
        # Stash hyper-parameters first: the base __init__ is expected to call
        # build_model(), which reads them.
        model.in_shape = input_shape
        model.n_dense = n_dense
        model.p_dropout = p_dropout
        model.PretrainedModel = PretrainedModel
        model.BN_flag = BN_flag
        super().__init__(nb_classes)

    def build_model(model):
        """Assemble backbone + head; freezes all backbone layers."""
        nb_classes = model.nb_classes
        input_shape = model.in_shape
        PretrainedModel = model.PretrainedModel
        base_model = PretrainedModel(weights='imagenet', include_top=False, input_shape=input_shape)
        x = base_model.input
        h = base_model.output
        # z_cl: convolutional features; z_fl: dense features before the classifier.
        z_cl = h
        h = model.topmodel(h)
        z_fl = h
        y = Dense(nb_classes, activation='softmax', name='preds')(h)
        # Freeze the pretrained backbone; only the head is trainable.
        for layer in base_model.layers:
            layer.trainable = False
        model.cl_part = Model(x, z_cl)
        model.fl_part = Model(x, z_fl)
        model.x = x
        model.y = y

    def topmodel(model, h):
        """Head: global average pooling -> dense(relu) -> BN or dropout."""
        BN_flag = model.BN_flag
        n_dense = model.n_dense
        p_dropout = model.p_dropout
        h = GlobalAveragePooling2D()(h)
        h = Dense(n_dense, activation='relu')(h)
        if BN_flag:
            h = BatchNormalization()(h)
        else:
            h = Dropout(p_dropout)(h)
        return h
class MLPMixer(nn.Module):
    """MLP-Mixer classifier: patch embedding -> mixer blocks -> norm/pool head."""

    def __init__(self, num_classes: int, image_size: int=256, channels: int=3, patch_size: int=32, num_layers: int=8, hidden_dim: int=512, tokens_hidden_dim: int=256, channels_hidden_dim: int=2048):
        super().__init__()
        # Number of non-overlapping patches per image side, squared.
        num_patches = (image_size // patch_size) ** 2
        self.embed = PatchEmbeddings(patch_size, hidden_dim, channels)
        self.layers = nn.Sequential(*(
            MixerBlock(num_patches=num_patches, num_channels=hidden_dim, tokens_hidden_dim=tokens_hidden_dim, channels_hidden_dim=channels_hidden_dim)
            for _ in range(num_layers)
        ))
        self.norm = nn.LayerNorm(hidden_dim)
        self.pool = GlobalAveragePooling(dim=1)
        self.classifier = Classifier(hidden_dim, num_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Unpacking enforces a 4-D (B, C, H, W) input, as the original did.
        b, c, h, w = x.shape
        out = self.embed(x)
        out = self.layers(out)
        out = self.norm(out)
        out = self.pool(out)
        return self.classifier(out)
def setup(app):
    """Sphinx extension hook: configure recommonmark and enable AutoStructify."""
    config = {
        # Resolve relative doc links against the GitHub docs root.
        'url_resolver': (lambda url: (github_doc_root + url)),
        'auto_toc_tree_section': 'Contents',
    }
    app.add_config_value('recommonmark_config', config, True)
    app.add_transform(AutoStructify)
class TestFeatureCommon(ZooTestCase):
    """FeatureSet / Relations tests on a local 4-core Spark context.

    Bug fix: ``test_train_FeatureSet`` ended with ``assert (expr, 8)`` —
    asserting a non-empty tuple, which is always True. It now asserts the
    actual prediction count.
    """

    def setup_method(self, method):
        sparkConf = init_spark_conf().setMaster('local[4]').setAppName('test feature set')
        self.sc = init_nncontext(sparkConf)

    def test_BigDL_adapter(self):
        new_preprocessing = BigDLAdapter(Resize(1, 1))
        assert isinstance(new_preprocessing, Preprocessing)

    def test_relations(self):
        resource_path = os.path.join(os.path.split(__file__)[0], '../resources')
        path = os.path.join(resource_path, 'qa')
        relations = Relations.read((path + '/relations.txt'))
        assert isinstance(relations, list)
        relations2 = Relations.read((path + '/relations.csv'), self.sc, 2)
        assert isinstance(relations2, RDD)
        relations3 = Relations.read_parquet((path + '/relations.parquet'), self.sc)
        assert isinstance(relations3, RDD)

    def test_train_FeatureSet(self):
        batch_size = 8
        epoch_num = 5
        # Build 8 random 200x200x3 images, all labeled class 2.
        images = []
        labels = []
        for i in range(0, 8):
            features = np.random.uniform(0, 1, (200, 200, 3))
            label = np.array([2])
            images.append(features)
            labels.append(label)
        image_frame = DistributedImageFrame(self.sc.parallelize(images), self.sc.parallelize(labels))
        transformer = Pipeline([BytesToMat(), Resize(256, 256), CenterCrop(224, 224), ChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225), MatToTensor(), ImageFrameToSample(target_keys=['label'])])
        data_set = FeatureSet.image_frame(image_frame).transform(transformer).to_dataset()
        model = Sequential()
        model.add(SpatialConvolution(3, 1, 5, 5))
        model.add(View([((1 * 220) * 220)]))
        model.add(Linear(((1 * 220) * 220), 20))
        model.add(LogSoftMax())
        optim_method = SGD(learningrate=0.01)
        optimizer = Optimizer.create(model=model, training_set=data_set, criterion=ClassNLLCriterion(), optim_method=optim_method, end_trigger=MaxEpoch(epoch_num), batch_size=batch_size)
        optimizer.set_validation(batch_size=batch_size, val_rdd=data_set, trigger=EveryEpoch(), val_method=[Top1Accuracy()])
        trained_model = optimizer.optimize()
        predict_result = trained_model.predict_image(image_frame.transform(transformer))
        # Was: assert (expr, 8) — a tuple, hence always truthy.
        assert predict_result.get_predict().count() == 8

    def create_feature_set_from_rdd(self):
        dim = 2
        data_len = 100

        def gen_rand_sample():
            features = np.random.uniform(0, 1, dim)
            label = np.array(((2 * features).sum() + 0.4))
            return Sample.from_ndarray(features, label)
        FeatureSet.rdd(self.sc.parallelize(range(0, data_len)).map((lambda i: gen_rand_sample()))).to_dataset()
def heuristic_target_entropy(action_space):
    """SAC heuristic: target entropy is minus the action-space dimensionality."""
    return -np.prod(action_space.shape)
def get_images(fire, size=(128, 128)):
    """Load an image file as a normalized tensor with a leading batch axis.

    Args:
        fire: path to the image file.
        size: (height, width) to resize to. Changed from a list to a tuple
            default to avoid the mutable-default-argument pitfall; callers
            may still pass a list.

    Returns:
        A (1, 3, H, W) float tensor with channels scaled to roughly [-1, 1]
        (mean 0.5, std 0.5 per channel).
    """
    transform = transforms.Compose([transforms.Resize((size[0], size[1]))])
    normalize = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    img = Image.open(fire).convert('RGB')
    img = transform(img)
    img = normalize(img)
    return img.unsqueeze(0)
@_registry(calib_method='percentile')
class PercentileCalibrator(CalibratorBase):
    """Calibrator that derives a quantization range from a histogram
    percentile: the (100 - percentile)/2 % of mass in each tail is clipped.

    BUG FIX: the registry decorator was a bare call (`_registry(...)` on
    its own line), so the class was never registered — restored the `@`.
    """

    def __init__(self, num_bins=2048, percentile=99.999):
        super(PercentileCalibrator, self).__init__()
        self.collector = None  # lazily created on first collect_calib_data()
        self.num_bins = num_bins
        self.percentile = percentile

    def collect_calib_data(self, datas):
        """Accumulate `datas` into the histogram and refresh the range."""
        if not self.collector:
            self.collector = HistogramCollector(self.num_bins)
        self.collector.collect_data(datas)
        self.compute_percentile_range(self.percentile)

    def compute_percentile_range(self, percentile):
        """Set self._calib_min/_calib_max to the two-sided percentile
        bounds of the collected histogram, clamped to the observed range.

        Raises:
            ValueError: if `percentile` is outside [0, 100].
        """
        if percentile < 0 or percentile > 100:
            raise ValueError('Invalid percentile. Must be in range 0 <= percentile <= 100.')
        (calib_hist, calib_bin_edges, min_range, max_range, th) = self.collector.histogram
        total = calib_hist.sum()
        cdf = np.cumsum(calib_hist / total)
        # Split the excluded probability mass equally between both tails.
        percent_to_cut_one_side = (100.0 - percentile) / 200.0
        max_idx = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)
        min_idx = np.searchsorted(cdf, percent_to_cut_one_side)
        self._calib_min = calib_bin_edges[min_idx].astype('float32')
        self._calib_max = calib_bin_edges[max_idx].astype('float32')
        # Never extend the range past the values actually observed.
        if self._calib_min < min_range:
            self._calib_min = min_range
        if self._calib_max > max_range:
            self._calib_max = max_range

    def clear(self):
        """Drop collected statistics and the computed range."""
        self._calib_min = None
        self._calib_max = None
        self.collector = None

    def method_name(self):
        return 'percentile'
class Appr(object):
    """Continual-learning trainer for a multi-head model.

    Trains one task head at a time with SGD, early stopping on validation
    loss and learning-rate decay on plateau. After each task it stores a
    frozen copy of the model and a diagonal Fisher estimate (EWC-style);
    the criterion penalizes drift from the previous model's weights.
    """

    def __init__(self, model, args, lr_min=0.0001, lr_factor=3, lr_patience=5, clipgrad=1000, lamb=0.01):
        self.model = model
        self.model_old = None  # frozen snapshot of the model after the previous task
        self.fisher = None  # diagonal Fisher information, keyed by parameter name
        self.nepochs = args.nepochs
        self.sbatch = args.sbatch
        self.lr = args.lr
        self.lr_min = lr_min  # stop annealing once lr falls below this
        self.lr_factor = lr_factor  # divide lr by this on validation plateau
        self.lr_patience = lr_patience  # epochs without improvement before decay
        self.clipgrad = clipgrad  # gradient-norm clipping threshold
        self.checkpoint = args.checkpoint  # directory for saved models
        self.ce = torch.nn.CrossEntropyLoss()
        self.optimizer = self._get_optimizer()
        self.lamb = lamb  # regularization strength in criterion()
        self.device = args.device
        return

    def _get_optimizer(self, lr=None):
        # Fresh SGD optimizer; defaults to the initial learning rate.
        if (lr is None):
            lr = self.lr
        return torch.optim.SGD(self.model.parameters(), lr=lr)

    def train(self, t, trainloader, valloader):
        """Train task head `t` with early stopping, restore the best
        weights, then update the Fisher estimate and the frozen old model.
        """
        best_loss = np.inf
        best_model = deepcopy(self.model.state_dict())
        lr = self.lr
        patience = self.lr_patience
        self.optimizer = self._get_optimizer(lr)
        for e in range(self.nepochs):
            self.train_epoch(t, trainloader)
            (train_loss, train_acc) = self.eval(t, trainloader)
            print('| Epoch {:3d}| Train: loss={:.3f}, acc={:5.1f}% |'.format((e + 1), train_loss, (100 * train_acc)), end='')
            (valid_loss, valid_acc) = self.eval(t, valloader)
            print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss, (100 * valid_acc)), end='')
            if (valid_loss < best_loss):
                # New best: checkpoint in memory and reset patience.
                best_loss = valid_loss
                best_model = deepcopy(self.model.state_dict())
                patience = self.lr_patience
                print(' *', end='')
            else:
                patience -= 1
                if (patience <= 0):
                    # Plateau: decay lr, rebuild optimizer; stop when lr too small.
                    lr /= self.lr_factor
                    print(' lr={:.1e}'.format(lr), end='')
                    if (lr < self.lr_min):
                        print()
                        break
                    patience = self.lr_patience
                    self.optimizer = self._get_optimizer(lr)
            print()
        # Restore the best validation weights before consolidation.
        self.model.load_state_dict(best_model)
        if (t == 0):
            self.fisher = fisher_matrix_diag(t, trainloader, self.model)
        else:
            fisher_new = fisher_matrix_diag(t, trainloader, self.model)
            # NOTE(review): `p = ...` rebinds the loop variable to a new
            # tensor, so this weighted average and the in-place `/=` below
            # never modify the actual model parameters — only the
            # `self.fisher[n] += fisher_new[n]` accumulation persists.
            # Likely `p.data` was intended; confirm against the reference.
            for ((n, p), (_, p_old)) in zip(self.model.named_parameters(), self.model_old.named_parameters()):
                p = ((fisher_new[n] * p) + (self.fisher[n] * p_old))
                self.fisher[n] += fisher_new[n]
                p /= ((self.fisher[n] == 0).float() + self.fisher[n])
        # Freeze a copy of the trained model for the next task's penalty.
        self.model_old = deepcopy(self.model)
        self.model_old.eval()
        freeze_model(self.model_old)
        return

    def train_epoch(self, t, loader):
        """One optimization pass over `loader` using output head `t`."""
        self.model.train()
        for (images, targets) in loader:
            (images, targets) = (images.to(self.device), targets.to(self.device))
            outputs = self.model.forward(images)
            output = outputs[t]  # model returns one output tensor per task head
            loss = self.criterion(t, output, targets)
            self.optimizer.zero_grad()
            loss.backward()
            # NOTE(review): torch.nn.utils.clip_grad_norm is deprecated and
            # removed in recent PyTorch; clip_grad_norm_ is the replacement.
            torch.nn.utils.clip_grad_norm(self.model.parameters(), self.clipgrad)
            self.optimizer.step()
        return

    def eval(self, t, loader):
        """Return (mean loss, accuracy) of head `t` over `loader`."""
        total_loss = 0
        total_acc = 0
        total_num = 0
        self.model.eval()
        with torch.no_grad():
            for (images, targets) in loader:
                n = images.shape[0]
                (images, targets) = (images.to(self.device), targets.to(self.device))
                outputs = self.model.forward(images)
                output = outputs[t]
                loss = self.criterion(t, output, targets)
                (_, pred) = output.max(1)
                hits = (pred == targets).float()
                # Accumulate loss weighted by batch size for a true mean.
                total_loss += (loss.item() * n)
                total_acc += hits.sum().item()
                total_num += n
        return ((total_loss / total_num), (total_acc / total_num))

    def criterion(self, t, output, targets):
        """Cross-entropy plus lamb-weighted penalty toward the old model.

        NOTE(review): the penalty here is a plain L2 distance — the stored
        Fisher values are not used in this term; verify this is intended.
        """
        loss_reg = 0
        if (t > 0):
            for (p, p_old) in zip(self.model.parameters(), self.model_old.parameters()):
                loss_reg += ((p - p_old).pow(2).sum() / 2)
        loss_ce = self.ce(output, targets)
        return (loss_ce + (self.lamb * loss_reg))

    def save_model(self, t):
        """Save the current model's state dict as model_<t>.pth.tar in the
        checkpoint directory."""
        torch.save({'model_state_dict': self.model.state_dict()}, os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(t)))
def get_user_topics(user_id):
    """Return the user's current batch of recommended topics and mark them
    as seen.

    If the current batch is empty or expired (older than 24h), the old
    suggestions are cleared, a fresh multileaved batch is generated, and
    the query is re-run.
    """
    connection = getDb()
    with closing(connection.cursor(dictionary=True)) as cursor:
        select_sql = 'SELECT tr.topic_id, t.topic\n FROM topic_recommendations tr INNER JOIN topics t\n ON t.topic_id = tr.topic_id \n LEFT JOIN user_topics ut \n ON ut.topic_id = tr.topic_id AND tr.user_id = ut.user_id\n WHERE tr.user_id = %s \n AND ut.state IS NULL \n AND tr.interleaving_batch = (\n SELECT max(interleaving_batch) \n FROM topic_recommendations\n WHERE user_id = %s \n AND interleaving_batch > \n DATE_SUB(%s, INTERVAL 24 HOUR))\n ORDER BY tr.interleaving_order DESC'
        cursor.execute(select_sql, (user_id, user_id, datetime.utcnow()))
        topics = cursor.fetchall()
        if not topics:
            # Batch exhausted or expired: rebuild recommendations, re-query.
            clear_suggested_user_topics(user_id, 'EXPIRED')
            multileave_topics.run(user_id)
            cursor.execute(select_sql, (user_id, user_id, datetime.utcnow()))
            topics = cursor.fetchall()
        # Stamp every returned topic as seen (only once per batch).
        seen_sql = 'update topic_recommendations set\n seen = %s where topic_id = %s\n and user_id = %s and interleaving_batch is\n not NULL and seen is NULL'
        seen_at = datetime.utcnow()
        seen_rows = [(seen_at, row['topic_id'], user_id) for row in topics]
        cursor.executemany(seen_sql, seen_rows)
        connection.commit()
        return topics
def create_solver(outfname, net_name, max_iter=10000, lr=0.0001, weight_decay=0.0005, snapshot_dir='snapshots', optimizer='Adam', solver_mode='GPU'):
    """Render the solver template with the given hyper-parameters and
    write the result to `outfname`.

    Args:
        outfname: destination path for the rendered solver file.
        net_name: value substituted for the _NET_NAME_ placeholder.
        max_iter, lr, weight_decay: numeric hyper-parameters (stringified).
        snapshot_dir, optimizer, solver_mode: remaining template fields.
    """
    # BUG FIX: the original `open(...).read()` leaked the file handle;
    # a context manager closes it deterministically.
    with open('templates/solver.txt', 'r') as template:
        txt = template.read()
    # Placeholder -> replacement table keeps the substitutions in one place.
    substitutions = {
        '_NET_NAME_': net_name,
        '_MAX_ITER_': str(max_iter),
        '_LR_': str(lr),
        '_WEIGHT_DECAY_': str(weight_decay),
        '_SNAPSHOT_DIR_': snapshot_dir,
        '_OPTIMIZER_': optimizer,
        '_SOLVER_MODE_': solver_mode,
    }
    for placeholder, value in substitutions.items():
        txt = txt.replace(placeholder, value)
    write_to_file(outfname, txt)
class InducingImages(inducing_variables.InducingVariables):
    """Inducing variables stored as full images.

    BUG FIX: `as_patches` and `as_filters` read `self.as_images` without
    calling it, which would pass a bound method to `tf.reshape`. These
    accessors were clearly written as properties; the stripped `@property`
    decorators are restored on all four views.
    """

    def __init__(self, images: TensorData, name: Optional[str] = None):
        super().__init__(name=name)
        # Trainable inducing images, cast to the default float dtype.
        self._images = Parameter(images, dtype=default_float())

    def __len__(self) -> int:
        # Number of inducing images.
        return self._images.shape[0]

    @property
    def Z(self) -> tf.Tensor:
        """Flattened view: one row per inducing image."""
        return tf.reshape(self._images, [len(self), -1])

    @property
    def as_patches(self) -> tf.Tensor:
        """Each image flattened into a single patch vector."""
        return tf.reshape(self.as_images, [len(self), -1])

    @property
    def as_filters(self) -> tf.Tensor:
        """Images with the batch axis moved last (filter layout)."""
        return move_axis(self.as_images, 0, -1)

    @property
    def as_images(self) -> tf.Tensor:
        """The raw parameter as a tensor, dtype preserved."""
        return tf.convert_to_tensor(self._images, dtype=self._images.dtype)
@_model
def efficientnet_cc_b0_4e(pretrained=False, **kwargs):
    """EfficientNet-CondConv-B0 with 4 experts (1.0 channel/depth multipliers).

    NOTE(review): the registration decorator appeared as a bare `_model`
    expression (a no-op); restored as `@_model` to match the stripped-
    decorator pattern seen elsewhere in this file — confirm the original
    decorator name.
    """
    model = _gen_efficientnet_condconv('efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
    return model
class NRDataSHMArrayReader(object):
    """Attaches to shared-memory arrays (images, camera poses, point-light
    positions) described by an NRDataSHMInfo and exposes them as SHMArray
    views owned by the writer process.
    """

    def __init__(self, shm_info: NRDataSHMInfo):
        self.total_image_num = shm_info.total_image_num
        self.num_image_per_split = shm_info.num_image_per_split
        self.camera = shm_info.camera
        (H, W) = (self.camera.H, self.camera.W)
        # Attach (not allocate): segments are addressed by the names the
        # writer published in shm_info.
        self.imgs = SHMArray((self.total_image_num, H, W, 3), dtype=np.float32, shm_name=shm_info.imgs_shm_name)
        self.poses = SHMArray((self.total_image_num, 4, 4), dtype=np.float32, shm_name=shm_info.poses_shm_name)
        self.pls = SHMArray((self.total_image_num, 3), dtype=np.float32, shm_name=shm_info.pls_shm_name)

    def get_shm_arrays(self) -> "tuple[SHMArray, SHMArray, SHMArray]":
        """Return the (images, poses, point_lights) arrays as a tuple."""
        return (self.imgs, self.poses, self.pls)

    def release_shm(self) -> None:
        """Close this process's handles to the shared-memory segments and
        drop the array references.

        NOTE(review): only close() is called, never unlink() — presumably
        the creating process owns segment destruction; confirm.
        """
        self.imgs.shm.close()
        del self.imgs
        self.poses.shm.close()
        del self.poses
        self.pls.shm.close()
        del self.pls
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.