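# Collected code snippets: dialogue-metric evaluation (greedy/extrema/average),
# SeqGAN-style discriminator data prep, seq2seq building blocks, G-Bert-style
# EHR modeling, and mmdet-style point-annotation tooling.
#
# The imports below are a best-effort reconstruction covering the standard
# library and common third-party names used throughout. Project-specific
# helpers (DPCorpus, DPDataLoader, GBERT_Predict, TSNE, get_n_params, t2n,
# metric_report, glorot, zeros, scatter_, softmax, add_self_loops,
# DataContainer, collate, mmcv/mmdet utilities, logger, VOCAB_SIZE,
# MIN_SEQ_LEN, MAX_SEQ_LEN, BATCH_SIZE, output_dir, model_name, ...) are
# assumed to be provided by their respective repositories.
import argparse
import copy
import inspect
import json
import os
import os.path as osp
import pickle
import random
import time
import warnings
from collections.abc import Mapping, Sequence
from functools import partial

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim import Adam
from torch.utils.data import (DataLoader, Dataset, RandomSampler,
                              SequentialSampler)
from torch.utils.data.dataloader import default_collate
from tqdm import tqdm, trange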
def greedy_score(fileone, filetwo, w2v):
    """Greedy matching score between two files of whitespace-tokenized
    responses (one response per line, aligned line-by-line)."""
    with open(fileone, 'r') as f1, open(filetwo, 'r') as f2:
        r1 = f1.readlines()
        r2 = f2.readlines()
    dim = w2v.layer1_size
    scores = []
    for i in range(len(r1)):
        tokens1 = r1[i].strip().split(' ')
        tokens2 = r2[i].strip().split(' ')
        # Stack the embeddings of the second sentence as columns of Y.
        y_cols = [w2v[tok].detach().cpu().numpy().reshape((dim, 1)) for tok in tokens2]
        if (len(tokens1) < 1) or (len(y_cols) < 1):
            scores.append(0)
            continue
        Y = np.hstack(y_cols)
        # Greedily match every token of the first sentence with its closest
        # (highest dot-product) token in the second sentence.
        o = 0.0
        for tok in tokens1:
            tmp = w2v[tok].detach().cpu().numpy().reshape((1, dim)).dot(Y)
            o += np.max(tmp)
        o /= float(len(tokens1))
        scores.append(o)
    return np.asarray(scores)
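# Example usage (a minimal sketch; the file names and the word2vec wrapper are
# hypothetical, and each file is assumed to hold one whitespace-tokenized
# response per line, aligned line-by-line):
#
#   per_line = greedy_score('ground_truth.txt', 'predictions.txt', w2v)
#   print(per_line.mean())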
def extrema_score(fileone, filetwo, w2v):
    """Vector-extrema score: cosine similarity between the element-wise
    extrema of the two sentences' word embeddings."""
    with open(fileone, 'r') as f1, open(filetwo, 'r') as f2:
        r1 = f1.readlines()
        r2 = f2.readlines()
    scores = []
    for i in range(len(r1)):
        tokens1 = r1[i].strip().split(' ')
        tokens2 = r2[i].strip().split(' ')
        X = [w2v[tok].detach().cpu().numpy() for tok in tokens1]
        Y = [w2v[tok].detach().cpu().numpy() for tok in tokens2]
        if np.linalg.norm(X) < 1e-11:
            continue
        if np.linalg.norm(Y) < 1e-11:
            scores.append(0)
            continue
        # Per dimension, keep whichever of min/max has the larger magnitude.
        xmax, xmin = np.max(X, 0), np.min(X, 0)
        X = np.where(np.abs(xmin) > xmax, xmin, xmax)
        ymax, ymin = np.max(Y, 0), np.min(Y, 0)
        Y = np.where(np.abs(ymin) > ymax, ymin, ymax)
        o = np.dot(X, Y.T) / np.linalg.norm(X) / np.linalg.norm(Y)
        scores.append(o)
    scores = np.asarray(scores)
    # Mean, 95% confidence half-width (1.96 * standard error), and std.
    return (np.mean(scores),
            1.96 * np.std(scores) / np.sqrt(float(len(scores))),
            np.std(scores))
def average(fileone, filetwo, w2v):
    """Embedding-average score: cosine similarity between the mean word
    embeddings of the two sentences."""
    with open(fileone, 'r') as f1, open(filetwo, 'r') as f2:
        r1 = f1.readlines()
        r2 = f2.readlines()
    dim = w2v.layer1_size
    scores = []
    for i in range(len(r1)):
        tokens1 = r1[i].strip().split(' ')
        tokens2 = r2[i].strip().split(' ')
        X = np.zeros((dim,))
        for tok in tokens1:
            X += w2v[tok].detach().cpu().numpy()
        Y = np.zeros((dim,))
        for tok in tokens2:
            Y += w2v[tok].detach().cpu().numpy()
        if np.linalg.norm(X) < 1e-11:
            continue
        if np.linalg.norm(Y) < 1e-11:
            scores.append(0)
            continue
        # Normalize to unit length; the cosine is then a plain dot product.
        X = X / np.linalg.norm(X)
        Y = Y / np.linalg.norm(Y)
        scores.append(np.dot(X, Y))
    scores = np.asarray(scores)
    # Mean, 95% confidence half-width (1.96 * standard error), and std.
    return (np.mean(scores),
            1.96 * np.std(scores) / np.sqrt(float(len(scores))),
            np.std(scores))
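# Example usage (a minimal sketch; same hypothetical inputs as above). Both
# extrema_score and average return (mean, 95% CI half-width, std) over lines:
#
#   mean, ci95, std = average('ground_truth.txt', 'predictions.txt', w2v)
#   mean, ci95, std = extrema_score('ground_truth.txt', 'predictions.txt', w2v)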
def prepare_discriminator_data(pos_samples, neg_samples, gpu=False):
    """
    Takes positive (target) samples, negative (generator) samples and
    prepares inp and target data for the discriminator.

    Inputs: pos_samples, neg_samples
        - pos_samples: pos_size x seq_len
        - neg_samples: neg_size x seq_len

    Returns: inp, target
        - inp: (pos_size + neg_size) x seq_len
        - target: pos_size + neg_size (boolean 1/0)
    """
inp = torch.cat((pos_samples, neg_samples), 0).type(torch.LongTensor)
target = torch.ones((pos_samples.size()[0] + neg_samples.size()[0]))
target[pos_samples.size()[0]:] = 0
perm = torch.randperm(target.size()[0])
target = target[perm]
inp = inp[perm]
inp = Variable(inp)
target = Variable(target)
if gpu:
inp = inp.cuda()
target = target.cuda()
return (inp, target)
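# Example usage (a minimal sketch with dummy data): 8 positive and 8 negative
# sequences of length 5, as would come from real data and a generator.
#
#   pos = torch.randint(0, 100, (8, 5))
#   neg = torch.randint(0, 100, (8, 5))
#   inp, target = prepare_discriminator_data(pos, neg, gpu=False)
#   assert inp.shape == (16, 5) and target.shape == (16,)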
def load_data(path='dataset.pickle'):
    """
    Load data set
    """
    if not os.path.isfile(path):
        corpus = DPCorpus(vocabulary_limit=VOCAB_SIZE)
        train_dataset = corpus.get_train_dataset(min_reply_length=MIN_SEQ_LEN, max_reply_length=MAX_SEQ_LEN)
        with open(path, 'wb') as handle:
            pickle.dump(train_dataset, handle, protocol=pickle.HIGHEST_PROTOCOL)
    else:
        with open(path, 'rb') as handle:
            train_dataset = pickle.load(handle)
    # The loader is built identically in both branches, so build it once here.
    return DPDataLoader(train_dataset, batch_size=BATCH_SIZE)
class ReplayMemory():
    """Fixed-capacity FIFO buffer of transitions with uniform sampling."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []

    def push(self, transition):
        if len(self.memory) == self.capacity:
            del self.memory[0]
        self.memory.append(transition)

    def push_batch(self, transitions):
        # Assumed semantics: push each transition of the batch individually,
        # evicting the oldest entries as the buffer fills.
        for transition in transitions:
            self.push(transition)

    def sample(self, batch_size):
        # Uniform sampling with replacement.
        random_ints = np.random.randint(0, len(self.memory), size=batch_size)
        return [self.memory[random_int] for random_int in random_ints]

    def __len__(self):
        return len(self.memory)
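# Example usage (a minimal sketch; the transition fields are placeholders):
#
#   memory = ReplayMemory(capacity=1000)
#   memory.push((state, action, reward, next_state))
#   batch = memory.sample(batch_size=32)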
class Attention(nn.Module):
    r"""
    Applies an attention mechanism on the output features from the decoder.

    .. math::
        \begin{array}{ll}
        x = context*output \\
        attn = exp(x_i) / sum_j exp(x_j) \\
        output = \tanh(w * (attn * context) + b * output)
        \end{array}

    Args:
        dim(int): The number of expected features in the output

    Inputs: output, context
        - **output** (batch, output_len, dimensions): tensor containing the output features from the decoder.
        - **context** (batch, input_len, dimensions): tensor containing features of the encoded input sequence.

    Outputs: output, attn
        - **output** (batch, output_len, dimensions): tensor containing the attended output features from the decoder.
        - **attn** (batch, output_len, input_len): tensor containing attention weights.

    Attributes:
        linear_out (torch.nn.Linear): applies a linear transformation to the incoming data: :math:`y = Ax + b`.
        mask (torch.Tensor, optional): applies a :math:`-inf` to the indices specified in the `Tensor`.

    Examples::

        >>> attention = seq2seq.models.Attention(256)
        >>> context = Variable(torch.randn(5, 3, 256))
        >>> output = Variable(torch.randn(5, 5, 256))
        >>> output, attn = attention(output, context)
    """
def __init__(self, dim):
super(Attention, self).__init__()
self.linear_out = nn.Linear((dim * 2), dim)
self.mask = None
    def set_mask(self, mask):
        """
        Sets indices to be masked

        Args:
            mask (torch.Tensor): tensor containing indices to be masked
        """
self.mask = mask
def forward(self, output, context):
batch_size = output.size(0)
hidden_size = output.size(2)
input_size = context.size(1)
attn = torch.bmm(output, context.transpose(1, 2))
if (self.mask is not None):
attn.data.masked_fill_(self.mask, (- float('inf')))
attn = F.softmax(attn.view((- 1), input_size), dim=1).view(batch_size, (- 1), input_size)
mix = torch.bmm(attn, context)
combined = torch.cat((mix, output), dim=2)
output = torch.tanh(self.linear_out(combined.view((- 1), (2 * hidden_size)))).view(batch_size, (- 1), hidden_size)
return (output, attn)
class BaseRNN(nn.Module):
    """
    Applies a multi-layer RNN to an input sequence.

    Note:
        Do not use this class directly, use one of the sub classes.

    Args:
        vocab_size (int): size of the vocabulary
        max_len (int): maximum allowed length for the sequence to be processed
        hidden_size (int): number of features in the hidden state `h`
        input_dropout_p (float): dropout probability for the input sequence
        dropout_p (float): dropout probability for the output sequence
        n_layers (int): number of recurrent layers
        rnn_cell (str): type of RNN cell (Eg. 'LSTM' , 'GRU')

    Inputs: ``*args``, ``**kwargs``
        - ``*args``: variable length argument list.
        - ``**kwargs``: arbitrary keyword arguments.

    Attributes:
        SYM_MASK: masking symbol
        SYM_EOS: end-of-sequence symbol
    """
SYM_MASK = 'MASK'
SYM_EOS = 'EOS'
def __init__(self, vocab_size, max_len, hidden_size, input_dropout_p, dropout_p, n_layers, rnn_cell):
super(BaseRNN, self).__init__()
self.vocab_size = vocab_size
self.max_len = max_len
self.hidden_size = hidden_size
self.n_layers = n_layers
self.input_dropout_p = input_dropout_p
self.input_dropout = nn.Dropout(p=input_dropout_p)
if (rnn_cell.lower() == 'lstm'):
self.rnn_cell = nn.LSTM
elif (rnn_cell.lower() == 'gru'):
self.rnn_cell = nn.GRU
else:
raise ValueError('Unsupported RNN Cell: {0}'.format(rnn_cell))
self.dropout_p = dropout_p
def forward(self, *args, **kwargs):
raise NotImplementedError()
class EncoderRNN(BaseRNN):
    """
    Applies a multi-layer RNN to an input sequence.

    Args:
        vocab_size (int): size of the vocabulary
        max_len (int): a maximum allowed length for the sequence to be processed
        hidden_size (int): the number of features in the hidden state `h`
        input_dropout_p (float, optional): dropout probability for the input sequence (default: 0)
        dropout_p (float, optional): dropout probability for the output sequence (default: 0)
        n_layers (int, optional): number of recurrent layers (default: 1)
        bidirectional (bool, optional): if True, becomes a bidirectional encoder (default: False)
        rnn_cell (str, optional): type of RNN cell (default: gru)
        variable_lengths (bool, optional): if use variable length RNN (default: False)
        embedding (torch.Tensor, optional): Pre-trained embedding. The size of the tensor has to match
            the size of the embedding parameter: (vocab_size, hidden_size). The embedding layer would be initialized
            with the tensor if provided (default: None).
        update_embedding (bool, optional): If the embedding should be updated during training (default: False).

    Inputs: inputs, input_lengths
        - **inputs**: list of sequences, whose length is the batch size and within which each sequence is a list of token IDs.
        - **input_lengths** (list of int, optional): list that contains the lengths of sequences
            in the mini-batch, it must be provided when using variable length RNN (default: `None`)

    Outputs: output, hidden
        - **output** (batch, seq_len, hidden_size): tensor containing the encoded features of the input sequence
        - **hidden** (num_layers * num_directions, batch, hidden_size): tensor containing the features in the hidden state `h`

    Examples::

        >>> encoder = EncoderRNN(input_vocab, max_seq_length, hidden_size)
        >>> output, hidden = encoder(input)
    """
def __init__(self, vocab_size, max_len, hidden_size, input_dropout_p=0, dropout_p=0, n_layers=1, bidirectional=False, rnn_cell='gru', variable_lengths=False, embedding=None, update_embedding=True):
super(EncoderRNN, self).__init__(vocab_size, max_len, hidden_size, input_dropout_p, dropout_p, n_layers, rnn_cell)
self.variable_lengths = variable_lengths
self.embedding = nn.Embedding(vocab_size, hidden_size)
if (embedding is not None):
self.embedding.weight = nn.Parameter(embedding)
self.embedding.weight.requires_grad = update_embedding
self.rnn = self.rnn_cell(hidden_size, hidden_size, n_layers, batch_first=True, bidirectional=bidirectional, dropout=dropout_p)
    def forward(self, input_var, input_lengths=None):
        """
        Applies a multi-layer RNN to an input sequence.

        Args:
            input_var (batch, seq_len): tensor containing the features of the input sequence.
            input_lengths (list of int, optional): A list that contains the lengths of sequences
                in the mini-batch

        Returns: output, hidden
            - **output** (batch, seq_len, hidden_size): variable containing the encoded features of the input sequence
            - **hidden** (num_layers * num_directions, batch, hidden_size): variable containing the features in the hidden state h
        """
embedded = self.embedding(input_var)
embedded = self.input_dropout(embedded)
if self.variable_lengths:
embedded = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first=True)
(output, hidden) = self.rnn(embedded)
if self.variable_lengths:
(output, _) = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
return (output, hidden)
class Seq2seq(nn.Module):
    """Standard sequence-to-sequence architecture with configurable encoder
    and decoder.

    Args:
        encoder (EncoderRNN): object of EncoderRNN
        decoder (DecoderRNN): object of DecoderRNN
        decode_function (func, optional): function to generate symbols from output hidden states (default: F.log_softmax)

    Inputs: input_variable, input_lengths, target_variable, teacher_forcing_ratio
        - **input_variable** (list, optional): list of sequences, whose length is the batch size and within which
            each sequence is a list of token IDs. This information is forwarded to the encoder.
        - **input_lengths** (list of int, optional): A list that contains the lengths of sequences
            in the mini-batch, it must be provided when using variable length RNN (default: `None`)
        - **target_variable** (list, optional): list of sequences, whose length is the batch size and within which
            each sequence is a list of token IDs. This information is forwarded to the decoder.
        - **teacher_forcing_ratio** (int, optional): The probability that teacher forcing will be used. A random number
            is drawn uniformly from 0-1 for every decoding token, and if the sample is smaller than the given value,
            teacher forcing would be used (default is 0)

    Outputs: decoder_outputs, decoder_hidden, ret_dict
        - **decoder_outputs** (batch): batch-length list of tensors with size (max_length, hidden_size) containing the
            outputs of the decoder.
        - **decoder_hidden** (num_layers * num_directions, batch, hidden_size): tensor containing the last hidden
            state of the decoder.
        - **ret_dict**: dictionary containing additional information as follows {*KEY_LENGTH* : list of integers
            representing lengths of output sequences, *KEY_SEQUENCE* : list of sequences, where each sequence is a list of
            predicted token IDs, *KEY_INPUT* : target outputs if provided for decoding, *KEY_ATTN_SCORE* : list of
            sequences, where each list is of attention weights }.
    """
def __init__(self, encoder, decoder, decode_function=F.log_softmax):
super(Seq2seq, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.decode_function = decode_function
def flatten_parameters(self):
self.encoder.rnn.flatten_parameters()
self.decoder.rnn.flatten_parameters()
def forward(self, input_variable, input_lengths=None, target_variable=None, teacher_forcing_ratio=0, sample=False):
(encoder_outputs, encoder_hidden) = self.encoder(input_variable, input_lengths)
result = self.decoder(inputs=target_variable, encoder_hidden=encoder_hidden, encoder_outputs=encoder_outputs, function=self.decode_function, teacher_forcing_ratio=teacher_forcing_ratio, sample=sample)
return result
def _remove_duplicate(input):
    # Deduplicate via a set; note this does not preserve element order.
    return list(set(input))
def build_stage_one_edges(res, graph_voc):
    """
    Connect consecutive nodes along each leaf-to-root path: an edge from each
    node's immediate ancestor down to the node itself.

    :param res: list of samples, each a path [leaf, ..., root] of words
    :param graph_voc: Voc mapping words to indices
    :return: edge_idx in the form [[row...], [col...]], e.g. [[1,2,3],[0,1,0]]
    """
edge_idx = []
for sample in res:
sample_idx = list(map((lambda x: graph_voc.word2idx[x]), sample))
for i in range((len(sample_idx) - 1)):
edge_idx.append((sample_idx[(i + 1)], sample_idx[i]))
edge_idx = _remove_duplicate(edge_idx)
row = list(map((lambda x: x[0]), edge_idx))
col = list(map((lambda x: x[1]), edge_idx))
return [row, col]
def build_stage_two_edges(res, graph_voc):
    """
    Connect each leaf code directly to every one of its ancestors.

    :param res: list of samples, each a path [leaf, ..., root] of words
    :param graph_voc: Voc mapping words to indices
    :return: edge_idx in the form [[row...], [col...]], e.g. [[1,2,3],[0,1,0]]
    """
edge_idx = []
for sample in res:
sample_idx = list(map((lambda x: graph_voc.word2idx[x]), sample))
edge_idx.extend([(sample_idx[0], sample_idx[i]) for i in range(1, len(sample_idx))])
edge_idx = _remove_duplicate(edge_idx)
row = list(map((lambda x: x[0]), edge_idx))
col = list(map((lambda x: x[1]), edge_idx))
return [row, col]
def build_cominbed_edges(res, graph_voc):
    """
    Union of the stage-one and stage-two edge sets.

    :param res: list of samples, each a path [leaf, ..., root] of words
    :param graph_voc: Voc mapping words to indices
    :return: edge_idx in the form [[row...], [col...]], e.g. [[1,2,3],[0,1,0]]
    """
edge_idx = []
for sample in res:
sample_idx = list(map((lambda x: graph_voc.word2idx[x]), sample))
for i in range((len(sample_idx) - 1)):
edge_idx.append((sample_idx[(i + 1)], sample_idx[i]))
edge_idx.extend([(sample_idx[0], sample_idx[i]) for i in range(1, len(sample_idx))])
edge_idx = _remove_duplicate(edge_idx)
row = list(map((lambda x: x[0]), edge_idx))
col = list(map((lambda x: x[1]), edge_idx))
return [row, col]
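# Toy walk-through (a sketch using the Voc class defined later in this file):
# for res = [['A01AB06', 'A01A', 'A01', 'A', 'atc_root']] and a Voc built from
# that path, build_stage_one_edges links each node's ancestor to the node
# itself along the chain, build_stage_two_edges links the leaf 'A01AB06'
# directly to every one of its ancestors, and build_cominbed_edges returns
# the union of the two edge sets.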
def expand_level2():
    """Expand the ICD-9 level-2 range strings (e.g. '001-009', 'V10-V19',
    'E810-E819') into a dict mapping each individual code prefix to the
    range it belongs to."""
level2 = ['001-009', '010-018', '020-027', '030-041', '042', '045-049', '050-059', '060-066', '070-079', '080-088', '090-099', '100-104', '110-118', '120-129', '130-136', '137-139', '140-149', '150-159', '160-165', '170-176', '176', '179-189', '190-199', '200-208', '209', '210-229', '230-234', '235-238', '239', '240-246', '249-259', '260-269', '270-279', '280-289', '290-294', '295-299', '300-316', '317-319', '320-327', '330-337', '338', '339', '340-349', '350-359', '360-379', '380-389', '390-392', '393-398', '401-405', '410-414', '415-417', '420-429', '430-438', '440-449', '451-459', '460-466', '470-478', '480-488', '490-496', '500-508', '510-519', '520-529', '530-539', '540-543', '550-553', '555-558', '560-569', '570-579', '580-589', '590-599', '600-608', '610-611', '614-616', '617-629', '630-639', '640-649', '650-659', '660-669', '670-677', '678-679', '680-686', '690-698', '700-709', '710-719', '720-724', '725-729', '730-739', '740-759', '760-763', '764-779', '780-789', '790-796', '797-799', '800-804', '805-809', '810-819', '820-829', '830-839', '840-848', '850-854', '860-869', '870-879', '880-887', '890-897', '900-904', '905-909', '910-919', '920-924', '925-929', '930-939', '940-949', '950-957', '958-959', '960-979', '980-989', '990-995', '996-999', 'V01-V91', 'V01-V09', 'V10-V19', 'V20-V29', 'V30-V39', 'V40-V49', 'V50-V59', 'V60-V69', 'V70-V82', 'V83-V84', 'V85', 'V86', 'V87', 'V88', 'V89', 'V90', 'V91', 'E000-E899', 'E000', 'E001-E030', 'E800-E807', 'E810-E819', 'E820-E825', 'E826-E829', 'E830-E838', 'E840-E845', 'E846-E849', 'E850-E858', 'E860-E869', 'E870-E876', 'E878-E879', 'E880-E888', 'E890-E899', 'E900-E909', 'E910-E915', 'E916-E928', 'E929', 'E930-E949', 'E950-E959', 'E960-E969', 'E970-E978', 'E980-E989', 'E990-E999']
level2_expand = {}
for i in level2:
tokens = i.split('-')
if (i[0] == 'V'):
if (len(tokens) == 1):
level2_expand[i] = i
else:
for j in range(int(tokens[0][1:]), (int(tokens[1][1:]) + 1)):
level2_expand[('V%02d' % j)] = i
elif (i[0] == 'E'):
if (len(tokens) == 1):
level2_expand[i] = i
else:
for j in range(int(tokens[0][1:]), (int(tokens[1][1:]) + 1)):
level2_expand[('E%03d' % j)] = i
elif (len(tokens) == 1):
level2_expand[i] = i
else:
for j in range(int(tokens[0]), (int(tokens[1]) + 1)):
level2_expand[('%03d' % j)] = i
return level2_expand
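# For example: expand_level2()['003'] == '001-009',
# expand_level2()['V15'] == 'V10-V19', expand_level2()['E915'] == 'E910-E915'
# (later, narrower ranges in the list overwrite the broad 'V01-V91' and
# 'E000-E899' entries).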
def build_icd9_tree(unique_codes):
    """Build a 4-level path for every ICD-9 code (code -> 3/4-char prefix ->
    level-2 range -> root); returns the per-code paths and the graph Voc."""
res = []
graph_voc = Voc()
root_node = 'icd9_root'
level3_dict = expand_level2()
for code in unique_codes:
level1 = code
level2 = (level1[:4] if (level1[0] == 'E') else level1[:3])
level3 = level3_dict[level2]
level4 = root_node
sample = [level1, level2, level3, level4]
graph_voc.add_sentence(sample)
res.append(sample)
return (res, graph_voc)
def build_atc_tree(unique_codes):
    """Build a 5-level path for every ATC code: the full code plus its
    4-, 3- and 1-character prefixes, then the root."""
res = []
graph_voc = Voc()
root_node = 'atc_root'
for code in unique_codes:
sample = (([code] + [code[:i] for i in [4, 3, 1]]) + [root_node])
graph_voc.add_sentence(sample)
res.append(sample)
return (res, graph_voc)
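# For example, the ATC code 'A01AB06' yields the path
# ['A01AB06', 'A01A', 'A01', 'A', 'atc_root'].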
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`."""

    def __init__(self, vocab_size_or_config_json_file, hidden_size=300,
                 num_hidden_layers=2, num_attention_heads=4,
                 intermediate_size=300, hidden_act='relu',
                 hidden_dropout_prob=0.4, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=1, type_vocab_size=2,
                 initializer_range=0.02, graph=False, graph_hidden_size=75,
                 graph_heads=4):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, 'r', encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for (key, value) in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.graph = graph
self.graph_hidden_size = graph_hidden_size
self.graph_heads = graph_heads
        else:
            raise ValueError('First argument must be either a vocabulary size (int) '
                             'or the path to a pretrained model config file (str)')
@classmethod
def from_dict(cls, json_object):
'Constructs a `BertConfig` from a Python dictionary of parameters.'
config = BertConfig(vocab_size_or_config_json_file=(- 1))
for (key, value) in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
'Constructs a `BertConfig` from a json file of parameters.'
with open(json_file, 'r', encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
'Serializes this instance to a Python dictionary.'
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
'Serializes this instance to a JSON string.'
return (json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n')
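# Example usage (a minimal sketch): round-trip a config through a dict.
#
#   config = BertConfig(vocab_size_or_config_json_file=3000, graph=True)
#   clone = BertConfig.from_dict(config.to_dict())
#   assert clone.hidden_size == config.hidden_size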
class OntologyEmbedding(nn.Module):
def __init__(self, voc, build_tree_func, in_channels=100, out_channels=20, heads=5):
super(OntologyEmbedding, self).__init__()
(res, graph_voc) = build_tree_func(list(voc.idx2word.values()))
stage_one_edges = build_stage_one_edges(res, graph_voc)
stage_two_edges = build_stage_two_edges(res, graph_voc)
self.edges1 = torch.tensor(stage_one_edges)
self.edges2 = torch.tensor(stage_two_edges)
self.graph_voc = graph_voc
assert (in_channels == (heads * out_channels))
self.g = GATConv(in_channels=in_channels, out_channels=out_channels, heads=heads)
num_nodes = len(graph_voc.word2idx)
self.embedding = nn.Parameter(torch.Tensor(num_nodes, in_channels))
self.idx_mapping = [self.graph_voc.word2idx[word] for word in voc.idx2word.values()]
self.init_params()
def get_all_graph_emb(self):
emb = self.embedding
emb = self.g(self.g(emb, self.edges1.to(emb.device)), self.edges2.to(emb.device))
return emb
    def forward(self):
        """Run two GAT stages over the ontology graph and return the node
        embeddings re-ordered to match the original vocabulary order."""
emb = self.embedding
emb = self.g(self.g(emb, self.edges1.to(emb.device)), self.edges2.to(emb.device))
return emb[self.idx_mapping]
def init_params(self):
glorot(self.embedding)
class MessagePassing(nn.Module):
    r"""Base class for creating message passing layers

    .. math::
        \mathbf{x}_i^{\prime} = \gamma_{\mathbf{\Theta}} \left( \mathbf{x}_i,
        \square_{j \in \mathcal{N}(i)} \, \phi_{\mathbf{\Theta}}
        \left(\mathbf{x}_i, \mathbf{x}_j,\mathbf{e}_{i,j}\right) \right),

    where :math:`\square` denotes a differentiable, permutation invariant
    function, *e.g.*, sum, mean or max, and :math:`\gamma_{\mathbf{\Theta}}`
    and :math:`\phi_{\mathbf{\Theta}}` denote differentiable functions such as
    MLPs.
    See `here <https://rusty1s.github.io/pytorch_geometric/build/html/notes/
    create_gnn.html>`__ for the accompanying tutorial.
    """
    def __init__(self, aggr='add'):
        super(MessagePassing, self).__init__()
        # getfullargspec replaces the removed inspect.getargspec; element [0]
        # is the list of argument names ([1:] skips `self`, [2:] also skips
        # the aggregated output passed to update()).
        self.message_args = inspect.getfullargspec(self.message)[0][1:]
        self.update_args = inspect.getfullargspec(self.update)[0][2:]
    def propagate(self, aggr, edge_index, **kwargs):
        """The initial call to start propagating messages.

        Takes in an aggregation scheme (:obj:`"add"`, :obj:`"mean"` or
        :obj:`"max"`), the edge indices, and all additional data which is
        needed to construct messages and to update node embeddings."""
assert (aggr in ['add', 'mean', 'max'])
kwargs['edge_index'] = edge_index
size = None
message_args = []
for arg in self.message_args:
if (arg[(- 2):] == '_i'):
tmp = kwargs[arg[:(- 2)]]
size = tmp.size(0)
message_args.append(tmp[edge_index[0]])
elif (arg[(- 2):] == '_j'):
tmp = kwargs[arg[:(- 2)]]
size = tmp.size(0)
message_args.append(tmp[edge_index[1]])
else:
message_args.append(kwargs[arg])
update_args = [kwargs[arg] for arg in self.update_args]
out = self.message(*message_args)
out = scatter_(aggr, out, edge_index[0], dim_size=size)
out = self.update(out, *update_args)
return out
    def message(self, x_j):
        r"""Constructs messages in analogy to :math:`\phi_{\mathbf{\Theta}}`
        for each edge in :math:`(i,j) \in \mathcal{E}`.
        Can take any argument which was initially passed to :meth:`propagate`.
        In addition, features can be lifted to the source node :math:`i` and
        target node :math:`j` by appending :obj:`_i` or :obj:`_j` to the
        variable name, *e.g.* :obj:`x_i` and :obj:`x_j`."""
return x_j
    def update(self, aggr_out):
        r"""Updates node embeddings in analogy to
        :math:`\gamma_{\mathbf{\Theta}}` for each node
        :math:`i \in \mathcal{V}`.
        Takes in the output of aggregation as first argument and any argument
        which was initially passed to :meth:`propagate`."""
return aggr_out
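# How the argument lifting in propagate() works (a sketch): if a subclass
# defines message(self, x_i, x_j), then propagate('add', edge_index, x=x)
# builds x_i = x[edge_index[0]] (the nodes that receive messages, since the
# aggregation scatters onto edge_index[0]) and x_j = x[edge_index[1]] (their
# neighbors) before calling message(), and finally runs update() on the
# aggregated output.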
class GATConv(MessagePassing):
    r"""The graph attentional operator from the `"Graph Attention Networks"
    <https://arxiv.org/abs/1710.10903>`_ paper

    .. math::
        \mathbf{x}^{\prime}_i = \alpha_{i,i}\mathbf{\Theta}\mathbf{x}_{i} +
        \sum_{j \in \mathcal{N}(i)} \alpha_{i,j}\mathbf{\Theta}\mathbf{x}_{j},

    where the attention coefficients :math:`\alpha_{i,j}` are computed as

    .. math::
        \alpha_{i,j} =
        \frac{
        \exp\left(\mathrm{LeakyReLU}\left(\mathbf{a}^{\top}
        [\mathbf{\Theta}\mathbf{x}_i \, \Vert \, \mathbf{\Theta}\mathbf{x}_j]
        \right)\right)}
        {\sum_{k \in \mathcal{N}(i) \cup \{ i \}}
        \exp\left(\mathrm{LeakyReLU}\left(\mathbf{a}^{\top}
        [\mathbf{\Theta}\mathbf{x}_i \, \Vert \, \mathbf{\Theta}\mathbf{x}_k]
        \right)\right)}.

    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample.
        heads (int, optional): Number of multi-head-attentions.
            (default: :obj:`1`)
        concat (bool, optional): If set to :obj:`False`, the multi-head
            attentions are averaged instead of concatenated.
            (default: :obj:`True`)
        negative_slope (float, optional): LeakyReLU angle of the negative
            slope. (default: :obj:`0.2`)
        dropout (float, optional): Dropout probability of the normalized
            attention coefficients which exposes each node to a stochastically
            sampled neighborhood during training. (default: :obj:`0`)
        bias (bool, optional): If set to :obj:`False`, the layer will not learn
            an additive bias. (default: :obj:`True`)
    """
def __init__(self, in_channels, out_channels, heads=1, concat=True, negative_slope=0.2, dropout=0, bias=True):
super(GATConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.heads = heads
self.concat = concat
self.negative_slope = negative_slope
self.dropout = dropout
self.weight = nn.Parameter(torch.Tensor(in_channels, (heads * out_channels)))
self.att = nn.Parameter(torch.Tensor(1, heads, (2 * out_channels)))
if (bias and concat):
self.bias = nn.Parameter(torch.Tensor((heads * out_channels)))
elif (bias and (not concat)):
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
glorot(self.weight)
glorot(self.att)
zeros(self.bias)
def forward(self, x, edge_index):
''
edge_index = add_self_loops(edge_index, num_nodes=x.size(0))
x = torch.mm(x, self.weight).view((- 1), self.heads, self.out_channels)
return self.propagate('add', edge_index, x=x, num_nodes=x.size(0))
    def message(self, x_i, x_j, edge_index, num_nodes):
        # Attention over each incoming edge, normalized per receiving node.
        alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
        alpha = F.leaky_relu(alpha, self.negative_slope)
        alpha = softmax(alpha, edge_index[0], num_nodes)
        # Only drop attention weights while the module is in training mode.
        alpha = F.dropout(alpha, p=self.dropout, training=self.training)
        return x_j * alpha.view(-1, self.heads, 1)
def update(self, aggr_out):
if (self.concat is True):
aggr_out = aggr_out.view((- 1), (self.heads * self.out_channels))
else:
aggr_out = aggr_out.mean(dim=1)
if (self.bias is not None):
aggr_out = (aggr_out + self.bias)
return aggr_out
def __repr__(self):
return '{}({}, {}, heads={})'.format(self.__class__.__name__, self.in_channels, self.out_channels, self.heads)
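# Example usage (a minimal sketch; assumes the add_self_loops / softmax /
# scatter_ helpers this file relies on are importable, e.g. from an early
# torch_geometric release):
#
#   conv = GATConv(in_channels=16, out_channels=4, heads=4)
#   x = torch.randn(5, 16)
#   edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])
#   out = conv(x, edge_index)   # -> [5, 16] since concat=True and 4 * 4 = 16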
class ConcatEmbeddings(nn.Module):
    """Concat rx and dx ontology embedding for easy access."""
def __init__(self, config, dx_voc, rx_voc):
super(ConcatEmbeddings, self).__init__()
self.special_embedding = nn.Parameter(torch.Tensor(((config.vocab_size - len(dx_voc.idx2word)) - len(rx_voc.idx2word)), config.hidden_size))
self.rx_embedding = OntologyEmbedding(rx_voc, build_atc_tree, config.hidden_size, config.graph_hidden_size, config.graph_heads)
self.dx_embedding = OntologyEmbedding(dx_voc, build_icd9_tree, config.hidden_size, config.graph_hidden_size, config.graph_heads)
self.init_params()
def forward(self, input_ids):
emb = torch.cat([self.special_embedding, self.rx_embedding(), self.dx_embedding()], dim=0)
return emb[input_ids]
def init_params(self):
glorot(self.special_embedding)
class FuseEmbeddings(nn.Module):
    """Construct the embeddings from ontology, patient info and type embeddings."""
def __init__(self, config, dx_voc, rx_voc):
super(FuseEmbeddings, self).__init__()
self.ontology_embedding = ConcatEmbeddings(config, dx_voc, rx_voc)
self.type_embedding = nn.Embedding(2, config.hidden_size)
    def forward(self, input_ids, input_types=None, input_positions=None):
        """
        :param input_ids: [B, L]
        :param input_types: [B, L]
        :param input_positions: unused
        :return: summed ontology + type embeddings
        """
ontology_embedding = (self.ontology_embedding(input_ids) + self.type_embedding(input_types))
return ontology_embedding
class Voc(object):
    """Minimal bidirectional word <-> index vocabulary."""

    def __init__(self):
self.idx2word = {}
self.word2idx = {}
def add_sentence(self, sentence):
for word in sentence:
if (word not in self.word2idx):
self.idx2word[len(self.word2idx)] = word
self.word2idx[word] = len(self.word2idx)
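# Example usage (a minimal sketch):
#
#   voc = Voc()
#   voc.add_sentence(['[PAD]', '[CLS]', 'A01AB06'])
#   assert voc.word2idx['[CLS]'] == 1 and voc.idx2word[2] == 'A01AB06'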
class EHRTokenizer(object):
'Runs end-to-end tokenization'
def __init__(self, data_dir, special_tokens=('[PAD]', '[CLS]', '[MASK]')):
self.vocab = Voc()
self.vocab.add_sentence(special_tokens)
self.rx_voc = self.add_vocab(os.path.join(data_dir, 'rx-vocab.txt'))
self.dx_voc = self.add_vocab(os.path.join(data_dir, 'dx-vocab.txt'))
self.rx_voc_multi = Voc()
self.dx_voc_multi = Voc()
with open(os.path.join(data_dir, 'rx-vocab-multi.txt'), 'r') as fin:
for code in fin:
self.rx_voc_multi.add_sentence([code.rstrip('\n')])
with open(os.path.join(data_dir, 'dx-vocab-multi.txt'), 'r') as fin:
for code in fin:
self.dx_voc_multi.add_sentence([code.rstrip('\n')])
def add_vocab(self, vocab_file):
voc = self.vocab
specific_voc = Voc()
with open(vocab_file, 'r') as fin:
for code in fin:
voc.add_sentence([code.rstrip('\n')])
specific_voc.add_sentence([code.rstrip('\n')])
return specific_voc
def convert_tokens_to_ids(self, tokens):
'Converts a sequence of tokens into ids using the vocab.'
ids = []
for token in tokens:
ids.append(self.vocab.word2idx[token])
return ids
def convert_ids_to_tokens(self, ids):
        """Converts a sequence of ids into wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.vocab.idx2word[i])
return tokens
class EHRDataset(Dataset):
def __init__(self, data_pd, tokenizer: EHRTokenizer, max_seq_len):
self.data_pd = data_pd
self.tokenizer = tokenizer
self.seq_len = max_seq_len
self.sample_counter = 0
        def transform_data(data):
            """
            :param data: raw data form
            :return: {subject_id: [adm, 2, codes]}
            """
records = {}
for subject_id in data['SUBJECT_ID'].unique():
item_df = data[(data['SUBJECT_ID'] == subject_id)]
patient = []
for (_, row) in item_df.iterrows():
admission = [list(row['ICD9_CODE']), list(row['ATC4'])]
patient.append(admission)
if (len(patient) < 2):
continue
records[subject_id] = patient
return records
self.records = transform_data(data_pd)
def __len__(self):
return len(self.records)
def __getitem__(self, item):
cur_id = self.sample_counter
self.sample_counter += 1
subject_id = list(self.records.keys())[item]
def fill_to_max(l, seq):
while (len(l) < seq):
l.append('[PAD]')
return l
        # extract input and output tokens
input_tokens = []
output_dx_tokens = []
output_rx_tokens = []
for (idx, adm) in enumerate(self.records[subject_id]):
input_tokens.extend((['[CLS]'] + fill_to_max(list(adm[0]), (self.seq_len - 1))))
input_tokens.extend((['[CLS]'] + fill_to_max(list(adm[1]), (self.seq_len - 1))))
if (idx != 0):
output_rx_tokens.append(list(adm[1]))
output_dx_tokens.append(list(adm[0]))
        # convert tokens to ids
input_ids = self.tokenizer.convert_tokens_to_ids(input_tokens)
output_dx_labels = []
output_rx_labels = []
dx_voc_size = len(self.tokenizer.dx_voc_multi.word2idx)
rx_voc_size = len(self.tokenizer.rx_voc_multi.word2idx)
for tokens in output_dx_tokens:
tmp_labels = np.zeros(dx_voc_size)
tmp_labels[list(map((lambda x: self.tokenizer.dx_voc_multi.word2idx[x]), tokens))] = 1
output_dx_labels.append(tmp_labels)
for tokens in output_rx_tokens:
tmp_labels = np.zeros(rx_voc_size)
tmp_labels[list(map((lambda x: self.tokenizer.rx_voc_multi.word2idx[x]), tokens))] = 1
output_rx_labels.append(tmp_labels)
if (cur_id < 5):
logger.info('*** Example ***')
logger.info(('subject_id: %s' % subject_id))
logger.info(('input tokens: %s' % ' '.join([str(x) for x in input_tokens])))
logger.info(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
assert (len(input_ids) == ((self.seq_len * 2) * len(self.records[subject_id])))
assert (len(output_dx_labels) == (len(self.records[subject_id]) - 1))
cur_tensors = (torch.tensor(input_ids).view((- 1), self.seq_len), torch.tensor(output_dx_labels, dtype=torch.float), torch.tensor(output_rx_labels, dtype=torch.float))
return cur_tensors
def load_dataset(args):
data_dir = args.data_dir
max_seq_len = args.max_seq_length
tokenizer = EHRTokenizer(data_dir)
data = pd.read_pickle(os.path.join(data_dir, 'data-multi-visit.pkl'))
ids_file = [os.path.join(data_dir, 'train-id.txt'), os.path.join(data_dir, 'eval-id.txt'), os.path.join(data_dir, 'test-id.txt')]
    def load_ids(data, file_name):
        """
        :param data: multi-visit data
        :param file_name: file with one subject id per line
        :return: raw data form
        """
ids = []
with open(file_name, 'r') as f:
for line in f:
ids.append(int(line.rstrip('\n')))
return data[data['SUBJECT_ID'].isin(ids)].reset_index(drop=True)
return (tokenizer, tuple(map((lambda x: EHRDataset(load_ids(data, x), tokenizer, max_seq_len)), ids_file)))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', default='GBert-predict', type=str, required=False, help='model name')
parser.add_argument('--data_dir', default='../data', type=str, required=False, help='The input data dir.')
parser.add_argument('--pretrain_dir', default='../saved/GBert-pretraining', type=str, required=False, help='pretraining model')
parser.add_argument('--train_file', default='data-multi-visit.pkl', type=str, required=False, help='training data file.')
parser.add_argument('--output_dir', default='../saved/', type=str, required=False, help='The output directory where the model checkpoints will be written.')
parser.add_argument('--use_pretrain', default=False, action='store_true', help='is use pretrain')
parser.add_argument('--graph', default=False, action='store_true', help='if use ontology embedding')
    parser.add_argument('--therhold', default=0.3, type=float, help='decision threshold for multi-label predictions.')
parser.add_argument('--max_seq_length', default=55, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
parser.add_argument('--do_train', default=False, action='store_true', help='Whether to run training.')
parser.add_argument('--do_eval', default=True, action='store_true', help='Whether to run on the dev set.')
parser.add_argument('--do_test', default=True, action='store_true', help='Whether to run on the test set.')
parser.add_argument('--train_batch_size', default=1, type=int, help='Total batch size for training.')
parser.add_argument('--learning_rate', default=0.0005, type=float, help='The initial learning rate for Adam.')
parser.add_argument('--num_train_epochs', default=20.0, type=float, help='Total number of training epochs to perform.')
parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
parser.add_argument('--seed', type=int, default=1203, help='random seed for initialization')
parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
args = parser.parse_args()
args.output_dir = os.path.join(args.output_dir, args.model_name)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
if ((not args.do_train) and (not args.do_eval)):
raise ValueError('At least one of `do_train` or `do_eval` must be True.')
os.makedirs(args.output_dir, exist_ok=True)
print('Loading Dataset')
(tokenizer, (train_dataset, eval_dataset, test_dataset)) = load_dataset(args)
train_dataloader = DataLoader(train_dataset, sampler=RandomSampler(train_dataset), batch_size=1)
eval_dataloader = DataLoader(eval_dataset, sampler=SequentialSampler(eval_dataset), batch_size=1)
test_dataloader = DataLoader(test_dataset, sampler=SequentialSampler(test_dataset), batch_size=1)
print(('Loading Model: ' + args.model_name))
if args.use_pretrain:
logger.info('Use Pretraining model')
model = GBERT_Predict.from_pretrained(args.pretrain_dir, tokenizer=tokenizer)
else:
config = BertConfig(vocab_size_or_config_json_file=len(tokenizer.vocab.word2idx))
config.graph = args.graph
model = GBERT_Predict(config, tokenizer)
logger.info(('# of model parameters: ' + str(get_n_params(model))))
model.to(device)
model_to_save = (model.module if hasattr(model, 'module') else model)
rx_output_model_file = os.path.join(args.output_dir, 'pytorch_model.bin')
optimizer = Adam(model.parameters(), lr=args.learning_rate)
global_step = 0
if args.do_train:
writer = SummaryWriter(args.output_dir)
logger.info('***** Running training *****')
logger.info(' Num examples = %d', len(train_dataset))
logger.info(' Batch size = %d', 1)
(dx_acc_best, rx_acc_best) = (0, 0)
acc_name = 'prauc'
dx_history = {'prauc': []}
rx_history = {'prauc': []}
for _ in trange(int(args.num_train_epochs), desc='Epoch'):
print('')
tr_loss = 0
(nb_tr_examples, nb_tr_steps) = (0, 0)
prog_iter = tqdm(train_dataloader, leave=False, desc='Training')
model.train()
for (_, batch) in enumerate(prog_iter):
batch = tuple((t.to(device) for t in batch))
(input_ids, dx_labels, rx_labels) = batch
(input_ids, dx_labels, rx_labels) = (input_ids.squeeze(dim=0), dx_labels.squeeze(dim=0), rx_labels.squeeze(dim=0))
(loss, rx_logits) = model(input_ids, dx_labels=dx_labels, rx_labels=rx_labels, epoch=global_step)
loss.backward()
tr_loss += loss.item()
nb_tr_examples += 1
nb_tr_steps += 1
prog_iter.set_postfix(loss=('%.4f' % (tr_loss / nb_tr_steps)))
optimizer.step()
optimizer.zero_grad()
writer.add_scalar('train/loss', (tr_loss / nb_tr_steps), global_step)
global_step += 1
if args.do_eval:
print('')
logger.info('***** Running eval *****')
model.eval()
dx_y_preds = []
dx_y_trues = []
rx_y_preds = []
rx_y_trues = []
for eval_input in tqdm(eval_dataloader, desc='Evaluating'):
eval_input = tuple((t.to(device) for t in eval_input))
(input_ids, dx_labels, rx_labels) = eval_input
(input_ids, dx_labels, rx_labels) = (input_ids.squeeze(), dx_labels.squeeze(), rx_labels.squeeze(dim=0))
with torch.no_grad():
(loss, rx_logits) = model(input_ids, dx_labels=dx_labels, rx_labels=rx_labels)
rx_y_preds.append(t2n(torch.sigmoid(rx_logits)))
rx_y_trues.append(t2n(rx_labels))
print('')
rx_acc_container = metric_report(np.concatenate(rx_y_preds, axis=0), np.concatenate(rx_y_trues, axis=0), args.therhold)
for (k, v) in rx_acc_container.items():
writer.add_scalar('eval/{}'.format(k), v, global_step)
if (rx_acc_container[acc_name] > rx_acc_best):
rx_acc_best = rx_acc_container[acc_name]
torch.save(model_to_save.state_dict(), rx_output_model_file)
with open(os.path.join(args.output_dir, 'bert_config.json'), 'w', encoding='utf-8') as fout:
fout.write(model.config.to_json_string())
if args.do_test:
logger.info('***** Running test *****')
logger.info(' Num examples = %d', len(test_dataset))
logger.info(' Batch size = %d', 1)
def test(task=0):
model_state_dict = torch.load(rx_output_model_file)
model.load_state_dict(model_state_dict)
model.to(device)
model.eval()
y_preds = []
y_trues = []
for test_input in tqdm(test_dataloader, desc='Testing'):
test_input = tuple((t.to(device) for t in test_input))
(input_ids, dx_labels, rx_labels) = test_input
(input_ids, dx_labels, rx_labels) = (input_ids.squeeze(), dx_labels.squeeze(), rx_labels.squeeze(dim=0))
with torch.no_grad():
(loss, rx_logits) = model(input_ids, dx_labels=dx_labels, rx_labels=rx_labels)
y_preds.append(t2n(torch.sigmoid(rx_logits)))
y_trues.append(t2n(rx_labels))
print('')
acc_container = metric_report(np.concatenate(y_preds, axis=0), np.concatenate(y_trues, axis=0), args.therhold)
if args.do_train:
for (k, v) in acc_container.items():
writer.add_scalar('test/{}'.format(k), v, 0)
return acc_container
test(task=0)
# A second variant of EHRTokenizer (used by the embedding-export utilities
# below); it redefines the class above but adds the rx_singe2multi mapping.
class EHRTokenizer(object):
    """Runs end-to-end tokenization"""
def __init__(self, data_dir, special_tokens=('[PAD]', '[CLS]', '[MASK]')):
self.vocab = Voc()
self.vocab.add_sentence(special_tokens)
self.rx_voc = self.add_vocab(os.path.join(data_dir, 'rx-vocab.txt'))
self.dx_voc = self.add_vocab(os.path.join(data_dir, 'dx-vocab.txt'))
self.rx_singe2multi = []
with open(os.path.join(data_dir, 'rx-vocab-multi.txt'), 'r') as fin:
for code in fin:
self.rx_singe2multi.append(self.rx_voc.word2idx[code.rstrip('\n')])
def add_vocab(self, vocab_file):
voc = self.vocab
specific_voc = Voc()
with open(vocab_file, 'r') as fin:
for code in fin:
voc.add_sentence([code.rstrip('\n')])
specific_voc.add_sentence([code.rstrip('\n')])
return specific_voc
def convert_tokens_to_ids(self, tokens):
'Converts a sequence of tokens into ids using the vocab.'
ids = []
for token in tokens:
ids.append(self.vocab.word2idx[token])
return ids
def convert_ids_to_tokens(self, ids):
        """Converts a sequence of ids into wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.vocab.idx2word[i])
return tokens
def save():
    # NOTE: model_name and output_dir are assumed to be module-level globals
    # defined elsewhere in the original script.
    tokenizer = EHRTokenizer(data_dir='../data')
logger.info('Use Pretraining model')
model = TSNE.from_pretrained(model_name, dx_voc=tokenizer.dx_voc, rx_voc=tokenizer.rx_voc)
model(output_dir=output_dir)
logger.info(('# of model parameters: ' + str(get_n_params(model))))
def generate_meta(build_tree_func, task, output_path='emb-meta.tsv'):
tokenizer = EHRTokenizer(data_dir='../data')
voc = (tokenizer.dx_voc if (task == 0) else tokenizer.rx_voc)
(res, graph_voc) = build_tree_func(list(voc.idx2word.values()))
level_dict = {}
for row in res:
for (level, item) in enumerate(row):
level_dict[item] = level
with open(os.path.join(output_dir, (('dx-' if (task == 0) else 'rx-') + output_path)), 'w') as fout:
fout.write('name\tlevel\n')
for (word, _) in graph_voc.word2idx.items():
fout.write('{}\t{}\n'.format(word, str(level_dict[word])))
def generate_meta_for_not_graph(task, output_path='emb-meta.tsv'):
tokenizer = EHRTokenizer(data_dir='../data')
voc = (tokenizer.dx_voc if (task == 0) else tokenizer.rx_voc)
with open(os.path.join(output_dir, (('dx-' if (task == 0) else 'rx-') + output_path)), 'w') as fout:
for (word, _) in voc.word2idx.items():
fout.write('{}\n'.format(word))
def multi_collate_fn(batch, samples_per_gpu=1):
    """Puts each data field into a tensor/DataContainer with outer dimension
    batch size. This is mainly used in query_support dataloader. The main
    difference with the :func:`collate_fn` in mmcv is it can process
    list[list[DataContainer]].

    Extend default_collate to add support for
    :type:`~mmcv.parallel.DataContainer`. There are 3 cases.

    1. cpu_only = True, e.g., meta data.
    2. cpu_only = False, stack = True, e.g., images tensors.
    3. cpu_only = False, stack = False, e.g., gt bboxes.

    Args:
        batch (list[list[:obj:`mmcv.parallel.DataContainer`]] |
            list[:obj:`mmcv.parallel.DataContainer`]): Data of
            single batch.
        samples_per_gpu (int): The number of samples of single GPU.
    """
    if not isinstance(batch, Sequence):
        # Report type(batch): a non-sequence need not be a tensor, so it may
        # not have a .dtype attribute at all.
        raise TypeError(f'{type(batch)} is not supported.')
if isinstance(batch[0], Sequence):
samples_per_gpu = (len(batch[0]) * samples_per_gpu)
batch = sum(batch, [])
if isinstance(batch[0], DataContainer):
stacked = []
if batch[0].cpu_only:
for i in range(0, len(batch), samples_per_gpu):
stacked.append([sample.data for sample in batch[i:(i + samples_per_gpu)]])
return DataContainer(stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
elif batch[0].stack:
for i in range(0, len(batch), samples_per_gpu):
assert isinstance(batch[i].data, torch.Tensor)
if (batch[i].pad_dims is not None):
ndim = batch[i].dim()
assert (ndim > batch[i].pad_dims)
max_shape = [0 for _ in range(batch[i].pad_dims)]
for dim in range(1, (batch[i].pad_dims + 1)):
max_shape[(dim - 1)] = batch[i].size((- dim))
for sample in batch[i:(i + samples_per_gpu)]:
for dim in range(0, (ndim - batch[i].pad_dims)):
assert (batch[i].size(dim) == sample.size(dim))
for dim in range(1, (batch[i].pad_dims + 1)):
max_shape[(dim - 1)] = max(max_shape[(dim - 1)], sample.size((- dim)))
padded_samples = []
for sample in batch[i:(i + samples_per_gpu)]:
pad = [0 for _ in range((batch[i].pad_dims * 2))]
for dim in range(1, (batch[i].pad_dims + 1)):
pad[((2 * dim) - 1)] = (max_shape[(dim - 1)] - sample.size((- dim)))
padded_samples.append(F.pad(sample.data, pad, value=sample.padding_value))
stacked.append(default_collate(padded_samples))
elif (batch[i].pad_dims is None):
stacked.append(default_collate([sample.data for sample in batch[i:(i + samples_per_gpu)]]))
else:
raise ValueError('pad_dims should be either None or integers (1-3)')
else:
for i in range(0, len(batch), samples_per_gpu):
stacked.append([sample.data for sample in batch[i:(i + samples_per_gpu)]])
return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
elif isinstance(batch[0], Sequence):
transposed = zip(*batch)
return [collate(samples, samples_per_gpu) for samples in transposed]
elif isinstance(batch[0], Mapping):
return {key: collate([d[key] for d in batch], samples_per_gpu) for key in batch[0]}
else:
return default_collate(batch)
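# Example usage (a minimal sketch): plain tensors fall through to
# default_collate and are simply stacked.
#
#   batch = [torch.zeros(3), torch.ones(3)]
#   out = multi_collate_fn(batch, samples_per_gpu=2)   # tensor of shape [2, 3]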
def build_point_dataloader(dataset, samples_per_gpu, workers_per_gpu,
                           num_gpus=1, dist=True, shuffle=True, seed=None,
                           **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Number of training samples on each GPU, i.e.,
            batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        kwargs: any keyword argument to be used to initialize DataLoader

    Returns:
        DataLoader: A PyTorch dataloader.
    """
(rank, world_size) = get_dist_info()
if dist:
if shuffle:
sampler = DistributedGroupSampler(dataset, samples_per_gpu, world_size, rank, seed=seed)
else:
sampler = DistributedSampler(dataset, world_size, rank, shuffle=False, seed=seed)
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
sampler = (GroupSampler(dataset, samples_per_gpu) if shuffle else None)
batch_size = (num_gpus * samples_per_gpu)
num_workers = (num_gpus * workers_per_gpu)
init_fn = (partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if (seed is not None) else None)
data_loader = DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=num_workers, collate_fn=partial(multi_collate_fn, samples_per_gpu=samples_per_gpu), pin_memory=False, worker_init_fn=init_fn, **kwargs)
return data_loader
class PointGenerator(object):
    """Augment COCO-style annotations with a single point per instance:
    sampled from the instance mask when it is non-empty, otherwise drawn
    uniformly from the bounding box."""

    def __init__(self, ann_file):
self.ann_file = ann_file
self.coco = COCO(ann_file)
self.seed = 0
def generate_points(self):
save_json = dict()
save_json['images'] = self.coco.dataset['images']
save_json['annotations'] = []
annotations = self.coco.dataset['annotations']
save_json['categories'] = self.coco.dataset['categories']
id_info = dict()
for img_info in self.coco.dataset['images']:
id_info[img_info['id']] = img_info
prog_bar = mmcv.ProgressBar(len(annotations))
with local_numpy_seed(self.seed):
for ann in annotations:
prog_bar.update()
img_info = id_info[ann['image_id']]
segm = ann.get('segmentation', None)
if isinstance(segm, list):
rles = maskUtils.frPyObjects(segm, img_info['height'], img_info['width'])
rle = maskUtils.merge(rles)
elif isinstance(segm['counts'], list):
rle = maskUtils.frPyObjects(segm, img_info['height'], img_info['width'])
else:
rle = segm
mask = maskUtils.decode(rle)
if (mask.sum() > 0):
(ys, xs) = np.nonzero(mask)
point_idx = np.random.randint(len(xs))
x1 = int(xs[point_idx])
y1 = int(ys[point_idx])
ann['point'] = [x1, y1, x1, y1]
else:
(x1, y1, w, h) = ann['bbox']
x1 = np.random.uniform(x1, (x1 + w))
y1 = np.random.uniform(y1, (y1 + h))
ann['point'] = [x1, y1, x1, y1]
save_json['annotations'].append(ann)
mmcv.mkdir_or_exist('./point_ann/')
ann_name = self.ann_file.split('/')[(- 1)]
with open(f'./point_ann/{ann_name}', 'w') as f:
json.dump(save_json, f)
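# Example usage (a minimal sketch; the annotation path is hypothetical):
#
#   PointGenerator('annotations/instances_train.json').generate_points()
#   # writes ./point_ann/instances_train.json with a 'point' field per ann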
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--ceph', action='store_true', help='whether to load data through a ceph storage backend')
    parser.add_argument('--vis', action='store_true', help='whether to enable result visualization in the model')
parser.add_argument('--work-dir', help='the directory to save the file containing evaluation metrics')
parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument('--fuse-conv-bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increase the inference speed')
    parser.add_argument('--format-only', action='store_true', help='Format the output results without performing evaluation. It is useful when you want to format the result to a specific format and submit it to the test server')
parser.add_argument('--eval', type=str, nargs='+', help='evaluation metrics, which depends on the dataset, e.g., "bbox", "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--show-dir', help='directory where painted images will be saved')
parser.add_argument('--show-score-thr', type=float, default=0.3, help='score threshold (default: 0.3)')
parser.add_argument('--gpu-collect', action='store_true', help='whether to use gpu to collect results.')
parser.add_argument('--tmpdir', help='tmp directory used for collecting results from multiple workers, available when gpu-collect is not specified')
parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
parser.add_argument('--options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function (deprecate), change to --eval-options instead.')
parser.add_argument('--eval-options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if ('LOCAL_RANK' not in os.environ):
os.environ['LOCAL_RANK'] = str(args.local_rank)
if (args.options and args.eval_options):
raise ValueError('--options and --eval-options cannot be both specified, --options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
def main():
args = parse_args()
assert (args.out or args.eval or args.format_only or args.show or args.show_dir), 'Please specify at least one operation (save/eval/format/show the results / save the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"'
if (args.eval and args.format_only):
raise ValueError('--eval and --format_only cannot be both specified')
if ((args.out is not None) and (not args.out.endswith(('.pkl', '.pickle')))):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        # Values passed via --cfg-options are eval'ed as Python literals
        # before being merged into the config.
        for (k, v) in args.cfg_options.items():
            args.cfg_options[k] = eval(v)
        cfg.merge_from_dict(args.cfg_options)
if args.vis:
cfg.model.vis = True
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
if cfg.model.get('neck'):
if isinstance(cfg.model.neck, list):
for neck_cfg in cfg.model.neck:
if neck_cfg.get('rfp_backbone'):
if neck_cfg.rfp_backbone.get('pretrained'):
neck_cfg.rfp_backbone.pretrained = None
elif cfg.model.neck.get('rfp_backbone'):
if cfg.model.neck.rfp_backbone.get('pretrained'):
cfg.model.neck.rfp_backbone.pretrained = None
samples_per_gpu = 1
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if (samples_per_gpu > 1):
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
samples_per_gpu = max([ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
if (samples_per_gpu > 1):
for ds_cfg in cfg.data.test:
ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
if (args.launcher == 'none'):
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
(rank, _) = get_dist_info()
if ((args.work_dir is not None) and (rank == 0)):
mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')
dataset = build_dataset(cfg.data.test)
data_loader = build_point_dataloader(dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if (fp16_cfg is not None):
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
if ('CLASSES' in checkpoint.get('meta', {})):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if (not distributed):
model = MMDataParallel(model, device_ids=[0])
outputs = pointdet_single_gpu_test(model, data_loader, args.show, args.show_dir, args.show_score_thr)
else:
model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
outputs = pointdet_multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
(rank, _) = get_dist_info()
if (rank == 0):
if args.out:
            print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = ({} if (args.eval_options is None) else args.eval_options)
if args.format_only:
outputs = [item[2] for item in outputs]
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
for key in ['interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule']:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
metric = dataset.evaluate(outputs, **eval_kwargs)
print(metric)
metric_dict = dict(config=args.config, metric=metric)
if ((args.work_dir is not None) and (rank == 0)):
mmcv.dump(metric_dict, json_file)
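# A minimal invocation sketch for this test entry point (file names and the
# metric are illustrative assumptions, not taken from this repo):
#   python test.py configs/example.py work_dirs/latest.pth \
#       --eval bbox --out results.pkl --show-dir vis/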
|
def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
logger = get_root_logger(log_level=cfg.log_level)
dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
if ('imgs_per_gpu' in cfg.data):
logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. Please use "samples_per_gpu" instead')
if ('samples_per_gpu' in cfg.data):
logger.warning(f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and "samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"={cfg.data.imgs_per_gpu} is used in this experiment')
else:
logger.warning(f'Automatically set "samples_per_gpu"="imgs_per_gpu"={cfg.data.imgs_per_gpu} in this experiment')
cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
data_loaders = [build_point_dataloader(ds, cfg.data.samples_per_gpu, cfg.data.workers_per_gpu, len(cfg.gpu_ids), dist=distributed, seed=cfg.seed) for ds in dataset]
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
else:
model = MMDataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
optimizer = build_optimizer(model, cfg.optimizer)
if ('runner' not in cfg):
cfg.runner = {'type': 'EpochBasedRunner', 'max_epochs': cfg.total_epochs}
warnings.warn('config is now expected to have a `runner` section, please set `runner` in your config.', UserWarning)
elif ('total_epochs' in cfg):
assert (cfg.total_epochs == cfg.runner.max_epochs)
runner = build_runner(cfg.runner, default_args=dict(model=model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta))
runner.timestamp = timestamp
fp16_cfg = cfg.get('fp16', None)
if (fp16_cfg is not None):
optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
elif (distributed and ('type' not in cfg.optimizer_config)):
optimizer_config = OptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None))
if distributed:
if isinstance(runner, EpochBasedRunner):
runner.register_hook(DistSamplerSeedHook())
if validate:
val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
if (val_samples_per_gpu > 1):
cfg.data.val.pipeline = replace_ImageToTensor(cfg.data.val.pipeline)
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(val_dataset, samples_per_gpu=val_samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = (cfg.runner['type'] != 'IterBasedRunner')
eval_hook = (PointdetDistEvalHook if distributed else PointdetEvalHook)
runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW')
if cfg.get('custom_hooks', None):
custom_hooks = cfg.custom_hooks
assert isinstance(custom_hooks, list), f'custom_hooks expect list type, but got {type(custom_hooks)}'
for hook_cfg in cfg.custom_hooks:
assert isinstance(hook_cfg, dict), f'Each item in custom_hooks expects dict type, but got {type(hook_cfg)}'
hook_cfg = hook_cfg.copy()
priority = hook_cfg.pop('priority', 'NORMAL')
hook = build_from_cfg(hook_cfg, HOOKS)
runner.register_hook(hook, priority=priority)
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow)
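# Sketch of a `custom_hooks` config entry consumed by the loop above (the
# hook type and priority are illustrative assumptions):
#   custom_hooks = [dict(type='NumClassCheckHook', priority='VERY_LOW')]
# Each dict is copied, its 'priority' popped (default 'NORMAL'), and the rest
# built via build_from_cfg(hook_cfg, HOOKS) before registration on the runner.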
|
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument('--resume-from', help='the checkpoint file to resume from')
parser.add_argument('--no-validate', action='store_true', help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)')
group_gpus.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use (only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument('--port', type=int, default=20001, help='port used to set up distributed training')
parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
parser.add_argument('--options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file (deprecate), change to --cfg-options instead.')
parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if ('LOCAL_RANK' not in os.environ):
os.environ['LOCAL_RANK'] = str(args.local_rank)
if (args.options and args.cfg_options):
raise ValueError('--options and --cfg-options cannot be both specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
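# Example of the --cfg-options syntax parsed by DictAction above (keys are
# illustrative; any dotted config path works):
#   python train.py configs/example.py \
#       --cfg-options data.samples_per_gpu=2 optimizer.lr=0.01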
|
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if (args.cfg_options is not None):
for (k, v) in args.cfg_options.items():
args.cfg_options[k] = (eval(v) if isinstance(v, str) else v)
cfg.merge_from_dict(args.cfg_options)
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
if (args.work_dir is not None):
cfg.work_dir = args.work_dir
elif (cfg.get('work_dir', None) is None):
cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
if (args.resume_from is not None):
cfg.resume_from = args.resume_from
if (args.gpu_ids is not None):
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = (range(1) if (args.gpus is None) else range(args.gpus))
if (args.launcher == 'none'):
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
(_, world_size) = get_dist_info()
cfg.gpu_ids = range(world_size)
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
meta = dict()
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for (k, v) in env_info_dict.items()])
dash_line = (('-' * 60) + '\n')
logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
if (args.seed is not None):
logger.info(f'Set random seed to {args.seed}, deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_detector(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
if (len(cfg.workflow) == 2):
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if (cfg.checkpoint_config is not None):
cfg.checkpoint_config.meta = dict(mmdet_version=(__version__ + get_git_hash()[:7]), CLASSES=datasets[0].CLASSES)
model.CLASSES = datasets[0].CLASSES
train_detector(model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta)
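# Distributed launch sketch for this training entry point (the module path
# and GPU count are illustrative assumptions):
#   python -m torch.distributed.launch --nproc_per_node=8 \
#       train.py configs/example.py --launcher pytorch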
|
class BMAML():
def __init__(self, dim_input, dim_output, dim_hidden=32, num_layers=4, num_particles=2, max_test_step=5):
self.dim_input = dim_input
self.dim_output = dim_output
self.dim_hidden = dim_hidden
self.num_layers = num_layers
self.num_particles = num_particles
self.follow_lr = tf.placeholder_with_default(input=FLAGS.follow_lr, name='follow_lr', shape=[])
self.leader_lr = tf.placeholder_with_default(input=FLAGS.leader_lr, name='leader_lr', shape=[])
self.meta_lr = tf.placeholder_with_default(input=FLAGS.meta_lr, name='meta_lr', shape=[])
self.max_test_step = max_test_step
self.bnn = BNN(dim_input=self.dim_input, dim_output=self.dim_output, dim_hidden=self.dim_hidden, num_layers=self.num_layers, is_bnn=True)
self.construct_network_weights = self.bnn.construct_network_weights
self.forward_network = self.bnn.forward_network
self.follow_x = tf.placeholder(dtype=tf.float32, name='follow_x')
self.follow_y = tf.placeholder(dtype=tf.float32, name='follow_y')
self.leader_x = tf.placeholder(dtype=tf.float32, name='leader_x')
self.leader_y = tf.placeholder(dtype=tf.float32, name='leader_y')
self.valid_x = tf.placeholder(dtype=tf.float32, name='valid_x')
self.valid_y = tf.placeholder(dtype=tf.float32, name='valid_y')
self.W_network_particles = None
def construct_model(self, is_training=True):
print('start model construction')
with tf.variable_scope('model', reuse=None) as training_scope:
if (is_training or (self.W_network_particles is None)):
self.W_network_particles = [self.construct_network_weights(scope='network{}'.format(p_idx)) for p_idx in range(self.num_particles)]
else:
training_scope.reuse_variables()
if is_training:
max_follow_step = FLAGS.follow_step
else:
max_follow_step = max(FLAGS.follow_step, self.max_test_step)
def fast_learn_one_task(inputs):
[follow_x, leader_x, valid_x, follow_y, leader_y, valid_y] = inputs
WW_follow = [OrderedDict(zip(W_dic.keys(), W_dic.values())) for W_dic in self.W_network_particles]
[step_follow_weight_var, step_follow_data_var, step_follow_train_llik, step_follow_valid_llik, step_follow_train_loss, step_follow_valid_loss, step_follow_train_pred, step_follow_valid_pred, step_follow_weight_lprior, step_follow_gamma_lprior, step_follow_lambda_lprior, step_follow_lpost, step_follow_kernel_h, WW_follow] = self.update_particle(train_x=follow_x, train_y=follow_y, valid_x=valid_x, valid_y=valid_y, WW=WW_follow, num_updates=max_follow_step, lr=self.follow_lr)
WW_leader = [OrderedDict(zip(W_dic.keys(), W_dic.values())) for W_dic in WW_follow]
[step_leader_weight_var, step_leader_data_var, step_leader_train_llik, step_leader_valid_llik, step_leader_train_loss, step_leader_valid_loss, step_leader_train_pred, step_leader_valid_pred, step_leader_weight_lprior, step_leader_gamma_lprior, step_leader_lambda_lprior, step_leader_lpost, step_leader_kernel_h, WW_leader] = self.update_particle(train_x=leader_x, train_y=leader_y, valid_x=valid_x, valid_y=valid_y, WW=WW_leader, num_updates=FLAGS.leader_step, lr=self.leader_lr)
meta_loss = []
for p_idx in range(self.num_particles):
p_dist_list = []
for name in WW_leader[p_idx].keys():
if ('log' in name):
continue
p_dist = tf.square((WW_follow[p_idx][name] - tf.stop_gradient(WW_leader[p_idx][name])))
p_dist = tf.reduce_sum(p_dist)
p_dist_list.append(p_dist)
meta_loss.append(tf.reduce_sum(p_dist_list))
meta_loss = tf.reduce_sum(meta_loss)
return [step_follow_weight_lprior, step_follow_gamma_lprior, step_follow_lambda_lprior, step_follow_train_llik, step_follow_valid_llik, step_follow_train_loss, step_follow_valid_loss, step_follow_train_pred, step_follow_valid_pred, step_follow_weight_var, step_follow_data_var, step_follow_lpost, step_follow_kernel_h, step_leader_weight_lprior, step_leader_gamma_lprior, step_leader_lambda_lprior, step_leader_train_llik, step_leader_valid_llik, step_leader_train_loss, step_leader_valid_loss, step_leader_train_pred, step_leader_valid_pred, step_leader_weight_var, step_leader_data_var, step_leader_lpost, step_leader_kernel_h, meta_loss]
out_dtype = [([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * (max_follow_step + 1)), ([tf.float32] * max_follow_step), ([tf.float32] * max_follow_step), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * (FLAGS.leader_step + 1)), ([tf.float32] * FLAGS.leader_step), ([tf.float32] * FLAGS.leader_step), tf.float32]
result = tf.map_fn(fast_learn_one_task, elems=[self.follow_x, self.leader_x, self.valid_x, self.follow_y, self.leader_y, self.valid_y], dtype=out_dtype, parallel_iterations=FLAGS.num_tasks)
[full_step_follow_weight_lprior, full_step_follow_gamma_lprior, full_step_follow_lambda_lprior, full_step_follow_train_llik, full_step_follow_valid_llik, full_step_follow_train_loss, full_step_follow_valid_loss, full_step_follow_train_pred, full_step_follow_valid_pred, full_step_follow_weight_var, full_step_follow_data_var, full_step_follow_lpost, full_step_follow_kernel_h, full_step_leader_weight_lprior, full_step_leader_gamma_lprior, full_step_leader_lambda_lprior, full_step_leader_train_llik, full_step_leader_valid_llik, full_step_leader_train_loss, full_step_leader_valid_loss, full_step_leader_train_pred, full_step_leader_valid_pred, full_step_leader_weight_var, full_step_leader_data_var, full_step_leader_lpost, full_step_leader_kernel_h, full_meta_loss] = result
if is_training:
self.total_follow_weight_lprior = [tf.reduce_mean(full_step_follow_weight_lprior[j]) for j in range((FLAGS.follow_step + 1))]
self.total_follow_gamma_lprior = [tf.reduce_mean(full_step_follow_gamma_lprior[j]) for j in range((FLAGS.follow_step + 1))]
self.total_follow_lambda_lprior = [tf.reduce_mean(full_step_follow_lambda_lprior[j]) for j in range((FLAGS.follow_step + 1))]
self.total_follow_train_llik = [tf.reduce_mean(full_step_follow_train_llik[j]) for j in range((FLAGS.follow_step + 1))]
self.total_follow_valid_llik = [tf.reduce_mean(full_step_follow_valid_llik[j]) for j in range((FLAGS.follow_step + 1))]
self.total_follow_train_loss = [tf.reduce_mean(full_step_follow_train_loss[j]) for j in range((FLAGS.follow_step + 1))]
self.total_follow_valid_loss = [tf.reduce_mean(full_step_follow_valid_loss[j]) for j in range((FLAGS.follow_step + 1))]
self.total_follow_weight_var = [tf.reduce_mean(full_step_follow_weight_var[j]) for j in range((FLAGS.follow_step + 1))]
self.total_follow_data_var = [tf.reduce_mean(full_step_follow_data_var[j]) for j in range((FLAGS.follow_step + 1))]
self.total_follow_lpost = [tf.reduce_mean(full_step_follow_lpost[j]) for j in range(FLAGS.follow_step)]
self.total_follow_kernel_h = [tf.reduce_mean(full_step_follow_kernel_h[j]) for j in range(FLAGS.follow_step)]
self.total_leader_weight_lprior = [tf.reduce_mean(full_step_leader_weight_lprior[j]) for j in range((FLAGS.leader_step + 1))]
self.total_leader_gamma_lprior = [tf.reduce_mean(full_step_leader_gamma_lprior[j]) for j in range((FLAGS.leader_step + 1))]
self.total_leader_lambda_lprior = [tf.reduce_mean(full_step_leader_lambda_lprior[j]) for j in range((FLAGS.leader_step + 1))]
self.total_leader_train_llik = [tf.reduce_mean(full_step_leader_train_llik[j]) for j in range((FLAGS.leader_step + 1))]
self.total_leader_valid_llik = [tf.reduce_mean(full_step_leader_valid_llik[j]) for j in range((FLAGS.leader_step + 1))]
self.total_leader_train_loss = [tf.reduce_mean(full_step_leader_train_loss[j]) for j in range((FLAGS.leader_step + 1))]
self.total_leader_valid_loss = [tf.reduce_mean(full_step_leader_valid_loss[j]) for j in range((FLAGS.leader_step + 1))]
self.total_leader_weight_var = [tf.reduce_mean(full_step_leader_weight_var[j]) for j in range((FLAGS.leader_step + 1))]
self.total_leader_data_var = [tf.reduce_mean(full_step_leader_data_var[j]) for j in range((FLAGS.leader_step + 1))]
self.total_leader_lpost = [tf.reduce_mean(full_step_leader_lpost[j]) for j in range(FLAGS.leader_step)]
self.total_leader_kernel_h = [tf.reduce_mean(full_step_leader_kernel_h[j]) for j in range(FLAGS.leader_step)]
self.total_meta_loss = tf.reduce_mean(full_meta_loss)
self.total_train_z_list = full_step_follow_train_pred
self.total_valid_z_list = full_step_follow_valid_pred
update_params_list = []
update_params_name = []
for p_idx in range(self.num_particles):
for name in self.W_network_particles[0].keys():
update_params_name.append([p_idx, name])
update_params_list.append(self.W_network_particles[p_idx][name])
optimizer = tf.train.AdamOptimizer(learning_rate=self.meta_lr)
gv_list = optimizer.compute_gradients(loss=self.total_meta_loss, var_list=update_params_list)
if (FLAGS.out_grad_clip > 0):
gv_list = [(tf.clip_by_value(grad, (- FLAGS.out_grad_clip), FLAGS.out_grad_clip), var) for (grad, var) in gv_list]
self.metatrain_op = optimizer.apply_gradients(gv_list)
else:
self.eval_train_llik = [tf.reduce_mean(full_step_follow_train_llik[j]) for j in range((max_follow_step + 1))]
self.eval_train_loss = [tf.reduce_mean(full_step_follow_train_loss[j]) for j in range((max_follow_step + 1))]
self.eval_valid_llik = [tf.reduce_mean(full_step_follow_valid_llik[j]) for j in range((max_follow_step + 1))]
self.eval_valid_loss = [tf.reduce_mean(full_step_follow_valid_loss[j]) for j in range((max_follow_step + 1))]
self.eval_train_z_list = full_step_follow_train_pred
self.eval_valid_z_list = full_step_follow_valid_pred
print('end of model construction')
def kernel(self, particle_tensor, h=(- 1)):
euclidean_dists = tf_utils.pdist(particle_tensor)
pairwise_dists = (tf_utils.squareform(euclidean_dists) ** 2)
if (h == (- 1)):
if (FLAGS.kernel == 'org'):
mean_dist = tf_utils.median(pairwise_dists)
h = (mean_dist / math.log(self.num_particles))
h = tf.stop_gradient(h)
elif (FLAGS.kernel == 'med'):
mean_dist = (tf_utils.median(euclidean_dists) ** 2)
h = (mean_dist / math.log(self.num_particles))
h = tf.stop_gradient(h)
else:
mean_dist = (tf.reduce_mean(euclidean_dists) ** 2)
h = (mean_dist / math.log(self.num_particles))
kernel_matrix = tf.exp(((- pairwise_dists) / h))
kernel_sum = tf.reduce_sum(kernel_matrix, axis=1, keep_dims=True)
grad_kernel = (- tf.matmul(kernel_matrix, particle_tensor))
grad_kernel += (particle_tensor * kernel_sum)
grad_kernel /= h
return (kernel_matrix, grad_kernel, h)
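# Note: kernel() implements the RBF kernel used by SVGD,
#   k(x_i, x_j) = exp(-||x_i - x_j||^2 / h),
# with h chosen by a median/mean heuristic (h = med^2 / log M). Row i of
# grad_kernel equals sum_j k(x_i, x_j) * (x_i - x_j) / h, i.e.
# sum_j grad_{x_j} k(x_j, x_i) up to the constant factor 2, which is
# absorbed into the learning rate of the particle update.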
def diclist2tensor(self, WW):
list_m = []
for Wm_dic in WW:
W_vec = tf.concat([tf.reshape(ww, [(- 1)]) for ww in Wm_dic.values()], axis=0)
list_m.append(W_vec)
tensor = tf.stack(list_m)
return tensor
def tensor2diclist(self, tensor):
return [self.bnn.vec2dic(tensor[m]) for m in range(self.num_particles)]
def update_particle(self, train_x, train_y, valid_x, valid_y, WW, num_updates, lr):
step_weight_lprior = ([None] * (num_updates + 1))
step_lambda_lprior = ([None] * (num_updates + 1))
step_gamma_lprior = ([None] * (num_updates + 1))
step_train_llik = ([None] * (num_updates + 1))
step_valid_llik = ([None] * (num_updates + 1))
step_train_loss = ([None] * (num_updates + 1))
step_valid_loss = ([None] * (num_updates + 1))
step_train_pred = ([None] * (num_updates + 1))
step_valid_pred = ([None] * (num_updates + 1))
step_weight_var = ([None] * (num_updates + 1))
step_data_var = ([None] * (num_updates + 1))
step_kernel_h = ([None] * num_updates)
step_lpost = ([None] * num_updates)
for s_idx in range((num_updates + 1)):
train_z_list = []
valid_z_list = []
train_llik_list = []
valid_llik_list = []
weight_lprior_list = []
lambda_lprior_list = []
gamma_lprior_list = []
weight_var_list = []
data_var_list = []
for p_idx in range(self.num_particles):
train_z = self.forward_network(x=train_x, W_dict=WW[p_idx])
valid_z = self.forward_network(x=valid_x, W_dict=WW[p_idx])
train_llik_list.append(self.bnn.log_likelihood_data(predict_y=train_z, target_y=train_y, log_gamma=WW[p_idx]['log_gamma']))
valid_llik_list.append(self.bnn.log_likelihood_data(predict_y=valid_z, target_y=valid_y, log_gamma=WW[p_idx]['log_gamma']))
train_z_list.append(train_z)
valid_z_list.append(valid_z)
(weight_lprior, gamma_lprior, lambda_lprior) = self.bnn.log_prior_weight(W_dict=WW[p_idx])
weight_lprior_list.append(weight_lprior)
lambda_lprior_list.append(lambda_lprior)
gamma_lprior_list.append(gamma_lprior)
weight_var_list.append(tf.reciprocal(tf.exp(WW[p_idx]['log_lambda'])))
data_var_list.append(tf.reciprocal(tf.exp(WW[p_idx]['log_gamma'])))
if (s_idx < num_updates):
WW_tensor = self.diclist2tensor(WW=WW)
dWW = []
for p_idx in range(self.num_particles):
lpost = weight_lprior_list[p_idx]
lpost += lambda_lprior_list[p_idx]
lpost += gamma_lprior_list[p_idx]
lpost += tf.reduce_sum(train_llik_list[p_idx])
dWp = tf.gradients(ys=lpost, xs=list(WW[p_idx].values()))
if (p_idx == 0):
step_lpost[s_idx] = []
step_lpost[s_idx].append(lpost)
if FLAGS.stop_grad:
dWp = [tf.stop_gradient(grad) for grad in dWp]
dWW.append(OrderedDict(zip(WW[p_idx].keys(), dWp)))
step_lpost[s_idx] = tf.reduce_mean(step_lpost[s_idx])
dWW_tensor = self.diclist2tensor(WW=dWW)
[kernel_mat, grad_kernel, kernel_h] = self.kernel(particle_tensor=WW_tensor)
dWW_tensor = tf.divide((tf.matmul(kernel_mat, dWW_tensor) + grad_kernel), self.num_particles)
step_kernel_h[s_idx] = kernel_h
dWW = self.tensor2diclist(tensor=dWW_tensor)
for p_idx in range(self.num_particles):
param_names = []
param_vals = []
for key in list(WW[p_idx].keys()):
if (FLAGS.in_grad_clip > 0):
grad = tf.clip_by_value(dWW[p_idx][key], (- FLAGS.in_grad_clip), FLAGS.in_grad_clip)
else:
grad = dWW[p_idx][key]
param_names.append(key)
if ('log' in key):
param_vals.append((WW[p_idx][key] + ((FLAGS.lambda_lr * lr) * grad)))
else:
param_vals.append((WW[p_idx][key] + (lr * grad)))
WW[p_idx] = OrderedDict(zip(param_names, param_vals))
train_z = tf.reduce_mean(train_z_list, 0)
valid_z = tf.reduce_mean(valid_z_list, 0)
step_weight_lprior[s_idx] = tf.reduce_mean(weight_lprior_list)
step_gamma_lprior[s_idx] = tf.reduce_mean(gamma_lprior_list)
step_lambda_lprior[s_idx] = tf.reduce_mean(lambda_lprior_list)
step_train_llik[s_idx] = tf.reduce_mean([tf.reduce_mean(train_llik) for train_llik in train_llik_list])
step_valid_llik[s_idx] = tf.reduce_mean([tf.reduce_mean(valid_llik) for valid_llik in valid_llik_list])
step_train_loss[s_idx] = tf.reduce_mean(tf.square((train_z - train_y)))
step_valid_loss[s_idx] = tf.reduce_mean(tf.square((valid_z - valid_y)))
step_train_pred[s_idx] = tf.concat([tf.expand_dims(train_z, 0) for train_z in train_z_list], axis=0)
step_valid_pred[s_idx] = tf.concat([tf.expand_dims(valid_z, 0) for valid_z in valid_z_list], axis=0)
step_weight_var[s_idx] = tf.reduce_mean(weight_var_list)
step_data_var[s_idx] = tf.reduce_mean(data_var_list)
return [step_weight_var, step_data_var, step_train_llik, step_valid_llik, step_train_loss, step_valid_loss, step_train_pred, step_valid_pred, step_weight_lprior, step_gamma_lprior, step_lambda_lprior, step_lpost, step_kernel_h, WW]
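# update_particle() above performs SVGD ascent on the log-posterior: with
# particles W_1..W_M and step size lr,
#   W_i <- W_i + lr * phi(W_i),
#   phi(W_i) = (1/M) * sum_j [ k(W_j, W_i) * grad_{W_j} log p(W_j | D)
#                              + grad_{W_j} k(W_j, W_i) ],
# which is exactly dWW_tensor = (kernel_mat @ dWW_tensor + grad_kernel) / M
# computed above; the log-variance parameters ('log_lambda', 'log_gamma')
# use the scaled rate lambda_lr * lr.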
|
def train(model, dataset, saver, sess, config_str):
experiment_dir = ((FLAGS.logdir + '/') + config_str)
train_writer = tf.summary.FileWriter(experiment_dir, sess.graph)
print('Done initializing, starting training.')
num_iters_per_epoch = int((FLAGS.train_total_num_tasks / FLAGS.num_tasks))
if (not FLAGS.finite):
num_iters_per_epoch = 1
follow_lpost = []
follow_weight_lprior = []
follow_gamma_lprior = []
follow_lambda_lprior = []
follow_train_llik = []
follow_valid_llik = []
follow_train_loss = []
follow_valid_loss = []
follow_weight_var = []
follow_data_var = []
follow_kernel_h = []
leader_lpost = []
leader_weight_lprior = []
leader_gamma_lprior = []
leader_lambda_lprior = []
leader_train_llik = []
leader_valid_llik = []
leader_train_loss = []
leader_valid_loss = []
leader_weight_var = []
leader_data_var = []
leader_kernel_h = []
meta_loss = []
test_itr_list = []
test_train_loss_list = []
test_valid_loss_list = []
best_test_loss = 1000.0
best_test_iter = 0
itr = 0
for e_idx in range(FLAGS.num_epochs):
for b_idx in range(num_iters_per_epoch):
itr += 1
[follow_x, leader_x, valid_x, follow_y, leader_y, valid_y] = dataset.generate_batch(is_training=True, batch_idx=None, inc_follow=True)
meta_lr = (FLAGS.meta_lr * (FLAGS.decay_lr ** (float((itr - 1)) / float(((FLAGS.num_epochs * num_iters_per_epoch) / 100)))))
feed_in = OrderedDict()
feed_in[model.meta_lr] = meta_lr
feed_in[model.follow_x] = follow_x
feed_in[model.follow_y] = follow_y
feed_in[model.leader_x] = leader_x
feed_in[model.leader_y] = leader_y
feed_in[model.valid_x] = valid_x
feed_in[model.valid_y] = valid_y
fetch_out = [model.metatrain_op, model.total_follow_lpost, model.total_follow_weight_lprior, model.total_follow_gamma_lprior, model.total_follow_lambda_lprior, model.total_follow_train_llik, model.total_follow_valid_llik, model.total_follow_train_loss, model.total_follow_valid_loss, model.total_follow_weight_var, model.total_follow_data_var, model.total_follow_kernel_h, model.total_leader_lpost, model.total_leader_weight_lprior, model.total_leader_gamma_lprior, model.total_leader_lambda_lprior, model.total_leader_train_llik, model.total_leader_valid_llik, model.total_leader_train_loss, model.total_leader_valid_loss, model.total_leader_weight_var, model.total_leader_data_var, model.total_leader_kernel_h, model.total_meta_loss]
result = sess.run(fetch_out, feed_in)[1:]
follow_lpost.append(result[0])
follow_weight_lprior.append(result[1])
follow_gamma_lprior.append(result[2])
follow_lambda_lprior.append(result[3])
follow_train_llik.append(result[4])
follow_valid_llik.append(result[5])
follow_train_loss.append(result[6])
follow_valid_loss.append(result[7])
follow_weight_var.append(result[8])
follow_data_var.append(result[9])
follow_kernel_h.append(result[10])
leader_lpost.append(result[11])
leader_weight_lprior.append(result[12])
leader_gamma_lprior.append(result[13])
leader_lambda_lprior.append(result[14])
leader_train_llik.append(result[15])
leader_valid_llik.append(result[16])
leader_train_loss.append(result[17])
leader_valid_loss.append(result[18])
leader_weight_var.append(result[19])
leader_data_var.append(result[20])
leader_kernel_h.append(result[21])
meta_loss.append(result[22])
if ((itr % PRINT_INTERVAL) == 0):
follow_lpost = np.stack(follow_lpost).mean(axis=0)
follow_weight_lprior = np.stack(follow_weight_lprior).mean(axis=0)
follow_gamma_lprior = np.stack(follow_gamma_lprior).mean(axis=0)
follow_lambda_lprior = np.stack(follow_lambda_lprior).mean(axis=0)
follow_train_llik = np.stack(follow_train_llik).mean(axis=0)
follow_valid_llik = np.stack(follow_valid_llik).mean(axis=0)
follow_train_loss = np.stack(follow_train_loss).mean(axis=0)
follow_valid_loss = np.stack(follow_valid_loss).mean(axis=0)
follow_weight_var = np.stack(follow_weight_var).mean(axis=0)
follow_data_var = np.stack(follow_data_var).mean(axis=0)
follow_kernel_h = np.stack(follow_kernel_h).mean(axis=0)
leader_lpost = np.stack(leader_lpost).mean(axis=0)
leader_weight_lprior = np.stack(leader_weight_lprior).mean(axis=0)
leader_gamma_lprior = np.stack(leader_gamma_lprior).mean(axis=0)
leader_lambda_lprior = np.stack(leader_lambda_lprior).mean(axis=0)
leader_train_llik = np.stack(leader_train_llik).mean(axis=0)
leader_valid_llik = np.stack(leader_valid_llik).mean(axis=0)
leader_train_loss = np.stack(leader_train_loss).mean(axis=0)
leader_valid_loss = np.stack(leader_valid_loss).mean(axis=0)
leader_weight_var = np.stack(leader_weight_var).mean(axis=0)
leader_data_var = np.stack(leader_data_var).mean(axis=0)
leader_kernel_h = np.stack(leader_kernel_h).mean(axis=0)
meta_loss = np.stack(meta_loss).mean(axis=0)
print('======================================')
print('exp: ', config_str)
print('epoch: ', e_idx, ' total iter: ', itr)
print('--------------------------------------')
print('follower')
print('--------------------------------------')
print('log-posterior: ', follow_lpost)
print('weight-log-prior: ', follow_weight_lprior)
print('gamma-log-prior: ', follow_gamma_lprior)
print('lambda-log-prior: ', follow_lambda_lprior)
print('train_llik: ', follow_train_llik)
print('valid_llik: ', follow_valid_llik)
print('train_loss: ', follow_train_loss)
print('valid_loss: ', follow_valid_loss)
print('- - - - - - - - - - - - - - - - - - - ')
print('data var: ', follow_data_var)
print('weight var: ', follow_weight_var)
print('kernel_h: ', follow_kernel_h)
print('--------------------------------------')
print('leader')
print('--------------------------------------')
print('log-posterior: ', leader_lpost)
print('weight-log-prior: ', leader_weight_lprior)
print('gamma-log-prior: ', leader_gamma_lprior)
print('lambda-log-prior: ', leader_lambda_lprior)
print('train_llik: ', leader_train_llik)
print('valid_llik: ', leader_valid_llik)
print('train_loss: ', leader_train_loss)
print('valid_loss: ', leader_valid_loss)
print('- - - - - - - - - - - - - - - - - - - ')
print('data var: ', leader_data_var)
print('weight var: ', leader_weight_var)
print('kernel_h: ', leader_kernel_h)
print('--------------------------------------')
print('meta_loss: ', meta_loss)
print('meta_lr: ', meta_lr)
print('--------------------------------------')
print('best_test_loss: ', best_test_loss, '({})'.format(best_test_iter))
follow_lpost = []
follow_weight_lprior = []
follow_gamma_lprior = []
follow_lambda_lprior = []
follow_train_llik = []
follow_valid_llik = []
follow_train_loss = []
follow_valid_loss = []
follow_weight_var = []
follow_data_var = []
follow_kernel_h = []
leader_lpost = []
leader_weight_lprior = []
leader_gamma_lprior = []
leader_lambda_lprior = []
leader_train_llik = []
leader_valid_llik = []
leader_train_loss = []
leader_valid_loss = []
leader_weight_var = []
leader_data_var = []
leader_kernel_h = []
meta_loss = []
if ((itr % TEST_PRINT_INTERVAL) == 0):
eval_train_llik_list = []
eval_valid_llik_list = []
eval_train_loss_list = []
eval_valid_loss_list = []
fetch_out = [model.eval_train_llik[:(FLAGS.follow_step + 1)], model.eval_valid_llik[:(FLAGS.follow_step + 1)], model.eval_train_loss[:(FLAGS.follow_step + 1)], model.eval_valid_loss[:(FLAGS.follow_step + 1)]]
for i in range(int((FLAGS.test_total_num_tasks / FLAGS.num_tasks))):
[follow_x, _, valid_x, follow_y, _, valid_y] = dataset.generate_batch(is_training=False, batch_idx=(i * FLAGS.num_tasks), inc_follow=True)
feed_in = OrderedDict()
feed_in[model.follow_x] = follow_x
feed_in[model.follow_y] = follow_y
feed_in[model.valid_x] = valid_x
feed_in[model.valid_y] = valid_y
result = sess.run(fetch_out, feed_in)
eval_train_llik_list.append(result[0])
eval_valid_llik_list.append(result[1])
eval_train_loss_list.append(result[2])
eval_valid_loss_list.append(result[3])
eval_train_llik = np.stack(eval_train_llik_list).mean(axis=0)
eval_valid_llik = np.stack(eval_valid_llik_list).mean(axis=0)
eval_train_loss = np.stack(eval_train_loss_list).mean(axis=0)
eval_valid_loss = np.stack(eval_valid_loss_list).mean(axis=0)
print('======================================')
print('exp: ', config_str)
print('epoch: ', e_idx, ' total iter: ', itr)
print('--------------------------------------')
print('Eval')
print('--------------------------------------')
print('train_llik: ', eval_train_llik)
print('valid_llik: ', eval_valid_llik)
print('train_loss: ', eval_train_loss)
print('valid_loss: ', eval_valid_loss)
test_itr_list.append(itr)
test_train_loss_list.append(eval_train_loss[(- 1)])
test_valid_loss_list.append(eval_valid_loss[(- 1)])
pkl.dump([test_itr_list, test_train_loss_list, test_valid_loss_list], open(((experiment_dir + '/') + 'results.pkl'), 'wb'))
plt.title('valid loss during training')
plt.plot(test_itr_list, test_valid_loss_list, '-', label='test loss')
plt.savefig(((experiment_dir + '/') + 'test_loss.png'))
plt.close()
if (best_test_loss > test_valid_loss_list[(- 1)]):
best_test_loss = test_valid_loss_list[(- 1)]
best_test_iter = itr
if (itr > 10000):
saver.save(sess, ((experiment_dir + '/') + 'best_model'))
|
def test(model, dataset, sess, inner_lr):
eval_valid_loss_list = []
for i in range(int((FLAGS.test_total_num_tasks / FLAGS.num_tasks))):
[follow_x, _, valid_x, follow_y, _, valid_y] = dataset.generate_batch(is_training=False, batch_idx=(i * FLAGS.num_tasks), inc_follow=True)
feed_in = OrderedDict()
feed_in[model.follow_lr] = inner_lr
feed_in[model.follow_x] = follow_x
feed_in[model.follow_y] = follow_y
feed_in[model.valid_x] = valid_x
feed_in[model.valid_y] = valid_y
eval_valid_loss_list.append(sess.run(model.eval_valid_loss, feed_in))
eval_valid_loss_list = np.array(eval_valid_loss_list)
eval_valid_loss_mean = np.mean(eval_valid_loss_list, axis=0)
return eval_valid_loss_mean
|
def main():
random.seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
tf.set_random_seed(FLAGS.seed)
if (not os.path.exists(FLAGS.logdir)):
os.makedirs(FLAGS.logdir)
fname_args = []
if FLAGS.finite:
fname_args += [('train_total_num_tasks', 'SinusoidFinite')]
fname_args += [('test_total_num_tasks', 'Test')]
else:
fname_args += [('test_total_num_tasks', 'SinusoidInfiniteTest')]
fname_args += [('num_epochs', 'Epoch'), ('num_tasks', 'T'), ('seed', 'SEED'), ('noise_factor', 'Noise'), ('num_particles', 'M'), ('dim_hidden', 'H'), ('num_layers', 'L'), ('phase', 'PHS'), ('freq', 'FRQ'), ('few_k_shot', 'TrainK'), ('val_k_shot', 'ValidK'), ('in_grad_clip', 'InGrad'), ('out_grad_clip', 'OutGrad'), ('follow_step', 'FStep'), ('leader_step', 'LStep'), ('follow_lr', 'FLr'), ('leader_lr', 'LLr'), ('meta_lr', 'MetaLr'), ('decay_lr', 'DecLr'), ('lambda_lr', 'LmdLr'), ('kernel', 'Kernel'), ('a_g', 'AG'), ('b_g', 'BG'), ('a_l', 'AL'), ('b_l', 'BL')]
config_str = utils.experiment_string2(FLAGS.flag_values_dict(), fname_args, separator='_')
config_str = ((str(time.mktime(datetime.now().timetuple()))[:(- 2)] + '_BMAML_CHASE') + config_str)
print(config_str)
dataset = SinusoidGenerator()
dim_output = dataset.dim_output
dim_input = dataset.dim_input
model = BMAML(dim_input=dim_input, dim_output=dim_output, dim_hidden=FLAGS.dim_hidden, num_layers=FLAGS.num_layers, num_particles=FLAGS.num_particles, max_test_step=10)
model.construct_model(is_training=True)
model.construct_model(is_training=False)
saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES), max_to_keep=1)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
if FLAGS.train:
train(model, dataset, saver, sess, config_str)
|
class BNN(object):
def __init__(self, dim_input, dim_output, dim_hidden, num_layers, is_bnn=True):
self.dim_input = dim_input
self.dim_output = dim_output
self.dim_hidden = dim_hidden
self.num_layers = num_layers
self.is_bnn = is_bnn
def construct_network_weights(self, scope='network'):
params = OrderedDict()
fc_initializer = tf.contrib.layers.xavier_initializer(dtype=tf.float32)
params['w1'] = tf.get_variable(name=(scope + '_w1'), shape=[self.dim_input, self.dim_hidden], initializer=fc_initializer)
params['b1'] = tf.Variable(name=(scope + '_b1'), initial_value=tf.random_normal([self.dim_hidden], 0.0, 0.01))
for l in range(self.num_layers):
if (l < (self.num_layers - 1)):
dim_output = self.dim_hidden
else:
dim_output = self.dim_output
params['w{}'.format((l + 2))] = tf.get_variable(name=(scope + '_w{}'.format((l + 2))), shape=[self.dim_hidden, dim_output], initializer=fc_initializer)
params['b{}'.format((l + 2))] = tf.Variable(name=(scope + '_b{}'.format((l + 2))), initial_value=tf.random_normal([dim_output], 0.0, 0.01))
if self.is_bnn:
init_val = np.random.normal((- np.log(FLAGS.m_l)), 0.001, [1])
params['log_lambda'] = tf.Variable(name=(scope + '_log_lambda'), initial_value=init_val, dtype=tf.float32)
print('log_lambda: ', init_val)
init_val = np.random.normal((- np.log(FLAGS.m_g)), 0.001, [1])
params['log_gamma'] = tf.Variable(name=(scope + '_log_gamma'), initial_value=init_val, dtype=tf.float32)
print('log_gamma: ', init_val)
return params
def log_likelihood_data(self, predict_y, target_y, log_gamma):
if (not self.is_bnn):
raise NotImplementedError()
error_y = (predict_y - target_y)
log_lik_data = ((0.5 * log_gamma) - ((0.5 * tf.exp(log_gamma)) * tf.square(error_y)))
return log_lik_data
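# This is the Gaussian log-likelihood with precision gamma = exp(log_gamma),
#   log N(y | y_hat, gamma^{-1}) = 0.5*log(gamma) - 0.5*gamma*(y - y_hat)^2
#                                  - 0.5*log(2*pi),
# with the constant -0.5*log(2*pi) dropped since it does not affect gradients.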
def log_prior_weight(self, W_dict):
if (not self.is_bnn):
raise NotImplementedError()
W_vec = self.dicval2vec(W_dict)
log_lambda = tf.reshape(W_vec[(- 2)], (1,))
log_gamma = tf.reshape(W_vec[(- 1)], (1,))
W_vec = W_vec[:(- 2)]
num_params = tf.cast(W_vec.shape[0], tf.float32)
log_prior_gamma = ((((FLAGS.a_g - 1) * log_gamma) - (FLAGS.b_g * tf.exp(log_gamma))) + log_gamma)
W_diff = W_vec
log_prior_w = (((0.5 * num_params) * log_lambda) - ((0.5 * tf.exp(log_lambda)) * tf.reduce_sum((W_diff ** 2))))
log_prior_lambda = ((((FLAGS.a_l - 1) * log_lambda) - (FLAGS.b_l * tf.exp(log_lambda))) + log_lambda)
return (log_prior_w, log_prior_gamma, log_prior_lambda)
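# The priors above are Gamma(a, b) densities on gamma = exp(log_gamma) and
# lambda = exp(log_lambda), written in log-space: (a - 1)*log(x) - b*x plus
# the trailing `+ log_gamma` (resp. `+ log_lambda`), which is the
# log-Jacobian of the exp reparameterization. log_prior_w is an isotropic
# Gaussian N(0, lambda^{-1} I) over all num_params network weights, again
# up to additive constants.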
def mse_data(self, predict_y, target_y):
return tf.reduce_sum(tf.square((predict_y - target_y)), axis=1)
def forward_network(self, x, W_dict):
hid = tf.nn.relu((tf.matmul(x, W_dict['w1']) + W_dict['b1']))
for l in range(self.num_layers):
hid = (tf.matmul(hid, W_dict['w{}'.format((l + 2))]) + W_dict['b{}'.format((l + 2))])
if (l < (self.num_layers - 1)):
hid = tf.nn.relu(hid)
return hid
def list2vec(self, list_in):
return tf.concat([tf.reshape(ww, [(- 1)]) for ww in list_in], axis=0)
def vec2dic(self, W_vec):
if self.is_bnn:
log_lambda = tf.reshape(W_vec[(- 2)], (1,))
log_gamma = tf.reshape(W_vec[(- 1)], (1,))
W_vec = W_vec[:(- 2)]
W_dic = self.network_weight_vec2dict(W_vec)
W_dic['log_lambda'] = log_lambda
W_dic['log_gamma'] = log_gamma
else:
W_dic = self.network_weight_vec2dict(W_vec)
return W_dic
def network_weight_vec2dict(self, W_vec):
W_dic = OrderedDict()
dim_list = (([self.dim_input] + ([self.dim_hidden] * self.num_layers)) + [self.dim_output])
for l in range((len(dim_list) - 1)):
(dim_input, dim_output) = (dim_list[l], dim_list[(l + 1)])
W_dic['w{}'.format((l + 1))] = tf.reshape(W_vec[:(dim_input * dim_output)], [dim_input, dim_output])
W_dic['b{}'.format((l + 1))] = W_vec[(dim_input * dim_output):((dim_input * dim_output) + dim_output)]
if (l < (len(dim_list) - 2)):
W_vec = W_vec[((dim_input * dim_output) + dim_output):]
return W_dic
def dicval2vec(self, dic):
return tf.concat([tf.reshape(val, [(- 1)]) for val in dic.values()], axis=0)
|
class EMAML():
def __init__(self, dim_input, dim_output, dim_hidden=32, num_layers=4, num_particles=2, max_test_step=5):
self.dim_input = dim_input
self.dim_output = dim_output
self.dim_hidden = dim_hidden
self.num_layers = num_layers
self.num_particles = num_particles
self.in_lr = tf.placeholder_with_default(input=FLAGS.in_lr, name='in_lr', shape=[])
self.out_lr = tf.placeholder_with_default(input=FLAGS.out_lr, name='out_lr', shape=[])
self.max_test_step = max_test_step
self.bnn = BNN(dim_input=self.dim_input, dim_output=self.dim_output, dim_hidden=self.dim_hidden, num_layers=self.num_layers, is_bnn=False)
self.construct_network_weights = self.bnn.construct_network_weights
self.forward_network = self.bnn.forward_network
self.train_x = tf.placeholder(dtype=tf.float32, name='train_x')
self.train_y = tf.placeholder(dtype=tf.float32, name='train_y')
self.valid_x = tf.placeholder(dtype=tf.float32, name='valid_x')
self.valid_y = tf.placeholder(dtype=tf.float32, name='valid_y')
self.W_network_particles = None
def construct_model(self, is_training=True):
print('start model construction')
with tf.variable_scope('model', reuse=None) as training_scope:
if (is_training or (self.W_network_particles is None)):
self.W_network_particles = [self.construct_network_weights(scope='network{}'.format(p_idx)) for p_idx in range(self.num_particles)]
else:
training_scope.reuse_variables()
if is_training:
max_update_step = FLAGS.in_step
else:
max_update_step = max(FLAGS.in_step, self.max_test_step)
def fast_learn_one_task(inputs):
[train_x, valid_x, train_y, valid_y] = inputs
meta_loss = []
WW_update = [OrderedDict(zip(W_dic.keys(), W_dic.values())) for W_dic in self.W_network_particles]
step_train_loss = ([None] * (max_update_step + 1))
step_valid_loss = ([None] * (max_update_step + 1))
step_train_pred = ([None] * (max_update_step + 1))
step_valid_pred = ([None] * (max_update_step + 1))
for s_idx in range((max_update_step + 1)):
train_z_list = []
valid_z_list = []
train_mse_list = []
valid_mse_list = []
for p_idx in range(FLAGS.num_particles):
train_z_list.append(self.forward_network(x=train_x, W_dict=WW_update[p_idx]))
valid_z_list.append(self.forward_network(x=valid_x, W_dict=WW_update[p_idx]))
train_mse_list.append(self.bnn.mse_data(predict_y=train_z_list[(- 1)], target_y=train_y))
valid_mse_list.append(self.bnn.mse_data(predict_y=valid_z_list[(- 1)], target_y=valid_y))
if (s_idx < max_update_step):
particle_loss = tf.reduce_mean(train_mse_list[(- 1)])
dWp = tf.gradients(ys=particle_loss, xs=list(WW_update[p_idx].values()))
if FLAGS.stop_grad:
dWp = [tf.stop_gradient(grad) for grad in dWp]
dWp = OrderedDict(zip(WW_update[p_idx].keys(), dWp))
param_names = []
param_vals = []
for key in list(WW_update[p_idx].keys()):
if (FLAGS.in_grad_clip > 0):
grad = tf.clip_by_value(dWp[key], (- FLAGS.in_grad_clip), FLAGS.in_grad_clip)
else:
grad = dWp[key]
param_names.append(key)
param_vals.append((WW_update[p_idx][key] - (self.in_lr * grad)))
WW_update[p_idx] = OrderedDict(zip(param_names, param_vals))
else:
meta_loss.append(tf.reduce_mean(valid_mse_list[(- 1)]))
step_train_loss[s_idx] = tf.reduce_mean([tf.reduce_mean(train_mse) for train_mse in train_mse_list])
step_valid_loss[s_idx] = tf.reduce_mean([tf.reduce_mean(valid_mse) for valid_mse in valid_mse_list])
step_train_pred[s_idx] = tf.concat([tf.expand_dims(train_z, 0) for train_z in train_z_list], axis=0)
step_valid_pred[s_idx] = tf.concat([tf.expand_dims(valid_z, 0) for valid_z in valid_z_list], axis=0)
meta_loss = tf.reduce_sum(meta_loss)
return [step_train_loss, step_valid_loss, step_train_pred, step_valid_pred, meta_loss]
out_dtype = [([tf.float32] * (max_update_step + 1)), ([tf.float32] * (max_update_step + 1)), ([tf.float32] * (max_update_step + 1)), ([tf.float32] * (max_update_step + 1)), tf.float32]
result = tf.map_fn(fast_learn_one_task, elems=[self.train_x, self.valid_x, self.train_y, self.valid_y], dtype=out_dtype, parallel_iterations=FLAGS.num_tasks)
full_step_train_loss = result[0]
full_step_valid_loss = result[1]
full_step_train_pred = result[2]
full_step_valid_pred = result[3]
full_meta_loss = result[4]
if is_training:
self.total_train_loss = [tf.reduce_mean(full_step_train_loss[j]) for j in range((FLAGS.in_step + 1))]
self.total_valid_loss = [tf.reduce_mean(full_step_valid_loss[j]) for j in range((FLAGS.in_step + 1))]
self.total_meta_loss = tf.reduce_mean(full_meta_loss)
self.total_train_z_list = full_step_train_pred
self.total_valid_z_list = full_step_valid_pred
update_params_list = []
update_params_name = []
for p in range(FLAGS.num_particles):
for name in self.W_network_particles[0].keys():
update_params_name.append([p, name])
update_params_list.append(self.W_network_particles[p][name])
optimizer = tf.train.AdamOptimizer(learning_rate=self.out_lr)
gv_list = optimizer.compute_gradients(loss=self.total_meta_loss, var_list=update_params_list)
if (FLAGS.out_grad_clip > 0):
gv_list = [(tf.clip_by_value(grad, (- FLAGS.out_grad_clip), FLAGS.out_grad_clip), var) for (grad, var) in gv_list]
self.metatrain_op = optimizer.apply_gradients(gv_list)
else:
self.eval_train_loss = [tf.reduce_mean(full_step_train_loss[j]) for j in range((max_update_step + 1))]
self.eval_valid_loss = [tf.reduce_mean(full_step_valid_loss[j]) for j in range((max_update_step + 1))]
self.eval_train_z_list = full_step_train_pred
self.eval_valid_z_list = full_step_valid_pred
print('end of model construction')
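# construct_model() above is the standard MAML inner/outer loop, run
# independently per particle (an ensemble, with no kernel coupling between
# particles): the inner SGD step is
#   W <- W - in_lr * grad_W MSE(train),
# repeated in_step times, and the meta loss is the post-adaptation
# validation MSE summed over particles, minimized by Adam with respect to
# the initial particle weights.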
|
def train(model, dataset, saver, sess, config_str):
experiment_dir = ((FLAGS.logdir + '/') + config_str)
train_writer = tf.summary.FileWriter(experiment_dir, sess.graph)
print('Done initializing, starting training.')
num_iters_per_epoch = int((FLAGS.train_total_num_tasks / FLAGS.num_tasks))
if (not FLAGS.finite):
num_iters_per_epoch = 1
inner_train_loss = []
inner_valid_loss = []
meta_loss = []
test_itr_list = []
test_valid_loss_list = []
best_test_loss = 1000.0
best_test_iter = 0
itr = 0
for e_idx in range(FLAGS.num_epochs):
for b_idx in range(num_iters_per_epoch):
itr += 1
[train_x, valid_x, train_y, valid_y] = dataset.generate_batch(is_training=True, batch_idx=None)
out_lr = (FLAGS.out_lr * (FLAGS.decay_lr ** (float((itr - 1)) / float(((FLAGS.num_epochs * num_iters_per_epoch) / 100)))))
feed_in = OrderedDict()
feed_in[model.out_lr] = out_lr
feed_in[model.train_x] = train_x
feed_in[model.valid_x] = valid_x
feed_in[model.train_y] = train_y
feed_in[model.valid_y] = valid_y
fetch_out = [model.metatrain_op, model.total_train_loss, model.total_valid_loss, model.total_meta_loss]
result = sess.run(fetch_out, feed_in)[1:]
inner_train_loss.append(result[0])
inner_valid_loss.append(result[1])
meta_loss.append(result[2])
if ((itr % PRINT_INTERVAL) == 0):
inner_train_loss = np.stack(inner_train_loss).mean(axis=0)
inner_valid_loss = np.stack(inner_valid_loss).mean(axis=0)
meta_loss = np.stack(meta_loss).mean(axis=0)
print('======================================')
print('exp: ', config_str)
print('epoch: ', e_idx, ' total iter: ', itr)
print('--------------------------------------')
print('train_loss: ', inner_train_loss)
print('valid_loss: ', inner_valid_loss)
print('--------------------------------------')
print('meta_loss: ', meta_loss)
print('out_lr: ', out_lr)
print('--------------------------------------')
print('best_test_loss: ', best_test_loss, '({})'.format(best_test_iter))
inner_train_loss = []
inner_valid_loss = []
meta_loss = []
if ((itr % TEST_PRINT_INTERVAL) == 0):
eval_train_loss_list = []
eval_valid_loss_list = []
fetch_out = [model.eval_train_loss[:(FLAGS.in_step + 1)], model.eval_valid_loss[:(FLAGS.in_step + 1)]]
for i in range(int((FLAGS.test_total_num_tasks / FLAGS.num_tasks))):
[train_x, valid_x, train_y, valid_y] = dataset.generate_batch(is_training=False, batch_idx=(i * FLAGS.num_tasks))
feed_in = OrderedDict()
feed_in[model.train_x] = train_x
feed_in[model.valid_x] = valid_x
feed_in[model.train_y] = train_y
feed_in[model.valid_y] = valid_y
result = sess.run(fetch_out, feed_in)
eval_train_loss_list.append(result[0])
eval_valid_loss_list.append(result[1])
eval_train_loss = np.stack(eval_train_loss_list).mean(axis=0)
eval_valid_loss = np.stack(eval_valid_loss_list).mean(axis=0)
print('======================================')
print('Eval')
print('--------------------------------------')
print('exp: ', config_str)
print('epoch: ', e_idx, ' total iter: ', itr)
print('--------------------------------------')
print('train_loss: ', eval_train_loss)
print('valid_loss: ', eval_valid_loss)
test_itr_list.append(itr)
test_valid_loss_list.append(eval_valid_loss[(- 1)])
pkl.dump([test_itr_list, test_valid_loss_list], open(((experiment_dir + '/') + 'results.pkl'), 'wb'))
plt.title('valid loss during training')
plt.plot(test_itr_list, test_valid_loss_list, '-', label='test loss')
plt.savefig(((experiment_dir + '/') + 'test_loss.png'))
plt.close()
if (best_test_loss > test_valid_loss_list[(- 1)]):
best_test_loss = test_valid_loss_list[(- 1)]
best_test_iter = itr
saver.save(sess, ((experiment_dir + '/') + 'best_model'))
|
def test(model, dataset, sess, inner_lr):
eval_valid_loss_list = []
for i in range(int((FLAGS.test_total_num_tasks / FLAGS.num_tasks))):
[train_x, valid_x, train_y, valid_y] = dataset.generate_batch(is_training=False, batch_idx=(i * FLAGS.num_tasks))
feed_in = OrderedDict()
feed_in[model.in_lr] = inner_lr
feed_in[model.train_x] = train_x
feed_in[model.valid_x] = valid_x
feed_in[model.train_y] = train_y
feed_in[model.valid_y] = valid_y
eval_valid_loss_list.append(sess.run(model.eval_valid_loss, feed_in))
eval_valid_loss_list = np.array(eval_valid_loss_list)
eval_valid_loss_mean = np.mean(eval_valid_loss_list, axis=0)
return eval_valid_loss_mean
|
def main():
random.seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
tf.set_random_seed(FLAGS.seed)
if (not os.path.exists(FLAGS.logdir)):
os.makedirs(FLAGS.logdir)
fname_args = []
if FLAGS.finite:
fname_args += [('train_total_num_tasks', 'SinusoidFinite')]
fname_args += [('test_total_num_tasks', 'Test')]
else:
fname_args += [('test_total_num_tasks', 'SinusoidInfiniteTest')]
fname_args += [('num_epochs', 'Epoch'), ('num_tasks', 'T'), ('seed', 'SEED'), ('noise_factor', 'Noise'), ('num_particles', 'M'), ('dim_hidden', 'H'), ('num_layers', 'L'), ('phase', 'PHS'), ('freq', 'FRQ'), ('few_k_shot', 'TrainK'), ('val_k_shot', 'ValidK'), ('in_step', 'InStep'), ('in_grad_clip', 'InGrad'), ('out_grad_clip', 'OutGrad'), ('in_lr', 'InLr'), ('out_lr', 'OutLr'), ('decay_lr', 'DecLr')]
config_str = utils.experiment_string2(FLAGS.flag_values_dict(), fname_args, separator='_')
config_str = ((str(time.mktime(datetime.now().timetuple()))[:(- 2)] + '_EMAML') + config_str)
print(config_str)
dataset = SinusoidGenerator(split_data=False)
dim_output = dataset.dim_output
dim_input = dataset.dim_input
model = EMAML(dim_input=dim_input, dim_output=dim_output, dim_hidden=FLAGS.dim_hidden, num_layers=FLAGS.num_layers, num_particles=FLAGS.num_particles, max_test_step=10)
model.construct_model(is_training=True)
model.construct_model(is_training=False)
saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES), max_to_keep=1)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tf.train.start_queue_runners()
if FLAGS.train:
train(model, dataset, saver, sess, config_str)
|
def pdist(tensor, metric='euclidean'):
assert isinstance(tensor, (tf.Variable, tf.Tensor)), 'tensor_utils.pdist: Input must be a `tensorflow.Tensor` instance.'
if (len(tensor.shape.as_list()) != 2):
raise ValueError('tensor_utils.pdist: A 2-d tensor must be passed.')
if (metric == 'euclidean'):
def pairwise_euclidean_distance(tensor):
def euclidean_distance(tensor1, tensor2):
return tf.norm((tensor1 - tensor2))
m = tensor.shape.as_list()[0]
distances = []
for i in range(m):
for j in range((i + 1), m):
distances.append(euclidean_distance(tensor[i], tensor[j]))
return tf.convert_to_tensor(distances)
metric_function = pairwise_euclidean_distance
else:
raise NotImplementedError("tensor_utils.pdist: Metric '{metric}' currently not supported!".format(metric=metric))
return metric_function(tensor)
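# Usage sketch: for an (m, d) particle tensor this returns the m*(m-1)/2
# pairwise Euclidean distances in condensed (vector) form, mirroring
# scipy.spatial.distance.pdist; squareform() below expands it back to an
# (m, m) symmetric matrix.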
|
def _is_vector(tensor):
return (len(tensor.shape.as_list()) == 1)
|
def median(tensor):
tensor_reshaped = tf.reshape(tensor, [(- 1)])
n_elements = tensor_reshaped.get_shape()[0]
sorted_tensor = tf.nn.top_k(tensor_reshaped, n_elements, sorted=True)
mid_index = (n_elements // 2)
if ((n_elements % 2) == 1):
return sorted_tensor.values[mid_index]
return ((sorted_tensor.values[(mid_index - 1)] + sorted_tensor.values[mid_index]) / 2)
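# Worked example (values illustrative): for [1., 3., 2., 4.], top_k sorts
# descending to [4., 3., 2., 1.]; n_elements = 4 is even, mid_index = 2, so
# the result is (values[1] + values[2]) / 2 = 2.5, matching np.median.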
|
def squareform(tensor):
assert isinstance(tensor, tf.Tensor), 'tensor_utils.squareform: Input must be a `tensorflow.Tensor` instance.'
tensor_shape = tensor.shape.as_list()
n_elements = tensor_shape[0]
if _is_vector(tensor):
if (n_elements == 0):
return tf.zeros((1, 1), dtype=tensor.dtype)
dimension = int(np.ceil(np.sqrt((n_elements * 2))))
if ((dimension * (dimension - 1)) != (n_elements * 2)):
raise ValueError('Incompatible vector size. It must be a binomial coefficient n choose 2 for some integer n >=2.')
n_total_elements_matrix = (dimension ** 2)
n_diagonal_zeros = dimension
n_fill_zeros = ((n_total_elements_matrix - n_elements) - n_diagonal_zeros)
condensed_distance_tensor = tf.reshape(tensor, shape=(n_elements, 1))
diagonal_zeros = tf.zeros(shape=(n_diagonal_zeros, 1), dtype=condensed_distance_tensor.dtype)
fill_zeros = tf.zeros(shape=(n_fill_zeros, 1), dtype=condensed_distance_tensor.dtype)
def upper_triangular_indices(dimension):
"""For a square matrix with shape (`dimension`, `dimension`), return a
list of indices into a vector with `dimension * dimension` elements
that correspond to its upper triangular part after reshaping.

Parameters
----------
dimension : int
    Target dimensionality of the square matrix we want to obtain by
    reshaping a `dimension * dimension` element vector.

Yields
------
index : int
    Indices into a `dimension * dimension` element vector that
    correspond to the upper triangular part of the matrix obtained by
    reshaping it into shape `(dimension, dimension)`.
"""
assert (dimension > 0), 'tensor_utils.upper_triangular_indices: Dimension must be positive integer!'
for row in range(dimension):
for column in range((row + 1), dimension):
element_index = ((dimension * row) + column)
(yield element_index)
all_indices = set(range(n_total_elements_matrix))
diagonal_indices = list(range(0, n_total_elements_matrix, (dimension + 1)))
upper_triangular = list(upper_triangular_indices(dimension))
remaining_indices = all_indices.difference(set(diagonal_indices).union(upper_triangular))
data = (diagonal_zeros, condensed_distance_tensor, fill_zeros)
indices = (tuple(diagonal_indices), tuple(upper_triangular), tuple(remaining_indices))
stitch_vector = tf.dynamic_stitch(data=data, indices=indices)
upper_triangular = tf.reshape(stitch_vector, (dimension, dimension))
lower_triangular = tf.transpose(upper_triangular)
return (upper_triangular + lower_triangular)
else:
raise NotImplementedError('tensor_utils.squareform: Only 1-d (vector) input is supported!')
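# Worked example: a condensed vector [d01, d02, d12] has 3 = C(3, 2) entries,
# so dimension = 3 and the result is the symmetric matrix
#   [[0,   d01, d02],
#    [d01, 0,   d12],
#    [d02, d12, 0  ]],
# mirroring scipy.spatial.distance.squareform for vector input.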
|
def get_images(paths, labels, nb_samples=None, shuffle=True):
if (nb_samples is not None):
sampler = (lambda x: random.sample(x, nb_samples))
else:
sampler = (lambda x: x)
images = [(i, os.path.join(path, image)) for (i, path) in zip(labels, paths) for image in sampler(os.listdir(path))]
if shuffle:
random.shuffle(images)
return images
|
def clip_if_not_none(grad, min_value, max_value):
if (grad is None):
return grad
return tf.clip_by_value(grad, min_value, max_value)
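# A None-safe variant of the gradient clipping applied to gv_list in
# construct_model; useful because compute_gradients can return None for
# variables that do not influence the loss, which tf.clip_by_value rejects.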
|
def str2bool(v):
if (v.lower() in ('yes', 'true', 't', 'y', '1')):
return True
elif (v.lower() in ('no', 'false', 'f', 'n', '0')):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
|
def make_logdir(configs, fname_args=[]):
this_run_str = (time.strftime('%H%M%S_') + str(socket.gethostname()))
if is_git_dir():
this_run_str += ('_git' + git_hash_str())
for str_arg in fname_args:
if (str_arg in configs.keys()):
this_run_str += ((('_' + str_arg.title().replace('_', '')) + '_') + str(configs[str_arg]))
else:
raise ValueError(('%s in fname_args does not exist in configs' % str_arg))
this_run_str = this_run_str.replace('/', '_')
return this_run_str
|
def experiment_prefix_str(separator=',', hostname=False, git=True):
this_run_str = time.strftime('%y%m%d_%H%M%S')
if hostname:
this_run_str += str(socket.gethostname())
if (git and is_git_dir()):
this_run_str += (separator + str(git_hash_str()))
this_run_str = this_run_str.replace('-', '')
return this_run_str
|
def experiment_string2(configs, fname_args=[], separator=','):
this_run_str = ''
for (org_arg_str, short_arg_str) in fname_args:
short_arg_str = (org_arg_str.title().replace('_', '') if (short_arg_str is None) else short_arg_str)
if (org_arg_str in configs.keys()):
this_run_str += ((separator + short_arg_str) + str(configs[org_arg_str]).title().replace('_', ''))
else:
raise ValueError(('%s in fname_args does not exist in configs' % org_arg_str))
this_run_str = this_run_str.replace('/', '_')
return this_run_str
|
def experiment_string(configs, fname_args=[], separator=','):
this_run_str = experiment_prefix_str(separator)
for str_arg in fname_args:
if (str_arg in configs.keys()):
this_run_str += (((separator + str_arg.title().replace('_', '')) + '=') + str(configs[str_arg]))
else:
raise ValueError(('%s in fname_args does not exist in configs' % str_arg))
this_run_str = this_run_str.replace('/', '_')
return this_run_str
|
def is_git_dir():
from subprocess import call, STDOUT
if (call(['git', 'branch'], stderr=STDOUT, stdout=open(os.devnull, 'w')) != 0):
return False
else:
return True
|
def git_hash_str(hash_len=7):
import subprocess
# check_output returns bytes; decode before slicing so Python 3 does not
# leave a "b'..." prefix in the result
hash_str = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
return hash_str.decode('utf-8')[:hash_len]
|
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--work-dir', help='the directory to save the file containing evaluation metrics')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument('--fuse-conv-bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increase the inference speed')
parser.add_argument('--format-only', action='store_true', help='Format the output results without performing evaluation. It is useful when you want to format the result to a specific format and submit it to the test server')
parser.add_argument('--eval', type=str, nargs='+', help='evaluation metrics, which depends on the dataset, e.g., "bbox", "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--show-dir', help='directory where painted images will be saved')
parser.add_argument('--show-score-thr', type=float, default=0.3, help='score threshold (default: 0.3)')
parser.add_argument('--gpu-collect', action='store_true', help='whether to use gpu to collect results.')
parser.add_argument('--tmpdir', help='tmp directory used for collecting results from multiple workers, available when gpu-collect is not specified')
parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
parser.add_argument('--options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function (deprecate), change to --eval-options instead.')
parser.add_argument('--eval-options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if ('LOCAL_RANK' not in os.environ):
os.environ['LOCAL_RANK'] = str(args.local_rank)
if (args.options and args.eval_options):
raise ValueError('--options and --eval-options cannot be both specified, --options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
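# Example invocation implied by the parser above (paths are hypothetical):
# python tools/test.py configs/faster_rcnn.py checkpoints/latest.pth --eval bbox --out results.pkl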
|
def main():
args = parse_args()
assert (args.out or args.eval or args.format_only or args.show or args.show_dir), 'Please specify at least one operation (save/eval/format/show the results / save the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"'
if (args.eval and args.format_only):
raise ValueError('--eval and --format_only cannot be both specified')
if ((args.out is not None) and (not args.out.endswith(('.pkl', '.pickle')))):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
if (args.cfg_options is not None):
cfg.merge_from_dict(args.cfg_options)
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
if cfg.model.get('neck'):
if isinstance(cfg.model.neck, list):
for neck_cfg in cfg.model.neck:
if neck_cfg.get('rfp_backbone'):
if neck_cfg.rfp_backbone.get('pretrained'):
neck_cfg.rfp_backbone.pretrained = None
elif cfg.model.neck.get('rfp_backbone'):
if cfg.model.neck.rfp_backbone.get('pretrained'):
cfg.model.neck.rfp_backbone.pretrained = None
samples_per_gpu = 1
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if (samples_per_gpu > 1):
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
samples_per_gpu = max([ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
if (samples_per_gpu > 1):
for ds_cfg in cfg.data.test:
ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
if (args.launcher == 'none'):
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
(rank, _) = get_dist_info()
if ((args.work_dir is not None) and (rank == 0)):
mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
if ('custom_hooks' in cfg):
for hook in cfg.custom_hooks:
if (hook.type == 'FisherPruningHook'):
hook_cfg = hook.copy()
hook_cfg.pop('priority', None)
from mmcv.runner.hooks import HOOKS
hook_cls = HOOKS.get(hook_cfg['type'])
if hasattr(hook_cls, 'after_build_model'):
pruning_hook = mmcv.build_from_cfg(hook_cfg, HOOKS)
pruning_hook.after_build_model(model)
fp16_cfg = cfg.get('fp16', None)
if (fp16_cfg is not None):
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
if ('CLASSES' in checkpoint.get('meta', {})):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if (not distributed):
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, args.show_score_thr)
else:
model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
(rank, _) = get_dist_info()
if (rank == 0):
if args.out:
print(f'''
writing results to {args.out}''')
mmcv.dump(outputs, args.out)
kwargs = ({} if (args.eval_options is None) else args.eval_options)
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
for key in ['interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule']:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
metric = dataset.evaluate(outputs, **eval_kwargs)
print(metric)
metric_dict = dict(config=args.config, metric=metric)
if ((args.work_dir is not None) and (rank == 0)):
mmcv.dump(metric_dict, json_file)
|
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument('--resume-from', help='the checkpoint file to resume from')
parser.add_argument('--no-validate', action='store_true', help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)')
group_gpus.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use (only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
parser.add_argument('--options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file (deprecate), change to --cfg-options instead.')
parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if ('LOCAL_RANK' not in os.environ):
os.environ['LOCAL_RANK'] = str(args.local_rank)
if (args.options and args.cfg_options):
raise ValueError('--options and --cfg-options cannot be both specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
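# Example invocation implied by the parser above (paths are hypothetical):
# python tools/train.py configs/faster_rcnn.py --work-dir work_dirs/exp1 --seed 0 --deterministic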
|
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if (args.cfg_options is not None):
cfg.merge_from_dict(args.cfg_options)
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
if (args.work_dir is not None):
cfg.work_dir = args.work_dir
elif (cfg.get('work_dir', None) is None):
cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
if (args.resume_from is not None):
cfg.resume_from = args.resume_from
if (args.gpu_ids is not None):
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = (range(1) if (args.gpus is None) else range(args.gpus))
if (args.launcher == 'none'):
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
(_, world_size) = get_dist_info()
cfg.gpu_ids = range(world_size)
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
meta = dict()
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for (k, v) in env_info_dict.items()])
dash_line = (('-' * 60) + '\n')
logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
logger.info(f'Distributed training: {distributed}')
logger.info(f'''Config:
{cfg.pretty_text}''')
if (args.seed is not None):
logger.info(f'Set random seed to {args.seed}, deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_detector(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
model.init_weights()
if ('custom_hooks' in cfg):
for hook in cfg.custom_hooks:
if (hook.type == 'FisherPruningHook'):
hook_cfg = hook.copy()
hook_cfg.pop('priority', None)
from mmcv.runner.hooks import HOOKS
hook_cls = HOOKS.get(hook_cfg['type'])
if hasattr(hook_cls, 'after_build_model'):
pruning_hook = mmcv.build_from_cfg(hook_cfg, HOOKS)
pruning_hook.after_build_model(model)
datasets = [build_dataset(cfg.data.train)]
if (len(cfg.workflow) == 2):
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if (cfg.checkpoint_config is not None):
cfg.checkpoint_config.meta = dict(mmdet_version=(__version__ + get_git_hash()[:7]), CLASSES=datasets[0].CLASSES)
model.CLASSES = datasets[0].CLASSES
train_detector(model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta)
|
@register_line_cell_magic
def writetemplate(line, cell):
with open(line, 'w') as f:
f.write(cell.format(**globals()))
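# Hedged usage sketch: as a cell magic in a notebook, the cell body is written
# to the given path with {placeholders} filled from notebook globals, e.g.
# with num_classes = 3 defined in an earlier cell:
# %%writetemplate data/config.yaml
# nc: {num_classes}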
|
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection webcam demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--device', type=int, default=0, help='CUDA device id')
parser.add_argument('--camera-id', type=int, default=0, help='camera device id')
parser.add_argument('--score-thr', type=float, default=0.5, help='bbox score threshold')
args = parser.parse_args()
return args
|
def main():
args = parse_args()
model = init_detector(args.config, args.checkpoint, device=torch.device('cuda', args.device))
camera = cv2.VideoCapture(args.camera_id)
print('Press "Esc", "q" or "Q" to exit.')
while True:
(ret_val, img) = camera.read()
result = inference_detector(model, img)
ch = cv2.waitKey(1)
if ((ch == 27) or (ch == ord('q')) or (ch == ord('Q'))):
break
show_result(img, result, model.CLASSES, score_thr=args.score_thr, wait_time=1)
|
def single_gpu_test(model, data_loader, show=False):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for (i, data) in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=(not show), **data)
results.append(result)
if show:
model.module.show_result(data, result)
batch_size = data['img'][0].size(0)
for _ in range(batch_size):
prog_bar.update()
return results
|
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"Test model with multiple gpus.\n\n This method tests model with multiple gpus and collects the results\n under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'\n it encodes results to gpu tensors and use gpu communication for results\n collection. On cpu mode it saves the results on different gpus to 'tmpdir'\n and collects them by the rank 0 worker.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (nn.Dataloader): Pytorch data loader.\n tmpdir (str): Path of directory to save the temporary results from\n different gpus under cpu mode.\n gpu_collect (bool): Option to use either gpu or cpu to collect results.\n\n Returns:\n list: The prediction results.\n "
model.eval()
results = []
dataset = data_loader.dataset
(rank, world_size) = get_dist_info()
if (rank == 0):
prog_bar = mmcv.ProgressBar(len(dataset))
for (i, data) in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if (rank == 0):
batch_size = data['img'][0].size(0)
for _ in range((batch_size * world_size)):
prog_bar.update()
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
|
def collect_results_cpu(result_part, size, tmpdir=None):
(rank, world_size) = get_dist_info()
if (tmpdir is None):
MAX_LEN = 512
dir_tensor = torch.full((MAX_LEN,), 32, dtype=torch.uint8, device='cuda')
if (rank == 0):
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
if (rank != 0):
return None
else:
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
ordered_results = ordered_results[:size]
shutil.rmtree(tmpdir)
return ordered_results
|
def collect_results_gpu(result_part, size):
(rank, world_size) = get_dist_info()
part_tensor = torch.tensor(bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [part_tensor.new_zeros(shape_max) for _ in range(world_size)]
dist.all_gather(part_recv_list, part_send)
if (rank == 0):
part_list = []
for (recv, shape) in zip(part_recv_list, shape_list):
part_list.append(pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
ordered_results = ordered_results[:size]
return ordered_results
|
def set_random_seed(seed, deterministic=False):
'Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n '
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
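# Hedged usage sketch: call once before building models/dataloaders so runs
# are repeatable; deterministic=True trades cuDNN speed for reproducibility.
# set_random_seed(42, deterministic=True)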
|
def parse_losses(losses):
log_vars = OrderedDict()
for (loss_name, loss_value) in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum((_loss.mean() for _loss in loss_value))
else:
raise TypeError('{} is not a tensor or list of tensors'.format(loss_name))
loss = sum((_value for (_key, _value) in log_vars.items() if ('loss' in _key)))
log_vars['loss'] = loss
for (loss_name, loss_value) in log_vars.items():
if (dist.is_available() and dist.is_initialized()):
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return (loss, log_vars)
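# Hedged toy example (single process, torch.distributed not initialized):
# only keys containing 'loss' are summed into the total; 'acc' is logged but
# excluded from it.
# losses = {'loss_cls': torch.tensor(0.5), 'loss_bbox': torch.tensor(0.25), 'acc': torch.tensor(0.9)}
# loss, log_vars = parse_losses(losses)
# loss.item() -> 0.75; log_vars -> {'loss_cls': 0.5, 'loss_bbox': 0.25, 'acc': 0.9, 'loss': 0.75}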
|
def batch_processor(model, data, train_mode):
'Process a data batch.\n\n This method is required as an argument of Runner, which defines how to\n process a data batch and obtain proper outputs. The first 3 arguments of\n batch_processor are fixed.\n\n Args:\n model (nn.Module): A PyTorch model.\n data (dict): The data batch in a dict.\n train_mode (bool): Training mode or not. It may be useless for some\n models.\n\n Returns:\n dict: A dict containing losses and log vars.\n '
losses = model(**data)
(loss, log_vars) = parse_losses(losses)
outputs = dict(loss=loss, log_vars=log_vars, num_samples=len(data['img'].data))
return outputs
|
def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
logger = get_root_logger(cfg.log_level)
dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
data_loaders = [build_dataloader(ds, cfg.data.imgs_per_gpu, cfg.data.workers_per_gpu, len(cfg.gpu_ids), dist=distributed, seed=cfg.seed) for ds in dataset]
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
else:
model = MMDataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
optimizer = build_optimizer(model, cfg.optimizer)
runner = Runner(model, batch_processor, optimizer, cfg.work_dir, logger=logger, meta=meta)
runner.timestamp = timestamp
fp16_cfg = cfg.get('fp16', None)
if (fp16_cfg is not None):
optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
elif distributed:
optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config)
if distributed:
runner.register_hook(DistSamplerSeedHook())
if validate:
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(val_dataset, imgs_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
eval_cfg = cfg.get('evaluation', {})
eval_hook = (DistEvalHook if distributed else EvalHook)
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
|
class PointGenerator(object):
def _meshgrid(self, x, y, row_major=True):
xx = x.repeat(len(y))
yy = y.view((- 1), 1).repeat(1, len(x)).view((- 1))
if row_major:
return (xx, yy)
else:
return (yy, xx)
def grid_points(self, featmap_size, stride=16, device='cuda'):
(feat_h, feat_w) = featmap_size
shift_x = (torch.arange(0.0, feat_w, device=device) * stride)
shift_y = (torch.arange(0.0, feat_h, device=device) * stride)
(shift_xx, shift_yy) = self._meshgrid(shift_x, shift_y)
stride = shift_x.new_full((shift_xx.shape[0],), stride)
shifts = torch.stack([shift_xx, shift_yy, stride], dim=(- 1))
all_points = shifts.to(device)
return all_points
def valid_flags(self, featmap_size, valid_size, device='cuda'):
(feat_h, feat_w) = featmap_size
(valid_h, valid_w) = valid_size
assert ((valid_h <= feat_h) and (valid_w <= feat_w))
valid_x = torch.zeros(feat_w, dtype=torch.uint8, device=device)
valid_y = torch.zeros(feat_h, dtype=torch.uint8, device=device)
valid_x[:valid_w] = 1
valid_y[:valid_h] = 1
(valid_xx, valid_yy) = self._meshgrid(valid_x, valid_y)
valid = (valid_xx & valid_yy)
return valid
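# Hedged usage sketch (CPU device to stay hardware-independent): a 2x3
# feature map with stride 16 yields 6 points, each row holding (x, y, stride).
# points = PointGenerator().grid_points((2, 3), stride=16, device='cpu')
# points.shape -> torch.Size([6, 3]); points[0] -> tensor([0., 0., 16.])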
|
def build_assigner(cfg, **kwargs):
if isinstance(cfg, assigners.BaseAssigner):
return cfg
elif isinstance(cfg, dict):
return mmcv.runner.obj_from_dict(cfg, assigners, default_args=kwargs)
else:
raise TypeError('Invalid type {} for building an assigner'.format(type(cfg)))
|
def build_sampler(cfg, **kwargs):
if isinstance(cfg, samplers.BaseSampler):
return cfg
elif isinstance(cfg, dict):
return mmcv.runner.obj_from_dict(cfg, samplers, default_args=kwargs)
else:
raise TypeError('Invalid type {} for building a sampler'.format(type(cfg)))
|
def assign_and_sample(bboxes, gt_bboxes, gt_bboxes_ignore, gt_labels, cfg):
bbox_assigner = build_assigner(cfg.assigner)
bbox_sampler = build_sampler(cfg.sampler)
assign_result = bbox_assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore, gt_labels)
sampling_result = bbox_sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
return (assign_result, sampling_result)
|
class AssignResult(util_mixins.NiceRepr):
'Stores assignments between predicted and truth boxes.\n\n Attributes:\n num_gts (int): the number of truth boxes considered when computing this\n assignment\n\n gt_inds (LongTensor): for each predicted box indicates the 1-based\n index of the assigned truth box. 0 means unassigned and -1 means\n ignore.\n\n max_overlaps (FloatTensor): the iou between the predicted box and its\n assigned truth box.\n\n labels (None | LongTensor): If specified, for each predicted box\n indicates the category label of the assigned truth box.\n\n Example:\n >>> # An assign result between 4 predicted boxes and 9 true boxes\n >>> # where only two boxes were assigned.\n >>> num_gts = 9\n >>> max_overlaps = torch.LongTensor([0, .5, .9, 0])\n >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])\n >>> labels = torch.LongTensor([0, 3, 4, 0])\n >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)\n >>> print(str(self)) # xdoctest: +IGNORE_WANT\n <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),\n labels.shape=(4,))>\n >>> # Force addition of gt labels (when adding gt as proposals)\n >>> new_labels = torch.LongTensor([3, 4, 5])\n >>> self.add_gt_(new_labels)\n >>> print(str(self)) # xdoctest: +IGNORE_WANT\n <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),\n labels.shape=(7,))>\n '
def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
self.num_gts = num_gts
self.gt_inds = gt_inds
self.max_overlaps = max_overlaps
self.labels = labels
@property
def num_preds(self):
'Return the number of predictions in this assignment.'
return len(self.gt_inds)
@property
def info(self):
'Returns a dictionary of info about the object.'
return {'num_gts': self.num_gts, 'num_preds': self.num_preds, 'gt_inds': self.gt_inds, 'max_overlaps': self.max_overlaps, 'labels': self.labels}
def __nice__(self):
'Create a "nice" summary string describing this assign result.'
parts = []
parts.append('num_gts={!r}'.format(self.num_gts))
if (self.gt_inds is None):
parts.append('gt_inds={!r}'.format(self.gt_inds))
else:
parts.append('gt_inds.shape={!r}'.format(tuple(self.gt_inds.shape)))
if (self.max_overlaps is None):
parts.append('max_overlaps={!r}'.format(self.max_overlaps))
else:
parts.append('max_overlaps.shape={!r}'.format(tuple(self.max_overlaps.shape)))
if (self.labels is None):
parts.append('labels={!r}'.format(self.labels))
else:
parts.append('labels.shape={!r}'.format(tuple(self.labels.shape)))
return ', '.join(parts)
@classmethod
def random(cls, **kwargs):
'Create random AssignResult for tests or debugging.\n\n Kwargs:\n num_preds: number of predicted boxes\n num_gts: number of true boxes\n p_ignore (float): probability of a predicted box assigned to an\n ignored truth\n p_assigned (float): probability of a predicted box not being\n assigned\n p_use_label (float | bool): with labels or not\n rng (None | int | numpy.random.RandomState): seed or state\n\n Returns:\n AssignResult :\n\n Example:\n >>> from mmdet.core.bbox.assigners.assign_result import * # NOQA\n >>> self = AssignResult.random()\n >>> print(self.info)\n '
from mmdet.core.bbox import demodata
rng = demodata.ensure_rng(kwargs.get('rng', None))
num_gts = kwargs.get('num_gts', None)
num_preds = kwargs.get('num_preds', None)
p_ignore = kwargs.get('p_ignore', 0.3)
p_assigned = kwargs.get('p_assigned', 0.7)
p_use_label = kwargs.get('p_use_label', 0.5)
# NOTE: fixed from kwargs.get('p_use_label', 3), which looked like a
# copy-paste slip; the class count should come from its own keyword.
num_classes = kwargs.get('num_classes', 3)
if (num_gts is None):
num_gts = rng.randint(0, 8)
if (num_preds is None):
num_preds = rng.randint(0, 16)
if (num_gts == 0):
max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
gt_inds = torch.zeros(num_preds, dtype=torch.int64)
if ((p_use_label is True) or (p_use_label < rng.rand())):
labels = torch.zeros(num_preds, dtype=torch.int64)
else:
labels = None
else:
import numpy as np
max_overlaps = torch.from_numpy(rng.rand(num_preds))
is_assigned = torch.from_numpy((rng.rand(num_preds) < p_assigned))
n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
assigned_idxs = np.where(is_assigned)[0]
rng.shuffle(assigned_idxs)
assigned_idxs = assigned_idxs[0:n_assigned]
assigned_idxs.sort()
is_assigned[:] = 0
is_assigned[assigned_idxs] = True
is_ignore = (torch.from_numpy((rng.rand(num_preds) < p_ignore)) & is_assigned)
gt_inds = torch.zeros(num_preds, dtype=torch.int64)
true_idxs = np.arange(num_gts)
rng.shuffle(true_idxs)
true_idxs = torch.from_numpy(true_idxs)
gt_inds[is_assigned] = true_idxs[:n_assigned]
gt_inds = torch.from_numpy(rng.randint(1, (num_gts + 1), size=num_preds))
gt_inds[is_ignore] = (- 1)
gt_inds[(~ is_assigned)] = 0
max_overlaps[(~ is_assigned)] = 0
if ((p_use_label is True) or (p_use_label < rng.rand())):
if (num_classes == 0):
labels = torch.zeros(num_preds, dtype=torch.int64)
else:
labels = torch.from_numpy(rng.randint(1, (num_classes + 1), size=num_preds))
labels[(~ is_assigned)] = 0
else:
labels = None
self = cls(num_gts, gt_inds, max_overlaps, labels)
return self
def add_gt_(self, gt_labels):
self_inds = torch.arange(1, (len(gt_labels) + 1), dtype=torch.long, device=gt_labels.device)
self.gt_inds = torch.cat([self_inds, self.gt_inds])
self.max_overlaps = torch.cat([self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
if (self.labels is not None):
self.labels = torch.cat([gt_labels, self.labels])
|
class BaseAssigner(metaclass=ABCMeta):
@abstractmethod
def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
pass
|
class CombinedSampler(BaseSampler):
def __init__(self, pos_sampler, neg_sampler, **kwargs):
super(CombinedSampler, self).__init__(**kwargs)
self.pos_sampler = build_sampler(pos_sampler, **kwargs)
self.neg_sampler = build_sampler(neg_sampler, **kwargs)
def _sample_pos(self, **kwargs):
raise NotImplementedError
def _sample_neg(self, **kwargs):
raise NotImplementedError
|
class InstanceBalancedPosSampler(RandomSampler):
def _sample_pos(self, assign_result, num_expected, **kwargs):
pos_inds = torch.nonzero((assign_result.gt_inds > 0))
if (pos_inds.numel() != 0):
pos_inds = pos_inds.squeeze(1)
if (pos_inds.numel() <= num_expected):
return pos_inds
else:
unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
num_gts = len(unique_gt_inds)
num_per_gt = int((round((num_expected / float(num_gts))) + 1))
sampled_inds = []
for i in unique_gt_inds:
inds = torch.nonzero((assign_result.gt_inds == i.item()))
if (inds.numel() != 0):
inds = inds.squeeze(1)
else:
continue
if (len(inds) > num_per_gt):
inds = self.random_choice(inds, num_per_gt)
sampled_inds.append(inds)
sampled_inds = torch.cat(sampled_inds)
if (len(sampled_inds) < num_expected):
num_extra = (num_expected - len(sampled_inds))
extra_inds = np.array(list((set(pos_inds.cpu()) - set(sampled_inds.cpu()))))
if (len(extra_inds) > num_extra):
extra_inds = self.random_choice(extra_inds, num_extra)
extra_inds = torch.from_numpy(extra_inds).to(assign_result.gt_inds.device).long()
sampled_inds = torch.cat([sampled_inds, extra_inds])
elif (len(sampled_inds) > num_expected):
sampled_inds = self.random_choice(sampled_inds, num_expected)
return sampled_inds
|
class IoUBalancedNegSampler(RandomSampler):
'IoU Balanced Sampling.\n\n arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)\n\n Sampling proposals according to their IoU. `floor_fraction` of needed RoIs\n are sampled from proposals whose IoU are lower than `floor_thr` randomly.\n The others are sampled from proposals whose IoU are higher than\n `floor_thr`. These proposals are sampled from some bins evenly, which are\n split by `num_bins` via IoU evenly.\n\n Args:\n num (int): number of proposals.\n pos_fraction (float): fraction of positive proposals.\n floor_thr (float): threshold (minimum) IoU for IoU balanced sampling,\n set to -1 if all using IoU balanced sampling.\n floor_fraction (float): sampling fraction of proposals under floor_thr.\n num_bins (int): number of bins in IoU balanced sampling.\n '
def __init__(self, num, pos_fraction, floor_thr=(- 1), floor_fraction=0, num_bins=3, **kwargs):
super(IoUBalancedNegSampler, self).__init__(num, pos_fraction, **kwargs)
assert ((floor_thr >= 0) or (floor_thr == (- 1)))
assert (0 <= floor_fraction <= 1)
assert (num_bins >= 1)
self.floor_thr = floor_thr
self.floor_fraction = floor_fraction
self.num_bins = num_bins
def sample_via_interval(self, max_overlaps, full_set, num_expected):
max_iou = max_overlaps.max()
iou_interval = ((max_iou - self.floor_thr) / self.num_bins)
per_num_expected = int((num_expected / self.num_bins))
sampled_inds = []
for i in range(self.num_bins):
start_iou = (self.floor_thr + (i * iou_interval))
end_iou = (self.floor_thr + ((i + 1) * iou_interval))
tmp_set = set(np.where(np.logical_and((max_overlaps >= start_iou), (max_overlaps < end_iou)))[0])
tmp_inds = list((tmp_set & full_set))
if (len(tmp_inds) > per_num_expected):
tmp_sampled_set = self.random_choice(tmp_inds, per_num_expected)
else:
tmp_sampled_set = np.array(tmp_inds, dtype=int)  # np.int was removed in NumPy 1.24
sampled_inds.append(tmp_sampled_set)
sampled_inds = np.concatenate(sampled_inds)
if (len(sampled_inds) < num_expected):
num_extra = (num_expected - len(sampled_inds))
extra_inds = np.array(list((full_set - set(sampled_inds))))
if (len(extra_inds) > num_extra):
extra_inds = self.random_choice(extra_inds, num_extra)
sampled_inds = np.concatenate([sampled_inds, extra_inds])
return sampled_inds
def _sample_neg(self, assign_result, num_expected, **kwargs):
neg_inds = torch.nonzero((assign_result.gt_inds == 0))
if (neg_inds.numel() != 0):
neg_inds = neg_inds.squeeze(1)
if (len(neg_inds) <= num_expected):
return neg_inds
else:
max_overlaps = assign_result.max_overlaps.cpu().numpy()
neg_set = set(neg_inds.cpu().numpy())
if (self.floor_thr > 0):
floor_set = set(np.where(np.logical_and((max_overlaps >= 0), (max_overlaps < self.floor_thr)))[0])
iou_sampling_set = set(np.where((max_overlaps >= self.floor_thr))[0])
elif (self.floor_thr == 0):
floor_set = set(np.where((max_overlaps == 0))[0])
iou_sampling_set = set(np.where((max_overlaps > self.floor_thr))[0])
else:
floor_set = set()
iou_sampling_set = set(np.where((max_overlaps > self.floor_thr))[0])
self.floor_thr = 0
floor_neg_inds = list((floor_set & neg_set))
iou_sampling_neg_inds = list((iou_sampling_set & neg_set))
num_expected_iou_sampling = int((num_expected * (1 - self.floor_fraction)))
if (len(iou_sampling_neg_inds) > num_expected_iou_sampling):
if (self.num_bins >= 2):
iou_sampled_inds = self.sample_via_interval(max_overlaps, set(iou_sampling_neg_inds), num_expected_iou_sampling)
else:
iou_sampled_inds = self.random_choice(iou_sampling_neg_inds, num_expected_iou_sampling)
else:
iou_sampled_inds = np.array(iou_sampling_neg_inds, dtype=int)  # np.int was removed in NumPy 1.24
num_expected_floor = (num_expected - len(iou_sampled_inds))
if (len(floor_neg_inds) > num_expected_floor):
sampled_floor_inds = self.random_choice(floor_neg_inds, num_expected_floor)
else:
sampled_floor_inds = np.array(floor_neg_inds, dtype=int)  # np.int was removed in NumPy 1.24
sampled_inds = np.concatenate((sampled_floor_inds, iou_sampled_inds))
if (len(sampled_inds) < num_expected):
num_extra = (num_expected - len(sampled_inds))
extra_inds = np.array(list((neg_set - set(sampled_inds))))
if (len(extra_inds) > num_extra):
extra_inds = self.random_choice(extra_inds, num_extra)
sampled_inds = np.concatenate((sampled_inds, extra_inds))
sampled_inds = torch.from_numpy(sampled_inds).long().to(assign_result.gt_inds.device)
return sampled_inds
|
class OHEMSampler(BaseSampler):
'Online Hard Example Mining Sampler described in [1]_.\n\n References:\n .. [1] https://arxiv.org/pdf/1604.03540.pdf\n '
def __init__(self, num, pos_fraction, context, neg_pos_ub=(- 1), add_gt_as_proposals=True, **kwargs):
super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
if (not hasattr(context, 'num_stages')):
self.bbox_roi_extractor = context.bbox_roi_extractor
self.bbox_head = context.bbox_head
else:
self.bbox_roi_extractor = context.bbox_roi_extractor[context.current_stage]
self.bbox_head = context.bbox_head[context.current_stage]
def hard_mining(self, inds, num_expected, bboxes, labels, feats):
with torch.no_grad():
rois = bbox2roi([bboxes])
bbox_feats = self.bbox_roi_extractor(feats[:self.bbox_roi_extractor.num_inputs], rois)
(cls_score, _) = self.bbox_head(bbox_feats)
loss = self.bbox_head.loss(cls_score=cls_score, bbox_pred=None, labels=labels, label_weights=cls_score.new_ones(cls_score.size(0)), bbox_targets=None, bbox_weights=None, reduction_override='none')['loss_cls']
(_, topk_loss_inds) = loss.topk(num_expected)
return inds[topk_loss_inds]
def _sample_pos(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs):
pos_inds = torch.nonzero((assign_result.gt_inds > 0))
if (pos_inds.numel() != 0):
pos_inds = pos_inds.squeeze(1)
if (pos_inds.numel() <= num_expected):
return pos_inds
else:
return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], assign_result.labels[pos_inds], feats)
def _sample_neg(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs):
neg_inds = torch.nonzero((assign_result.gt_inds == 0))
if (neg_inds.numel() != 0):
neg_inds = neg_inds.squeeze(1)
if (len(neg_inds) <= num_expected):
return neg_inds
else:
return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], assign_result.labels[neg_inds], feats)
|
class PseudoSampler(BaseSampler):
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
raise NotImplementedError
def _sample_neg(self, **kwargs):
raise NotImplementedError
def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
pos_inds = torch.nonzero((assign_result.gt_inds > 0)).squeeze((- 1)).unique()
neg_inds = torch.nonzero((assign_result.gt_inds == 0)).squeeze((- 1)).unique()
gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags)
return sampling_result
|
class RandomSampler(BaseSampler):
def __init__(self, num, pos_fraction, neg_pos_ub=(- 1), add_gt_as_proposals=True, **kwargs):
from mmdet.core.bbox import demodata
super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
self.rng = demodata.ensure_rng(kwargs.get('rng', None))
def random_choice(self, gallery, num):
'Random select some elements from the gallery.\n\n If `gallery` is a Tensor, the returned indices will be a Tensor;\n If `gallery` is a ndarray or list, the returned indices will be a\n ndarray.\n\n Args:\n gallery (Tensor | ndarray | list): indices pool.\n num (int): expected sample num.\n\n Returns:\n Tensor or ndarray: sampled indices.\n '
assert (len(gallery) >= num)
is_tensor = isinstance(gallery, torch.Tensor)
if (not is_tensor):
gallery = torch.tensor(gallery, dtype=torch.long, device=torch.cuda.current_device())
perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
rand_inds = gallery[perm]
if (not is_tensor):
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result, num_expected, **kwargs):
'Randomly sample some positive samples.'
pos_inds = torch.nonzero((assign_result.gt_inds > 0))
if (pos_inds.numel() != 0):
pos_inds = pos_inds.squeeze(1)
if (pos_inds.numel() <= num_expected):
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result, num_expected, **kwargs):
'Randomly sample some negative samples.'
neg_inds = torch.nonzero((assign_result.gt_inds == 0))
if (neg_inds.numel() != 0):
neg_inds = neg_inds.squeeze(1)
if (len(neg_inds) <= num_expected):
return neg_inds
else:
return self.random_choice(neg_inds, num_expected)
|
def wider_face_classes():
return ['face']
|
def voc_classes():
return ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
|
def imagenet_det_classes():
return ['accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito', 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle', 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker', 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew', 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly', 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig', 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog', 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart', 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim', 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse', 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle', 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can', 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace', 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume', 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine', 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse', 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator', 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler', 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver', 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', 'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer', 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine', 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet', 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin', 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', 'whale', 'wine_bottle', 'zebra']
|
def imagenet_vid_classes():
return ['airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car', 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda', 'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit', 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle', 'watercraft', 'whale', 'zebra']
|
def coco_classes():
return ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
|
def cityscapes_classes():
return ['person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle']
|
def get_classes(dataset):
'Get class names of a dataset.'
alias2name = {}
for (name, aliases) in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if (dataset in alias2name):
labels = eval((alias2name[dataset] + '_classes()'))
else:
raise ValueError('Unrecognized dataset: {}'.format(dataset))
else:
raise TypeError('dataset must be a str, but got {}'.format(type(dataset)))
return labels
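# Hedged usage sketch: aliases resolve through the module-level
# `dataset_aliases` mapping, so get_classes('coco') returns the 80-name COCO
# list above and get_classes('voc') the 20 PASCAL VOC names.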
|
class EvalHook(Hook):
'Evaluation hook.\n\n Attributes:\n dataloader (DataLoader): A PyTorch dataloader.\n interval (int): Evaluation interval (by epochs). Default: 1.\n '
def __init__(self, dataloader, interval=1, **eval_kwargs):
if (not isinstance(dataloader, DataLoader)):
raise TypeError('dataloader must be a pytorch DataLoader, but got {}'.format(type(dataloader)))
self.dataloader = dataloader
self.interval = interval
self.eval_kwargs = eval_kwargs
def after_train_epoch(self, runner):
if (not self.every_n_epochs(runner, self.interval)):
return
from mmdet.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader, show=False)
self.evaluate(runner, results)
def evaluate(self, runner, results):
eval_res = self.dataloader.dataset.evaluate(results, logger=runner.logger, **self.eval_kwargs)
for (name, val) in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
|
class DistEvalHook(EvalHook):
'Distributed evaluation hook.\n\n Attributes:\n dataloader (DataLoader): A PyTorch dataloader.\n interval (int): Evaluation interval (by epochs). Default: 1.\n tmpdir (str | None): Temporary directory to save the results of all\n processes. Default: None.\n gpu_collect (bool): Whether to use gpu or cpu to collect results.\n Default: False.\n '
def __init__(self, dataloader, interval=1, gpu_collect=False, **eval_kwargs):
if (not isinstance(dataloader, DataLoader)):
raise TypeError('dataloader must be a pytorch DataLoader, but got {}'.format(type(dataloader)))
self.dataloader = dataloader
self.interval = interval
self.gpu_collect = gpu_collect
self.eval_kwargs = eval_kwargs
def after_train_epoch(self, runner):
if (not self.every_n_epochs(runner, self.interval)):
return
from mmdet.apis import multi_gpu_test
results = multi_gpu_test(runner.model, self.dataloader, tmpdir=osp.join(runner.work_dir, '.eval_hook'), gpu_collect=self.gpu_collect)
if (runner.rank == 0):
print('\n')
self.evaluate(runner, results)
|
def auto_fp16(apply_to=None, out_fp32=False):
"Decorator to enable fp16 training automatically.\n\n This decorator is useful when you write custom modules and want to support\n mixed precision training. If inputs arguments are fp32 tensors, they will\n be converted to fp16 automatically. Arguments other than fp32 tensors are\n ignored.\n\n Args:\n apply_to (Iterable, optional): The argument names to be converted.\n `None` indicates all arguments.\n out_fp32 (bool): Whether to convert the output back to fp32.\n\n :Example:\n\n class MyModule1(nn.Module)\n\n # Convert x and y to fp16\n @auto_fp16()\n def forward(self, x, y):\n pass\n\n class MyModule2(nn.Module):\n\n # convert pred to fp16\n @auto_fp16(apply_to=('pred', ))\n def do_something(self, pred, others):\n pass\n "
def auto_fp16_wrapper(old_func):
@functools.wraps(old_func)
def new_func(*args, **kwargs):
if (not isinstance(args[0], torch.nn.Module)):
raise TypeError('@auto_fp16 can only be used to decorate the method of nn.Module')
if (not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled)):
return old_func(*args, **kwargs)
args_info = getfullargspec(old_func)
args_to_cast = (args_info.args if (apply_to is None) else apply_to)
new_args = []
if args:
arg_names = args_info.args[:len(args)]
for (i, arg_name) in enumerate(arg_names):
if (arg_name in args_to_cast):
new_args.append(cast_tensor_type(args[i], torch.float, torch.half))
else:
new_args.append(args[i])
new_kwargs = {}
if kwargs:
for (arg_name, arg_value) in kwargs.items():
if (arg_name in args_to_cast):
new_kwargs[arg_name] = cast_tensor_type(arg_value, torch.float, torch.half)
else:
new_kwargs[arg_name] = arg_value
output = old_func(*new_args, **new_kwargs)
if out_fp32:
output = cast_tensor_type(output, torch.half, torch.float)
return output
return new_func
return auto_fp16_wrapper
|