repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
ReChorus | ReChorus-master/src/models/sequential/ComiRec.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" ComiRec
Reference:
"Controllable Multi-Interest Framework for Recommendation"
Cen et al., KDD'2020.
CMD example:
python main.py --model_name ComiRec --emb_size 64 --lr 1e-3 --l2 1e-6 --attn_size 8 --K 4 --add_pos 1 \
--history_max 20 --dataset 'Grocery_and_Gourmet_Food'
"""
import torch
import torch.nn as nn
import numpy as np
from models.BaseModel import SequentialModel
from utils import layers
class ComiRec(SequentialModel):
    """ComiRec-SA: Controllable Multi-Interest framework (Cen et al., KDD'2020).

    Extracts K interest vectors from the interaction history via self-attentive
    pooling; candidate items are scored against the best-matching interest.
    """
    reader = 'SeqReader'    # data reader class used for sequential data
    runner = 'BaseRunner'   # training/evaluation runner
    extra_log_args = ['emb_size', 'attn_size', 'K']  # args appended to log/model names

    @staticmethod
    def parse_model_args(parser):
        """Add model-specific command-line arguments."""
        parser.add_argument('--emb_size', type=int, default=64,
                            help='Size of embedding vectors.')
        parser.add_argument('--attn_size', type=int, default=8,
                            help='Size of attention vectors.')
        parser.add_argument('--K', type=int, default=2,
                            help='Number of hidden intent.')
        parser.add_argument('--add_pos', type=int, default=1,
                            help='Whether add position embedding.')
        return SequentialModel.parse_model_args(parser)

    def __init__(self, args, corpus):
        super().__init__(args, corpus)
        self.emb_size = args.emb_size
        self.attn_size = args.attn_size
        self.K = args.K
        self.add_pos = args.add_pos
        self.max_his = args.history_max
        # position indices 0..max_his-1, kept on the model device
        self.len_range = torch.from_numpy(np.arange(self.max_his)).to(self.device)
        self._define_params()
        self.apply(self.init_weights)

    def _define_params(self):
        self.i_embeddings = nn.Embedding(self.item_num, self.emb_size)
        if self.add_pos:
            # +1 slots because positions are counted from the sequence end,
            # starting at 1 (0 is reserved for padding steps)
            self.p_embeddings = nn.Embedding(self.max_his + 1, self.emb_size)
        # two-layer attention scorer producing K scores per history step
        self.W1 = nn.Linear(self.emb_size, self.attn_size)
        self.W2 = nn.Linear(self.attn_size, self.K)

    def forward(self, feed_dict):
        """Compute item scores.

        Train: picks the single interest best matching the positive target and
        scores all candidates against it. Eval: takes, per candidate, the max
        score over all K interests.
        """
        self.check_list = []
        i_ids = feed_dict['item_id']  # [batch_size, -1]
        history = feed_dict['history_items']  # [batch_size, history_max]
        lengths = feed_dict['lengths']  # [batch_size]
        batch_size, seq_len = history.shape

        valid_his = (history > 0).long()  # 1 for real steps, 0 for padding
        his_vectors = self.i_embeddings(history)

        if self.add_pos:
            # position = distance from the sequence end (most recent step -> 1);
            # padded steps are zeroed by valid_his
            position = (lengths[:, None] - self.len_range[None, :seq_len]) * valid_his
            pos_vectors = self.p_embeddings(position)
            his_pos_vectors = his_vectors + pos_vectors
        else:
            his_pos_vectors = his_vectors

        # Self-attention: K attention heads over the history
        attn_score = self.W2(self.W1(his_pos_vectors).tanh())  # bsz, his_max, K
        attn_score = attn_score.masked_fill(valid_his.unsqueeze(-1) == 0, -np.inf)
        attn_score = attn_score.transpose(-1, -2)  # bsz, K, his_max
        # subtract global max before softmax for numerical stability
        attn_score = (attn_score - attn_score.max()).softmax(dim=-1)
        # fully-masked rows yield NaN after softmax; zero them out
        attn_score = attn_score.masked_fill(torch.isnan(attn_score), 0)
        # NOTE: aggregation uses his_vectors (without position), while scores
        # were computed on the position-augmented vectors
        interest_vectors = (his_vectors[:, None, :, :] * attn_score[:, :, :, None]).sum(-2)  # bsz, K, emb

        i_vectors = self.i_embeddings(i_ids)
        if feed_dict['phase'] == 'train':
            target_vector = i_vectors[:, 0]  # bsz, emb -- first candidate is the positive item
            target_pred = (interest_vectors * target_vector[:, None, :]).sum(-1)  # bsz, K
            idx_select = target_pred.max(-1)[1]  # bsz -- index of best-matching interest
            user_vector = interest_vectors[torch.arange(batch_size), idx_select, :]  # bsz, emb
            prediction = (user_vector[:, None, :] * i_vectors).sum(-1)
        else:
            prediction = (interest_vectors[:, None, :, :] * i_vectors[:, :, None, :]).sum(-1)  # bsz, -1, K
            prediction = prediction.max(-1)[0]  # bsz, -1
        return {'prediction': prediction.view(batch_size, -1)}
| 3,848 | 39.946809 | 107 | py |
ReChorus | ReChorus-master/src/models/sequential/KDA.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" KDA
Reference:
"Toward Dynamic User Intention: Temporal Evolutionary Effects of Item Relations in Sequential Recommendation"
Chenyang Wang et al., TOIS'2021.
CMD example:
python main.py --model_name KDA --emb_size 64 --include_attr 1 --freq_rand 0 --lr 1e-3 --l2 1e-6 --num_heads 4 \
--history_max 20 --dataset 'Grocery_and_Gourmet_Food'
"""
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from utils import layers
from models.BaseModel import SequentialModel
from helpers.KDAReader import KDAReader
class KDA(SequentialModel):
    """KDA (Wang et al., TOIS'2021): sequential recommendation with temporal
    evolutionary effects of item relations.

    Relation-specific temporal decay is modeled in the frequency domain
    (learnable DFT coefficients, optionally initialized from data), and the
    recommendation loss is jointly trained with a DistMult KG objective.
    """
    reader = 'KDAReader'
    runner = 'BaseRunner'
    extra_log_args = ['num_layers', 'num_heads', 'gamma', 'freq_rand', 'include_val']

    @staticmethod
    def parse_model_args(parser):
        """Add model-specific command-line arguments."""
        parser.add_argument('--emb_size', type=int, default=64,
                            help='Size of embedding vectors.')
        parser.add_argument('--neg_head_p', type=float, default=0.5,
                            help='The probability of sampling negative head entity.')
        parser.add_argument('--num_layers', type=int, default=1,
                            help='Number of self-attention layers.')
        parser.add_argument('--num_heads', type=int, default=1,
                            help='Number of attention heads.')
        parser.add_argument('--gamma', type=float, default=-1,
                            help='Coefficient of KG loss (-1 for auto-determine).')
        parser.add_argument('--attention_size', type=int, default=10,
                            help='Size of attention hidden space.')
        parser.add_argument('--pooling', type=str, default='average',
                            help='Method of pooling relational history embeddings: average, max, attention')
        parser.add_argument('--include_val', type=int, default=1,
                            help='Whether include relation value in the relation representation')
        return SequentialModel.parse_model_args(parser)

    def __init__(self, args, corpus):
        super().__init__(args, corpus)
        self.relation_num = corpus.n_relations
        self.entity_num = corpus.n_entities
        self.freq_x = corpus.freq_x  # DFT coefficients from the reader; R * n_freq (see copy_ below)
        self.freq_dim = args.n_dft // 2 + 1  # number of non-negative frequency bins
        self.freq_rand = args.freq_rand
        self.emb_size = args.emb_size
        self.neg_head_p = args.neg_head_p
        self.layer_num = args.num_layers
        self.head_num = args.num_heads
        self.attention_size = args.attention_size
        self.pooling = args.pooling.lower()
        self.include_val = args.include_val
        self.gamma = args.gamma
        if self.gamma < 0:
            # auto-balance: weight KG loss by the ratio of KG triplets to interactions
            self.gamma = len(corpus.relation_df) / len(corpus.all_df)
        self._define_params()
        self.apply(self.init_weights)
        if not self.freq_rand:
            # initialize frequency embeddings from the empirical DFT instead
            # of random initialization
            dft_freq_real = torch.tensor(np.real(self.freq_x))  # R * n_freq
            dft_freq_imag = torch.tensor(np.imag(self.freq_x))
            self.relational_dynamic_aggregation.freq_real.weight.data.copy_(dft_freq_real)
            self.relational_dynamic_aggregation.freq_imag.weight.data.copy_(dft_freq_imag)

    def _define_params(self):
        self.user_embeddings = nn.Embedding(self.user_num, self.emb_size)
        self.entity_embeddings = nn.Embedding(self.entity_num, self.emb_size)
        self.relation_embeddings = nn.Embedding(self.relation_num, self.emb_size)
        # First-level aggregation: relation-aware, time-decayed history pooling
        self.relational_dynamic_aggregation = RelationalDynamicAggregation(
            self.relation_num, self.freq_dim, self.relation_embeddings, self.include_val, self.device
        )
        # Second-level aggregation: self-attention over the relational contexts
        self.attn_head = layers.MultiHeadAttention(self.emb_size, self.head_num, bias=False)
        self.W1 = nn.Linear(self.emb_size, self.emb_size)
        self.W2 = nn.Linear(self.emb_size, self.emb_size)
        self.dropout_layer = nn.Dropout(self.dropout)
        self.layer_norm = nn.LayerNorm(self.emb_size)
        # Pooling over the relation dimension
        if self.pooling == 'attention':
            self.A = nn.Linear(self.emb_size, self.attention_size)
            self.A_out = nn.Linear(self.attention_size, 1, bias=False)
        # Prediction
        self.item_bias = nn.Embedding(self.item_num, 1)

    def forward(self, feed_dict):
        """Return recommendation scores; during training also DistMult KG scores."""
        self.check_list = []
        prediction = self.rec_forward(feed_dict)
        out_dict = {'prediction': prediction}
        if feed_dict['phase'] == 'train':
            kg_prediction = self.kg_forward(feed_dict)
            out_dict['kg_prediction'] = kg_prediction
        return out_dict

    def rec_forward(self, feed_dict):
        """Score candidate items for each user (shapes noted per line)."""
        u_ids = feed_dict['user_id']  # B
        i_ids = feed_dict['item_id']  # B * -1
        v_ids = feed_dict['item_val']  # B * -1 * R
        history = feed_dict['history_items']  # B * H
        delta_t_n = feed_dict['history_delta_t'].float()  # B * H (normalized time gaps)
        batch_size, seq_len = history.shape

        u_vectors = self.user_embeddings(u_ids)
        i_vectors = self.entity_embeddings(i_ids)
        v_vectors = self.entity_embeddings(v_ids)  # B * -1 * R * V
        his_vectors = self.entity_embeddings(history)  # B * H * V

        """
        Relational Dynamic History Aggregation
        """
        valid_mask = (history > 0).view(batch_size, 1, seq_len, 1)
        context = self.relational_dynamic_aggregation(
            his_vectors, delta_t_n, i_vectors, v_vectors, valid_mask)  # B * -1 * R * V

        """
        Multi-layer Self-attention
        """
        for i in range(self.layer_num):
            residual = context
            # self-attention
            context = self.attn_head(context, context, context)
            # feed forward
            context = self.W1(context)
            context = self.W2(context.relu())
            # dropout, residual and layer_norm
            context = self.dropout_layer(context)
            context = self.layer_norm(residual + context)

        """
        Pooling Layer (over the relation dimension R)
        """
        if self.pooling == 'attention':
            # user-conditioned attention over relations
            query_vectors = context * u_vectors[:, None, None, :]  # B * -1 * R * V
            user_attention = self.A_out(self.A(query_vectors).tanh()).squeeze(-1)  # B * -1 * R
            user_attention = (user_attention - user_attention.max()).softmax(dim=-1)
            his_vector = (context * user_attention[:, :, :, None]).sum(dim=-2)  # B * -1 * V
        elif self.pooling == 'max':
            his_vector = context.max(dim=-2).values  # B * -1 * V
        else:
            his_vector = context.mean(dim=-2)  # B * -1 * V

        """
        Prediction: dot product of (user + pooled history) with the item, plus bias
        """
        i_bias = self.item_bias(i_ids).squeeze(-1)
        prediction = ((u_vectors[:, None, :] + his_vector) * i_vectors).sum(dim=-1)
        prediction = prediction + i_bias
        return prediction.view(feed_dict['batch_size'], -1)

    def kg_forward(self, feed_dict):
        """DistMult scoring of (head, relation[, value], tail) triplets."""
        head_ids = feed_dict['head_id'].long()  # B * -1
        tail_ids = feed_dict['tail_id'].long()  # B * -1
        value_ids = feed_dict['value_id'].long()  # B
        relation_ids = feed_dict['relation_id'].long()  # B

        head_vectors = self.entity_embeddings(head_ids)
        tail_vectors = self.entity_embeddings(tail_ids)
        value_vectors = self.entity_embeddings(value_ids)
        relation_vectors = self.relation_embeddings(relation_ids)

        # DistMult: score = sum(h * r * t); optionally fold the attribute
        # value into the relation representation
        if self.include_val:
            prediction = (head_vectors * (relation_vectors + value_vectors)[:, None, :] * tail_vectors).sum(-1)
        else:
            prediction = (head_vectors * relation_vectors[:, None, :] * tail_vectors).sum(-1)
        return prediction

    def loss(self, out_dict):
        """Softmax-weighted BPR loss on recommendations + gamma * same loss on KG.

        Column 0 of each prediction matrix is the positive instance; the rest
        are negatives, weighted by a softmax over their scores.
        """
        predictions = out_dict['prediction']
        pos_pred, neg_pred = predictions[:, 0], predictions[:, 1:]
        neg_softmax = (neg_pred - neg_pred.max()).softmax(dim=1)
        rec_loss = -((pos_pred[:, None] - neg_pred).sigmoid() * neg_softmax).sum(dim=1).log().mean()
        predictions = out_dict['kg_prediction']
        pos_pred, neg_pred = predictions[:, 0], predictions[:, 1:]
        neg_softmax = (neg_pred - neg_pred.max()).softmax(dim=1)
        kg_loss = -((pos_pred[:, None] - neg_pred).sigmoid() * neg_softmax).sum(dim=1).log().mean()
        loss = rec_loss + self.gamma * kg_loss
        return loss
class Dataset(SequentialModel.Dataset):
def __init__(self, model, corpus, phase):
super().__init__(model, corpus, phase)
if self.phase == 'train':
self.kg_data, self.neg_heads, self.neg_tails = None, None, None
# Prepare item-to-value dict
item_val = self.corpus.item_meta_df.copy()
item_val[self.corpus.item_relations] = 0 # set the value of natural item relations to None
for idx, r in enumerate(self.corpus.attr_relations):
base = self.corpus.n_items + np.sum(self.corpus.attr_max[:idx])
item_val[r] = item_val[r].apply(lambda x: x + base).astype(int)
item_vals = item_val[self.corpus.relations].values # this ensures the order is consistent to relations
self.item_val_dict = dict()
for item, vals in zip(item_val['item_id'].values, item_vals.tolist()):
self.item_val_dict[item] = [0] + vals # the first dimension None for the virtual relation
def _get_feed_dict(self, index):
feed_dict = super()._get_feed_dict(index)
feed_dict['item_val'] = [self.item_val_dict[item] for item in feed_dict['item_id']]
delta_t = self.data['time'][index] - feed_dict['history_times']
feed_dict['history_delta_t'] = KDAReader.norm_time(delta_t, self.corpus.t_scalar)
if self.phase == 'train':
feed_dict['head_id'] = np.concatenate([[self.kg_data['head'][index]], self.neg_heads[index]])
feed_dict['tail_id'] = np.concatenate([[self.kg_data['tail'][index]], self.neg_tails[index]])
feed_dict['relation_id'] = self.kg_data['relation'][index]
feed_dict['value_id'] = self.kg_data['value'][index]
return feed_dict
def generate_kg_data(self) -> pd.DataFrame:
rec_data_size = len(self)
replace = (rec_data_size > len(self.corpus.relation_df))
kg_data = self.corpus.relation_df.sample(n=rec_data_size, replace=replace).reset_index(drop=True)
kg_data['value'] = np.zeros(len(kg_data), dtype=int) # default for None
tail_select = kg_data['tail'].apply(lambda x: x < self.corpus.n_items)
item_item_df = kg_data[tail_select]
item_attr_df = kg_data.drop(item_item_df.index)
item_attr_df['value'] = item_attr_df['tail'].values
sample_tails = list() # sample items sharing the same attribute
for head, val in zip(item_attr_df['head'].values, item_attr_df['tail'].values):
share_attr_items = self.corpus.share_attr_dict[val]
tail_idx = np.random.randint(len(share_attr_items))
sample_tails.append(share_attr_items[tail_idx])
item_attr_df['tail'] = sample_tails
kg_data = pd.concat([item_item_df, item_attr_df], ignore_index=True)
return kg_data
def actions_before_epoch(self):
super().actions_before_epoch()
self.kg_data = self.generate_kg_data()
heads, tails = self.kg_data['head'].values, self.kg_data['tail'].values
relations, vals = self.kg_data['relation'].values, self.kg_data['value'].values
self.neg_heads = np.random.randint(1, self.corpus.n_items, size=(len(self.kg_data), self.model.num_neg))
self.neg_tails = np.random.randint(1, self.corpus.n_items, size=(len(self.kg_data), self.model.num_neg))
for i in range(len(self.kg_data)):
item_item_relation = (tails[i] <= self.corpus.n_items)
for j in range(self.model.num_neg):
if np.random.rand() < self.model.neg_head_p: # sample negative head
tail = tails[i] if item_item_relation else vals[i]
while (self.neg_heads[i][j], relations[i], tail) in self.corpus.triplet_set:
self.neg_heads[i][j] = np.random.randint(1, self.corpus.n_items)
self.neg_tails[i][j] = tails[i]
else: # sample negative tail
head = heads[i] if item_item_relation else self.neg_tails[i][j]
tail = self.neg_tails[i][j] if item_item_relation else vals[i]
while (head, relations[i], tail) in self.corpus.triplet_set:
self.neg_tails[i][j] = np.random.randint(1, self.corpus.n_items)
head = heads[i] if item_item_relation else self.neg_tails[i][j]
tail = self.neg_tails[i][j] if item_item_relation else vals[i]
self.neg_heads[i][j] = heads[i]
class RelationalDynamicAggregation(nn.Module):
    """First-level aggregation: relation-conditioned attention over history
    items, modulated by a learnable temporal decay evaluated via inverse DFT.
    """

    def __init__(self, n_relation, n_freq, relation_embeddings, include_val, device):
        super().__init__()
        self.relation_embeddings = relation_embeddings  # shared with the parent model
        self.include_val = include_val
        # learnable real/imaginary DFT coefficients per relation
        self.freq_real = nn.Embedding(n_relation, n_freq)
        self.freq_imag = nn.Embedding(n_relation, n_freq)
        freq = np.linspace(0, 1, n_freq) / 2.
        # mirrored +/- frequencies, paired with the conjugate-symmetric
        # coefficients in idft_decay to produce a real-valued signal
        self.freqs = torch.from_numpy(np.concatenate((freq, -freq))).to(device).float()
        self.relation_range = torch.from_numpy(np.arange(n_relation)).to(device)

    def idft_decay(self, delta_t):
        """Evaluate each relation's decay function at time gaps delta_t (B * H)."""
        real, imag = self.freq_real(self.relation_range), self.freq_imag(self.relation_range)
        # create conjugate symmetric to ensure real number output
        x_real = torch.cat([real, real], dim=-1)
        x_imag = torch.cat([imag, -imag], dim=-1)
        w = 2. * np.pi * self.freqs * delta_t.unsqueeze(-1)  # B * H * n_freq
        real_part = w.cos()[:, :, None, :] * x_real[None, None, :, :]  # B * H * R * n_freq
        imag_part = w.sin()[:, :, None, :] * x_imag[None, None, :, :]
        decay = (real_part - imag_part).mean(dim=-1) / 2.  # B * H * R
        return decay.float()

    def forward(self, seq, delta_t_n, target, target_value, valid_mask):
        """Aggregate history (seq) per relation for each target item.

        :param seq: history item embeddings, B * H * V
        :param delta_t_n: normalized time gaps, B * H
        :param target: target item embeddings, B * -1 * V
        :param target_value: attribute-value embeddings of targets, B * -1 * R * V
        :param valid_mask: B * 1 * H * 1 mask of real (non-padding) steps
        :return: relational context, B * -1 * R * V
        """
        r_vectors = self.relation_embeddings(self.relation_range)  # R * V
        if self.include_val:
            # relation representation = relation + target's attribute value
            rv_vectors = r_vectors[None, None, :, :] + target_value
            ri_vectors = rv_vectors * target[:, :, None, :]  # B * -1 * R * V
        else:
            ri_vectors = r_vectors[None, None, :, :] * target[:, :, None, :]  # B * -1 * R * V
        attention = (seq[:, None, :, None, :] * ri_vectors[:, :, None, :, :]).sum(-1)  # B * -1 * H * R
        # shift masked softmax (subtract max for stability, mask pads with -inf)
        attention = attention - attention.max()
        attention = attention.masked_fill(valid_mask == 0, -np.inf).softmax(dim=-2)
        # temporal evolution: decay weights clamped to [0, 1], pads zeroed
        decay = self.idft_decay(delta_t_n).clamp(0, 1).unsqueeze(1).masked_fill(valid_mask==0, 0.)  # B * 1 * H * R
        attention = attention * decay
        # attentional aggregation of history items
        context = (seq[:, None, :, None, :] * attention[:, :, :, :, None]).sum(-3)  # B * -1 * R * V
        return context
| 15,388 | 49.621711 | 116 | py |
ReChorus | ReChorus-master/src/models/sequential/GRU4Rec.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" GRU4Rec
Reference:
"Session-based Recommendations with Recurrent Neural Networks"
Hidasi et al., ICLR'2016.
CMD example:
python main.py --model_name GRU4Rec --emb_size 64 --hidden_size 128 --lr 1e-3 --l2 1e-4 --history_max 20 \
--dataset 'Grocery_and_Gourmet_Food'
"""
import torch
import torch.nn as nn
from models.BaseModel import SequentialModel
class GRU4Rec(SequentialModel):
    """GRU4Rec (Hidasi et al., ICLR'2016): session-based recommendation with a
    GRU over the embedded interaction history.
    """
    reader = 'SeqReader'
    runner = 'BaseRunner'
    extra_log_args = ['emb_size', 'hidden_size']

    @staticmethod
    def parse_model_args(parser):
        """Add model-specific command-line arguments."""
        parser.add_argument('--emb_size', type=int, default=64,
                            help='Size of embedding vectors.')
        parser.add_argument('--hidden_size', type=int, default=64,
                            help='Size of hidden vectors in GRU.')
        return SequentialModel.parse_model_args(parser)

    def __init__(self, args, corpus):
        super().__init__(args, corpus)
        self.emb_size = args.emb_size
        self.hidden_size = args.hidden_size
        self._define_params()
        self.apply(self.init_weights)

    def _define_params(self):
        self.i_embeddings = nn.Embedding(self.item_num, self.emb_size)
        self.rnn = nn.GRU(input_size=self.emb_size, hidden_size=self.hidden_size, batch_first=True)
        # self.pred_embeddings = nn.Embedding(self.item_num, self.hidden_size)
        # project the GRU state back to the item-embedding space for scoring
        self.out = nn.Linear(self.hidden_size, self.emb_size)

    def forward(self, feed_dict):
        """Score candidate items with the final GRU state of each history."""
        self.check_list = []
        i_ids = feed_dict['item_id']  # [batch_size, -1]
        history = feed_dict['history_items']  # [batch_size, history_max]
        lengths = feed_dict['lengths']  # [batch_size]

        his_vectors = self.i_embeddings(history)

        # Sort and Pack: pack_padded_sequence needs descending lengths; topk
        # yields that ordering together with the permutation indices
        sort_his_lengths, sort_idx = torch.topk(lengths, k=len(lengths))
        sort_his_vectors = his_vectors.index_select(dim=0, index=sort_idx)
        history_packed = torch.nn.utils.rnn.pack_padded_sequence(
            sort_his_vectors, sort_his_lengths.cpu(), batch_first=True)

        # RNN
        output, hidden = self.rnn(history_packed, None)

        # Unsort: ascending topk of the permutation inverts it, restoring the
        # original batch order
        unsort_idx = torch.topk(sort_idx, k=len(lengths), largest=False)[1]
        rnn_vector = hidden[-1].index_select(dim=0, index=unsort_idx)

        # Predicts
        # pred_vectors = self.pred_embeddings(i_ids)
        pred_vectors = self.i_embeddings(i_ids)
        rnn_vector = self.out(rnn_vector)
        prediction = (rnn_vector[:, None, :] * pred_vectors).sum(-1)
        return {'prediction': prediction.view(feed_dict['batch_size'], -1)}
| 2,685 | 35.794521 | 110 | py |
ReChorus | ReChorus-master/src/models/sequential/TiSASRec.py | # -*- coding: UTF-8 -*-
# @Author : Chenyang Wang
# @Email : THUwangcy@gmail.com
""" TiSASRec
Reference:
"Time Interval Aware Self-Attention for Sequential Recommendation"
Jiacheng Li et al., WSDM'2020.
CMD example:
python main.py --model_name TiSASRec --emb_size 64 --num_layers 1 --num_heads 1 --lr 1e-4 --l2 1e-6 \
--history_max 20 --dataset 'Grocery_and_Gourmet_Food'
"""
import torch
import torch.nn as nn
import numpy as np
from models.BaseModel import SequentialModel
class TiSASRec(SequentialModel):
    """TiSASRec (Li et al., WSDM'2020): self-attention over the history,
    augmented with both position embeddings and pairwise time-interval
    embeddings (intervals normalized per user).
    """
    reader = 'SeqReader'
    runner = 'BaseRunner'
    extra_log_args = ['emb_size', 'num_layers', 'num_heads', 'time_max']

    @staticmethod
    def parse_model_args(parser):
        """Add model-specific command-line arguments."""
        parser.add_argument('--emb_size', type=int, default=64,
                            help='Size of embedding vectors.')
        parser.add_argument('--num_layers', type=int, default=1,
                            help='Number of self-attention layers.')
        parser.add_argument('--num_heads', type=int, default=4,
                            help='Number of attention heads.')
        parser.add_argument('--time_max', type=int, default=512,
                            help='Max time intervals.')
        return SequentialModel.parse_model_args(parser)

    def __init__(self, args, corpus):
        super().__init__(args, corpus)
        self.emb_size = args.emb_size
        self.max_his = args.history_max
        self.num_layers = args.num_layers
        self.num_heads = args.num_heads
        self.max_time = args.time_max
        self.len_range = torch.from_numpy(np.arange(self.max_his)).to(self.device)

        # Per-user minimum positive time interval, used in forward() to
        # normalize pairwise intervals
        self.user_min_interval = dict()
        for u, user_df in corpus.all_df.groupby('user_id'):
            time_seqs = user_df['time'].values
            interval_matrix = np.abs(time_seqs[:, None] - time_seqs[None, :])
            # push zero intervals (the diagonal / simultaneous events) above
            # any real gap with a large sentinel before taking the min
            min_interval = np.min(interval_matrix + (interval_matrix <= 0) * 0xFFFF)
            self.user_min_interval[u] = min_interval

        self._define_params()
        self.apply(self.init_weights)

    def _define_params(self):
        self.i_embeddings = nn.Embedding(self.item_num, self.emb_size)
        # separate key/value embeddings for positions and for time intervals
        self.p_k_embeddings = nn.Embedding(self.max_his + 1, self.emb_size)
        self.p_v_embeddings = nn.Embedding(self.max_his + 1, self.emb_size)
        self.t_k_embeddings = nn.Embedding(self.max_time + 1, self.emb_size)
        self.t_v_embeddings = nn.Embedding(self.max_time + 1, self.emb_size)
        self.transformer_block = nn.ModuleList([
            TimeIntervalTransformerLayer(d_model=self.emb_size, d_ff=self.emb_size, n_heads=self.num_heads,
                                         dropout=self.dropout, kq_same=False)
            for _ in range(self.num_layers)
        ])
def forward(self, feed_dict):
self.check_list = []
i_ids = feed_dict['item_id'] # [batch_size, -1]
i_history = feed_dict['history_items'] # [batch_size, history_max]
t_history = feed_dict['history_times'] # [batch_size, history_max]
user_min_t = feed_dict['user_min_intervals'] # [batch_size]
lengths = feed_dict['lengths'] # [batch_size]
batch_size, seq_len = i_history.shape
valid_his = (i_history > 0).long()
his_vectors = self.i_embeddings(i_history)
# Position embedding
position = (lengths[:, None] - self.len_range[None, :seq_len]) * valid_his
pos_k = self.p_k_embeddings(position)
pos_v = self.p_v_embeddings(position)
# Interval embedding
interval_matrix = (t_history[:, :, None] - t_history[:, None, :]).abs()
interval_matrix = (interval_matrix / user_min_t.view(-1, 1, 1)).long().clamp(0, self.max_time)
inter_k = self.t_k_embeddings(interval_matrix)
inter_v = self.t_v_embeddings(interval_matrix)
# Self-attention
causality_mask = np.tril(np.ones((1, 1, seq_len, seq_len), dtype=np.int))
attn_mask = torch.from_numpy(causality_mask).to(self.device)
# attn_mask = valid_his.view(batch_size, 1, 1, seq_len)
for block in self.transformer_block:
his_vectors = block(his_vectors, pos_k, pos_v, inter_k, inter_v, attn_mask)
his_vectors = his_vectors * valid_his[:, :, None].float()
his_vector = his_vectors[torch.arange(batch_size), lengths - 1, :]
# his_vector = his_vectors.sum(1) / lengths[:, None].float()
# ↑ average pooling is shown to be more effective than the most recent embedding
i_vectors = self.i_embeddings(i_ids)
prediction = (his_vector[:, None, :] * i_vectors).sum(-1)
return {'prediction': prediction.view(batch_size, -1)}
    class Dataset(SequentialModel.Dataset):
        # Extends the base feed dict with the user's minimum time interval,
        # which forward() needs to normalize pairwise intervals.
        def _get_feed_dict(self, index):
            feed_dict = super()._get_feed_dict(index)
            user_id = self.data['user_id'][index]
            min_interval = self.model.user_min_interval[user_id]
            feed_dict['user_min_intervals'] = min_interval
            return feed_dict
class TimeIntervalMultiHeadAttention(nn.Module):
    """Multi-head attention whose keys/values are augmented with absolute
    position embeddings and whose scores/outputs additionally involve pairwise
    time-interval embeddings (TiSASRec).
    """
    def __init__(self, d_model, n_heads, kq_same=False, bias=True):
        super().__init__()
        """
        It also needs position and interaction (time interval) key/value input.
        """
        self.d_model = d_model
        self.h = n_heads
        self.d_k = self.d_model // self.h  # per-head dimension
        self.kq_same = kq_same  # if True, reuse the key projection for queries

        self.v_linear = nn.Linear(d_model, d_model, bias=bias)
        self.k_linear = nn.Linear(d_model, d_model, bias=bias)
        if not kq_same:
            self.q_linear = nn.Linear(d_model, d_model, bias=bias)

    def forward(self, q, k, v, pos_k, pos_v, inter_k, inter_v, mask):
        """
        :param q, k, v: [bs, seq_len, d_model] sequence representations
        :param pos_k, pos_v: [bs, seq_len, d_model] position embeddings added
            to projected keys/values
        :param inter_k, inter_v: [bs, seq_len, seq_len, d_model] pairwise
            interval embeddings
        :param mask: broadcastable attention mask (0 = blocked)
        """
        bs, seq_len = k.size(0), k.size(1)

        # perform linear operation and split into h heads
        k = (self.k_linear(k) + pos_k).view(bs, seq_len, self.h, self.d_k)
        if not self.kq_same:
            q = self.q_linear(q).view(bs, seq_len, self.h, self.d_k)
        else:
            q = self.k_linear(q).view(bs, seq_len, self.h, self.d_k)
        v = (self.v_linear(v) + pos_v).view(bs, seq_len, self.h, self.d_k)

        # transpose to get dimensions bs * h * -1 * d_k
        k = k.transpose(1, 2)
        q = q.transpose(1, 2)
        v = v.transpose(1, 2)

        # interaction (time interval) embeddings, reshaped per head
        inter_k = inter_k.view(bs, seq_len, seq_len, self.h, self.d_k)
        inter_v = inter_v.view(bs, seq_len, seq_len, self.h, self.d_k)
        inter_k = inter_k.transpose(2, 3).transpose(1, 2)
        inter_v = inter_v.transpose(2, 3).transpose(1, 2)  # bs, head, seq_len, seq_len, d_k

        # calculate attention using function we will define next
        output = self.scaled_dot_product_attention(q, k, v, inter_k, inter_v, self.d_k, mask)

        # concatenate heads and put through final linear layer
        output = output.transpose(1, 2).reshape(bs, -1, self.d_model)
        return output

    @staticmethod
    def scaled_dot_product_attention(q, k, v, inter_k, inter_v, d_k, mask):
        """
        Involve pair interaction embeddings when calculating attention scores and output
        """
        scores = torch.matmul(q, k.transpose(-2, -1))  # bs, head, q_len, k_len
        # add query-interval interaction to the raw scores
        scores += (q[:, :, :, None, :] * inter_k).sum(-1)
        scores = scores / d_k ** 0.5
        scores.masked_fill_(mask == 0, -np.inf)
        # subtract global max before softmax for numerical stability
        scores = (scores - scores.max()).softmax(dim=-1)
        output = torch.matmul(scores, v)  # bs, head, q_len, d_k
        # add attention-weighted interval values to the output
        output += (scores[:, :, :, :, None] * inter_v).sum(-2)
        return output
class TimeIntervalTransformerLayer(nn.Module):
    """Transformer block built around TimeIntervalMultiHeadAttention:
    attention + residual/layer-norm, then a 2-layer feed-forward net with a
    second residual/layer-norm.
    """
    def __init__(self, d_model, d_ff, n_heads, dropout, kq_same=False):
        super().__init__()
        self.masked_attn_head = TimeIntervalMultiHeadAttention(d_model, n_heads, kq_same=kq_same)

        # Two layer norm layer and two dropout layer
        self.layer_norm1 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)

        self.linear1 = nn.Linear(d_model, d_ff)
        self.linear2 = nn.Linear(d_ff, d_model)

        self.layer_norm2 = nn.LayerNorm(d_model)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, seq, pos_k, pos_v, inter_k, inter_v, mask):
        context = self.masked_attn_head(seq, seq, seq, pos_k, pos_v, inter_k, inter_v, mask)
        context = self.layer_norm1(self.dropout1(context) + seq)  # residual 1
        output = self.linear1(context).relu()
        output = self.linear2(output)
        output = self.layer_norm2(self.dropout2(output) + context)  # residual 2
        return output
| 8,550 | 41.755 | 107 | py |
ReChorus | ReChorus-master/src/utils/utils.py | # -*- coding: UTF-8 -*-
import os
import random
import logging
import torch
import datetime
import numpy as np
import pandas as pd
from typing import List, Dict, NoReturn, Any
def init_seed(seed):
    """Seed every RNG (python, numpy, torch CPU/CUDA) and force deterministic cuDNN."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def df_to_dict(df: pd.DataFrame) -> dict:
    """Convert a DataFrame into a dict mapping column name -> np.ndarray of values."""
    return {column: np.array(values) for column, values in df.to_dict('list').items()}
def batch_to_gpu(batch: dict, device) -> dict:
    """Move every torch.Tensor value of *batch* to *device* in place; return the batch.

    Non-tensor values are left untouched.
    """
    for key in batch:
        value = batch[key]
        # exact type check (not isinstance) mirrors the project's convention
        if type(value) is torch.Tensor:
            batch[key] = value.to(device)
    return batch
def check(check_list: List[tuple]) -> NoReturn:
    """Log the shape and (truncated) contents of each (name, tensor) pair.

    Used to observe selected tensors during training.
    """
    logging.info('')
    for name, tensor in check_list:
        values = np.array(tensor.detach().cpu())
        header = name + '\t' + str(values.shape)
        body = np.array2string(values, threshold=20)  # truncate large tensors
        logging.info(os.linesep.join([header, body]) + os.linesep)
def eval_list_columns(df: pd.DataFrame) -> pd.DataFrame:
    """Parse string-typed list columns (e.g. "[1, 2]") back into Python objects, in place.

    Security fix: list-valued cells are plain literals, so ast.literal_eval
    suffices and — unlike the previous eval() — cannot execute arbitrary code
    smuggled into a data file.
    """
    import ast  # stdlib; local import keeps the function self-contained

    for col in df.columns:
        if pd.api.types.is_string_dtype(df[col]):
            df[col] = df[col].apply(lambda x: ast.literal_eval(str(x)))  # some list-value columns
    return df
def format_metric(result_dict: Dict[str, Any]) -> str:
    """Render an evaluation dict like {'NDCG@5': 0.1} as 'name:value' pairs,
    sorted by top-k then metric name.

    Fix: np.float / np.int were deprecated aliases removed in NumPy 1.24, so
    the original type checks crash on modern NumPy. isinstance against the
    builtins plus np.floating / np.integer covers both plain and NumPy scalars.
    """
    assert type(result_dict) == dict
    format_str = []
    metrics = np.unique([k.split('@')[0] for k in result_dict.keys()])
    topks = np.unique([int(k.split('@')[1]) for k in result_dict.keys()])
    for topk in np.sort(topks):
        for metric in np.sort(metrics):
            name = '{}@{}'.format(metric, topk)
            m = result_dict[name]
            if isinstance(m, (float, np.floating)):
                format_str.append('{}:{:<.4f}'.format(name, m))
            elif isinstance(m, (int, np.integer)):
                format_str.append('{}:{}'.format(name, m))
    return ','.join(format_str)
def format_arg_str(args, exclude_lst: list, max_len=20) -> str:
    """Render parsed arguments as an aligned two-column ASCII table.

    :param args: an argparse.Namespace (anything accepted by vars()).
    :param exclude_lst: argument names to omit from the table.
    :param max_len: maximum displayed width of a value; longer values are
        truncated with '...'.
    :return: the table as a single string (rows separated by os.linesep).
    """
    linesep = os.linesep
    arg_dict = vars(args)
    keys = [k for k in arg_dict.keys() if k not in exclude_lst]
    values = [arg_dict[k] for k in keys]
    key_title, value_title = 'Arguments', 'Values'
    # column widths: widest key/value, but values capped at max_len,
    # and never narrower than the column titles
    key_max_len = max(map(lambda x: len(str(x)), keys))
    value_max_len = min(max(map(lambda x: len(str(x)), values)), max_len)
    key_max_len, value_max_len = max([len(key_title), key_max_len]), max([len(value_title), value_max_len])
    horizon_len = key_max_len + value_max_len + 5
    res_str = linesep + '=' * horizon_len + linesep
    res_str += ' ' + key_title + ' ' * (key_max_len - len(key_title)) + ' | ' \
               + value_title + ' ' * (value_max_len - len(value_title)) + ' ' + linesep + '=' * horizon_len + linesep
    for key in sorted(keys):
        value = arg_dict[key]
        if value is not None:  # None-valued arguments are skipped entirely
            key, value = str(key), str(value).replace('\t', '\\t')
            value = value[:max_len-3] + '...' if len(value) > max_len else value
            res_str += ' ' + key + ' ' * (key_max_len - len(key)) + ' | ' \
                       + value + ' ' * (value_max_len - len(value)) + linesep
    res_str += '=' * horizon_len
    return res_str
def check_dir(file_name: str) -> None:
    """Ensure the parent directory of *file_name* exists, creating it if needed.

    Fixes two issues: a bare file name (empty dirname) no longer crashes with
    os.makedirs(''), and exist_ok=True removes the check-then-create race when
    several processes start concurrently.
    """
    dir_path = os.path.dirname(file_name)
    if dir_path and not os.path.exists(dir_path):
        print('make dirs:', dir_path)
        os.makedirs(dir_path, exist_ok=True)
def non_increasing(lst: list) -> bool:
    """Return True iff the sequence never increases (each element <= its predecessor)."""
    return all(nxt <= cur for cur, nxt in zip(lst, lst[1:]))
def get_time():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d %H:%M:%S")
| 3,723 | 33.803738 | 117 | py |
ReChorus | ReChorus-master/src/utils/layers.py | # -*- coding: UTF-8 -*-
import torch
import torch.nn as nn
import numpy as np
class MultiHeadAttention(nn.Module):
    """Standard multi-head scaled dot-product attention.

    Projects q/k/v, splits each into n_heads heads of size d_model // n_heads,
    attends, and concatenates the heads back. When kq_same=True the key
    projection is reused for queries.
    """
    def __init__(self, d_model, n_heads, kq_same=False, bias=True):
        super().__init__()
        """
        It has projection layer for getting keys, queries and values. Followed by attention.
        """
        self.d_model = d_model
        self.h = n_heads
        self.d_k = self.d_model // self.h  # per-head dimension
        self.kq_same = kq_same

        if not kq_same:
            self.q_linear = nn.Linear(d_model, d_model, bias=bias)
        self.k_linear = nn.Linear(d_model, d_model, bias=bias)
        self.v_linear = nn.Linear(d_model, d_model, bias=bias)

    def head_split(self, x):  # get dimensions bs * h * seq_len * d_k
        new_x_shape = x.size()[:-1] + (self.h, self.d_k)
        return x.view(*new_x_shape).transpose(-2, -3)

    def forward(self, q, k, v, mask=None):
        origin_shape = q.size()

        # perform linear operation and split into h heads
        if not self.kq_same:
            q = self.head_split(self.q_linear(q))
        else:
            q = self.head_split(self.k_linear(q))
        k = self.head_split(self.k_linear(k))
        v = self.head_split(self.v_linear(v))

        # calculate attention using function we will define next
        output = self.scaled_dot_product_attention(q, k, v, self.d_k, mask)

        # concatenate heads and put through final linear layer
        output = output.transpose(-2, -3).reshape(origin_shape)
        return output

    @staticmethod
    def scaled_dot_product_attention(q, k, v, d_k, mask=None):
        """
        This is called by Multi-head attention object to find the values.
        """
        scores = torch.matmul(q, k.transpose(-2, -1)) / d_k ** 0.5  # bs, head, q_len, k_len
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -np.inf)
        # subtract global max before softmax for numerical stability
        scores = (scores - scores.max()).softmax(dim=-1)
        # fully-masked rows produce NaN after softmax; zero them out
        scores = scores.masked_fill(torch.isnan(scores), 0)
        output = torch.matmul(scores, v)  # bs, head, q_len, d_k
        return output
class TransformerLayer(nn.Module):
    def __init__(self, d_model, d_ff, n_heads, dropout=0, kq_same=False):
        super().__init__()
        """
        This is a Basic Block of Transformer. It contains one Multi-head attention object.
        Followed by layer norm and position wise feedforward net and dropout layer.
        """
        # Multi-Head Attention Block
        self.masked_attn_head = MultiHeadAttention(d_model, n_heads, kq_same=kq_same)

        # Two layer norm layer and two dropout layer
        self.layer_norm1 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)

        self.linear1 = nn.Linear(d_model, d_ff)
        self.linear2 = nn.Linear(d_ff, d_model)

        self.layer_norm2 = nn.LayerNorm(d_model)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, seq, mask=None):
        # self-attention sub-layer with residual connection
        context = self.masked_attn_head(seq, seq, seq, mask)
        context = self.layer_norm1(self.dropout1(context) + seq)
        # position-wise feed-forward sub-layer with residual connection
        output = self.linear1(context).relu()
        output = self.linear2(output)
        output = self.layer_norm2(self.dropout2(output) + context)
        return output
| 3,225 | 36.08046 | 92 | py |
CMCL-2022 | CMCL-2022-master/main.py | import torch
from base_model import Transformer
import pandas as pd
from base_dataset import CreateDataset
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from tqdm import tqdm
from statistics import mean
from torchmetrics.functional import r2_score
import numpy as np

MAX_EPOCHS = 10

# Train on the second GPU when present, otherwise on CPU.
# BUG FIX: removed the duplicated "device = device = ..." assignment.
device = torch.device("cuda:1") if torch.cuda.is_available() else torch.device("cpu")
transformer_model_pretrained = 'bert-base-multilingual-cased'

# Sentences plus their four eye-tracking regression targets.
# BUG FIX: removed the duplicated "data = data = ..." assignment.
data = pd.read_csv('training_data2022/training_data/train_left_concat.csv')
sentences = list(data['left_sentences'])
labels = data[['FFDAvg', 'FFDStd', 'TRTAvg', 'TRTStd']].to_numpy()

x_train, x_val, y_train, y_val = train_test_split(sentences, labels, test_size=0.2)
train_dataset = CreateDataset(x_train, y_train, transformer_model_pretrained)
val_dataset = CreateDataset(x_val, y_val, transformer_model_pretrained)
train_dataloader = DataLoader(train_dataset, batch_size=8, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=8, shuffle=True)

model = Transformer(transformer_model_pretrained, num_classes=4)
##Shift to CUDA backend
model.to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
loss_fn = torch.nn.MSELoss()

for epoch in range(MAX_EPOCHS):
    print(f"Epoch {epoch+1}\n-------------------------------")
    ############-------Train------##############
    model.train()
    losses = []
    for idx, batch in enumerate(tqdm(train_dataloader)):
        x = {key: value.to(device) for key, value in batch[0].items()}
        y = batch[1].to(device)
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
    loss = mean(losses)
    print('-----------Training Loss---------------::', loss)
    ############-------Validation-----##############
    losses = []
    preds = []
    targets = []
    model.eval()
    with torch.no_grad():
        # BUG FIX: validate on val_dataloader — the original iterated the
        # training loader here, so the reported validation metrics were wrong.
        for idx, batch in enumerate(tqdm(val_dataloader)):
            x = {key: value.to(device) for key, value in batch[0].items()}
            y = batch[1].to(device)
            y_pred = model(x)
            loss = loss_fn(y_pred, y)
            targets.append(y.detach())
            preds.append(y_pred.detach())
            losses.append(loss.item())
    loss = mean(losses)
    targets = torch.cat(targets)
    preds = torch.cat(preds)
    print('-----------Validation Loss---------------::', loss)
    print('===========R2 Score :: ', r2_score(preds, targets).item())
| 2,717 | 28.225806 | 94 | py |
CMCL-2022 | CMCL-2022-master/base_model.py |
from torch import nn
from transformers import AutoConfig, AutoModel
class Transformer(nn.Module):
    """Regression head on top of a transformer encoder architecture.

    The encoder is built from config only (randomly initialized weights);
    a small pooler + linear layer maps the [CLS] state to ``num_classes``
    regression outputs.
    """

    def __init__(self, model, num_classes=1):
        super().__init__()
        self.name = model
        cfg = AutoConfig.from_pretrained(self.name)
        cfg.output_hidden_states = True
        self.transformer = AutoModel.from_config(cfg)
        self.nb_features = self.transformer.pooler.dense.out_features
        self.pooler = nn.Sequential(
            nn.Linear(self.nb_features, self.nb_features),
            nn.LeakyReLU(0.1),
        )
        self.logit = nn.Linear(self.nb_features, num_classes)

    def forward(self, encodings):
        transformer_out = self.transformer(**encodings)
        # Use the representation of the first token of the last layer.
        cls_state = transformer_out['hidden_states'][-1][:, 0]
        return self.logit(self.pooler(cls_state))
CMCL-2022 | CMCL-2022-master/base_dataset.py | from torch.utils.data import Dataset
from transformers import AutoTokenizer
import torch
class CreateDataset(Dataset):
    """Tokenized sentences + float regression targets for a HuggingFace model."""

    def __init__(self, data, labels, model):
        super().__init__()
        self.data = data
        self.labels = labels
        tok = AutoTokenizer.from_pretrained(model)
        # Pre-tokenize everything once, padded/truncated to 128 tokens.
        self.encodings = tok(data, add_special_tokens=True, truncation=True,
                             padding="max_length", return_tensors="pt", max_length=128)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, index):
        item = {key: val[index].clone().detach() for key, val in self.encodings.items()}
        target = torch.tensor(self.labels[index], dtype=torch.float)
        return (item, target)
CMCL-2022 | CMCL-2022-master/code/main.py | import pytorch_lightning as pl
from xlm_roberta import tfRegressor
import torch
from dataloader import TransduciveDataLoader
import pandas as pd
import numpy as np
# Corpora present in the multilingual CMCL data; used to group sentences.
langTexts = ['ZuCo1','ZuCo2','Provo','BSC','RSC','PAHEC','PoTeC','GECO-NL']
tf_name = 'xlm-roberta-base'
# Input CSVs and the output location for the subtask-1 predictions.
train_loc = 'data/training_data/train.csv'
val_loc = 'data/training_data/dev.csv'
test_loc = 'data/test_data_subtask1/sub1/test_copy.csv'
pred_loc = 'data/test_data_subtask1/sub1/test.csv'
predictions_loc = 'data/task1_predictions/preds.csv'
dataloader = TransduciveDataLoader(train_loc,val_loc,test_loc,langTexts,tf_name)
trainer = pl.Trainer(gpus = [1],
                     max_epochs = 10,
                     auto_lr_find = True)
model = tfRegressor(tf_name = tf_name,lr = 1e-2)
# Sweep for a good learning rate before fitting, and save the sweep plot.
lr_finder = trainer.tuner.lr_find(model,dataloader)
# Plot with
fig = lr_finder.plot(suggest=True)
fig.savefig('lr_finder.png')
model.hparams.lr = lr_finder.suggestion()
trainer.fit(model,datamodule=dataloader)
# Predict on the test split and write the four gaze features back to CSV.
predictions = trainer.predict(model,datamodule=dataloader)
preds_file = pd.read_csv(pred_loc)
predictions = torch.cat(predictions)
preds_file[['FFDAvg','FFDStd','TRTAvg','TRTStd']] = pd.DataFrame(np.array(predictions))
preds_file.to_csv(predictions_loc,index = False)
| 1,246 | 27.340909 | 87 | py |
CMCL-2022 | CMCL-2022-master/code/dataloader.py | import pytorch_lightning as pl
import torch.utils.data as TorchData
import pandas as pd
from dataset import TransduciveDataset
from utils import getLangText,seperateHyphenToSentence
import numpy as np
class TransduciveDataLoader(pl.LightningDataModule):
    """LightningDataModule grouping per-word rows into per-sentence samples.

    Sentences are collected corpus by corpus (``langTexts``) so that the
    train/val/predict datasets share the same ordering scheme.
    """

    def __init__(self, train_location, val_location, test_loc, langTexts, tf_name):
        super().__init__()
        self.train_location = train_location
        self.val_location = val_location
        self.test_location = test_loc
        self.langTexts = langTexts
        self.tf_name = tf_name

    def prepare_data(self) -> None:
        self.train_df = pd.read_csv(self.train_location)
        self.val_df = pd.read_csv(self.val_location)
        self.predict_df = pd.read_csv(self.test_location)
        self.train_dataset = self.datasetGen(self.train_df)
        self.val_dataset = self.datasetGen(self.val_df)
        self.predict_dataset = self.datasetGenPred(self.predict_df)
        return super().prepare_data()

    def _collect(self, df, with_labels):
        """Group words of each sentence into (texts, labels) lists.

        with_labels: read the four gaze targets from the dataframe; otherwise
        emit -1 placeholders of matching length (test-time dummy labels).
        """
        self.df = df.copy()
        self.df['langText'] = self.df.sentence_id.apply(getLangText).astype(str)
        self.df.sentence_id = self.df.sentence_id.apply(seperateHyphenToSentence)
        self.df.sentence_id = self.df.sentence_id.astype(int)
        self.texts = []
        self.labels = []
        for langText in self.langTexts:
            sub = self.df[self.df.langText == langText]
            for i in sub.sentence_id.unique():
                rows = sub[sub.sentence_id == i]
                if with_labels:
                    label = rows[['FFDAvg', 'FFDStd', 'TRTAvg', 'TRTStd']].to_numpy()
                else:
                    label = -np.ones((len(rows), 4))
                self.texts.append(rows.word.tolist())
                self.labels.append(label)
        # BUG FIX: kept as a plain list — np.array() over ragged per-sentence
        # arrays raises on modern NumPy, and the Dataset only uses len()/[i].
        return self.texts, self.labels

    def datasetGen(self, df):
        """Dataset with real labels (train/val)."""
        texts, labels = self._collect(df, with_labels=True)
        # BUG FIX: tf_name was passed positionally into the ``mode`` slot of
        # TransduciveDataset; pass it by keyword.
        return TransduciveDataset(texts, labels, tf_name=self.tf_name)

    def datasetGenPred(self, df):
        """Dataset with -1 placeholder labels (prediction)."""
        texts, labels = self._collect(df, with_labels=False)
        return TransduciveDataset(texts, labels, tf_name=self.tf_name, mode='test')

    def train_dataloader(self):
        return TorchData.DataLoader(self.train_dataset, batch_size=16, shuffle=True, num_workers=12)

    def val_dataloader(self):
        return TorchData.DataLoader(self.val_dataset, batch_size=16, num_workers=12)

    def predict_dataloader(self):
        return TorchData.DataLoader(self.predict_dataset, batch_size=16, num_workers=12)
# train_loc = 'data/training_data/train.csv'
# val_loc = 'data/training_data/dev.csv'
# langText = 'ZuCo1'
# dataloader = TransduciveDataLoader(train_loc,val_loc,langText)
# dataloader.prepare_data()
# for batch in dataloader.train_dataloader():
# enc_inputs,word_mask,labels= batch[0],batch[1],batch[2]
# break
| 3,617 | 38.758242 | 99 | py |
CMCL-2022 | CMCL-2022-master/code/dataset.py | import torch
from torch.utils.data import Dataset
from transformers import AutoTokenizer
MAX_LEN = 128
class TransduciveDataset(Dataset):
    """Word-level eye-tracking dataset aligned to subword tokenization.

    Each item is (encoded_inputs, word_mask, labels) where ``labels`` is a
    (MAX_LEN, 4) tensor holding the four gaze targets at the first subword
    of every word and -1 everywhere else.
    """

    def __init__(self, texts, labels, mode='train', tf_name='xlm-roberta-base') -> None:
        super(TransduciveDataset, self).__init__()
        try:
            assert len(texts) == len(labels)
        except AssertionError:
            print(len(texts), len(labels))
        self.texts = texts    # [b,]
        self.labels = labels  # [b,x,4]
        self.tf_name = tf_name
        self.mode = mode
        self.tokenizer = AutoTokenizer.from_pretrained(tf_name)

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, index):
        encoded_inputs = self.tokenizer(self.texts[index], padding='max_length',
                                        is_split_into_words=True, max_length=MAX_LEN,
                                        truncation=True, return_tensors='pt')
        labels = -1.0 * torch.ones(MAX_LEN, 4)
        decoded_texts = self.tokenizer.convert_ids_to_tokens(encoded_inputs.input_ids[0])
        # First-subword detection: BERT-style vocabularies mark continuations
        # with '#', sentencepiece (xlm) marks word starts with '▁'.
        if not 'xlm' in self.tf_name:
            word_mask = [t != '[CLS]' and t != '[SEP]' and t != '[PAD]' and t[0] != '#' for t in decoded_texts]
        else:
            word_mask = [t[0] == '▁' for t in decoded_texts]
        try:
            word_mask = torch.tensor(word_mask)
            labels[word_mask] = torch.tensor(self.labels[index], dtype=torch.float)  # [128,4]
        except RuntimeError:
            print([(x, y) for x, y in zip(decoded_texts, word_mask)])
            # BUG FIX: raising a plain string is a TypeError in Python 3;
            # raise a real exception instead.
            raise RuntimeError('Improper Tokenization ')
        return encoded_inputs, word_mask, labels
| 1,563 | 41.27027 | 167 | py |
CMCL-2022 | CMCL-2022-master/code/xlm_roberta.py | import pytorch_lightning as pl
from transformers import AutoModel,AutoTokenizer
import torch
import numpy as np
class tfRegressor(pl.LightningModule):
    """Token-level regressor: pretrained encoder + MLP head for 4 gaze features."""

    def __init__(self, lr, tf_name):
        super(tfRegressor, self).__init__()
        self.fe = AutoModel.from_pretrained(tf_name)
        self.lr = lr
        self.linear = torch.nn.Linear(768, 10024)
        self.relu = torch.nn.LeakyReLU()
        self.dropout = torch.nn.Dropout()
        self.regressor = torch.nn.Linear(10024, 4)
        self.criterion = torch.nn.MSELoss()

    def forward(self, encoded_inputs):
        outputs = self.fe(**encoded_inputs)
        outputs = outputs.last_hidden_state  # [b,128,768]
        outputs = self.relu(self.linear(outputs))
        outputs = self.dropout(outputs)
        preds = self.regressor(outputs)  # [b,128,4]
        return preds

    def _shared_step(self, batch):
        """Unpack a batch, run the model, and mask non-word positions.

        Returns (preds, labels, word_masks); positions where word_masks == 0
        are set to -1 in preds so they match the -1 filler in the labels.
        (Factored out of the formerly duplicated train/val/predict steps.)
        """
        encoded_inputs = batch[0]  # {input_ids, attention_masks, ...}
        word_masks = batch[1]      # [b,128]
        labels = batch[2]          # [b,128,4]
        for key in encoded_inputs.keys():
            encoded_inputs[key] = encoded_inputs[key].squeeze()
        preds = self(encoded_inputs)
        ##Masking to generate equal number y_pred and y_true
        preds[word_masks == 0] = -1
        return preds, labels, word_masks

    def training_step(self, batch, idx):
        y_pred, y_true, _ = self._shared_step(batch)
        assert y_pred.shape == y_true.shape
        loss = self.criterion(y_pred, y_true)
        self.log('Training Loss', loss, on_epoch=True)
        return loss

    def validation_step(self, batch, idx):
        y_pred, y_true, _ = self._shared_step(batch)
        assert y_pred.shape == y_true.shape
        loss = self.criterion(y_pred, y_true)
        self.log('Validation Loss', loss, on_epoch=True)
        return loss

    def validation_epoch_end(self, outputs):
        print('val loss ', torch.mean(torch.stack(outputs)))

    def predict_step(self, batch, batch_idx):
        # The -1 fill only touches masked positions, so selecting by the mask
        # yields exactly the original unmasked predictions.
        preds, _, word_masks = self._shared_step(batch)
        return preds[word_masks]

    def configure_optimizers(self):
        return torch.optim.AdamW(self.parameters(), lr=self.lr)
| 2,701 | 34.552632 | 63 | py |
CMCL-2022 | CMCL-2022-master/cmcl-shared-task-main/src/dataloader.py | import torch
import transformers
FEATURES_NAMES = ['FFDAvg', 'FFDStd', 'TRTAvg', 'TRTStd']
class EyeTrackingCSV(torch.utils.data.Dataset):
    """Tokenize sentences and load them into tensors. Assume dataframe has sentence_id.

    Each item pairs a tokenized sentence with a (seq_len, 4) tensor holding
    the FFD/TRT statistics at first-subword positions and -1 elsewhere.
    """

    def __init__(self, df, mode='train', model_name='roberta-base'):
        self.model_name = model_name
        self.df = df.copy()
        self.mode = mode
        # Re-number the sentence ids, assuming they are [N, N+1, ...] for some N
        self.sentence_ids = self.df.sentence_id.unique()
        self.sentence_id_mapper = {}
        for i in range(len(self.sentence_ids)):
            self.sentence_id_mapper[i] = self.sentence_ids[i]
        self.num_sentences = len(self.sentence_ids)
        self.texts = []
        for id in self.sentence_ids:
            rows = self.df[self.df.sentence_id == id]
            text = rows.word.tolist()
            text[-1] = text[-1].replace('<EOS>', '')
            self.texts.append(text)
        # Tokenize all sentences
        if 'roberta' in model_name:
            self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, add_prefix_space=True)
        elif 'bert' in model_name:
            self.tokenizer = transformers.BertTokenizerFast.from_pretrained(model_name, add_prefix_space=True)
        self.ids = self.tokenizer(self.texts, padding=True, is_split_into_words=True, return_offsets_mapping=True)

    def __len__(self):
        return self.num_sentences

    def __getitem__(self, ix):
        input_ids = self.ids['input_ids'][ix]
        offset_mapping = self.ids['offset_mapping'][ix]
        attention_mask = self.ids['attention_mask'][ix]
        input_tokens = [self.tokenizer.convert_ids_to_tokens(x) for x in input_ids]
        # First subword of each token starts with special character
        if 'xlm-roberta' in self.model_name:
            is_first_subword = [t[0] == '▁' for t in input_tokens]
        elif 'roberta' in self.model_name:
            is_first_subword = [t[0] == 'Ġ' for t in input_tokens]
        elif 'bert' in self.model_name:
            is_first_subword = [t0 == 0 and t1 > 0 for t0, t1 in offset_mapping]
        # Indices of first subwords, only used in the diagnostic prints below.
        ls = [i for i, val in enumerate(is_first_subword) if val]
        features = -torch.ones((len(input_ids), 4))
        if self.mode == 'train' or self.mode == 'val':
            try:
                features[is_first_subword] = torch.Tensor(
                    self.df[self.df.sentence_id == self.sentence_id_mapper[ix]][FEATURES_NAMES].to_numpy()
                )
            except Exception:  # BUG FIX: narrowed from a bare except
                print('dataloader_train/val', ix, self.sentence_id_mapper[ix], len(ls))
        else:
            try:
                features[is_first_subword] = torch.zeros(4)
            except Exception:  # BUG FIX: narrowed from a bare except
                print('data_loader_test', ix, self.sentence_id_mapper[ix], len(ls))
        return (
            input_tokens,
            torch.LongTensor(input_ids),
            torch.LongTensor(attention_mask),
            features,
        )
| 3,140 | 34.292135 | 110 | py |
CMCL-2022 | CMCL-2022-master/cmcl-shared-task-main/src/model.py | import random
import numpy as np
import torch
import transformers
from tqdm import tqdm
import src.dataloader
device = torch.device('cuda:1')
class RobertaRegressionModel(torch.nn.Module):
    """Pretrained encoder + linear head predicting 4 eye-tracking features per token."""

    def __init__(self, model_name='roberta-base'):
        super(RobertaRegressionModel, self).__init__()
        if 'roberta' in model_name:
            self.roberta = transformers.AutoModel.from_pretrained(model_name)
        elif 'bert' in model_name:
            self.roberta = transformers.BertModel.from_pretrained(model_name)
        hidden_size = 1024 if 'large' in model_name else 768
        self.decoder = torch.nn.Sequential(
            torch.nn.Linear(hidden_size, 4)
        )

    def forward(self, X_ids, X_attns, predict_mask):
        """
        X_ids: (B, seqlen) tensor of token ids
        X_attns: (B, seqlen) tensor of attention masks, 0 for [PAD] tokens and 1 otherwise
        predict_mask: (B, seqlen) tensor, 1 for tokens that we need to predict
        Output: (B, seqlen, 5) tensor of predictions, only predict when predict_mask == 1
        """
        # (B, seqlen, hidden) -> (B, seqlen, 4)
        encoded = self.roberta(X_ids, attention_mask=X_attns).last_hidden_state
        preds = self.decoder(encoded)
        # Positions we are not asked to predict are flagged with -1.
        preds[predict_mask == 0] = -1
        return preds
class ModelTrainer():
    """Handles training and prediction given CSV, restricted to one corpus (text_name)."""

    def __init__(self, model_name='xlm-roberta-base', text_name='ZuCo1'):
        self.model_name = model_name
        self.model = RobertaRegressionModel(model_name).to(device)
        self.text_name = text_name

    def train(self, train_df, valid_df=None, num_epochs=5, lr=5e-5, batch_size=12, feature_ids=[0,1,2,3]):
        """Fine-tune on rows whose langText matches self.text_name."""
        train_df = train_df[train_df.langText == self.text_name]
        # BUG FIX: only filter valid_df when it was actually provided;
        # the unconditional filter crashed on the default valid_df=None.
        if valid_df is not None:
            valid_df = valid_df[valid_df.langText == self.text_name]
        train_data = src.dataloader.EyeTrackingCSV(train_df, model_name=self.model_name)
        random.seed(12345)
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
        opt = torch.optim.AdamW(self.model.parameters(), lr=lr)
        mse = torch.nn.L1Loss()  # NOTE: named mse but this is mean absolute error
        self.model.train()
        for epoch in range(num_epochs):
            for X_tokens, X_ids, X_attns, Y_true in train_loader:
                opt.zero_grad()
                X_ids = X_ids.to(device)
                X_attns = X_attns.to(device)
                Y_true = Y_true.to(device)
                # Rows of -1s mark positions without labels; exclude them.
                predict_mask = torch.sum(Y_true, axis=2) >= 0
                Y_pred = self.model(X_ids, X_attns, predict_mask)
                loss = mse(Y_true[:, :, feature_ids], Y_pred[:, :, feature_ids])
                loss.backward()
                opt.step()
            print('Epoch:', epoch + 1)
            if valid_df is not None:
                predict_df = self.predict(valid_df)
                src.eval_metric.evaluate(predict_df, valid_df)

    def _fill_predictions(self, df, mode, batch_size):
        """Run the model over df's sentences; return a copy with predictions filled in."""
        data = src.dataloader.EyeTrackingCSV(df, mode=mode, model_name=self.model_name)
        loader = torch.utils.data.DataLoader(data, batch_size=batch_size)
        predict_df = df.copy()
        predict_df[['FFDAvg', 'FFDStd', 'TRTAvg', 'TRTStd']] = 9999
        # Assume one-to-one matching between nonzero predictions and tokens
        predictions = []
        self.model.eval()
        for X_tokens, X_ids, X_attns, Y_true in loader:
            X_ids = X_ids.to(device)
            X_attns = X_attns.to(device)
            predict_mask = torch.sum(Y_true, axis=2) >= 0
            with torch.no_grad():
                Y_pred = self.model(X_ids, X_attns, predict_mask).cpu()
            for batch_ix in range(X_ids.shape[0]):
                for row_ix in range(X_ids.shape[1]):
                    token_prediction = Y_pred[batch_ix, row_ix]
                    if token_prediction.sum() != -4.0:
                        token_prediction[token_prediction < 0] = 0
                        predictions.append(token_prediction)
        # BUG FIX: the original assigned np.vstack(predictions) once *before*
        # the try block, so a shape mismatch raised uncaught and the
        # diagnostic except branch could never run.
        try:
            predict_df[['FFDAvg', 'FFDStd', 'TRTAvg', 'TRTStd']] = np.vstack(predictions)
        except Exception:
            print(mode, len(predictions), predictions[0].shape, predict_df.shape)
        return predict_df

    def predict(self, valid_df):
        return self._fill_predictions(valid_df, mode='val', batch_size=16)

    def test(self, test_df):
        return self._fill_predictions(test_df, mode='test', batch_size=16)
class ModelTrainerNew():
    """Handles training and prediction given CSV (all corpora pooled together)."""

    def __init__(self, model_name='xlm-roberta-base'):
        self.model_name = model_name
        self.model = RobertaRegressionModel(model_name).to(device)

    def train(self, train_df, valid_df=None, num_epochs=5, lr=5e-5, batch_size=12, feature_ids=[0,1,2,3]):
        """Fine-tune the regression model; optionally evaluate on valid_df each epoch."""
        train_data = src.dataloader.EyeTrackingCSV(train_df, model_name=self.model_name)
        random.seed(12345)
        train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
        opt = torch.optim.AdamW(self.model.parameters(), lr=lr)
        mse = torch.nn.L1Loss()  # NOTE: named mse but this is mean absolute error
        self.model.train()
        for epoch in range(num_epochs):
            for X_tokens, X_ids, X_attns, Y_true in train_loader:
                opt.zero_grad()
                X_ids = X_ids.to(device)
                X_attns = X_attns.to(device)
                Y_true = Y_true.to(device)
                # Rows of -1s mark positions without labels; exclude them.
                predict_mask = torch.sum(Y_true, axis=2) >= 0
                Y_pred = self.model(X_ids, X_attns, predict_mask)
                loss = mse(Y_true[:, :, feature_ids], Y_pred[:, :, feature_ids])
                loss.backward()
                opt.step()
            print('Epoch:', epoch + 1)
            if valid_df is not None:
                predict_df = self.predict(valid_df)
                src.eval_metric.evaluate(predict_df, valid_df)

    def _fill_predictions(self, df, mode, batch_size):
        """Run the model over df's sentences; return a copy with predictions filled in."""
        data = src.dataloader.EyeTrackingCSV(df, mode=mode, model_name=self.model_name)
        loader = torch.utils.data.DataLoader(data, batch_size=batch_size)
        predict_df = df.copy()
        predict_df[['FFDAvg', 'FFDStd', 'TRTAvg', 'TRTStd']] = 9999
        # Assume one-to-one matching between nonzero predictions and tokens
        predictions = []
        self.model.eval()
        for X_tokens, X_ids, X_attns, Y_true in loader:
            X_ids = X_ids.to(device)
            X_attns = X_attns.to(device)
            predict_mask = torch.sum(Y_true, axis=2) >= 0
            with torch.no_grad():
                Y_pred = self.model(X_ids, X_attns, predict_mask).cpu()
            for batch_ix in range(X_ids.shape[0]):
                for row_ix in range(X_ids.shape[1]):
                    token_prediction = Y_pred[batch_ix, row_ix]
                    if token_prediction.sum() != -4.0:
                        token_prediction[token_prediction < 0] = 0
                        predictions.append(token_prediction)
        # BUG FIX: the original assigned np.vstack(predictions) once *before*
        # the try block, so a shape mismatch raised uncaught and the
        # diagnostic except branch could never run.
        try:
            predict_df[['FFDAvg', 'FFDStd', 'TRTAvg', 'TRTStd']] = np.vstack(predictions)
        except Exception:
            print(mode, len(predictions), predictions[0].shape, predict_df.shape)
        return predict_df

    def predict(self, valid_df):
        return self._fill_predictions(valid_df, mode='val', batch_size=32)

    def test(self, test_df):
        return self._fill_predictions(test_df, mode='test', batch_size=32)
| 9,123 | 36.240816 | 104 | py |
CMCL-2022 | CMCL-2022-master/cmcl-shared-task-main/notebooks/RoBERTaRegression.py | #!/usr/bin/env python
# coding: utf-8
# # RoBERTa Regression
# In[1]:
import sys
sys.path.append('../')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tqdm
import torch
from collections import defaultdict, Counter
import random
import math
import pickle
import src.eval_metric
import src.model
import src.dataloader
# Notebook conveniences: inline plots + auto-reload of edited modules.
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
pd.options.display.max_columns = 100
pd.options.display.max_rows = 100
# In[2]:
# ZuCo train/valid splits plus the re-scaled Provo corpus for pre-training.
train_df = pd.read_csv("../data/training_data/train.csv")
valid_df = pd.read_csv("../data/training_data/valid.csv")
provo_df = pd.read_csv("../data/provo.csv")
# ## Fine-tune model
# In[3]:
model_trainer = src.model.ModelTrainer()
# In[ ]:
# Pre-train on Provo, then fine-tune on ZuCo with per-epoch validation.
model_trainer.train(provo_df, num_epochs=100)
# In[ ]:
model_trainer.train(train_df, valid_df, num_epochs=150)
# ## Make predictions
# In[ ]:
predict_df = model_trainer.predict(valid_df)
predict_df
# In[6]:
predict_df.to_csv("predictions.csv", index=False)
# In[ ]:
src.eval_metric.evaluate(predict_df, valid_df)
| 1,196 | 13.597561 | 57 | py |
CMCL-2022 | CMCL-2022-master/cmcl-shared-task-main/notebooks/ProvoProcess.py | #!/usr/bin/env python
# coding: utf-8
# # Process Provo Corpus
# In[1]:
import sys
sys.path.append('../')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tqdm
import torch
from collections import defaultdict, Counter
import random
import math
import pickle
# Notebook conveniences: inline plots + auto-reload of edited modules.
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
pd.options.display.max_columns = 100
pd.options.display.max_rows = 100
# ## Read data
# In[2]:
df_raw = pd.read_csv("../data/ProvoCorpus.csv")
# In[3]:
# Rename to be similar to ZuCo
df = pd.DataFrame({
    'participant_id': df_raw['Participant_ID'],
    'text_id': df_raw['Text_ID'],
    'orig_sentence_id': df_raw['Sentence_Number'],
    'word_id': df_raw['Word_In_Sentence_Number'],
    'word': df_raw['Word'],
    'nFix': df_raw['IA_FIXATION_COUNT'],
    'FFD': df_raw['IA_FIRST_FIXATION_DURATION'],
    'GPT': df_raw['IA_REGRESSION_PATH_DURATION'],
    'TRT': df_raw['IA_DWELL_TIME'],
})
df = df.fillna(0)
df['orig_sentence_id'] = df['orig_sentence_id'].astype(int)
df['word_id'] = df['word_id'].astype(int)
df['nFix'] = df['nFix'].astype(float)
df['TRT'] = df['TRT'].astype(float)
# In[4]:
# Renumber sentences ids from original (text id, sentence id) starting from 0
df = df[~((df.orig_sentence_id == 0) | (df.word_id == 0))]
id_map = {}
for _, row in df.iterrows():
    k = (row['text_id'], row['orig_sentence_id'])
    if k in id_map:
        v = id_map[k]
    else:
        v = len(id_map)
    id_map[k] = v
df['sentence_id'] = df.apply(lambda row: id_map[(row['text_id'], row['orig_sentence_id'])], axis=1)
df = df[['participant_id', 'sentence_id', 'word_id', 'word', 'nFix', 'FFD', 'GPT', 'TRT']]
# ## Take averages across participants
# In[10]:
agg_df = df.groupby(['sentence_id', 'word_id', 'word']).mean().reset_index()
# fixProp = fraction of participants who fixated the word at least once.
agg_df['fixProp'] = df.groupby(['sentence_id', 'word_id', 'word'])['nFix'] .apply(lambda col: (col != 0).sum() / len(col)).reset_index()['nFix']
# In[11]:
# Scale to have the same mean and standard deviation as ZuCo data
agg_fts = agg_df[['nFix', 'FFD', 'GPT', 'TRT', 'fixProp']]
agg_df[['nFix', 'FFD', 'GPT', 'TRT', 'fixProp']] = (agg_fts - agg_fts.mean(axis=0)) / agg_fts.std(axis=0)
# Target means/stds below are the ZuCo training statistics.
agg_df['nFix'] = 15.10 + 9.42 * agg_df['nFix']
agg_df['FFD'] = 3.19 + 1.42 * agg_df['FFD']
agg_df['GPT'] = 6.35 + 5.91 * agg_df['GPT']
agg_df['TRT'] = 5.31 + 3.64 * agg_df['TRT']
agg_df['fixProp'] = 67.06 + 26.06 * agg_df['fixProp']
# In[12]:
agg_df.to_csv('../data/provo.csv', index=False)
# ## Sanity check
# In[13]:
agg_df.describe()
# In[14]:
sns.pairplot(agg_df[['nFix', 'FFD', 'GPT', 'TRT', 'fixProp']])
| 2,710 | 21.591667 | 146 | py |
CMCL-2022 | CMCL-2022-master/cmcl-shared-task-main/notebooks/InitialExplore.py | #!/usr/bin/env python
# coding: utf-8
# # Some initial exploration
# In[1]:
import sys
sys.path.append('../')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tqdm
import torch
from collections import defaultdict, Counter
import random
import math
import pickle
# Notebook conveniences: inline plots + auto-reload of edited modules.
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
pd.options.display.max_columns = 100
pd.options.display.max_rows = 100
# In[2]:
df = pd.read_csv("../data/training_data/train_and_valid.csv")
#df = pd.read_csv("../data/provo.csv")
# In[3]:
df[df.sentence_id == 2]
# In[4]:
df.describe()
# In[ ]:
# Pairwise scatter of the five gaze features, lower triangle only.
sns.set_style("white")
g = sns.pairplot(df[['nFix', 'FFD', 'GPT', 'TRT', 'fixProp']],
                 corner=True, height=1.2, plot_kws={'edgecolor':"none", 's':3})
#g.set(xlim=(0, 100))
# Clip the axes to sensible per-feature ranges.
g.axes[0, 0].set_xlim((0, 100))
g.axes[1, 1].set_xlim((0, 12))
g.axes[2, 2].set_xlim((0, 70))
g.axes[3, 3].set_xlim((0, 40))
g.axes[4, 4].set_xlim((0, 100))
plt.show()
| 1,094 | 16.380952 | 79 | py |
CMCL-2022 | CMCL-2022-master/cmcl-shared-task-main/notebooks/MedianBaseline.py | #!/usr/bin/env python
# coding: utf-8
# # Median Baseline
# In[1]:
import sys
sys.path.append('../')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tqdm
import torch
from collections import defaultdict, Counter
import random
import math
import pickle
import string
import wordfreq
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
import src.eval_metric
# Notebook conveniences: inline plots + auto-reload of edited modules.
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
pd.options.display.max_columns = 100
pd.options.display.max_rows = 100
# In[2]:
train_df = pd.read_csv("../data/training_data/train.csv")
valid_df = pd.read_csv("../data/training_data/valid.csv")
# In[3]:
# Baseline: predict each feature's training-set median for every token.
output_var_names = ['nFix', 'FFD', 'GPT', 'TRT', 'fixProp']
predict_df = valid_df.copy()
for feat_name in output_var_names:
    predict_df[feat_name] = train_df[feat_name].median()
# In[4]:
src.eval_metric.evaluate(predict_df, valid_df)
# ## Simple Feature-based Regression
# In[5]:
input_var_names = ['length', 'logfreq', 'has_upper', 'has_punct']

def get_features(token):
    """Surface features for one token: length, Zipf frequency, case, punctuation."""
    token = token.replace('<EOS>', '')
    features = {
        'length': len(token),
        'logfreq': wordfreq.zipf_frequency(token, 'en'),
        'has_upper': 0 if token.lower() == token else 1,
        'has_punct': 1 if any(ch in string.punctuation for ch in token) else 0,
    }
    return pd.Series(features)
def clip_to_100(val):
    """Clamp a predicted value into the valid [0, 100] range."""
    return max(0, min(100, val))
# In[6]:
# Compute the hand-crafted surface features for every token in both splits.
train_df[input_var_names] = train_df.word.apply(get_features)
# In[7]:
valid_df[input_var_names] = valid_df.word.apply(get_features)
# In[11]:
# Fit one SVR per output feature on the surface features.
predict_df = valid_df.copy()
for feat_name in output_var_names:
    #model = LinearRegression()
    model = SVR()
    model.fit(train_df[input_var_names], train_df[feat_name])
    predict_df[feat_name] = model.predict(predict_df[input_var_names])
    predict_df[feat_name] = predict_df[feat_name].apply(clip_to_100)
# In[12]:
src.eval_metric.evaluate(predict_df, valid_df)
| 2,091 | 17.678571 | 73 | py |
CMCL-2022 | CMCL-2022-master/cmcl-shared-task-main/notebooks/RobertaRegression.py | # %% [markdown]
# # RoBERTa Regression
# %%
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tqdm
import torch
from collections import defaultdict, Counter
import random
import math
import pickle
import os
import src.eval_metric
import src.model
import src.dataloader
pd.options.display.max_columns = 100
pd.options.display.max_rows = 100
# %%
train_df = pd.read_csv("../../data/training_data/train.csv")
valid_df = pd.read_csv("../../data/training_data/dev.csv")
# %% [markdown]
# ## Fine-tune model
# %%
# Train only on the ZuCo2 corpus (see ModelTrainer.text_name).
model_trainer = src.model.ModelTrainer(text_name='ZuCo2')
# %%
model_trainer.train(train_df, valid_df, num_epochs=150)
# %% [markdown]
# ## Make predictions
# %%
predict_df = model_trainer.predict(valid_df)
predict_df
# %%
predict_df.to_csv("predictions.csv", index=False)
# %%
src.eval_metric.evaluate(predict_df, valid_df)
# %%
RESPECT | RESPECT-main/reinforce_baselines.py | import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from scipy.stats import ttest_rel
import copy
from train import rollout, get_inner_model
class Baseline(object):
    """Interface for REINFORCE baselines; subclasses override ``eval``."""

    def eval(self, x, c):
        """Return (baseline value, baseline loss) for inputs x with cost c."""
        raise NotImplementedError("Override this method")

    def wrap_dataset(self, dataset):
        # Default: no per-sample baseline data attached to the dataset.
        return dataset

    def unwrap_batch(self, batch):
        # Default: batch unchanged, no per-sample baseline values.
        return batch, None

    def get_learnable_parameters(self):
        # Default baselines have nothing to optimize.
        return []

    def epoch_callback(self, model, epoch):
        pass

    def state_dict(self):
        return {}

    def load_state_dict(self, state_dict):
        pass
class WarmupBaseline(Baseline):
    """Linearly blends from an exponential baseline into ``baseline``.

    During the first ``n_epochs`` epochs the returned value is the convex
    combination alpha * inner + (1 - alpha) * exponential, with alpha
    growing from 0 to 1 via ``epoch_callback``.
    """

    def __init__(self, baseline, n_epochs=1, warmup_exp_beta=0.8, ):
        super(WarmupBaseline, self).__init__()
        self.baseline = baseline
        assert n_epochs > 0, "n_epochs to warmup must be positive"
        self.warmup_baseline = ExponentialBaseline(warmup_exp_beta)
        self.alpha = 0
        self.n_epochs = n_epochs

    def wrap_dataset(self, dataset):
        if self.alpha > 0:
            return self.baseline.wrap_dataset(dataset)
        return self.warmup_baseline.wrap_dataset(dataset)

    def unwrap_batch(self, batch):
        if self.alpha > 0:
            return self.baseline.unwrap_batch(batch)
        return self.warmup_baseline.unwrap_batch(batch)

    def eval(self, x, c):
        if self.alpha == 1:
            return self.baseline.eval(x, c)
        if self.alpha == 0:
            return self.warmup_baseline.eval(x, c)
        v, l = self.baseline.eval(x, c)
        vw, lw = self.warmup_baseline.eval(x, c)
        # Return convex combination of baseline and of loss
        # BUG FIX: the loss mixed as "1 - self.alpha * lw" — the closing
        # parenthesis was misplaced, so the loss combination was wrong.
        return self.alpha * v + (1 - self.alpha) * vw, self.alpha * l + (1 - self.alpha) * lw

    def epoch_callback(self, model, epoch):
        # Need to call epoch callback of inner model (also after first epoch if we have not used it)
        self.baseline.epoch_callback(model, epoch)
        # BUG FIX: clamp alpha at 1 — after the warmup window the original
        # let alpha exceed 1, so eval blended with a negative warmup weight.
        self.alpha = min(1.0, (epoch + 1) / float(self.n_epochs))
        if epoch < self.n_epochs:
            print("Set warmup alpha = {}".format(self.alpha))

    def state_dict(self):
        # Checkpointing within warmup stage makes no sense, only save inner baseline
        return self.baseline.state_dict()

    def load_state_dict(self, state_dict):
        # Checkpointing within warmup stage makes no sense, only load inner baseline
        self.baseline.load_state_dict(state_dict)
class NoBaseline(Baseline):
    """Trivial baseline: contributes nothing to the advantage and has no loss."""

    def eval(self, x, c):
        # Baseline value and baseline loss are both identically zero.
        return 0, 0
class ExponentialBaseline(Baseline):
    """Exponential moving average of observed batch costs, used as the baseline value."""

    def __init__(self, beta):
        super(Baseline, self).__init__()
        self.beta = beta  # smoothing factor of the moving average
        self.v = None     # current EMA value; None until the first batch seeds it

    def eval(self, x, c):
        # First batch seeds the average; afterwards blend old value and new batch mean.
        v = c.mean() if self.v is None else self.beta * self.v + (1. - self.beta) * c.mean()
        self.v = v.detach()  # Detach since we never want to backprop
        return self.v, 0  # EMA baseline has no trainable loss

    def state_dict(self):
        return {'v': self.v}

    def load_state_dict(self, state_dict):
        self.v = state_dict['v']
class CriticBaseline(Baseline):
    """Baseline predicted by a learned critic network, trained with MSE against cost."""

    def __init__(self, critic):
        super(Baseline, self).__init__()
        self.critic = critic

    def eval(self, x, c):
        v = self.critic(x)
        # Detach v for the advantage (the actor must not backprop through the
        # critic); the MSE term trains the critic towards the observed cost.
        return v.detach(), F.mse_loss(v, c.detach())

    def get_learnable_parameters(self):
        return list(self.critic.parameters())

    def epoch_callback(self, model, epoch):
        pass

    def state_dict(self):
        return {'critic': self.critic.state_dict()}

    def load_state_dict(self, state_dict):
        critic_state_dict = state_dict.get('critic', {})
        if not isinstance(critic_state_dict, dict):  # backwards compatibility
            critic_state_dict = critic_state_dict.state_dict()
        self.critic.load_state_dict({**self.critic.state_dict(), **critic_state_dict})
class RolloutBaseline(Baseline):
    """Baseline from a frozen copy of the policy decoding greedily.

    The baseline value of an instance is the cost a deep-copied snapshot of the
    model obtains on it; the snapshot is replaced when the training policy is
    significantly better on the evaluation dataset (one-sided paired t-test).
    """

    def __init__(self, model, problem, opts, epoch=-1):
        super(Baseline, self).__init__()
        self.problem = problem
        self.opts = opts
        # Fixed evaluation set used to compare candidate vs. baseline models.
        self.dataset = torch.load(opts.eval_dataset_path, map_location=opts.device)
        self._update_model(model, epoch)

    def _update_model(self, model, epoch, dataset=None):
        # Snapshot the current policy and (re-)evaluate it on the eval dataset.
        self.model = copy.deepcopy(model)
        # Always generate baseline dataset when updating model to prevent overfitting to the baseline dataset
        """
        if dataset is not None:
            if len(dataset) != self.opts.val_size:
                print("Warning: not using saved baseline dataset since val_size does not match")
                dataset = None
            elif (dataset[0] if self.problem.NAME == 'tsp' else dataset[0]['loc']).size(0) != self.opts.graph_size:
                print("Warning: not using saved baseline dataset since graph_size does not match")
                dataset = None
        if dataset is None:
            self.dataset = self.problem.make_dataset(
                #size=self.opts.graph_size, num_samples=self.opts.val_size, distribution=self.opts.data_distribution)
                size=self.opts.eval_graph_size, num_samples=self.opts.val_size, distribution=self.opts.data_distribution)
        else:
            self.dataset = dataset
        """
        print("Evaluating baseline model on evaluation dataset")
        # epoch == -1 marks the initial construction; also print measures then.
        if epoch == -1:
            self.bl_vals = rollout(self.model, self.dataset, self.opts, measures=True).cpu().numpy()
        else:
            self.bl_vals = rollout(self.model, self.dataset, self.opts).cpu().numpy()
        self.mean = self.bl_vals.mean()
        self.epoch = epoch

    def wrap_dataset(self, dataset):
        print("Evaluating baseline on dataset...")
        # Need to convert baseline to 2D to prevent converting to double, see
        # https://discuss.pytorch.org/t/dataloader-gives-double-instead-of-float/717/3
        return BaselineDataset(dataset, rollout(self.model, dataset, self.opts).view(-1, 1))

    def unwrap_batch(self, batch):
        return batch['data'], batch['baseline'].view(-1)  # Flatten result to undo wrapping as 2D

    def eval(self, x, c):
        # Use volatile mode for efficient inference (single batch so we do not use rollout function)
        with torch.no_grad():
            v, _, _, _, _, _, _, _, _ = self.model(x[0], x[1], self.opts)
            #v, _, _, _, _, _, _, _, _ = self.model(x)
        # There is no loss
        return v, 0

    def epoch_callback(self, model, epoch):
        """
        Challenges the current baseline with the model and replaces the baseline model if it is improved.
        :param model: The model to challenge the baseline by
        :param epoch: The current epoch
        """
        print("Evaluating candidate model on evaluation dataset")
        candidate_vals = rollout(model, self.dataset, self.opts).cpu().numpy()
        candidate_mean = candidate_vals.mean()
        print("Epoch {} candidate mean {}, baseline epoch {} mean {}, difference {}".format(
            epoch, candidate_mean, self.epoch, self.mean, candidate_mean - self.mean))
        if candidate_mean - self.mean < 0:
            # Calc p value
            t, p = ttest_rel(candidate_vals, self.bl_vals)
            p_val = p / 2  # one-sided
            assert t < 0, "T-statistic should be negative"
            print("p-value: {}".format(p_val))
            if p_val < self.opts.bl_alpha:
                print('Update baseline')
                self._update_model(model, epoch)

    def state_dict(self):
        return {
            'model': self.model,
            'dataset': self.dataset,
            'epoch': self.epoch
        }

    def load_state_dict(self, state_dict):
        # We make it such that it works whether model was saved as data parallel or not
        load_model = copy.deepcopy(self.model)
        get_inner_model(load_model).load_state_dict(get_inner_model(state_dict['model']).state_dict())
        self._update_model(load_model, state_dict['epoch'], state_dict['dataset'])
class BaselineDataset(Dataset):
    """Pairs each sample of a dataset with its precomputed baseline value."""

    def __init__(self, dataset=None, baseline=None):
        super(BaselineDataset, self).__init__()
        self.dataset = dataset
        self.baseline = baseline
        # Every sample must have exactly one baseline value.
        assert (len(self.dataset) == len(self.baseline))

    def __getitem__(self, item):
        return {'data': self.dataset[item], 'baseline': self.baseline[item]}

    def __len__(self):
        return len(self.dataset)
| 8,641 | 32.890196 | 121 | py |
RESPECT | RESPECT-main/run.py | #!/usr/bin/env python
import os
import json
import pprint as pp
import torch
import torch.optim as optim
from tensorboard_logger import Logger as TbLogger
from nets.critic_network import CriticNetwork
from options import get_options
from train import train_epoch, validate, get_inner_model
#from train_single import train_epoch, validate, get_inner_model
from reinforce_baselines import NoBaseline, ExponentialBaseline, CriticBaseline, RolloutBaseline, WarmupBaseline
from nets.attention_model import AttentionModel
from nets.pointer_network import PointerNetwork, CriticNetworkLSTM
from utils import torch_load_cpu, load_problem
from dataset import TopoSortDataset
def run(opts):
    """Set up and run training/evaluation for the configured problem.

    Builds the model and baseline from `opts`, optionally restores a
    checkpoint (`opts.load_path` / `opts.resume`), then either evaluates only
    (`opts.eval_only`) or trains for `opts.n_epochs` epochs.
    """
    # Pretty print the run args
    pp.pprint(vars(opts))

    # Set the random seed
    torch.manual_seed(opts.seed)

    # Optionally configure tensorboard
    tb_logger = None
    if not opts.no_tensorboard:
        tb_logger = TbLogger(os.path.join(opts.log_dir, "{}_{}".format(opts.problem, opts.graph_size), opts.run_name))

    os.makedirs(opts.save_dir)
    # Save arguments so exact configuration can always be found
    with open(os.path.join(opts.save_dir, "args.json"), 'w') as f:
        json.dump(vars(opts), f, indent=True)

    # Set the device
    opts.device = torch.device("cuda:0" if opts.use_cuda else "cpu")

    # Figure out what's the problem
    problem = load_problem(opts.problem)

    # Load data from load_path (fresh weights) or resume (continue a run)
    load_data = {}
    assert opts.load_path is None or opts.resume is None, "Only one of load path and resume can be given"
    load_path = opts.load_path if opts.load_path is not None else opts.resume
    if load_path is not None:
        print(' [*] Loading data from {}'.format(load_path))
        load_data = torch_load_cpu(load_path)

    # Initialize model
    model_class = {
        'attention': AttentionModel,
        'pointer': PointerNetwork
    }.get(opts.model, None)
    assert model_class is not None, "Unknown model: {}".format(model_class)
    model = model_class(
        opts.embedding_dim,
        opts.hidden_dim,
        problem,
        n_encode_layers=opts.n_encode_layers,
        mask_inner=True,
        mask_logits=True,
        normalization=opts.normalization,
        tanh_clipping=opts.tanh_clipping,
        checkpoint_encoder=opts.checkpoint_encoder,
        shrink_size=opts.shrink_size,
        num_coordinates=opts.num_coordinates
    ).to(opts.device)

    if opts.use_cuda and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # Overwrite model parameters by parameters to load
    model_ = get_inner_model(model)
    model_.load_state_dict({**model_.state_dict(), **load_data.get('model', {})})

    # Initialize baseline
    if opts.baseline == 'exponential':
        baseline = ExponentialBaseline(opts.exp_beta)
    elif opts.baseline == 'critic' or opts.baseline == 'critic_lstm':
        assert problem.NAME == 'tsp', "Critic only supported for TSP"
        baseline = CriticBaseline(
            (
                CriticNetworkLSTM(
                    2,
                    opts.embedding_dim,
                    opts.hidden_dim,
                    opts.n_encode_layers,
                    opts.tanh_clipping
                )
                if opts.baseline == 'critic_lstm'
                else
                CriticNetwork(
                    2,
                    opts.embedding_dim,
                    opts.hidden_dim,
                    opts.n_encode_layers,
                    opts.normalization
                )
            ).to(opts.device)
        )
    elif opts.baseline == 'rollout':
        baseline = RolloutBaseline(model, problem, opts)
    else:
        assert opts.baseline is None, "Unknown baseline: {}".format(opts.baseline)
        baseline = NoBaseline()
    if opts.bl_warmup_epochs > 0:
        baseline = WarmupBaseline(baseline, opts.bl_warmup_epochs, warmup_exp_beta=opts.exp_beta)

    # Load baseline from data, make sure script is called with same type of baseline
    if 'baseline' in load_data:
        baseline.load_state_dict(load_data['baseline'])

    # Initialize optimizer (critic params get their own learning rate)
    optimizer = optim.Adam(
        [{'params': model.parameters(), 'lr': opts.lr_model}]
        + (
            [{'params': baseline.get_learnable_parameters(), 'lr': opts.lr_critic}]
            if len(baseline.get_learnable_parameters()) > 0
            else []
        )
    )

    # Load optimizer state and move its tensors to the target device
    if 'optimizer' in load_data:
        optimizer.load_state_dict(load_data['optimizer'])
        for state in optimizer.state.values():
            for k, v in state.items():
                if torch.is_tensor(v):
                    state[k] = v.to(opts.device)

    # Initialize learning rate scheduler, decay by lr_decay once per epoch!
    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: opts.lr_decay ** epoch)

    # Load the fixed validation dataset
    val_dataset = torch.load(opts.eval_dataset_path, map_location=opts.device)

    # BUG FIX: previously `train_dataset` was assigned only under
    # `if not val_dataset:`, so whenever the validation set loaded
    # successfully (the normal case) and training was requested,
    # `train_epoch(..., train_dataset, ...)` raised a NameError.
    # Load the training set whenever we are actually going to train.
    train_dataset = None
    if not opts.eval_only:
        train_dataset = torch.load(opts.train_dataset_path, map_location=opts.device)

    if opts.resume:
        epoch_resume = int(os.path.splitext(os.path.split(opts.resume)[-1])[0].split("-")[1])
        torch.set_rng_state(load_data['rng_state'])
        if opts.use_cuda:
            torch.cuda.set_rng_state_all(load_data['cuda_rng_state'])
        # Set the random states
        # Dumping of state was done before epoch callback, so do that now (model is loaded)
        baseline.epoch_callback(model, epoch_resume)
        print("Resuming after {}".format(epoch_resume))
        opts.epoch_start = epoch_resume + 1

    if opts.eval_only:
        validate(model, val_dataset, opts, measures=False, plot=True)
    else:
        for epoch in range(opts.epoch_start, opts.epoch_start + opts.n_epochs):
            train_epoch(
                model,
                optimizer,
                baseline,
                lr_scheduler,
                epoch,
                val_dataset,
                train_dataset,
                problem,
                tb_logger,
                opts
            )


if __name__ == "__main__":
    run(get_options())
| 7,073 | 36.231579 | 137 | py |
RESPECT | RESPECT-main/train_model_run.py | import os
import time
from tqdm import tqdm
import torch
import math
from torch.utils.data import DataLoader
from torch.nn import DataParallel
from nets.attention_model import set_decode_type
from utils.log_utils import log_values
from utils import move_to
import warnings
def get_inner_model(model):
    """Return the wrapped module if *model* is a DataParallel wrapper, else *model* itself."""
    if isinstance(model, DataParallel):
        return model.module
    return model
def validate(model, dataset, opts, measures=True, plot=False):
    """Greedy-rollout *model* over *dataset*; print and return the mean cost.

    The printed line also includes the standard error of the mean.
    """
    print('Validating...')
    costs = rollout(model, dataset, opts, measures, plot)
    avg_cost = costs.mean()
    std_err = torch.std(costs) / math.sqrt(len(costs))
    print('Validation overall avg_cost: {} +- {}'.format(avg_cost, std_err))
    return avg_cost
def rollout(model, dataset, opts, measures=False, plot_data=False):
    """Evaluate `model` with greedy decoding over `dataset`.

    Returns a CPU tensor of per-instance costs. With `measures=True`,
    additionally accumulates mismatch / recall-accuracy statistics across
    batches and prints them. Each batch `bat` is a (data, labels) pair —
    presumably produced by the TopoSort dataset; TODO confirm against loader.
    """
    # Put in greedy evaluation mode!
    set_decode_type(model, "greedy")
    model.eval()

    def eval_model_bat(bat):
        # Evaluate a single batch without gradients.
        with torch.no_grad():
            #cost, _, _, _, _, _ = model(move_to(bat, opts.device))
            #cost, _, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = model(move_to(bat[0], opts.device), labels=move_to(bat[1], opts.device), Measures=measures, Plot_Data=plot_data)
            cost, _, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = model(move_to(bat[0], opts.device), bat[1], opts, Measures=measures, Plot_Data=plot_data)
            #cost, _, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = model(move_to(bat, opts.device), Measures=measures, Plot_Data=plot_data)
            #return cost.data.cpu(), misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
            return cost.data.cpu(), misMatch, None, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min

    if not measures:
        # Fast path: only collect costs.
        return torch.cat([
            eval_model_bat(bat)[0]
            for bat
            #in tqdm(DataLoader(dataset, batch_size=opts.eval_batch_size), disable=opts.no_progress_bar)
            in tqdm(DataLoader(dataset, batch_size=opts.eval_batch_size), disable=opts.no_progress_bar)
        ], 0)
    else:
        # Slow path: also aggregate evaluation measures batch by batch.
        count = 0
        cost_all = torch.tensor([]).data.cpu()
        #misMatch_y_all, misMatch_x_all, recall_accuracy_all, radius_mean_all, radius_max_all = [], [], [], [], []
        #misMatch_y_all, misMatch_x_all, recall_accuracy_all, radius_mean_all = 0., 0., 0., 0.
        misMatch_all, recall_accuracy_all, radius_mean_all = 0., 0., 0.
        # NOTE(review): these accumulators are allocated with .cuda(), so the
        # measures path requires a GPU — confirm intended.
        radius_max = torch.FloatTensor([0.]).cuda()
        recall_accuracy_max = torch.FloatTensor([0.]).cuda()
        recall_accuracy_min = torch.FloatTensor([1.]).cuda()
        """
        radius_max = 0
        recall_accuracy_max = 0.
        recall_accuracy_min = 1.
        """
        #misMatch_all = []
        for bat in tqdm(DataLoader(dataset, batch_size=opts.eval_batch_size), disable=opts.no_progress_bar):
            #cost_batch, misMatch_y_b, misMatch_x_b, recall_accuracy_b, radius_mean_b, radius_max_b, recall_accuracy_max_b, recall_accuracy_min_b = eval_model_bat(bat)
            cost_batch, misMatch_b, _, recall_accuracy_b, radius_mean_b, radius_max_b, recall_accuracy_max_b, recall_accuracy_min_b = eval_model_bat(bat)
            #cost_batch, misMatch_y_b, misMatch_x_b, _, _, _ = eval_model_bat(bat)
            #if first_iteration:
            if count == 0:
                cost_all = cost_batch
            else:
                cost_all = torch.cat((cost_all, cost_batch), 0)
            #misMatch_all += misMatch_b
            #misMatch_y_all += misMatch_y_b
            #misMatch_x_all += misMatch_x_b
            misMatch_all += misMatch_b
            recall_accuracy_all += recall_accuracy_b
            #radius_mean_all += radius_mean_b
            recall_accuracy_max = torch.max(recall_accuracy_max, recall_accuracy_max_b)
            recall_accuracy_min = torch.min(recall_accuracy_min, recall_accuracy_min_b)
            #radius_max = torch.max(radius_max, radius_max_b)
            #recall_accuracy_all.append(recall_accuracy_b)
            #radius_mean_all.append(radius_mean_b)
            #radius_max_all.append(radius_max_b)
            #recall_accuracy_all += recall_accuracy_b
            #recall_accuracy_max = max(recall_accuracy_max_b, recall_accuracy_max)
            #recall_accuracy_min = min(recall_accuracy_min_b, recall_accuracy_min)
            #radius_mean_all += radius_mean_b
            #radius_max = max(radius_max, radius_max_b)
            count += 1
        # Report averages over batches (not over instances).
        #print("Validation count of misMatch on y axis: {:.2f}".format((misMatch_y_all/count).item()))
        #print("Validation count of misMatch on x axis: {:.2f}".format((misMatch_x_all/count).item()))
        print("Validation count of misMatch: {:.2f}".format((misMatch_all/count).item()))
        print("Validation mean of recall_accuracy: {:.2f}".format((recall_accuracy_all/count).item()))
        print("Validation max of recall_accuracy: {:.2f}".format((recall_accuracy_max).item()))
        print("Validation min of recall_accuracy: {:.2f}".format((recall_accuracy_min).item()))
        #print("Validation mean of radius_misposition: {:.2f}".format((radius_mean_all/count).item()))
        #print("Validation max of radius_misposition: {:d}".format(round(radius_max.item())))
        #print("Validation count of misMatch on y axis: ", misMatch_y_all/count)
        #print("Validation count of misMatch on x axis: ", misMatch_x_all/count)
        #print("Validation mean of recall_accuracy: {:.2f}".format(recall_accuracy_all/count))
        #print("Validation max of recall_accuracy: {:.2f}".format(recall_accuracy_max))
        #print("Validation min of recall_accuracy: {:.2f}".format(recall_accuracy_min))
        #print("Validation mean of radius_misposition: {:.2f}".format(radius_mean_all/count))
        #print("Validation max of radius_misposition: {:d}".format(round(radius_max)))
        #print("Validation min of recall_accuracy: {:.2f}".format(recall_accuracy_min*100))
        #print("Validation mean of recall_accuracy: {:.2f}".format(sum(recall_accuracy_all)*100/len(recall_accuracy_all)))
        #print("Validation mean of radius_misposition: {:.2f}".format(sum(radius_mean_all)/len(radius_mean_all)))
        #radius_max_all.sort()
        #print("Validation max of radius_misposition: {:d}".format(radius_max_all[-1]))
        return cost_all
def clip_grad_norms(param_groups, max_norm=math.inf):
    """Clip each optimizer param group's gradients to *max_norm* (L2 norm).

    :param param_groups: optimizer param groups (each a dict with 'params')
    :param max_norm: clipping threshold; non-positive disables clipping
    :return: (grad_norms, grad_norms_clipped) — per-group total gradient norm
        before clipping, and the same values capped at *max_norm*
    """
    effective_max = max_norm if max_norm > 0 else math.inf
    grad_norms = []
    for group in param_groups:
        # clip_grad_norm_ scales gradients in place and returns the pre-clip norm.
        total_norm = torch.nn.utils.clip_grad_norm_(group['params'], effective_max, norm_type=2)
        grad_norms.append(total_norm)
    if max_norm > 0:
        grad_norms_clipped = [min(g_norm, max_norm) for g_norm in grad_norms]
    else:
        grad_norms_clipped = grad_norms
    return grad_norms, grad_norms_clipped
def train_epoch(model, optimizer, baseline, lr_scheduler, epoch, val_dataset, train_dataset, problem, tb_logger, opts):
    """Run one training epoch: wrap the dataset with baseline values, train
    batch by batch, checkpoint, validate, and step the LR scheduler."""
    print("Start train epoch {}, lr={} for run {}".format(epoch, optimizer.param_groups[0]['lr'], opts.run_name))
    # Global step continues across epochs for consistent tensorboard x-axis.
    step = epoch * (opts.epoch_size // opts.batch_size)
    start_time = time.time()
    if not opts.no_tensorboard:
        tb_logger.log_value('learnrate_pg0', optimizer.param_groups[0]['lr'], step)
    # Generate new training data for each epoch
    #training_dataset = baseline.wrap_dataset(problem.make_dataset(
    # size=opts.graph_size, num_samples=opts.epoch_size, distribution=opts.data_distribution))
    training_dataset = baseline.wrap_dataset(train_dataset)
    #training_dataloader = DataLoader(training_dataset, batch_size=opts.batch_size, num_workers=1)
    #training_dataloader = DataLoader(training_dataset, batch_size=opts.batch_size)
    training_dataloader = DataLoader(training_dataset, batch_size=opts.batch_size)
    # Put model in train mode!
    model.train()
    set_decode_type(model, "sampling")
    for batch_id, batch in enumerate(tqdm(training_dataloader, disable=opts.no_progress_bar)):
        train_batch(
            model,
            optimizer,
            baseline,
            epoch,
            batch_id,
            step,
            batch,
            tb_logger,
            opts
        )
        step += 1
    epoch_duration = time.time() - start_time
    print("Finished epoch {}, took {} s".format(epoch, time.strftime('%H:%M:%S', time.gmtime(epoch_duration))))
    # Periodic checkpoint (and always at the final epoch).
    if (opts.checkpoint_epochs != 0 and epoch % opts.checkpoint_epochs == 0) or epoch == opts.n_epochs - 1:
        print('Saving model and state...')
        torch.save(
            {
                'model': get_inner_model(model).state_dict(),
                'optimizer': optimizer.state_dict(),
                'rng_state': torch.get_rng_state(),
                'cuda_rng_state': torch.cuda.get_rng_state_all(),
                'baseline': baseline.state_dict()
            },
            os.path.join(opts.save_dir, 'epoch-{}.pt'.format(epoch))
        )
    if epoch == opts.n_epochs-1:
        #avg_reward = validate(model, val_dataset, opts, plot=True)
        avg_reward = validate(model, val_dataset, opts)
    else:
        avg_reward = validate(model, val_dataset, opts)
    if not opts.no_tensorboard:
        tb_logger.log_value('val_avg_reward', avg_reward, step)
    # Give the baseline a chance to replace its model (e.g. rollout baseline).
    baseline.epoch_callback(model, epoch)
    # lr_scheduler should be called at end of epoch
    lr_scheduler.step()
def train_batch(
        model,
        optimizer,
        baseline,
        epoch,
        batch_id,
        step,
        batch,
        tb_logger,
        opts
):
    """Run one REINFORCE update on a single batch: compute advantage against
    the baseline, backprop, clip gradients, step the optimizer, and log."""
    x, bl_val = baseline.unwrap_batch(batch)
    #x = move_to(x, opts.device)
    # x is a (data, labels) pair; move both parts to the training device.
    x[0] = move_to(x[0], opts.device)
    x[1] = move_to(x[1], opts.device)
    bl_val = move_to(bl_val, opts.device) if bl_val is not None else None
    # Evaluate model, get costs and log probabilities
    cost, log_likelihood, _, _, _, _, _, _, _ = model(x[0], x[1], opts)
    #cost, log_likelihood, _, _, _, _, _, _, _ = model(x)
    # Evaluate baseline, get baseline loss if any (only for critic)
    bl_val, bl_loss = baseline.eval(x, cost) if bl_val is None else (bl_val, 0)
    # Calculate loss
    reinforce_loss = ((cost - bl_val) * log_likelihood).mean()
    loss = reinforce_loss + bl_loss
    # Perform backward pass and optimization step
    optimizer.zero_grad()
    loss.backward()
    # Clip gradient norms and get (clipped) gradient norms for logging
    grad_norms = clip_grad_norms(optimizer.param_groups, opts.max_grad_norm)
    optimizer.step()
    # Logging
    if step % int(opts.log_step) == 0:
        log_values(cost, grad_norms, epoch, batch_id, step,
                   log_likelihood, reinforce_loss, bl_loss, tb_logger, opts)
| 11,203 | 43.995984 | 243 | py |
RESPECT | RESPECT-main/options.py | import os
import time
import argparse
import torch
def get_options(args=None):
    """Parse command-line options and derive run-time settings.

    :param args: optional list of argument strings (defaults to sys.argv)
    :return: argparse Namespace augmented with use_cuda, run_name, save_dir
        and a normalized bl_warmup_epochs
    """
    parser = argparse.ArgumentParser(
        description="Attention based model for solving the Travelling Salesman Problem with Reinforcement Learning")

    # Data
    parser.add_argument('--problem', default='toposort', help="The problem to solve, default 'tsp'")
    parser.add_argument('--graph_size', type=int, default=30, help="The size of the problem graph")
    parser.add_argument('--eval_graph_size', type=int, default=50, help="The size of the problem graph during evaluation")
    parser.add_argument('--num_coordinates', type=int, default=15, help="Input dimension the problem graph")
    parser.add_argument('--batch_size', type=int, default=128, help='Number of instances per batch during training')
    parser.add_argument('--epoch_size', type=int, default=128000, help='Number of instances per epoch during training')
    parser.add_argument('--val_size', type=int, default=1024,
                        help='Number of instances used for reporting validation performance')
    parser.add_argument('--val_dataset', type=str, default=None, help='Dataset file to use for validation')

    # Model
    parser.add_argument('--model', default='pointer', help="Model, 'attention' (default) or 'pointer'")
    parser.add_argument('--embedding_dim', type=int, default=256, help='Dimension of input embedding')
    parser.add_argument('--hidden_dim', type=int, default=256, help='Dimension of hidden layers in Enc/Dec')
    parser.add_argument('--n_encode_layers', type=int, default=3,
                        help='Number of layers in the encoder/critic network')
    parser.add_argument('--tanh_clipping', type=float, default=10.,
                        help='Clip the parameters to within +- this value using tanh. '
                             'Set to 0 to not perform any clipping.')
    parser.add_argument('--normalization', default='batch', help="Normalization type, 'batch' (default) or 'instance'")
    parser.add_argument('--n_head_encoder', type=int, default=1, help='Num of encoder header for Multihead Attention')
    parser.add_argument('--n_head_decoder', type=int, default=2, help='Num of decoder header for Multihead Attention')
    parser.add_argument('--n_layer_encoder', type=int, default=1, help='Num of encoder layer for Multihead Attention')
    parser.add_argument('--n_layer_decoder', type=int, default=2, help='Num of decoder layer for Multihead Attention')

    # Training
    parser.add_argument('--lr_model', type=float, default=1e-4, help="Set the learning rate for the actor network")
    parser.add_argument('--lr_critic', type=float, default=1e-4, help="Set the learning rate for the critic network")
    parser.add_argument('--lr_decay', type=float, default=1., help='Learning rate decay per epoch')
    parser.add_argument('--eval_only', action='store_true', help='Set this value to only evaluate model')
    parser.add_argument('--n_epochs', type=int, default=300, help='The number of epochs to train')
    parser.add_argument('--seed', type=int, default=1234, help='Random seed to use')
    parser.add_argument('--max_grad_norm', type=float, default=1.0,
                        help='Maximum L2 norm for gradient clipping, default 1.0 (0 to disable clipping)')
    parser.add_argument('--no_cuda', action='store_true', help='Disable CUDA')
    parser.add_argument('--exp_beta', type=float, default=0.8,
                        help='Exponential moving average baseline decay (default 0.8)')
    parser.add_argument('--baseline', default='rollout',
                        help="Baseline to use: 'rollout', 'critic' or 'exponential'. Defaults to no baseline.")
    parser.add_argument('--bl_alpha', type=float, default=0.05,
                        help='Significance in the t-test for updating rollout baseline')
    parser.add_argument('--bl_warmup_epochs', type=int, default=None,
                        help='Number of epochs to warmup the baseline, default None means 1 for rollout (exponential '
                             'used for warmup phase), 0 otherwise. Can only be used with rollout baseline.')
    parser.add_argument('--eval_batch_size', type=int, default=512,
                        help="Batch size to use during (baseline) evaluation")
    parser.add_argument('--checkpoint_encoder', action='store_true',
                        help='Set to decrease memory usage by checkpointing encoder')
    parser.add_argument('--shrink_size', type=int, default=None,
                        help='Shrink the batch size if at least this many instances in the batch are finished'
                             ' to save memory (default None means no shrinking)')
    parser.add_argument('--data_distribution', type=str, default=None,
                        help='Data distribution to use during training, defaults and options depend on problem.')
    parser.add_argument('--graph_file', type=str, default=None, help='Graph file recording arrangemnet of the nodes result and label')

    # Misc
    parser.add_argument('--log_step', type=int, default=50, help='Log info every log_step steps')
    parser.add_argument('--log_dir', default='logs', help='Directory to write TensorBoard information to')
    parser.add_argument('--run_name', default='run', help='Name to identify the run')
    parser.add_argument('--output_dir', default='outputs', help='Directory to write output models to')
    parser.add_argument('--epoch_start', type=int, default=0,
                        help='Start at epoch # (relevant for learning rate decay)')
    parser.add_argument('--checkpoint_epochs', type=int, default=1,
                        help='Save checkpoint every n epochs (default 1), 0 to save no checkpoints')
    parser.add_argument('--load_path', help='Path to load model parameters and optimizer state from')
    parser.add_argument('--resume', help='Resume from previous checkpoint file')
    parser.add_argument('--train_dataset_path', help='Path to load stored training dataset')
    parser.add_argument('--eval_dataset_path', help='Path to load stored validation dataset')
    parser.add_argument('--no_tensorboard', action='store_true', help='Disable logging TensorBoard files')
    parser.add_argument('--no_progress_bar', action='store_true', help='Disable progress bar')
    opts = parser.parse_args(args)

    # Derived settings: CUDA availability, timestamped run name, output dir.
    opts.use_cuda = torch.cuda.is_available() and not opts.no_cuda
    opts.run_name = "{}_{}".format(opts.run_name, time.strftime("%Y%m%dT%H%M%S"))
    opts.save_dir = os.path.join(
        opts.output_dir,
        "{}_{}".format(opts.problem, opts.graph_size),
        opts.run_name
    )
    # Warmup defaults to 1 epoch only for the rollout baseline.
    if opts.bl_warmup_epochs is None:
        opts.bl_warmup_epochs = 1 if opts.baseline == 'rollout' else 0
    assert (opts.bl_warmup_epochs == 0) or (opts.baseline == 'rollout')
    assert opts.epoch_size % opts.batch_size == 0, "Epoch size must be integer multiple of batch size!"
    return opts
| 6,946 | 69.887755 | 134 | py |
RESPECT | RESPECT-main/reinforce_baselines_single.py | import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from scipy.stats import ttest_rel
import copy
from train import rollout, get_inner_model
class Baseline(object):
    """Interface for REINFORCE baselines.

    Default implementations amount to "no baseline": datasets pass through
    unchanged, batches carry no precomputed values, and there is no
    learnable or persistent state.
    """

    def wrap_dataset(self, dataset):
        # Nothing to attach by default.
        return dataset

    def unwrap_batch(self, batch):
        # (data, baseline) pair; None means "evaluate the baseline online".
        return batch, None

    def eval(self, x, c):
        raise NotImplementedError("Override this method")

    def get_learnable_parameters(self):
        # Nothing to optimize by default.
        return []

    def epoch_callback(self, model, epoch):
        # End-of-epoch hook; no-op by default.
        pass

    def state_dict(self):
        # No state to checkpoint.
        return {}

    def load_state_dict(self, state_dict):
        # No state to restore.
        pass
class WarmupBaseline(Baseline):
    """Wraps another baseline and blends it with an exponential moving-average
    baseline during the first ``n_epochs`` epochs of training.

    Mixing weight ``alpha = (epoch + 1) / n_epochs`` is updated in
    epoch_callback; at alpha == 1 the inner baseline is used exclusively.
    """

    def __init__(self, baseline, n_epochs=1, warmup_exp_beta=0.8, ):
        super(Baseline, self).__init__()
        self.baseline = baseline
        assert n_epochs > 0, "n_epochs to warmup must be positive"
        self.warmup_baseline = ExponentialBaseline(warmup_exp_beta)
        self.alpha = 0  # blend weight of the inner baseline; 0 = pure warmup
        self.n_epochs = n_epochs

    def wrap_dataset(self, dataset):
        if self.alpha > 0:
            return self.baseline.wrap_dataset(dataset)
        return self.warmup_baseline.wrap_dataset(dataset)

    def unwrap_batch(self, batch):
        if self.alpha > 0:
            return self.baseline.unwrap_batch(batch)
        return self.warmup_baseline.unwrap_batch(batch)

    def eval(self, x, c):
        """Return (value, loss) as a convex combination of both baselines."""
        if self.alpha == 1:
            return self.baseline.eval(x, c)
        if self.alpha == 0:
            return self.warmup_baseline.eval(x, c)
        v, l = self.baseline.eval(x, c)
        vw, lw = self.warmup_baseline.eval(x, c)
        # Return convex combination of baseline and of loss.
        # BUG FIX: the loss term previously read `(1 - self.alpha * lw)` — a
        # misplaced parenthesis, so the combined loss was not a convex
        # combination. Corrected to `(1 - self.alpha) * lw`.
        return self.alpha * v + (1 - self.alpha) * vw, self.alpha * l + (1 - self.alpha) * lw

    def epoch_callback(self, model, epoch):
        # Need to call epoch callback of inner model (also after first epoch if we have not used it)
        self.baseline.epoch_callback(model, epoch)
        self.alpha = (epoch + 1) / float(self.n_epochs)
        if epoch < self.n_epochs:
            print("Set warmup alpha = {}".format(self.alpha))

    def state_dict(self):
        # Checkpointing within warmup stage makes no sense, only save inner baseline
        return self.baseline.state_dict()

    def load_state_dict(self, state_dict):
        # Checkpointing within warmup stage makes no sense, only load inner baseline
        self.baseline.load_state_dict(state_dict)
class NoBaseline(Baseline):
    """Trivial baseline: contributes nothing to the advantage and has no loss."""

    def eval(self, x, c):
        # Baseline value and baseline loss are both identically zero.
        return 0, 0
class ExponentialBaseline(Baseline):
    """Exponential moving average of observed batch costs, used as the baseline value."""

    def __init__(self, beta):
        super(Baseline, self).__init__()
        self.beta = beta  # smoothing factor of the moving average
        self.v = None     # current EMA value; None until the first batch seeds it

    def eval(self, x, c):
        # First batch seeds the average; afterwards blend old value and new batch mean.
        v = c.mean() if self.v is None else self.beta * self.v + (1. - self.beta) * c.mean()
        self.v = v.detach()  # Detach since we never want to backprop
        return self.v, 0  # EMA baseline has no trainable loss

    def state_dict(self):
        return {'v': self.v}

    def load_state_dict(self, state_dict):
        self.v = state_dict['v']
class CriticBaseline(Baseline):
    """Baseline predicted by a learned critic network, trained with MSE against cost."""

    def __init__(self, critic):
        super(Baseline, self).__init__()
        self.critic = critic

    def eval(self, x, c):
        v = self.critic(x)
        # Detach v for the advantage (the actor must not backprop through the
        # critic); the MSE term trains the critic towards the observed cost.
        return v.detach(), F.mse_loss(v, c.detach())

    def get_learnable_parameters(self):
        return list(self.critic.parameters())

    def epoch_callback(self, model, epoch):
        pass

    def state_dict(self):
        return {'critic': self.critic.state_dict()}

    def load_state_dict(self, state_dict):
        critic_state_dict = state_dict.get('critic', {})
        if not isinstance(critic_state_dict, dict):  # backwards compatibility
            critic_state_dict = critic_state_dict.state_dict()
        self.critic.load_state_dict({**self.critic.state_dict(), **critic_state_dict})
class RolloutBaseline(Baseline):
    """Greedy-rollout baseline: a frozen copy of the policy scores each instance,
    and its cost is used as the baseline value for REINFORCE.

    The frozen model is replaced only when a candidate model significantly
    improves on a fixed evaluation dataset (one-sided paired t-test against
    the cached baseline costs, threshold ``opts.bl_alpha``).
    """
    def __init__(self, model, problem, opts, epoch=-1):
        # NOTE(review): `super(Baseline, self)` skips Baseline's own __init__ in the
        # MRO; presumably harmless because Baseline defines no initializer — confirm.
        super(Baseline, self).__init__()
        self.problem = problem
        self.opts = opts
        # Fixed evaluation dataset loaded from disk; reused for every baseline update.
        self.dataset = torch.load(opts.eval_dataset_path, map_location=opts.device)
        self._update_model(model, epoch)
    def _update_model(self, model, epoch, dataset=None):
        # Freeze a deep copy of the current policy as the new baseline model and
        # cache its rollout costs (self.bl_vals) on the evaluation dataset.
        self.model = copy.deepcopy(model)
        # Always generate baseline dataset when updating model to prevent overfitting to the baseline dataset
        """
        if dataset is not None:
            if len(dataset) != self.opts.val_size:
                print("Warning: not using saved baseline dataset since val_size does not match")
                dataset = None
            elif (dataset[0] if self.problem.NAME == 'tsp' else dataset[0]['loc']).size(0) != self.opts.graph_size:
                print("Warning: not using saved baseline dataset since graph_size does not match")
                dataset = None
        if dataset is None:
            self.dataset = self.problem.make_dataset(
                #size=self.opts.graph_size, num_samples=self.opts.val_size, distribution=self.opts.data_distribution)
                size=self.opts.eval_graph_size, num_samples=self.opts.val_size, distribution=self.opts.data_distribution)
        else:
            self.dataset = dataset
        """
        print("Evaluating baseline model on evaluation dataset")
        if epoch == -1:
            # Initial construction: also compute the extra quality measures once.
            self.bl_vals = rollout(self.model, self.dataset, self.opts, measures=True).cpu().numpy()
        else:
            self.bl_vals = rollout(self.model, self.dataset, self.opts).cpu().numpy()
        self.mean = self.bl_vals.mean()
        self.epoch = epoch
    def wrap_dataset(self, dataset):
        # Precompute the baseline cost of every training sample so each batch
        # carries its baseline value with it (see BaselineDataset).
        print("Evaluating baseline on dataset...")
        # Need to convert baseline to 2D to prevent converting to double, see
        # https://discuss.pytorch.org/t/dataloader-gives-double-instead-of-float/717/3
        return BaselineDataset(dataset, rollout(self.model, dataset, self.opts).view(-1, 1))
    def unwrap_batch(self, batch):
        return batch['data'], batch['baseline'].view(-1)  # Flatten result to undo wrapping as 2D
    def eval(self, x, c):
        # Use volatile mode for efficient inference (single batch so we do not use rollout function)
        with torch.no_grad():
            v, _, _, _, _, _, _, _, _ = self.model(x[0], x[1], self.opts)
            #v, _, _, _, _, _, _, _, _ = self.model(x)
        # There is no loss
        return v, 0
    def epoch_callback(self, model, epoch):
        """
        Challenges the current baseline with the model and replaces the baseline model if it is improved.
        :param model: The model to challenge the baseline by
        :param epoch: The current epoch
        """
        print("Evaluating candidate model on evaluation dataset")
        candidate_vals = rollout(model, self.dataset, self.opts).cpu().numpy()
        candidate_mean = candidate_vals.mean()
        print("Epoch {} candidate mean {}, baseline epoch {} mean {}, difference {}".format(
            epoch, candidate_mean, self.epoch, self.mean, candidate_mean - self.mean))
        if candidate_mean - self.mean < 0:
            # Calc p value (one-sided paired t-test: is the candidate truly better?)
            t, p = ttest_rel(candidate_vals, self.bl_vals)
            p_val = p / 2  # one-sided
            assert t < 0, "T-statistic should be negative"
            print("p-value: {}".format(p_val))
            if p_val < self.opts.bl_alpha:
                print('Update baseline')
                self._update_model(model, epoch)
    def state_dict(self):
        # Save the frozen model, its evaluation dataset, and the epoch it came from.
        return {
            'model': self.model,
            'dataset': self.dataset,
            'epoch': self.epoch
        }
    def load_state_dict(self, state_dict):
        # We make it such that it works whether model was saved as data parallel or not
        load_model = copy.deepcopy(self.model)
        get_inner_model(load_model).load_state_dict(get_inner_model(state_dict['model']).state_dict())
        self._update_model(load_model, state_dict['epoch'], state_dict['dataset'])
class BaselineDataset(Dataset):
    """Pairs each sample of a wrapped dataset with its precomputed baseline value.

    Items are dicts with keys ``'data'`` (the original sample) and ``'baseline'``
    (the corresponding baseline value); both sequences must have equal length.
    """

    def __init__(self, dataset=None, baseline=None):
        super(BaselineDataset, self).__init__()
        self.dataset = dataset
        self.baseline = baseline
        assert (len(self.dataset) == len(self.baseline))

    def __getitem__(self, item):
        sample = self.dataset[item]
        value = self.baseline[item]
        return {'data': sample, 'baseline': value}

    def __len__(self):
        return len(self.dataset)
| 8,641 | 32.890196 | 121 | py |
RESPECT | RESPECT-main/train.py | import os
import time
from tqdm import tqdm
import torch
import math
from torch.utils.data import DataLoader
from torch.nn import DataParallel
from nets.attention_model import set_decode_type
from utils.log_utils import log_values
from utils import move_to
import warnings
def get_inner_model(model):
    """Unwrap a DataParallel container; return any other model unchanged."""
    if isinstance(model, DataParallel):
        return model.module
    return model
def validate(model, dataset, opts, measures=True, plot=False):
    """Evaluate `model` on `dataset` via greedy rollout and return the mean cost.

    Also prints the mean cost with its standard error over the dataset.
    """
    print('Validating...')
    cost = rollout(model, dataset, opts, measures, plot)
    avg_cost = cost.mean()
    std_error = torch.std(cost) / math.sqrt(len(cost))
    print('Validation overall avg_cost: {} +- {}'.format(avg_cost, std_error))
    return avg_cost
import time
def rollout(model, dataset, opts, measures=False, plot_data=False):
    """Greedily decode every batch of `dataset` with `model` and return the
    per-instance costs as one CPU tensor.

    With measures=False only the costs are collected; with measures=True the
    per-batch quality measures (mismatch count, recall accuracy extrema) are
    accumulated and printed as well, and only the cost tensor is returned.

    NOTE(review): eval_model_bat currently expands each batch 64x and feeds
    random labels — this looks like leftover batch-inference benchmarking code
    rather than a faithful evaluation; confirm before trusting the measures.
    NOTE(review): the measures branch allocates CUDA tensors, so this path
    presumably requires a GPU — confirm.
    """
    # Put in greedy evaluation mode!
    set_decode_type(model, "greedy")
    model.eval()
    def eval_model_bat(bat):
        # Evaluate one (data, labels) batch without tracking gradients.
        with torch.no_grad():
            duplicate = 64  # batch-expansion factor for the runtime test below
            print(bat[0].shape, bat[0].repeat(duplicate,1,1).shape)
            print(len(bat[1]))
            label_len = len(bat[1])
            start = time.time()
            #print(bat.shape)
            # 0%| | 0/1 [00:00<?, ?it/s]torch.Size([1, 429, 15])
            #cost, _, _, _, _, _ = model(move_to(bat, opts.device))
            #cost, _, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = model(move_to(bat[0], opts.device), labels=move_to(bat[1], opts.device), Measures=measures, Plot_Data=plot_data)
            # 12/19/2021 5:27 pm comment batch test
            #cost, _, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = model(move_to(bat[0], opts.device), move_to(bat[1], opts.device), opts, Measures=measures, Plot_Data=plot_data)
            # 12/19/2021 5:27 pm - batch expansion (random labels of matching length!)
            cost, _, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = model(move_to(bat[0].repeat(duplicate,1,1), opts.device), move_to(torch.randint(10, (duplicate, label_len)).float(), opts.device), opts, Measures=measures, Plot_Data=plot_data)
            #cost, _, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = model(move_to(bat, opts.device), Measures=measures, Plot_Data=plot_data)
            #return cost.data.cpu(), misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
            end = time.time()
            print("Batch inference runtime test over %d (%.4f per scheduling)" % (duplicate, (end - start)/duplicate ))
            print(end - start)
            return cost.data.cpu(), misMatch, None, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
    if not measures:
        # Fast path: concatenate only the per-batch costs.
        return torch.cat([
            eval_model_bat(bat)[0]
            for bat
            #in tqdm(DataLoader(dataset, batch_size=opts.eval_batch_size), disable=opts.no_progress_bar)
            in tqdm(DataLoader(dataset, batch_size=opts.eval_batch_size, shuffle=True), disable=opts.no_progress_bar)
        ], 0)
    else:
        # Measures path: accumulate quality statistics alongside the costs.
        count = 0
        cost_all = torch.tensor([]).data.cpu()
        #misMatch_y_all, misMatch_x_all, recall_accuracy_all, radius_mean_all, radius_max_all = [], [], [], [], []
        #misMatch_y_all, misMatch_x_all, recall_accuracy_all, radius_mean_all = 0., 0., 0., 0.
        misMatch_all, recall_accuracy_all, radius_mean_all = 0., 0., 0.
        # Running extrema kept as CUDA tensors so torch.max/min below work elementwise.
        radius_max = torch.FloatTensor([0.]).cuda()
        recall_accuracy_max = torch.FloatTensor([0.]).cuda()
        recall_accuracy_min = torch.FloatTensor([1.]).cuda()
        """
        radius_max = 0
        recall_accuracy_max = 0.
        recall_accuracy_min = 1.
        """
        #misMatch_all = []
        for bat in tqdm(DataLoader(dataset, batch_size=opts.eval_batch_size), disable=opts.no_progress_bar):
            #cost_batch, misMatch_y_b, misMatch_x_b, recall_accuracy_b, radius_mean_b, radius_max_b, recall_accuracy_max_b, recall_accuracy_min_b = eval_model_bat(bat)
            cost_batch, misMatch_b, _, recall_accuracy_b, radius_mean_b, radius_max_b, recall_accuracy_max_b, recall_accuracy_min_b = eval_model_bat(bat)
            #cost_batch, misMatch_y_b, misMatch_x_b, _, _, _ = eval_model_bat(bat)
            #if first_iteration:
            if count == 0:
                cost_all = cost_batch
            else:
                cost_all = torch.cat((cost_all, cost_batch), 0)
            #misMatch_all += misMatch_b
            #misMatch_y_all += misMatch_y_b
            #misMatch_x_all += misMatch_x_b
            misMatch_all += misMatch_b
            recall_accuracy_all += recall_accuracy_b
            #radius_mean_all += radius_mean_b
            recall_accuracy_max = torch.max(recall_accuracy_max, recall_accuracy_max_b)
            recall_accuracy_min = torch.min(recall_accuracy_min, recall_accuracy_min_b)
            #radius_max = torch.max(radius_max, radius_max_b)
            #recall_accuracy_all.append(recall_accuracy_b)
            #radius_mean_all.append(radius_mean_b)
            #radius_max_all.append(radius_max_b)
            #recall_accuracy_all += recall_accuracy_b
            #recall_accuracy_max = max(recall_accuracy_max_b, recall_accuracy_max)
            #recall_accuracy_min = min(recall_accuracy_min_b, recall_accuracy_min)
            #radius_mean_all += radius_mean_b
            #radius_max = max(radius_max, radius_max_b)
            count += 1
        # Report the accumulated statistics averaged over the number of batches.
        #print("Validation count of misMatch on y axis: {:.2f}".format((misMatch_y_all/count).item()))
        #print("Validation count of misMatch on x axis: {:.2f}".format((misMatch_x_all/count).item()))
        print("Validation count of misMatch: {:.2f}".format((misMatch_all/count).item()))
        print("Validation mean of recall_accuracy: {:.2f}".format((recall_accuracy_all/count).item()))
        print("Validation max of recall_accuracy: {:.2f}".format((recall_accuracy_max).item()))
        print("Validation min of recall_accuracy: {:.2f}".format((recall_accuracy_min).item()))
        #print("Validation mean of radius_misposition: {:.2f}".format((radius_mean_all/count).item()))
        #print("Validation max of radius_misposition: {:d}".format(round(radius_max.item())))
        #print("Validation count of misMatch on y axis: ", misMatch_y_all/count)
        #print("Validation count of misMatch on x axis: ", misMatch_x_all/count)
        #print("Validation mean of recall_accuracy: {:.2f}".format(recall_accuracy_all/count))
        #print("Validation max of recall_accuracy: {:.2f}".format(recall_accuracy_max))
        #print("Validation min of recall_accuracy: {:.2f}".format(recall_accuracy_min))
        #print("Validation mean of radius_misposition: {:.2f}".format(radius_mean_all/count))
        #print("Validation max of radius_misposition: {:d}".format(round(radius_max)))
        #print("Validation min of recall_accuracy: {:.2f}".format(recall_accuracy_min*100))
        #print("Validation mean of recall_accuracy: {:.2f}".format(sum(recall_accuracy_all)*100/len(recall_accuracy_all)))
        #print("Validation mean of radius_misposition: {:.2f}".format(sum(radius_mean_all)/len(radius_mean_all)))
        #radius_max_all.sort()
        #print("Validation max of radius_misposition: {:d}".format(radius_max_all[-1]))
        return cost_all
def clip_grad_norms(param_groups, max_norm=math.inf):
    """Clip each optimizer parameter group's gradient norm to `max_norm`.

    :param param_groups: optimizer param groups (each a dict with a 'params' list)
    :param max_norm: maximum L2 norm per group; non-positive disables clipping
    :return: (grad_norms, grad_norms_clipped) — per-group norms before clipping,
        and the same norms capped at max_norm (identical when clipping is off)
    """
    # A non-positive max_norm means "measure only": pass inf so clip_grad_norm_
    # still computes the norm but never rescales gradients.
    effective_max = max_norm if max_norm > 0 else math.inf
    grad_norms = []
    for group in param_groups:
        total_norm = torch.nn.utils.clip_grad_norm_(
            group['params'],
            effective_max,
            norm_type=2
        )
        grad_norms.append(total_norm)
    if max_norm > 0:
        grad_norms_clipped = [min(norm, max_norm) for norm in grad_norms]
    else:
        grad_norms_clipped = grad_norms
    return grad_norms, grad_norms_clipped
def train_epoch(model, optimizer, baseline, lr_scheduler, epoch, val_dataset, train_dataset, problem, tb_logger, opts):
    """Run one REINFORCE training epoch: wrap the training data with baseline
    values, iterate batches through train_batch, checkpoint, validate, and let
    the baseline challenge/replace itself via its epoch callback.
    """
    print("Start train epoch {}, lr={} for run {}".format(epoch, optimizer.param_groups[0]['lr'], opts.run_name))
    # Global step counter continues across epochs for tensorboard logging.
    step = epoch * (opts.epoch_size // opts.batch_size)
    start_time = time.time()
    if not opts.no_tensorboard:
        tb_logger.log_value('learnrate_pg0', optimizer.param_groups[0]['lr'], step)
    # Generate new training data for each epoch
    #training_dataset = baseline.wrap_dataset(problem.make_dataset(
    #    size=opts.graph_size, num_samples=opts.epoch_size, distribution=opts.data_distribution))
    # Reuse the fixed training dataset, annotated with per-sample baseline values.
    training_dataset = baseline.wrap_dataset(train_dataset)
    #training_dataloader = DataLoader(training_dataset, batch_size=opts.batch_size, num_workers=1)
    #training_dataloader = DataLoader(training_dataset, batch_size=opts.batch_size)
    training_dataloader = DataLoader(training_dataset, batch_size=opts.batch_size, shuffle=True)
    # Put model in train mode!
    model.train()
    set_decode_type(model, "sampling")
    for batch_id, batch in enumerate(tqdm(training_dataloader, disable=opts.no_progress_bar)):
        train_batch(
            model,
            optimizer,
            baseline,
            epoch,
            batch_id,
            step,
            batch,
            tb_logger,
            opts
        )
        step += 1
    epoch_duration = time.time() - start_time
    print("Finished epoch {}, took {} s".format(epoch, time.strftime('%H:%M:%S', time.gmtime(epoch_duration))))
    # Checkpoint every `checkpoint_epochs` epochs and always at the final epoch.
    if (opts.checkpoint_epochs != 0 and epoch % opts.checkpoint_epochs == 0) or epoch == opts.n_epochs - 1:
        print('Saving model and state...')
        torch.save(
            {
                'model': get_inner_model(model).state_dict(),
                'optimizer': optimizer.state_dict(),
                'rng_state': torch.get_rng_state(),
                'cuda_rng_state': torch.cuda.get_rng_state_all(),
                'baseline': baseline.state_dict()
            },
            os.path.join(opts.save_dir, 'epoch-{}.pt'.format(epoch))
        )
    # NOTE(review): both branches currently make the same validate() call; the
    # split only exists for the commented-out plot=True variant on the last epoch.
    if epoch == opts.n_epochs-1:
        #avg_reward = validate(model, val_dataset, opts, plot=True)
        avg_reward = validate(model, val_dataset, opts)
    else:
        avg_reward = validate(model, val_dataset, opts)
    if not opts.no_tensorboard:
        tb_logger.log_value('val_avg_reward', avg_reward, step)
    baseline.epoch_callback(model, epoch)
    # lr_scheduler should be called at end of epoch
    lr_scheduler.step()
def train_batch(
        model,
        optimizer,
        baseline,
        epoch,
        batch_id,
        step,
        batch,
        tb_logger,
        opts
):
    """Perform one REINFORCE update on a single batch.

    Unwraps the batch into (data, labels) plus its precomputed baseline value,
    computes cost and log-likelihood with the sampling policy, forms the
    advantage-weighted loss (plus a critic loss if the baseline has one),
    backprops, clips gradients, and steps the optimizer.
    """
    x, bl_val = baseline.unwrap_batch(batch)
    #x = move_to(x, opts.device)
    # x is a (data, labels) pair; move both parts to the target device.
    x[0] = move_to(x[0], opts.device)
    x[1] = move_to(x[1], opts.device)
    bl_val = move_to(bl_val, opts.device) if bl_val is not None else None
    # Evaluate model, get costs and log probabilities
    cost, log_likelihood, _, _, _, _, _, _, _ = model(x[0], x[1], opts)
    #cost, log_likelihood, _, _, _, _, _, _, _ = model(x)
    # Evaluate baseline, get baseline loss if any (only for critic)
    bl_val, bl_loss = baseline.eval(x, cost) if bl_val is None else (bl_val, 0)
    # Calculate loss: advantage (cost - baseline) weighted by log-likelihood.
    reinforce_loss = ((cost - bl_val) * log_likelihood).mean()
    loss = reinforce_loss + bl_loss
    # Perform backward pass and optimization step
    optimizer.zero_grad()
    loss.backward()
    # Clip gradient norms and get (clipped) gradient norms for logging
    grad_norms = clip_grad_norms(optimizer.param_groups, opts.max_grad_norm)
    optimizer.step()
    # Logging
    if step % int(opts.log_step) == 0:
        log_values(cost, grad_norms, epoch, batch_id, step,
                   log_likelihood, reinforce_loss, bl_loss, tb_logger, opts)
| 12,296 | 45.579545 | 295 | py |
RESPECT | RESPECT-main/dataset/dataset_generator.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
#from problems.toposort.state_toposort import StateTopoSort
#from utils.beam_search import beam_search
#from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check, graph_sorting_DAG
from collections import defaultdict
from itertools import combinations
import networkx as nx
import networkx.algorithms.isomorphism as iso
import numpy as np
from torch.utils.data import DataLoader, SubsetRandomSampler
from tqdm import tqdm
class TopoSortDataset(Dataset):
    """Synthetic dataset of random layered DAGs plus a resource-constrained
    schedule label per node.

    Each sample is (embedding tensor, label tensor): one row per node containing
    its level, predecessor-level slots, predecessor labels, own label, and weight;
    the label tensor gives the 1-based schedule slot each node is assigned to by
    the greedy list scheduler in _scheduling.

    NOTE(review): `resource_constraint_level` and `level_range` are mutable
    default arguments — shared across calls if ever mutated.
    NOTE(review): the `filename` (pickle) branch never sets self.label, so
    __getitem__ would raise AttributeError on pickle-loaded datasets — confirm
    intended usage.
    """
    # 50, 1000000
    def __init__(self, filename=None, size=25, num_samples=1000, offset=0, in_degree_fixed=10, resource_constraint_level={-1:4}, level_range=[4, 10], weight_multiply=10, weight_constraint=15, shift=0., distribution=None, seed=0):
        super(TopoSortDataset, self).__init__()
        self.data = []
        #self.label = []
        if filename is not None:
            # Load pre-generated samples from a pickle file.
            assert os.path.splitext(filename)[1] == '.pkl'
            with open(filename, 'rb') as f:
                data = pickle.load(f)
            self.data = [torch.FloatTensor(row) for row in (data[offset:offset+num_samples])]
        else:
            # Generate `num_samples` random DAGs and their schedule labels.
            self.label = []
            graphs_collection = []
            if seed > 0:
                random.seed(seed)
            for _ in range(num_samples):
                graph, D = self._dag_generator(size, in_degree_fixed, len(resource_constraint_level.keys()), level_range, weight_multiply, shift)
                schedule = self._scheduling(D, D.number_of_nodes(), resource_constraint_level, weight_constraint)
                # level_index[node] = 1-based schedule slot the node was placed in.
                level_index = [0 for _ in range(size)]
                for i in range(len(schedule)):
                    for node in schedule[i]:
                        level_index[node] = i+1
                # Shuffle node order so the model cannot exploit generation order.
                order = [i for i in range(size)]
                random.shuffle(order)
                label_new = []
                graph_new = []
                for i in order:
                    embedding = graph[i]
                    graph_new.append(embedding)
                    # embedding[-2] is the node's label (its id in the DAG).
                    label_new.append(level_index[embedding[-2]])
                self.data.append(torch.FloatTensor(graph_new))
                self.label.append(torch.FloatTensor(label_new))
        self.size = len(self.data)
    def _embedding_generator(self, size, in_degree_fixed, level_range, weight_multiply, shift):
        """Generate one random layered graph.

        Returns (graph, G): per-node embedding rows
        [level, pred-level slots..., pred labels..., own label, weight]
        and the corresponding networkx DiGraph built from predecessor edges.
        """
        # Randomly split `size` nodes over `num_level` non-empty levels.
        num_level = random.randint(level_range[0], level_range[1])
        level = [1 for _ in range(num_level)]
        remaining = size - num_level
        traverse = 0
        while remaining > 0:
            addition = random.randint(0, remaining)
            level[traverse % num_level] += addition
            traverse += 1
            remaining -= addition
        # Levels too small to supply `in_degree_fixed` distinct predecessors.
        caution = [i+1 for i, val in enumerate(level) if val < in_degree_fixed]
        # Randomly assign node labels to levels.
        labels = [i for i in range(size)]
        random.shuffle(labels)
        level_to_nodes = defaultdict(int)
        labels_distribution = []
        distributor = 0
        for i in range(num_level):
            level_to_nodes[i+1] = labels[distributor:(distributor+level[i])]
            labels_distribution.append(labels[distributor:(distributor+level[i])])
            distributor += level[i]
        graph = []
        graph_edges = []
        for i in range(num_level):
            for j in range(level[i]):
                # Start the embedding with the node's (1-based) level.
                embedding = [i+1]
                # Draw predecessor levels (0 means "no predecessor" slot).
                embedding.extend([random.randint(0, i) for _ in range(in_degree_fixed)])
                # Ensure at least one predecessor from the directly preceding level.
                if max(embedding[1:]) < i:
                    embedding[random.randint(1, in_degree_fixed)] = i
                # Drop references that exceed the capacity of small levels.
                for constraint in caution:
                    while embedding[1:].count(constraint) > level[constraint-1]:
                        embedding[embedding.index(constraint)] = 0
                # Resolve each predecessor level to concrete node labels (-1 = none).
                nodes_to_be_assigned = embedding[1:]
                while len(nodes_to_be_assigned) > 0:
                    node = nodes_to_be_assigned[0]
                    occurrences = nodes_to_be_assigned.count(node)
                    if node > 0:
                        embedding.extend(random.sample(level_to_nodes[node], occurrences))
                    else:
                        embedding.extend([-1 for _ in range(occurrences)])
                    nodes_to_be_assigned = [element for element in nodes_to_be_assigned if element != node]
                # Assign this node a label from its level and a random weight.
                label_to_current_node = random.sample(labels_distribution[i], 1)
                labels_distribution[i].remove(label_to_current_node[0])
                embedding.append(label_to_current_node[0])
                embedding.append(random.random() * weight_multiply + shift)
                graph.append(embedding)
                if i > 0:
                    # Record predecessor -> node edges for the DiGraph.
                    for predecessor in embedding[-2-in_degree_fixed:-2]:
                        if predecessor > -1:
                            graph_edges.append((predecessor, label_to_current_node[0]))
        G = nx.DiGraph()
        G.add_edges_from(graph_edges)
        return graph, G
    def _dag_generator(self, size, in_degree_fixed, num_operators, level_range, weight_multiply, shift):
        """Retry _embedding_generator until the graph is a connected DAG with all
        `size` nodes, then attach operator/priority/label/weight attributes."""
        while True:
            graph, D = self._embedding_generator(size, in_degree_fixed, level_range, weight_multiply, shift)
            if nx.is_connected(D.to_undirected()) and nx.is_directed_acyclic_graph(D) and D.number_of_nodes() == size:
                break
        attributes = {graph[i][-2]:{'operator':random.randint(-num_operators, -1), 'priority':0, 'label':graph[i][-2], 'weight':graph[i][-1]} for i in range(size)}
        nx.set_node_attributes(D, attributes)
        DAG = self._priority_sorting(D)
        return graph, DAG
    def _scheduling(self, D, size, resource_constraint_level, weight_constraint):
        """Greedy list scheduling on the reversed DAG: repeatedly take the
        highest-priority ready nodes up to the resource and weight limits.
        Returns a list of schedule slots (each a list of node labels)."""
        DAG = D.reverse(copy=True)
        # NOTE(review): resource limit is looked up from node 0's operator only —
        # presumably all nodes share one operator type here; confirm.
        op = DAG.nodes[0]['operator']
        resource_constraint = resource_constraint_level[op]
        def path_exploring(resource_constraint, weight_constraint):
            if DAG.number_of_nodes() <= 0:
                return []
            # Ready nodes (in-degree 0), best priority first, ties by larger label.
            candidates = sorted([n for n, d in DAG.in_degree() if d==0], key=lambda x: (DAG.nodes[x]['priority'], -x), reverse=True)
            schedule = candidates[:resource_constraint]
            # Shrink the slot until its total weight fits the constraint.
            while True:
                weight_in_all = sum([DAG.nodes[node]['weight'] for node in schedule])
                if weight_in_all <= weight_constraint:
                    break
                schedule.pop()
            DAG.remove_nodes_from(schedule)
            return [schedule] + path_exploring(resource_constraint, weight_constraint)
        return path_exploring(resource_constraint, weight_constraint)
    """
    def _scheduling(self, D, size, resource_constraint_level, weight_constraint):
        DAG = D.reverse(copy=True)
        candidates = [head for head in range(size) if DAG.in_degree(head) == 0]
        candidates.sort(key = lambda x: (DAG.nodes[x]['priority'], -x), reverse=True)
        op = DAG.nodes[candidates[0]]['operator']
        resource_constraint = resource_constraint_level[op]
        def path_exploring(candidates, resource_constraint, weight_constraint):
            if len(candidates) <= 0:
                return []
            schedule = candidates[:resource_constraint]
            while True:
                weight_in_all = sum([D.nodes[node]['weight'] for node in schedule])
                if weight_in_all <= weight_constraint:
                    break
                schedule.pop()
            candidates = set(candidates[len(schedule):])
            for node in schedule:
                candidates = candidates.union(set(DAG.successors(node)))
            return [schedule] + path_exploring(sorted(list(candidates), key=lambda x: (DAG.nodes[x]['priority'], -x), reverse=True), resource_constraint, weight_constraint)
        return path_exploring(candidates, resource_constraint, weight_constraint)
    """
    def _priority_sorting(self, D):
        """Set each node's 'priority' to its height above the sinks (longest
        distance to a leaf in the reversed DAG) and return the DAG."""
        DAG = D.reverse(copy=True)
        leaf = set([node for node in range(DAG.number_of_nodes()) if DAG.out_degree(node) == 0])
        def priority_rewrite(nodes, level):
            visited = set()
            for node in nodes:
                # Only raise priorities; a node may be reached via several paths.
                if DAG.nodes[node]['priority'] >= level:
                    continue
                DAG.nodes[node]['priority'] = level
                upper_level = set(DAG.predecessors(node)).difference(visited)
                priority_rewrite(upper_level, level+1)
                visited = visited.union(upper_level)
            return
        priority_rewrite(leaf, 1)
        return DAG.reverse(copy=True)
    def __len__(self):
        return self.size
    def __getitem__(self, idx):
        #return self.data[idx], self.label[idx]
        return self.data[idx], self.label[idx]
if __name__ == '__main__':
    # Script entry point: generate and save training/evaluation TopoSort datasets.
    # The large triple-quoted blocks below are disabled generation recipes kept
    # for reference; only the two uncommented TopoSortDataset calls run.
    #resource_constraint_lvl = {-1:1, -2:1, -3:1}
    #resource_constraint_lvl = {-1:3}
    training_lvl = {-1:5000}
    eval_lvl = {-1:5000}
    """
    myDataset = TopoSortDataset(size=30, num_samples=1000, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[10, 25], weight_multiply=375., shift=25., weight_constraint=1000.)
    torch.save(myDataset, "training_dataset/operator_type_1/small_volume_adaptive_learning/TopoSort30_Dataset_4_in_degree_3resource_priorityInverse_1K_cw25to400_weight1000.pt")
    myDataset = TopoSortDataset(size=30, num_samples=1000, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[10, 25], weight_multiply=275., shift=25., weight_constraint=1000.)
    torch.save(myDataset, "training_dataset/operator_type_1/small_volume_adaptive_learning/TopoSort30_Dataset_4_in_degree_3resource_priorityInverse_1K_cw25to300_weight1000.pt")
    myDataset = TopoSortDataset(size=30, num_samples=1000, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[10, 25], weight_multiply=175., shift=25., weight_constraint=1000.)
    torch.save(myDataset, "training_dataset/operator_type_1/small_volume_adaptive_learning/TopoSort30_Dataset_4_in_degree_3resource_priorityInverse_1K_cw25to200_weight1000.pt")
    myDataset = TopoSortDataset(size=30, num_samples=1000, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[10, 25], weight_multiply=75., shift=25., weight_constraint=1000.)
    torch.save(myDataset, "training_dataset/operator_type_1/small_volume_adaptive_learning/TopoSort30_Dataset_4_in_degree_3resource_priorityInverse_1K_cw25to100_weight1000.pt")
    myDataset = TopoSortDataset(size=50, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[16, 40], weight_multiply=375., shift=25., weight_constraint=1000.)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort50_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw25to400_weight1000.pt")
    myDataset = TopoSortDataset(size=50, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[16, 40], weight_multiply=275., shift=25., weight_constraint=1000.)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort50_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw25to300_weight1000.pt")
    myDataset = TopoSortDataset(size=50, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[16, 40], weight_multiply=175., shift=25., weight_constraint=1000.)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort50_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw25to200_weight1000.pt")
    myDataset = TopoSortDataset(size=50, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[16, 40], weight_multiply=75., shift=25., weight_constraint=1000.)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort50_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw25to100_weight1000.pt")
    """
    #myDataset = TopoSortDataset(size=30, num_samples=1000, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[10, 25], weight_multiply=9.5, shift=0.5, weight_constraint=35)
    #torch.save(myDataset, "training_dataset/operator_type_1/small_volume_adaptive_learning/TopoSort30_Dataset_4_in_degree_3resource_priorityInverse_1K_cw05to10_weight35.pt")
    #myDataset = TopoSortDataset(size=30, num_samples=1000, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[10, 25], weight_multiply=475., shift=25., weight_constraint=1000)
    #torch.save(myDataset, "training_dataset/operator_type_1/small_volume_adaptive_learning/TopoSort30_Dataset_4_in_degree_3resource_priorityInverse_1K_cw25to500_weight1000.pt")
    #myDataset = TopoSortDataset(size=10, num_samples=5, in_degree_fixed=4, resource_constraint_level=resource_constraint_lvl, level_range=[4, 10], weight_multiply=10, shift=1., weight_constraint=15)
    #torch.save(myDataset, "eval_dataset/operator_type_1/TopoSort15_Dataset_Eval_3_in_degree_5samples.pt")
    #torch.save(myDataset, "training_dataset/operator_type_1/TopoSort10_Dataset_Training_4_in_degree_10samples.pt")
    # Active recipe: 128k training samples (size 30) and 10k eval samples (size 50).
    myDataset = TopoSortDataset(size=30, num_samples=128000, in_degree_fixed=6, resource_constraint_level=training_lvl, level_range=[10, 25], weight_multiply=5., weight_constraint=35.)
    #myDataset = TopoSortDataset(size=30, num_samples=100, in_degree_fixed=4, resource_constraint_level=training_lvl, level_range=[10, 25], weight_multiply=5, weight_constraint=5)
    torch.save(myDataset, "training_dataset/operator_type_1/TopoSort30_Dataset_Training_6_in_degree_3resource_priorityInverse_128k_10to25_weight35.pt")
    myDataset = TopoSortDataset(size=50, num_samples=10240, in_degree_fixed=6, resource_constraint_level=eval_lvl, level_range=[16, 40], weight_multiply=5., weight_constraint=35.)
    torch.save(myDataset, "eval_dataset/operator_type_1/TopoSort50_Dataset_Eval_6_in_degree_3resource_priorityInverse_10K_16to40_weight35.pt")
    #myDataset = TopoSortDataset(size=50, num_samples=128000, in_degree_fixed=4, resource_constraint_level=training_lvl, level_range=[20, 40], weight_multiply=5, weight_constraint=5)
    #myDataset = TopoSortDataset(size=30, num_samples=100, in_degree_fixed=4, resource_constraint_level=training_lvl, level_range=[10, 25], weight_multiply=5, weight_constraint=5)
    #torch.save(myDataset, "training_dataset/operator_type_1/TopoSort50_Dataset_Training_4_in_degree_3resource_priorityInverse_128k_20to40_weight5.pt")
    #myDataset = TopoSortDataset(size=20, num_samples=128000, in_degree_fixed=3, resource_constraint_level=training_lvl)
    #torch.save(myDataset, "training_dataset/operator_type_1/small_volume/TopoSort20_Dataset_Training_3_in_degree_1resources_priorityInverse_128K_nonRepeated.pt")
    #eval_lvl = {-1:3}
    #myDataset = TopoSortDataset(size=20, num_samples=10240, in_degree_fixed=3, resource_constraint_level=eval_lvl)
    #torch.save(myDataset, "eval_dataset/operator_type_1/small_volume/TopoSort20_Dataset_Eval_3_in_degree_1resources_priorityInverse_10K_nonRepeated.pt")
    """
    myDataset = TopoSortDataset(size=50, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[16, 40], weight_multiply=9.5, shift=0.5, weight_constraint=35)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort50_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw05to10_weight35.pt")
    myDataset = TopoSortDataset(size=100, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[16*2, 80], weight_multiply=9.5, shift=0.5, weight_constraint=35)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort100_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw05to10_weight35.pt")
    myDataset = TopoSortDataset(size=200, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[64, 160], weight_multiply=9.5, shift=0.5, weight_constraint=35)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort200_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw05to10_weight35.pt")
    myDataset = TopoSortDataset(size=300, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[96, 240], weight_multiply=9.5, shift=0.5, weight_constraint=35)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort300_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw05to10_weight35.pt")
    myDataset = TopoSortDataset(size=400, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[128, 320], weight_multiply=9.5, shift=0.5, weight_constraint=35)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort400_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw05to10_weight35.pt")
    myDataset = TopoSortDataset(size=500, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[160, 400], weight_multiply=9.5, shift=0.5, weight_constraint=35)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort500_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw05to10_weight35.pt")
    myDataset = TopoSortDataset(size=50, num_samples=1000, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[16, 40], weight_multiply=1.95, shift=0.05, weight_constraint=8)
    torch.save(myDataset, "training_dataset/operator_type_1/small_volume_adaptive_learning/TopoSort50_Dataset_4_in_degree_3resource_priorityInverse_1K_cw005to2_weight8.pt")
    myDataset = TopoSortDataset(size=100, num_samples=1000, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[16*2, 80], weight_multiply=1.95, shift=0.05, weight_constraint=8)
    torch.save(myDataset, "training_dataset/operator_type_1/small_volume_adaptive_learning/TopoSort100_Dataset_4_in_degree_3resource_priorityInverse_1K_cw005to2_weight8.pt")
    myDataset = TopoSortDataset(size=200, num_samples=1000, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[64, 160], weight_multiply=1.95, shift=0.05, weight_constraint=8)
    torch.save(myDataset, "training_dataset/operator_type_1/small_volume_adaptive_learning/TopoSort200_Dataset_4_in_degree_3resource_priorityInverse_1K_cw005to2_weight8.pt")
    myDataset = TopoSortDataset(size=300, num_samples=1000, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[96, 240], weight_multiply=1.95, shift=0.05, weight_constraint=8)
    torch.save(myDataset, "training_dataset/operator_type_1/small_volume_adaptive_learning/TopoSort300_Dataset_4_in_degree_3resource_priorityInverse_1K_cw005to2_weight8.pt")
    myDataset = TopoSortDataset(size=400, num_samples=1000, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[128, 320], weight_multiply=1.95, shift=0.05, weight_constraint=8)
    torch.save(myDataset, "training_dataset/operator_type_1/small_volume_adaptive_learning/TopoSort400_Dataset_4_in_degree_3resource_priorityInverse_1K_cw005to2_weight8.pt")
    myDataset = TopoSortDataset(size=500, num_samples=1000, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[160, 400], weight_multiply=1.95, shift=0.05, weight_constraint=8)
    torch.save(myDataset, "training_dataset/operator_type_1/small_volume_adaptive_learning/TopoSort500_Dataset_4_in_degree_3resource_priorityInverse_1K_cw005to2_weight8.pt")
    myDataset = TopoSortDataset(size=50, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[16, 40], weight_multiply=475., shift=25., weight_constraint=1000)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort50_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw25to500_weight1000.pt")
    myDataset = TopoSortDataset(size=100, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[32, 80], weight_multiply=475., shift=25., weight_constraint=1000)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort100_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw25to500_weight1000.pt")
    myDataset = TopoSortDataset(size=200, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[64, 160], weight_multiply=475., shift=25., weight_constraint=1000)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort200_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw25to500_weight1000.pt")
    myDataset = TopoSortDataset(size=300, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[96, 240], weight_multiply=475., shift=25., weight_constraint=1000)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort300_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw25to500_weight1000.pt")
    myDataset = TopoSortDataset(size=400, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[128, 320], weight_multiply=475., shift=25., weight_constraint=1000)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort400_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw25to500_weight1000.pt")
    myDataset = TopoSortDataset(size=500, num_samples=10240, in_degree_fixed=4, resource_constraint_level=eval_lvl, level_range=[160, 400], weight_multiply=475., shift=25., weight_constraint=1000)
    torch.save(myDataset, "eval_dataset/operator_type_1/validation_set/TopoSort500_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_cw25to500_weight1000.pt")
    """
    #for i in range(1000, 5001, 1000):
    #    myDataset = TopoSortDataset(size=i, num_samples=10)
    #    torch.save(myDataset, "eval_large_size/TopoSort" + str(i) + "_Dataset_Validation_10_in_degree.pt")
    #torch.save(myDataset, "TopoSort20_Dataset_Training_10_in_degree.pt")
    #myDataset = torch.load("eval_dataset/operator_type_1/TopoSort50_Dataset_Eval_4_in_degree_3resource_priorityInverse_10K_20to40_weight5.pt")
    #dataset = torch.load("eval_dataset/operator_type_1/TopoSort15_Dataset_Eval_3_in_degree_5samples.pt", map_location=torch.device('cuda'))
    #dataset = torch.load("training_dataset/operator_type_1/TopoSort10_Dataset_Training_4_in_degree_10samples.pt", map_location=torch.device('cuda'))
    #indices = torch.randperm(len(myDataset))[:3]
    #training_dataloader = DataLoader(myDataset, batch_size=1, sampler=SubsetRandomSampler(indices))
    #print(indices)
    #training_dataloader = DataLoader(dataset, batch_size=5)
    #for batch_id, batch in enumerate(tqdm(training_dataloader)):
    #for batch in tqdm(training_dataloader):
        #print(batch_id)
        #print("training_data: ", batch[0])
        #print("training_label: ", batch[1])
| 22,823 | 67.954683 | 229 | py |
RESPECT | RESPECT-main/nets/pointer_network_singleTraining.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import numpy as np
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from utils import move_to
class Encoder(nn.Module):
    """Sequence encoder: a single-layer LSTM over the embedded input
    sequence, plus trainable initial hidden/cell state vectors."""
    def __init__(self, input_dim, hidden_dim):
        super(Encoder, self).__init__()
        self.hidden_dim = hidden_dim
        self.lstm = nn.LSTM(input_dim, hidden_dim)
        self.init_hx, self.init_cx = self.init_hidden(hidden_dim)
    def forward(self, x, hidden):
        """Run the LSTM over `x` starting from state `hidden`; returns
        the per-step outputs and the final (h, c) state."""
        return self.lstm(x, hidden)
    def init_hidden(self, hidden_dim):
        """Build trainable initial hidden/cell state vectors, uniformly
        initialised in [-1/sqrt(hidden_dim), 1/sqrt(hidden_dim)]."""
        bound = 1. / math.sqrt(hidden_dim)
        h0 = nn.Parameter(torch.FloatTensor(hidden_dim))
        h0.data.uniform_(-bound, bound)
        c0 = nn.Parameter(torch.FloatTensor(hidden_dim))
        c0.data.uniform_(-bound, bound)
        return h0, c0
class Attention(nn.Module):
    """A generic attention module for a decoder in seq2seq.

    Additive (concat-style) attention: scores a single decoder query
    against every encoder state; logits may optionally be bounded to
    [-C, C] via C * tanh(.).
    """
    def __init__(self, dim, use_tanh=False, C=10):
        # dim: hidden size shared by query and reference vectors.
        # use_tanh: if True, logits are clipped as C * tanh(u).
        # C: clipping scale used when use_tanh is set.
        super(Attention, self).__init__()
        self.use_tanh = use_tanh
        self.project_query = nn.Linear(dim, dim)
        # 1x1 convolution acts as a per-position linear projection of refs.
        self.project_ref = nn.Conv1d(dim, dim, 1, 1)
        self.C = C  # tanh exploration
        self.tanh = nn.Tanh()
        # Score vector v, uniform in [-1/sqrt(dim), 1/sqrt(dim)].
        self.v = nn.Parameter(torch.FloatTensor(dim))
        self.v.data.uniform_(-(1. / math.sqrt(dim)), 1. / math.sqrt(dim))
    def forward(self, query, ref):
        """
        Args:
            query: is the hidden state of the decoder at the current
                time step. batch x dim
            ref: the set of hidden states from the encoder.
                sourceL x batch x hidden_dim

        Returns:
            e: projected encoder states, batch x hidden_dim x sourceL
            logits: unnormalised attention scores, batch x sourceL
        """
        # ref is now [batch_size x hidden_dim x sourceL]
        ref = ref.permute(1, 2, 0)
        q = self.project_query(query).unsqueeze(2)  # batch x dim x 1
        e = self.project_ref(ref)  # batch_size x hidden_dim x sourceL
        # expand the query by sourceL
        # batch x dim x sourceL
        expanded_q = q.repeat(1, 1, e.size(2))
        # batch x 1 x hidden_dim
        v_view = self.v.unsqueeze(0).expand(
            expanded_q.size(0), len(self.v)).unsqueeze(1)
        # [batch_size x 1 x hidden_dim] * [batch_size x hidden_dim x sourceL]
        u = torch.bmm(v_view, self.tanh(expanded_q + e)).squeeze(1)
        if self.use_tanh:
            # Bound logits to [-C, C].
            logits = self.C * self.tanh(u)
        else:
            logits = u
        return e, logits
class Decoder(nn.Module):
    """Pointer-network decoder.

    At each step an LSTM cell consumes the embedding of the previously
    selected position, optionally refines its hidden state with glimpse
    attention, then "points" back into the input sequence to choose the
    next position. Already-selected positions are masked out.
    """
    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 tanh_exploration,
                 use_tanh,
                 n_glimpses=1,
                 mask_glimpses=True,
                 mask_logits=True):
        # embedding_dim: size of input embeddings fed to the LSTM cell.
        # hidden_dim: LSTM hidden state size.
        # tanh_exploration: C constant for pointer logit clipping.
        # use_tanh: whether the pointer attention clips logits via C * tanh.
        # n_glimpses: number of glimpse-attention refinement passes.
        # mask_glimpses: mask visited positions inside the glimpse softmax.
        # mask_logits: mask visited positions in the final pointer logits.
        super(Decoder, self).__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.n_glimpses = n_glimpses
        self.mask_glimpses = mask_glimpses
        self.mask_logits = mask_logits
        self.use_tanh = use_tanh
        self.tanh_exploration = tanh_exploration
        self.decode_type = None  # Needs to be set explicitly before use
        #encoder_layers = TransformerEncoderLayer(embedding_dim, 1, hidden_dim, dropout=0.5)
        #self.transformer_encoder = TransformerEncoder(encoder_layers, 1)
        self.lstm = nn.LSTMCell(embedding_dim, hidden_dim)
        # Pointer attention yields the selection logits; glimpse attention
        # refines the query before pointing.
        self.pointer = Attention(hidden_dim, use_tanh=use_tanh, C=tanh_exploration)
        self.glimpse = Attention(hidden_dim, use_tanh=False)
        self.sm = nn.Softmax(dim=1)
    def update_mask(self, mask, selected):
        # Return a copy of `mask` with the just-selected indices marked True.
        return mask.clone().scatter_(1, selected.unsqueeze(-1), True)
    def recurrence(self, x, h_in, prev_mask, prev_idxs, step, context):
        """One decoding step: update the visited mask, run the LSTM cell and
        attention, and return the new hidden state, log-probs, probs and the
        updated mask."""
        logit_mask = self.update_mask(prev_mask, prev_idxs) if prev_idxs is not None else prev_mask
        logits, h_out = self.calc_logits(x, h_in, logit_mask, context, self.mask_glimpses, self.mask_logits)
        # Calculate log_softmax for better numerical stability
        log_p = torch.log_softmax(logits, dim=1)
        probs = log_p.exp()
        if not self.mask_logits:
            # If self.mask_logits, this would be redundant, otherwise we must mask to make sure we don't resample
            # Note that as a result the vector of probs may not sum to one (this is OK for .multinomial sampling)
            # But practically by not masking the logits, a model is learned over all sequences (also infeasible)
            # while only during sampling feasibility is enforced (a.k.a. by setting to 0. here)
            probs[logit_mask] = 0.
            # For consistency we should also mask out in log_p, but the values set to 0 will not be sampled and
            # Therefore not be used by the reinforce estimator
        return h_out, log_p, probs, logit_mask
    def calc_logits(self, x, h_in, logit_mask, context, mask_glimpses=None, mask_logits=None):
        """Compute the pointer logits for one step.

        x: current input embedding; h_in: (h, c) LSTM state;
        context: encoder outputs. Per-call mask flags default to the
        module-level settings when None.
        """
        if mask_glimpses is None:
            mask_glimpses = self.mask_glimpses
        if mask_logits is None:
            mask_logits = self.mask_logits
        #x = self.transformer_encoder(x)
        hy, cy = self.lstm(x, h_in)
        g_l, h_out = hy, (hy, cy)
        for i in range(self.n_glimpses):
            ref, logits = self.glimpse(g_l, context)
            # For the glimpses, only mask before softmax so we have always an L1 norm 1 readout vector
            if mask_glimpses:
                logits[logit_mask] = -np.inf
            # [batch_size x h_dim x sourceL] * [batch_size x sourceL x 1] =
            # [batch_size x h_dim x 1]
            g_l = torch.bmm(ref, self.sm(logits).unsqueeze(2)).squeeze(2)
        _, logits = self.pointer(g_l, context)
        #logits = nn.functional.normalize(logits)
        # Masking before softmax makes probs sum to one
        if mask_logits:
            logits[logit_mask] = -np.inf
        return logits, h_out
    def forward(self, decoder_input, embedded_inputs, hidden, context, eval_tours=None):
        """
        Args:
            decoder_input: The initial input to the decoder
                size is [batch_size x embedding_dim]. Trainable parameter.
            embedded_inputs: [sourceL x batch_size x embedding_dim]
            hidden: the prev hidden state, size is [batch_size x hidden_dim].
                Initially this is set to (enc_h[-1], enc_c[-1])
            context: encoder outputs, [sourceL x batch_size x hidden_dim]
            eval_tours: optional fixed index sequences; when given, the
                selections are teacher-forced from these instead of decoded.

        Returns:
            ((stacked per-step log-probs, stacked selections), final hidden state)
        """
        batch_size = context.size(1)
        outputs = []
        selections = []
        steps = range(embedded_inputs.size(0))
        idxs = None
        # Visited mask, [batch_size x sourceL]; starts all-zero (byte tensor).
        mask = Variable(
            embedded_inputs.data.new().byte().new(embedded_inputs.size(1), embedded_inputs.size(0)).zero_(),
            requires_grad=False
        )
        for i in steps:
            hidden, log_p, probs, mask = self.recurrence(decoder_input, hidden, mask, idxs, i, context)
            # select the next inputs for the decoder [batch_size x hidden_dim]
            idxs = self.decode(
                probs,
                mask
            ) if eval_tours is None else eval_tours[:, i]
            idxs = idxs.detach() # Otherwise pytorch complains it want's a reward, todo implement this more properly?
            # Gather input embedding of selected
            decoder_input = torch.gather(
                embedded_inputs,
                0,
                idxs.contiguous().view(1, batch_size, 1).expand(1, batch_size, *embedded_inputs.size()[2:])
            ).squeeze(0)
            # use outs to point to next object
            outputs.append(log_p)
            selections.append(idxs)
        return (torch.stack(outputs, 1), torch.stack(selections, 1)), hidden
    def decode(self, probs, mask):
        """Pick the next index from `probs` according to self.decode_type:
        "greedy" takes the argmax (asserting it is feasible), "sampling"
        draws from the multinomial and resamples if a masked position was
        hit."""
        if self.decode_type == "greedy":
            _, idxs = probs.max(1)
            assert not mask.gather(1, idxs.unsqueeze(-1)).data.any(), \
                "Decode greedy: infeasible action has maximum probability"
        elif self.decode_type == "sampling":
            idxs = probs.multinomial(1).squeeze(1)
            # Check if sampling went OK, can go wrong due to bug on GPU
            while mask.gather(1, idxs.unsqueeze(-1)).data.any():
                print(' [!] resampling due to race condition')
                #idxs = probs.multinomial().squeeze(1)
                idxs = probs.multinomial(1).squeeze(1)
        else:
            assert False, "Unknown decode type"
        return idxs
class CriticNetworkLSTM(nn.Module):
    """Useful as a baseline in REINFORCE updates.

    Encodes the input sequence with an LSTM, refines the final hidden
    state through several attention "process block" iterations, and
    regresses it to a single scalar estimate per instance.
    """
    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 n_process_block_iters,
                 tanh_exploration,
                 use_tanh):
        # n_process_block_iters: number of attention refinement passes
        # over the encoder outputs before the final MLP readout.
        super(CriticNetworkLSTM, self).__init__()
        self.hidden_dim = hidden_dim
        self.n_process_block_iters = n_process_block_iters
        self.encoder = Encoder(embedding_dim, hidden_dim)
        self.process_block = Attention(hidden_dim, use_tanh=use_tanh, C=tanh_exploration)
        self.sm = nn.Softmax(dim=1)
        # Two-layer MLP mapping the processed state to a scalar baseline.
        self.decoder = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1)
        )
    def forward(self, inputs):
        """
        Args:
            inputs: [embedding_dim x batch_size x sourceL] of embedded inputs

        Returns:
            [batch_size x 1] scalar baseline estimates.
        """
        inputs = inputs.transpose(0, 1).contiguous()
        # Broadcast the trainable initial states across the batch.
        encoder_hx = self.encoder.init_hx.unsqueeze(0).repeat(inputs.size(1), 1).unsqueeze(0)
        encoder_cx = self.encoder.init_cx.unsqueeze(0).repeat(inputs.size(1), 1).unsqueeze(0)
        # encoder forward pass
        enc_outputs, (enc_h_t, enc_c_t) = self.encoder(inputs, (encoder_hx, encoder_cx))
        # grab the hidden state and process it via the process block
        process_block_state = enc_h_t[-1]
        for i in range(self.n_process_block_iters):
            ref, logits = self.process_block(process_block_state, enc_outputs)
            process_block_state = torch.bmm(ref, self.sm(logits).unsqueeze(2)).squeeze(2)
        # produce the final scalar output
        out = self.decoder(process_block_state)
        return out
class PointerNetwork(nn.Module):
    """Pointer-network actor for sequence construction problems.

    Embeds raw node features with a learned linear map plus batch norm,
    encodes them with an LSTM, then decodes a permutation of the inputs
    with glimpse/pointer attention. Costs and evaluation metrics are
    delegated to the injected `problem` object (TSP or TopoSort).
    """
    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 problem,
                 n_encode_layers=None,
                 tanh_clipping=10.,
                 mask_inner=True,
                 mask_logits=True,
                 normalization=None,
                 num_coordinates=11,
                 **kwargs):
        # embedding_dim / hidden_dim: embedding and LSTM sizes.
        # problem: task object exposing NAME and get_costs(...).
        # tanh_clipping: C for pointer logit clipping (0 disables tanh).
        # num_coordinates: number of raw input features per node.
        # n_encode_layers, normalization and **kwargs are accepted for
        # interface compatibility but are not used here.
        super(PointerNetwork, self).__init__()
        self.problem = problem
        assert problem.NAME == "tsp" or problem.NAME == "toposort", "Pointer Network only supported for TSP and TopoSort"
        self.input_dim = num_coordinates
        #self.input_dim = 3
        self.encoder = Encoder(
            embedding_dim,
            hidden_dim)
        self.decoder = Decoder(
            embedding_dim,
            hidden_dim,
            tanh_exploration=tanh_clipping,
            use_tanh=tanh_clipping > 0,
            n_glimpses=1,
            mask_glimpses=mask_inner,
            mask_logits=mask_logits
        )
        # Trainable initial hidden states
        std = 1. / math.sqrt(embedding_dim)
        self.decoder_in_0 = nn.Parameter(torch.FloatTensor(embedding_dim))
        self.decoder_in_0.data.uniform_(-std, std)
        # Learned linear embedding matrix, [input_dim x embedding_dim].
        self.embedding = nn.Parameter(torch.FloatTensor(self.input_dim, embedding_dim))
        self.embedding.data.uniform_(-std, std)
        self.bn1 = nn.BatchNorm1d(embedding_dim)
        self.bn2 = nn.BatchNorm1d(hidden_dim)
        """
        self.bn3 = nn.BatchNorm1d(hidden_dim)
        encoder_layers = TransformerEncoderLayer(hidden_dim, 1, hidden_dim, dropout=0.5)
        self.transformer_encoder = TransformerEncoder(encoder_layers, 1)
        """
    def set_decode_type(self, decode_type):
        # Forward the decode strategy ("greedy" / "sampling") to the decoder.
        self.decoder.decode_type = decode_type
    def forward(self, inputs, labels, opts, eval_tours=None, return_pi=False, Measures=False, Plot_Data=False):
        """Roll out the pointer network on a batch and score it.

        Args:
            inputs: node features, [batch_size x graph_size x input_dim].
            labels: reference orderings, passed through to problem.get_costs.
            opts: options namespace; only opts.graph_file is read here.
            eval_tours: optional fixed tours for teacher-forced evaluation.
            return_pi: also return the selected permutation `pi`.
            Measures, Plot_Data: flags forwarded to problem.get_costs.

        Returns:
            cost, log-likelihood, and the metrics produced by
            problem.get_costs (misMatch, recall/radius statistics);
            `pi` is included when return_pi is True.
        """
        #def forward(self, inputs, eval_tours=None, return_pi=False, Measures=False, Plot_Data=False):
        #batch_size, graph_size, input_dim = inputs.size()
        #indices = torch.tensor([0, 9, 10]).cuda() # (level, index, memory) for input dim of dataset as 11
        #inputs = torch.index_select(inputs_11, 2, indices).cuda()
        batch_size, graph_size, input_dim = inputs.size()
        """
        embedded_inputs = torch.mm(
            #inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            self.embedding
        ).view(graph_size, batch_size, -1)
        """
        #inputs_weight_free = inputs.detach().clone()
        #inputs_weight_free.index_fill_(2, move_to(torch.tensor([10]), opts.device), 0.)
        # Linear embedding of all nodes followed by bn1 over the embedding
        # (channel) dimension; result is [graph_size x batch_size x emb].
        embedded_inputs = self.bn1(torch.mm(
            inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            #move_to(inputs_weight_free, opts.device).transpose(0, 1).contiguous().view(-1, input_dim),
            self.embedding
        ).view(graph_size, batch_size, -1).transpose(0, 1).transpose(1, 2)).transpose(1, 2).transpose(0, 1)
        #).view(graph_size, batch_size, -1).transpose(1, 2)).transpose(1, 2)
        # query the actor net for the input indices
        # making up the output, and the pointer attn
        _log_p, pi = self._inner(embedded_inputs, eval_tours)
        #cost, mask, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs, pi, Measures, Plot_Data)
        cost, mask, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs, pi, labels, Measures, Plot_Data, opts.graph_file)
        #cost, mask, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs, pi, labels, Measures, Plot_Data)
        # Log likelyhood is calculated within the model since returning it per action does not work well with
        # DataParallel since sequences can be of different lengths
        ll = self._calc_log_likelihood(_log_p, pi, mask)
        if return_pi:
            #return cost, ll, pi, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
            return cost, ll, pi, misMatch, None, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        #return cost, ll, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        return cost, ll, misMatch, None, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
    def _calc_log_likelihood(self, _log_p, a, mask):
        """Sum per-step log-probabilities of the chosen actions `a`;
        masked steps contribute zero."""
        # Get log_p corresponding to selected actions
        log_p = _log_p.gather(2, a.unsqueeze(-1)).squeeze(-1)
        # Optional: mask out actions irrelevant to objective so they do not get reinforced
        if mask is not None:
            log_p[mask] = 0
        assert (log_p > -1000).data.all(), "Logprobs should not be -inf, check sampling procedure!"
        # Calculate log_likelihood
        return log_p.sum(1)
    def _inner(self, inputs, eval_tours=None):
        """Run encoder + decoder and return (per-step log-probs, selected
        index sequence). `inputs` are embedded inputs,
        [sourceL x batch_size x embedding_dim]."""
        # Zero (non-trainable) initial LSTM state, allocated with the same
        # type as the inputs.
        encoder_hx = encoder_cx = Variable(
            torch.zeros(1, inputs.size(1), self.encoder.hidden_dim, out=inputs.data.new()),
            requires_grad=False
        )
        # encoder forward pass
        enc_h, (enc_h_t, enc_c_t) = self.encoder(inputs, (encoder_hx, encoder_cx))
        # bn2 normalises over the hidden (channel) dimension of the outputs.
        enc_h = self.bn2(enc_h.transpose(1, 2)).transpose(1, 2)
        dec_init_state = (enc_h_t[-1], enc_c_t[-1])
        # repeat decoder_in_0 across batch
        decoder_input = self.decoder_in_0.unsqueeze(0).repeat(inputs.size(1), 1)
        """
        enc_h = self.transformer_encoder(enc_h)
        enc_h = self.bn3(enc_h.transpose(1, 2)).transpose(1, 2)
        """
        (pointer_probs, input_idxs), dec_hidden_t = self.decoder(decoder_input,
                                                                 inputs,
                                                                 dec_init_state,
                                                                 enc_h,
                                                                 eval_tours)
        return pointer_probs, input_idxs
| 16,623 | 40.25062 | 198 | py |
RESPECT | RESPECT-main/nets/pointer_network.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import numpy as np
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from utils import move_to
class Encoder(nn.Module):
    """Maps a graph represented as an input sequence
    to a hidden vector"""
    def __init__(self, input_dim, hidden_dim):
        # input_dim: feature size of each sequence element.
        # hidden_dim: LSTM hidden/cell state size.
        super(Encoder, self).__init__()
        self.hidden_dim = hidden_dim
        self.lstm = nn.LSTM(input_dim, hidden_dim)
        # Trainable initial hidden/cell states (registered as parameters
        # by attribute assignment).
        self.init_hx, self.init_cx = self.init_hidden(hidden_dim)
    def forward(self, x, hidden):
        # Run the LSTM over x from the given (h, c) state; returns
        # (per-step outputs, final (h, c)).
        output, hidden = self.lstm(x, hidden)
        return output, hidden
    def init_hidden(self, hidden_dim):
        """Trainable initial hidden state, uniform in
        [-1/sqrt(hidden_dim), 1/sqrt(hidden_dim)]."""
        std = 1. / math.sqrt(hidden_dim)
        enc_init_hx = nn.Parameter(torch.FloatTensor(hidden_dim))
        enc_init_hx.data.uniform_(-std, std)
        enc_init_cx = nn.Parameter(torch.FloatTensor(hidden_dim))
        enc_init_cx.data.uniform_(-std, std)
        return enc_init_hx, enc_init_cx
class Attention(nn.Module):
    """Additive (Bahdanau-style) attention used by the seq2seq decoder:
    scores one decoder query against every encoder state, optionally
    bounding the logits to [-C, C] with C * tanh(.)."""
    def __init__(self, dim, use_tanh=False, C=10):
        super(Attention, self).__init__()
        self.use_tanh = use_tanh
        self.project_query = nn.Linear(dim, dim)
        self.project_ref = nn.Conv1d(dim, dim, 1, 1)
        self.C = C  # logit clipping scale, active only when use_tanh is set
        self.tanh = nn.Tanh()
        self.v = nn.Parameter(torch.FloatTensor(dim))
        self.v.data.uniform_(-(1. / math.sqrt(dim)), 1. / math.sqrt(dim))
    def forward(self, query, ref):
        """Score each encoder state against the decoder query.

        query: decoder hidden state, [batch x dim]
        ref:   encoder states, [sourceL x batch x dim]

        Returns (projected refs [batch x dim x sourceL],
                 logits [batch x sourceL]).
        """
        ref = ref.permute(1, 2, 0)  # -> batch x dim x sourceL
        projected_query = self.project_query(query).unsqueeze(2)  # batch x dim x 1
        projected_ref = self.project_ref(ref)                     # batch x dim x sourceL
        source_len = projected_ref.size(2)
        tiled_query = projected_query.repeat(1, 1, source_len)
        # v replicated per batch item: batch x 1 x dim
        score_vec = self.v.unsqueeze(0).expand(
            tiled_query.size(0), len(self.v)).unsqueeze(1)
        # [batch x 1 x dim] @ [batch x dim x sourceL] -> batch x sourceL
        scores = torch.bmm(score_vec, self.tanh(tiled_query + projected_ref)).squeeze(1)
        logits = self.C * self.tanh(scores) if self.use_tanh else scores
        return projected_ref, logits
class Decoder(nn.Module):
    """Pointer-network decoder.

    At each step an LSTM cell consumes the embedding of the previously
    selected position, optionally refines its hidden state with glimpse
    attention, then "points" back into the input sequence to choose the
    next position. Already-selected positions are masked out.
    """
    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 tanh_exploration,
                 use_tanh,
                 n_glimpses=1,
                 mask_glimpses=True,
                 mask_logits=True):
        # embedding_dim: size of input embeddings fed to the LSTM cell.
        # hidden_dim: LSTM hidden state size.
        # tanh_exploration: C constant for pointer logit clipping.
        # use_tanh: whether the pointer attention clips logits via C * tanh.
        # n_glimpses: number of glimpse-attention refinement passes.
        # mask_glimpses: mask visited positions inside the glimpse softmax.
        # mask_logits: mask visited positions in the final pointer logits.
        super(Decoder, self).__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.n_glimpses = n_glimpses
        self.mask_glimpses = mask_glimpses
        self.mask_logits = mask_logits
        self.use_tanh = use_tanh
        self.tanh_exploration = tanh_exploration
        self.decode_type = None  # Needs to be set explicitly before use
        #encoder_layers = TransformerEncoderLayer(embedding_dim, 1, hidden_dim, dropout=0.5)
        #self.transformer_encoder = TransformerEncoder(encoder_layers, 1)
        self.lstm = nn.LSTMCell(embedding_dim, hidden_dim)
        # Pointer attention yields the selection logits; glimpse attention
        # refines the query before pointing.
        self.pointer = Attention(hidden_dim, use_tanh=use_tanh, C=tanh_exploration)
        self.glimpse = Attention(hidden_dim, use_tanh=False)
        self.sm = nn.Softmax(dim=1)
    def update_mask(self, mask, selected):
        # Return a copy of `mask` with the just-selected indices marked True.
        return mask.clone().scatter_(1, selected.unsqueeze(-1), True)
    def recurrence(self, x, h_in, prev_mask, prev_idxs, step, context):
        """One decoding step: update the visited mask, run the LSTM cell and
        attention, and return the new hidden state, log-probs, probs and the
        updated mask."""
        logit_mask = self.update_mask(prev_mask, prev_idxs) if prev_idxs is not None else prev_mask
        logits, h_out = self.calc_logits(x, h_in, logit_mask, context, self.mask_glimpses, self.mask_logits)
        # Calculate log_softmax for better numerical stability
        log_p = torch.log_softmax(logits, dim=1)
        probs = log_p.exp()
        if not self.mask_logits:
            # If self.mask_logits, this would be redundant, otherwise we must mask to make sure we don't resample
            # Note that as a result the vector of probs may not sum to one (this is OK for .multinomial sampling)
            # But practically by not masking the logits, a model is learned over all sequences (also infeasible)
            # while only during sampling feasibility is enforced (a.k.a. by setting to 0. here)
            probs[logit_mask] = 0.
            # For consistency we should also mask out in log_p, but the values set to 0 will not be sampled and
            # Therefore not be used by the reinforce estimator
        return h_out, log_p, probs, logit_mask
    def calc_logits(self, x, h_in, logit_mask, context, mask_glimpses=None, mask_logits=None):
        """Compute the pointer logits for one step.

        x: current input embedding; h_in: (h, c) LSTM state;
        context: encoder outputs. Per-call mask flags default to the
        module-level settings when None.
        """
        if mask_glimpses is None:
            mask_glimpses = self.mask_glimpses
        if mask_logits is None:
            mask_logits = self.mask_logits
        #x = self.transformer_encoder(x)
        hy, cy = self.lstm(x, h_in)
        g_l, h_out = hy, (hy, cy)
        for i in range(self.n_glimpses):
            ref, logits = self.glimpse(g_l, context)
            # For the glimpses, only mask before softmax so we have always an L1 norm 1 readout vector
            if mask_glimpses:
                logits[logit_mask] = -np.inf
            # [batch_size x h_dim x sourceL] * [batch_size x sourceL x 1] =
            # [batch_size x h_dim x 1]
            g_l = torch.bmm(ref, self.sm(logits).unsqueeze(2)).squeeze(2)
        _, logits = self.pointer(g_l, context)
        #logits = nn.functional.normalize(logits)
        # Masking before softmax makes probs sum to one
        if mask_logits:
            logits[logit_mask] = -np.inf
        return logits, h_out
    def forward(self, decoder_input, embedded_inputs, hidden, context, eval_tours=None):
        """
        Args:
            decoder_input: The initial input to the decoder
                size is [batch_size x embedding_dim]. Trainable parameter.
            embedded_inputs: [sourceL x batch_size x embedding_dim]
            hidden: the prev hidden state, size is [batch_size x hidden_dim].
                Initially this is set to (enc_h[-1], enc_c[-1])
            context: encoder outputs, [sourceL x batch_size x hidden_dim]
            eval_tours: optional fixed index sequences; when given, the
                selections are teacher-forced from these instead of decoded.

        Returns:
            ((stacked per-step log-probs, stacked selections), final hidden state)
        """
        batch_size = context.size(1)
        outputs = []
        selections = []
        steps = range(embedded_inputs.size(0))
        idxs = None
        # Visited mask, [batch_size x sourceL]; starts all-zero (byte tensor).
        mask = Variable(
            embedded_inputs.data.new().byte().new(embedded_inputs.size(1), embedded_inputs.size(0)).zero_(),
            requires_grad=False
        )
        for i in steps:
            hidden, log_p, probs, mask = self.recurrence(decoder_input, hidden, mask, idxs, i, context)
            # select the next inputs for the decoder [batch_size x hidden_dim]
            idxs = self.decode(
                probs,
                mask
            ) if eval_tours is None else eval_tours[:, i]
            idxs = idxs.detach() # Otherwise pytorch complains it want's a reward, todo implement this more properly?
            # Gather input embedding of selected
            decoder_input = torch.gather(
                embedded_inputs,
                0,
                idxs.contiguous().view(1, batch_size, 1).expand(1, batch_size, *embedded_inputs.size()[2:])
            ).squeeze(0)
            # use outs to point to next object
            outputs.append(log_p)
            selections.append(idxs)
        return (torch.stack(outputs, 1), torch.stack(selections, 1)), hidden
    def decode(self, probs, mask):
        """Pick the next index from `probs` according to self.decode_type:
        "greedy" takes the argmax (asserting it is feasible), "sampling"
        draws from the multinomial and resamples if a masked position was
        hit."""
        if self.decode_type == "greedy":
            _, idxs = probs.max(1)
            assert not mask.gather(1, idxs.unsqueeze(-1)).data.any(), \
                "Decode greedy: infeasible action has maximum probability"
        elif self.decode_type == "sampling":
            idxs = probs.multinomial(1).squeeze(1)
            # Check if sampling went OK, can go wrong due to bug on GPU
            while mask.gather(1, idxs.unsqueeze(-1)).data.any():
                print(' [!] resampling due to race condition')
                #idxs = probs.multinomial().squeeze(1)
                idxs = probs.multinomial(1).squeeze(1)
        else:
            assert False, "Unknown decode type"
        return idxs
class CriticNetworkLSTM(nn.Module):
    """Useful as a baseline in REINFORCE updates.

    Encodes the input sequence with an LSTM, refines the final hidden
    state through several attention "process block" iterations, and
    regresses it to a single scalar estimate per instance.
    """
    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 n_process_block_iters,
                 tanh_exploration,
                 use_tanh):
        # n_process_block_iters: number of attention refinement passes
        # over the encoder outputs before the final MLP readout.
        super(CriticNetworkLSTM, self).__init__()
        self.hidden_dim = hidden_dim
        self.n_process_block_iters = n_process_block_iters
        self.encoder = Encoder(embedding_dim, hidden_dim)
        self.process_block = Attention(hidden_dim, use_tanh=use_tanh, C=tanh_exploration)
        self.sm = nn.Softmax(dim=1)
        # Two-layer MLP mapping the processed state to a scalar baseline.
        self.decoder = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1)
        )
    def forward(self, inputs):
        """
        Args:
            inputs: [embedding_dim x batch_size x sourceL] of embedded inputs

        Returns:
            [batch_size x 1] scalar baseline estimates.
        """
        inputs = inputs.transpose(0, 1).contiguous()
        # Broadcast the trainable initial states across the batch.
        encoder_hx = self.encoder.init_hx.unsqueeze(0).repeat(inputs.size(1), 1).unsqueeze(0)
        encoder_cx = self.encoder.init_cx.unsqueeze(0).repeat(inputs.size(1), 1).unsqueeze(0)
        # encoder forward pass
        enc_outputs, (enc_h_t, enc_c_t) = self.encoder(inputs, (encoder_hx, encoder_cx))
        # grab the hidden state and process it via the process block
        process_block_state = enc_h_t[-1]
        for i in range(self.n_process_block_iters):
            ref, logits = self.process_block(process_block_state, enc_outputs)
            process_block_state = torch.bmm(ref, self.sm(logits).unsqueeze(2)).squeeze(2)
        # produce the final scalar output
        out = self.decoder(process_block_state)
        return out
class PointerNetwork(nn.Module):
    """Pointer-network actor for sequence construction problems.

    Embeds raw node features with a learned linear map plus batch norm,
    encodes them with an LSTM, then decodes a permutation of the inputs
    with glimpse/pointer attention. Costs and evaluation metrics are
    delegated to the injected `problem` object (TSP or TopoSort).
    """
    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 problem,
                 n_encode_layers=None,
                 tanh_clipping=10.,
                 mask_inner=True,
                 mask_logits=True,
                 normalization=None,
                 num_coordinates=11,
                 **kwargs):
        # embedding_dim / hidden_dim: embedding and LSTM sizes.
        # problem: task object exposing NAME and get_costs(...).
        # tanh_clipping: C for pointer logit clipping (0 disables tanh).
        # num_coordinates: number of raw input features per node.
        # n_encode_layers, normalization and **kwargs are accepted for
        # interface compatibility but are not used here.
        super(PointerNetwork, self).__init__()
        self.problem = problem
        assert problem.NAME == "tsp" or problem.NAME == "toposort", "Pointer Network only supported for TSP and TopoSort"
        self.input_dim = num_coordinates
        #self.input_dim = 3
        self.encoder = Encoder(
            embedding_dim,
            hidden_dim)
        self.decoder = Decoder(
            embedding_dim,
            hidden_dim,
            tanh_exploration=tanh_clipping,
            use_tanh=tanh_clipping > 0,
            n_glimpses=1,
            mask_glimpses=mask_inner,
            mask_logits=mask_logits
        )
        # Trainable initial hidden states
        std = 1. / math.sqrt(embedding_dim)
        self.decoder_in_0 = nn.Parameter(torch.FloatTensor(embedding_dim))
        self.decoder_in_0.data.uniform_(-std, std)
        # Learned linear embedding matrix, [input_dim x embedding_dim].
        self.embedding = nn.Parameter(torch.FloatTensor(self.input_dim, embedding_dim))
        self.embedding.data.uniform_(-std, std)
        self.bn1 = nn.BatchNorm1d(embedding_dim)
        self.bn2 = nn.BatchNorm1d(hidden_dim)
        """
        self.bn3 = nn.BatchNorm1d(hidden_dim)
        encoder_layers = TransformerEncoderLayer(hidden_dim, 1, hidden_dim, dropout=0.5)
        self.transformer_encoder = TransformerEncoder(encoder_layers, 1)
        """
    def set_decode_type(self, decode_type):
        # Forward the decode strategy ("greedy" / "sampling") to the decoder.
        self.decoder.decode_type = decode_type
    def forward(self, inputs, labels, opts, eval_tours=None, return_pi=False, Measures=False, Plot_Data=False):
        """Roll out the pointer network on a batch and score it.

        Args:
            inputs: node features, [batch_size x graph_size x input_dim].
            labels: reference orderings, passed through to problem.get_costs.
            opts: options namespace; only opts.graph_file is read here.
            eval_tours: optional fixed tours for teacher-forced evaluation.
            return_pi: also return the selected permutation `pi`.
            Measures, Plot_Data: flags forwarded to problem.get_costs.

        Returns:
            cost, log-likelihood, and the metrics produced by
            problem.get_costs (misMatch, recall/radius statistics);
            `pi` is included when return_pi is True.
        """
        #def forward(self, inputs, eval_tours=None, return_pi=False, Measures=False, Plot_Data=False):
        #batch_size, graph_size, input_dim = inputs.size()
        #indices = torch.tensor([0, 9, 10]).cuda() # (level, index, memory) for input dim of dataset as 11
        #inputs = torch.index_select(inputs_11, 2, indices).cuda()
        batch_size, graph_size, input_dim = inputs.size()
        """
        embedded_inputs = torch.mm(
            #inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            self.embedding
        ).view(graph_size, batch_size, -1)
        """
        #inputs_weight_free = inputs.detach().clone()
        #inputs_weight_free.index_fill_(2, move_to(torch.tensor([10]), opts.device), 0.)
        # Linear embedding of all nodes followed by bn1 over the embedding
        # (channel) dimension; result is [graph_size x batch_size x emb].
        embedded_inputs = self.bn1(torch.mm(
            inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            #move_to(inputs_weight_free, opts.device).transpose(0, 1).contiguous().view(-1, input_dim),
            self.embedding
        ).view(graph_size, batch_size, -1).transpose(0, 1).transpose(1, 2)).transpose(1, 2).transpose(0, 1)
        #).view(graph_size, batch_size, -1).transpose(1, 2)).transpose(1, 2)
        # query the actor net for the input indices
        # making up the output, and the pointer attn
        _log_p, pi = self._inner(embedded_inputs, eval_tours)
        #cost, mask, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs, pi, Measures, Plot_Data)
        cost, mask, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs, pi, labels, Measures, Plot_Data, opts.graph_file)
        #cost, mask, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs, pi, labels, Measures, Plot_Data)
        # Log likelyhood is calculated within the model since returning it per action does not work well with
        # DataParallel since sequences can be of different lengths
        ll = self._calc_log_likelihood(_log_p, pi, mask)
        if return_pi:
            #return cost, ll, pi, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
            return cost, ll, pi, misMatch, None, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        #return cost, ll, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        return cost, ll, misMatch, None, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
    def _calc_log_likelihood(self, _log_p, a, mask):
        """Sum per-step log-probabilities of the chosen actions `a`;
        masked steps contribute zero."""
        # Get log_p corresponding to selected actions
        log_p = _log_p.gather(2, a.unsqueeze(-1)).squeeze(-1)
        # Optional: mask out actions irrelevant to objective so they do not get reinforced
        if mask is not None:
            log_p[mask] = 0
        assert (log_p > -1000).data.all(), "Logprobs should not be -inf, check sampling procedure!"
        # Calculate log_likelihood
        return log_p.sum(1)
    def _inner(self, inputs, eval_tours=None):
        """Run encoder + decoder and return (per-step log-probs, selected
        index sequence). `inputs` are embedded inputs,
        [sourceL x batch_size x embedding_dim]."""
        # Zero (non-trainable) initial LSTM state, allocated with the same
        # type as the inputs.
        encoder_hx = encoder_cx = Variable(
            torch.zeros(1, inputs.size(1), self.encoder.hidden_dim, out=inputs.data.new()),
            requires_grad=False
        )
        # encoder forward pass
        enc_h, (enc_h_t, enc_c_t) = self.encoder(inputs, (encoder_hx, encoder_cx))
        # bn2 normalises over the hidden (channel) dimension of the outputs.
        enc_h = self.bn2(enc_h.transpose(1, 2)).transpose(1, 2)
        dec_init_state = (enc_h_t[-1], enc_c_t[-1])
        # repeat decoder_in_0 across batch
        decoder_input = self.decoder_in_0.unsqueeze(0).repeat(inputs.size(1), 1)
        """
        enc_h = self.transformer_encoder(enc_h)
        enc_h = self.bn3(enc_h.transpose(1, 2)).transpose(1, 2)
        """
        (pointer_probs, input_idxs), dec_hidden_t = self.decoder(decoder_input,
                                                                 inputs,
                                                                 dec_init_state,
                                                                 enc_h,
                                                                 eval_tours)
        return pointer_probs, input_idxs
| 16,623 | 40.25062 | 198 | py |
RESPECT | RESPECT-main/nets/pointer_network_originalbatch.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import numpy as np
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from utils import move_to
class Encoder(nn.Module):
    """Maps a graph represented as an input sequence
    to a hidden vector"""
    def __init__(self, input_dim, hidden_dim):
        # input_dim: feature size of each sequence element.
        # hidden_dim: LSTM hidden/cell state size.
        super(Encoder, self).__init__()
        self.hidden_dim = hidden_dim
        self.lstm = nn.LSTM(input_dim, hidden_dim)
        # Trainable initial hidden/cell states (registered as parameters
        # by attribute assignment).
        self.init_hx, self.init_cx = self.init_hidden(hidden_dim)
    def forward(self, x, hidden):
        # Run the LSTM over x from the given (h, c) state; returns
        # (per-step outputs, final (h, c)).
        output, hidden = self.lstm(x, hidden)
        return output, hidden
    def init_hidden(self, hidden_dim):
        """Trainable initial hidden state, uniform in
        [-1/sqrt(hidden_dim), 1/sqrt(hidden_dim)]."""
        std = 1. / math.sqrt(hidden_dim)
        enc_init_hx = nn.Parameter(torch.FloatTensor(hidden_dim))
        enc_init_hx.data.uniform_(-std, std)
        enc_init_cx = nn.Parameter(torch.FloatTensor(hidden_dim))
        enc_init_cx.data.uniform_(-std, std)
        return enc_init_hx, enc_init_cx
class Attention(nn.Module):
    """A generic attention module for a decoder in seq2seq.

    Additive (concat-style) attention: scores a single decoder query
    against every encoder state; logits may optionally be bounded to
    [-C, C] via C * tanh(.).
    """
    def __init__(self, dim, use_tanh=False, C=10):
        # dim: hidden size shared by query and reference vectors.
        # use_tanh: if True, logits are clipped as C * tanh(u).
        # C: clipping scale used when use_tanh is set.
        super(Attention, self).__init__()
        self.use_tanh = use_tanh
        self.project_query = nn.Linear(dim, dim)
        # 1x1 convolution acts as a per-position linear projection of refs.
        self.project_ref = nn.Conv1d(dim, dim, 1, 1)
        self.C = C  # tanh exploration
        self.tanh = nn.Tanh()
        # Score vector v, uniform in [-1/sqrt(dim), 1/sqrt(dim)].
        self.v = nn.Parameter(torch.FloatTensor(dim))
        self.v.data.uniform_(-(1. / math.sqrt(dim)), 1. / math.sqrt(dim))
    def forward(self, query, ref):
        """
        Args:
            query: is the hidden state of the decoder at the current
                time step. batch x dim
            ref: the set of hidden states from the encoder.
                sourceL x batch x hidden_dim

        Returns:
            e: projected encoder states, batch x hidden_dim x sourceL
            logits: unnormalised attention scores, batch x sourceL
        """
        # ref is now [batch_size x hidden_dim x sourceL]
        ref = ref.permute(1, 2, 0)
        q = self.project_query(query).unsqueeze(2)  # batch x dim x 1
        e = self.project_ref(ref)  # batch_size x hidden_dim x sourceL
        # expand the query by sourceL
        # batch x dim x sourceL
        expanded_q = q.repeat(1, 1, e.size(2))
        # batch x 1 x hidden_dim
        v_view = self.v.unsqueeze(0).expand(
            expanded_q.size(0), len(self.v)).unsqueeze(1)
        # [batch_size x 1 x hidden_dim] * [batch_size x hidden_dim x sourceL]
        u = torch.bmm(v_view, self.tanh(expanded_q + e)).squeeze(1)
        if self.use_tanh:
            # Bound logits to [-C, C].
            logits = self.C * self.tanh(u)
        else:
            logits = u
        return e, logits
class Decoder(nn.Module):
    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 tanh_exploration,
                 use_tanh,
                 n_glimpses=1,
                 mask_glimpses=True,
                 mask_logits=True):
        """Pointer-network decoder setup.

        embedding_dim: size of input embeddings fed to the LSTM cell.
        hidden_dim: LSTM hidden state size.
        tanh_exploration: C constant for pointer logit clipping.
        use_tanh: whether the pointer attention clips logits via C * tanh.
        n_glimpses: number of glimpse-attention refinement passes.
        mask_glimpses / mask_logits: whether visited positions are masked
            in the glimpse softmax / final pointer logits.
        """
        super(Decoder, self).__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.n_glimpses = n_glimpses
        self.mask_glimpses = mask_glimpses
        self.mask_logits = mask_logits
        self.use_tanh = use_tanh
        self.tanh_exploration = tanh_exploration
        self.decode_type = None  # Needs to be set explicitly before use
        #encoder_layers = TransformerEncoderLayer(embedding_dim, 1, hidden_dim, dropout=0.5)
        #self.transformer_encoder = TransformerEncoder(encoder_layers, 1)
        self.lstm = nn.LSTMCell(embedding_dim, hidden_dim)
        # Pointer attention yields the selection logits; glimpse attention
        # refines the query before pointing.
        self.pointer = Attention(hidden_dim, use_tanh=use_tanh, C=tanh_exploration)
        self.glimpse = Attention(hidden_dim, use_tanh=False)
        self.sm = nn.Softmax(dim=1)
    def update_mask(self, mask, selected):
        # Return a copy of `mask` with the just-selected indices marked True.
        return mask.clone().scatter_(1, selected.unsqueeze(-1), True)
    def recurrence(self, x, h_in, prev_mask, prev_idxs, step, context):
        """One decoding step: update the visited mask, run the LSTM cell and
        attention, and return the new hidden state, log-probs, probs and the
        updated mask."""
        logit_mask = self.update_mask(prev_mask, prev_idxs) if prev_idxs is not None else prev_mask
        logits, h_out = self.calc_logits(x, h_in, logit_mask, context, self.mask_glimpses, self.mask_logits)
        # Calculate log_softmax for better numerical stability
        log_p = torch.log_softmax(logits, dim=1)
        probs = log_p.exp()
        if not self.mask_logits:
            # If self.mask_logits, this would be redundant, otherwise we must mask to make sure we don't resample
            # Note that as a result the vector of probs may not sum to one (this is OK for .multinomial sampling)
            # But practically by not masking the logits, a model is learned over all sequences (also infeasible)
            # while only during sampling feasibility is enforced (a.k.a. by setting to 0. here)
            probs[logit_mask] = 0.
            # For consistency we should also mask out in log_p, but the values set to 0 will not be sampled and
            # Therefore not be used by the reinforce estimator
        return h_out, log_p, probs, logit_mask
def calc_logits(self, x, h_in, logit_mask, context, mask_glimpses=None, mask_logits=None):
if mask_glimpses is None:
mask_glimpses = self.mask_glimpses
if mask_logits is None:
mask_logits = self.mask_logits
#x = self.transformer_encoder(x)
hy, cy = self.lstm(x, h_in)
g_l, h_out = hy, (hy, cy)
for i in range(self.n_glimpses):
ref, logits = self.glimpse(g_l, context)
# For the glimpses, only mask before softmax so we have always an L1 norm 1 readout vector
if mask_glimpses:
logits[logit_mask] = -np.inf
# [batch_size x h_dim x sourceL] * [batch_size x sourceL x 1] =
# [batch_size x h_dim x 1]
g_l = torch.bmm(ref, self.sm(logits).unsqueeze(2)).squeeze(2)
_, logits = self.pointer(g_l, context)
# Masking before softmax makes probs sum to one
if mask_logits:
logits[logit_mask] = -np.inf
return logits, h_out
def forward(self, decoder_input, embedded_inputs, hidden, context, eval_tours=None):
"""
Args:
decoder_input: The initial input to the decoder
size is [batch_size x embedding_dim]. Trainable parameter.
embedded_inputs: [sourceL x batch_size x embedding_dim]
hidden: the prev hidden state, size is [batch_size x hidden_dim].
Initially this is set to (enc_h[-1], enc_c[-1])
context: encoder outputs, [sourceL x batch_size x hidden_dim]
"""
batch_size = context.size(1)
outputs = []
selections = []
steps = range(embedded_inputs.size(0))
idxs = None
mask = Variable(
embedded_inputs.data.new().byte().new(embedded_inputs.size(1), embedded_inputs.size(0)).zero_(),
requires_grad=False
)
for i in steps:
hidden, log_p, probs, mask = self.recurrence(decoder_input, hidden, mask, idxs, i, context)
# select the next inputs for the decoder [batch_size x hidden_dim]
idxs = self.decode(
probs,
mask
) if eval_tours is None else eval_tours[:, i]
idxs = idxs.detach() # Otherwise pytorch complains it want's a reward, todo implement this more properly?
# Gather input embedding of selected
decoder_input = torch.gather(
embedded_inputs,
0,
idxs.contiguous().view(1, batch_size, 1).expand(1, batch_size, *embedded_inputs.size()[2:])
).squeeze(0)
# use outs to point to next object
outputs.append(log_p)
selections.append(idxs)
return (torch.stack(outputs, 1), torch.stack(selections, 1)), hidden
def decode(self, probs, mask):
if self.decode_type == "greedy":
_, idxs = probs.max(1)
assert not mask.gather(1, idxs.unsqueeze(-1)).data.any(), \
"Decode greedy: infeasible action has maximum probability"
elif self.decode_type == "sampling":
idxs = probs.multinomial(1).squeeze(1)
# Check if sampling went OK, can go wrong due to bug on GPU
while mask.gather(1, idxs.unsqueeze(-1)).data.any():
print(' [!] resampling due to race condition')
#idxs = probs.multinomial().squeeze(1)
idxs = probs.multinomial(1).squeeze(1)
else:
assert False, "Unknown decode type"
return idxs
class CriticNetworkLSTM(nn.Module):
    """LSTM-based critic that scores a batch of embedded input sequences.

    Serves as the learned baseline in REINFORCE updates: encode the sequence,
    refine the final hidden state with attention "process block" iterations,
    then map it to a single scalar value per batch element.
    """
    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 n_process_block_iters,
                 tanh_exploration,
                 use_tanh):
        super(CriticNetworkLSTM, self).__init__()
        self.hidden_dim = hidden_dim
        self.n_process_block_iters = n_process_block_iters
        # Sequence encoder and the attention module used to refine its state.
        self.encoder = Encoder(embedding_dim, hidden_dim)
        self.process_block = Attention(hidden_dim, use_tanh=use_tanh, C=tanh_exploration)
        self.sm = nn.Softmax(dim=1)
        # Two-layer MLP reducing the processed state to one scalar.
        self.decoder = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1)
        )
    def forward(self, inputs):
        """
        Args:
            inputs: [embedding_dim x batch_size x sourceL] of embedded inputs
        """
        seq = inputs.transpose(0, 1).contiguous()
        n_batch = seq.size(1)
        # Broadcast the trainable initial (h, c) state across the batch.
        h0 = self.encoder.init_hx.unsqueeze(0).repeat(n_batch, 1).unsqueeze(0)
        c0 = self.encoder.init_cx.unsqueeze(0).repeat(n_batch, 1).unsqueeze(0)
        memory, (h_t, _) = self.encoder(seq, (h0, c0))
        # Iteratively refine the last hidden state via attention glimpses.
        state = h_t[-1]
        for _ in range(self.n_process_block_iters):
            ref, logits = self.process_block(state, memory)
            state = torch.bmm(ref, self.sm(logits).unsqueeze(2)).squeeze(2)
        # Produce the final scalar baseline value.
        return self.decoder(state)
class PointerNetwork(nn.Module):
    """Pointer-network actor: learned linear embedding + LSTM encoder +
    single transformer layer, decoded by the pointer-attention Decoder into a
    permutation of the inputs, which the problem instance turns into a cost
    and auxiliary metrics."""
    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 problem,
                 n_encode_layers=None,
                 tanh_clipping=10.,
                 mask_inner=True,
                 mask_logits=True,
                 normalization=None,
                 num_coordinates=11,
                 **kwargs):
        super(PointerNetwork, self).__init__()
        self.problem = problem
        assert problem.NAME == "tsp" or problem.NAME == "toposort", "Pointer Network only supported for TSP and TopoSort"
        # Per-node input feature dimension.
        self.input_dim = num_coordinates
        #self.input_dim = 5
        self.encoder = Encoder(
            embedding_dim,
            hidden_dim)
        self.decoder = Decoder(
            embedding_dim,
            hidden_dim,
            tanh_exploration=tanh_clipping,
            use_tanh=tanh_clipping > 0,
            n_glimpses=1,
            mask_glimpses=mask_inner,
            mask_logits=mask_logits
        )
        # Trainable initial hidden states
        std = 1. / math.sqrt(embedding_dim)
        self.decoder_in_0 = nn.Parameter(torch.FloatTensor(embedding_dim))
        self.decoder_in_0.data.uniform_(-std, std)
        self.embedding = nn.Parameter(torch.FloatTensor(self.input_dim, embedding_dim))
        self.embedding.data.uniform_(-std, std)
        # Batch norms applied after the input embedding (bn1), the LSTM
        # encoder (bn2) and the transformer refinement (bn3).
        self.bn1 = nn.BatchNorm1d(embedding_dim)
        self.bn2 = nn.BatchNorm1d(hidden_dim)
        self.bn3 = nn.BatchNorm1d(hidden_dim)
        encoder_layers = TransformerEncoderLayer(hidden_dim, 1, hidden_dim, dropout=0.5)
        self.transformer_encoder = TransformerEncoder(encoder_layers, 1)
    def set_decode_type(self, decode_type):
        # "greedy" or "sampling"; must be set before decoding.
        self.decoder.decode_type = decode_type
    def forward(self, inputs, labels, opts, eval_tours=None, return_pi=False, Measures=False, Plot_Data=False):
        """Run the actor on a batch.

        :param inputs: [batch_size x graph_size x input_dim] node features
        :param labels: ground truth forwarded to problem.get_costs
        :param opts: run options; opts.graph_file is forwarded to get_costs
        :param eval_tours: optional fixed tours to evaluate instead of decoding
        :param return_pi: also return the decoded permutation pi
        :return: cost, log-likelihood and problem-specific metrics
        """
        #def forward(self, inputs, eval_tours=None, return_pi=False, Measures=False, Plot_Data=False):
        #batch_size, graph_size, input_dim = inputs.size()
        batch_size, graph_size, input_dim = inputs.size()
        """
        embedded_inputs = torch.mm(
            #inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            self.embedding
        ).view(graph_size, batch_size, -1)
        """
        #inputs_weight_free = inputs.detach().clone()
        #inputs_weight_free.index_fill_(2, move_to(torch.tensor([10]), opts.device), 0.)
        # Linear embedding followed by BatchNorm over the feature dimension;
        # the transposes move to the [batch x emb x sourceL] layout
        # BatchNorm1d expects and back to [sourceL x batch x emb].
        embedded_inputs = self.bn1(torch.mm(
            inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            #move_to(inputs_weight_free, opts.device).transpose(0, 1).contiguous().view(-1, input_dim),
            self.embedding
        ).view(graph_size, batch_size, -1).transpose(0, 1).transpose(1, 2)).transpose(1, 2).transpose(0, 1)
        # query the actor net for the input indices
        # making up the output, and the pointer attn
        _log_p, pi = self._inner(embedded_inputs, eval_tours)
        #cost, mask, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs, pi, Measures, Plot_Data)
        cost, mask, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs, pi, labels, Measures, Plot_Data, opts.graph_file)
        #cost, mask, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs, pi, labels, Measures, Plot_Data)
        # Log likelyhood is calculated within the model since returning it per action does not work well with
        # DataParallel since sequences can be of different lengths
        ll = self._calc_log_likelihood(_log_p, pi, mask)
        if return_pi:
            #return cost, ll, pi, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
            return cost, ll, pi, misMatch, None, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        #return cost, ll, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        return cost, ll, misMatch, None, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
    def _calc_log_likelihood(self, _log_p, a, mask):
        # Get log_p corresponding to selected actions
        log_p = _log_p.gather(2, a.unsqueeze(-1)).squeeze(-1)
        # Optional: mask out actions irrelevant to objective so they do not get reinforced
        if mask is not None:
            log_p[mask] = 0
        assert (log_p > -1000).data.all(), "Logprobs should not be -inf, check sampling procedure!"
        # Calculate log_likelihood
        return log_p.sum(1)
    def _inner(self, inputs, eval_tours=None):
        # Zero initial LSTM state allocated with the same type as `inputs`.
        encoder_hx = encoder_cx = Variable(
            torch.zeros(1, inputs.size(1), self.encoder.hidden_dim, out=inputs.data.new()),
            requires_grad=False
        )
        # encoder forward pass
        enc_h, (enc_h_t, enc_c_t) = self.encoder(inputs, (encoder_hx, encoder_cx))
        enc_h = self.bn2(enc_h.transpose(1, 2)).transpose(1, 2)
        dec_init_state = (enc_h_t[-1], enc_c_t[-1])
        # repeat decoder_in_0 across batch
        decoder_input = self.decoder_in_0.unsqueeze(0).repeat(inputs.size(1), 1)
        # Refine encoder outputs with the transformer layer before decoding.
        enc_h = self.transformer_encoder(enc_h)
        enc_h = self.bn3(enc_h.transpose(1, 2)).transpose(1, 2)
        (pointer_probs, input_idxs), dec_hidden_t = self.decoder(decoder_input,
                                                                 inputs,
                                                                 dec_init_state,
                                                                 enc_h,
                                                                 eval_tours)
        return pointer_probs, input_idxs
| 16,271 | 40.510204 | 198 | py |
RESPECT | RESPECT-main/nets/pointer_network_model_run.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import numpy as np
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from utils import move_to
class Encoder(nn.Module):
    """Encodes a graph, given as a sequence of node features, into a
    sequence of hidden vectors using a single-layer LSTM."""
    def __init__(self, input_dim, hidden_dim):
        super(Encoder, self).__init__()
        self.hidden_dim = hidden_dim
        self.lstm = nn.LSTM(input_dim, hidden_dim)
        # Learnable initial (h, c) state, shared across batch elements.
        self.init_hx, self.init_cx = self.init_hidden(hidden_dim)
    def forward(self, x, hidden):
        """Run the LSTM over `x` from state `hidden`; returns the per-step
        outputs and the final (h, c) state."""
        return self.lstm(x, hidden)
    def init_hidden(self, hidden_dim):
        """Create the trainable initial hidden and cell state parameters,
        uniformly initialised in [-1/sqrt(hidden_dim), 1/sqrt(hidden_dim)]."""
        bound = 1. / math.sqrt(hidden_dim)
        h0 = nn.Parameter(torch.FloatTensor(hidden_dim))
        c0 = nn.Parameter(torch.FloatTensor(hidden_dim))
        for param in (h0, c0):
            param.data.uniform_(-bound, bound)
        return h0, c0
class Attention(nn.Module):
    """Additive (Bahdanau-style) attention module for a seq2seq decoder."""
    def __init__(self, dim, use_tanh=False, C=10):
        super(Attention, self).__init__()
        self.use_tanh = use_tanh
        self.project_query = nn.Linear(dim, dim)
        self.project_ref = nn.Conv1d(dim, dim, 1, 1)
        self.C = C  # tanh exploration / logit clipping scale
        self.tanh = nn.Tanh()
        # Scoring vector v, uniform in [-1/sqrt(dim), 1/sqrt(dim)].
        self.v = nn.Parameter(torch.FloatTensor(dim))
        self.v.data.uniform_(-(1. / math.sqrt(dim)), 1. / math.sqrt(dim))
    def forward(self, query, ref):
        """Score every encoder state in `ref` against `query`.

        Args:
            query: decoder hidden state at the current step, [batch x dim].
            ref: encoder hidden states, [sourceL x batch x dim].
        Returns:
            (projected refs [batch x dim x sourceL], logits [batch x sourceL]).
        """
        # Bring ref to [batch x dim x sourceL] for the 1x1 convolution.
        ref = ref.permute(1, 2, 0)
        e = self.project_ref(ref)
        # Project the query and broadcast it over the source length.
        q = self.project_query(query).unsqueeze(2).repeat(1, 1, e.size(2))
        # u[b, l] = v . tanh(q[b, :, l] + e[b, :, l])
        v_row = self.v.unsqueeze(0).expand(q.size(0), len(self.v)).unsqueeze(1)
        u = torch.bmm(v_row, self.tanh(q + e)).squeeze(1)
        # Optionally squash logits into [-C, C] for exploration control.
        logits = self.C * self.tanh(u) if self.use_tanh else u
        return e, logits
class Decoder(nn.Module):
    """Pointer-network decoder.

    A single LSTM cell is unrolled over the input sequence; at each step it
    applies ``n_glimpses`` attention "glimpses" over the encoder context,
    then a final pointer attention whose logits define a distribution over
    the input positions that have not been selected yet (tracked by a mask).
    """
    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 tanh_exploration,
                 use_tanh,
                 n_glimpses=1,
                 mask_glimpses=True,
                 mask_logits=True):
        super(Decoder, self).__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.n_glimpses = n_glimpses
        self.mask_glimpses = mask_glimpses
        self.mask_logits = mask_logits
        self.use_tanh = use_tanh
        self.tanh_exploration = tanh_exploration
        self.decode_type = None  # Needs to be set explicitly before use
        #encoder_layers = TransformerEncoderLayer(embedding_dim, 1, hidden_dim, dropout=0.5)
        #self.transformer_encoder = TransformerEncoder(encoder_layers, 1)
        self.lstm = nn.LSTMCell(embedding_dim, hidden_dim)
        self.pointer = Attention(hidden_dim, use_tanh=use_tanh, C=tanh_exploration)
        self.glimpse = Attention(hidden_dim, use_tanh=False)
        self.sm = nn.Softmax(dim=1)
    def update_mask(self, mask, selected):
        # Mark the newly selected positions as used (True) in a copy of the mask.
        return mask.clone().scatter_(1, selected.unsqueeze(-1), True)
    def recurrence(self, x, h_in, prev_mask, prev_idxs, step, context):
        # One decoding step: fold the previous selection into the mask, run the
        # LSTM + attention stack, and return the per-step distribution.
        logit_mask = self.update_mask(prev_mask, prev_idxs) if prev_idxs is not None else prev_mask
        logits, h_out = self.calc_logits(x, h_in, logit_mask, context, self.mask_glimpses, self.mask_logits)
        # Calculate log_softmax for better numerical stability
        log_p = torch.log_softmax(logits, dim=1)
        probs = log_p.exp()
        if not self.mask_logits:
            # If self.mask_logits, this would be redundant, otherwise we must mask to make sure we don't resample
            # Note that as a result the vector of probs may not sum to one (this is OK for .multinomial sampling)
            # But practically by not masking the logits, a model is learned over all sequences (also infeasible)
            # while only during sampling feasibility is enforced (a.k.a. by setting to 0. here)
            probs[logit_mask] = 0.
            # For consistency we should also mask out in log_p, but the values set to 0 will not be sampled and
            # Therefore not be used by the reinforce estimator
        return h_out, log_p, probs, logit_mask
    def calc_logits(self, x, h_in, logit_mask, context, mask_glimpses=None, mask_logits=None):
        # Compute pointer logits for one step; falls back to the module-level
        # masking flags when none are passed explicitly.
        if mask_glimpses is None:
            mask_glimpses = self.mask_glimpses
        if mask_logits is None:
            mask_logits = self.mask_logits
        #x = self.transformer_encoder(x)
        hy, cy = self.lstm(x, h_in)
        g_l, h_out = hy, (hy, cy)
        for i in range(self.n_glimpses):
            ref, logits = self.glimpse(g_l, context)
            # For the glimpses, only mask before softmax so we have always an L1 norm 1 readout vector
            if mask_glimpses:
                logits[logit_mask] = -np.inf
            # [batch_size x h_dim x sourceL] * [batch_size x sourceL x 1] =
            # [batch_size x h_dim x 1]
            g_l = torch.bmm(ref, self.sm(logits).unsqueeze(2)).squeeze(2)
        _, logits = self.pointer(g_l, context)
        # Masking before softmax makes probs sum to one
        if mask_logits:
            logits[logit_mask] = -np.inf
        return logits, h_out
    def forward(self, decoder_input, embedded_inputs, hidden, context, eval_tours=None):
        """
        Args:
            decoder_input: The initial input to the decoder
                size is [batch_size x embedding_dim]. Trainable parameter.
            embedded_inputs: [sourceL x batch_size x embedding_dim]
            hidden: the prev hidden state, size is [batch_size x hidden_dim].
                Initially this is set to (enc_h[-1], enc_c[-1])
            context: encoder outputs, [sourceL x batch_size x hidden_dim]
        """
        batch_size = context.size(1)
        outputs = []
        selections = []
        steps = range(embedded_inputs.size(0))
        idxs = None
        # [batch_size x sourceL] byte mask, True = already visited.
        # NOTE(review): built with the legacy `.new().byte()` tensor API.
        mask = Variable(
            embedded_inputs.data.new().byte().new(embedded_inputs.size(1), embedded_inputs.size(0)).zero_(),
            requires_grad=False
        )
        for i in steps:
            hidden, log_p, probs, mask = self.recurrence(decoder_input, hidden, mask, idxs, i, context)
            # select the next inputs for the decoder [batch_size x hidden_dim]
            idxs = self.decode(
                probs,
                mask
            ) if eval_tours is None else eval_tours[:, i]
            idxs = idxs.detach()  # Otherwise pytorch complains it want's a reward, todo implement this more properly?
            # Gather input embedding of selected
            decoder_input = torch.gather(
                embedded_inputs,
                0,
                idxs.contiguous().view(1, batch_size, 1).expand(1, batch_size, *embedded_inputs.size()[2:])
            ).squeeze(0)
            # use outs to point to next object
            outputs.append(log_p)
            selections.append(idxs)
        return (torch.stack(outputs, 1), torch.stack(selections, 1)), hidden
    def decode(self, probs, mask):
        # Turn the step distribution into concrete indices according to the
        # configured decode strategy ("greedy" or "sampling").
        if self.decode_type == "greedy":
            _, idxs = probs.max(1)
            assert not mask.gather(1, idxs.unsqueeze(-1)).data.any(), \
                "Decode greedy: infeasible action has maximum probability"
        elif self.decode_type == "sampling":
            idxs = probs.multinomial(1).squeeze(1)
            # Check if sampling went OK, can go wrong due to bug on GPU
            while mask.gather(1, idxs.unsqueeze(-1)).data.any():
                print(' [!] resampling due to race condition')
                #idxs = probs.multinomial().squeeze(1)
                idxs = probs.multinomial(1).squeeze(1)
        else:
            assert False, "Unknown decode type"
        return idxs
class CriticNetworkLSTM(nn.Module):
    """LSTM-based critic that scores a batch of embedded input sequences.

    Serves as the learned baseline in REINFORCE updates: encode the sequence,
    refine the final hidden state with attention "process block" iterations,
    then map it to a single scalar value per batch element.
    """
    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 n_process_block_iters,
                 tanh_exploration,
                 use_tanh):
        super(CriticNetworkLSTM, self).__init__()
        self.hidden_dim = hidden_dim
        self.n_process_block_iters = n_process_block_iters
        # Sequence encoder and the attention module used to refine its state.
        self.encoder = Encoder(embedding_dim, hidden_dim)
        self.process_block = Attention(hidden_dim, use_tanh=use_tanh, C=tanh_exploration)
        self.sm = nn.Softmax(dim=1)
        # Two-layer MLP reducing the processed state to one scalar.
        self.decoder = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1)
        )
    def forward(self, inputs):
        """
        Args:
            inputs: [embedding_dim x batch_size x sourceL] of embedded inputs
        """
        seq = inputs.transpose(0, 1).contiguous()
        n_batch = seq.size(1)
        # Broadcast the trainable initial (h, c) state across the batch.
        h0 = self.encoder.init_hx.unsqueeze(0).repeat(n_batch, 1).unsqueeze(0)
        c0 = self.encoder.init_cx.unsqueeze(0).repeat(n_batch, 1).unsqueeze(0)
        memory, (h_t, _) = self.encoder(seq, (h0, c0))
        # Iteratively refine the last hidden state via attention glimpses.
        state = h_t[-1]
        for _ in range(self.n_process_block_iters):
            ref, logits = self.process_block(state, memory)
            state = torch.bmm(ref, self.sm(logits).unsqueeze(2)).squeeze(2)
        # Produce the final scalar baseline value.
        return self.decoder(state)
class PointerNetwork(nn.Module):
    """Pointer-network actor: learned linear embedding + LSTM encoder +
    single transformer layer, decoded by the pointer-attention Decoder into a
    permutation of the inputs, which the problem instance turns into a cost
    and auxiliary metrics."""
    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 problem,
                 n_encode_layers=None,
                 tanh_clipping=10.,
                 mask_inner=True,
                 mask_logits=True,
                 normalization=None,
                 num_coordinates=11,
                 **kwargs):
        super(PointerNetwork, self).__init__()
        self.problem = problem
        assert problem.NAME == "tsp" or problem.NAME == "toposort", "Pointer Network only supported for TSP and TopoSort"
        # Per-node input feature dimension.
        self.input_dim = num_coordinates
        #self.input_dim = 5
        self.encoder = Encoder(
            embedding_dim,
            hidden_dim)
        self.decoder = Decoder(
            embedding_dim,
            hidden_dim,
            tanh_exploration=tanh_clipping,
            use_tanh=tanh_clipping > 0,
            n_glimpses=1,
            mask_glimpses=mask_inner,
            mask_logits=mask_logits
        )
        # Trainable initial hidden states
        std = 1. / math.sqrt(embedding_dim)
        self.decoder_in_0 = nn.Parameter(torch.FloatTensor(embedding_dim))
        self.decoder_in_0.data.uniform_(-std, std)
        self.embedding = nn.Parameter(torch.FloatTensor(self.input_dim, embedding_dim))
        self.embedding.data.uniform_(-std, std)
        # Batch norms applied after the input embedding (bn1), the LSTM
        # encoder (bn2) and the transformer refinement (bn3).
        self.bn1 = nn.BatchNorm1d(embedding_dim)
        self.bn2 = nn.BatchNorm1d(hidden_dim)
        self.bn3 = nn.BatchNorm1d(hidden_dim)
        encoder_layers = TransformerEncoderLayer(hidden_dim, 1, hidden_dim, dropout=0.5)
        self.transformer_encoder = TransformerEncoder(encoder_layers, 1)
    def set_decode_type(self, decode_type):
        # "greedy" or "sampling"; must be set before decoding.
        self.decoder.decode_type = decode_type
    def forward(self, inputs, labels, opts, eval_tours=None, return_pi=False, Measures=False, Plot_Data=False):
        """Run the actor on a batch.

        :param inputs: [batch_size x graph_size x input_dim] node features
        :param labels: ground truth forwarded to problem.get_costs
        :param opts: run options; opts.graph_file is forwarded to get_costs
        :param eval_tours: optional fixed tours to evaluate instead of decoding
        :param return_pi: also return the decoded permutation pi
        :return: cost, log-likelihood and problem-specific metrics
        """
        #def forward(self, inputs, eval_tours=None, return_pi=False, Measures=False, Plot_Data=False):
        #batch_size, graph_size, input_dim = inputs.size()
        batch_size, graph_size, input_dim = inputs.size()
        """
        embedded_inputs = torch.mm(
            #inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            self.embedding
        ).view(graph_size, batch_size, -1)
        """
        #inputs_weight_free = inputs.detach().clone()
        #inputs_weight_free.index_fill_(2, move_to(torch.tensor([10]), opts.device), 0.)
        # Linear embedding followed by BatchNorm over the feature dimension;
        # the transposes move to the [batch x emb x sourceL] layout
        # BatchNorm1d expects and back to [sourceL x batch x emb].
        embedded_inputs = self.bn1(torch.mm(
            inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            #move_to(inputs_weight_free, opts.device).transpose(0, 1).contiguous().view(-1, input_dim),
            self.embedding
        ).view(graph_size, batch_size, -1).transpose(0, 1).transpose(1, 2)).transpose(1, 2).transpose(0, 1)
        #).view(graph_size, batch_size, -1).transpose(1, 2)).transpose(1, 2)
        # query the actor net for the input indices
        # making up the output, and the pointer attn
        _log_p, pi = self._inner(embedded_inputs, eval_tours)
        #cost, mask, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs, pi, Measures, Plot_Data)
        cost, mask, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs, pi, labels, Measures, Plot_Data, opts.graph_file)
        #cost, mask, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs, pi, labels, Measures, Plot_Data)
        # Log likelyhood is calculated within the model since returning it per action does not work well with
        # DataParallel since sequences can be of different lengths
        ll = self._calc_log_likelihood(_log_p, pi, mask)
        if return_pi:
            #return cost, ll, pi, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
            return cost, ll, pi, misMatch, None, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        #return cost, ll, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        return cost, ll, misMatch, None, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
    def _calc_log_likelihood(self, _log_p, a, mask):
        # Get log_p corresponding to selected actions
        log_p = _log_p.gather(2, a.unsqueeze(-1)).squeeze(-1)
        # Optional: mask out actions irrelevant to objective so they do not get reinforced
        if mask is not None:
            log_p[mask] = 0
        assert (log_p > -1000).data.all(), "Logprobs should not be -inf, check sampling procedure!"
        # Calculate log_likelihood
        return log_p.sum(1)
    def _inner(self, inputs, eval_tours=None):
        # Zero initial LSTM state allocated with the same type as `inputs`.
        encoder_hx = encoder_cx = Variable(
            torch.zeros(1, inputs.size(1), self.encoder.hidden_dim, out=inputs.data.new()),
            requires_grad=False
        )
        # encoder forward pass
        enc_h, (enc_h_t, enc_c_t) = self.encoder(inputs, (encoder_hx, encoder_cx))
        enc_h = self.bn2(enc_h.transpose(1, 2)).transpose(1, 2)
        dec_init_state = (enc_h_t[-1], enc_c_t[-1])
        # repeat decoder_in_0 across batch
        decoder_input = self.decoder_in_0.unsqueeze(0).repeat(inputs.size(1), 1)
        # Refine encoder outputs with the transformer layer before decoding.
        enc_h = self.transformer_encoder(enc_h)
        enc_h = self.bn3(enc_h.transpose(1, 2)).transpose(1, 2)
        (pointer_probs, input_idxs), dec_hidden_t = self.decoder(decoder_input,
                                                                 inputs,
                                                                 dec_init_state,
                                                                 enc_h,
                                                                 eval_tours)
        return pointer_probs, input_idxs
| 16,348 | 40.600509 | 198 | py |
RESPECT | RESPECT-main/nets/attention_model.py | import torch
from torch import nn
from torch.utils.checkpoint import checkpoint
import math
from typing import NamedTuple
from utils.tensor_functions import compute_in_batches
from nets.graph_encoder import GraphAttentionEncoder
from torch.nn import DataParallel
from utils.beam_search import CachedLookup
from utils.functions import sample_many
def set_decode_type(model, decode_type):
    """Set the decode strategy on `model`, unwrapping a DataParallel
    wrapper first when present."""
    target = model.module if isinstance(model, DataParallel) else model
    target.set_decode_type(decode_type)
bypass = super
class AttentionModelFixed(NamedTuple):
    """
    Context for AttentionModel decoder that is fixed during decoding so can be precomputed/cached
    This class allows for efficient indexing of multiple Tensors at once
    """
    node_embeddings: torch.Tensor
    context_node_projected: torch.Tensor
    glimpse_key: torch.Tensor
    glimpse_val: torch.Tensor
    logit_key: torch.Tensor
    def __getitem__(self, key):
        # Tensor or slice keys index the batch dimension of every field at
        # once; glimpse_key/glimpse_val carry the heads on dim 0, hence [:, key].
        if torch.is_tensor(key) or isinstance(key, slice):
            return AttentionModelFixed(
                node_embeddings=self.node_embeddings[key],
                context_node_projected=self.context_node_projected[key],
                glimpse_key=self.glimpse_key[:, key],  # dim 0 are the heads
                glimpse_val=self.glimpse_val[:, key],  # dim 0 are the heads
                logit_key=self.logit_key[key]
            )
        # Any other key falls through to plain tuple indexing via the
        # module-level `bypass` alias of super (see note at its definition).
        #return super(AttentionModelFixed, self).__getitem__(key)
        return bypass(AttentionModelFixed, self).__getitem__(key)
class AttentionModel(nn.Module):
    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 problem,
                 n_encode_layers=2,
                 tanh_clipping=10.,
                 mask_inner=True,
                 mask_logits=True,
                 normalization='batch',
                 n_heads=8,
                 checkpoint_encoder=False,
                 shrink_size=None):
        """Build the attention encoder/decoder for the given routing problem.

        The problem's NAME ('tsp', 'cvrp', 'sdvrp', 'op', 'pctsp') determines
        the per-node input dimension and the step-context dimension used by
        the decoder.
        """
        super(AttentionModel, self).__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.n_encode_layers = n_encode_layers
        self.decode_type = None  # set later via set_decode_type
        self.temp = 1.0  # softmax temperature
        self.allow_partial = problem.NAME == 'sdvrp'
        self.is_vrp = problem.NAME == 'cvrp' or problem.NAME == 'sdvrp'
        self.is_orienteering = problem.NAME == 'op'
        self.is_pctsp = problem.NAME == 'pctsp'
        self.tanh_clipping = tanh_clipping
        self.mask_inner = mask_inner
        self.mask_logits = mask_logits
        self.problem = problem
        self.n_heads = n_heads
        self.checkpoint_encoder = checkpoint_encoder
        self.shrink_size = shrink_size
        # Problem specific context parameters (placeholder and step context dimension)
        if self.is_vrp or self.is_orienteering or self.is_pctsp:
            # Embedding of last node + remaining_capacity / remaining length / remaining prize to collect
            step_context_dim = embedding_dim + 1
            if self.is_pctsp:
                node_dim = 4  # x, y, expected_prize, penalty
            else:
                node_dim = 3  # x, y, demand / prize
            # Special embedding projection for depot node
            self.init_embed_depot = nn.Linear(2, embedding_dim)
            if self.is_vrp and self.allow_partial:  # Need to include the demand if split delivery allowed
                self.project_node_step = nn.Linear(1, 3 * embedding_dim, bias=False)
        else:  # TSP
            assert problem.NAME == "tsp", "Unsupported problem: {}".format(problem.NAME)
            step_context_dim = 2 * embedding_dim  # Embedding of first and last node
            node_dim = 2  # x, y
            # Learned input symbols for first action
            self.W_placeholder = nn.Parameter(torch.Tensor(2 * embedding_dim))
            self.W_placeholder.data.uniform_(-1, 1)  # Placeholder should be in range of activations
        self.init_embed = nn.Linear(node_dim, embedding_dim)
        self.embedder = GraphAttentionEncoder(
            n_heads=n_heads,
            embed_dim=embedding_dim,
            n_layers=self.n_encode_layers,
            normalization=normalization
        )
        # For each node we compute (glimpse key, glimpse value, logit key) so 3 * embedding_dim
        self.project_node_embeddings = nn.Linear(embedding_dim, 3 * embedding_dim, bias=False)
        self.project_fixed_context = nn.Linear(embedding_dim, embedding_dim, bias=False)
        self.project_step_context = nn.Linear(step_context_dim, embedding_dim, bias=False)
        assert embedding_dim % n_heads == 0
        # Note n_heads * val_dim == embedding_dim so input to project_out is embedding_dim
        self.project_out = nn.Linear(embedding_dim, embedding_dim, bias=False)
def set_decode_type(self, decode_type, temp=None):
self.decode_type = decode_type
if temp is not None: # Do not change temperature if not provided
self.temp = temp
    def forward(self, input, return_pi=False):
        """
        :param input: (batch_size, graph_size, node_dim) input node features or dictionary with multiple tensors
        :param return_pi: whether to return the output sequences, this is optional as it is not compatible with
        using DataParallel as the results may be of different lengths on different GPUs
        :return: (cost, log_likelihood) or (cost, log_likelihood, pi)
        """
        if self.checkpoint_encoder and self.training:  # Only checkpoint if we need gradients
            embeddings, _ = checkpoint(self.embedder, self._init_embed(input))
        else:
            embeddings, _ = self.embedder(self._init_embed(input))
        # Autoregressively decode a sequence and let the problem score it.
        _log_p, pi = self._inner(input, embeddings)
        cost, mask = self.problem.get_costs(input, pi)
        # Log likelyhood is calculated within the model since returning it per action does not work well with
        # DataParallel since sequences can be of different lengths
        ll = self._calc_log_likelihood(_log_p, pi, mask)
        if return_pi:
            return cost, ll, pi
        return cost, ll
    def beam_search(self, *args, **kwargs):
        # Delegate beam search to the problem definition, injecting this model.
        return self.problem.beam_search(*args, **kwargs, model=self)
def precompute_fixed(self, input):
embeddings, _ = self.embedder(self._init_embed(input))
# Use a CachedLookup such that if we repeatedly index this object with the same index we only need to do
# the lookup once... this is the case if all elements in the batch have maximum batch size
return CachedLookup(self._precompute(embeddings))
def propose_expansions(self, beam, fixed, expand_size=None, normalize=False, max_calc_batch_size=4096):
# First dim = batch_size * cur_beam_size
log_p_topk, ind_topk = compute_in_batches(
lambda b: self._get_log_p_topk(fixed[b.ids], b.state, k=expand_size, normalize=normalize),
max_calc_batch_size, beam, n=beam.size()
)
assert log_p_topk.size(1) == 1, "Can only have single step"
# This will broadcast, calculate log_p (score) of expansions
score_expand = beam.score[:, None] + log_p_topk[:, 0, :]
# We flatten the action as we need to filter and this cannot be done in 2d
flat_action = ind_topk.view(-1)
flat_score = score_expand.view(-1)
flat_feas = flat_score > -1e10 # != -math.inf triggers
# Parent is row idx of ind_topk, can be found by enumerating elements and dividing by number of columns
flat_parent = torch.arange(flat_action.size(-1), out=flat_action.new()) / ind_topk.size(-1)
# Filter infeasible
feas_ind_2d = torch.nonzero(flat_feas)
if len(feas_ind_2d) == 0:
# Too bad, no feasible expansions at all :(
return None, None, None
feas_ind = feas_ind_2d[:, 0]
return flat_parent[feas_ind], flat_action[feas_ind], flat_score[feas_ind]
def _calc_log_likelihood(self, _log_p, a, mask):
# Get log_p corresponding to selected actions
log_p = _log_p.gather(2, a.unsqueeze(-1)).squeeze(-1)
# Optional: mask out actions irrelevant to objective so they do not get reinforced
if mask is not None:
log_p[mask] = 0
assert (log_p > -1000).data.all(), "Logprobs should not be -inf, check sampling procedure!"
# Calculate log_likelihood
return log_p.sum(1)
    def _init_embed(self, input):
        # Build initial node embeddings. For depot-based variants `input` is a
        # dict; the depot gets its own projection and the problem-specific
        # per-node features are appended to the coordinates before embedding.
        if self.is_vrp or self.is_orienteering or self.is_pctsp:
            if self.is_vrp:
                features = ('demand', )
            elif self.is_orienteering:
                features = ('prize', )
            else:
                assert self.is_pctsp
                features = ('deterministic_prize', 'penalty')
            return torch.cat(
                (
                    self.init_embed_depot(input['depot'])[:, None, :],
                    self.init_embed(torch.cat((
                        input['loc'],
                        *(input[feat][:, :, None] for feat in features)
                    ), -1))
                ),
                1
            )
        # TSP
        return self.init_embed(input)
    def _inner(self, input, embeddings):
        # Autoregressive decoding loop over the problem state; optionally
        # shrinks the batch to only the unfinished instances for speed.
        outputs = []
        sequences = []
        state = self.problem.make_state(input)
        # Compute keys, values for the glimpse and keys for the logits once as they can be reused in every step
        fixed = self._precompute(embeddings)
        batch_size = state.ids.size(0)
        # Perform decoding steps
        i = 0
        while not (self.shrink_size is None and state.all_finished()):
            if self.shrink_size is not None:
                unfinished = torch.nonzero(state.get_finished() == 0)
                if len(unfinished) == 0:
                    break
                unfinished = unfinished[:, 0]
                # Check if we can shrink by at least shrink_size and if this leaves at least 16
                # (otherwise batch norm will not work well and it is inefficient anyway)
                if 16 <= len(unfinished) <= state.ids.size(0) - self.shrink_size:
                    # Filter states
                    state = state[unfinished]
                    fixed = fixed[unfinished]
            log_p, mask = self._get_log_p(fixed, state)
            # Select the indices of the next nodes in the sequences, result (batch_size) long
            selected = self._select_node(log_p.exp()[:, 0, :], mask[:, 0, :])  # Squeeze out steps dimension
            state = state.update(selected)
            # Now make log_p, selected desired output size by 'unshrinking'
            if self.shrink_size is not None and state.ids.size(0) < batch_size:
                log_p_, selected_ = log_p, selected
                log_p = log_p_.new_zeros(batch_size, *log_p_.size()[1:])
                selected = selected_.new_zeros(batch_size)
                log_p[state.ids[:, 0]] = log_p_
                selected[state.ids[:, 0]] = selected_
            # Collect output of step
            outputs.append(log_p[:, 0, :])
            sequences.append(selected)
            i += 1
        # Collected lists, return Tensor
        return torch.stack(outputs, 1), torch.stack(sequences, 1)
    def sample_many(self, input, batch_rep=1, iter_rep=1):
        """
        :param input: (batch_size, graph_size, node_dim) input node features
        :param batch_rep: number of times the batch is replicated per iteration
        :param iter_rep: number of sampling iterations
        :return: result of the module-level sample_many helper
        """
        # Bit ugly but we need to pass the embeddings as well.
        # Making a tuple will not work with the problem.get_cost function
        return sample_many(
            lambda input: self._inner(*input),  # Need to unpack tuple into arguments
            lambda input, pi: self.problem.get_costs(input[0], pi),  # Don't need embeddings as input to get_costs
            (input, self.embedder(self._init_embed(input))[0]),  # Pack input with embeddings (additional input)
            batch_rep, iter_rep
        )
def _select_node(self, probs, mask):
assert (probs == probs).all(), "Probs should not contain any nans"
if self.decode_type == "greedy":
_, selected = probs.max(1)
assert not mask.gather(1, selected.unsqueeze(
-1)).data.any(), "Decode greedy: infeasible action has maximum probability"
elif self.decode_type == "sampling":
selected = probs.multinomial(1).squeeze(1)
# Check if sampling went OK, can go wrong due to bug on GPU
# See https://discuss.pytorch.org/t/bad-behavior-of-multinomial-function/10232
while mask.gather(1, selected.unsqueeze(-1)).data.any():
print('Sampled bad values, resampling!')
selected = probs.multinomial(1).squeeze(1)
else:
assert False, "Unknown decode type"
return selected
def _precompute(self, embeddings, num_steps=1):
# The fixed context projection of the graph embedding is calculated only once for efficiency
graph_embed = embeddings.mean(1)
# fixed context = (batch_size, 1, embed_dim) to make broadcastable with parallel timesteps
fixed_context = self.project_fixed_context(graph_embed)[:, None, :]
# The projection of the node embeddings for the attention is calculated once up front
glimpse_key_fixed, glimpse_val_fixed, logit_key_fixed = \
self.project_node_embeddings(embeddings[:, None, :, :]).chunk(3, dim=-1)
# No need to rearrange key for logit as there is a single head
fixed_attention_node_data = (
self._make_heads(glimpse_key_fixed, num_steps),
self._make_heads(glimpse_val_fixed, num_steps),
logit_key_fixed.contiguous()
)
return AttentionModelFixed(embeddings, fixed_context, *fixed_attention_node_data)
def _get_log_p_topk(self, fixed, state, k=None, normalize=True):
log_p, _ = self._get_log_p(fixed, state, normalize=normalize)
# Return topk
if k is not None and k < log_p.size(-1):
return log_p.topk(k, -1)
# Return all, note different from torch.topk this does not give error if less than k elements along dim
return (
log_p,
torch.arange(log_p.size(-1), device=log_p.device, dtype=torch.int64).repeat(log_p.size(0), 1)[:, None, :]
)
def _get_log_p(self, fixed, state, normalize=True):
# Compute query = context node embedding
query = fixed.context_node_projected + \
self.project_step_context(self._get_parallel_step_context(fixed.node_embeddings, state))
# Compute keys and values for the nodes
glimpse_K, glimpse_V, logit_K = self._get_attention_node_data(fixed, state)
# Compute the mask
mask = state.get_mask()
# Compute logits (unnormalized log_p)
log_p, glimpse = self._one_to_many_logits(query, glimpse_K, glimpse_V, logit_K, mask)
if normalize:
log_p = torch.log_softmax(log_p / self.temp, dim=-1)
assert not torch.isnan(log_p).any()
return log_p, mask
    def _get_parallel_step_context(self, embeddings, state, from_depot=False):
        """
        Returns the context per step, optionally for multiple steps at once (for efficient evaluation of the model)

        The context depends on the problem variant: previous-node embedding plus
        remaining capacity (VRP), plus remaining length/prize (OP/PCTSP), or the
        first+current node embeddings (TSP, with a learned placeholder before
        the first step).

        :param embeddings: (batch_size, graph_size, embed_dim)
        :param state: problem state; its get_current_node() gives (batch_size, num_steps)
        :param from_depot: VRP only — build the context as if the previous node
            were the depot (used capacity 0)
        :return: (batch_size, num_steps, context_dim)
        """
        current_node = state.get_current_node()
        batch_size, num_steps = current_node.size()
        if self.is_vrp:
            # Embedding of previous node + remaining capacity
            if from_depot:
                # 1st dimension is node idx, but we do not squeeze it since we want to insert step dimension
                # i.e. we actually want embeddings[:, 0, :][:, None, :] which is equivalent
                return torch.cat(
                    (
                        embeddings[:, 0:1, :].expand(batch_size, num_steps, embeddings.size(-1)),
                        # used capacity is 0 after visiting depot
                        self.problem.VEHICLE_CAPACITY - torch.zeros_like(state.used_capacity[:, :, None])
                    ),
                    -1
                )
            else:
                # Gather the embedding of the current (= previous) node per step
                return torch.cat(
                    (
                        torch.gather(
                            embeddings,
                            1,
                            current_node.contiguous()
                            .view(batch_size, num_steps, 1)
                            .expand(batch_size, num_steps, embeddings.size(-1))
                        ).view(batch_size, num_steps, embeddings.size(-1)),
                        self.problem.VEHICLE_CAPACITY - state.used_capacity[:, :, None]
                    ),
                    -1
                )
        elif self.is_orienteering or self.is_pctsp:
            # Current-node embedding + one scalar: remaining tour length (OP)
            # or remaining prize still to collect (PCTSP)
            return torch.cat(
                (
                    torch.gather(
                        embeddings,
                        1,
                        current_node.contiguous()
                        .view(batch_size, num_steps, 1)
                        .expand(batch_size, num_steps, embeddings.size(-1))
                    ).view(batch_size, num_steps, embeddings.size(-1)),
                    (
                        state.get_remaining_length()[:, :, None]
                        if self.is_orienteering
                        else state.get_remaining_prize_to_collect()[:, :, None]
                    )
                ),
                -1
            )
        else:  # TSP
            if num_steps == 1:  # We need to special case if we have only 1 step, may be the first or not
                if state.i.item() == 0:
                    # First and only step, ignore prev_a (this is a placeholder)
                    return self.W_placeholder[None, None, :].expand(batch_size, 1, self.W_placeholder.size(-1))
                else:
                    # Concatenate the embeddings of the first and current nodes
                    # along the feature dimension (gather 2 nodes, then flatten)
                    return embeddings.gather(
                        1,
                        torch.cat((state.first_a, current_node), 1)[:, :, None].expand(batch_size, 2, embeddings.size(-1))
                    ).view(batch_size, 1, -1)
            # More than one step, assume always starting with first
            embeddings_per_step = embeddings.gather(
                1,
                current_node[:, 1:, None].expand(batch_size, num_steps - 1, embeddings.size(-1))
            )
            return torch.cat((
                # First step placeholder, cat in dim 1 (time steps)
                self.W_placeholder[None, None, :].expand(batch_size, 1, self.W_placeholder.size(-1)),
                # Second step, concatenate embedding of first with embedding of current/previous (in dim 2, context dim)
                torch.cat((
                    embeddings_per_step[:, 0:1, :].expand(batch_size, num_steps - 1, embeddings.size(-1)),
                    embeddings_per_step
                ), 2)
            ), 1)
    def _one_to_many_logits(self, query, glimpse_K, glimpse_V, logit_K, mask):
        """Multi-head glimpse over the nodes followed by single-head logits.

        :param query: (batch_size, num_steps, embed_dim) context query
        :param glimpse_K: keys as produced by _make_heads — presumably
            (n_heads, batch_size, num_steps, graph_size, key_size)
        :param glimpse_V: values, same leading shape as glimpse_K
        :param logit_K: single-head logit keys (last dim embed_dim)
        :param mask: truthy entries mark infeasible nodes
        :return: (logits, glimpse) with logits (batch_size, num_steps, graph_size)
        """
        batch_size, num_steps, embed_dim = query.size()
        key_size = val_size = embed_dim // self.n_heads
        # Compute the glimpse, rearrange dimensions so the dimensions are (n_heads, batch_size, num_steps, 1, key_size)
        glimpse_Q = query.view(batch_size, num_steps, self.n_heads, 1, key_size).permute(2, 0, 1, 3, 4)
        # Batch matrix multiplication to compute compatibilities (n_heads, batch_size, num_steps, graph_size)
        compatibility = torch.matmul(glimpse_Q, glimpse_K.transpose(-2, -1)) / math.sqrt(glimpse_Q.size(-1))
        if self.mask_inner:
            # Masking inside the glimpse only makes sense if the final logits
            # are masked as well, otherwise infeasible nodes could be selected
            assert self.mask_logits, "Cannot mask inner without masking logits"
            compatibility[mask[None, :, :, None, :].expand_as(compatibility)] = -math.inf
        # Batch matrix multiplication to compute heads (n_heads, batch_size, num_steps, val_size)
        heads = torch.matmul(torch.softmax(compatibility, dim=-1), glimpse_V)
        # Project to get glimpse/updated context node embedding (batch_size, num_steps, embedding_dim)
        glimpse = self.project_out(
            heads.permute(1, 2, 3, 0, 4).contiguous().view(-1, num_steps, 1, self.n_heads * val_size))
        # Now projecting the glimpse is not needed since this can be absorbed into project_out
        # final_Q = self.project_glimpse(glimpse)
        final_Q = glimpse
        # Batch matrix multiplication to compute logits (batch_size, num_steps, graph_size)
        # logits = 'compatibility'
        logits = torch.matmul(final_Q, logit_K.transpose(-2, -1)).squeeze(-2) / math.sqrt(final_Q.size(-1))
        # From the logits compute the probabilities by clipping, masking and softmax
        if self.tanh_clipping > 0:
            # Bound logits to [-C, C] to stabilize training
            logits = torch.tanh(logits) * self.tanh_clipping
        if self.mask_logits:
            logits[mask] = -math.inf
        return logits, glimpse.squeeze(-2)
def _get_attention_node_data(self, fixed, state):
if self.is_vrp and self.allow_partial:
# Need to provide information of how much each node has already been served
# Clone demands as they are needed by the backprop whereas they are updated later
glimpse_key_step, glimpse_val_step, logit_key_step = \
self.project_node_step(state.demands_with_depot[:, :, :, None].clone()).chunk(3, dim=-1)
# Projection of concatenation is equivalent to addition of projections but this is more efficient
return (
fixed.glimpse_key + self._make_heads(glimpse_key_step),
fixed.glimpse_val + self._make_heads(glimpse_val_step),
fixed.logit_key + logit_key_step,
)
# TSP or VRP without split delivery
return fixed.glimpse_key, fixed.glimpse_val, fixed.logit_key
def _make_heads(self, v, num_steps=None):
assert num_steps is None or v.size(1) == 1 or v.size(1) == num_steps
return (
v.contiguous().view(v.size(0), v.size(1), v.size(2), self.n_heads, -1)
.expand(v.size(0), v.size(1) if num_steps is None else num_steps, v.size(2), self.n_heads, -1)
.permute(3, 0, 1, 2, 4) # (n_heads, batch_size, num_steps, graph_size, head_dim)
)
| 22,485 | 42.49323 | 122 | py |
RESPECT | RESPECT-main/nets/graph_encoder.py | import torch
import numpy as np
from torch import nn
import math
class SkipConnection(nn.Module):
    """Residual wrapper: forward(x) = x + module(x)."""

    def __init__(self, module):
        super(SkipConnection, self).__init__()
        self.module = module

    def forward(self, input):
        residual = self.module(input)
        return input + residual
class MultiHeadAttention(nn.Module):
    """Multi-head (self-)attention over node embeddings.

    Scaled dot-product attention with per-head projection matrices stored as
    raw Parameters (one 3-D tensor per role) instead of nn.Linear modules.
    """

    def __init__(
            self,
            n_heads,
            input_dim,
            embed_dim=None,
            val_dim=None,
            key_dim=None
    ):
        super(MultiHeadAttention, self).__init__()
        if val_dim is None:
            assert embed_dim is not None, "Provide either embed_dim or val_dim"
            val_dim = embed_dim // n_heads
        if key_dim is None:
            key_dim = val_dim
        self.n_heads = n_heads
        self.input_dim = input_dim
        self.embed_dim = embed_dim
        self.val_dim = val_dim
        self.key_dim = key_dim
        self.norm_factor = 1 / math.sqrt(key_dim)  # See Attention is all you need
        self.W_query = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))
        self.W_key = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))
        self.W_val = nn.Parameter(torch.Tensor(n_heads, input_dim, val_dim))
        if embed_dim is not None:
            # NOTE(review): sized with key_dim, but forward() reshapes the heads
            # with val_dim; this only lines up because key_dim == val_dim unless
            # a caller passes an explicit key_dim != val_dim — confirm intended.
            self.W_out = nn.Parameter(torch.Tensor(n_heads, key_dim, embed_dim))
        self.init_parameters()

    def init_parameters(self):
        """Uniform initialization in +-1/sqrt(last dim) per parameter tensor."""
        for param in self.parameters():
            stdv = 1. / math.sqrt(param.size(-1))
            param.data.uniform_(-stdv, stdv)

    def forward(self, q, h=None, mask=None):
        """
        :param q: queries (batch_size, n_query, input_dim)
        :param h: data (batch_size, graph_size, input_dim)
        :param mask: mask (batch_size, n_query, graph_size) or viewable as that (i.e. can be 2 dim if n_query == 1)
        Mask should contain 1 if attention is not possible (i.e. mask is negative adjacency)
        :return: attention read-outs (batch_size, n_query, embed_dim)
        """
        if h is None:
            h = q  # compute self-attention
        # h should be (batch_size, graph_size, input_dim)
        batch_size, graph_size, input_dim = h.size()
        n_query = q.size(1)
        assert q.size(0) == batch_size
        assert q.size(2) == input_dim
        assert input_dim == self.input_dim, "Wrong embedding dimension of input"
        hflat = h.contiguous().view(-1, input_dim)
        qflat = q.contiguous().view(-1, input_dim)
        # last dimension can be different for keys and values
        shp = (self.n_heads, batch_size, graph_size, -1)
        shp_q = (self.n_heads, batch_size, n_query, -1)
        # Calculate queries, (n_heads, batch_size, n_query, key_size)
        Q = torch.matmul(qflat, self.W_query).view(shp_q)
        # Calculate keys and values (n_heads, batch_size, graph_size, key/val_size)
        K = torch.matmul(hflat, self.W_key).view(shp)
        V = torch.matmul(hflat, self.W_val).view(shp)
        # Calculate compatibility (n_heads, batch_size, n_query, graph_size)
        compatibility = self.norm_factor * torch.matmul(Q, K.transpose(2, 3))
        # Optionally apply mask to prevent attention
        if mask is not None:
            mask = mask.view(1, batch_size, n_query, graph_size).expand_as(compatibility)
            compatibility[mask] = -np.inf
        attn = torch.softmax(compatibility, dim=-1)
        # If there are nodes with no neighbours then softmax returns nan so we fix them to 0
        if mask is not None:
            attnc = attn.clone()
            attnc[mask] = 0
            attn = attnc
        heads = torch.matmul(attn, V)
        # Concatenate the heads and project back to embed_dim in a single matmul
        out = torch.mm(
            heads.permute(1, 2, 0, 3).contiguous().view(-1, self.n_heads * self.val_dim),
            self.W_out.view(-1, self.embed_dim)
        ).view(batch_size, n_query, self.embed_dim)
        return out
class Normalization(nn.Module):
    """Feature normalization over the embedding dimension.

    Supports 'batch' (BatchNorm1d) and 'instance' (InstanceNorm1d). Any other
    value disables normalization, matching the pass-through branch in forward.
    """

    def __init__(self, embed_dim, normalization='batch'):
        super(Normalization, self).__init__()
        normalizer_class = {
            'batch': nn.BatchNorm1d,
            'instance': nn.InstanceNorm1d
        }.get(normalization, None)
        # Fix: the original unconditionally called normalizer_class(...), which
        # raised TypeError for unknown normalization values even though
        # forward() explicitly supports self.normalizer being None.
        self.normalizer = normalizer_class(embed_dim, affine=True) if normalizer_class is not None else None
        # Normalization by default initializes affine parameters with bias 0 and weight unif(0,1) which is too large!
        # self.init_parameters()

    def init_parameters(self):
        """Uniform re-initialization of all parameters (not used by default)."""
        for name, param in self.named_parameters():
            stdv = 1. / math.sqrt(param.size(-1))
            param.data.uniform_(-stdv, stdv)

    def forward(self, input):
        if isinstance(self.normalizer, nn.BatchNorm1d):
            # BatchNorm1d expects (N, C): flatten batch/graph dims, then restore
            return self.normalizer(input.view(-1, input.size(-1))).view(*input.size())
        elif isinstance(self.normalizer, nn.InstanceNorm1d):
            # InstanceNorm1d expects (N, C, L): move the feature dim to the middle
            return self.normalizer(input.permute(0, 2, 1)).permute(0, 2, 1)
        else:
            assert self.normalizer is None, "Unknown normalizer type"
            return input
class MultiHeadAttentionLayer(nn.Sequential):
    """One encoder layer: attention + skip, norm, feed-forward + skip, norm."""

    def __init__(
            self,
            n_heads,
            embed_dim,
            feed_forward_hidden=512,
            normalization='batch',
    ):
        attention = SkipConnection(
            MultiHeadAttention(
                n_heads,
                input_dim=embed_dim,
                embed_dim=embed_dim
            )
        )
        # feed_forward_hidden <= 0 collapses the MLP to a single linear map
        if feed_forward_hidden > 0:
            feed_forward = nn.Sequential(
                nn.Linear(embed_dim, feed_forward_hidden),
                nn.ReLU(),
                nn.Linear(feed_forward_hidden, embed_dim),
            )
        else:
            feed_forward = nn.Linear(embed_dim, embed_dim)
        super(MultiHeadAttentionLayer, self).__init__(
            attention,
            Normalization(embed_dim, normalization),
            SkipConnection(feed_forward),
            Normalization(embed_dim, normalization),
        )
class GraphAttentionEncoder(nn.Module):
    """Stack of multi-head attention layers producing node and graph embeddings."""

    def __init__(
            self,
            n_heads,
            embed_dim,
            n_layers,
            node_dim=None,
            normalization='batch',
            feed_forward_hidden=512
    ):
        super(GraphAttentionEncoder, self).__init__()
        # Optional initial linear map from raw node features to embed_dim
        self.init_embed = None if node_dim is None else nn.Linear(node_dim, embed_dim)
        layer_stack = [
            MultiHeadAttentionLayer(n_heads, embed_dim, feed_forward_hidden, normalization)
            for _ in range(n_layers)
        ]
        self.layers = nn.Sequential(*layer_stack)

    def forward(self, x, mask=None):
        """Return (node_embeddings, graph_embedding); masking is unsupported."""
        assert mask is None, "TODO mask not yet supported!"
        if self.init_embed is None:
            h = x
        else:
            # Flatten batch/graph dims for the linear layer, then restore shape
            h = self.init_embed(x.view(-1, x.size(-1))).view(*x.size()[:2], -1)
        h = self.layers(h)
        # Node embeddings plus the mean-pooled graph embedding
        return h, h.mean(dim=1)
| 6,927 | 32.148325 | 117 | py |
RESPECT | RESPECT-main/nets/critic_network.py | from torch import nn
from nets.graph_encoder import GraphAttentionEncoder
class CriticNetwork(nn.Module):
    """Baseline value estimator: graph attention encoder + two-layer MLP head."""

    def __init__(
        self,
        input_dim,
        embedding_dim,
        hidden_dim,
        n_layers,
        encoder_normalization
    ):
        super(CriticNetwork, self).__init__()
        self.hidden_dim = hidden_dim
        # Encoder producing (node_embeddings, pooled graph embedding)
        self.encoder = GraphAttentionEncoder(
            node_dim=input_dim,
            n_heads=8,
            embed_dim=embedding_dim,
            n_layers=n_layers,
            normalization=encoder_normalization
        )
        head_layers = [
            nn.Linear(embedding_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        ]
        self.value_head = nn.Sequential(*head_layers)

    def forward(self, inputs):
        """
        :param inputs: (batch_size, graph_size, input_dim)
        :return: (batch_size, 1) value estimates
        """
        # Only the pooled graph embedding feeds the value head
        _, graph_embedding = self.encoder(inputs)
        return self.value_head(graph_embedding)
| 965 | 22.560976 | 58 | py |
RESPECT | RESPECT-main/nets/pointer_network_dataset_pick3.py | import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import numpy as np
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from utils import move_to
class Encoder(nn.Module):
    """LSTM encoder mapping an input sequence of node features to hidden states."""

    def __init__(self, input_dim, hidden_dim):
        super(Encoder, self).__init__()
        self.hidden_dim = hidden_dim
        self.lstm = nn.LSTM(input_dim, hidden_dim)
        # Learnable initial (h, c) state, shared across the batch
        self.init_hx, self.init_cx = self.init_hidden(hidden_dim)

    def forward(self, x, hidden):
        """Run the LSTM; x is (seq_len, batch, input_dim), hidden is (h0, c0)."""
        return self.lstm(x, hidden)

    def init_hidden(self, hidden_dim):
        """Create trainable initial hidden states, uniform in +-1/sqrt(hidden_dim)."""
        bound = 1. / math.sqrt(hidden_dim)
        h0 = nn.Parameter(torch.FloatTensor(hidden_dim))
        c0 = nn.Parameter(torch.FloatTensor(hidden_dim))
        for p in (h0, c0):
            p.data.uniform_(-bound, bound)
        return h0, c0
class Attention(nn.Module):
    """Bahdanau-style additive attention used by the pointer decoder."""

    def __init__(self, dim, use_tanh=False, C=10):
        super(Attention, self).__init__()
        self.use_tanh = use_tanh
        self.project_query = nn.Linear(dim, dim)
        # A 1x1 convolution acts as a position-wise linear map over references
        self.project_ref = nn.Conv1d(dim, dim, 1, 1)
        self.C = C  # tanh exploration / logit clipping scale
        self.tanh = nn.Tanh()
        self.v = nn.Parameter(torch.FloatTensor(dim))
        self.v.data.uniform_(-(1. / math.sqrt(dim)), 1. / math.sqrt(dim))

    def forward(self, query, ref):
        """
        Args:
            query: decoder hidden state at the current step, (batch, dim)
            ref: encoder hidden states, (sourceL, batch, hidden_dim)
        Returns:
            e: projected references, (batch, hidden_dim, sourceL)
            logits: attention scores, (batch, sourceL)
        """
        # (sourceL, batch, dim) -> (batch, dim, sourceL) as Conv1d expects
        ref = ref.permute(1, 2, 0)
        e = self.project_ref(ref)
        # Broadcast the projected query across all source positions
        q = self.project_query(query).unsqueeze(2)
        expanded_q = q.repeat(1, 1, e.size(2))
        # Score each position with the learned vector v:
        # (batch, 1, dim) x (batch, dim, sourceL) -> (batch, sourceL)
        v_view = self.v.unsqueeze(0).expand(
            expanded_q.size(0), len(self.v)).unsqueeze(1)
        u = torch.bmm(v_view, self.tanh(expanded_q + e)).squeeze(1)
        logits = self.C * self.tanh(u) if self.use_tanh else u
        return e, logits
class Decoder(nn.Module):
    """Pointer-network decoder: an LSTM cell plus glimpse/pointer attention.

    At each step the selected input embedding is fed through the LSTM cell,
    the hidden state is refined with n_glimpses attention glimpses, and the
    pointer attention produces logits over the not-yet-selected positions.
    """

    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 tanh_exploration,
                 use_tanh,
                 n_glimpses=1,
                 mask_glimpses=True,
                 mask_logits=True):
        super(Decoder, self).__init__()
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.n_glimpses = n_glimpses
        self.mask_glimpses = mask_glimpses
        self.mask_logits = mask_logits
        self.use_tanh = use_tanh
        self.tanh_exploration = tanh_exploration
        self.decode_type = None  # Needs to be set explicitly before use ('greedy' or 'sampling')
        #encoder_layers = TransformerEncoderLayer(embedding_dim, 1, hidden_dim, dropout=0.5)
        #self.transformer_encoder = TransformerEncoder(encoder_layers, 1)
        self.lstm = nn.LSTMCell(embedding_dim, hidden_dim)
        self.pointer = Attention(hidden_dim, use_tanh=use_tanh, C=tanh_exploration)
        self.glimpse = Attention(hidden_dim, use_tanh=False)
        self.sm = nn.Softmax(dim=1)

    def update_mask(self, mask, selected):
        """Return a copy of mask with the just-selected positions marked True."""
        return mask.clone().scatter_(1, selected.unsqueeze(-1), True)

    def recurrence(self, x, h_in, prev_mask, prev_idxs, step, context):
        """One decoding step: returns (hidden, log_p, probs, updated mask)."""
        # Mark the previous selection infeasible before computing new logits
        logit_mask = self.update_mask(prev_mask, prev_idxs) if prev_idxs is not None else prev_mask
        logits, h_out = self.calc_logits(x, h_in, logit_mask, context, self.mask_glimpses, self.mask_logits)
        # Calculate log_softmax for better numerical stability
        log_p = torch.log_softmax(logits, dim=1)
        probs = log_p.exp()
        if not self.mask_logits:
            # If self.mask_logits, this would be redundant, otherwise we must mask to make sure we don't resample
            # Note that as a result the vector of probs may not sum to one (this is OK for .multinomial sampling)
            # But practically by not masking the logits, a model is learned over all sequences (also infeasible)
            # while only during sampling feasibility is enforced (a.k.a. by setting to 0. here)
            probs[logit_mask] = 0.
            # For consistency we should also mask out in log_p, but the values set to 0 will not be sampled and
            # Therefore not be used by the reinforce estimator
        return h_out, log_p, probs, logit_mask

    def calc_logits(self, x, h_in, logit_mask, context, mask_glimpses=None, mask_logits=None):
        """Run the LSTM cell and the glimpse/pointer attention.

        :return: (logits over source positions, new (h, c) hidden state)
        """
        if mask_glimpses is None:
            mask_glimpses = self.mask_glimpses
        if mask_logits is None:
            mask_logits = self.mask_logits
        #x = self.transformer_encoder(x)
        hy, cy = self.lstm(x, h_in)
        g_l, h_out = hy, (hy, cy)
        for i in range(self.n_glimpses):
            ref, logits = self.glimpse(g_l, context)
            # For the glimpses, only mask before softmax so we have always an L1 norm 1 readout vector
            if mask_glimpses:
                logits[logit_mask] = -np.inf
            # [batch_size x h_dim x sourceL] * [batch_size x sourceL x 1] =
            # [batch_size x h_dim x 1]
            g_l = torch.bmm(ref, self.sm(logits).unsqueeze(2)).squeeze(2)
        _, logits = self.pointer(g_l, context)
        # Masking before softmax makes probs sum to one
        if mask_logits:
            logits[logit_mask] = -np.inf
        return logits, h_out

    def forward(self, decoder_input, embedded_inputs, hidden, context, eval_tours=None):
        """
        Args:
            decoder_input: The initial input to the decoder
                size is [batch_size x embedding_dim]. Trainable parameter.
            embedded_inputs: [sourceL x batch_size x embedding_dim]
            hidden: the prev hidden state, size is [batch_size x hidden_dim].
                Initially this is set to (enc_h[-1], enc_c[-1])
            context: encoder outputs, [sourceL x batch_size x hidden_dim]
            eval_tours: optional fixed tours; when given, selections are taken
                from them instead of being decoded (evaluation/teacher forcing)
        Returns:
            ((log_p per step, selected indices per step), final hidden state)
        """
        batch_size = context.size(1)
        outputs = []
        selections = []
        steps = range(embedded_inputs.size(0))
        idxs = None
        # Start with an all-False (byte) mask: nothing selected yet
        mask = Variable(
            embedded_inputs.data.new().byte().new(embedded_inputs.size(1), embedded_inputs.size(0)).zero_(),
            requires_grad=False
        )
        for i in steps:
            hidden, log_p, probs, mask = self.recurrence(decoder_input, hidden, mask, idxs, i, context)
            # select the next inputs for the decoder [batch_size x hidden_dim]
            idxs = self.decode(
                probs,
                mask
            ) if eval_tours is None else eval_tours[:, i]
            idxs = idxs.detach()  # Otherwise pytorch complains it want's a reward, todo implement this more properly?
            # Gather input embedding of selected
            decoder_input = torch.gather(
                embedded_inputs,
                0,
                idxs.contiguous().view(1, batch_size, 1).expand(1, batch_size, *embedded_inputs.size()[2:])
            ).squeeze(0)
            # use outs to point to next object
            outputs.append(log_p)
            selections.append(idxs)
        return (torch.stack(outputs, 1), torch.stack(selections, 1)), hidden

    def decode(self, probs, mask):
        """Pick indices from probs, greedy or by sampling; mask marks infeasible."""
        if self.decode_type == "greedy":
            _, idxs = probs.max(1)
            assert not mask.gather(1, idxs.unsqueeze(-1)).data.any(), \
                "Decode greedy: infeasible action has maximum probability"
        elif self.decode_type == "sampling":
            idxs = probs.multinomial(1).squeeze(1)
            # Check if sampling went OK, can go wrong due to bug on GPU
            while mask.gather(1, idxs.unsqueeze(-1)).data.any():
                print(' [!] resampling due to race condition')
                #idxs = probs.multinomial().squeeze(1)
                idxs = probs.multinomial(1).squeeze(1)
        else:
            assert False, "Unknown decode type"
        return idxs
class CriticNetworkLSTM(nn.Module):
    """Useful as a baseline in REINFORCE updates"""

    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 n_process_block_iters,
                 tanh_exploration,
                 use_tanh):
        super(CriticNetworkLSTM, self).__init__()
        self.hidden_dim = hidden_dim
        self.n_process_block_iters = n_process_block_iters
        self.encoder = Encoder(embedding_dim, hidden_dim)
        self.process_block = Attention(hidden_dim, use_tanh=use_tanh, C=tanh_exploration)
        self.sm = nn.Softmax(dim=1)
        self.decoder = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1)
        )

    def forward(self, inputs):
        """
        Args:
            inputs: [embedding_dim x batch_size x sourceL] of embedded inputs
        Returns:
            (batch_size, 1) baseline value estimates
        """
        inputs = inputs.transpose(0, 1).contiguous()
        batch = inputs.size(1)
        # Broadcast the trainable initial state over the batch
        h0 = self.encoder.init_hx.unsqueeze(0).repeat(batch, 1).unsqueeze(0)
        c0 = self.encoder.init_cx.unsqueeze(0).repeat(batch, 1).unsqueeze(0)
        # encoder forward pass
        enc_outputs, (enc_h_t, enc_c_t) = self.encoder(inputs, (h0, c0))
        # Refine the final hidden state with repeated attention "glimpses"
        state = enc_h_t[-1]
        for _ in range(self.n_process_block_iters):
            ref, logits = self.process_block(state, enc_outputs)
            state = torch.bmm(ref, self.sm(logits).unsqueeze(2)).squeeze(2)
        # produce the final scalar output
        return self.decoder(state)
class PointerNetwork(nn.Module):
    """Pointer network adapted for TSP / topological sorting.

    Selects a feature subset from the 11-column input, embeds it, encodes the
    sequence with an LSTM and decodes a permutation with pointer attention.
    """

    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 problem,
                 n_encode_layers=None,
                 tanh_clipping=10.,
                 mask_inner=True,
                 mask_logits=True,
                 normalization=None,
                 num_coordinates=11,
                 **kwargs):
        super(PointerNetwork, self).__init__()
        self.problem = problem
        assert problem.NAME == "tsp" or problem.NAME == "toposort", "Pointer Network only supported for TSP and TopoSort"
        self.input_dim = num_coordinates
        #self.input_dim = 5
        self.encoder = Encoder(
            embedding_dim,
            hidden_dim)
        self.decoder = Decoder(
            embedding_dim,
            hidden_dim,
            tanh_exploration=tanh_clipping,
            use_tanh=tanh_clipping > 0,
            n_glimpses=1,
            mask_glimpses=mask_inner,
            mask_logits=mask_logits
        )
        # Trainable initial hidden states
        std = 1. / math.sqrt(embedding_dim)
        self.decoder_in_0 = nn.Parameter(torch.FloatTensor(embedding_dim))
        self.decoder_in_0.data.uniform_(-std, std)
        # Learned linear embedding of the raw input features (as a Parameter)
        self.embedding = nn.Parameter(torch.FloatTensor(self.input_dim, embedding_dim))
        self.embedding.data.uniform_(-std, std)
        self.bn1 = nn.BatchNorm1d(embedding_dim)
        self.bn2 = nn.BatchNorm1d(hidden_dim)
        """
        self.bn3 = nn.BatchNorm1d(hidden_dim)
        encoder_layers = TransformerEncoderLayer(hidden_dim, 1, hidden_dim, dropout=0.5)
        self.transformer_encoder = TransformerEncoder(encoder_layers, 1)
        """

    def set_decode_type(self, decode_type):
        """Set 'greedy' or 'sampling' decoding on the decoder."""
        self.decoder.decode_type = decode_type

    def forward(self, inputs_11, labels, opts, eval_tours=None, return_pi=False, Measures=False, Plot_Data=False):
        """Embed the inputs, decode a permutation and compute its cost.

        :param inputs_11: (batch_size, graph_size, 11) raw node features; only
            columns 0, 9, 10 are fed to the model (see `indices` below)
        :param labels: ground-truth orderings forwarded to problem.get_costs
        :param opts: run options (device, graph_file)
        :param eval_tours: optional fixed tours for evaluation
        :param return_pi: also return the decoded permutation
        :return: cost, log-likelihood and several accuracy/radius measures
            as produced by problem.get_costs
        """
    #def forward(self, inputs, eval_tours=None, return_pi=False, Measures=False, Plot_Data=False):
        #batch_size, graph_size, input_dim = inputs.size()
        indices = torch.tensor([0, 9, 10])  # (level, index, memory) for input dim of dataset as 11
        inputs = torch.index_select(inputs_11, 2, indices)
        batch_size, graph_size, input_dim = inputs.size()
        """
        embedded_inputs = torch.mm(
            #inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            self.embedding
        ).view(graph_size, batch_size, -1)
        """
        #inputs_weight_free = inputs.detach().clone()
        #inputs_weight_free.index_fill_(2, move_to(torch.tensor([10]), opts.device), 0.)
        # Linear embedding followed by batch norm over the embedding dimension;
        # result is (sourceL, batch_size, embedding_dim) for the LSTM encoder
        embedded_inputs = self.bn1(torch.mm(
            inputs.transpose(0, 1).contiguous().view(-1, input_dim),
            #move_to(inputs_weight_free, opts.device).transpose(0, 1).contiguous().view(-1, input_dim),
            self.embedding
        ).view(graph_size, batch_size, -1).transpose(0, 1).transpose(1, 2)).transpose(1, 2).transpose(0, 1)
        #).view(graph_size, batch_size, -1).transpose(1, 2)).transpose(1, 2)
        # query the actor net for the input indices
        # making up the output, and the pointer attn
        _log_p, pi = self._inner(embedded_inputs, eval_tours)
        #cost, mask, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs, pi, Measures, Plot_Data)
        cost, mask, misMatch, _, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs_11, pi, labels, Measures, Plot_Data, opts.graph_file)
        #cost, mask, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = self.problem.get_costs(inputs, pi, labels, Measures, Plot_Data)
        # Log likelyhood is calculated within the model since returning it per action does not work well with
        # DataParallel since sequences can be of different lengths
        ll = self._calc_log_likelihood(_log_p, pi, mask)
        if return_pi:
            #return cost, ll, pi, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
            return cost, ll, pi, misMatch, None, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        #return cost, ll, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        return cost, ll, misMatch, None, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min

    def _calc_log_likelihood(self, _log_p, a, mask):
        """Sum the log-probabilities of the chosen actions per sequence."""
        # Get log_p corresponding to selected actions
        log_p = _log_p.gather(2, a.unsqueeze(-1)).squeeze(-1)
        # Optional: mask out actions irrelevant to objective so they do not get reinforced
        if mask is not None:
            log_p[mask] = 0
        assert (log_p > -1000).data.all(), "Logprobs should not be -inf, check sampling procedure!"
        # Calculate log_likelihood
        return log_p.sum(1)

    def _inner(self, inputs, eval_tours=None):
        """Run encoder + decoder and return (pointer log-probs, selected indices)."""
        # Zero initial (h, c) for the LSTM encoder
        encoder_hx = encoder_cx = Variable(
            torch.zeros(1, inputs.size(1), self.encoder.hidden_dim, out=inputs.data.new()),
            requires_grad=False
        )
        # encoder forward pass
        enc_h, (enc_h_t, enc_c_t) = self.encoder(inputs, (encoder_hx, encoder_cx))
        enc_h = self.bn2(enc_h.transpose(1, 2)).transpose(1, 2)
        dec_init_state = (enc_h_t[-1], enc_c_t[-1])
        # repeat decoder_in_0 across batch
        decoder_input = self.decoder_in_0.unsqueeze(0).repeat(inputs.size(1), 1)
        """
        enc_h = self.transformer_encoder(enc_h)
        enc_h = self.bn3(enc_h.transpose(1, 2)).transpose(1, 2)
        """
        (pointer_probs, input_idxs), dec_hidden_t = self.decoder(decoder_input,
                                                                 inputs,
                                                                 dec_init_state,
                                                                 enc_h,
                                                                 eval_tours)
        return pointer_probs, input_idxs
| 16,562 | 40.304239 | 201 | py |
RESPECT | RESPECT-main/problems/pctsp/state_pctsp.py | import torch
from typing import NamedTuple
from utils.boolmask import mask_long2bool, mask_long_scatter
import torch.nn.functional as F
bypass = super
class StatePCTSP(NamedTuple):
# Fixed input
coords: torch.Tensor # Depot + loc
expected_prize: torch.Tensor
real_prize: torch.Tensor
penalty: torch.Tensor
# If this state contains multiple copies (i.e. beam search) for the same instance, then for memory efficiency
# the coords and prizes tensors are not kept multiple times, so we need to use the ids to index the correct rows.
ids: torch.Tensor # Keeps track of original fixed data index of rows
# State
prev_a: torch.Tensor
visited_: torch.Tensor # Keeps track of nodes that have been visited
lengths: torch.Tensor
cur_total_prize: torch.Tensor
cur_total_penalty: torch.Tensor
cur_coord: torch.Tensor
i: torch.Tensor # Keeps track of step
@property
def visited(self):
#if self.visited_.dtype == torch.uint8:
if self.visited_.dtype == torch.tool:
return self.visited_
else:
return mask_long2bool(self.visited_, n=self.coords.size(-2))
@property
def dist(self):
return (self.coords[:, :, None, :] - self.coords[:, None, :, :]).norm(p=2, dim=-1)
def __getitem__(self, key):
if torch.is_tensor(key) or isinstance(key, slice): # If tensor, idx all tensors by this tensor:
return self._replace(
ids=self.ids[key],
prev_a=self.prev_a[key],
visited_=self.visited_[key],
lengths=self.lengths[key],
cur_total_prize=self.cur_total_prize[key],
cur_total_penalty=self.cur_total_penalty[key],
cur_coord=self.cur_coord[key],
)
#return super(StatePCTSP, self).__getitem__(key)
return bypass(StatePCTSP, self).__getitem__(key)
# Warning: cannot override len of NamedTuple, len should be number of fields, not batch size
# def __len__(self):
# return len(self.used_capacity)
    @staticmethod
    def initialize(input, visited_dtype=torch.uint8, stochastic=False):
        """Build the initial StatePCTSP from a problem-instance dict.

        :param input: dict with 'depot' (batch, 2), 'loc' (batch, n_loc, 2),
            'deterministic_prize', 'stochastic_prize' and 'penalty' tensors
        :param visited_dtype: dtype selecting the visited-mask representation.
            NOTE(review): the default is torch.uint8 but the branch below
            compares against torch.bool, so the default now selects the packed
            int64 mask — confirm this is intended after the uint8 -> bool
            migration.
        :param stochastic: if True the realized prizes are the stochastic ones
        """
        depot = input['depot']
        loc = input['loc']
        # For both deterministic and stochastic variant, model sees only deterministic (expected) prize
        expected_prize = input['deterministic_prize']
        # This is the prize that is actually obtained at each node
        real_prize = input['stochastic_prize' if stochastic else 'deterministic_prize']
        penalty = input['penalty']
        batch_size, n_loc, _ = loc.size()
        coords = torch.cat((depot[:, None, :], loc), -2)
        # For prize, prepend 0 (corresponding to depot) so we can gather efficiently
        real_prize_with_depot = torch.cat((torch.zeros_like(real_prize[:, :1]), real_prize), -1)
        penalty_with_depot = F.pad(penalty, (1, 0), mode='constant', value=0)
        return StatePCTSP(
            coords=coords,
            expected_prize=expected_prize,
            real_prize=real_prize_with_depot,
            penalty=penalty_with_depot,
            ids=torch.arange(batch_size, dtype=torch.int64, device=loc.device)[:, None],  # Add steps dimension
            prev_a=torch.zeros(batch_size, 1, dtype=torch.long, device=loc.device),
            visited_=(  # Visited as mask is easier to understand, as long more memory efficient
                # Keep visited_ with depot so we can scatter efficiently (if there is an action for depot)
                torch.zeros(
                    batch_size, 1, n_loc + 1,
                    #dtype=torch.uint8, device=loc.device
                    dtype=torch.bool, device=loc.device
                )
                #if visited_dtype == torch.uint8
                if visited_dtype == torch.bool
                else torch.zeros(batch_size, 1, (n_loc + 63) // 64, dtype=torch.int64, device=loc.device)  # Ceil
            ),
            lengths=torch.zeros(batch_size, 1, device=loc.device),
            cur_total_prize=torch.zeros(batch_size, 1, device=loc.device),
            cur_total_penalty=penalty.sum(-1)[:, None],  # Sum penalties (all when nothing is visited), add step dim
            cur_coord=input['depot'][:, None, :],  # Add step dimension
            i=torch.zeros(1, dtype=torch.int64, device=loc.device)  # Vector with length num_steps
        )
def get_remaining_prize_to_collect(self):
# returns the remaining prize to collect, or 0 if already collected the minimum (1.0)
return torch.clamp(1 - self.cur_total_prize, min=0)
def get_final_cost(self):
assert self.all_finished()
# assert self.visited_.
# We are at the depot so no need to add remaining distance
return self.lengths + self.cur_total_penalty
    def update(self, selected):
        """Advance the state one step by visiting node `selected` ((batch,) indices, 0 = depot)."""
        assert self.i.size(0) == 1, "Can only update if state represents single step"
        # Update the state
        selected = selected[:, None]  # Add dimension for step
        prev_a = selected
        # Add the length
        cur_coord = self.coords[self.ids, selected]
        lengths = self.lengths + (cur_coord - self.cur_coord).norm(p=2, dim=-1)  # (batch_dim, 1)
        # Add current total prize
        cur_total_prize = self.cur_total_prize + self.real_prize[self.ids, selected]
        # NOTE(review): cur_total_penalty is initialized to the sum of ALL penalties and
        # get_final_cost adds it to the length; ADDING the visited node's penalty here
        # (rather than subtracting it, as problem_pctsp's cost function does) looks
        # suspicious - verify intent against the upstream implementation.
        cur_total_penalty = self.cur_total_penalty + self.penalty[self.ids, selected]
        #if self.visited_.dtype == torch.uint8:
        if self.visited_.dtype == torch.bool:
            # Note: here we do not subtract one as we have to scatter so the first column allows scattering depot
            # Add one dimension since we write a single value
            visited_ = self.visited_.scatter(-1, prev_a[:, :, None], 1)
        else:
            # This works, by check_unset=False it is allowed to set the depot visited a second a time
            visited_ = mask_long_scatter(self.visited_, prev_a, check_unset=False)
        return self._replace(
            prev_a=prev_a, visited_=visited_,
            lengths=lengths, cur_total_prize=cur_total_prize, cur_total_penalty=cur_total_penalty, cur_coord=cur_coord,
            i=self.i + 1
        )
def all_finished(self):
# All must be returned to depot (and at least 1 step since at start also prev_a == 0)
# This is more efficient than checking the mask
return self.i.item() > 0 and (self.prev_a == 0).all()
# return self.visited[:, :, 0].all() # If we have visited the depot we're done
def get_current_node(self):
"""
Returns the current node where 0 is depot, 1...n are nodes
:return: (batch_size, num_steps) tensor with current nodes
"""
return self.prev_a
    def get_mask(self):
        """
        Gets a (batch_size, n_loc + 1) mask with the feasible actions (0 = depot), depends on already visited and
        remaining capacity. 0 = feasible, 1 = infeasible
        Forbids to visit depot twice in a row, unless all nodes have been visited
        :return:
        """
        # Note: this always allows going to the depot, but that should always be suboptimal so be ok
        # Cannot visit if already visited or if the depot has already been visited then we cannot visit anymore
        visited_ = self.visited
        # Broadcasting visited_[:, :, 0:1] (the depot column) marks every node as
        # infeasible once the depot has been revisited, i.e. the tour has ended
        mask = (
            visited_ | visited_[:, :, 0:1]
        )
        # Cannot visit depot if not yet collected 1 total prize and there are unvisited nodes
        mask[:, :, 0] = (self.cur_total_prize < 1.) & (visited_[:, :, 1:].int().sum(-1) < visited_[:, :, 1:].size(-1))
        return mask > 0  # Hacky way to return bool or uint8 depending on pytorch version
def construct_solutions(self, actions):
return actions
# --- RESPECT-main/problems/pctsp/problem_pctsp.py ---
from torch.utils.data import Dataset
import torch
import os
import pickle
from problems.pctsp.state_pctsp import StatePCTSP
from utils.beam_search import beam_search
class PCTSP(object):
    # Problem definition for the Prize Collecting TSP shared by the deterministic
    # and stochastic variants below; subclasses select which prize vector is used.
    NAME = 'pctsp'  # Prize Collecting TSP, without depot, with penalties
    @staticmethod
    def _get_costs(dataset, pi, stochastic=False):
        # Cost of a batch of tours `pi` (0 = depot): tour length plus penalties of
        # unvisited nodes. Returns (cost, None) like the other problem classes.
        if pi.size(-1) == 1:  # In case all tours directly return to depot, prevent further problems
            assert (pi == 0).all(), "If all length 1 tours, they should be zero"
            # Return
            return torch.zeros(pi.size(0), dtype=torch.float, device=pi.device), None
        # Check that tours are valid, i.e. contain 0 to n -1
        sorted_pi = pi.data.sort(1)[0]
        # Make sure each node visited once at most (except for depot)
        assert ((sorted_pi[:, 1:] == 0) | (sorted_pi[:, 1:] > sorted_pi[:, :-1])).all(), "Duplicates"
        prize = dataset['stochastic_prize'] if stochastic else dataset['deterministic_prize']
        # Prepend a zero prize for the depot so pi (which uses index 0 for the depot) can gather directly
        prize_with_depot = torch.cat(
            (
                torch.zeros_like(prize[:, :1]),
                prize
            ),
            1
        )
        p = prize_with_depot.gather(1, pi)
        # Either prize constraint should be satisfied or all prizes should be visited
        assert (
            (p.sum(-1) >= 1 - 1e-5) |
            (sorted_pi.size(-1) - (sorted_pi == 0).int().sum(-1) == dataset['loc'].size(-2))
        ).all(), "Total prize does not satisfy min total prize"
        penalty_with_depot = torch.cat(
            (
                torch.zeros_like(dataset['penalty'][:, :1]),
                dataset['penalty']
            ),
            1
        )
        pen = penalty_with_depot.gather(1, pi)
        # Gather dataset in order of tour
        loc_with_depot = torch.cat((dataset['depot'][:, None, :], dataset['loc']), 1)
        d = loc_with_depot.gather(1, pi[..., None].expand(*pi.size(), loc_with_depot.size(-1)))
        length = (
            (d[:, 1:] - d[:, :-1]).norm(p=2, dim=-1).sum(1)  # Prevent error if len 1 seq
            + (d[:, 0] - dataset['depot']).norm(p=2, dim=-1)  # Depot to first
            + (d[:, -1] - dataset['depot']).norm(p=2, dim=-1)  # Last to depot, will be 0 if depot is last
        )
        # We want to maximize total prize but code minimizes so return negative
        # Incurred penalty cost is total penalty cost - saved penalty costs of nodes visited
        return length + dataset['penalty'].sum(-1) - pen.sum(-1), None
    @staticmethod
    def make_dataset(*args, **kwargs):
        # Factory used by the generic training code
        return PCTSPDataset(*args, **kwargs)
    @staticmethod
    def beam_search(input, beam_size, expand_size=None,
                    compress_mask=False, model=None, max_calc_batch_size=4096):
        # Beam search over rollout states; candidate expansions are scored by the model
        assert model is not None, "Provide model"
        fixed = model.precompute_fixed(input)
        def propose_expansions(beam):
            return model.propose_expansions(
                beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
            )
        # With beam search we always consider the deterministic case
        state = PCTSPDet.make_state(
            #input, visited_dtype=torch.int64 if compress_mask else torch.uint8
            input, visited_dtype=torch.int64 if compress_mask else torch.bool
        )
        return beam_search(state, beam_size, propose_expansions)
class PCTSPDet(PCTSP):
    """Deterministic PCTSP variant: all prizes are known up front."""
    @staticmethod
    def get_costs(dataset, pi):
        # Delegate to the shared implementation using the deterministic prize vector.
        return PCTSP._get_costs(dataset, pi, stochastic=False)
    @staticmethod
    def make_state(*args, **kwargs):
        # Build the rollout state in deterministic mode.
        return StatePCTSP.initialize(*args, **kwargs, stochastic=False)
class PCTSPStoch(PCTSP):
    """Stochastic PCTSP: the real (stochastic) prize is only revealed when a node is visited."""
    @staticmethod
    def get_costs(dataset, pi):
        # Delegate to the shared implementation using the stochastic prize vector.
        return PCTSP._get_costs(dataset, pi, stochastic=True)
    @staticmethod
    def make_state(*args, **kwargs):
        # Build the rollout state in stochastic mode.
        return StatePCTSP.initialize(*args, **kwargs, stochastic=True)
def generate_instance(size, penalty_factor=3):
    """Sample a single random PCTSP instance with `size` nodes in the unit square.

    :param size: number of customer nodes (must be 20, 50 or 100)
    :param penalty_factor: scales the per-node penalty cap (3 works well)
    :return: dict with depot/loc coordinates, penalties and both prize vectors
    """
    depot = torch.rand(2)
    loc = torch.rand(size, 2)
    # Penalties should be neither too large (all nodes visited) nor too small; the
    # heuristic (as for the op problem) is to make the expected total penalty of
    # roughly half of the nodes comparable to half the tour length, which gives a
    # per-node cap of penalty_factor * max_length / n.
    tour_length_estimate = {
        20: 2.,
        50: 3.,
        100: 4.
    }
    per_node_penalty_cap = tour_length_estimate[size] * penalty_factor / float(size)
    penalty = torch.rand(size) * per_node_penalty_cap
    # Uniform prizes scaled by 4 / n: the expected total prize is then 2, so the
    # constraint "total collected prize >= 1" forces visiting about half the nodes.
    deterministic_prize = torch.rand(size) * 4 / float(size)
    # The deterministic prize is always known up front; in the stochastic setting it
    # is the expectation, while the stochastic prize - only revealed on visiting - is
    # drawn in (0, 2 * deterministic_prize) so that its expectation matches.
    stochastic_prize = torch.rand(size) * deterministic_prize * 2
    return {
        'depot': depot,
        'loc': loc,
        'penalty': penalty,
        'deterministic_prize': deterministic_prize,
        'stochastic_prize': stochastic_prize
    }
class PCTSPDataset(Dataset):
    """In-memory PCTSP dataset: loaded from a pickle file or sampled on the fly."""
    def __init__(self, filename=None, size=50, num_samples=1000000, offset=0, distribution=None):
        """
        :param filename: optional .pkl file with tuples (depot, loc, penalty, det_prize, stoch_prize)
        :param size: nodes per sampled instance (ignored when loading from file)
        :param num_samples: number of instances to load/sample
        :param offset: leading instances to skip when loading from file
        :param distribution: unused; kept for interface compatibility with other problems
        """
        super(PCTSPDataset, self).__init__()
        self.data_set = []  # kept for backward compatibility; not used
        if filename is not None:
            assert os.path.splitext(filename)[1] == '.pkl'
            with open(filename, 'rb') as f:
                data = pickle.load(f)
            self.data = [
                {
                    'depot': torch.FloatTensor(depot),
                    'loc': torch.FloatTensor(loc),
                    'penalty': torch.FloatTensor(penalty),
                    'deterministic_prize': torch.FloatTensor(deterministic_prize),
                    # Use FloatTensor like the other fields so the dtype is consistently
                    # float32 (torch.tensor would preserve e.g. float64 numpy input).
                    'stochastic_prize': torch.FloatTensor(stochastic_prize)
                }
                for depot, loc, penalty, deterministic_prize, stochastic_prize in (data[offset:offset+num_samples])
            ]
        else:
            self.data = [
                generate_instance(size)
                for i in range(num_samples)
            ]
        self.size = len(self.data)
    def __len__(self):
        return self.size
    def __getitem__(self, idx):
        return self.data[idx]
# --- RESPECT-main/problems/tsp/problem_tsp.py ---
from torch.utils.data import Dataset
import torch,random
import os
import pickle
from problems.tsp.state_tsp import StateTSP
from utils.beam_search import beam_search
class TSP(object):
    """TSP-style problem where the cost measures how badly the visiting order sorts
    the nodes by their y-coordinate: 1 - cosine similarity between the argsort of
    the visited y-coordinates and the identity order (0 = perfectly sorted)."""
    NAME = 'tsp'
    @staticmethod
    def get_costs(dataset, pi):
        """
        :param dataset: (batch, n, 2) node coordinates
        :param pi: (batch, n) long tensor, a permutation of 0..n-1 per instance
        :return: (cost, None) with cost of shape (batch,)
        """
        # Check that tours are valid, i.e. contain 0 to n -1
        assert (
            torch.arange(pi.size(1), out=pi.data.new()).view(1, -1).expand_as(pi) ==
            pi.data.sort(1)[0]
        ).all(), "Invalid tour"
        # Gather dataset in order of tour
        d = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
        # y-coordinate of every node in visiting order
        y_ = d[:, :, 1].view(d.shape[0], d.shape[1])
        # Reference order 0..n-1, built on the same device/dtype as the input instead
        # of hard-coding .cuda(), so this also runs on CPU.
        idx = torch.arange(y_.size(1), dtype=y_.dtype, device=y_.device).expand_as(y_)
        # Argsort of the y-coordinates; cast to float for the similarity computation
        # (torch.sort indices are int64). Avoid shadowing the builtin `sorted`.
        order = torch.sort(y_, dim=1)[1]
        cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        similarity = cos(order.to(y_.dtype), idx)
        return 1 - similarity, None
    @staticmethod
    def make_dataset(*args, **kwargs):
        # Factory used by the generic training code
        return TSPDataset(*args, **kwargs)
    @staticmethod
    def make_state(*args, **kwargs):
        # Build the rollout state for a batch of instances
        return StateTSP.initialize(*args, **kwargs)
    @staticmethod
    def beam_search(input, beam_size, expand_size=None,
                    compress_mask=False, model=None, max_calc_batch_size=4096):
        """Model-guided beam search; candidate expansions are scored by the model."""
        assert model is not None, "Provide model"
        fixed = model.precompute_fixed(input)
        def propose_expansions(beam):
            return model.propose_expansions(
                beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
            )
        state = TSP.make_state(
            input, visited_dtype=torch.int64 if compress_mask else torch.bool
        )
        return beam_search(state, beam_size, propose_expansions)
class TSPDataset(Dataset):
    """Dataset of synthetic "graphs": each instance is a (size, 2) tensor whose row i
    is [i, i + U{1..10}], L2-normalized per row (or instances loaded from a pickle)."""
    # 50, 1000000
    def __init__(self, filename=None, size=25, num_samples=1000, offset=0, distribution=None):
        """
        :param filename: optional .pkl file with a list of (size, 2) coordinate arrays
        :param size: nodes per generated instance (ignored when loading from file)
        :param num_samples: number of instances to load/generate
        :param offset: leading instances to skip when loading from file
        :param distribution: unused; kept for interface compatibility
        """
        super(TSPDataset, self).__init__()
        self.data_set = []
        if filename is not None:
            assert os.path.splitext(filename)[1] == '.pkl'
            with open(filename, 'rb') as f:
                data = pickle.load(f)
            self.data = [torch.FloatTensor(row) for row in (data[offset:offset+num_samples])]
        else:
            # Node `node` sits at (node, node + r) with r ~ U{1..10}; each row is then
            # L2-normalized. Distinct loop variables fix the original code's shadowing
            # of the sample index by the inner node index (both were named `i`).
            self.data = []
            for _ in range(num_samples):
                graph = [[node, node + random.randint(1, 10)] for node in range(size)]
                self.data.append(torch.nn.functional.normalize(torch.FloatTensor(graph)))
        self.size = len(self.data)
    def __len__(self):
        return self.size
    def __getitem__(self, idx):
        return self.data[idx]
# --- RESPECT-main/problems/tsp/tsp_baseline.py ---
import argparse
import numpy as np
import os
import time
from datetime import timedelta
from scipy.spatial import distance_matrix
from utils import run_all_in_pool
from utils.data_utils import check_extension, load_dataset, save_dataset
from subprocess import check_call, check_output, CalledProcessError
from problems.vrp.vrp_baseline import get_lkh_executable
import torch
from tqdm import tqdm
import re
def solve_gurobi(directory, name, loc, disable_cache=False, timeout=None, gap=None):
    """Solve a Euclidean TSP instance exactly with Gurobi (optionally time/gap limited).

    Results are cached as a pickle in `directory`; returns (cost, tour, duration)
    or None if solving failed.
    """
    # Lazy import so we do not need to have gurobi installed to run this script
    from problems.tsp.tsp_gurobi import solve_euclidian_tsp as solve_euclidian_tsp_gurobi
    try:
        # The cache filename encodes the timeout / gap settings so different configs don't collide
        problem_filename = os.path.join(directory, "{}.gurobi{}{}.pkl".format(
            name, "" if timeout is None else "t{}".format(timeout), "" if gap is None else "gap{}".format(gap)))
        if os.path.isfile(problem_filename) and not disable_cache:
            (cost, tour, duration) = load_dataset(problem_filename)
        else:
            # 0 = start, 1 = end so add depot twice
            start = time.time()
            cost, tour = solve_euclidian_tsp_gurobi(loc, threads=1, timeout=timeout, gap=gap)
            duration = time.time() - start  # Measure clock time
            save_dataset((cost, tour, duration), problem_filename)
        # First and last node are depot(s), so first node is 2 but should be 1 (as depot is 0) so subtract 1
        total_cost = calc_tsp_length(loc, tour)
        assert abs(total_cost - cost) <= 1e-5, "Cost is incorrect"
        return total_cost, tour, duration
    except Exception as e:
        # For some stupid reason, sometimes OR tools cannot find a feasible solution?
        # By letting it fail we do not get total results, but we dcan retry by the caching mechanism
        print("Exception occured")
        print(e)
        return None
def solve_concorde_log(executable, directory, name, loc, disable_cache=False):
    """Solve a TSP instance with the Concorde solver, caching results on disk.

    Writes the instance in TSPLIB format, invokes Concorde, parses the tour and
    returns (cost, tour, duration) or None on failure.
    """
    problem_filename = os.path.join(directory, "{}.tsp".format(name))
    tour_filename = os.path.join(directory, "{}.tour".format(name))
    output_filename = os.path.join(directory, "{}.concorde.pkl".format(name))
    log_filename = os.path.join(directory, "{}.log".format(name))
    # if True:
    try:
        # May have already been run
        if os.path.isfile(output_filename) and not disable_cache:
            tour, duration = load_dataset(output_filename)
        else:
            write_tsplib(problem_filename, loc, name=name)
            with open(log_filename, 'w') as f:
                start = time.time()
                try:
                    # Concorde is weird, will leave traces of solution in current directory so call from target dir
                    check_call([executable, '-s', '1234', '-x', '-o',
                                os.path.abspath(tour_filename), os.path.abspath(problem_filename)],
                               stdout=f, stderr=f, cwd=directory)
                except CalledProcessError as e:
                    # Somehow Concorde returns 255
                    assert e.returncode == 255
                duration = time.time() - start
            tour = read_concorde_tour(tour_filename)
            save_dataset((tour, duration), output_filename)
        return calc_tsp_length(loc, tour), tour, duration
    except Exception as e:
        print("Exception occured")
        print(e)
        return None
def solve_lkh_log(executable, directory, name, loc, runs=1, disable_cache=False):
    """Solve a TSP instance with the LKH heuristic solver, caching results on disk.

    :param runs: LKH RUNS parameter (also part of the cache filenames)
    :return: (cost, tour, duration) or None on failure
    """
    problem_filename = os.path.join(directory, "{}.lkh{}.vrp".format(name, runs))
    tour_filename = os.path.join(directory, "{}.lkh{}.tour".format(name, runs))
    output_filename = os.path.join(directory, "{}.lkh{}.pkl".format(name, runs))
    param_filename = os.path.join(directory, "{}.lkh{}.par".format(name, runs))
    log_filename = os.path.join(directory, "{}.lkh{}.log".format(name, runs))
    try:
        # May have already been run
        if os.path.isfile(output_filename) and not disable_cache:
            tour, duration = load_dataset(output_filename)
        else:
            write_tsplib(problem_filename, loc, name=name)
            params = {"PROBLEM_FILE": problem_filename, "OUTPUT_TOUR_FILE": tour_filename, "RUNS": runs, "SEED": 1234}
            write_lkh_par(param_filename, params)
            with open(log_filename, 'w') as f:
                start = time.time()
                check_call([executable, param_filename], stdout=f, stderr=f)
                duration = time.time() - start
            tour = read_tsplib(tour_filename)
            save_dataset((tour, duration), output_filename)
        return calc_tsp_length(loc, tour), tour, duration
    except Exception as e:
        print("Exception occured")
        print(e)
        return None
def write_lkh_par(filename, parameters):
    """Write an LKH parameter file; entries in `parameters` override the defaults.

    A value of None writes the key as a bare flag line instead of "KEY = VALUE".
    """
    defaults = {  # Use none to include as flag instead of kv
        "MAX_TRIALS": 10000,
        "RUNS": 10,
        "TRACE_LEVEL": 1,
        "SEED": 0
    }
    merged = {**defaults, **parameters}
    with open(filename, 'w') as f:
        for key, value in merged.items():
            line = key if value is None else "{} = {}".format(key, value)
            f.write(line + "\n")
def write_tsplib(filename, loc, name="problem"):
    """Write 2D coordinates as a TSPLIB (EUC_2D) problem file.

    Coordinates are scaled by 1e7 and rounded since tsplib does not take floats.
    """
    header = [
        "{} : {}".format(key, value)
        for key, value in (
            ("NAME", name),
            ("TYPE", "TSP"),
            ("DIMENSION", len(loc)),
            ("EDGE_WEIGHT_TYPE", "EUC_2D"),
        )
    ]
    coord_lines = [
        "{}\t{}\t{}".format(i + 1, int(x * 10000000 + 0.5), int(y * 10000000 + 0.5))
        for i, (x, y) in enumerate(loc)
    ]
    with open(filename, 'w') as f:
        f.write("\n".join(header))
        f.write("\n")
        f.write("NODE_COORD_SECTION\n")
        f.write("\n".join(coord_lines))
        f.write("\n")
        f.write("EOF\n")
def read_concorde_tour(filename):
    """Parse a Concorde tour file.

    The first whitespace-separated token is the number of nodes; the remaining
    tokens (possibly spread over several lines) are the node indices.

    :return: list of int node indices
    :raises AssertionError: if the parsed count does not match the header
    """
    with open(filename, 'r') as f:
        n = None
        tour = []
        for line in f:
            if n is None:
                n = int(line)
            else:
                # split() without an argument so repeated/trailing whitespace does
                # not produce empty tokens (split(" ") crashed on int('')).
                tour.extend([int(node) for node in line.split()])
    assert len(tour) == n, "Unexpected tour length"
    return tour
def read_tsplib(filename):
    """Parse the tour from a TSPLIB tour file and return it 0-based.

    Nodes are listed one per line after TOUR_SECTION, terminated by -1; TSPLIB
    numbers nodes from 1, so every index is shifted down by one.
    """
    nodes = []
    expected_len = 0
    in_tour_section = False
    with open(filename, 'r') as f:
        for raw in f:
            if in_tour_section:
                node = int(raw)
                if node == -1:
                    break
                nodes.append(node)
            if raw.startswith("DIMENSION"):
                expected_len = int(raw.split(" ")[-1])
            if raw.startswith("TOUR_SECTION"):
                in_tour_section = True
    assert len(nodes) == expected_len
    # Subtract 1 as depot is 1 and should be 0
    return (np.array(nodes).astype(int) - 1).tolist()
def calc_tsp_length(loc, tour):
    """Total length of the closed tour visiting `loc` in the order given by `tour`."""
    assert len(np.unique(tour)) == len(tour), "Tour cannot contain duplicates"
    assert len(tour) == len(loc)
    # Close the loop by appending the first node, then sum consecutive leg lengths
    order = np.concatenate((tour, [tour[0]]))
    pts = np.array(loc)[order]
    return np.linalg.norm(pts[1:] - pts[:-1], axis=-1).sum()
def _calc_insert_cost(D, prv, nxt, ins):
"""
Calculates insertion costs of inserting ins between prv and nxt
:param D: distance matrix
:param prv: node before inserted node, can be vector
:param nxt: node after inserted node, can be vector
:param ins: node to insert
:return:
"""
return (
D[prv, ins]
+ D[ins, nxt]
- D[prv, nxt]
)
def run_insertion(loc, method):
    """Construct a TSP tour by repeated insertion.

    :param loc: (n, 2) array-like of coordinates
    :param method: 'random' | 'nearest' | 'farthest' ('cheapest' is not implemented);
        determines the order in which nodes are picked for insertion
    :return: (cost, tour) where cost is the closed-tour length
    """
    n = len(loc)
    D = distance_matrix(loc, loc)
    mask = np.zeros(n, dtype=bool)
    tour = []  # np.empty((0, ), dtype=int)
    for i in range(n):
        feas = mask == 0
        feas_ind = np.flatnonzero(mask == 0)
        if method == 'random':
            # Order of instance is random so do in order for deterministic results
            a = i
        elif method == 'nearest':
            if i == 0:
                a = 0  # order does not matter so first is random
            else:
                a = feas_ind[D[np.ix_(feas, ~feas)].min(1).argmin()] # node nearest to any in tour
        elif method == 'cheapest':
            assert False, "Not yet implemented" # try all and find cheapest insertion cost
        elif method == 'farthest':
            if i == 0:
                a = D.max(1).argmax()  # Node with farthest distance to any other node
            else:
                a = feas_ind[D[np.ix_(feas, ~feas)].min(1).argmax()]  # node which has closest node in tour farthest
        mask[a] = True
        if len(tour) == 0:
            tour = [a]
        else:
            # Find index with least insert cost
            ind_insert = np.argmin(
                _calc_insert_cost(
                    D,
                    tour,
                    np.roll(tour, -1),
                    a
                )
            )
            tour.insert(ind_insert + 1, a)
    # Closed-tour cost: distance from each node to its successor (wrapping around)
    cost = D[tour, np.roll(tour, -1)].sum()
    return cost, tour
def solve_insertion(directory, name, loc, method='random'):
    """Wrapper matching the generic solver interface: run an insertion heuristic and time it.

    `directory` and `name` are unused (no caching) but kept for interface compatibility.
    :return: (cost, tour, duration)
    """
    t_start = time.time()
    cost, tour = run_insertion(loc, method)
    elapsed = time.time() - t_start
    return cost, tour, elapsed
def calc_batch_pdist(dataset):
    """Pairwise Euclidean distance matrix for every instance in the batch.

    :param dataset: (batch, n, d) tensor of coordinates
    :return: (batch, n, n) tensor of distances
    """
    diff = dataset[:, :, None, :] - dataset[:, None, :, :]
    return diff.pow(2).sum(-1).sqrt()
def nearest_neighbour(dataset, start='first'):
    """Greedy nearest-neighbour tour construction for a batch of TSP instances.

    :param dataset: (batch, n, 2) tensor of coordinates
    :param start: 'first' | 'random' | 'center', or a (batch,) tensor of start nodes
    :return: (total_dist, tour) with total_dist of shape (batch,) and tour (batch, n)
    """
    dist = calc_batch_pdist(dataset)
    batch_size, graph_size, _ = dataset.size()
    total_dist = dataset.new(batch_size).zero_()
    if not isinstance(start, torch.Tensor):
        if start == 'random':
            start = dataset.new().long().new(batch_size).zero_().random_(0, graph_size)
        elif start == 'first':
            start = dataset.new().long().new(batch_size).zero_()
        elif start == 'center':
            _, start = dist.mean(2).min(1)  # Minimum total distance to others
        else:
            assert False, "Unknown start: {}".format(start)
    current = start
    # Remember distances to the start node before `dist` is mutated below,
    # so the closing leg of the tour can be added at the end
    dist_to_startnode = torch.gather(dist, 2, current.view(-1, 1, 1).expand(batch_size, graph_size, 1)).squeeze(2)
    tour = [current]
    for i in range(graph_size - 1):
        # Mark out current node as option (in-place: its column becomes inf)
        dist.scatter_(2, current.view(-1, 1, 1).expand(batch_size, graph_size, 1), np.inf)
        nn_dist = torch.gather(dist, 1, current.view(-1, 1, 1).expand(batch_size, 1, graph_size)).squeeze(1)
        min_nn_dist, current = nn_dist.min(1)
        total_dist += min_nn_dist
        tour.append(current)
    # Close the tour: distance from the last node back to the start node
    total_dist += torch.gather(dist_to_startnode, 1, current.view(-1, 1)).squeeze(1)
    return total_dist, torch.stack(tour, dim=1)
def solve_all_nn(dataset_path, eval_batch_size=1024, no_cuda=False, dataset_n=None, progress_bar_mininterval=0.1):
    """Run the nearest-neighbour heuristic over an entire dataset in batches.

    :return: (results, parallelism) where results holds one (cost, tour, duration)
        tuple per instance and parallelism equals the batch size used
    """
    import torch
    from torch.utils.data import DataLoader
    from problems import TSP
    from utils import move_to
    dataloader = DataLoader(
        TSP.make_dataset(filename=dataset_path, num_samples=dataset_n if dataset_n is not None else 1000000),
        batch_size=eval_batch_size
    )
    device = torch.device("cuda:0" if torch.cuda.is_available() and not no_cuda else "cpu")
    results = []
    for batch in tqdm(dataloader, mininterval=progress_bar_mininterval):
        start = time.time()
        batch = move_to(batch, device)
        lengths, tours = nearest_neighbour(batch)
        # Cross-check the incrementally accumulated lengths against the problem's cost function
        lengths_check, _ = TSP.get_costs(batch, tours)
        assert (torch.abs(lengths - lengths_check.data) < 1e-5).all()
        duration = time.time() - start
        results.extend(
            [(cost.item(), np.trim_zeros(pi.cpu().numpy(), 'b'), duration) for cost, pi in zip(lengths, tours)])
    return results, eval_batch_size
if __name__ == "__main__":
    # CLI entry point: evaluate one of the baseline solvers on one or more datasets.
    parser = argparse.ArgumentParser()
    parser.add_argument("method",
                        help="Name of the method to evaluate, 'nn', 'gurobi' or '(nearest|random|farthest)_insertion'")
    parser.add_argument("datasets", nargs='+', help="Filename of the dataset(s) to evaluate")
    parser.add_argument("-f", action='store_true', help="Set true to overwrite")
    parser.add_argument("-o", default=None, help="Name of the results file to write")
    parser.add_argument("--cpus", type=int, help="Number of CPUs to use, defaults to all cores")
    parser.add_argument('--no_cuda', action='store_true', help='Disable CUDA (only for Tsiligirides)')
    parser.add_argument('--disable_cache', action='store_true', help='Disable caching')
    parser.add_argument('--max_calc_batch_size', type=int, default=1000, help='Size for subbatches')
    parser.add_argument('--progress_bar_mininterval', type=float, default=0.1, help='Minimum interval')
    parser.add_argument('-n', type=int, help="Number of instances to process")
    parser.add_argument('--offset', type=int, help="Offset where to start processing")
    parser.add_argument('--results_dir', default='results', help="Name of results directory")
    opts = parser.parse_args()
    assert opts.o is None or len(opts.datasets) == 1, "Cannot specify result filename with more than one dataset"
    for dataset_path in opts.datasets:
        assert os.path.isfile(check_extension(dataset_path)), "File does not exist!"
        dataset_basename, ext = os.path.splitext(os.path.split(dataset_path)[-1])
        if opts.o is None:
            # Default output path encodes the dataset, offset/count and method
            results_dir = os.path.join(opts.results_dir, "tsp", dataset_basename)
            os.makedirs(results_dir, exist_ok=True)
            out_file = os.path.join(results_dir, "{}{}{}-{}{}".format(
                dataset_basename,
                "offs{}".format(opts.offset) if opts.offset is not None else "",
                "n{}".format(opts.n) if opts.n is not None else "",
                opts.method, ext
            ))
        else:
            out_file = opts.o
        assert opts.f or not os.path.isfile(
            out_file), "File already exists! Try running with -f option to overwrite."
        # A trailing number on the method name is parsed as the runs/timeout/gap value
        match = re.match(r'^([a-z_]+)(\d*)$', opts.method)
        assert match
        method = match[1]
        runs = 1 if match[2] == '' else int(match[2])
        if method == "nn":
            assert opts.offset is None, "Offset not supported for nearest neighbor"
            eval_batch_size = opts.max_calc_batch_size
            results, parallelism = solve_all_nn(
                dataset_path, eval_batch_size, opts.no_cuda, opts.n,
                opts.progress_bar_mininterval
            )
        elif method in ("gurobi", "gurobigap", "gurobit", "concorde", "lkh") or method[-9:] == 'insertion':
            # NOTE(review): results_dir is only assigned in the `opts.o is None` branch
            # above, so combining -o with these methods raises NameError - verify.
            target_dir = os.path.join(results_dir, "{}-{}".format(
                dataset_basename,
                opts.method
            ))
            assert opts.f or not os.path.isdir(target_dir), \
                "Target dir already exists! Try running with -f option to overwrite."
            if not os.path.isdir(target_dir):
                os.makedirs(target_dir)
            # TSP contains single loc array rather than tuple
            dataset = [(instance, ) for instance in load_dataset(dataset_path)]
            if method == "concorde":
                use_multiprocessing = False
                executable = os.path.abspath(os.path.join('problems', 'tsp', 'concorde', 'concorde', 'TSP', 'concorde'))
                def run_func(args):
                    return solve_concorde_log(executable, *args, disable_cache=opts.disable_cache)
            elif method == "lkh":
                use_multiprocessing = False
                executable = get_lkh_executable()
                def run_func(args):
                    return solve_lkh_log(executable, *args, runs=runs, disable_cache=opts.disable_cache)
            elif method[:6] == "gurobi":
                use_multiprocessing = True  # We run one thread per instance
                def run_func(args):
                    return solve_gurobi(*args, disable_cache=opts.disable_cache,
                                        timeout=runs if method[6:] == "t" else None,
                                        gap=float(runs) if method[6:] == "gap" else None)
            else:
                assert method[-9:] == "insertion"
                use_multiprocessing = True
                def run_func(args):
                    return solve_insertion(*args, opts.method.split("_")[0])
            results, parallelism = run_all_in_pool(
                run_func,
                target_dir, dataset, opts, use_multiprocessing=use_multiprocessing
            )
        else:
            assert False, "Unknown method: {}".format(opts.method)
        costs, tours, durations = zip(*results)  # Not really costs since they should be negative
        print("Average cost: {} +- {}".format(np.mean(costs), 2 * np.std(costs) / np.sqrt(len(costs))))
        print("Average serial duration: {} +- {}".format(
            np.mean(durations), 2 * np.std(durations) / np.sqrt(len(durations))))
        print("Average parallel duration: {}".format(np.mean(durations) / parallelism))
        print("Calculated total duration: {}".format(timedelta(seconds=int(np.sum(durations) / parallelism))))
        save_dataset((results, parallelism), out_file)
# --- RESPECT-main/problems/tsp/state_tsp.py ---
import torch
from typing import NamedTuple
from utils.boolmask import mask_long2bool, mask_long_scatter
bypass = super  # Alias for super so StateTSP.__getitem__ below can fall back to the tuple implementation
class StateTSP(NamedTuple):
    """Immutable rollout state for the TSP; every update returns a new (replaced) tuple."""
    # Fixed input
    loc: torch.Tensor  # (batch, n, 2) node coordinates
    dist: torch.Tensor  # (batch, n, n) pairwise Euclidean distances
    # If this state contains multiple copies (i.e. beam search) for the same instance, then for memory efficiency
    # the loc and dist tensors are not kept multiple times, so we need to use the ids to index the correct rows.
    ids: torch.Tensor  # Keeps track of original fixed data index of rows
    # State
    first_a: torch.Tensor  # first visited node, needed to close the tour at the end
    prev_a: torch.Tensor  # most recently visited node
    visited_: torch.Tensor  # Keeps track of nodes that have been visited
    lengths: torch.Tensor  # accumulated tour length per row
    cur_coord: torch.Tensor  # coordinates of the current node (None before the first step)
    i: torch.Tensor  # Keeps track of step
    @property
    def visited(self):
        """Visited mask as bool, decompressing the packed int64 representation if needed."""
        #if self.visited_.dtype == torch.uint8:
        if self.visited_.dtype == torch.bool:
            return self.visited_
        else:
            return mask_long2bool(self.visited_, n=self.loc.size(-2))
    def __getitem__(self, key):
        # Tensor/slice keys select rows of the per-row state (used by beam search);
        # any other key falls back to normal tuple indexing via the `bypass` alias.
        if torch.is_tensor(key) or isinstance(key, slice):  # If tensor, idx all tensors by this tensor:
            return self._replace(
                ids=self.ids[key],
                first_a=self.first_a[key],
                prev_a=self.prev_a[key],
                visited_=self.visited_[key],
                lengths=self.lengths[key],
                cur_coord=self.cur_coord[key] if self.cur_coord is not None else None,
            )
        #return super(StateTSP, self).__getitem__(key)
        return bypass(StateTSP, self).__getitem__(key)
    @staticmethod
    def initialize(loc, visited_dtype=torch.uint8):
        """Create the initial state for a batch of instances (no node visited yet)."""
        batch_size, n_loc, _ = loc.size()
        prev_a = torch.zeros(batch_size, 1, dtype=torch.long, device=loc.device)
        return StateTSP(
            loc=loc,
            dist=(loc[:, :, None, :] - loc[:, None, :, :]).norm(p=2, dim=-1),
            ids=torch.arange(batch_size, dtype=torch.int64, device=loc.device)[:, None],  # Add steps dimension
            first_a=prev_a,
            prev_a=prev_a,
            # Keep visited with depot so we can scatter efficiently (if there is an action for depot)
            visited_=(  # Visited as mask is easier to understand, as long more memory efficient
                torch.zeros(
                    batch_size, 1, n_loc,
                    #dtype=torch.uint8, device=loc.device
                    dtype=torch.bool, device=loc.device
                )
                #if visited_dtype == torch.uint8
                if visited_dtype == torch.bool
                else torch.zeros(batch_size, 1, (n_loc + 63) // 64, dtype=torch.int64, device=loc.device)  # Ceil
            ),
            lengths=torch.zeros(batch_size, 1, device=loc.device),
            cur_coord=None,
            i=torch.zeros(1, dtype=torch.int64, device=loc.device)  # Vector with length num_steps
        )
    def get_final_cost(self):
        """Total tour length including the closing leg back to the first node."""
        assert self.all_finished()
        # assert self.visited_.
        return self.lengths + (self.loc[self.ids, self.first_a, :] - self.cur_coord).norm(p=2, dim=-1)
    def update(self, selected):
        """Advance the state by visiting node `selected` ((batch,) node indices)."""
        # Update the state
        prev_a = selected[:, None]  # Add dimension for step
        # Add the length
        # cur_coord = self.loc.gather(
        #     1,
        #     selected[:, None, None].expand(selected.size(0), 1, self.loc.size(-1))
        # )[:, 0, :]
        cur_coord = self.loc[self.ids, prev_a]
        lengths = self.lengths
        if self.cur_coord is not None:  # Don't add length for first action (selection of start node)
            lengths = self.lengths + (cur_coord - self.cur_coord).norm(p=2, dim=-1)  # (batch_dim, 1)
        # Update should only be called with just 1 parallel step, in which case we can check this way if we should update
        first_a = prev_a if self.i.item() == 0 else self.first_a
        #if self.visited_.dtype == torch.uint8:
        if self.visited_.dtype == torch.bool:
            # Add one dimension since we write a single value
            visited_ = self.visited_.scatter(-1, prev_a[:, :, None], 1)
        else:
            visited_ = mask_long_scatter(self.visited_, prev_a)
        return self._replace(first_a=first_a, prev_a=prev_a, visited_=visited_,
                             lengths=lengths, cur_coord=cur_coord, i=self.i + 1)
    def all_finished(self):
        # Exactly n steps
        return self.i.item() >= self.loc.size(-2)
    def get_current_node(self):
        """Most recently visited node per row, shape (batch, 1)."""
        return self.prev_a
    def get_mask(self):
        """Infeasible-action mask: truthy for nodes already visited."""
        return self.visited > 0  # Hacky way to return bool or uint8 depending on pytorch version
    def get_nn(self, k=None):
        """Indices of the k nearest unvisited neighbours for every node."""
        # Insert step dimension
        # Nodes already visited get inf so they do not make it
        if k is None:
            k = self.loc.size(-2) - self.i.item()  # Number of remaining
        return (self.dist[self.ids, :, :] + self.visited.float()[:, :, None, :] * 1e6).topk(k, dim=-1, largest=False)[1]
    def get_nn_current(self, k=None):
        """k nearest unvisited neighbours of the current node (intentionally disabled)."""
        assert False, "Currently not implemented, look into which neighbours to use in step 0?"
        # Note: if this is called in step 0, it will have k nearest neighbours to node 0, which may not be desired
        # so it is probably better to use k = None in the first iteration
        if k is None:
            k = self.loc.size(-2)
        k = min(k, self.loc.size(-2) - self.i.item())  # Number of remaining
        return (
            self.dist[
                self.ids,
                self.prev_a
            ] +
            self.visited.float() * 1e6
        ).topk(k, dim=-1, largest=False)[1]
    def construct_solutions(self, actions):
        """The selected actions already form the tour; return them unchanged."""
        return actions
# --- RESPECT-main/problems/vrp/problem_vrp.py ---
from torch.utils.data import Dataset
import torch
import os
import pickle
from problems.vrp.state_cvrp import StateCVRP
from problems.vrp.state_sdvrp import StateSDVRP
from utils.beam_search import beam_search
class CVRP(object):
    """Capacitated VRP problem definition: cost function plus dataset/state factories."""
    NAME = 'cvrp'  # Capacitated Vehicle Routing Problem
    VEHICLE_CAPACITY = 1.0  # (w.l.o.g. vehicle capacity is 1, demands should be scaled)
    @staticmethod
    def get_costs(dataset, pi):
        # Total route length for tours `pi` (0 = depot, may occur multiple times);
        # asserts tour validity and that capacity is never exceeded between depot visits.
        batch_size, graph_size = dataset['demand'].size()
        # Check that tours are valid, i.e. contain 0 to n -1
        sorted_pi = pi.data.sort(1)[0]
        # Sorting it should give all zeros at front and then 1...n
        assert (
            torch.arange(1, graph_size + 1, out=pi.data.new()).view(1, -1).expand(batch_size, graph_size) ==
            sorted_pi[:, -graph_size:]
        ).all() and (sorted_pi[:, :-graph_size] == 0).all(), "Invalid tour"
        # Visiting depot resets capacity so we add demand = -capacity (we make sure it does not become negative)
        demand_with_depot = torch.cat(
            (
                torch.full_like(dataset['demand'][:, :1], -CVRP.VEHICLE_CAPACITY),
                dataset['demand']
            ),
            1
        )
        d = demand_with_depot.gather(1, pi)
        used_cap = torch.zeros_like(dataset['demand'][:, 0])
        for i in range(pi.size(1)):
            used_cap += d[:, i]  # This will reset/make capacity negative if i == 0, e.g. depot visited
            # Cannot use less than 0
            used_cap[used_cap < 0] = 0
            assert (used_cap <= CVRP.VEHICLE_CAPACITY + 1e-5).all(), "Used more than capacity"
        # Gather dataset in order of tour
        loc_with_depot = torch.cat((dataset['depot'][:, None, :], dataset['loc']), 1)
        d = loc_with_depot.gather(1, pi[..., None].expand(*pi.size(), loc_with_depot.size(-1)))
        # Length is distance (L2-norm of difference) of each next location to its prev and of first and last to depot
        return (
            (d[:, 1:] - d[:, :-1]).norm(p=2, dim=2).sum(1)
            + (d[:, 0] - dataset['depot']).norm(p=2, dim=1)  # Depot to first
            + (d[:, -1] - dataset['depot']).norm(p=2, dim=1)  # Last to depot, will be 0 if depot is last
        ), None
    @staticmethod
    def make_dataset(*args, **kwargs):
        # Factory used by the generic training code
        return VRPDataset(*args, **kwargs)
    @staticmethod
    def make_state(*args, **kwargs):
        # Build the rollout state for a batch of instances
        return StateCVRP.initialize(*args, **kwargs)
    @staticmethod
    def beam_search(input, beam_size, expand_size=None,
                    compress_mask=False, model=None, max_calc_batch_size=4096):
        # Beam search over CVRP states; candidate expansions are scored by the model
        assert model is not None, "Provide model"
        fixed = model.precompute_fixed(input)
        def propose_expansions(beam):
            return model.propose_expansions(
                beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
            )
        state = CVRP.make_state(
            #input, visited_dtype=torch.int64 if compress_mask else torch.uint8
            input, visited_dtype=torch.int64 if compress_mask else torch.bool
        )
        return beam_search(state, beam_size, propose_expansions)
class SDVRP(object):
    """Split Delivery VRP definition: demand at a node may be served over several visits."""
    NAME = 'sdvrp'  # Split Delivery Vehicle Routing Problem
    VEHICLE_CAPACITY = 1.0  # Demands are scaled so the vehicle capacity is 1 w.l.o.g.
    @staticmethod
    def get_costs(dataset, pi):
        """Return (tour_length, None); asserts that all demand is fully delivered."""
        batch_size, graph_size = dataset['demand'].size()
        # Track remaining demand per node. The depot slot starts at -capacity so a
        # depot visit "delivers" a negative amount, i.e. refills the vehicle; at the
        # end every slot (depot included) must be exactly zero.
        remaining = torch.cat(
            (
                torch.full_like(dataset['demand'][:, :1], -SDVRP.VEHICLE_CAPACITY),
                dataset['demand']
            ),
            1
        )
        batch_ids = torch.arange(batch_size, out=remaining.data.new().long())
        load = torch.zeros_like(dataset['demand'][:, 0])
        prev = None
        for cur in pi.transpose(0, 1):
            # Two consecutive depot visits are only allowed once all demand is served.
            assert prev is None or (remaining[((prev == 0) & (cur == 0)), :] == 0).all(), \
                "Cannot visit depot twice if any nonzero demand"
            delivered = torch.min(remaining[batch_ids, cur], SDVRP.VEHICLE_CAPACITY - load)
            remaining[batch_ids, cur] -= delivered
            load += delivered
            load[cur == 0] = 0  # depot visit empties the vehicle
            prev = cur
        assert (remaining == 0).all(), "All demand must be satisfied"
        # Coordinates in visiting order (index 0 of loc_with_depot is the depot).
        loc_with_depot = torch.cat((dataset['depot'][:, None, :], dataset['loc']), 1)
        coords = loc_with_depot.gather(1, pi[..., None].expand(*pi.size(), loc_with_depot.size(-1)))
        # Total length = inner legs + depot->first + last->depot (zero if tour ends at depot).
        inner = (coords[:, 1:] - coords[:, :-1]).norm(p=2, dim=2).sum(1)
        to_first = (coords[:, 0] - dataset['depot']).norm(p=2, dim=1)
        from_last = (coords[:, -1] - dataset['depot']).norm(p=2, dim=1)
        return inner + to_first + from_last, None
    @staticmethod
    def make_dataset(*args, **kwargs):
        """Build a :class:`VRPDataset` (shared with CVRP)."""
        return VRPDataset(*args, **kwargs)
    @staticmethod
    def make_state(*args, **kwargs):
        """Build the initial rollout state for SDVRP."""
        return StateSDVRP.initialize(*args, **kwargs)
    @staticmethod
    def beam_search(input, beam_size, expand_size=None,
                    compress_mask=False, model=None, max_calc_batch_size=4096):
        """Run beam search over SDVRP states; mask compression is not supported."""
        assert model is not None, "Provide model"
        assert not compress_mask, "SDVRP does not support compression of the mask"
        fixed = model.precompute_fixed(input)
        def propose_expansions(beam):
            return model.propose_expansions(
                beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
            )
        state = SDVRP.make_state(input)
        return beam_search(state, beam_size, propose_expansions)
def make_instance(args):
    """Convert one raw pickle row into a dict of normalized tensors.

    Rows are (depot, loc, demand, capacity), optionally followed by
    (depot_types, customer_types, grid_size). Demands are scaled by the
    capacity and coordinates by the grid size (default 1).
    """
    depot, loc, demand, capacity, *extra = args
    if extra:
        _depot_types, _customer_types, grid_size = extra
    else:
        grid_size = 1
    return {
        'loc': torch.tensor(loc, dtype=torch.float) / grid_size,
        'demand': torch.tensor(demand, dtype=torch.float) / capacity,
        'depot': torch.tensor(depot, dtype=torch.float) / grid_size
    }
class VRPDataset(Dataset):
    """VRP instances, either loaded from a pickle file or sampled uniformly at random."""
    def __init__(self, filename=None, size=50, num_samples=1000000, offset=0, distribution=None):
        """Load `num_samples` instances from `filename` (.pkl) or sample them.

        :param size: number of customers per instance (10/20/50/100 when sampling)
        :param offset: number of rows to skip when reading from file
        :param distribution: unused, kept for interface compatibility
        """
        super(VRPDataset, self).__init__()
        self.data_set = []
        if filename is not None:
            assert os.path.splitext(filename)[1] == '.pkl'
            with open(filename, 'rb') as f:
                raw = pickle.load(f)
            self.data = [make_instance(row) for row in raw[offset:offset + num_samples]]
        else:
            # Capacities from the "RL for VRP" paper (https://arxiv.org/abs/1802.04240),
            # keyed by graph size; demands are uniform on {1..9} scaled by the capacity.
            CAPACITIES = {
                10: 20.,
                20: 30.,
                50: 40.,
                100: 50.
            }
            capacity = CAPACITIES[size]
            self.data = [
                {
                    'loc': torch.FloatTensor(size, 2).uniform_(0, 1),
                    'demand': (torch.FloatTensor(size).uniform_(0, 9).int() + 1).float() / capacity,
                    'depot': torch.FloatTensor(2).uniform_(0, 1)
                }
                for _ in range(num_samples)
            ]
        self.size = len(self.data)
    def __len__(self):
        return self.size
    def __getitem__(self, idx):
        return self.data[idx]
| 7,569 | 35.570048 | 117 | py |
RESPECT | RESPECT-main/problems/vrp/state_sdvrp.py | import torch
from typing import NamedTuple
bypass = super
class StateSDVRP(NamedTuple):
    """Immutable rollout state for the Split Delivery VRP.

    Every transition (:meth:`update`) returns a new NamedTuple via ``_replace``;
    nothing is mutated in place.
    """
    # Fixed input
    coords: torch.Tensor  # (batch, n_loc + 1, 2): depot at index 0, then customer locations
    demand: torch.Tensor  # (batch, n_loc): normalized customer demands
    # If this state contains multiple copies (i.e. beam search) for the same instance, then for memory efficiency
    # the coords and demands tensors are not kept multiple times, so we need to use the ids to index the correct rows.
    ids: torch.Tensor  # Keeps track of original fixed data index of rows
    # State
    prev_a: torch.Tensor  # (batch, 1): node selected at the previous step (0 = depot)
    used_capacity: torch.Tensor  # (batch, 1): load currently carried by the vehicle
    demands_with_depot: torch.Tensor  # Keeps track of remaining demands
    lengths: torch.Tensor  # (batch, 1): accumulated route length so far
    cur_coord: torch.Tensor  # (batch, 1, 2): coordinate of the last visited node
    i: torch.Tensor  # Keeps track of step
    VEHICLE_CAPACITY = 1.0  # Hardcoded
    def __getitem__(self, key):
        """Slice the per-row state tensors by a tensor or slice key.

        Any other key (e.g. a plain int) falls back to normal tuple indexing.
        """
        if torch.is_tensor(key) or isinstance(key, slice):  # If tensor, idx all tensors by this tensor:
            return self._replace(
                ids=self.ids[key],
                prev_a=self.prev_a[key],
                used_capacity=self.used_capacity[key],
                demands_with_depot=self.demands_with_depot[key],
                lengths=self.lengths[key],
                cur_coord=self.cur_coord[key],
            )
        #return super(StateSDVRP, self).__getitem__(key)
        return bypass(StateSDVRP, self).__getitem__(key)
    @staticmethod
    def initialize(input):
        """Build the initial state from an instance dict with 'depot', 'loc' and 'demand'."""
        depot = input['depot']
        loc = input['loc']
        demand = input['demand']
        batch_size, n_loc, _ = loc.size()
        return StateSDVRP(
            coords=torch.cat((depot[:, None, :], loc), -2),
            demand=demand,
            ids=torch.arange(batch_size, dtype=torch.int64, device=loc.device)[:, None],  # Add steps dimension
            prev_a=torch.zeros(batch_size, 1, dtype=torch.long, device=loc.device),
            used_capacity=demand.new_zeros(batch_size, 1),
            # Depot slot (index 0) holds zero demand; shape (batch, 1, n_loc + 1).
            demands_with_depot=torch.cat((
                demand.new_zeros(batch_size, 1),
                demand[:, :]
            ), 1)[:, None, :],
            lengths=torch.zeros(batch_size, 1, device=loc.device),
            cur_coord=input['depot'][:, None, :],  # Add step dimension
            i=torch.zeros(1, dtype=torch.int64, device=loc.device)  # Vector with length num_steps
        )
    def get_final_cost(self):
        """Total route length including the final return leg to the depot."""
        assert self.all_finished()
        return self.lengths + (self.coords[self.ids, 0, :] - self.cur_coord).norm(p=2, dim=-1)
    def update(self, selected):
        """Advance one step by visiting `selected` (batch,) and return the new state."""
        assert self.i.size(0) == 1, "Can only update if state represents single step"
        # Update the state
        selected = selected[:, None]  # Add dimension for step
        prev_a = selected
        # Add the length
        cur_coord = self.coords[self.ids, selected]
        lengths = self.lengths + (cur_coord - self.cur_coord).norm(p=2, dim=-1)  # (batch_dim, 1)
        # Not selected_demand is demand of first node (by clamp) so incorrect for nodes that visit depot!
        selected_demand = self.demands_with_depot.gather(-1, prev_a[:, :, None])[:, :, 0]
        # Deliver as much as fits in the remaining capacity (split deliveries allowed).
        delivered_demand = torch.min(selected_demand, self.VEHICLE_CAPACITY - self.used_capacity)
        # Increase capacity if depot is not visited, otherwise set to 0
        #used_capacity = torch.where(selected == 0, 0, self.used_capacity + delivered_demand)
        used_capacity = (self.used_capacity + delivered_demand) * (prev_a != 0).float()
        # demands_with_depot = demands_with_depot.clone()[:, 0, :]
        # Add one dimension since we write a single value
        demands_with_depot = self.demands_with_depot.scatter(
            -1,
            prev_a[:, :, None],
            self.demands_with_depot.gather(-1, prev_a[:, :, None]) - delivered_demand[:, :, None]
        )
        return self._replace(
            prev_a=prev_a, used_capacity=used_capacity, demands_with_depot=demands_with_depot,
            lengths=lengths, cur_coord=cur_coord, i=self.i + 1
        )
    def all_finished(self):
        """Done after at least n_loc + 1 steps and once no positive demand remains."""
        return self.i.item() >= self.demands_with_depot.size(-1) and not (self.demands_with_depot > 0).any()
    def get_current_node(self):
        """Return the (batch, 1) index of the node visited last."""
        return self.prev_a
    def get_mask(self):
        """
        Gets a (batch_size, n_loc + 1) mask with the feasible actions (0 = depot), depends on already visited and
        remaining capacity. 0 = feasible, 1 = infeasible
        Forbids to visit depot twice in a row, unless all nodes have been visited
        :return:
        """
        # Nodes that cannot be visited are already visited or too much demand to be served now
        mask_loc = (self.demands_with_depot[:, :, 1:] == 0) | (self.used_capacity[:, :, None] >= self.VEHICLE_CAPACITY)
        # Cannot visit the depot if just visited and still unserved nodes
        mask_depot = (self.prev_a == 0) & ((mask_loc == 0).int().sum(-1) > 0)
        return torch.cat((mask_depot[:, :, None], mask_loc), -1)
    def construct_solutions(self, actions):
        """Actions are already a valid solution sequence; return them unchanged."""
        return actions
| 4,979 | 39.487805 | 119 | py |
RESPECT | RESPECT-main/problems/vrp/state_cvrp.py | import torch
from typing import NamedTuple
from utils.boolmask import mask_long2bool, mask_long_scatter
bypass = super
class StateCVRP(NamedTuple):
    """Immutable rollout state for the Capacitated VRP.

    Every transition (:meth:`update`) returns a new NamedTuple via ``_replace``.
    """
    # Fixed input
    coords: torch.Tensor  # Depot + loc
    demand: torch.Tensor  # (batch, n_loc): normalized customer demands
    # If this state contains multiple copies (i.e. beam search) for the same instance, then for memory efficiency
    # the coords and demands tensors are not kept multiple times, so we need to use the ids to index the correct rows.
    ids: torch.Tensor  # Keeps track of original fixed data index of rows
    # State
    prev_a: torch.Tensor  # (batch, 1): node selected at the previous step (0 = depot)
    used_capacity: torch.Tensor  # (batch, 1): load currently carried by the vehicle
    visited_: torch.Tensor  # Keeps track of nodes that have been visited
    lengths: torch.Tensor  # (batch, 1): accumulated route length so far
    cur_coord: torch.Tensor  # (batch, 1, 2): coordinate of the last visited node
    i: torch.Tensor  # Keeps track of step
    VEHICLE_CAPACITY = 1.0  # Hardcoded
    @property
    def visited(self):
        """Boolean visited mask; unpacks the bit-compressed int64 mask when needed."""
        #if self.visited_.dtype == torch.uint8:
        if self.visited_.dtype == torch.bool:
            return self.visited_
        else:
            return mask_long2bool(self.visited_, n=self.demand.size(-1))
    @property
    def dist(self):
        """Pairwise Euclidean distance matrix over depot + customer coordinates."""
        return (self.coords[:, :, None, :] - self.coords[:, None, :, :]).norm(p=2, dim=-1)
    def __getitem__(self, key):
        """Slice the per-row state tensors by a tensor or slice key.

        Any other key (e.g. a plain int) falls back to normal tuple indexing.
        """
        if torch.is_tensor(key) or isinstance(key, slice):  # If tensor, idx all tensors by this tensor:
            return self._replace(
                ids=self.ids[key],
                prev_a=self.prev_a[key],
                used_capacity=self.used_capacity[key],
                visited_=self.visited_[key],
                lengths=self.lengths[key],
                cur_coord=self.cur_coord[key],
            )
        return bypass(StateCVRP, self).__getitem__(key)
    # Warning: cannot override len of NamedTuple, len should be number of fields, not batch size
    # def __len__(self):
    #     return len(self.used_capacity)
    @staticmethod
    #def initialize(input, visited_dtype=torch.uint8):
    def initialize(input, visited_dtype=torch.bool):
        """Build the initial state from an instance dict with 'depot', 'loc' and 'demand'.

        visited_dtype selects the boolean mask (torch.bool) or the bit-packed
        int64 representation (anything else).
        """
        depot = input['depot']
        loc = input['loc']
        demand = input['demand']
        batch_size, n_loc, _ = loc.size()
        return StateCVRP(
            coords=torch.cat((depot[:, None, :], loc), -2),
            demand=demand,
            ids=torch.arange(batch_size, dtype=torch.int64, device=loc.device)[:, None],  # Add steps dimension
            prev_a=torch.zeros(batch_size, 1, dtype=torch.long, device=loc.device),
            used_capacity=demand.new_zeros(batch_size, 1),
            visited_=(  # Visited as mask is easier to understand, as long more memory efficient
                # Keep visited_ with depot so we can scatter efficiently
                torch.zeros(
                    batch_size, 1, n_loc + 1,
                    #dtype=torch.uint8, device=loc.device
                    dtype=torch.bool, device=loc.device
                )
                #if visited_dtype == torch.uint8
                if visited_dtype == torch.bool
                else torch.zeros(batch_size, 1, (n_loc + 63) // 64, dtype=torch.int64, device=loc.device)  # Ceil
            ),
            lengths=torch.zeros(batch_size, 1, device=loc.device),
            cur_coord=input['depot'][:, None, :],  # Add step dimension
            i=torch.zeros(1, dtype=torch.int64, device=loc.device)  # Vector with length num_steps
        )
    def get_final_cost(self):
        """Total route length including the final return leg to the depot."""
        assert self.all_finished()
        return self.lengths + (self.coords[self.ids, 0, :] - self.cur_coord).norm(p=2, dim=-1)
    def update(self, selected):
        """Advance one step by visiting `selected` (batch,) and return the new state."""
        assert self.i.size(0) == 1, "Can only update if state represents single step"
        # Update the state
        selected = selected[:, None]  # Add dimension for step
        prev_a = selected
        n_loc = self.demand.size(-1)  # Excludes depot
        # Add the length
        cur_coord = self.coords[self.ids, selected]
        # cur_coord = self.coords.gather(
        #     1,
        #     selected[:, None].expand(selected.size(0), 1, self.coords.size(-1))
        # )[:, 0, :]
        lengths = self.lengths + (cur_coord - self.cur_coord).norm(p=2, dim=-1)  # (batch_dim, 1)
        # Not selected_demand is demand of first node (by clamp) so incorrect for nodes that visit depot!
        #selected_demand = self.demand.gather(-1, torch.clamp(prev_a - 1, 0, n_loc - 1))
        selected_demand = self.demand[self.ids, torch.clamp(prev_a - 1, 0, n_loc - 1)]
        # Increase capacity if depot is not visited, otherwise set to 0
        #used_capacity = torch.where(selected == 0, 0, self.used_capacity + selected_demand)
        used_capacity = (self.used_capacity + selected_demand) * (prev_a != 0).float()
        #if self.visited_.dtype == torch.uint8:
        if self.visited_.dtype == torch.bool:
            # Note: here we do not subtract one as we have to scatter so the first column allows scattering depot
            # Add one dimension since we write a single value
            visited_ = self.visited_.scatter(-1, prev_a[:, :, None], 1)
        else:
            # This works, will not set anything if prev_a -1 == -1 (depot)
            visited_ = mask_long_scatter(self.visited_, prev_a - 1)
        return self._replace(
            prev_a=prev_a, used_capacity=used_capacity, visited_=visited_,
            lengths=lengths, cur_coord=cur_coord, i=self.i + 1
        )
    def all_finished(self):
        """Done after at least n_loc steps and once every mask entry is set."""
        return self.i.item() >= self.demand.size(-1) and self.visited.all()
    def get_finished(self):
        """Per-row flag: all entries of the visited mask are set."""
        return self.visited.sum(-1) == self.visited.size(-1)
    def get_current_node(self):
        """Return the (batch, 1) index of the node visited last."""
        return self.prev_a
    def get_mask(self):
        """
        Gets a (batch_size, n_loc + 1) mask with the feasible actions (0 = depot), depends on already visited and
        remaining capacity. 0 = feasible, 1 = infeasible
        Forbids to visit depot twice in a row, unless all nodes have been visited
        :return:
        """
        #if self.visited_.dtype == torch.uint8:
        if self.visited_.dtype == torch.bool:
            visited_loc = self.visited_[:, :, 1:]
        else:
            visited_loc = mask_long2bool(self.visited_, n=self.demand.size(-1))
        # For demand steps_dim is inserted by indexing with id, for used_capacity insert node dim for broadcasting
        exceeds_cap = (self.demand[self.ids, :] + self.used_capacity[:, :, None] > self.VEHICLE_CAPACITY)
        # Nodes that cannot be visited are already visited or too much demand to be served now
        mask_loc = visited_loc.to(exceeds_cap.dtype) | exceeds_cap
        # Cannot visit the depot if just visited and still unserved nodes
        mask_depot = (self.prev_a == 0) & ((mask_loc == 0).int().sum(-1) > 0)
        return torch.cat((mask_depot[:, :, None], mask_loc), -1)
    def construct_solutions(self, actions):
        """Actions are already a valid solution sequence; return them unchanged."""
        return actions
| 6,844 | 40.737805 | 118 | py |
RESPECT | RESPECT-main/problems/toposort/state_toposort.py | import torch
from typing import NamedTuple
from utils.boolmask import mask_long2bool, mask_long_scatter
bypass = super
class StateTopoSort(NamedTuple):
# Fixed input
loc: torch.Tensor
dist: torch.Tensor
# If this state contains multiple copies (i.e. beam search) for the same instance, then for memory efficiency
# the loc and dist tensors are not kept multiple times, so we need to use the ids to index the correct rows.
ids: torch.Tensor # Keeps track of original fixed data index of rows
# State
first_a: torch.Tensor
prev_a: torch.Tensor
visited_: torch.Tensor # Keeps track of nodes that have been visited
lengths: torch.Tensor
cur_coord: torch.Tensor
i: torch.Tensor # Keeps track of step
@property
def visited(self):
#if self.visited_.dtype == torch.uint8:
if self.visited_.dtype == torch.bool:
return self.visited_
else:
return mask_long2bool(self.visited_, n=self.loc.size(-2))
def __getitem__(self, key):
if torch.is_tensor(key) or isinstance(key, slice): # If tensor, idx all tensors by this tensor:
return self._replace(
ids=self.ids[key],
first_a=self.first_a[key],
prev_a=self.prev_a[key],
visited_=self.visited_[key],
lengths=self.lengths[key],
cur_coord=self.cur_coord[key] if self.cur_coord is not None else None,
)
return bypass(StateTopoSort, self).__getitem__(key)
#return super(StateTopoSort, self).__getitem__(key)
@staticmethod
def initialize(loc, visited_dtype=torch.uint8):
batch_size, n_loc, _ = loc.size()
prev_a = torch.zeros(batch_size, 1, dtype=torch.long, device=loc.device)
return StateTopoSort(
loc=loc,
dist=(loc[:, :, None, :] - loc[:, None, :, :]).norm(p=2, dim=-1),
ids=torch.arange(batch_size, dtype=torch.int64, device=loc.device)[:, None], # Add steps dimension
first_a=prev_a,
prev_a=prev_a,
# Keep visited with depot so we can scatter efficiently (if there is an action for depot)
visited_=( # Visited as mask is easier to understand, as long more memory efficient
torch.zeros(
batch_size, 1, n_loc,
#dtype=torch.uint8, device=loc.device
dtype=torch.bool, device=loc.device
)
#if visited_dtype == torch.uint8
if visited_dtype == torch.bool
else torch.zeros(batch_size, 1, (n_loc + 63) // 64, dtype=torch.int64, device=loc.device) # Ceil
),
lengths=torch.zeros(batch_size, 1, device=loc.device),
cur_coord=None,
i=torch.zeros(1, dtype=torch.int64, device=loc.device) # Vector with length num_steps
)
def get_final_cost(self):
assert self.all_finished()
# assert self.visited_.
return self.lengths + (self.loc[self.ids, self.first_a, :] - self.cur_coord).norm(p=2, dim=-1)
def update(self, selected):
# Update the state
prev_a = selected[:, None] # Add dimension for step
# Add the length
# cur_coord = self.loc.gather(
# 1,
# selected[:, None, None].expand(selected.size(0), 1, self.loc.size(-1))
# )[:, 0, :]
cur_coord = self.loc[self.ids, prev_a]
lengths = self.lengths
if self.cur_coord is not None: # Don't add length for first action (selection of start node)
lengths = self.lengths + (cur_coord - self.cur_coord).norm(p=2, dim=-1) # (batch_dim, 1)
# Update should only be called with just 1 parallel step, in which case we can check this way if we should update
first_a = prev_a if self.i.item() == 0 else self.first_a
#if self.visited_.dtype == torch.uint8:
if self.visited_.dtype == torch.bool:
# Add one dimension since we write a single value
visited_ = self.visited_.scatter(-1, prev_a[:, :, None], 1)
else:
visited_ = mask_long_scatter(self.visited_, prev_a)
return self._replace(first_a=first_a, prev_a=prev_a, visited_=visited_,
lengths=lengths, cur_coord=cur_coord, i=self.i + 1)
def all_finished(self):
# Exactly n steps
return self.i.item() >= self.loc.size(-2)
def get_current_node(self):
return self.prev_a
def get_mask(self):
return self.visited > 0 # Hacky way to return bool or uint8 depending on pytorch version
def get_nn(self, k=None):
# Insert step dimension
# Nodes already visited get inf so they do not make it
if k is None:
k = self.loc.size(-2) - self.i.item() # Number of remaining
return (self.dist[self.ids, :, :] + self.visited.float()[:, :, None, :] * 1e6).topk(k, dim=-1, largest=False)[1]
def get_nn_current(self, k=None):
assert False, "Currently not implemented, look into which neighbours to use in step 0?"
# Note: if this is called in step 0, it will have k nearest neighbours to node 0, which may not be desired
# so it is probably better to use k = None in the first iteration
if k is None:
k = self.loc.size(-2)
k = min(k, self.loc.size(-2) - self.i.item()) # Number of remaining
return (
self.dist[
self.ids,
self.prev_a
] +
self.visited.float() * 1e6
).topk(k, dim=-1, largest=False)[1]
def construct_solutions(self, actions):
return actions
| 5,725 | 39.609929 | 121 | py |
RESPECT | RESPECT-main/problems/toposort/problem_toposort_xySorting.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
from problems.toposort.state_toposort import StateTopoSort
from utils.beam_search import beam_search
from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check
import networkx as nx
import numpy as np
class TopoSort(object):
    """Topological sorting as a sequence-decoding problem (x/y level-sorting cost).

    NOTE(review): get_costs moves tensors with .cuda() unconditionally, so this
    variant requires a CUDA device — confirm before running on CPU-only hosts.
    """
    NAME = 'toposort'
    @staticmethod
    def get_costs(dataset, pi, measures=False, plot_data=False):
        """Score permutations `pi` of `dataset` rows.

        Returns (cost, mask, misMatch_y, misMatch_x, recall_accuracy, radius_mean,
        radius_max, recall_accuracy_max, recall_accuracy_min); the measurement
        entries are None unless `measures` is True. cost = 1 - the score returned
        by level_sorting_xy_pairs (presumably a similarity — verify in utils),
        so lower is better.
        """
        # Gather dataset in order of graph nodes
        d = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
        """
        if plot_data:
            print("learned dataset is: ")
            for element in d[-1].cpu():
                print(element)
            print("end of batch")
        """
        # generated index compared to optimal index; to be used for cost function below
        y_ = d[:,:,1].view(d.shape[0],d.shape[1])
        #print(d.shape, list(y_.shape), ((d[:, 1:] - d[:, :-1]).norm(p=2, dim=2).sum(1) + (d[:, 0] - d[:, -1]).norm(p=2, dim=1)).shape)
        sorted, indices = torch.sort(y_, dim=1)
        #New cost stragety to satisfy level sorting
        #cost = level_sorting(sorted, y_)
        cost, indices = level_sorting_xy_pairs(indices, d)
        #_, indices = level_sorting_xy_pairs(indices, d)
        """
        idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
        if mode == 1:
            indices = deep_sort_x(indices, d)
        #print(indices.shape, idx.shape)
        # sorting cost is measured with Cosine Similarity
        cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        cost = cos(indices.cuda(),idx).cuda()
        """
        # idx is the identity permutation, repeated for every batch row.
        idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
        #misMatch_y = torch.sub(y_, sorted)
        #misMatch_x = torch.sub(indices, idx)
        #cost = 0.2 * torch.count_nonzero(misMatch_x, dim=1) + 0.8 * torch.count_nonzero(misMatch_y, dim=1)
        if measures:
            # NOTE(review): names suggest order_check returns (recall acc, radius mean,
            # radius max, recall acc max, recall acc min) — confirm in utils.order_check.
            recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = order_check(idx, indices.cuda())
            """
            graph_size = indices.shape[1]
            full_recall = (graph_size-1) * graph_size / 2.
            recall_elementWise = torch.cat([torch.sum(indices[:, (i+1):]>indices[:, i].view(indices.shape[0], -1), dim=1).view(indices.shape[0], -1) for i in range(indices.shape[1]-1)], dim=1)
            recall = torch.sum(recall_elementWise, dim=1)
            recall_accuracy_max, recall_accuracy_min = torch.max(recall) / full_recall, torch.min(recall) / full_recall
            recall_accuracy = torch.mul(recall, torch.FloatTensor([1.]).cuda()).mean() / full_recall
            #idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
            #idx = torch.tensor([i for i in range(indices.shape[1])]).cuda()
            radius_elementWise = torch.abs(indices - idx)
            radius_mean = torch.mean(torch.mul(radius_elementWise, torch.FloatTensor([1.]).cuda()), 1).mean()
            radius_max = torch.max(radius_elementWise)
            """
            #recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None
            #misMatch_y = torch.nonzero(misMatch_y).shape[0] / y_.shape[0]
            #misMatch_x = torch.nonzero(misMatch_x).shape[0] / idx.shape[0]
            # Average count of rows whose y (resp. index) order differs from the sorted order.
            misMatch_y = torch.FloatTensor([torch.nonzero(torch.sub(sorted, y_)).shape[0] / y_.shape[0]]).cuda()
            misMatch_x = torch.FloatTensor([torch.nonzero(torch.sub(indices, idx)).shape[0] / idx.shape[0]]).cuda()
            #misMatch_y = None
            #misMatch_x = None
            #recall_accuracy, radius_mean, radius_max = None, None, None
        else:
            misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None, None
        #print(cost.shape)
        return 1-cost, None, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        #return cost.cuda(), None, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
    @staticmethod
    def make_dataset(*args, **kwargs):
        """Build a :class:`TopoSortDataset`."""
        return TopoSortDataset(*args, **kwargs)
    @staticmethod
    def make_state(*args, **kwargs):
        """Build the initial rollout state."""
        return StateTopoSort.initialize(*args, **kwargs)
    @staticmethod
    def beam_search(input, beam_size, expand_size=None,
                    compress_mask=False, model=None, max_calc_batch_size=4096):
        """Run beam search over toposort states, using *model* to score expansions."""
        assert model is not None, "Provide model"
        fixed = model.precompute_fixed(input)
        def propose_expansions(beam):
            return model.propose_expansions(
                beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
            )
        #state = TSP.make_state(
        state = TopoSort.make_state(
            #input, visited_dtype=torch.int64 if compress_mask else torch.uint8
            input, visited_dtype=torch.int64 if compress_mask else torch.bool
        )
        return beam_search(state, beam_size, propose_expansions)
class TopoSortDataset(Dataset):
    """Synthetic layered graphs for the toposort problem (x/y-point variant).

    NOTE(review): in the generated branch, the first loop appends 4-element rows
    [level, parent, parent, parent] while the later loops append 2-element
    [x, y] rows; torch.FloatTensor on such ragged nested lists raises, so this
    branch looks broken/dead — confirm whether only the pickle path is used.
    """
    # 50, 1000000
    def __init__(self, filename=None, size=25, num_samples=1000, offset=0, distribution=None, seed=0):
        """Load instances from a .pkl file or generate `num_samples` random graphs.

        :param size: nodes per graph
        :param seed: RNG seed; 0 (the default) leaves the RNG unseeded
        :param distribution: unused, kept for interface compatibility
        """
        super(TopoSortDataset, self).__init__()
        self.data_set = []
        if filename is not None:
            assert os.path.splitext(filename)[1] == '.pkl'
            with open(filename, 'rb') as f:
                data = pickle.load(f)
            self.data = [torch.FloatTensor(row) for row in (data[offset:offset+num_samples])]
        else:
            # Sample points randomly in [0, 1] square
            #self.data = [torch.FloatTensor(size, 2).uniform_(0, 1) for i in range(num_samples)]
            self.data = []
            if seed > 0:
                random.seed(seed)
            for _ in range(num_samples):
                graph = []
                # Random number of levels; nodes are split evenly across them.
                levels = random.randint(2, 20)
                #levels = random.randint(1, 25)
                #levels = random.randint(50, 100)
                num_level = size // levels
                for level in range(levels-1):
                    graph.append([level+1, random.randint(0, level), random.randint(0, level), random.randint(0, level)])
                y_axis = random.random()
                for j in range(num_level):
                    graph.append([random.random(), y_axis])
                y_axis = random.random()
                # Last level absorbs the remainder nodes (size % levels).
                for k in range(num_level+size%levels):
                    graph.append([random.random(), y_axis])
                graph.sort(key = lambda i: i[0]**2-i[1]**2)
                self.data.append(torch.FloatTensor(graph))
                #for i in range(size):
                #graph.append([i,i+random.randint(1,10)])
                #print(torch.FloatTensor(graph).shape)
                #self.data.append(torch.nn.functional.normalize(torch.FloatTensor(graph)))
        self.size = len(self.data)
    def __len__(self):
        return self.size
    def __getitem__(self, idx):
        return self.data[idx]
| 7,376 | 45.396226 | 192 | py |
RESPECT | RESPECT-main/problems/toposort/problem_toposort_tmp.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
from problems.toposort.state_toposort import StateTopoSort
from utils.beam_search import beam_search
from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check
import networkx as nx
import numpy as np
class TopoSort(object):
    """Topological sorting as a sequence-decoding problem (layer-label cost variant).

    NOTE(review): get_costs moves tensors with .cuda() unconditionally, so this
    variant requires a CUDA device — confirm before running on CPU-only hosts.
    """
    NAME = 'toposort'
    @staticmethod
    def get_costs(dataset, pi, measures=False, plot_data=False):
        """Score permutations `pi` by cosine similarity between the learned layer
        order (feature column 0 in decode order) and that column sorted ascending.

        Returns (1 - cost, None, misMatch, None, recall_accuracy, radius_mean,
        radius_max, recall_accuracy_max, recall_accuracy_min); measurement slots
        are None unless `measures` is True.
        """
        # Gather dataset in order of graph nodes
        d = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
        """
        if plot_data:
            print("learned dataset is: ")
            for element in d[-1].cpu():
                print(element)
            print("end of batch")
        """
        # generated index compared to optimal index; to be used for cost function below
        order_learned = d[:,:,0].view(d.shape[0],d.shape[1])
        #print(d.shape, list(y_.shape), ((d[:, 1:] - d[:, :-1]).norm(p=2, dim=2).sum(1) + (d[:, 0] - d[:, -1]).norm(p=2, dim=1)).shape)
        order_sorted, indices = torch.sort(order_learned, dim=1)
        #New cost stragety to satisfy level sorting
        #cost = level_sorting(sorted, y_)
        #cost, indices = level_sorting_xy_pairs(indices, d)
        #_, indices = level_sorting_xy_pairs(indices, d)
        """
        idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
        if mode == 1:
            indices = deep_sort_x(indices, d)
        #print(indices.shape, idx.shape)
        # sorting cost is measured with Cosine Similarity
        cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        cost = cos(indices.cuda(),idx).cuda()
        """
        #misMatch_y = torch.sub(y_, sorted)
        #misMatch_x = torch.sub(indices, idx)
        #cost = 0.2 * torch.count_nonzero(misMatch_x, dim=1) + 0.8 * torch.count_nonzero(misMatch_y, dim=1)
        order_learned = order_learned.cuda()
        order_sorted = order_sorted.cuda()
        cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        cost = cos(order_learned, order_sorted).cuda()
        if measures:
            # For each position, count later entries that are larger: pairwise
            # "correctly ordered" counts for the learned and fully sorted orders.
            recall_elementWise = torch.cat([torch.sum(order_learned[:, (i+1):] > order_learned[:, i].view(order_learned.shape[0], -1), dim=1).view(order_learned.shape[0], -1) for i in range(order_learned.shape[1]-1)], dim=1)
            full_recall_elementWise = torch.cat([torch.sum(order_sorted[:, (i+1):] > order_sorted[:, i].view(order_sorted.shape[0], -1), dim=1).view(order_sorted.shape[0], -1) for i in range(order_sorted.shape[1]-1)], dim=1)
            recall = torch.sum(recall_elementWise.cuda(), dim=1)
            full_recall = torch.sum(full_recall_elementWise.cuda(), dim=1)
            recall_accuracy = torch.div(recall.cuda(), full_recall.cuda())
            # NOTE(review): recall_accuracy_mean is computed but never returned.
            recall_accuracy_mean, recall_accuracy_max, recall_accuracy_min = recall_accuracy.mean(), torch.max(recall_accuracy), torch.min(recall_accuracy)
            #diff = torch.abs(torch.sub(order_learned, order_sorted))
            misMatch = torch.FloatTensor([torch.nonzero(torch.sub(order_learned, order_sorted)).shape[0] / indices.shape[0]]).cuda()
            radius_mean, radius_max = None, None
            """
            graph_size = indices.shape[1]
            full_recall = (graph_size-1) * graph_size / 2.
            recall_elementWise = torch.cat([torch.sum(indices[:, (i+1):]>indices[:, i].view(indices.shape[0], -1), dim=1).view(indices.shape[0], -1) for i in range(indices.shape[1]-1)], dim=1)
            recall = torch.sum(recall_elementWise, dim=1)
            recall_accuracy_max, recall_accuracy_min = torch.max(recall) / full_recall, torch.min(recall) / full_recall
            recall_accuracy = torch.mul(recall, torch.FloatTensor([1.]).cuda()).mean() / full_recall
            #idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
            #idx = torch.tensor([i for i in range(indices.shape[1])]).cuda()
            radius_elementWise = torch.abs(indices - idx)
            radius_mean = torch.mean(torch.mul(radius_elementWise, torch.FloatTensor([1.]).cuda()), 1).mean()
            radius_max = torch.max(radius_elementWise)
            """
            #recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None
            #misMatch_y = torch.nonzero(misMatch_y).shape[0] / y_.shape[0]
            #misMatch_x = torch.nonzero(misMatch_x).shape[0] / idx.shape[0]
            #misMatch_y = torch.FloatTensor([torch.nonzero(torch.sub(sorted, y_)).shape[0] / y_.shape[0]]).cuda()
            #misMatch_x = torch.FloatTensor([torch.nonzero(torch.sub(indices, idx)).shape[0] / idx.shape[0]]).cuda()
            #misMatch_y = None
            #misMatch_x = None
            #recall_accuracy, radius_mean, radius_max = None, None, None
        else:
            #misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None, None
            misMatch, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None
        return 1-cost, None, misMatch, None, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        #return cost.cuda(), None, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
    @staticmethod
    def make_dataset(*args, **kwargs):
        """Build a :class:`TopoSortDataset`."""
        return TopoSortDataset(*args, **kwargs)
    @staticmethod
    def make_state(*args, **kwargs):
        """Build the initial rollout state."""
        return StateTopoSort.initialize(*args, **kwargs)
    @staticmethod
    def beam_search(input, beam_size, expand_size=None,
                    compress_mask=False, model=None, max_calc_batch_size=4096):
        """Run beam search over toposort states, using *model* to score expansions."""
        assert model is not None, "Provide model"
        fixed = model.precompute_fixed(input)
        def propose_expansions(beam):
            return model.propose_expansions(
                beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
            )
        #state = TSP.make_state(
        state = TopoSort.make_state(
            #input, visited_dtype=torch.int64 if compress_mask else torch.uint8
            input, visited_dtype=torch.int64 if compress_mask else torch.bool
        )
        return beam_search(state, beam_size, propose_expansions)
class TopoSortDataset(Dataset):
    """Synthetic layered DAGs for the toposort problem (embedding-row variant).

    Each node row is [layer, parent, parent, parent] where parents point to
    earlier layers (-1 marks a removed reference); rows are shuffled per graph.
    NOTE(review): the generation assumes size >= levels (levels <= 20); smaller
    `size` values would make `graph[i]` index past the generated rows — confirm.
    """
    # 50, 1000000
    def __init__(self, filename=None, size=25, num_samples=1000, offset=0, distribution=None, seed=0):
        """Load instances from a .pkl file or generate `num_samples` random DAGs.

        :param size: nodes per graph
        :param seed: RNG seed; 0 (the default) leaves the RNG unseeded
        :param distribution: unused, kept for interface compatibility
        """
        super(TopoSortDataset, self).__init__()
        self.data_set = []
        if filename is not None:
            assert os.path.splitext(filename)[1] == '.pkl'
            with open(filename, 'rb') as f:
                data = pickle.load(f)
            self.data = [torch.FloatTensor(row) for row in (data[offset:offset+num_samples])]
        else:
            # Sample points randomly in [0, 1] square
            #self.data = [torch.FloatTensor(size, 2).uniform_(0, 1) for i in range(num_samples)]
            self.data = []
            if seed > 0:
                random.seed(seed)
            for _ in range(num_samples):
                graph = []
                levels = random.randint(2, 20)
                #levels = random.randint(1, 25)
                #levels = random.randint(50, 100)
                # Distribute the remaining size-levels nodes randomly over the layers
                # (every layer starts with one node).
                level = [1 for _ in range(levels)]
                remaining = size - levels
                traverse = 0
                while remaining > 0:
                    addition = random.randint(0, remaining)
                    level[traverse % levels] += addition
                    traverse += 1
                    remaining -= addition
                # Layers with fewer than 3 nodes: cap how often they may be referenced.
                caution = [i+1 for i, val in enumerate(level) if val < 3]
                #num_level = size // levels
                for i in range(levels):
                    for j in range(level[i]):
                        embedding = [i+1, random.randint(0, i), random.randint(0, i), random.randint(0, i)]
                        # Guarantee at least one parent in the immediately preceding layer.
                        if max(embedding[1:]) < i:
                            embedding[random.randint(1, 3)] = i
                        for constraint in caution:
                            # Drop excess references to scarce layers (constraint <= i,
                            # so slot 0 (= i+1) is never matched by index()).
                            while embedding[1:].count(constraint) > level[constraint-1]:
                                embedding[embedding.index(constraint)] = -1
                        graph.append(embedding)
                # Shuffle the node rows so the decoder cannot rely on input order.
                order = [i for i in range(size)]
                random.shuffle(order)
                graph = [graph[i] for i in order]
                #graph.sort(key = lambda i: i[0]**2-i[1]**2+i[2]**2-i[3]**2)
                self.data.append(torch.FloatTensor(graph))
                #for i in range(size):
                #graph.append([i,i+random.randint(1,10)])
                #print(torch.FloatTensor(graph).shape)
                #self.data.append(torch.nn.functional.normalize(torch.FloatTensor(graph)))
        self.size = len(self.data)
    def __len__(self):
        return self.size
    def __getitem__(self, idx):
        return self.data[idx]
| 9,242 | 46.891192 | 224 | py |
RESPECT | RESPECT-main/problems/toposort/problem_toposort_singleTraining_reversed_label.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
from problems.toposort.state_toposort import StateTopoSort
from utils.beam_search import beam_search
#from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check
from utils import smart_sort
import networkx as nx
import numpy as np
class TopoSort(object):
    """Topological-sort problem (reversed-label variant).

    Cost is 1 - cosine similarity between the node labels taken in the order
    the model visited them and the same labels sorted in DESCENDING order,
    so a perfect tour has cost 0.
    """

    NAME = 'toposort'

    @staticmethod
    def get_costs(dataset, pi, labels, measures=False, plot_data=False, graph_name=None):
        """Score a batch of tours against the descending label order.

        Args:
            dataset: (batch, graph_size, feat) node embeddings.
            pi: (batch, graph_size) visiting order chosen by the model.
            labels: per-node level labels used as the sorting target.
            measures: also compute recall/mismatch diagnostics.
            plot_data: append human-readable sequences to a log file.
            graph_name: log file name used when plot_data is set.

        Returns:
            (cost, None, misMatch, None, recall_accuracy_mean, radius_mean,
             radius_max, recall_accuracy_max, recall_accuracy_min)
        """
        # Labels rearranged into the order the model visited the nodes.
        order_learned = smart_sort(labels, pi, dim=2)
        # Reference: labels sorted descending (the "reversed" target order).
        order_sorted, indices = torch.sort(labels, dim=1, descending=True)
        order_learned = order_learned.cuda()
        order_sorted = order_sorted.cuda()
        similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        cost = similarity(order_learned, order_sorted).cuda()
        if plot_data:
            # Dump model-visited vs. optimally-sorted sequences for inspection.
            learned_nodes = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
            learned_labels = learned_nodes[:, :, -2].view(learned_nodes.shape[0], learned_nodes.shape[1])
            sorted_nodes = dataset.gather(1, indices.unsqueeze(-1).expand_as(dataset))
            log = open(r"graph_data_collection/graph_data_collection_adaptive_training/" + graph_name, "a")
            torch.set_printoptions(profile="full")
            for i in range(dataset.shape[0]):
                log.write("real sequence is:\n")
                log.write(str(sorted_nodes[i]) + "\n")
                log.write("learning sequence is:\n")
                log.write(str(learned_labels[i]) + "\n")
                log.write("Nodes Level Distribution:\n")
                log.write(str(order_sorted[i]) + "\n")
                log.write("end\n")
            log.close()
        if measures:
            batch = order_learned.shape[0]
            # Correctly ordered pairs in the learned sequence (descending
            # target: later labels should be strictly smaller).
            hits = torch.cat([torch.sum(order_learned[:, (i + 1):] < order_learned[:, i].view(batch, -1), dim=1).view(batch, -1) for i in range(order_learned.shape[1] - 1)], dim=1)
            # The same count on the fully sorted sequence = maximum attainable.
            max_hits = torch.cat([torch.sum(order_sorted[:, (i + 1):] < order_sorted[:, i].view(order_sorted.shape[0], -1), dim=1).view(order_sorted.shape[0], -1) for i in range(order_sorted.shape[1] - 1)], dim=1)
            recall = torch.sum(hits.cuda(), dim=1)
            full_recall = torch.sum(max_hits.cuda(), dim=1)
            recall_accuracy = torch.div(recall.cuda(), full_recall.cuda())
            recall_accuracy_mean = recall_accuracy.mean()
            recall_accuracy_max = torch.max(recall_accuracy)
            recall_accuracy_min = torch.min(recall_accuracy)
            # Mean number of positions whose label disagrees with the target.
            misMatch = torch.FloatTensor([torch.nonzero(torch.sub(order_learned, order_sorted)).shape[0] / indices.shape[0]]).cuda()
            radius_mean, radius_max = None, None
        else:
            misMatch = recall_accuracy_mean = radius_mean = radius_max = recall_accuracy_max = recall_accuracy_min = None
        return 1 - cost, None, misMatch, None, recall_accuracy_mean, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min

    @staticmethod
    def make_dataset(*args, **kwargs):
        """Build a TopoSortDataset."""
        return TopoSortDataset(*args, **kwargs)

    @staticmethod
    def make_state(*args, **kwargs):
        """Create the initial rollout/beam state."""
        return StateTopoSort.initialize(*args, **kwargs)

    @staticmethod
    def beam_search(input, beam_size, expand_size=None,
                    compress_mask=False, model=None, max_calc_batch_size=4096):
        """Beam search driven by *model*'s expansion proposals."""
        assert model is not None, "Provide model"
        fixed = model.precompute_fixed(input)

        def expand(beam):
            return model.propose_expansions(
                beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
            )

        state = TopoSort.make_state(
            input, visited_dtype=torch.int64 if compress_mask else torch.bool
        )
        return beam_search(state, beam_size, expand)
class TopoSortDataset(Dataset):
    """Synthetic layered-graph dataset (wide variant: 800-1000 levels).

    Each item is a (size x 4) float tensor; a row [lvl, p1, p2, p3] encodes a
    node on level ``lvl`` (1-based). With the default small ``size`` the level
    count exceeds ``size``, so every generated level holds one node and only a
    random ``size``-subset of rows is kept.
    """

    def __init__(self, filename=None, size=25, num_samples=1000, offset=0, distribution=None, seed=0):
        super(TopoSortDataset, self).__init__()
        self.data_set = []  # kept for interface compatibility; never filled here
        if filename is not None:
            # Load pre-generated samples from a pickle dump.
            assert os.path.splitext(filename)[1] == '.pkl'
            with open(filename, 'rb') as f:
                rows = pickle.load(f)
            self.data = [torch.FloatTensor(row) for row in rows[offset:offset + num_samples]]
        else:
            self.data = []
            if seed > 0:
                # Seed once so the whole dataset is reproducible.
                random.seed(seed)
            for _ in range(num_samples):
                self.data.append(torch.FloatTensor(self._random_graph(size)))
        self.size = len(self.data)

    @staticmethod
    def _random_graph(size):
        """Sample one layered graph as a list of [level, p1, p2, p3] rows."""
        num_levels = random.randint(800, 1000)
        nodes_per_level = [1] * num_levels
        # Spread any remaining nodes over the levels (no-op when size <= levels).
        remaining = size - num_levels
        cursor = 0
        while remaining > 0:
            extra = random.randint(0, remaining)
            nodes_per_level[cursor % num_levels] += extra
            cursor += 1
            remaining -= extra
        # Levels with fewer than 3 nodes must not be referenced more often
        # than they have nodes.
        sparse_levels = [lvl + 1 for lvl, count in enumerate(nodes_per_level) if count < 3]
        rows = []
        for lvl in range(num_levels):
            for _ in range(nodes_per_level[lvl]):
                row = [lvl + 1, random.randint(0, lvl), random.randint(0, lvl), random.randint(0, lvl)]
                # Guarantee at least one reference to the previous level.
                if max(row[1:]) < lvl:
                    row[random.randint(1, 3)] = lvl
                # Drop references that would overload a sparse level (-1 = removed).
                for constraint in sparse_levels:
                    while row[1:].count(constraint) > nodes_per_level[constraint - 1]:
                        row[row.index(constraint)] = -1
                rows.append(row)
        # Keep a random size-subset of rows, in shuffled order.
        permutation = [k for k in range(size)]
        random.shuffle(permutation)
        return [rows[k] for k in permutation]

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        return self.data[idx]
| 11,131 | 49.144144 | 224 | py |
RESPECT | RESPECT-main/problems/toposort/problem_toposort_model_run.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
from problems.toposort.state_toposort import StateTopoSort
from utils.beam_search import beam_search
#from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check
from utils import smart_sort
import networkx as nx
import numpy as np
class TopoSort(object):
    """Topological-sort problem wrapper used for pure model-run/logging passes.

    Unlike the training variants, get_costs returns a constant zero cost and
    is mostly used for dumping the learned visiting order to disk.
    """

    NAME = 'toposort'

    @staticmethod
    def get_costs(dataset, pi, labels, measures=False, plot_data=False, graph_name=None):
        """Return a dummy cost (and optional diagnostics) for a batch of tours.

        Args:
            dataset: (batch, graph_size, feat) node embeddings.
            pi: (batch, graph_size) visiting order chosen by the model.
            labels: per-node level labels.
            measures: also compute recall/mismatch diagnostics.
            plot_data: append human-readable sequences to a log file.
            graph_name: log file name used when plot_data is set.

        Returns:
            (cost, None, misMatch, None, recall_accuracy_mean, radius_mean,
             radius_max, recall_accuracy_max, recall_accuracy_min)
        """
        # Model-run mode: no training signal, the cost is a constant zero
        # (the function returns 1 - cost, i.e. a constant one).
        cost = torch.FloatTensor([0]).cuda()
        if plot_data:
            # Dump the dataset rows and labels in the model's visiting order.
            learned_nodes = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
            learned_labels = learned_nodes[:, :, -2].view(learned_nodes.shape[0], learned_nodes.shape[1])
            log = open(r"graph_data_collection/graph_data_collection_model_run/" + graph_name, "a")
            torch.set_printoptions(profile="full")
            for i in range(dataset.shape[0]):
                log.write("learning dataset is:\n")
                log.write(str(learned_nodes[i]) + "\n")
                log.write("learning sequence is:\n")
                log.write(str(learned_labels[i]) + "\n")
                log.write("layer index corresponding:\n")
                log.write(str(labels) + "\n")
                log.write("end\n")
            log.close()
        if measures:
            # BUG FIX: order_learned / order_sorted / indices were referenced
            # below but never defined in this variant, so measures=True always
            # raised NameError. Recompute them the same way the sibling
            # training variants do; the ascending sort matches the '>' pair
            # comparisons used here.
            order_learned = smart_sort(labels, pi, dim=2).cuda()
            order_sorted, indices = torch.sort(labels, dim=1)
            order_sorted = order_sorted.cuda()
            # Correctly ordered pairs in the learned vs. fully sorted sequence.
            recall_elementWise = torch.cat([torch.sum(order_learned[:, (i+1):] > order_learned[:, i].view(order_learned.shape[0], -1), dim=1).view(order_learned.shape[0], -1) for i in range(order_learned.shape[1]-1)], dim=1)
            full_recall_elementWise = torch.cat([torch.sum(order_sorted[:, (i+1):] > order_sorted[:, i].view(order_sorted.shape[0], -1), dim=1).view(order_sorted.shape[0], -1) for i in range(order_sorted.shape[1]-1)], dim=1)
            recall = torch.sum(recall_elementWise.cuda(), dim=1)
            full_recall = torch.sum(full_recall_elementWise.cuda(), dim=1)
            recall_accuracy = torch.div(recall.cuda(), full_recall.cuda())
            recall_accuracy_mean, recall_accuracy_max, recall_accuracy_min = recall_accuracy.mean(), torch.max(recall_accuracy), torch.min(recall_accuracy)
            # Mean number of positions whose label disagrees with the target.
            misMatch = torch.FloatTensor([torch.nonzero(torch.sub(order_learned, order_sorted)).shape[0] / indices.shape[0]]).cuda()
            radius_mean, radius_max = None, None
        else:
            misMatch, recall_accuracy_mean, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None
        return 1 - cost, None, misMatch, None, recall_accuracy_mean, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min

    @staticmethod
    def make_dataset(*args, **kwargs):
        """Build a TopoSortDataset."""
        return TopoSortDataset(*args, **kwargs)

    @staticmethod
    def make_state(*args, **kwargs):
        """Create the initial rollout/beam state."""
        return StateTopoSort.initialize(*args, **kwargs)

    @staticmethod
    def beam_search(input, beam_size, expand_size=None,
                    compress_mask=False, model=None, max_calc_batch_size=4096):
        """Beam search driven by *model*'s expansion proposals."""
        assert model is not None, "Provide model"
        fixed = model.precompute_fixed(input)

        def expand(beam):
            return model.propose_expansions(
                beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
            )

        state = TopoSort.make_state(
            input, visited_dtype=torch.int64 if compress_mask else torch.bool
        )
        return beam_search(state, beam_size, expand)
class TopoSortDataset(Dataset):
    """Synthetic layered-graph dataset (wide variant: 800-1000 levels).

    Each item is a (size x 4) float tensor; a row [lvl, p1, p2, p3] encodes a
    node on level ``lvl`` (1-based). With the default small ``size`` the level
    count exceeds ``size``, so every generated level holds one node and only a
    random ``size``-subset of rows is kept.
    """

    def __init__(self, filename=None, size=25, num_samples=1000, offset=0, distribution=None, seed=0):
        super(TopoSortDataset, self).__init__()
        self.data_set = []  # kept for interface compatibility; never filled here
        if filename is not None:
            # Load pre-generated samples from a pickle dump.
            assert os.path.splitext(filename)[1] == '.pkl'
            with open(filename, 'rb') as f:
                rows = pickle.load(f)
            self.data = [torch.FloatTensor(row) for row in rows[offset:offset + num_samples]]
        else:
            self.data = []
            if seed > 0:
                # Seed once so the whole dataset is reproducible.
                random.seed(seed)
            for _ in range(num_samples):
                self.data.append(torch.FloatTensor(self._random_graph(size)))
        self.size = len(self.data)

    @staticmethod
    def _random_graph(size):
        """Sample one layered graph as a list of [level, p1, p2, p3] rows."""
        num_levels = random.randint(800, 1000)
        nodes_per_level = [1] * num_levels
        # Spread any remaining nodes over the levels (no-op when size <= levels).
        remaining = size - num_levels
        cursor = 0
        while remaining > 0:
            extra = random.randint(0, remaining)
            nodes_per_level[cursor % num_levels] += extra
            cursor += 1
            remaining -= extra
        # Levels with fewer than 3 nodes must not be referenced more often
        # than they have nodes.
        sparse_levels = [lvl + 1 for lvl, count in enumerate(nodes_per_level) if count < 3]
        rows = []
        for lvl in range(num_levels):
            for _ in range(nodes_per_level[lvl]):
                row = [lvl + 1, random.randint(0, lvl), random.randint(0, lvl), random.randint(0, lvl)]
                # Guarantee at least one reference to the previous level.
                if max(row[1:]) < lvl:
                    row[random.randint(1, 3)] = lvl
                # Drop references that would overload a sparse level (-1 = removed).
                for constraint in sparse_levels:
                    while row[1:].count(constraint) > nodes_per_level[constraint - 1]:
                        row[row.index(constraint)] = -1
                rows.append(row)
        # Keep a random size-subset of rows, in shuffled order.
        permutation = [k for k in range(size)]
        random.shuffle(permutation)
        return [rows[k] for k in permutation]

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        return self.data[idx]
| 10,826 | 48.438356 | 224 | py |
RESPECT | RESPECT-main/problems/toposort/problem_toposort_multipleTraining_2.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
from problems.toposort.state_toposort import StateTopoSort
from utils.beam_search import beam_search
#from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check
from utils import smart_sort
import networkx as nx
import numpy as np
class TopoSort(object):
    """Topological-sort problem, dual-direction ("double engine") variant.

    ``P`` selects the target direction: P == 0 sorts labels ascending,
    otherwise descending. Cost is 1 - cosine similarity between the labels in
    model-visit order and the chosen target order; the learned ordering is
    also returned so callers can combine both directions.
    """

    NAME = 'toposort'

    @staticmethod
    def get_costs(dataset, pi, labels, measures=False, plot_data=False, P=0):
        """Score a batch of tours against the direction selected by ``P``.

        Args:
            dataset: (batch, graph_size, feat) node embeddings.
            pi: (batch, graph_size) visiting order chosen by the model.
            labels: per-node level labels used as the sorting target.
            measures: also compute recall/mismatch diagnostics.
            plot_data: append human-readable sequences to a log file.
            P: engine index; 0 = ascending target, otherwise descending.

        Returns:
            (cost, None, misMatch, None, recall_accuracy_mean,
             recall_accuracy_max, recall_accuracy_min, radius_mean,
             radius_max, order_learned)
        """
        # Labels rearranged into the order the model visited the nodes.
        order_learned = smart_sort(labels, pi, dim=2)
        # Target order: ascending for engine P == 0, descending otherwise.
        if P == 0:
            order_sorted, indices = torch.sort(labels, dim=1)
        else:
            order_sorted, indices = torch.sort(labels, dim=1, descending=True)
        order_learned = order_learned.cuda()
        order_sorted = order_sorted.cuda()
        similarity = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        cost = similarity(order_learned, order_sorted).cuda()
        if plot_data:
            # Dump learned vs. optimal sequences for offline inspection.
            learned_nodes = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
            learned_labels = learned_nodes[:, :, -2].view(learned_nodes.shape[0], learned_nodes.shape[1])
            sorted_nodes = dataset.gather(1, indices.unsqueeze(-1).expand_as(dataset))
            log = open(r"graph_data_collection/graph_data_collection_doubleEngine/graph30_50_128k_w35_35_" + str(P) + "_doubleEngine_controversalDirection.txt", "a")
            torch.set_printoptions(profile="full")
            for i in range(dataset.shape[0]):
                log.write("real sequence is:\n")
                log.write(str(sorted_nodes[i]) + "\n")
                log.write("learning sequence is:\n")
                log.write(str(learned_labels[i]) + "\n")
                log.write("Nodes Level Distribution:\n")
                log.write(str(order_sorted[i]) + "\n")
                log.write("end\n")
            log.close()
        if measures:
            batch = order_learned.shape[0]
            # Count correctly ordered label pairs; the comparison direction
            # must match the sort direction chosen above.
            if P == 0:
                hits = torch.cat([torch.sum(order_learned[:, (i + 1):] > order_learned[:, i].view(batch, -1), dim=1).view(batch, -1) for i in range(order_learned.shape[1] - 1)], dim=1)
                max_hits = torch.cat([torch.sum(order_sorted[:, (i + 1):] > order_sorted[:, i].view(order_sorted.shape[0], -1), dim=1).view(order_sorted.shape[0], -1) for i in range(order_sorted.shape[1] - 1)], dim=1)
            else:
                hits = torch.cat([torch.sum(order_learned[:, (i + 1):] < order_learned[:, i].view(batch, -1), dim=1).view(batch, -1) for i in range(order_learned.shape[1] - 1)], dim=1)
                max_hits = torch.cat([torch.sum(order_sorted[:, (i + 1):] < order_sorted[:, i].view(order_sorted.shape[0], -1), dim=1).view(order_sorted.shape[0], -1) for i in range(order_sorted.shape[1] - 1)], dim=1)
            recall = torch.sum(hits.cuda(), dim=1)
            full_recall = torch.sum(max_hits.cuda(), dim=1)
            recall_accuracy = torch.div(recall.cuda(), full_recall.cuda())
            recall_accuracy_mean = recall_accuracy.mean()
            recall_accuracy_max = torch.max(recall_accuracy)
            recall_accuracy_min = torch.min(recall_accuracy)
            # Mean number of positions whose label disagrees with the target.
            misMatch = torch.FloatTensor([torch.nonzero(torch.sub(order_learned, order_sorted)).shape[0] / indices.shape[0]]).cuda()
            radius_mean, radius_max = None, None
        else:
            misMatch = recall_accuracy_mean = recall_accuracy_max = recall_accuracy_min = radius_mean = radius_max = None
        return 1 - cost, None, misMatch, None, recall_accuracy_mean, recall_accuracy_max, recall_accuracy_min, radius_mean, radius_max, order_learned

    @staticmethod
    def make_dataset(*args, **kwargs):
        """Build a TopoSortDataset."""
        return TopoSortDataset(*args, **kwargs)

    @staticmethod
    def make_state(*args, **kwargs):
        """Create the initial rollout/beam state."""
        return StateTopoSort.initialize(*args, **kwargs)

    @staticmethod
    def beam_search(input, beam_size, expand_size=None,
                    compress_mask=False, model=None, max_calc_batch_size=4096):
        """Beam search driven by *model*'s expansion proposals."""
        assert model is not None, "Provide model"
        fixed = model.precompute_fixed(input)

        def expand(beam):
            return model.propose_expansions(
                beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
            )

        state = TopoSort.make_state(
            input, visited_dtype=torch.int64 if compress_mask else torch.bool
        )
        return beam_search(state, beam_size, expand)
class TopoSortDataset(Dataset):
    """Synthetic layered-graph dataset (wide variant: 800-1000 levels).

    Each item is a (size x 4) float tensor; a row [lvl, p1, p2, p3] encodes a
    node on level ``lvl`` (1-based). With the default small ``size`` the level
    count exceeds ``size``, so every generated level holds one node and only a
    random ``size``-subset of rows is kept.
    """

    def __init__(self, filename=None, size=25, num_samples=1000, offset=0, distribution=None, seed=0):
        super(TopoSortDataset, self).__init__()
        self.data_set = []  # kept for interface compatibility; never filled here
        if filename is not None:
            # Load pre-generated samples from a pickle dump.
            assert os.path.splitext(filename)[1] == '.pkl'
            with open(filename, 'rb') as f:
                rows = pickle.load(f)
            self.data = [torch.FloatTensor(row) for row in rows[offset:offset + num_samples]]
        else:
            self.data = []
            if seed > 0:
                # Seed once so the whole dataset is reproducible.
                random.seed(seed)
            for _ in range(num_samples):
                self.data.append(torch.FloatTensor(self._random_graph(size)))
        self.size = len(self.data)

    @staticmethod
    def _random_graph(size):
        """Sample one layered graph as a list of [level, p1, p2, p3] rows."""
        num_levels = random.randint(800, 1000)
        nodes_per_level = [1] * num_levels
        # Spread any remaining nodes over the levels (no-op when size <= levels).
        remaining = size - num_levels
        cursor = 0
        while remaining > 0:
            extra = random.randint(0, remaining)
            nodes_per_level[cursor % num_levels] += extra
            cursor += 1
            remaining -= extra
        # Levels with fewer than 3 nodes must not be referenced more often
        # than they have nodes.
        sparse_levels = [lvl + 1 for lvl, count in enumerate(nodes_per_level) if count < 3]
        rows = []
        for lvl in range(num_levels):
            for _ in range(nodes_per_level[lvl]):
                row = [lvl + 1, random.randint(0, lvl), random.randint(0, lvl), random.randint(0, lvl)]
                # Guarantee at least one reference to the previous level.
                if max(row[1:]) < lvl:
                    row[random.randint(1, 3)] = lvl
                # Drop references that would overload a sparse level (-1 = removed).
                for constraint in sparse_levels:
                    while row[1:].count(constraint) > nodes_per_level[constraint - 1]:
                        row[row.index(constraint)] = -1
                rows.append(row)
        # Keep a random size-subset of rows, in shuffled order.
        permutation = [k for k in range(size)]
        random.shuffle(permutation)
        return [rows[k] for k in permutation]

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        return self.data[idx]
| 11,535 | 48.939394 | 228 | py |
RESPECT | RESPECT-main/problems/toposort/dataset_generator.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
#from problems.toposort.state_toposort import StateTopoSort
#from utils.beam_search import beam_search
#from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check, graph_sorting_DAG
from collections import defaultdict
import networkx as nx
import numpy as np
from torch.utils.data import DataLoader
from tqdm import tqdm
class TopoSortDataset(Dataset):
    """Random connected-DAG dataset built with networkx.

    Each item is a (size x (1 + in_degree_fixed)) float tensor: a row
    [level, p1, ..., pk] gives a node's topological level (1-based) and the
    levels of its at most ``in_degree_fixed`` predecessors (0 = unused slot).
    """
    # 50, 1000000
    def __init__(self, filename=None, size=25, num_samples=1000, offset=0, in_degree_fixed=3, distribution=None, seed=0):
        # filename: optional .pkl with pre-generated samples; otherwise graphs
        # are rejection-sampled from G(n, p) with a random edge probability.
        super(TopoSortDataset, self).__init__()
        self.data = []
        #self.label = []
        if filename is not None:
            assert os.path.splitext(filename)[1] == '.pkl'
            with open(filename, 'rb') as f:
                data = pickle.load(f)
            self.data = [torch.FloatTensor(row) for row in (data[offset:offset+num_samples])]
        else:
            self.data = []
            if seed > 0:
                # Seed once so the whole dataset is reproducible.
                random.seed(seed)
            for _ in range(num_samples):
                D = None
                # Resample until the (in-degree-capped) graph is connected and
                # its directed version is a DAG respecting the cap.
                # NOTE(review): if parameters admit no such graph this loop
                # never terminates — confirm the bounds used by callers.
                while True:
                    G = nx.gnp_random_graph(size, random.random())
                    if nx.is_connected(G):
                        Graph = self._in_degree_modification(G, in_degree_fixed)
                        if nx.is_connected(Graph):
                            D = nx.DiGraph([(u, v) for u, v in Graph.edges()])
                    try:
                        # D may still be None (e.g. first iterations); the bare
                        # except swallows the resulting error and retries.
                        if nx.is_directed_acyclic_graph(D) and max(D.in_degree(i) for i in range(size)) <= in_degree_fixed:
                            break
                    except:
                        continue
                graph = self._embedding(D, size, in_degree_fixed)
                # Shuffle node order so position carries no information.
                order = [i for i in range(size)]
                random.shuffle(order)
                graph = [graph[i] for i in order]
                #self.data.append(torch.FloatTensor(sorted(graph, key=lambda x : x[0]**2- x[-1]**3)))
                self.data.append(torch.FloatTensor(graph))
                #self.label.append(torch.FloatTensor(list(nx.topological_sort(D))))
        self.size = len(self.data)
    def _embedding(self, DAG, size, in_degree_fixed):
        """Encode *DAG* level by level into [level, pred-levels...] rows."""
        # Level 1 = all source nodes (in-degree 0); visited maps node -> level.
        visited = defaultdict(int, {head : 1 for head in range(size) if DAG.in_degree(head) == 0})
        def embedding_method(nodes, level, in_degree_fixed, visited):
            # Recursively emit one block of rows per level; a child joins the
            # next level only once all of its parents have been processed.
            if len(nodes) <= 0: return []
            nodes_processed = set(visited.keys())
            nodes_processing = set()
            for node in nodes:
                children = set(DAG.successors(node))
                for child in children:
                    parents = set(DAG.predecessors(child))
                    if parents.issubset(nodes_processed):
                        nodes_processing.add(child)
            # One row per node on this level: [level, 0, ..., 0].
            embedding_nodes_current = [[level] + [0 for _ in range(in_degree_fixed)] for _ in range(len(nodes))]
            if level > 1:
                # Fill the predecessor slots with the parents' levels.
                node_index = 0
                for node in nodes:
                    embedding_index = 1
                    for p in set(DAG.predecessors(node)):
                        if embedding_index > in_degree_fixed:
                            # Defensive: should not happen after in-degree capping.
                            print("Nodes in degree out of permission")
                            return []
                        embedding_nodes_current[node_index][embedding_index] = visited[p]
                        embedding_index += 1
                    node_index += 1
            for node_processing in nodes_processing:
                visited[node_processing] = level+1
            return embedding_nodes_current + embedding_method(nodes_processing, level+1, in_degree_fixed, visited)
        return embedding_method(set(visited.keys()), 1, in_degree_fixed, visited)
    def _in_degree_modification(self, G, in_degree_fixed):
        """Cap every node's in-degree at in_degree_fixed by dropping excess
        edges, then re-attach the orphaned tails to under-full nodes."""
        nodes_discon = set()  # tails of dropped edges, to be re-attached
        node_in_degree = defaultdict(int)
        edges = []
        for u, v in G.edges():
            if node_in_degree[v] == in_degree_fixed:
                nodes_discon.add(u)
            else:
                node_in_degree[v] += 1
                edges.append((u, v))
        # Re-home dropped tails onto nodes that still have spare in-degree.
        for node in node_in_degree:
            if len(nodes_discon) <= 0:
                break
            if node_in_degree[node] < in_degree_fixed:
                nodes_to_be_cleared = set()
                for node_loss in nodes_discon:
                    if node_loss != node and (node_loss, node) not in edges:
                        edges.append((node_loss, node))
                        nodes_to_be_cleared.add(node_loss)
                        node_in_degree[node] += 1
                        if node_in_degree[node] >= in_degree_fixed:
                            break
                nodes_discon = nodes_discon.difference(nodes_to_be_cleared)
        return nx.Graph(edges)
    def __len__(self):
        return self.size
    def __getitem__(self, idx):
        #return self.data[idx], self.label[idx]
        return self.data[idx]
"""
if __name__ == '__main__':
myDataset = TopoSortDataset(size=20, num_samples=128000)
torch.save(myDataset, "TopoSort_Dataset_Training.pt")
#dataset = torch.load("TopoSort_Dataset_Training.pt", map_location=torch.device('cuda'))
#training_dataloader = DataLoader(dataset, batch_size=10)
#for batch_id, batch in enumerate(tqdm(training_dataloader)):
# print(batch_id)
# print("training_data: ", batch[0])
# print("training_label: ", batch[1])
"""
| 5,644 | 38.753521 | 131 | py |
RESPECT | RESPECT-main/problems/toposort/problem_toposort_1.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
from problems.toposort.state_toposort import StateTopoSort
from utils.beam_search import beam_search
from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check, graph_sorting_DAG
import networkx as nx
import numpy as np
class TopoSort(object):
    """Problem definition for neural topological sorting (variant 1).

    Samples are encoded adjacency-style row matrices (the model permutes the
    rows); ``get_costs`` scores a predicted permutation against the labelled
    topological order via cosine similarity.
    """
    NAME = 'toposort'
    @staticmethod
    def get_costs(dataset, pi, measures=False, plot_data=False):
        """Score predicted node permutation ``pi`` against the reference order.

        :param dataset: tuple ``(graphs, label_indices)`` of batched tensors.
        :param pi: predicted permutation of the graph rows, shape (batch, n).
        :param measures: if True, additionally compute recall/radius metrics.
            NOTE(review): this branch reads ``indices``/``y_`` that are only
            defined in commented-out code above, so it would raise NameError
            in this variant -- confirm it is never enabled here.
        :param plot_data: unused in this variant (plotting code is disabled).
        :return: 9-tuple ``(1 - cosine_similarity, None, misMatch_y,
            misMatch_x, recall_accuracy, radius_mean, radius_max,
            recall_accuracy_max, recall_accuracy_min)``; metric slots are
            None when ``measures`` is False.
        """
        # Gather dataset in order of graph nodes
        label_indices = dataset[1]
        d = dataset[0].gather(1, pi.unsqueeze(-1).expand_as(dataset[0]))
        # Recover each node's original position: the diagonal marker value 2
        # identifies, per permuted row, which column it came from.
        idx = torch.stack([torch.stack([torch.nonzero(d[i]==2)[j][1] for j in range(d.shape[1])]) for i in range(d.shape[0])])
        """
        if plot_data:
            print("learned dataset is: ")
            for element in d[-1].cpu():
                print(element)
            print("end of batch")
        """
        # generated index compared to optimal index; to be used for cost function below
        #y_ = d[:,:,1].view(d.shape[0],d.shape[1])
        #print(d.shape, list(y_.shape), ((d[:, 1:] - d[:, :-1]).norm(p=2, dim=2).sum(1) + (d[:, 0] - d[:, -1]).norm(p=2, dim=1)).shape)
        #sorted, indices = torch.sort(y_, dim=1)
        #New cost strategy to satisfy level sorting
        #cost = level_sorting(sorted, y_)
        #cost, indices = level_sorting_xy_pairs(indices, d)
        #_, indices = level_sorting_xy_pairs(indices, d)
        """
        idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
        if mode == 1:
            indices = deep_sort_x(indices, d)
        #print(indices.shape, idx.shape)
        # sorting cost is measured with Cosine Similarity
        cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        cost = cos(indices.cuda(),idx).cuda()
        """
        #indices = graph_sorting_DAG(d)
        #idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
        #misMatch_y = torch.sub(y_, sorted)
        #misMatch_x = torch.sub(indices, idx)
        #cost = 0.2 * torch.count_nonzero(misMatch_x, dim=1) + 0.8 * torch.count_nonzero(misMatch_y, dim=1)
        if measures:
            # NOTE(review): `indices` below is undefined in this variant (its
            # definition is commented out above) -- this path will NameError.
            #recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = order_check(idx, indices.cuda())
            graph_size = indices.shape[1]
            full_recall = (graph_size-1) * graph_size / 2.
            recall_elementWise = torch.cat([torch.sum(indices[:, (i+1):]>indices[:, i].view(indices.shape[0], -1), dim=1).view(indices.shape[0], -1) for i in range(indices.shape[1]-1)], dim=1)
            recall = torch.sum(recall_elementWise, dim=1)
            recall_accuracy_max, recall_accuracy_min = torch.max(recall) / full_recall, torch.min(recall) / full_recall
            recall_accuracy = torch.mul(recall, torch.FloatTensor([1.]).cuda()).mean() / full_recall
            #idx = torch.tensor([i for i in range(indices.shape[1])]).cuda()
            #idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
            radius_elementWise = torch.abs(indices - idx)
            radius_mean = torch.mean(torch.mul(radius_elementWise, torch.FloatTensor([1.]).cuda()), 1).mean()
            radius_max = torch.max(radius_elementWise)
            #recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None
            #misMatch_y = torch.nonzero(misMatch_y).shape[0] / y_.shape[0]
            #misMatch_x = torch.nonzero(misMatch_x).shape[0] / idx.shape[0]
            misMatch_y = torch.FloatTensor([torch.nonzero(torch.sub(sorted, y_)).shape[0] / y_.shape[0]]).cuda()
            misMatch_x = torch.FloatTensor([torch.nonzero(torch.sub(indices, idx)).shape[0] / idx.shape[0]]).cuda()
            #misMatch_y = None
            #misMatch_x = None
            #recall_accuracy, radius_mean, radius_max = None, None, None
        else:
            misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None, None
        # Cost: cosine similarity between the labelled order and the recovered
        # positions; the caller minimizes 1 - similarity.
        cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        cost = cos(label_indices, idx).cuda()
        #print(cost.shape)
        return 1-cost, None, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        #return cost.cuda(), None, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
    @staticmethod
    def make_dataset(*args, **kwargs):
        """Factory for the matching dataset class."""
        return TopoSortDataset(*args, **kwargs)
    @staticmethod
    def make_state(*args, **kwargs):
        """Factory for the decoding state object."""
        return StateTopoSort.initialize(*args, **kwargs)
    @staticmethod
    def beam_search(input, beam_size, expand_size=None,
                    compress_mask=False, model=None, max_calc_batch_size=4096):
        """Run beam search over the decoding state using *model* for expansions."""
        assert model is not None, "Provide model"
        fixed = model.precompute_fixed(input)
        def propose_expansions(beam):
            return model.propose_expansions(
                beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
            )
        #state = TSP.make_state(
        state = TopoSort.make_state(
            #input, visited_dtype=torch.int64 if compress_mask else torch.uint8
            input, visited_dtype=torch.int64 if compress_mask else torch.bool
        )
        return beam_search(state, beam_size, propose_expansions)
class TopoSortDataset(Dataset):
    """Synthetic DAG dataset (variant 1).

    Each sample is a size x size encoded adjacency matrix (2 on the diagonal,
    1/-1 marking edge direction) and its label is one valid topological order
    of the relabelled DAG.
    """
    # 50, 1000000
    def __init__(self, filename=None, size=25, num_samples=1000, offset=0, distribution=None, seed=0):
        """Load samples from a ``.pkl`` file, or generate random connected DAGs.

        :param filename: optional pickle of pre-built samples (labels are not
            loaded in that branch).
        :param size: number of graph nodes per sample.
        :param num_samples: number of samples to load/generate.
        :param offset: slice offset into the pickled data.
        :param distribution: unused.
        :param seed: if > 0, seeds ``random`` for reproducible generation.
        """
        super(TopoSortDataset, self).__init__()
        self.data = []
        self.label = []
        if filename is not None:
            assert os.path.splitext(filename)[1] == '.pkl'
            with open(filename, 'rb') as f:
                data = pickle.load(f)
            self.data = [torch.FloatTensor(row) for row in (data[offset:offset+num_samples])]
        else:
            self.data = []
            if seed > 0:
                random.seed(seed)
            order = [i for i in range(size)]
            for _ in range(num_samples):
                # Resample until the undirected graph is connected and its
                # (u, v)-oriented version is a DAG.
                G = nx.gnp_random_graph(size, random.random())
                D = None
                while True:
                    if nx.is_connected(G):
                        D = nx.DiGraph([(u, v) for u, v in G.edges()])
                        if nx.is_directed_acyclic_graph(D):
                            break
                    G = nx.gnp_random_graph(size, random.random())
                # Shuffle node identities so the topological order is nontrivial.
                random.shuffle(order)
                mapping = {idx:item for idx, item in enumerate(order)}
                DAG = nx.relabel.relabel_nodes(D, mapping)
                graph = np.diag([2]*size)
                for u, v in DAG.edges():
                    graph[u][v] = 1
                    graph[v][u] = -1
                # Rows are sorted by a fixed key so input order carries no label info.
                self.data.append(torch.FloatTensor(sorted(graph, key=lambda x : x[0]**2- x[-1]**3)))
                self.label.append(torch.tensor(list(nx.topological_sort(DAG))))
        """
        else:
            # Sample points randomly in [0, 1] square
            #self.data = [torch.FloatTensor(size, 2).uniform_(0, 1) for i in range(num_samples)]
            self.data = []
            for _ in range(num_samples):
                #random.seed(seed)
                #seed += 10
                graph = []
                #levels = random.randint(1, 20)
                levels = random.randint(1, 25)
                #levels = random.randint(50, 100)
                num_level = size // levels
                for level in range(levels-1):
                    y_axis = random.random()
                    for j in range(num_level):
                        graph.append([random.random(), y_axis])
                y_axis = random.random()
                for k in range(num_level+size%levels):
                    graph.append([random.random(), y_axis])
                graph.sort(key = lambda i: i[0]**2-i[1]**2)
                self.data.append(torch.FloatTensor(graph))
                for i in range(size):
                    graph.append([i,i+random.randint(1,10)])
                #print(torch.FloatTensor(graph).shape)
                self.data.append(torch.nn.functional.normalize(torch.FloatTensor(graph)))
        """
        # Cached length; __len__ reads this.
        self.size = len(self.data)
    def __len__(self):
        """Number of samples."""
        return self.size
    def __getitem__(self, idx):
        """Return ``(graph_matrix, topological_order_label)`` at *idx*."""
        return self.data[idx], self.label[idx]
| 8,767 | 44.430052 | 192 | py |
RESPECT | RESPECT-main/problems/toposort/problem_toposort_multipleTraining.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
from problems.toposort.state_toposort import StateTopoSort
from utils.beam_search import beam_search
#from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check
from utils import smart_sort
import networkx as nx
import numpy as np
class TopoSort(object):
    """Problem definition for neural topological sorting, dual-policy variant.

    ``get_costs`` scores two predicted permutations (``pi_0``, ``pi_1``)
    against the sorted label sequence via cosine similarity.
    """
    NAME = 'toposort'
    @staticmethod
    def get_costs(dataset, pi_0, pi_1, labels, measures=False, plot_data=False):
        """Score two predicted permutations against the sorted labels.

        :param dataset: batched sample tensor (only used when ``plot_data``).
        :param pi_0: first policy's permutation, shape (batch, n).
        :param pi_1: second policy's permutation, shape (batch, n).
        :param labels: per-node level labels; sorting them gives the target.
        :param measures: if True, also compute recall/mismatch diagnostics.
        :param plot_data: if True, append learned vs. real sequences to a
            hard-coded log file (side effect: file append + global torch
            print options change).
        :return: 14-tuple ``(1-cost_0, 1-cost_1, None, misMatch_0, misMatch_1,
            None, recall_accuracy_0_mean/max/min, recall_accuracy_1_mean/max/min,
            radius_mean, radius_max)``; metric slots are None unless
            ``measures`` is True.
        """
        #def get_costs(dataset, pi, measures=False, plot_data=False):
        # Gather dataset in order of graph nodes
        #d = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
        order_learned_0 = smart_sort(labels, pi_0, dim=2)
        order_learned_1 = smart_sort(labels, pi_1, dim=2)
        order_sorted, indices = torch.sort(labels, dim=1)
        order_learned_0 = order_learned_0.cuda()
        order_learned_1 = order_learned_1.cuda()
        order_sorted = order_sorted.cuda()
        cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        cost_0 = cos(order_learned_0, order_sorted).cuda()
        cost_1 = cos(order_learned_1, order_sorted).cuda()
        if plot_data:
            # Dump real/learned sequences for offline inspection.
            dataset_learning_sequence_0 = dataset.gather(1, pi_0.unsqueeze(-1).expand_as(dataset))
            dataset_learning_sequence_1 = dataset.gather(1, pi_1.unsqueeze(-1).expand_as(dataset))
            label_learning_sequence_0 = dataset_learning_sequence_0[:,:,-2].view(dataset_learning_sequence_0.shape[0], dataset_learning_sequence_0.shape[1])
            label_learning_sequence_1 = dataset_learning_sequence_1[:,:,-2].view(dataset_learning_sequence_1.shape[0], dataset_learning_sequence_1.shape[1])
            dataset_labeling_sequence = dataset.gather(1, indices.unsqueeze(-1).expand_as(dataset))
            #layers_order = d[:,:,0].view(d.shape[0], d.shape[1])
            file = open(r"graph_data_collection/graph_data_collection_largeSize3/graph30_200_128k_w35_35_weightFree.txt", "a")
            #file = open(r"graph_data_collection/graph30_50_128k_w35_35_weightFree.txt", "a")
            torch.set_printoptions(profile="full")
            for i in range(dataset.shape[0]):
                file.write("real sequence is:\n")
                #file.writelines([str(sequence.cpu().numpy())+"\n" for sequence in dataset_labeling_sequence[i]])
                file.write(str(dataset_labeling_sequence[i]) + "\n")
                file.write("learning_0 sequence is:\n")
                #file.write(str(dataset_learning_sequence[i]) + "\n")
                file.write(str(label_learning_sequence_0[i]) + "\n")
                file.write("learning_1 sequence is:\n")
                file.write(str(label_learning_sequence_1[i]) + "\n")
                file.write("Nodes Level Distribution:\n")
                file.write(str(order_sorted[i]) + "\n")
                file.write("end\n")
            file.close()
            #print(layers_order[-1].cpu())
            #for element in order_learned[-1].cpu():
            #    print(element)
            #print("end of batch")
        # generated index compared to optimal index; to be used for cost function below
        #order_learned = d[:,:,0].view(d.shape[0],d.shape[1])
        #print(d.shape, list(y_.shape), ((d[:, 1:] - d[:, :-1]).norm(p=2, dim=2).sum(1) + (d[:, 0] - d[:, -1]).norm(p=2, dim=1)).shape)
        #order_sorted, indices = torch.sort(order_learned, dim=1)
        #New cost strategy to satisfy level sorting
        #cost = level_sorting(sorted, y_)
        #cost, indices = level_sorting_xy_pairs(indices, d)
        #_, indices = level_sorting_xy_pairs(indices, d)
        """
        idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
        if mode == 1:
            indices = deep_sort_x(indices, d)
        #print(indices.shape, idx.shape)
        # sorting cost is measured with Cosine Similarity
        cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        cost = cos(indices.cuda(),idx).cuda()
        """
        #misMatch_y = torch.sub(y_, sorted)
        #misMatch_x = torch.sub(indices, idx)
        #cost = 0.2 * torch.count_nonzero(misMatch_x, dim=1) + 0.8 * torch.count_nonzero(misMatch_y, dim=1)
        #order_learned = order_learned.cuda()
        #order_sorted = order_sorted.cuda()
        #cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        #cost = cos(order_learned, order_sorted).cuda()
        if measures:
            # Pairwise "recall": for each position, count later positions with
            # a larger level; compared against the same count on the sorted
            # target to get an accuracy ratio per policy.
            recall_elementWise_0 = torch.cat([torch.sum(order_learned_0[:, (i+1):] > order_learned_0[:, i].view(order_learned_0.shape[0], -1), dim=1).view(order_learned_0.shape[0], -1) for i in range(order_learned_0.shape[1]-1)], dim=1)
            recall_elementWise_1 = torch.cat([torch.sum(order_learned_1[:, (i+1):] > order_learned_1[:, i].view(order_learned_1.shape[0], -1), dim=1).view(order_learned_1.shape[0], -1) for i in range(order_learned_1.shape[1]-1)], dim=1)
            full_recall_elementWise = torch.cat([torch.sum(order_sorted[:, (i+1):] > order_sorted[:, i].view(order_sorted.shape[0], -1), dim=1).view(order_sorted.shape[0], -1) for i in range(order_sorted.shape[1]-1)], dim=1)
            recall_0 = torch.sum(recall_elementWise_0.cuda(), dim=1)
            recall_1 = torch.sum(recall_elementWise_1.cuda(), dim=1)
            full_recall = torch.sum(full_recall_elementWise.cuda(), dim=1)
            recall_accuracy_0 = torch.div(recall_0.cuda(), full_recall.cuda())
            recall_accuracy_1 = torch.div(recall_1.cuda(), full_recall.cuda())
            recall_accuracy_0_mean, recall_accuracy_0_max, recall_accuracy_0_min = recall_accuracy_0.mean(), torch.max(recall_accuracy_0), torch.min(recall_accuracy_0)
            recall_accuracy_1_mean, recall_accuracy_1_max, recall_accuracy_1_min = recall_accuracy_1.mean(), torch.max(recall_accuracy_1), torch.min(recall_accuracy_1)
            #diff = torch.abs(torch.sub(order_learned, order_sorted))
            misMatch_0 = torch.FloatTensor([torch.nonzero(torch.sub(order_learned_0, order_sorted)).shape[0] / indices.shape[0]]).cuda()
            misMatch_1 = torch.FloatTensor([torch.nonzero(torch.sub(order_learned_1, order_sorted)).shape[0] / indices.shape[0]]).cuda()
            radius_mean, radius_max = None, None
            """
            graph_size = indices.shape[1]
            full_recall = (graph_size-1) * graph_size / 2.
            recall_elementWise = torch.cat([torch.sum(indices[:, (i+1):]>indices[:, i].view(indices.shape[0], -1), dim=1).view(indices.shape[0], -1) for i in range(indices.shape[1]-1)], dim=1)
            recall = torch.sum(recall_elementWise, dim=1)
            recall_accuracy_max, recall_accuracy_min = torch.max(recall) / full_recall, torch.min(recall) / full_recall
            recall_accuracy = torch.mul(recall, torch.FloatTensor([1.]).cuda()).mean() / full_recall
            #idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
            #idx = torch.tensor([i for i in range(indices.shape[1])]).cuda()
            radius_elementWise = torch.abs(indices - idx)
            radius_mean = torch.mean(torch.mul(radius_elementWise, torch.FloatTensor([1.]).cuda()), 1).mean()
            radius_max = torch.max(radius_elementWise)
            """
            #recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None
            #misMatch_y = torch.nonzero(misMatch_y).shape[0] / y_.shape[0]
            #misMatch_x = torch.nonzero(misMatch_x).shape[0] / idx.shape[0]
            #misMatch_y = torch.FloatTensor([torch.nonzero(torch.sub(sorted, y_)).shape[0] / y_.shape[0]]).cuda()
            #misMatch_x = torch.FloatTensor([torch.nonzero(torch.sub(indices, idx)).shape[0] / idx.shape[0]]).cuda()
            #misMatch_y = None
            #misMatch_x = None
            #recall_accuracy, radius_mean, radius_max = None, None, None
        else:
            #misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None, None
            misMatch_0, misMatch_1, recall_accuracy_0_mean, recall_accuracy_0_max, recall_accuracy_0_min, recall_accuracy_1_mean, recall_accuracy_1_max, recall_accuracy_1_min, radius_mean, radius_max = None, None, None, None, None, None, None, None, None, None
        return 1-cost_0, 1-cost_1, None, misMatch_0, misMatch_1, None, recall_accuracy_0_mean, recall_accuracy_0_max, recall_accuracy_0_min, recall_accuracy_1_mean, recall_accuracy_1_max, recall_accuracy_1_min, radius_mean, radius_max
        #return cost.cuda(), None, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
    @staticmethod
    def make_dataset(*args, **kwargs):
        """Factory for the matching dataset class."""
        return TopoSortDataset(*args, **kwargs)
    @staticmethod
    def make_state(*args, **kwargs):
        """Factory for the decoding state object."""
        return StateTopoSort.initialize(*args, **kwargs)
    @staticmethod
    def beam_search(input, beam_size, expand_size=None,
                    compress_mask=False, model=None, max_calc_batch_size=4096):
        """Run beam search over the decoding state using *model* for expansions."""
        assert model is not None, "Provide model"
        fixed = model.precompute_fixed(input)
        def propose_expansions(beam):
            return model.propose_expansions(
                beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
            )
        #state = TSP.make_state(
        state = TopoSort.make_state(
            #input, visited_dtype=torch.int64 if compress_mask else torch.uint8
            input, visited_dtype=torch.int64 if compress_mask else torch.bool
        )
        return beam_search(state, beam_size, propose_expansions)
class TopoSortDataset(Dataset):
    """Synthetic level-embedding dataset (dual-policy training variant).

    Each node is a 4-vector ``[level, p0, p1, p2]`` where p* name (smaller)
    levels this node presumably depends on; rows are shuffled before storage.
    """
    # 50, 1000000
    def __init__(self, filename=None, size=25, num_samples=1000, offset=0, distribution=None, seed=0):
        """Load samples from a ``.pkl`` file or generate random level graphs.

        NOTE(review): ``self.data_set`` is assigned but never used; and when
        the drawn ``levels`` exceeds *size*, ``graph = [graph[i] for i in order]``
        silently keeps only the first *size* rows -- confirm both are intended.
        """
        super(TopoSortDataset, self).__init__()
        self.data_set = []
        if filename is not None:
            assert os.path.splitext(filename)[1] == '.pkl'
            with open(filename, 'rb') as f:
                data = pickle.load(f)
            self.data = [torch.FloatTensor(row) for row in (data[offset:offset+num_samples])]
        else:
            # Sample points randomly in [0, 1] square
            #self.data = [torch.FloatTensor(size, 2).uniform_(0, 1) for i in range(num_samples)]
            self.data = []
            if seed > 0:
                random.seed(seed)
            for _ in range(num_samples):
                graph = []
                #levels = random.randint(2, 10)
                #levels = random.randint(1, 25)
                levels = random.randint(800, 1000)
                # Distribute the remaining node budget round-robin over levels.
                level = [1 for _ in range(levels)]
                remaining = size - levels
                traverse = 0
                while remaining > 0:
                    addition = random.randint(0, remaining)
                    level[traverse % levels] += addition
                    traverse += 1
                    remaining -= addition
                # Levels with < 3 nodes cannot absorb many references; cap them.
                caution = [i+1 for i, val in enumerate(level) if val < 3]
                #num_level = size // levels
                for i in range(levels):
                    for j in range(level[i]):
                        embedding = [i+1, random.randint(0, i), random.randint(0, i), random.randint(0, i)]
                        if max(embedding[1:]) < i:
                            embedding[random.randint(1, 3)] = i
                        for constraint in caution:
                            while embedding[1:].count(constraint) > level[constraint-1]:
                                embedding[embedding.index(constraint)] = -1
                        graph.append(embedding)
                order = [i for i in range(size)]
                random.shuffle(order)
                graph = [graph[i] for i in order]
                #graph.sort(key = lambda i: i[0]**2-i[1]**2+i[2]**2-i[3]**2)
                self.data.append(torch.FloatTensor(graph))
                #for i in range(size):
                #graph.append([i,i+random.randint(1,10)])
                #print(torch.FloatTensor(graph).shape)
                #self.data.append(torch.nn.functional.normalize(torch.FloatTensor(graph)))
        # Cached length; __len__ reads this.
        self.size = len(self.data)
    def __len__(self):
        """Number of samples."""
        return self.size
    def __getitem__(self, idx):
        """Return the (size, 4) sample tensor at *idx*."""
        return self.data[idx]
| 12,543 | 51.485356 | 260 | py |
RESPECT | RESPECT-main/problems/toposort/problem_toposort_temporary_idea.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
from problems.toposort.state_toposort import StateTopoSort
from utils.beam_search import beam_search
from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check
import networkx as nx
import numpy as np
class TopoSort(object):
    """Problem definition for neural topological sorting (temporary-idea variant).

    The level label is stored in column 0 of each node embedding; the cost is
    1 - cosine similarity between the learned ordering of levels and the same
    levels sorted ascending.
    """
    NAME = 'toposort'
    @staticmethod
    def get_costs(dataset, pi, measures=False, plot_data=False):
        """Score predicted permutation ``pi`` of the batched node embeddings.

        :param dataset: batched node-embedding tensor, level in column 0.
        :param pi: predicted permutation, shape (batch, n).
        :param measures: if True, also compute recall/mismatch diagnostics.
        :param plot_data: unused in this variant (plotting code is disabled).
        :return: 9-tuple ``(1-cost, None, misMatch, None,
            recall_accuracy_mean, radius_mean, radius_max,
            recall_accuracy_max, recall_accuracy_min)``; metric slots are
            None when ``measures`` is False.
        """
        # Gather dataset in order of graph nodes
        d = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
        """
        if plot_data:
            print("learned dataset is: ")
            for element in d[-1].cpu():
                print(element)
            print("end of batch")
        """
        # generated index compared to optimal index; to be used for cost function below
        order_learned = d[:,:,0].view(d.shape[0],d.shape[1])
        #print(d.shape, list(y_.shape), ((d[:, 1:] - d[:, :-1]).norm(p=2, dim=2).sum(1) + (d[:, 0] - d[:, -1]).norm(p=2, dim=1)).shape)
        order_sorted, indices = torch.sort(order_learned, dim=1)
        #New cost strategy to satisfy level sorting
        #cost = level_sorting(sorted, y_)
        #cost, indices = level_sorting_xy_pairs(indices, d)
        #_, indices = level_sorting_xy_pairs(indices, d)
        """
        idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
        if mode == 1:
            indices = deep_sort_x(indices, d)
        #print(indices.shape, idx.shape)
        # sorting cost is measured with Cosine Similarity
        cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        cost = cos(indices.cuda(),idx).cuda()
        """
        #misMatch_y = torch.sub(y_, sorted)
        #misMatch_x = torch.sub(indices, idx)
        #cost = 0.2 * torch.count_nonzero(misMatch_x, dim=1) + 0.8 * torch.count_nonzero(misMatch_y, dim=1)
        order_learned = order_learned.cuda()
        order_sorted = order_sorted.cuda()
        cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        cost = cos(order_learned, order_sorted).cuda()
        if measures:
            # Pairwise "recall": positions whose later entries carry a larger
            # level, measured on the learned order vs. the sorted target.
            recall_elementWise = torch.cat([torch.sum(order_learned[:, (i+1):] > order_learned[:, i].view(order_learned.shape[0], -1), dim=1).view(order_learned.shape[0], -1) for i in range(order_learned.shape[1]-1)], dim=1)
            full_recall_elementWise = torch.cat([torch.sum(order_sorted[:, (i+1):] > order_sorted[:, i].view(order_sorted.shape[0], -1), dim=1).view(order_sorted.shape[0], -1) for i in range(order_sorted.shape[1]-1)], dim=1)
            recall = torch.sum(recall_elementWise.cuda(), dim=1)
            full_recall = torch.sum(full_recall_elementWise.cuda(), dim=1)
            recall_accuracy = torch.div(recall.cuda(), full_recall.cuda())
            recall_accuracy_mean, recall_accuracy_max, recall_accuracy_min = recall_accuracy.mean(), torch.max(recall_accuracy), torch.min(recall_accuracy)
            #diff = torch.abs(torch.sub(order_learned, order_sorted))
            misMatch = torch.FloatTensor([torch.nonzero(torch.sub(order_learned, order_sorted)).shape[0] / indices.shape[0]]).cuda()
            radius_mean, radius_max = None, None
            """
            graph_size = indices.shape[1]
            full_recall = (graph_size-1) * graph_size / 2.
            recall_elementWise = torch.cat([torch.sum(indices[:, (i+1):]>indices[:, i].view(indices.shape[0], -1), dim=1).view(indices.shape[0], -1) for i in range(indices.shape[1]-1)], dim=1)
            recall = torch.sum(recall_elementWise, dim=1)
            recall_accuracy_max, recall_accuracy_min = torch.max(recall) / full_recall, torch.min(recall) / full_recall
            recall_accuracy = torch.mul(recall, torch.FloatTensor([1.]).cuda()).mean() / full_recall
            #idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
            #idx = torch.tensor([i for i in range(indices.shape[1])]).cuda()
            radius_elementWise = torch.abs(indices - idx)
            radius_mean = torch.mean(torch.mul(radius_elementWise, torch.FloatTensor([1.]).cuda()), 1).mean()
            radius_max = torch.max(radius_elementWise)
            """
            #recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None
            #misMatch_y = torch.nonzero(misMatch_y).shape[0] / y_.shape[0]
            #misMatch_x = torch.nonzero(misMatch_x).shape[0] / idx.shape[0]
            #misMatch_y = torch.FloatTensor([torch.nonzero(torch.sub(sorted, y_)).shape[0] / y_.shape[0]]).cuda()
            #misMatch_x = torch.FloatTensor([torch.nonzero(torch.sub(indices, idx)).shape[0] / idx.shape[0]]).cuda()
            #misMatch_y = None
            #misMatch_x = None
            #recall_accuracy, radius_mean, radius_max = None, None, None
        else:
            #misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None, None
            misMatch, recall_accuracy_mean, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None
        return 1-cost, None, misMatch, None, recall_accuracy_mean, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        #return cost.cuda(), None, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
    @staticmethod
    def make_dataset(*args, **kwargs):
        """Factory for the matching dataset class."""
        return TopoSortDataset(*args, **kwargs)
    @staticmethod
    def make_state(*args, **kwargs):
        """Factory for the decoding state object."""
        return StateTopoSort.initialize(*args, **kwargs)
    @staticmethod
    def beam_search(input, beam_size, expand_size=None,
                    compress_mask=False, model=None, max_calc_batch_size=4096):
        """Run beam search over the decoding state using *model* for expansions."""
        assert model is not None, "Provide model"
        fixed = model.precompute_fixed(input)
        def propose_expansions(beam):
            return model.propose_expansions(
                beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
            )
        #state = TSP.make_state(
        state = TopoSort.make_state(
            #input, visited_dtype=torch.int64 if compress_mask else torch.uint8
            input, visited_dtype=torch.int64 if compress_mask else torch.bool
        )
        return beam_search(state, beam_size, propose_expansions)
class TopoSortDataset(Dataset):
    """Synthetic level-embedding dataset (temporary-idea variant).

    Each node is a 4-vector ``[level, p0, p1, p2]``; here ``levels`` is drawn
    from randint(2, 20), so samples usually have multiple nodes per level.
    """
    # 50, 1000000
    def __init__(self, filename=None, size=25, num_samples=1000, offset=0, distribution=None, seed=0):
        """Load samples from a ``.pkl`` file or generate random level graphs.

        NOTE(review): ``self.data_set`` is assigned but never used -- confirm
        it is a leftover.
        """
        super(TopoSortDataset, self).__init__()
        self.data_set = []
        if filename is not None:
            assert os.path.splitext(filename)[1] == '.pkl'
            with open(filename, 'rb') as f:
                data = pickle.load(f)
            self.data = [torch.FloatTensor(row) for row in (data[offset:offset+num_samples])]
        else:
            # Sample points randomly in [0, 1] square
            #self.data = [torch.FloatTensor(size, 2).uniform_(0, 1) for i in range(num_samples)]
            self.data = []
            if seed > 0:
                random.seed(seed)
            for _ in range(num_samples):
                graph = []
                levels = random.randint(2, 20)
                #levels = random.randint(1, 25)
                #levels = random.randint(50, 100)
                # Distribute the remaining node budget round-robin over levels.
                level = [1 for _ in range(levels)]
                remaining = size - levels
                traverse = 0
                while remaining > 0:
                    addition = random.randint(0, remaining)
                    level[traverse % levels] += addition
                    traverse += 1
                    remaining -= addition
                # Levels with < 3 nodes cannot absorb many references; cap them.
                caution = [i+1 for i, val in enumerate(level) if val < 3]
                #num_level = size // levels
                for i in range(levels):
                    for j in range(level[i]):
                        embedding = [i+1, random.randint(0, i), random.randint(0, i), random.randint(0, i)]
                        if max(embedding[1:]) < i:
                            embedding[random.randint(1, 3)] = i
                        for constraint in caution:
                            while embedding[1:].count(constraint) > level[constraint-1]:
                                embedding[embedding.index(constraint)] = -1
                        graph.append(embedding)
                order = [i for i in range(size)]
                random.shuffle(order)
                graph = [graph[i] for i in order]
                #graph.sort(key = lambda i: i[0]**2-i[1]**2+i[2]**2-i[3]**2)
                self.data.append(torch.FloatTensor(graph))
                #for i in range(size):
                #graph.append([i,i+random.randint(1,10)])
                #print(torch.FloatTensor(graph).shape)
                #self.data.append(torch.nn.functional.normalize(torch.FloatTensor(graph)))
        # Cached length; __len__ reads this.
        self.size = len(self.data)
    def __len__(self):
        """Number of samples."""
        return self.size
    def __getitem__(self, idx):
        """Return the (size, 4) sample tensor at *idx*."""
        return self.data[idx]
| 9,252 | 46.943005 | 224 | py |
RESPECT | RESPECT-main/problems/toposort/problem_toposort_2.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
from problems.toposort.state_toposort import StateTopoSort
from utils.beam_search import beam_search
from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check
import networkx as nx
import numpy as np
class TopoSort(object):
    """Problem definition for neural topological sorting (variant 2).

    Identical cost scheme to the temporary-idea variant: level labels sit in
    column 0 of each node embedding and the cost is 1 - cosine similarity of
    the learned vs. sorted level sequence.
    """
    NAME = 'toposort'
    @staticmethod
    def get_costs(dataset, pi, measures=False, plot_data=False):
        """Score predicted permutation ``pi`` of the batched node embeddings.

        :param dataset: batched node-embedding tensor, level in column 0.
        :param pi: predicted permutation, shape (batch, n).
        :param measures: if True, also compute recall/mismatch diagnostics.
        :param plot_data: unused in this variant (plotting code is disabled).
        :return: 9-tuple ``(1-cost, None, misMatch, None,
            recall_accuracy_mean, radius_mean, radius_max,
            recall_accuracy_max, recall_accuracy_min)``; metric slots are
            None when ``measures`` is False.
        """
        # Gather dataset in order of graph nodes
        d = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
        """
        if plot_data:
            print("learned dataset is: ")
            for element in d[-1].cpu():
                print(element)
            print("end of batch")
        """
        # generated index compared to optimal index; to be used for cost function below
        order_learned = d[:,:,0].view(d.shape[0],d.shape[1])
        #print(d.shape, list(y_.shape), ((d[:, 1:] - d[:, :-1]).norm(p=2, dim=2).sum(1) + (d[:, 0] - d[:, -1]).norm(p=2, dim=1)).shape)
        order_sorted, indices = torch.sort(order_learned, dim=1)
        #New cost strategy to satisfy level sorting
        #cost = level_sorting(sorted, y_)
        #cost, indices = level_sorting_xy_pairs(indices, d)
        #_, indices = level_sorting_xy_pairs(indices, d)
        """
        idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
        if mode == 1:
            indices = deep_sort_x(indices, d)
        #print(indices.shape, idx.shape)
        # sorting cost is measured with Cosine Similarity
        cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        cost = cos(indices.cuda(),idx).cuda()
        """
        #misMatch_y = torch.sub(y_, sorted)
        #misMatch_x = torch.sub(indices, idx)
        #cost = 0.2 * torch.count_nonzero(misMatch_x, dim=1) + 0.8 * torch.count_nonzero(misMatch_y, dim=1)
        order_learned = order_learned.cuda()
        order_sorted = order_sorted.cuda()
        cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
        cost = cos(order_learned, order_sorted).cuda()
        if measures:
            # Pairwise "recall": positions whose later entries carry a larger
            # level, measured on the learned order vs. the sorted target.
            recall_elementWise = torch.cat([torch.sum(order_learned[:, (i+1):] > order_learned[:, i].view(order_learned.shape[0], -1), dim=1).view(order_learned.shape[0], -1) for i in range(order_learned.shape[1]-1)], dim=1)
            full_recall_elementWise = torch.cat([torch.sum(order_sorted[:, (i+1):] > order_sorted[:, i].view(order_sorted.shape[0], -1), dim=1).view(order_sorted.shape[0], -1) for i in range(order_sorted.shape[1]-1)], dim=1)
            recall = torch.sum(recall_elementWise.cuda(), dim=1)
            full_recall = torch.sum(full_recall_elementWise.cuda(), dim=1)
            recall_accuracy = torch.div(recall.cuda(), full_recall.cuda())
            recall_accuracy_mean, recall_accuracy_max, recall_accuracy_min = recall_accuracy.mean(), torch.max(recall_accuracy), torch.min(recall_accuracy)
            #diff = torch.abs(torch.sub(order_learned, order_sorted))
            misMatch = torch.FloatTensor([torch.nonzero(torch.sub(order_learned, order_sorted)).shape[0] / indices.shape[0]]).cuda()
            radius_mean, radius_max = None, None
            """
            graph_size = indices.shape[1]
            full_recall = (graph_size-1) * graph_size / 2.
            recall_elementWise = torch.cat([torch.sum(indices[:, (i+1):]>indices[:, i].view(indices.shape[0], -1), dim=1).view(indices.shape[0], -1) for i in range(indices.shape[1]-1)], dim=1)
            recall = torch.sum(recall_elementWise, dim=1)
            recall_accuracy_max, recall_accuracy_min = torch.max(recall) / full_recall, torch.min(recall) / full_recall
            recall_accuracy = torch.mul(recall, torch.FloatTensor([1.]).cuda()).mean() / full_recall
            #idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
            #idx = torch.tensor([i for i in range(indices.shape[1])]).cuda()
            radius_elementWise = torch.abs(indices - idx)
            radius_mean = torch.mean(torch.mul(radius_elementWise, torch.FloatTensor([1.]).cuda()), 1).mean()
            radius_max = torch.max(radius_elementWise)
            """
            #recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None
            #misMatch_y = torch.nonzero(misMatch_y).shape[0] / y_.shape[0]
            #misMatch_x = torch.nonzero(misMatch_x).shape[0] / idx.shape[0]
            #misMatch_y = torch.FloatTensor([torch.nonzero(torch.sub(sorted, y_)).shape[0] / y_.shape[0]]).cuda()
            #misMatch_x = torch.FloatTensor([torch.nonzero(torch.sub(indices, idx)).shape[0] / idx.shape[0]]).cuda()
            #misMatch_y = None
            #misMatch_x = None
            #recall_accuracy, radius_mean, radius_max = None, None, None
        else:
            #misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None, None
            misMatch, recall_accuracy_mean, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None
        return 1-cost, None, misMatch, None, recall_accuracy_mean, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
        #return cost.cuda(), None, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
    @staticmethod
    def make_dataset(*args, **kwargs):
        """Factory for the matching dataset class."""
        return TopoSortDataset(*args, **kwargs)
    @staticmethod
    def make_state(*args, **kwargs):
        """Factory for the decoding state object."""
        return StateTopoSort.initialize(*args, **kwargs)
    @staticmethod
    def beam_search(input, beam_size, expand_size=None,
                    compress_mask=False, model=None, max_calc_batch_size=4096):
        """Run beam search over the decoding state using *model* for expansions."""
        assert model is not None, "Provide model"
        fixed = model.precompute_fixed(input)
        def propose_expansions(beam):
            return model.propose_expansions(
                beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
            )
        #state = TSP.make_state(
        state = TopoSort.make_state(
            #input, visited_dtype=torch.int64 if compress_mask else torch.uint8
            input, visited_dtype=torch.int64 if compress_mask else torch.bool
        )
        return beam_search(state, beam_size, propose_expansions)
class TopoSortDataset(Dataset):
    """Synthetic level-embedding dataset (variant 2).

    Each node is a 4-vector ``[level, p0, p1, p2]``; here ``levels`` is drawn
    from randint(800, 1000), i.e. usually far more levels than *size* nodes.
    """
    # 50, 1000000
    def __init__(self, filename=None, size=25, num_samples=1000, offset=0, distribution=None, seed=0):
        """Load samples from a ``.pkl`` file or generate random level graphs.

        NOTE(review): ``self.data_set`` is assigned but never used; and since
        ``levels`` can exceed *size*, ``graph = [graph[i] for i in order]``
        keeps only the first *size* rows -- confirm both are intended.
        """
        super(TopoSortDataset, self).__init__()
        self.data_set = []
        if filename is not None:
            assert os.path.splitext(filename)[1] == '.pkl'
            with open(filename, 'rb') as f:
                data = pickle.load(f)
            self.data = [torch.FloatTensor(row) for row in (data[offset:offset+num_samples])]
        else:
            # Sample points randomly in [0, 1] square
            #self.data = [torch.FloatTensor(size, 2).uniform_(0, 1) for i in range(num_samples)]
            self.data = []
            if seed > 0:
                random.seed(seed)
            for _ in range(num_samples):
                graph = []
                #levels = random.randint(2, 10)
                #levels = random.randint(1, 25)
                levels = random.randint(800, 1000)
                # Distribute the remaining node budget round-robin over levels.
                level = [1 for _ in range(levels)]
                remaining = size - levels
                traverse = 0
                while remaining > 0:
                    addition = random.randint(0, remaining)
                    level[traverse % levels] += addition
                    traverse += 1
                    remaining -= addition
                # Levels with < 3 nodes cannot absorb many references; cap them.
                caution = [i+1 for i, val in enumerate(level) if val < 3]
                #num_level = size // levels
                for i in range(levels):
                    for j in range(level[i]):
                        embedding = [i+1, random.randint(0, i), random.randint(0, i), random.randint(0, i)]
                        if max(embedding[1:]) < i:
                            embedding[random.randint(1, 3)] = i
                        for constraint in caution:
                            while embedding[1:].count(constraint) > level[constraint-1]:
                                embedding[embedding.index(constraint)] = -1
                        graph.append(embedding)
                order = [i for i in range(size)]
                random.shuffle(order)
                graph = [graph[i] for i in order]
                #graph.sort(key = lambda i: i[0]**2-i[1]**2+i[2]**2-i[3]**2)
                self.data.append(torch.FloatTensor(graph))
                #for i in range(size):
                #graph.append([i,i+random.randint(1,10)])
                #print(torch.FloatTensor(graph).shape)
                #self.data.append(torch.nn.functional.normalize(torch.FloatTensor(graph)))
        # Cached length; __len__ reads this.
        self.size = len(self.data)
    def __len__(self):
        """Number of samples."""
        return self.size
    def __getitem__(self, idx):
        """Return the (size, 4) sample tensor at *idx*."""
        return self.data[idx]
| 9,254 | 46.953368 | 224 | py |
RESPECT | RESPECT-main/problems/toposort/problem_toposort_singleTraining.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
from problems.toposort.state_toposort import StateTopoSort
from utils.beam_search import beam_search
#from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check
from utils import smart_sort
import networkx as nx
import numpy as np
class TopoSort(object):
# Problem definition for learning topological orderings (singleTraining variant):
# cost computation plus factory hooks for dataset, state and beam search.
NAME = 'toposort'
@staticmethod
def get_costs(dataset, pi, labels, measures=False, plot_data=False, graph_name=None):
# Cost of a predicted node permutation `pi` against ground-truth `labels`:
# cost = 1 - cosine_similarity(labels reordered by pi, labels sorted ascending).
# Optionally appends per-sample sequences to a text file (plot_data) and
# computes pairwise-order recall / mismatch statistics (measures).
# Requires CUDA (.cuda() calls throughout).
#def get_costs(dataset, pi, measures=False, plot_data=False):
# Gather dataset in order of graph nodes
#d = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
# smart_sort presumably gathers `labels` along the sequence dim by `pi`
# -- TODO confirm against utils.smart_sort.
order_learned = smart_sort(labels, pi, dim=2)
#order_sorted, indices = torch.sort(labels, dim=1, descending=True)
order_sorted, indices = torch.sort(labels, dim=1)
order_learned = order_learned.cuda()
order_sorted = order_sorted.cuda()
cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
cost = cos(order_learned, order_sorted).cuda()
if plot_data:
# Dump ground-truth vs learned sequences for offline inspection.
dataset_learning_sequence = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
label_learning_sequence = dataset_learning_sequence[:,:,-2].view(dataset_learning_sequence.shape[0], dataset_learning_sequence.shape[1])
dataset_labeling_sequence = dataset.gather(1, indices.unsqueeze(-1).expand_as(dataset))
#layers_order = d[:,:,0].view(d.shape[0], d.shape[1])
# NOTE(review): plain open/close without `with`; an exception mid-write leaks
# the handle -- consider a context manager.
file = open(r"graph_data_collection/graph_data_collection_adaptive_training/" + graph_name, "a")
#file = open(r"graph_data_collection/graph_data_collection_validation/" + graph_name, "a")
#file = open(r"graph_data_collection/graph30_50_128k_w35_35_weightFree.txt", "a")
# Global side effect: disables tensor print truncation process-wide.
torch.set_printoptions(profile="full")
for i in range(dataset.shape[0]):
file.write("real sequence is:\n")
#file.writelines([str(sequence.cpu().numpy())+"\n" for sequence in dataset_labeling_sequence[i]])
file.write(str(dataset_labeling_sequence[i]) + "\n")
file.write("learning sequence is:\n")
#file.write(str(dataset_learning_sequence[i]) + "\n")
file.write(str(label_learning_sequence[i]) + "\n")
file.write("Nodes Level Distribution:\n")
file.write(str(order_sorted[i]) + "\n")
file.write("end\n")
file.close()
#print(layers_order[-1].cpu())
#for element in order_learned[-1].cpu():
# print(element)
#print("end of batch")
# generated index compared to optimal index; to be used for cost function below
#order_learned = d[:,:,0].view(d.shape[0],d.shape[1])
#print(d.shape, list(y_.shape), ((d[:, 1:] - d[:, :-1]).norm(p=2, dim=2).sum(1) + (d[:, 0] - d[:, -1]).norm(p=2, dim=1)).shape)
#order_sorted, indices = torch.sort(order_learned, dim=1)
#New cost stragety to satisfy level sorting
#cost = level_sorting(sorted, y_)
#cost, indices = level_sorting_xy_pairs(indices, d)
#_, indices = level_sorting_xy_pairs(indices, d)
"""
idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
if mode == 1:
indices = deep_sort_x(indices, d)
#print(indices.shape, idx.shape)
# sorting cost is measured with Cosine Similarity
cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
cost = cos(indices.cuda(),idx).cuda()
"""
#misMatch_y = torch.sub(y_, sorted)
#misMatch_x = torch.sub(indices, idx)
#cost = 0.2 * torch.count_nonzero(misMatch_x, dim=1) + 0.8 * torch.count_nonzero(misMatch_y, dim=1)
#order_learned = order_learned.cuda()
#order_sorted = order_sorted.cuda()
#cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
#cost = cos(order_learned, order_sorted).cuda()
if measures:
# Pairwise-order recall: for each position, count later entries that are
# strictly greater; ratio of learned-order counts to sorted-order counts.
recall_elementWise = torch.cat([torch.sum(order_learned[:, (i+1):] > order_learned[:, i].view(order_learned.shape[0], -1), dim=1).view(order_learned.shape[0], -1) for i in range(order_learned.shape[1]-1)], dim=1)
full_recall_elementWise = torch.cat([torch.sum(order_sorted[:, (i+1):] > order_sorted[:, i].view(order_sorted.shape[0], -1), dim=1).view(order_sorted.shape[0], -1) for i in range(order_sorted.shape[1]-1)], dim=1)
recall = torch.sum(recall_elementWise.cuda(), dim=1)
full_recall = torch.sum(full_recall_elementWise.cuda(), dim=1)
recall_accuracy = torch.div(recall.cuda(), full_recall.cuda())
recall_accuracy_mean, recall_accuracy_max, recall_accuracy_min = recall_accuracy.mean(), torch.max(recall_accuracy), torch.min(recall_accuracy)
#diff = torch.abs(torch.sub(order_learned, order_sorted))
# Average (per batch element) count of positions where learned != sorted order.
misMatch = torch.FloatTensor([torch.nonzero(torch.sub(order_learned, order_sorted)).shape[0] / indices.shape[0]]).cuda()
radius_mean, radius_max = None, None
"""
graph_size = indices.shape[1]
full_recall = (graph_size-1) * graph_size / 2.
recall_elementWise = torch.cat([torch.sum(indices[:, (i+1):]>indices[:, i].view(indices.shape[0], -1), dim=1).view(indices.shape[0], -1) for i in range(indices.shape[1]-1)], dim=1)
recall = torch.sum(recall_elementWise, dim=1)
recall_accuracy_max, recall_accuracy_min = torch.max(recall) / full_recall, torch.min(recall) / full_recall
recall_accuracy = torch.mul(recall, torch.FloatTensor([1.]).cuda()).mean() / full_recall
#idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
#idx = torch.tensor([i for i in range(indices.shape[1])]).cuda()
radius_elementWise = torch.abs(indices - idx)
radius_mean = torch.mean(torch.mul(radius_elementWise, torch.FloatTensor([1.]).cuda()), 1).mean()
radius_max = torch.max(radius_elementWise)
"""
#recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None
#misMatch_y = torch.nonzero(misMatch_y).shape[0] / y_.shape[0]
#misMatch_x = torch.nonzero(misMatch_x).shape[0] / idx.shape[0]
#misMatch_y = torch.FloatTensor([torch.nonzero(torch.sub(sorted, y_)).shape[0] / y_.shape[0]]).cuda()
#misMatch_x = torch.FloatTensor([torch.nonzero(torch.sub(indices, idx)).shape[0] / idx.shape[0]]).cuda()
#misMatch_y = None
#misMatch_x = None
#recall_accuracy, radius_mean, radius_max = None, None, None
else:
#misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None, None
misMatch, recall_accuracy_mean, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None
# 9-tuple; positions 2 and 4 are placeholders kept for caller compatibility.
return 1-cost, None, misMatch, None, recall_accuracy_mean, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
#return cost.cuda(), None, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
@staticmethod
def make_dataset(*args, **kwargs):
# Factory hook: build the dataset for this problem.
return TopoSortDataset(*args, **kwargs)
@staticmethod
def make_state(*args, **kwargs):
# Factory hook: build the initial decoding state.
return StateTopoSort.initialize(*args, **kwargs)
@staticmethod
def beam_search(input, beam_size, expand_size=None,
compress_mask=False, model=None, max_calc_batch_size=4096):
# Beam search using the model's precomputed encodings; `model` is required.
assert model is not None, "Provide model"
fixed = model.precompute_fixed(input)
def propose_expansions(beam):
return model.propose_expansions(
beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
)
#state = TSP.make_state(
state = TopoSort.make_state(
#input, visited_dtype=torch.int64 if compress_mask else torch.uint8
input, visited_dtype=torch.int64 if compress_mask else torch.bool
)
# Calls the module-level beam_search helper (class scope is not searched here).
return beam_search(state, beam_size, propose_expansions)
class TopoSortDataset(Dataset):
# Synthetic layered-graph dataset; each sample is a (nodes x 4) FloatTensor of
# node embeddings [own layer id (1-based), three layer references in [0, layer]].
# 50, 1000000
def __init__(self, filename=None, size=25, num_samples=1000, offset=0, distribution=None, seed=0):
# filename: optional .pkl; if given, samples [offset:offset+num_samples] are loaded.
# seed: if > 0, seeds `random` for reproducible generation. distribution: unused.
super(TopoSortDataset, self).__init__()
self.data_set = []  # NOTE(review): unused; samples live in `self.data`
if filename is not None:
assert os.path.splitext(filename)[1] == '.pkl'
with open(filename, 'rb') as f:
data = pickle.load(f)
self.data = [torch.FloatTensor(row) for row in (data[offset:offset+num_samples])]
else:
# Sample points randomly in [0, 1] square
#self.data = [torch.FloatTensor(size, 2).uniform_(0, 1) for i in range(num_samples)]
self.data = []
if seed > 0:
random.seed(seed)
for _ in range(num_samples):
graph = []
#levels = random.randint(2, 10)
#levels = random.randint(1, 25)
# NOTE(review): with default size=25 and levels in [800, 1000], `remaining` is
# negative (loop below skipped) -- confirm `size` >= 1000 with this setting.
levels = random.randint(800, 1000)
level = [1 for _ in range(levels)]
remaining = size - levels
traverse = 0
# Distribute remaining nodes over layers round-robin in random chunks.
while remaining > 0:
addition = random.randint(0, remaining)
level[traverse % levels] += addition
traverse += 1
remaining -= addition
# Sparse layers (< 3 nodes): references to them are capped below.
caution = [i+1 for i, val in enumerate(level) if val < 3]
#num_level = size // levels
for i in range(levels):
for j in range(level[i]):
embedding = [i+1, random.randint(0, i), random.randint(0, i), random.randint(0, i)]
# Guarantee at least one reference to the directly preceding layer i.
if max(embedding[1:]) < i:
embedding[random.randint(1, 3)] = i
# Over-frequent references to sparse layers are replaced with -1.
for constraint in caution:
while embedding[1:].count(constraint) > level[constraint-1]:
embedding[embedding.index(constraint)] = -1
graph.append(embedding)
# Shuffle node rows so input order carries no signal.
order = [i for i in range(size)]
random.shuffle(order)
graph = [graph[i] for i in order]
#graph.sort(key = lambda i: i[0]**2-i[1]**2+i[2]**2-i[3]**2)
self.data.append(torch.FloatTensor(graph))
#for i in range(size):
#graph.append([i,i+random.randint(1,10)])
#print(torch.FloatTensor(graph).shape)
#self.data.append(torch.nn.functional.normalize(torch.FloatTensor(graph)))
self.size = len(self.data)
def __len__(self):
# Number of samples actually loaded or generated.
return self.size
def __getitem__(self, idx):
# One graph sample as a FloatTensor of node embeddings.
return self.data[idx]
| 11,190 | 49.183857 | 224 | py |
RESPECT | RESPECT-main/problems/toposort/problem_toposort_newEmbedding.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
from problems.toposort.state_toposort import StateTopoSort
from utils.beam_search import beam_search
from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check, graph_sorting_DAG
from collections import defaultdict
import networkx as nx
import numpy as np
class TopoSort(object):
# Problem definition (newEmbedding variant): the dataset rows are adjacency-style
# vectors with 2 on the diagonal; get_costs recovers node identities from the
# position of that 2 in each gathered row.
NAME = 'toposort'
@staticmethod
def get_costs(dataset, pi, labels, measures=False, plot_data=False):
# Cost of permutation `pi`: 1 - cosine_similarity(labels, node ids in learned
# order). `labels` holds a valid topological node order (see TopoSortDataset).
# Requires CUDA (.cuda() calls throughout).
# Gather dataset in order of graph nodes
d = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
# Each row has a single entry equal to 2 (its diagonal in the original matrix),
# so the column of that 2 is the node's original id. O(batch*size) Python loop.
idx_learning = torch.stack([torch.stack([torch.nonzero(d[i]==2)[j][1] for j in range(d.shape[1])]) for i in range(d.shape[0])]).cuda()
"""
if plot_data:
print("learned dataset is: ")
for element in d[-1].cpu():
print(element)
print("end of batch")
"""
# generated index compared to optimal index; to be used for cost function below
#y_ = d[:,:,1].view(d.shape[0],d.shape[1])
#print(d.shape, list(y_.shape), ((d[:, 1:] - d[:, :-1]).norm(p=2, dim=2).sum(1) + (d[:, 0] - d[:, -1]).norm(p=2, dim=1)).shape)
#sorted, indices = torch.sort(y_, dim=1)
#New cost stragety to satisfy level sorting
#cost = level_sorting(sorted, y_)
#cost, indices = level_sorting_xy_pairs(indices, d)
#_, indices = level_sorting_xy_pairs(indices, d)
"""
idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
if mode == 1:
indices = deep_sort_x(indices, d)
#print(indices.shape, idx.shape)
# sorting cost is measured with Cosine Similarity
cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
cost = cos(indices.cuda(),idx).cuda()
"""
#indices = graph_sorting_DAG(d)
#idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
#misMatch_y = torch.sub(y_, sorted)
#misMatch_x = torch.sub(indices, idx)
#cost = 0.2 * torch.count_nonzero(misMatch_x, dim=1) + 0.8 * torch.count_nonzero(misMatch_y, dim=1)
if measures:
#recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = order_check(idx, indices.cuda())
#graph_size = indices.shape[1]
graph_size = labels.shape[1]
full_recall = (graph_size-1) * graph_size / 2.
# Map each learned node id to its position in the label (topological) order.
indices = torch.tensor([[torch.nonzero(labels[i]==j).item() for j in idx_learning[i]] for i in range(labels.shape[0])]).cuda()
# Pairwise-order recall: count later entries greater than the current one.
recall_elementWise = torch.cat([torch.sum(indices[:, (i+1):]>indices[:, i].view(indices.shape[0], -1), dim=1).view(indices.shape[0], -1) for i in range(indices.shape[1]-1)], dim=1)
recall = torch.sum(recall_elementWise, dim=1)
recall_accuracy_max, recall_accuracy_min = torch.max(recall) / full_recall, torch.min(recall) / full_recall
recall_accuracy = torch.mul(recall, torch.FloatTensor([1.]).cuda()).mean() / full_recall
idx = torch.tensor([i for i in range(indices.shape[1])]).cuda()
#idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
#radius_elementWise = torch.abs(indices - idx)
# Radius: displacement of each node from its topological position.
radius_elementWise = torch.abs(indices - idx)
radius_mean = torch.mean(torch.mul(radius_elementWise, torch.FloatTensor([1.]).cuda()), 1).mean()
radius_max = torch.max(radius_elementWise)
#recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None
#misMatch_y = torch.nonzero(misMatch_y).shape[0] / y_.shape[0]
#misMatch_x = torch.nonzero(misMatch_x).shape[0] / idx.shape[0]
#misMatch_y = torch.FloatTensor([torch.nonzero(torch.sub(sorted, y_)).shape[0] / y_.shape[0]]).cuda()
#misMatch_x = torch.FloatTensor([torch.nonzero(torch.sub(indices, idx)).shape[0] / idx.shape[0]]).cuda()
# Average count of misplaced positions per batch element.
misMatch = torch.FloatTensor([torch.nonzero(torch.sub(indices, idx)).shape[0] / indices.shape[0]]).cuda()
#misMatch_y = None
#misMatch_x = None
#recall_accuracy, radius_mean, radius_max = None, None, None
else:
#misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None, None
misMatch, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None
cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
cost = cos(labels, idx_learning).cuda()
#print(cost.shape)
#return 1-cost, None, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
# 9-tuple; positions 2 and 4 are placeholders kept for caller compatibility.
return 1-cost, None, misMatch, None, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
#return cost.cuda(), None, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
@staticmethod
def make_dataset(*args, **kwargs):
# Factory hook: build the dataset for this problem.
return TopoSortDataset(*args, **kwargs)
@staticmethod
def make_state(*args, **kwargs):
# Factory hook: build the initial decoding state.
return StateTopoSort.initialize(*args, **kwargs)
@staticmethod
def beam_search(input, beam_size, expand_size=None,
compress_mask=False, model=None, max_calc_batch_size=4096):
# Beam search using the model's precomputed encodings; `model` is required.
assert model is not None, "Provide model"
fixed = model.precompute_fixed(input)
def propose_expansions(beam):
return model.propose_expansions(
beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
)
#state = TSP.make_state(
state = TopoSort.make_state(
#input, visited_dtype=torch.int64 if compress_mask else torch.uint8
input, visited_dtype=torch.int64 if compress_mask else torch.bool
)
# Calls the module-level beam_search helper (class scope is not searched here).
return beam_search(state, beam_size, propose_expansions)
class TopoSortDataset(Dataset):
# Dataset of random connected DAGs built with NetworkX. Each sample is a
# (size x size) FloatTensor: diagonal entries are 2, edge u->v sets [u][v]=1
# and [v][u]=-1; the matching label is a valid topological node order.
# 50, 1000000
def __init__(self, filename=None, size=25, num_samples=1000, offset=0, distribution=None, seed=0):
# filename: optional .pkl; if given, data is loaded (NOTE(review): labels are
# NOT loaded in that branch, so __getitem__ would fail -- confirm intended use).
# seed: if > 0, seeds `random` for reproducibility. distribution: unused.
super(TopoSortDataset, self).__init__()
self.data = []
self.label = []
if filename is not None:
assert os.path.splitext(filename)[1] == '.pkl'
with open(filename, 'rb') as f:
data = pickle.load(f)
self.data = [torch.FloatTensor(row) for row in (data[offset:offset+num_samples])]
else:
self.data = []
if seed > 0:
random.seed(seed)
order = [i for i in range(size)]
for _ in range(num_samples):
# Rejection-sample until we obtain a connected graph whose orientation
# (after in-degree modification) is a DAG.
G = nx.gnp_random_graph(size, random.random())
D = None
while True:
if nx.is_connected(G):
G = self._in_degree_modification(G, size)
if nx.is_connected(G):
# Orient edges as given by G.edges() (u < v for gnp graphs).
D = nx.DiGraph([(u, v) for u, v in G.edges()])
if nx.is_directed_acyclic_graph(D):
break
G = nx.gnp_random_graph(size, random.random())
# Randomly relabel nodes so node id carries no ordering signal.
random.shuffle(order)
mapping = {idx:item for idx, item in enumerate(order)}
DAG = nx.relabel.relabel_nodes(D, mapping)
# Signed adjacency matrix with 2 on the diagonal (used downstream to
# recover node identity after row permutation).
graph = np.diag([2]*size)
for u, v in DAG.edges():
graph[u][v] = 1
graph[v][u] = -1
# Rows are sorted by an arbitrary fixed key to normalize input order.
self.data.append(torch.FloatTensor(sorted(graph, key=lambda x : x[0]**2- x[-1]**3)))
self.label.append(torch.FloatTensor(list(nx.topological_sort(DAG))))
"""
else:
# Sample points randomly in [0, 1] square
#self.data = [torch.FloatTensor(size, 2).uniform_(0, 1) for i in range(num_samples)]
self.data = []
for _ in range(num_samples):
#random.seed(seed)
#seed += 10
graph = []
#levels = random.randint(1, 20)
levels = random.randint(1, 25)
#levels = random.randint(50, 100)
num_level = size // levels
for level in range(levels-1):
y_axis = random.random()
for j in range(num_level):
graph.append([random.random(), y_axis])
y_axis = random.random()
for k in range(num_level+size%levels):
graph.append([random.random(), y_axis])
graph.sort(key = lambda i: i[0]**2-i[1]**2)
self.data.append(torch.FloatTensor(graph))
for i in range(size):
graph.append([i,i+random.randint(1,10)])
#print(torch.FloatTensor(graph).shape)
self.data.append(torch.nn.functional.normalize(torch.FloatTensor(graph)))
"""
self.size = len(self.data)
def _in_degree_modification(self, G, size):
# Rewires G so that (presumably) every node's in-degree under the u<v
# orientation is capped at 2, re-attaching the removed edge sources to
# under-full or uncovered nodes -- TODO confirm intent with author.
redundant_edges_end = []
node_in_degree = defaultdict(int)
node_out_degree = defaultdict(int)
edges = []
nodes_covering = set()
for u, v in G.edges():
# Keep at most 2 in-edges per target; remember displaced sources.
if node_in_degree[v] == 2:
redundant_edges_end.append(u)
else:
node_in_degree[v] += 1
node_out_degree[u] += 1
edges.append((u, v))
nodes_covering.add(u)
nodes_covering.add(v)
for k in node_in_degree:
if node_in_degree[k] < 2:
in_degree_sup = False
if len(redundant_edges_end) > 0:
# Reuse a displaced source if it does not duplicate an edge.
for ele in redundant_edges_end:
if (ele, k) not in edges:
edges.append((ele, k))
redundant_edges_end.remove(ele)
node_out_degree[ele] += 1
nodes_covering.add(ele)
in_degree_sup = True
break
if not in_degree_sup:
# Fall back to an edge from a random lower-id node (k >= 1 here,
# since node 0 never appears as an edge target when u < v).
source = random.randint(0, k-1)
edges.append((source, k))
node_out_degree[source] += 1
node_in_degree[k] += 1
# Attach any displaced sources that ended up with no edges at all.
while len(redundant_edges_end) > 0:
node = redundant_edges_end.pop()
if node not in nodes_covering:
for head in nodes_covering:
if head not in node_in_degree or node_in_degree[head] < 2:
edges.append((node, head))
node_in_degree[head] += 1
node_out_degree[node] += 1
nodes_covering.add(node)
break
return nx.Graph(edges)
def __len__(self):
# Number of samples generated/loaded.
return self.size
def __getitem__(self, idx):
# Returns (adjacency-style FloatTensor, topological-order label).
return self.data[idx], self.label[idx]
| 11,423 | 45.064516 | 192 | py |
RESPECT | RESPECT-main/problems/toposort/problem_toposort_multipleTraining_1.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
from problems.toposort.state_toposort import StateTopoSort
from utils.beam_search import beam_search
#from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check
from utils import smart_sort
import networkx as nx
import numpy as np
class TopoSort(object):
# Problem definition (multipleTraining_1 variant). Same cost as the
# singleTraining variant, but plot_data output is sharded by `P` and -- note --
# the return tuple orders recall max/min BEFORE the radius values, unlike the
# sibling files (callers must match this order).
NAME = 'toposort'
@staticmethod
def get_costs(dataset, pi, labels, measures=False, plot_data=False, P=0):
# Cost of permutation `pi`: 1 - cosine_similarity(labels reordered by pi,
# labels sorted ascending). P selects the output shard when plot_data is set.
# Requires CUDA (.cuda() calls throughout).
#def get_costs(dataset, pi, measures=False, plot_data=False):
# Gather dataset in order of graph nodes
#d = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
# smart_sort presumably gathers `labels` along the sequence dim by `pi`
# -- TODO confirm against utils.smart_sort.
order_learned = smart_sort(labels, pi, dim=2)
order_sorted, indices = torch.sort(labels, dim=1)
order_learned = order_learned.cuda()
order_sorted = order_sorted.cuda()
cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
cost = cos(order_learned, order_sorted).cuda()
if plot_data:
# Dump ground-truth vs learned sequences for offline inspection.
dataset_learning_sequence = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
label_learning_sequence = dataset_learning_sequence[:,:,-2].view(dataset_learning_sequence.shape[0], dataset_learning_sequence.shape[1])
dataset_labeling_sequence = dataset.gather(1, indices.unsqueeze(-1).expand_as(dataset))
#layers_order = d[:,:,0].view(d.shape[0], d.shape[1])
# NOTE(review): plain open/close without `with`; an exception mid-write leaks the handle.
file = open(r"graph_data_collection/graph_data_collection_largeSize3/graph30_200_128k_w35_35_weightFree_" + str(P) + ".txt", "a")
# Global side effect: disables tensor print truncation process-wide.
torch.set_printoptions(profile="full")
for i in range(dataset.shape[0]):
file.write("real sequence is:\n")
#file.writelines([str(sequence.cpu().numpy())+"\n" for sequence in dataset_labeling_sequence[i]])
file.write(str(dataset_labeling_sequence[i]) + "\n")
file.write("learning sequence is:\n")
file.write(str(label_learning_sequence[i]) + "\n")
file.write("Nodes Level Distribution:\n")
file.write(str(order_sorted[i]) + "\n")
file.write("end\n")
file.close()
#print(layers_order[-1].cpu())
#for element in order_learned[-1].cpu():
# print(element)
#print("end of batch")
# generated index compared to optimal index; to be used for cost function below
#order_learned = d[:,:,0].view(d.shape[0],d.shape[1])
#print(d.shape, list(y_.shape), ((d[:, 1:] - d[:, :-1]).norm(p=2, dim=2).sum(1) + (d[:, 0] - d[:, -1]).norm(p=2, dim=1)).shape)
#order_sorted, indices = torch.sort(order_learned, dim=1)
#New cost stragety to satisfy level sorting
#cost = level_sorting(sorted, y_)
#cost, indices = level_sorting_xy_pairs(indices, d)
#_, indices = level_sorting_xy_pairs(indices, d)
"""
idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
if mode == 1:
indices = deep_sort_x(indices, d)
#print(indices.shape, idx.shape)
# sorting cost is measured with Cosine Similarity
cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
cost = cos(indices.cuda(),idx).cuda()
"""
#misMatch_y = torch.sub(y_, sorted)
#misMatch_x = torch.sub(indices, idx)
#cost = 0.2 * torch.count_nonzero(misMatch_x, dim=1) + 0.8 * torch.count_nonzero(misMatch_y, dim=1)
#order_learned = order_learned.cuda()
#order_sorted = order_sorted.cuda()
#cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
#cost = cos(order_learned, order_sorted).cuda()
if measures:
# Pairwise-order recall: count later entries strictly greater, learned vs sorted.
recall_elementWise = torch.cat([torch.sum(order_learned[:, (i+1):] > order_learned[:, i].view(order_learned.shape[0], -1), dim=1).view(order_learned.shape[0], -1) for i in range(order_learned.shape[1]-1)], dim=1)
full_recall_elementWise = torch.cat([torch.sum(order_sorted[:, (i+1):] > order_sorted[:, i].view(order_sorted.shape[0], -1), dim=1).view(order_sorted.shape[0], -1) for i in range(order_sorted.shape[1]-1)], dim=1)
recall = torch.sum(recall_elementWise.cuda(), dim=1)
full_recall = torch.sum(full_recall_elementWise.cuda(), dim=1)
recall_accuracy = torch.div(recall.cuda(), full_recall.cuda())
recall_accuracy_mean, recall_accuracy_max, recall_accuracy_min = recall_accuracy.mean(), torch.max(recall_accuracy), torch.min(recall_accuracy)
#diff = torch.abs(torch.sub(order_learned, order_sorted))
# Average (per batch element) count of positions where learned != sorted order.
misMatch = torch.FloatTensor([torch.nonzero(torch.sub(order_learned, order_sorted)).shape[0] / indices.shape[0]]).cuda()
radius_mean, radius_max = None, None
"""
graph_size = indices.shape[1]
full_recall = (graph_size-1) * graph_size / 2.
recall_elementWise = torch.cat([torch.sum(indices[:, (i+1):]>indices[:, i].view(indices.shape[0], -1), dim=1).view(indices.shape[0], -1) for i in range(indices.shape[1]-1)], dim=1)
recall = torch.sum(recall_elementWise, dim=1)
recall_accuracy_max, recall_accuracy_min = torch.max(recall) / full_recall, torch.min(recall) / full_recall
recall_accuracy = torch.mul(recall, torch.FloatTensor([1.]).cuda()).mean() / full_recall
#idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
#idx = torch.tensor([i for i in range(indices.shape[1])]).cuda()
radius_elementWise = torch.abs(indices - idx)
radius_mean = torch.mean(torch.mul(radius_elementWise, torch.FloatTensor([1.]).cuda()), 1).mean()
radius_max = torch.max(radius_elementWise)
"""
#recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None
#misMatch_y = torch.nonzero(misMatch_y).shape[0] / y_.shape[0]
#misMatch_x = torch.nonzero(misMatch_x).shape[0] / idx.shape[0]
#misMatch_y = torch.FloatTensor([torch.nonzero(torch.sub(sorted, y_)).shape[0] / y_.shape[0]]).cuda()
#misMatch_x = torch.FloatTensor([torch.nonzero(torch.sub(indices, idx)).shape[0] / idx.shape[0]]).cuda()
#misMatch_y = None
#misMatch_x = None
#recall_accuracy, radius_mean, radius_max = None, None, None
else:
#misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None, None
misMatch, recall_accuracy_mean, recall_accuracy_max, recall_accuracy_min, radius_mean, radius_max = None, None, None, None, None, None
# 9-tuple; NOTE(review): max/min precede radius here, unlike the sibling variants.
return 1-cost, None, misMatch, None, recall_accuracy_mean, recall_accuracy_max, recall_accuracy_min, radius_mean, radius_max
#return cost.cuda(), None, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
@staticmethod
def make_dataset(*args, **kwargs):
# Factory hook: build the dataset for this problem.
return TopoSortDataset(*args, **kwargs)
@staticmethod
def make_state(*args, **kwargs):
# Factory hook: build the initial decoding state.
return StateTopoSort.initialize(*args, **kwargs)
@staticmethod
def beam_search(input, beam_size, expand_size=None,
compress_mask=False, model=None, max_calc_batch_size=4096):
# Beam search using the model's precomputed encodings; `model` is required.
assert model is not None, "Provide model"
fixed = model.precompute_fixed(input)
def propose_expansions(beam):
return model.propose_expansions(
beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
)
#state = TSP.make_state(
state = TopoSort.make_state(
#input, visited_dtype=torch.int64 if compress_mask else torch.uint8
input, visited_dtype=torch.int64 if compress_mask else torch.bool
)
# Calls the module-level beam_search helper (class scope is not searched here).
return beam_search(state, beam_size, propose_expansions)
class TopoSortDataset(Dataset):
# Synthetic layered-graph dataset (duplicate of the singleTraining variant).
# Each sample is a (nodes x 4) FloatTensor of node embeddings
# [own layer id (1-based), three layer references in [0, layer]].
# 50, 1000000
def __init__(self, filename=None, size=25, num_samples=1000, offset=0, distribution=None, seed=0):
# filename: optional .pkl; if given, samples [offset:offset+num_samples] are loaded.
# seed: if > 0, seeds `random` for reproducible generation. distribution: unused.
super(TopoSortDataset, self).__init__()
self.data_set = []  # NOTE(review): unused; samples live in `self.data`
if filename is not None:
assert os.path.splitext(filename)[1] == '.pkl'
with open(filename, 'rb') as f:
data = pickle.load(f)
self.data = [torch.FloatTensor(row) for row in (data[offset:offset+num_samples])]
else:
# Sample points randomly in [0, 1] square
#self.data = [torch.FloatTensor(size, 2).uniform_(0, 1) for i in range(num_samples)]
self.data = []
if seed > 0:
random.seed(seed)
for _ in range(num_samples):
graph = []
#levels = random.randint(2, 10)
#levels = random.randint(1, 25)
# NOTE(review): with default size=25 and levels in [800, 1000], `remaining`
# is negative (loop below skipped) -- confirm `size` >= 1000 with this setting.
levels = random.randint(800, 1000)
level = [1 for _ in range(levels)]
remaining = size - levels
traverse = 0
# Distribute remaining nodes over layers round-robin in random chunks.
while remaining > 0:
addition = random.randint(0, remaining)
level[traverse % levels] += addition
traverse += 1
remaining -= addition
# Sparse layers (< 3 nodes): references to them are capped below.
caution = [i+1 for i, val in enumerate(level) if val < 3]
#num_level = size // levels
for i in range(levels):
for j in range(level[i]):
embedding = [i+1, random.randint(0, i), random.randint(0, i), random.randint(0, i)]
# Guarantee at least one reference to the directly preceding layer i.
if max(embedding[1:]) < i:
embedding[random.randint(1, 3)] = i
# Over-frequent references to sparse layers are replaced with -1.
for constraint in caution:
while embedding[1:].count(constraint) > level[constraint-1]:
embedding[embedding.index(constraint)] = -1
graph.append(embedding)
# Shuffle node rows so input order carries no signal.
order = [i for i in range(size)]
random.shuffle(order)
graph = [graph[i] for i in order]
#graph.sort(key = lambda i: i[0]**2-i[1]**2+i[2]**2-i[3]**2)
self.data.append(torch.FloatTensor(graph))
#for i in range(size):
#graph.append([i,i+random.randint(1,10)])
#print(torch.FloatTensor(graph).shape)
#self.data.append(torch.nn.functional.normalize(torch.FloatTensor(graph)))
self.size = len(self.data)
def __len__(self):
# Number of samples actually loaded or generated.
return self.size
def __getitem__(self, idx):
# One graph sample as a FloatTensor of node embeddings.
return self.data[idx]
| 10,873 | 47.328889 | 224 | py |
RESPECT | RESPECT-main/problems/toposort/problem_toposort.py | from torch.utils.data import Dataset
import torch, random
import os
import pickle
from problems.toposort.state_toposort import StateTopoSort
from utils.beam_search import beam_search
#from utils import orderCheck, deep_sort_x, level_sorting, level_sorting_xy_pairs, order_check
from utils import smart_sort
import networkx as nx
import numpy as np
class TopoSort(object):
NAME = 'toposort'
@staticmethod
def get_costs(dataset, pi, labels, measures=False, plot_data=False, graph_name=None):
# Cost of a predicted node permutation `pi` against ground-truth `labels`:
# cost = 1 - cosine_similarity(labels reordered by pi, labels sorted ascending).
# plot_data appends per-sample sequences to edge_tpu_inference_results/<graph_name>;
# measures adds pairwise-order recall / mismatch statistics. Requires CUDA.
#def get_costs(dataset, pi, measures=False, plot_data=False):
# Gather dataset in order of graph nodes
#d = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
#print('graph size is:', labels.size()[1])
# smart_sort presumably gathers `labels` along the sequence dim by `pi`
# -- TODO confirm against utils.smart_sort.
order_learned = smart_sort(labels, pi, dim=2)
#order_sorted, indices = torch.sort(labels, dim=1, descending=True)
order_sorted, indices = torch.sort(labels, dim=1)
order_learned = order_learned.cuda()
order_sorted = order_sorted.cuda()
cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
cost = cos(order_learned, order_sorted).cuda()
if plot_data:
# Dump ground-truth vs learned sequences for offline inspection.
dataset_learning_sequence = dataset.gather(1, pi.unsqueeze(-1).expand_as(dataset))
label_learning_sequence = dataset_learning_sequence[:,:,-2].view(dataset_learning_sequence.shape[0], dataset_learning_sequence.shape[1])
dataset_labeling_sequence = dataset.gather(1, indices.unsqueeze(-1).expand_as(dataset))
#layers_order = d[:,:,0].view(d.shape[0], d.shape[1])
# NOTE(review): plain open/close without `with`; an exception mid-write leaks the handle.
file = open(r"edge_tpu_inference_results/" + graph_name, "a")
# Global side effect: disables tensor print truncation process-wide.
torch.set_printoptions(profile="full")
for i in range(dataset.shape[0]):
file.write("real sequence is:\n")
#file.writelines([str(sequence.cpu().numpy())+"\n" for sequence in dataset_labeling_sequence[i]])
file.write(str(dataset_labeling_sequence[i]) + "\n")
file.write("learning sequence is:\n")
#file.write(str(dataset_learning_sequence[i]) + "\n")
file.write(str(label_learning_sequence[i]) + "\n")
file.write("Nodes Level Distribution:\n")
file.write(str(order_sorted[i]) + "\n")
file.write("end\n")
file.close()
#print(layers_order[-1].cpu())
#for element in order_learned[-1].cpu():
# print(element)
#print("end of batch")
# generated index compared to optimal index; to be used for cost function below
#order_learned = d[:,:,0].view(d.shape[0],d.shape[1])
#print(d.shape, list(y_.shape), ((d[:, 1:] - d[:, :-1]).norm(p=2, dim=2).sum(1) + (d[:, 0] - d[:, -1]).norm(p=2, dim=1)).shape)
#order_sorted, indices = torch.sort(order_learned, dim=1)
#New cost stragety to satisfy level sorting
#cost = level_sorting(sorted, y_)
#cost, indices = level_sorting_xy_pairs(indices, d)
#_, indices = level_sorting_xy_pairs(indices, d)
"""
idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
if mode == 1:
indices = deep_sort_x(indices, d)
#print(indices.shape, idx.shape)
# sorting cost is measured with Cosine Similarity
cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
cost = cos(indices.cuda(),idx).cuda()
"""
#misMatch_y = torch.sub(y_, sorted)
#misMatch_x = torch.sub(indices, idx)
#cost = 0.2 * torch.count_nonzero(misMatch_x, dim=1) + 0.8 * torch.count_nonzero(misMatch_y, dim=1)
#order_learned = order_learned.cuda()
#order_sorted = order_sorted.cuda()
#cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
#cost = cos(order_learned, order_sorted).cuda()
if measures:
# Pairwise-order recall: count later entries strictly greater, learned vs sorted.
recall_elementWise = torch.cat([torch.sum(order_learned[:, (i+1):] > order_learned[:, i].view(order_learned.shape[0], -1), dim=1).view(order_learned.shape[0], -1) for i in range(order_learned.shape[1]-1)], dim=1)
full_recall_elementWise = torch.cat([torch.sum(order_sorted[:, (i+1):] > order_sorted[:, i].view(order_sorted.shape[0], -1), dim=1).view(order_sorted.shape[0], -1) for i in range(order_sorted.shape[1]-1)], dim=1)
recall = torch.sum(recall_elementWise.cuda(), dim=1)
full_recall = torch.sum(full_recall_elementWise.cuda(), dim=1)
recall_accuracy = torch.div(recall.cuda(), full_recall.cuda())
recall_accuracy_mean, recall_accuracy_max, recall_accuracy_min = recall_accuracy.mean(), torch.max(recall_accuracy), torch.min(recall_accuracy)
#diff = torch.abs(torch.sub(order_learned, order_sorted))
# Average (per batch element) count of positions where learned != sorted order.
misMatch = torch.FloatTensor([torch.nonzero(torch.sub(order_learned, order_sorted)).shape[0] / indices.shape[0]]).cuda()
radius_mean, radius_max = None, None
"""
graph_size = indices.shape[1]
full_recall = (graph_size-1) * graph_size / 2.
recall_elementWise = torch.cat([torch.sum(indices[:, (i+1):]>indices[:, i].view(indices.shape[0], -1), dim=1).view(indices.shape[0], -1) for i in range(indices.shape[1]-1)], dim=1)
recall = torch.sum(recall_elementWise, dim=1)
recall_accuracy_max, recall_accuracy_min = torch.max(recall) / full_recall, torch.min(recall) / full_recall
recall_accuracy = torch.mul(recall, torch.FloatTensor([1.]).cuda()).mean() / full_recall
#idx = torch.Tensor([i for i in range(int(list(y_.shape)[1]))]).cuda().repeat(list(y_.shape)[0]).view(d.shape[0],d.shape[1])
#idx = torch.tensor([i for i in range(indices.shape[1])]).cuda()
radius_elementWise = torch.abs(indices - idx)
radius_mean = torch.mean(torch.mul(radius_elementWise, torch.FloatTensor([1.]).cuda()), 1).mean()
radius_max = torch.max(radius_elementWise)
"""
#recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None
#misMatch_y = torch.nonzero(misMatch_y).shape[0] / y_.shape[0]
#misMatch_x = torch.nonzero(misMatch_x).shape[0] / idx.shape[0]
#misMatch_y = torch.FloatTensor([torch.nonzero(torch.sub(sorted, y_)).shape[0] / y_.shape[0]]).cuda()
#misMatch_x = torch.FloatTensor([torch.nonzero(torch.sub(indices, idx)).shape[0] / idx.shape[0]]).cuda()
#misMatch_y = None
#misMatch_x = None
#recall_accuracy, radius_mean, radius_max = None, None, None
else:
#misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None, None
misMatch, recall_accuracy_mean, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min = None, None, None, None, None, None
# 9-tuple; positions 2 and 4 are placeholders kept for caller compatibility.
return 1-cost, None, misMatch, None, recall_accuracy_mean, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
#return cost.cuda(), None, misMatch_y, misMatch_x, recall_accuracy, radius_mean, radius_max, recall_accuracy_max, recall_accuracy_min
@staticmethod
def make_dataset(*args, **kwargs):
return TopoSortDataset(*args, **kwargs)
@staticmethod
def make_state(*args, **kwargs):
return StateTopoSort.initialize(*args, **kwargs)
@staticmethod
def beam_search(input, beam_size, expand_size=None,
compress_mask=False, model=None, max_calc_batch_size=4096):
assert model is not None, "Provide model"
fixed = model.precompute_fixed(input)
def propose_expansions(beam):
return model.propose_expansions(
beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
)
#state = TSP.make_state(
state = TopoSort.make_state(
#input, visited_dtype=torch.int64 if compress_mask else torch.uint8
input, visited_dtype=torch.int64 if compress_mask else torch.bool
)
return beam_search(state, beam_size, propose_expansions)
class TopoSortDataset(Dataset):
    """Synthetic dataset of layered graphs for the topological-sorting task.

    Each sample is a (size, 4) float tensor. Per node, entry 0 is its level
    (1-based) and entries 1..3 are presumably levels of predecessor nodes,
    with -1 marking a dropped predecessor -- TODO confirm semantics against
    the model that consumes this dataset. Node order is shuffled per sample.
    """
    # 50, 1000000
    def __init__(self, filename=None, size=25, num_samples=1000, offset=0, distribution=None, seed=0):
        super(TopoSortDataset, self).__init__()

        self.data_set = []
        if filename is not None:
            # Load pre-generated samples from a pickle file.
            assert os.path.splitext(filename)[1] == '.pkl'

            with open(filename, 'rb') as f:
                data = pickle.load(f)
                self.data = [torch.FloatTensor(row) for row in (data[offset:offset+num_samples])]
        else:
            # Sample points randomly in [0, 1] square
            #self.data = [torch.FloatTensor(size, 2).uniform_(0, 1) for i in range(num_samples)]
            self.data = []
            # seed == 0 leaves the global RNG unseeded (non-reproducible).
            if seed > 0:
                random.seed(seed)
            for _ in range(num_samples):
                graph = []
                #levels = random.randint(2, 10)
                #levels = random.randint(1, 25)
                # NOTE(review): with the default size=25 and levels in [800, 1000],
                # `remaining` below is negative, so the node-spreading loop never
                # runs and every level keeps exactly one node; the final
                # `[graph[i] for i in order]` then keeps only the first `size`
                # generated nodes. Confirm this hard-coded range is intentional.
                levels = random.randint(800, 1000)
                # Start with one node per level, then distribute the remaining
                # size - levels nodes round-robin across levels.
                level = [1 for _ in range(levels)]
                remaining = size - levels
                traverse = 0
                while remaining > 0:
                    addition = random.randint(0, remaining)
                    level[traverse % levels] += addition
                    traverse += 1
                    remaining -= addition
                # Levels with fewer than 3 nodes cannot serve as predecessors
                # more often than they have nodes; handled below.
                caution = [i+1 for i, val in enumerate(level) if val < 3]
                #num_level = size // levels
                for i in range(levels):
                    for j in range(level[i]):
                        # [own level, pred level, pred level, pred level];
                        # force at least one predecessor from the directly
                        # preceding level i (stored 0-based here).
                        embedding = [i+1, random.randint(0, i), random.randint(0, i), random.randint(0, i)]
                        if max(embedding[1:]) < i:
                            embedding[random.randint(1, 3)] = i
                        # Drop predecessor references that exceed the small
                        # level's capacity, marking them -1.
                        for constraint in caution:
                            while embedding[1:].count(constraint) > level[constraint-1]:
                                embedding[embedding.index(constraint)] = -1
                        graph.append(embedding)
                # Shuffle node order so the learner cannot rely on input order.
                order = [i for i in range(size)]
                random.shuffle(order)
                graph = [graph[i] for i in order]
                #graph.sort(key = lambda i: i[0]**2-i[1]**2+i[2]**2-i[3]**2)
                self.data.append(torch.FloatTensor(graph))
                #for i in range(size):
                    #graph.append([i,i+random.randint(1,10)])
                #print(torch.FloatTensor(graph).shape)
                #self.data.append(torch.nn.functional.normalize(torch.FloatTensor(graph)))
        self.size = len(self.data)

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        return self.data[idx]
| 11,011 | 48.160714 | 224 | py |
RESPECT | RESPECT-main/problems/op/op_baseline.py | import argparse
import os
import numpy as np
from utils import run_all_in_pool
from utils.data_utils import check_extension, load_dataset, save_dataset
from subprocess import check_call, check_output
import tempfile
import time
from datetime import timedelta
from problems.op.opga.opevo import run_alg as run_opga_alg
from tqdm import tqdm
import re
MAX_LENGTH_TOL = 1e-5
# Run install_compass.sh to install
def solve_compass(executable, depot, loc, demand, capacity):
    """Solve one instance with the external compass binary using temp files.

    Writes an OPLIB problem plus a parameter file, invokes `executable`, and
    parses the tour it produces. Returns (tour, raw solver stdout, wall-clock
    duration in seconds).

    NOTE(review): the last two parameters are named demand/capacity but are
    forwarded to write_oplib as its (prize, max_length) arguments -- presumably
    a leftover from a VRP variant of this helper; confirm against callers.
    """
    with tempfile.TemporaryDirectory() as tempdir:
        problem_filename = os.path.join(tempdir, "problem.oplib")
        output_filename = os.path.join(tempdir, "output.tour")
        param_filename = os.path.join(tempdir, "params.par")

        starttime = time.time()
        # Point compass at the generated problem and desired tour output.
        write_oplib(problem_filename, depot, loc, demand, capacity)
        params = {"PROBLEM_FILE": problem_filename, "OUTPUT_TOUR_FILE": output_filename}
        write_compass_par(param_filename, params)
        output = check_output([executable, param_filename])
        result = read_oplib(output_filename, n=len(demand))
        duration = time.time() - starttime
        return result, output, duration
def solve_compass_log(executable, directory, name, depot, loc, prize, max_length, disable_cache=False):
    """Solve one OP instance with compass, caching results under `directory`.

    Reuses a cached pickle if present (unless `disable_cache`); otherwise runs
    the solver with stdout/stderr captured to a log file. Returns
    (negative collected prize, tour, duration) or None on failure.
    """
    problem_filename = os.path.join(directory, "{}.oplib".format(name))
    tour_filename = os.path.join(directory, "{}.tour".format(name))
    output_filename = os.path.join(directory, "{}.compass.pkl".format(name))
    log_filename = os.path.join(directory, "{}.log".format(name))

    try:
        # May have already been run
        if os.path.isfile(output_filename) and not disable_cache:
            tour, duration = load_dataset(output_filename)
        else:
            write_oplib(problem_filename, depot, loc, prize, max_length, name=name)

            with open(log_filename, 'w') as f:
                start = time.time()
                check_call([executable, '--op', '--op-ea4op', problem_filename, '-o', tour_filename],
                           stdout=f, stderr=f)
                duration = time.time() - start

            tour = read_oplib(tour_filename, n=len(prize))
            # Warn first (soft check), then hard-fail only beyond the tolerance.
            if not calc_op_length(depot, loc, tour) <= max_length:
                print("Warning: length exceeds max length:", calc_op_length(depot, loc, tour), max_length)
            assert calc_op_length(depot, loc, tour) <= max_length + MAX_LENGTH_TOL, "Tour exceeds max_length!"
            save_dataset((tour, duration), output_filename)

        # Negated because the framework minimizes while OP maximizes prize.
        return -calc_op_total(prize, tour), tour, duration

    except Exception as e:
        # Failures are tolerated so the batch run continues; the caching
        # mechanism allows retrying just the failed instances.
        print("Exception occured")
        print(e)
        return None
def calc_op_total(prize, tour):
    """Total prize collected along `tour`.

    `tour` holds 1-based node indices (0 is the depot and must not appear);
    `prize` is indexed 0-based, hence the -1 shift.
    """
    tour_arr = np.asarray(tour)
    assert (tour_arr > 0).all(), "Depot cannot be in tour"
    assert len(np.unique(tour_arr)) == len(tour_arr), "Tour cannot contain duplicates"
    return np.asarray(prize)[tour_arr - 1].sum()
def calc_op_length(depot, loc, tour):
    """Euclidean length of the closed route depot -> tour nodes -> depot.

    `tour` uses 1-based node indices (0 is the depot).
    """
    assert len(np.unique(tour)) == len(tour), "Tour cannot contain duplicates"
    all_coords = np.vstack((np.array(depot)[None, :], np.array(loc)))
    visit_order = np.concatenate(([0], tour, [0]))
    path = all_coords[visit_order]
    steps = path[1:] - path[:-1]
    return np.linalg.norm(steps, axis=-1).sum()
def write_compass_par(filename, parameters):
    """Write a compass parameter file.

    `parameters` overrides the defaults below; an entry whose value is None
    is written as a bare flag (key only) instead of a `key = value` pair.
    """
    defaults = {
        "SPECIAL": None,
        "MAX_TRIALS": 10000,
        "RUNS": 10,
        "TRACE_LEVEL": 1,
        "SEED": 0
    }
    merged = {**defaults, **parameters}
    with open(filename, 'w') as f:
        for key, value in merged.items():
            if value is None:
                f.write("{}\n".format(key))
            else:
                f.write("{} = {}\n".format(key, value))
def read_oplib(filename, n):
    """Parse a compass tour file and return the tour as 0-based node indices.

    Node ids in the file are 1-based with 1 = depot; the sequence section is
    terminated by -1. The leading depot entry is stripped from the result.
    """
    tour = []
    dimension = 0
    in_sequence = False
    with open(filename, 'r') as f:
        for line in f:
            if in_sequence:
                node = int(line)
                if node == -1:
                    break
                tour.append(node)
            if line.startswith("DIMENSION"):
                dimension = int(line.split(" ")[-1])
            if line.startswith("NODE_SEQUENCE_SECTION"):
                in_sequence = True

    assert len(tour) > 0, "Unexpected length"
    # Shift to 0-based: depot is 1 in the file but 0 internally.
    tour = np.array(tour).astype(int) - 1
    assert tour[0] == 0  # Tour should start with depot
    assert tour[-1] != 0  # Tour should not end with depot
    return tour[1:].tolist()
def write_oplib(filename, depot, loc, prize, max_length, name="problem"):
    """Write an OPLIB-format problem file.

    Coordinates and the length budget are scaled by 1e7 and rounded to
    integers because oplib does not accept floats. Node ids are 1-based with
    node 1 = depot (prize 0).
    """
    scale = 10000000
    header = [
        ("NAME", name),
        ("TYPE", "OP"),
        ("DIMENSION", len(loc) + 1),
        ("COST_LIMIT", int(max_length * scale + 0.5)),
        ("EDGE_WEIGHT_TYPE", "EUC_2D"),
    ]
    lines = ["{} : {}".format(key, value) for key, value in header]

    lines.append("NODE_COORD_SECTION")
    for i, (x, y) in enumerate([depot] + loc):
        # +0.5 rounds to nearest integer after scaling
        lines.append("{}\t{}\t{}".format(i + 1, int(x * scale + 0.5), int(y * scale + 0.5)))

    lines.append("NODE_SCORE_SECTION")
    for i, d in enumerate([0] + prize):
        lines.append("{}\t{}".format(i + 1, d))

    lines.extend(["DEPOT_SECTION", "1", "-1", "EOF"])

    with open(filename, 'w') as f:
        f.write("\n".join(lines) + "\n")
def solve_opga(directory, name, depot, loc, prize, max_length, disable_cache=False):
    """Solve one OP instance with the genetic algorithm (opga), with caching.

    Returns (negative collected prize, 0-based tour without depots, duration).
    """
    problem_filename = os.path.join(directory, "{}.opga.pkl".format(name))
    if os.path.isfile(problem_filename) and not disable_cache:
        (prize, tour, duration) = load_dataset(problem_filename)
    else:
        # 0 = start, 1 = end so add depot twice
        start = time.time()
        # opga expects nodes as (x, y, prize) tuples; depot has prize 0.
        prize, tour, duration = run_opga_alg(
            [(*pos, p) for p, pos in zip([0, 0] + prize, [depot, depot] + loc)],
            max_length, return_sol=True, verbose=False
        )
        duration = time.time() - start  # Measure clock time
        save_dataset((prize, tour, duration), problem_filename)

    # First and last node are depot(s), so first node is 2 but should be 1 (as depot is 0) so subtract 1
    assert tour[0][3] == 0
    assert tour[-1][3] == 1
    return -prize, [i - 1 for x, y, p, i, t in tour[1:-1]], duration
def solve_gurobi(directory, name, depot, loc, prize, max_length, disable_cache=False, timeout=None, gap=None):
    """Solve one OP instance exactly (or with timeout/gap limits) via Gurobi.

    Results are cached per (instance, timeout, gap) combination. Returns
    (negative collected prize, 0-based tour without depot, duration) or None
    on failure.
    """
    # Lazy import so we do not need to have gurobi installed to run this script
    from problems.op.op_gurobi import solve_euclidian_op as solve_euclidian_op_gurobi

    try:
        # Cache key encodes timeout/gap so different settings do not collide.
        problem_filename = os.path.join(directory, "{}.gurobi{}{}.pkl".format(
            name, "" if timeout is None else "t{}".format(timeout), "" if gap is None else "gap{}".format(gap)))

        if os.path.isfile(problem_filename) and not disable_cache:
            (cost, tour, duration) = load_dataset(problem_filename)
        else:
            # 0 = start, 1 = end so add depot twice
            start = time.time()

            cost, tour = solve_euclidian_op_gurobi(
                depot, loc, prize, max_length, threads=1, timeout=timeout, gap=gap
            )
            duration = time.time() - start  # Measure clock time
            save_dataset((cost, tour, duration), problem_filename)

        # Strip the leading depot node so only visited customers remain.
        assert tour[0] == 0
        tour = tour[1:]

        assert calc_op_length(depot, loc, tour) <= max_length + MAX_LENGTH_TOL, "Tour exceeds max_length!"
        total_cost = -calc_op_total(prize, tour)
        assert abs(total_cost - cost) <= 1e-4, "Cost is incorrect"
        return total_cost, tour, duration

    except Exception as e:
        # Failures are tolerated so the batch run continues; the caching
        # mechanism lets us retry only the failed instances later.
        print("Exception occured")
        print(e)
        return None
def solve_ortools(directory, name, depot, loc, prize, max_length, sec_local_search=0, disable_cache=False):
    """Solve one OP instance with OR-Tools, optionally with local search time.

    Results are cached per local-search budget. Returns (negative collected
    prize, 0-based tour without depot, duration) or None on failure.
    """
    # Lazy import so we do not require ortools by default
    from problems.op.op_ortools import solve_op_ortools

    try:
        problem_filename = os.path.join(directory, "{}.ortools{}.pkl".format(name, sec_local_search))
        if os.path.isfile(problem_filename) and not disable_cache:
            objval, tour, duration = load_dataset(problem_filename)
        else:
            # 0 = start, 1 = end so add depot twice
            start = time.time()
            objval, tour = solve_op_ortools(depot, loc, prize, max_length, sec_local_search=sec_local_search)
            duration = time.time() - start
            save_dataset((objval, tour, duration), problem_filename)
        assert tour[0] == 0, "Tour must start with depot"
        tour = tour[1:]
        assert calc_op_length(depot, loc, tour) <= max_length + MAX_LENGTH_TOL, "Tour exceeds max_length!"
        assert abs(-calc_op_total(prize, tour) - objval) <= 1e-5, "Cost is incorrect"
        return -calc_op_total(prize, tour), tour, duration
    except Exception as e:
        # OR-Tools occasionally fails to find a feasible solution; tolerate
        # the failure so the batch run continues and retry via the cache.
        print("Exception occured")
        print(e)
        return None
def run_all_tsiligirides(
        dataset_path, sample, num_samples, eval_batch_size, max_calc_batch_size, no_cuda=False, dataset_n=None,
        progress_bar_mininterval=0.1, seed=1234):
    """Evaluate the Tsiligirides heuristic on a dataset, batched on (GPU) device.

    If `sample`, draws `num_samples` stochastic rollouts per instance and keeps
    the best; otherwise runs the greedy policy once. Returns (results,
    parallelism) where results are (cost, tour, duration) triples and
    parallelism is the effective batch size for timing normalization.
    """
    # Lazy imports keep torch optional for the other baselines in this script.
    import torch
    from torch.utils.data import DataLoader
    from utils import move_to, sample_many
    from problems.op.tsiligirides import op_tsiligirides
    from problems.op.problem_op import OP
    torch.manual_seed(seed)

    dataloader = DataLoader(
        OP.make_dataset(filename=dataset_path, num_samples=dataset_n if dataset_n is not None else 1000000),
        batch_size=eval_batch_size
    )
    device = torch.device("cuda:0" if torch.cuda.is_available() and not no_cuda else "cpu")
    results = []
    for batch in tqdm(dataloader, mininterval=progress_bar_mininterval):
        start = time.time()
        batch = move_to(batch, device)

        with torch.no_grad():
            # If the total rollout count exceeds the compute budget, split the
            # samples for a single instance over multiple iterations.
            if num_samples * eval_batch_size > max_calc_batch_size:
                assert eval_batch_size == 1
                assert num_samples % max_calc_batch_size == 0
                batch_rep = max_calc_batch_size
                iter_rep = num_samples // max_calc_batch_size
            else:
                batch_rep = num_samples
                iter_rep = 1
            sequences, costs = sample_many(
                lambda inp: (None, op_tsiligirides(inp, sample)),
                OP.get_costs,
                batch, batch_rep=batch_rep, iter_rep=iter_rep)
            duration = time.time() - start
            # Trailing zeros are depot padding; strip them from the tours.
            results.extend(
                [(cost.item(), np.trim_zeros(pi.cpu().numpy(),'b'), duration) for cost, pi in zip(costs, sequences)])
    return results, eval_batch_size
if __name__ == "__main__":
    executable = os.path.abspath(os.path.join('problems', 'op', 'compass', 'compass'))

    parser = argparse.ArgumentParser()
    parser.add_argument("method", help="Name of the method to evaluate, 'compass', 'opga' or 'tsili'")
    parser.add_argument("datasets", nargs='+', help="Filename of the dataset(s) to evaluate")
    parser.add_argument("-f", action='store_true', help="Set true to overwrite")
    parser.add_argument("-o", default=None, help="Name of the results file to write")
    parser.add_argument("--cpus", type=int, help="Number of CPUs to use, defaults to all cores")
    parser.add_argument('--no_cuda', action='store_true', help='Disable CUDA (only for Tsiligirides)')
    parser.add_argument('--disable_cache', action='store_true', help='Disable caching')
    parser.add_argument('--max_calc_batch_size', type=int, default=1000, help='Size for subbatches')
    parser.add_argument('--progress_bar_mininterval', type=float, default=0.1, help='Minimum interval')
    parser.add_argument('-n', type=int, help="Number of instances to process")
    parser.add_argument('--offset', type=int, help="Offset where to start processing")
    parser.add_argument('--results_dir', default='results', help="Name of results directory")

    opts = parser.parse_args()

    assert opts.o is None or len(opts.datasets) == 1, "Cannot specify result filename with more than one dataset"

    for dataset_path in opts.datasets:

        assert os.path.isfile(check_extension(dataset_path)), "File does not exist!"

        dataset_basename, ext = os.path.splitext(os.path.split(dataset_path)[-1])

        # Always define results_dir: the compass/opga/gurobi/ortools branch
        # below needs it for the per-instance target directory even when an
        # explicit output file (-o) is given (previously this raised a
        # NameError for -o combined with those methods).
        results_dir = os.path.join(opts.results_dir, "op", dataset_basename)
        if opts.o is None:
            os.makedirs(results_dir, exist_ok=True)
            # Encode offset/count/method into the default output filename.
            out_file = os.path.join(results_dir, "{}{}{}-{}{}".format(
                dataset_basename,
                "offs{}".format(opts.offset) if opts.offset is not None else "",
                "n{}".format(opts.n) if opts.n is not None else "",
                opts.method, ext
            ))
        else:
            out_file = opts.o

        assert opts.f or not os.path.isfile(
            out_file), "File already exists! Try running with -f option to overwrite."

        # Method name may carry a trailing run/seconds count, e.g. "tsili10".
        match = re.match(r'^([a-z]+)(\d*)$', opts.method)
        assert match
        method = match[1]
        runs = 1 if match[2] == '' else int(match[2])

        if method == "tsili" or method == "tsiligreedy":
            assert opts.offset is None, "Offset not supported for Tsiligirides"

            if method == "tsiligreedy":
                sample = False
                num_samples = 1
            else:
                sample = True
                num_samples = runs

            eval_batch_size = max(1, opts.max_calc_batch_size // num_samples)

            results, parallelism = run_all_tsiligirides(
                dataset_path, sample, num_samples, eval_batch_size, opts.max_calc_batch_size, opts.no_cuda, opts.n,
                opts.progress_bar_mininterval
            )
        elif method in ("compass", "opga", "gurobi", "gurobigap", "gurobit", "ortools"):

            target_dir = os.path.join(results_dir, "{}-{}".format(
                dataset_basename,
                opts.method
            ))
            assert opts.f or not os.path.isdir(target_dir), \
                "Target dir already exists! Try running with -f option to overwrite."

            if not os.path.isdir(target_dir):
                os.makedirs(target_dir)

            dataset = load_dataset(dataset_path)

            if method[:6] == "gurobi":
                use_multiprocessing = True  # We run one thread per instance

                def run_func(args):
                    return solve_gurobi(*args, disable_cache=opts.disable_cache,
                                        timeout=runs if method[6:] == "t" else None,
                                        gap=float(runs) if method[6:] == "gap" else None)
            elif method == "compass":
                use_multiprocessing = False

                def run_func(args):
                    return solve_compass_log(executable, *args, disable_cache=opts.disable_cache)
            elif method == "opga":
                use_multiprocessing = True

                def run_func(args):
                    return solve_opga(*args, disable_cache=opts.disable_cache)
            else:
                assert method == "ortools"
                use_multiprocessing = True

                def run_func(args):
                    return solve_ortools(*args, sec_local_search=runs, disable_cache=opts.disable_cache)

            results, parallelism = run_all_in_pool(
                run_func,
                target_dir, dataset, opts, use_multiprocessing=use_multiprocessing
            )

        else:
            assert False, "Unknown method: {}".format(opts.method)

        costs, tours, durations = zip(*results)  # Not really costs since they should be negative
        print("Average cost: {} +- {}".format(np.mean(costs), 2 * np.std(costs) / np.sqrt(len(costs))))
        print("Average serial duration: {} +- {}".format(
            np.mean(durations), 2 * np.std(durations) / np.sqrt(len(durations))))
        print("Average parallel duration: {}".format(np.mean(durations) / parallelism))
        print("Calculated total duration: {}".format(timedelta(seconds=int(np.sum(durations) / parallelism))))

        save_dataset((results, parallelism), out_file)
| 16,891 | 41.764557 | 118 | py |
RESPECT | RESPECT-main/problems/op/problem_op.py | from torch.utils.data import Dataset
import torch
import os
import pickle
from problems.op.state_op import StateOP
from utils.beam_search import beam_search
class OP(object):
    """Orienteering problem definition: cost function, dataset and state factories."""

    NAME = 'op'  # Orienteering problem

    @staticmethod
    def get_costs(dataset, pi):
        """Return (cost, None) for tours `pi` (batch, tour_len) over `dataset`.

        Cost is the negative sum of collected prizes (the framework minimizes).
        Index 0 in `pi` denotes the depot; trailing zeros are padding. Raises
        AssertionError on duplicate visits or max_length violations.
        """
        if pi.size(-1) == 1:  # In case all tours directly return to depot, prevent further problems
            assert (pi == 0).all(), "If all length 1 tours, they should be zero"
            # Return
            return torch.zeros(pi.size(0), dtype=torch.float, device=pi.device), None

        # Check that tours are valid, i.e. contain 0 to n -1
        sorted_pi = pi.data.sort(1)[0]
        # Make sure each node visited once at most (except for depot)
        assert ((sorted_pi[:, 1:] == 0) | (sorted_pi[:, 1:] > sorted_pi[:, :-1])).all(), "Duplicates"

        # Prepend a zero prize for the depot so pi can index directly.
        prize_with_depot = torch.cat(
            (
                torch.zeros_like(dataset['prize'][:, :1]),
                dataset['prize']
            ),
            1
        )
        p = prize_with_depot.gather(1, pi)

        # Gather dataset in order of tour
        loc_with_depot = torch.cat((dataset['depot'][:, None, :], dataset['loc']), 1)
        d = loc_with_depot.gather(1, pi[..., None].expand(*pi.size(), loc_with_depot.size(-1)))

        length = (
            (d[:, 1:] - d[:, :-1]).norm(p=2, dim=-1).sum(1)  # Prevent error if len 1 seq
            + (d[:, 0] - dataset['depot']).norm(p=2, dim=-1)  # Depot to first
            + (d[:, -1] - dataset['depot']).norm(p=2, dim=-1)  # Last to depot, will be 0 if depot is last
        )
        assert (length <= dataset['max_length'] + 1e-5).all(), \
            "Max length exceeded by {}".format((length - dataset['max_length']).max())

        # We want to maximize total prize but code minimizes so return negative
        return -p.sum(-1), None

    @staticmethod
    def make_dataset(*args, **kwargs):
        """Construct an OPDataset, forwarding all arguments."""
        return OPDataset(*args, **kwargs)

    @staticmethod
    def make_state(*args, **kwargs):
        """Build the initial StateOP for a batch, forwarding all arguments."""
        return StateOP.initialize(*args, **kwargs)

    @staticmethod
    def beam_search(input, beam_size, expand_size=None,
                    compress_mask=False, model=None, max_calc_batch_size=4096):
        """Run beam search for OP using `model` to score/propose expansions."""
        assert model is not None, "Provide model"

        fixed = model.precompute_fixed(input)

        def propose_expansions(beam):
            return model.propose_expansions(
                beam, fixed, expand_size, normalize=True, max_calc_batch_size=max_calc_batch_size
            )

        state = OP.make_state(
            #input, visited_dtype=torch.int64 if compress_mask else torch.uint8
            input, visited_dtype=torch.int64 if compress_mask else torch.bool
        )

        return beam_search(state, beam_size, propose_expansions)
def generate_instance(size, prize_type):
    """Sample one random OP instance of `size` nodes.

    prize_type: 'const' (all ones), 'unif' (uniform in {0.01..1.00}) or
    'dist' (scaled by distance to depot). See the paper for details.
    Returns a dict with 'loc', 'prize', 'depot' and 'max_length' tensors.
    """
    # Maximum tour length budget per supported graph size.
    max_length_by_size = {
        20: 2.,
        50: 3.,
        100: 4.
    }

    node_locations = torch.FloatTensor(size, 2).uniform_(0, 1)
    depot_location = torch.FloatTensor(2).uniform_(0, 1)

    # Prize schemes taken from Fischetti et al. 1998
    if prize_type == 'const':
        node_prizes = torch.ones(size)
    elif prize_type == 'unif':
        node_prizes = (1 + torch.randint(0, 100, size=(size, ))) / 100.
    else:
        # Based on distance to depot
        assert prize_type == 'dist'
        depot_dist = (depot_location[None, :] - node_locations).norm(p=2, dim=-1)
        scaled = (depot_dist / depot_dist.max(dim=-1, keepdim=True)[0] * 99).int()
        node_prizes = (1 + scaled).float() / 100.

    return {
        'loc': node_locations,
        # Uniform 1 - 9, scaled by capacities
        'prize': node_prizes,
        'depot': depot_location,
        'max_length': torch.tensor(max_length_by_size[size])
    }
class OPDataset(Dataset):
    """In-memory OP dataset, either loaded from a .pkl file or sampled randomly.

    Each item is a dict with 'loc', 'prize', 'depot' and 'max_length' tensors.
    """

    def __init__(self, filename=None, size=50, num_samples=1000000, offset=0, distribution='const'):
        super(OPDataset, self).__init__()
        assert distribution is not None, "Data distribution must be specified for OP"
        # Currently the distribution only selects the prize scheme.
        prize_type = distribution

        self.data_set = []
        if filename is not None:
            assert os.path.splitext(filename)[1] == '.pkl'

            with open(filename, 'rb') as f:
                raw = pickle.load(f)
            self.data = []
            for depot, loc, prize, max_length in raw[offset:offset + num_samples]:
                self.data.append({
                    'loc': torch.FloatTensor(loc),
                    'prize': torch.FloatTensor(prize),
                    'depot': torch.FloatTensor(depot),
                    'max_length': torch.tensor(max_length)
                })
        else:
            self.data = [generate_instance(size, prize_type) for _ in range(num_samples)]

        self.size = len(self.data)

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        return self.data[idx]
| 4,934 | 33.51049 | 106 | py |
RESPECT | RESPECT-main/problems/op/tsiligirides.py | import torch
from problems.op.state_op import StateOP
def op_tsiligirides(batch, sample=False, power=4.0):
    """Tsiligirides-style constructive heuristic for the OP.

    At each step, scores feasible nodes by (prize / distance)**power, keeps
    the top 4 candidates and either samples among them proportionally
    (`sample=True`) or picks greedily. Returns the selected node sequence
    per instance, shape (batch, steps), with 0 = depot.
    """
    state = StateOP.initialize(batch)

    all_a = []
    while not state.all_finished():
        # Compute scores
        mask = state.get_mask()
        # Desirability per non-depot node; infeasible nodes (mask != 0) get 0.
        p = (
            (mask[..., 1:] == 0).float() *
            state.prize[state.ids, 1:] /
            ((state.coords[state.ids, 1:, :] - state.cur_coord[:, :, None, :]).norm(p=2, dim=-1) + 1e-6)
        ) ** power

        bestp, besta = p.topk(4, dim=-1)
        bestmask = mask[..., 1:].gather(-1, besta)

        # If no feasible actions, must go to depot
        # mask == 0 means feasible, so if mask == 0 sums to 0 there are no feasible and
        # all corresponding ps should be 0, so we need to add a column with a 1 that corresponds
        # to selecting the end destination
        to_depot = ((bestmask == 0).sum(-1, keepdim=True) == 0).float()
        # best_p should be zero if we have to go to depot, but because of numeric stabilities, it isn't
        p_ = torch.cat((to_depot, bestp), -1)
        pnorm = p_ / p_.sum(-1, keepdim=True)

        if sample:
            a = pnorm[:, 0, :].multinomial(1)  # Sample action
        else:
            # greedy
            a = pnorm[:, 0, :].max(-1)[1].unsqueeze(-1)  # Add 'sampling dimension'

        # a == 0 means depot, otherwise subtract one
        final_a = torch.cat((torch.zeros_like(besta[..., 0:1]), besta + 1), -1)[:, 0, :].gather(-1, a)

        selected = final_a[..., 0]  # Squeeze unnecessary sampling dimension
        state = state.update(selected)
        all_a.append(selected)

    return torch.stack(all_a, -1)
| 1,672 | 37.906977 | 108 | py |
RESPECT | RESPECT-main/problems/op/state_op.py | import torch
from typing import NamedTuple
from utils.boolmask import mask_long2bool, mask_long_scatter
import torch.nn.functional as F
bypass = super
class StateOP(NamedTuple):
    """Immutable rollout state for the OP; updates return a new StateOP.

    All tensors carry a batch dimension; `ids` indexes rows of the fixed
    instance data so that beam-search copies can share coords/prize tensors.
    """
    # Fixed input
    coords: torch.Tensor  # Depot + loc
    prize: torch.Tensor
    # Max length is not a single value, but one for each node indicating max length tour should have when arriving
    # at this node, so this is max_length - d(depot, node)
    max_length: torch.Tensor

    # If this state contains multiple copies (i.e. beam search) for the same instance, then for memory efficiency
    # the coords and prizes tensors are not kept multiple times, so we need to use the ids to index the correct rows.
    ids: torch.Tensor  # Keeps track of original fixed data index of rows

    # State
    prev_a: torch.Tensor  # Last selected node per row
    visited_: torch.Tensor  # Keeps track of nodes that have been visited
    lengths: torch.Tensor  # Accumulated tour length per row
    cur_coord: torch.Tensor  # Coordinates of the current node
    cur_total_prize: torch.Tensor  # Prize collected so far
    i: torch.Tensor  # Keeps track of step

    @property
    def visited(self):
        """Visited mask as a bool tensor, unpacking the int64 encoding if needed."""
        #if self.visited_.dtype == torch.uint8:
        if self.visited_.dtype == torch.bool:
            return self.visited_
        else:
            return mask_long2bool(self.visited_, n=self.coords.size(-2))

    @property
    def dist(self):
        """Pairwise Euclidean distance matrix over all coords (incl. depot)."""
        return (self.coords[:, :, None, :] - self.coords[:, None, :, :]).norm(p=2, dim=-1)

    def __getitem__(self, key):
        """Index the per-row state tensors; scalar field access falls through."""
        if torch.is_tensor(key) or isinstance(key, slice):  # If tensor, idx all tensors by this tensor:
            return self._replace(
                ids=self.ids[key],
                prev_a=self.prev_a[key],
                visited_=self.visited_[key],
                lengths=self.lengths[key],
                cur_coord=self.cur_coord[key],
                cur_total_prize=self.cur_total_prize[key],
            )
        #return super(StateOP, self).__getitem__(key)
        # `bypass` is the module-level alias for builtin super (see top of file).
        return bypass(StateOP, self).__getitem__(key)

    # Warning: cannot override len of NamedTuple, len should be number of fields, not batch size
    # def __len__(self):
    #     return len(self.used_capacity)

    @staticmethod
    #def initialize(input, visited_dtype=torch.uint8):
    def initialize(input, visited_dtype=torch.bool):
        """Build the initial state from an input batch dict (depot/loc/prize/max_length)."""
        depot = input['depot']
        loc = input['loc']
        prize = input['prize']
        max_length = input['max_length']

        batch_size, n_loc, _ = loc.size()
        coords = torch.cat((depot[:, None, :], loc), -2)
        return StateOP(
            coords=coords,
            prize=F.pad(prize, (1, 0), mode='constant', value=0),  # add 0 for depot
            # max_length is max length allowed when arriving at node, so subtract distance to return to depot
            # Additionally, substract epsilon margin for numeric stability
            max_length=max_length[:, None] - (depot[:, None, :] - coords).norm(p=2, dim=-1) - 1e-6,
            ids=torch.arange(batch_size, dtype=torch.int64, device=loc.device)[:, None],  # Add steps dimension
            prev_a=torch.zeros(batch_size, 1, dtype=torch.long, device=loc.device),
            visited_=(  # Visited as mask is easier to understand, as long more memory efficient
                # Keep visited_ with depot so we can scatter efficiently (if there is an action for depot)
                torch.zeros(
                    batch_size, 1, n_loc + 1,
                    #dtype=torch.uint8, device=loc.device
                    dtype=torch.bool, device=loc.device
                )
                #if visited_dtype == torch.uint8
                if visited_dtype == torch.bool
                else torch.zeros(batch_size, 1, (n_loc + 1 + 63) // 64, dtype=torch.int64, device=loc.device)  # Ceil
            ),
            lengths=torch.zeros(batch_size, 1, device=loc.device),
            cur_coord=input['depot'][:, None, :],  # Add step dimension
            cur_total_prize=torch.zeros(batch_size, 1, device=loc.device),
            i=torch.zeros(1, dtype=torch.int64, device=loc.device)  # Vector with length num_steps
        )

    def get_remaining_length(self):
        """Length budget still available (relative to the original max_length)."""
        # max_length[:, 0] is max length arriving at depot so original max_length
        return self.max_length[self.ids, 0] - self.lengths

    def get_final_cost(self):
        """Final (negative prize) cost; only valid once all rollouts finished."""
        assert self.all_finished()
        # The cost is the negative of the collected prize since we want to maximize collected prize
        return -self.cur_total_prize

    def update(self, selected):
        """Advance one step to `selected` nodes, returning the new state."""
        assert self.i.size(0) == 1, "Can only update if state represents single step"

        # Update the state
        selected = selected[:, None]  # Add dimension for step
        prev_a = selected

        # Add the length
        cur_coord = self.coords[self.ids, selected]
        lengths = self.lengths + (cur_coord - self.cur_coord).norm(p=2, dim=-1)  # (batch_dim, 1)

        # Add the collected prize
        cur_total_prize = self.cur_total_prize + self.prize[self.ids, selected]

        #if self.visited_.dtype == torch.uint8:
        if self.visited_.dtype == torch.bool:
            # Note: here we do not subtract one as we have to scatter so the first column allows scattering depot
            # Add one dimension since we write a single value
            visited_ = self.visited_.scatter(-1, prev_a[:, :, None], 1)
        else:
            # This works, by check_unset=False it is allowed to set the depot visited a second a time
            visited_ = mask_long_scatter(self.visited_, prev_a, check_unset=False)

        return self._replace(
            prev_a=prev_a, visited_=visited_,
            lengths=lengths, cur_coord=cur_coord, cur_total_prize=cur_total_prize, i=self.i + 1
        )

    def all_finished(self):
        """True once every rollout has returned to the depot (after step 0)."""
        # All must be returned to depot (and at least 1 step since at start also prev_a == 0)
        # This is more efficient than checking the mask
        return self.i.item() > 0 and (self.prev_a == 0).all()
        # return self.visited[:, :, 0].all()  # If we have visited the depot we're done

    def get_current_node(self):
        """
        Returns the current node where 0 is depot, 1...n are nodes
        :return: (batch_size, num_steps) tensor with current nodes
        """
        return self.prev_a

    def get_mask(self):
        """
        Gets a (batch_size, n_loc + 1) mask with the feasible actions (0 = depot), depends on already visited and
        remaining capacity. 0 = feasible, 1 = infeasible
        Forbids to visit depot twice in a row, unless all nodes have been visited
        :return:
        """
        exceeds_length = (
            self.lengths[:, :, None] + (self.coords[self.ids, :, :] - self.cur_coord[:, :, None, :]).norm(p=2, dim=-1)
            > self.max_length[self.ids, :]
        )
        # Note: this always allows going to the depot, but that should always be suboptimal so be ok
        # Cannot visit if already visited or if length that would be upon arrival is too large to return to depot
        # If the depot has already been visited then we cannot visit anymore
        visited_ = self.visited.to(exceeds_length.dtype)
        mask = visited_ | visited_[:, :, 0:1] | exceeds_length
        # Depot can always be visited
        # (so we do not hardcode knowledge that this is strictly suboptimal if other options are available)
        mask[:, :, 0] = 0
        return mask

    def construct_solutions(self, actions):
        """Actions already are the solution sequence; return them unchanged."""
        return actions
| 7,431 | 43.238095 | 118 | py |
RESPECT | RESPECT-main/utils/tensor_functions.py | import torch
def compute_in_batches(f, calc_batch_size, *args, n=None):
    """
    Computes memory heavy function f(*args) in batches
    :param n: the total number of elements, optional if it cannot be determined as args[0].size(0)
    :param f: The function that is computed, should take only tensors as arguments and return tensor or tuple of tensors
    :param calc_batch_size: The batch size to use when computing this function
    :param args: Tensor arguments with equally sized first batch dimension
    :return: f(*args), this should be one or multiple tensors with equally sized first batch dimension
    """
    if n is None:
        n = args[0].size(0)
    num_batches = -(-n // calc_batch_size)  # ceil division

    # Single batch: no splitting or concatenation needed.
    if num_batches == 1:
        return f(*args)

    # Slice rather than torch.chunk so any class supporting slicing works.
    chunk_results = []
    for b in range(num_batches):
        lo = b * calc_batch_size
        hi = lo + calc_batch_size
        chunk_results.append(f(*(arg[lo:hi] for arg in args)))

    def safe_cat(chunks, dim=0):
        # Allow for functions that return None for some outputs.
        if chunks[0] is None:
            assert all(chunk is None for chunk in chunks)
            return None
        return torch.cat(chunks, dim)

    # Tuple results are concatenated element-wise; single results directly.
    if isinstance(chunk_results[0], tuple):
        return tuple(safe_cat(parts, 0) for parts in zip(*chunk_results))
    return safe_cat(chunk_results, 0)
| 1,608 | 44.971429 | 120 | py |
RESPECT | RESPECT-main/utils/monkey_patch.py | import torch
from collections import defaultdict
from collections.abc import Iterable
from copy import deepcopy
from itertools import chain
def load_state_dict(self, state_dict):
    """Loads the optimizer state (monkey patch for torch.optim.Optimizer).

    Unlike the stock implementation, tensors in the loaded state are moved
    to the *device* of the parameter they belong to, not only cast to its
    dtype.

    Arguments:
        state_dict (dict): optimizer state. Should be an object returned
            from a call to :meth:`state_dict`.

    Raises:
        ValueError: if the saved parameter groups do not match this
            optimizer's groups in number or size.
    """
    # Imported here so the function works on Python >= 3.10 even if the
    # module-level import still pulls Iterable from `collections`
    # (the alias was removed from `collections` in 3.10).
    from collections.abc import Iterable

    # deepcopy, to be consistent with module API
    state_dict = deepcopy(state_dict)
    # Validate the state_dict: group structure must match this optimizer.
    groups = self.param_groups
    saved_groups = state_dict['param_groups']

    if len(groups) != len(saved_groups):
        raise ValueError("loaded state dict has a different number of "
                         "parameter groups")
    param_lens = (len(g['params']) for g in groups)
    saved_lens = (len(g['params']) for g in saved_groups)
    if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
        raise ValueError("loaded state dict contains a parameter group "
                         "that doesn't match the size of optimizer's group")

    # Map saved per-parameter ids onto the corresponding live parameters.
    id_map = {old_id: p for old_id, p in
              zip(chain(*(g['params'] for g in saved_groups)),
                  chain(*(g['params'] for g in groups)))}

    def cast(param, value):
        """Make a deep copy of value, casting all tensors to device of param."""
        if torch.is_tensor(value):
            # Floating-point types are a bit special here. They are the only
            # ones that are assumed to always match the type of params.
            if any(tp in type(param.data).__name__ for tp in {'Half', 'Float', 'Double'}):
                value = value.type_as(param.data)
            value = value.to(param.device)
            return value
        elif isinstance(value, dict):
            # Must be checked before Iterable: dicts are iterable too.
            return {k: cast(param, v) for k, v in value.items()}
        elif isinstance(value, Iterable):
            return type(value)(cast(param, v) for v in value)
        else:
            return value

    # Copy state assigned to params (and cast tensors to appropriate types).
    # State that is not assigned to params is copied as is (needed for
    # backward compatibility).
    state = defaultdict(dict)
    for k, v in state_dict['state'].items():
        if k in id_map:
            param = id_map[k]
            state[param] = cast(param, v)
        else:
            state[k] = v

    # Update parameter groups, setting their 'params' value
    def update_group(group, new_group):
        new_group['params'] = group['params']
        return new_group
    param_groups = [
        update_group(g, ng) for g, ng in zip(groups, saved_groups)]
    self.__setstate__({'state': state, 'param_groups': param_groups})
torch.optim.Optimizer.load_state_dict = load_state_dict | 2,734 | 38.071429 | 90 | py |
RESPECT | RESPECT-main/utils/functions.py | import warnings
import torch
import numpy as np
import os
import json
from tqdm import tqdm
from multiprocessing.dummy import Pool as ThreadPool
from multiprocessing import Pool
import torch.nn.functional as F
import networkx as nx
import random
def load_problem(name):
    """Resolve a problem name to its problem class.

    :param name: one of 'toposort', 'tsp', 'cvrp', 'sdvrp', 'op',
        'pctsp_det', 'pctsp_stoch'
    :return: the corresponding problem class
    """
    from problems import TSP, CVRP, SDVRP, OP, PCTSPDet, PCTSPStoch, TopoSort
    registry = {
        'toposort': TopoSort,
        'tsp': TSP,
        'cvrp': CVRP,
        'sdvrp': SDVRP,
        'op': OP,
        'pctsp_det': PCTSPDet,
        'pctsp_stoch': PCTSPStoch,
    }
    problem = registry.get(name, None)
    assert problem is not None, "Currently unsupported problem: {}!".format(name)
    return problem
def torch_load_cpu(load_path):
    """Load a checkpoint onto the CPU, regardless of where it was saved."""
    # map_location keeps every storage on the CPU.
    return torch.load(load_path, map_location=torch.device('cpu'))
def move_to(var, device):
    """Recursively move a tensor, or a dict of tensors, to the given device."""
    if not isinstance(var, dict):
        return var.to(device)
    return {key: move_to(value, device) for key, value in var.items()}
def _load_model_file(load_path, model):
    """Loads the model with parameters from the file and returns optimizer state dict if it is in the file

    :param load_path: checkpoint path, resolved relative to the current
        working directory
    :param model: model instance whose parameters get (partially) overwritten
    :return: (model, optimizer_state_dict); the latter is None when the
        checkpoint does not contain an 'optimizer' entry
    """
    # Load the model parameters from a saved state
    load_optimizer_state_dict = None
    print(' [*] Loading model from {}'.format(load_path))
    # Always load onto CPU; the caller decides where to move the model.
    load_data = torch.load(
        os.path.join(
            os.getcwd(),
            load_path
        ), map_location=lambda storage, loc: storage)
    if isinstance(load_data, dict):
        # Checkpoint dict: may carry both 'model' and 'optimizer' entries;
        # a plain state dict (no 'model' key) is used as-is.
        load_optimizer_state_dict = load_data.get('optimizer', None)
        load_model_state_dict = load_data.get('model', load_data)
    else:
        # A pickled model object was saved directly; extract its parameters.
        load_model_state_dict = load_data.state_dict()
    # Merge into the current state dict so keys missing from the checkpoint
    # keep their current values.
    state_dict = model.state_dict()
    state_dict.update(load_model_state_dict)
    model.load_state_dict(state_dict)
    return model, load_optimizer_state_dict
def load_args(filename):
    """Read a JSON args file, normalizing legacy 'op_<dist>' problem names."""
    with open(filename, 'r') as f:
        args = json.load(f)

    # Backwards compatibility: older runs encoded the data distribution in
    # the problem name (e.g. "op_const") instead of a separate field.
    if 'data_distribution' not in args:
        args['data_distribution'] = None
        problem_name, *distribution = args['problem'].split("_")
        if problem_name == "op":
            args['problem'] = problem_name
            args['data_distribution'] = distribution[0]
    return args
def load_model(path, epoch=None):
    """Load a trained model (and its args) from a checkpoint file or run dir.

    :param path: either a .pt checkpoint file, or a run directory containing
        'epoch-<n>.pt' checkpoints plus an 'args.json'
    :param epoch: epoch to load when path is a directory; defaults to the
        highest epoch number found
    :return: (model in eval mode, args dict)
    """
    from nets.attention_model import AttentionModel
    from nets.pointer_network import PointerNetwork
    if os.path.isfile(path):
        model_filename = path
        path = os.path.dirname(model_filename)
    elif os.path.isdir(path):
        if epoch is None:
            # Pick the latest epoch from files named 'epoch-<n>.pt'.
            epoch = max(
                int(os.path.splitext(filename)[0].split("-")[1])
                for filename in os.listdir(path)
                if os.path.splitext(filename)[1] == '.pt'
            )
        model_filename = os.path.join(path, 'epoch-{}.pt'.format(epoch))
    else:
        assert False, "{} is not a valid directory or file".format(path)
    args = load_args(os.path.join(path, 'args.json'))
    problem = load_problem(args['problem'])
    # Older checkpoints may not record 'model'; default to attention.
    model_class = {
        'attention': AttentionModel,
        'pointer': PointerNetwork
    }.get(args.get('model', 'attention'), None)
    assert model_class is not None, "Unknown model: {}".format(model_class)
    model = model_class(
        args['embedding_dim'],
        args['hidden_dim'],
        problem,
        n_encode_layers=args['n_encode_layers'],
        mask_inner=True,
        mask_logits=True,
        normalization=args['normalization'],
        tanh_clipping=args['tanh_clipping'],
        checkpoint_encoder=args.get('checkpoint_encoder', False),
        shrink_size=args.get('shrink_size', None)
    )
    # Overwrite model parameters by parameters to load
    load_data = torch_load_cpu(model_filename)
    model.load_state_dict({**model.state_dict(), **load_data.get('model', {})})
    model, *_ = _load_model_file(model_filename, model)
    model.eval()  # Put in eval mode
    return model, args
def parse_softmax_temperature(raw_temp):
    """Parse a softmax temperature given directly or via a results file.

    If raw_temp names an existing file, the value is taken from the first
    column of the last row of that file; otherwise it is parsed as a float.
    """
    if not os.path.isfile(raw_temp):
        return float(raw_temp)
    # Load from file
    return np.loadtxt(raw_temp)[-1, 0]
def run_all_in_pool(func, directory, dataset, opts, use_multiprocessing=True):
    """Run func over every dataset instance in a (process or thread) pool.

    func receives tuples (directory, zero-padded instance name, *instance).
    Expects opts to provide: cpus, n, progress_bar_mininterval and
    optionally offset.

    :return: (results list, number of workers used)
    """
    # # Test
    # res = func((directory, 'test', *dataset[0]))
    # return [res]
    num_cpus = os.cpu_count() if opts.cpus is None else opts.cpus
    # Width for zero-padding instance names so they sort lexicographically.
    w = len(str(len(dataset) - 1))
    offset = getattr(opts, 'offset', None)
    if offset is None:
        offset = 0
    ds = dataset[offset:(offset + opts.n if opts.n is not None else len(dataset))]
    # Fall back to threads when multiprocessing is disabled or pointless.
    pool_cls = (Pool if use_multiprocessing and num_cpus > 1 else ThreadPool)
    with pool_cls(num_cpus) as pool:
        results = list(tqdm(pool.imap(
            func,
            [
                (
                    directory,
                    str(i + offset).zfill(w),
                    *problem
                )
                for i, problem in enumerate(ds)
            ]
        ), total=len(ds), mininterval=opts.progress_bar_mininterval))
    # A None result marks a failed instance; fail loudly listing them all.
    failed = [str(i + offset) for i, res in enumerate(results) if res is None]
    assert len(failed) == 0, "Some instances failed: {}".format(" ".join(failed))
    return results, num_cpus
def do_batch_rep(v, n):
    """Repeat v n times along a new leading dim, folded into the batch dim.

    Works recursively on dicts/lists/tuples of tensors. A tensor of shape
    (batch, ...) becomes (n * batch, ...), with the whole batch repeated n
    times, i.e. [v; v; ...; v] along dim 0.
    """
    if isinstance(v, dict):
        return {key: do_batch_rep(val, n) for key, val in v.items()}
    if isinstance(v, list):
        return [do_batch_rep(item, n) for item in v]
    if isinstance(v, tuple):
        return tuple(do_batch_rep(item, n) for item in v)
    # expand avoids materializing the repeats; contiguous() copies once.
    expanded = v[None, ...].expand(n, *v.size())
    return expanded.contiguous().view(-1, *v.size()[1:])
def sample_many(inner_func, get_cost_func, input, batch_rep=1, iter_rep=1):
    """Sample batch_rep * iter_rep solutions per instance, keep the cheapest.

    :param inner_func: model sampling function, returns (log_p, pi)
    :param get_cost_func: returns (cost, mask) for a batch of solutions pi
    :param input: (batch_size, graph_size, node_dim) input node features
    :param batch_rep: number of in-batch replications per iteration
    :param iter_rep: number of sequential sampling iterations
    :return: (best pi per instance, best cost per instance)
    """
    # Replicate the batch so each iteration samples batch_rep solutions.
    input = do_batch_rep(input, batch_rep)
    costs = []
    pis = []
    for i in range(iter_rep):
        _log_p, pi = inner_func(input)
        # pi.view(-1, batch_rep, pi.size(-1))
        cost, mask = get_cost_func(input, pi)
        # Reshape so dim 0 is the original batch, dim 1 the replications.
        costs.append(cost.view(batch_rep, -1).t())
        pis.append(pi.view(batch_rep, -1, pi.size(-1)).transpose(0, 1))
    # Solutions may have different lengths across iterations; pad to the max.
    max_length = max(pi.size(-1) for pi in pis)
    # (batch_size * batch_rep, iter_rep, max_length) => (batch_size, batch_rep * iter_rep, max_length)
    pis = torch.cat(
        [F.pad(pi, (0, max_length - pi.size(-1))) for pi in pis],
        1
    )  # .view(embeddings.size(0), batch_rep * iter_rep, max_length)
    costs = torch.cat(costs, 1)
    # (batch_size)
    mincosts, argmincosts = costs.min(-1)
    # (batch_size, minlength)
    minpis = pis[torch.arange(pis.size(0), out=argmincosts.new()), argmincosts]
    return minpis, mincosts
def order_compare(order_rl, order_sorted):
    """Compare a predicted ordering against a reference ordering.

    For each value, records its position and the values that follow it in
    each ordering, then counts pairwise "follows" relations of the reference
    that also hold in the prediction, and position offsets per value.

    :param order_rl: 1-D tensor of values in predicted order
    :param order_sorted: 1-D tensor of the same values in reference order
    :return: (number of preserved follow-relations, mean position offset,
        max position offset)
    """
    L = len(order_rl)
    # value -> [first position, then every value appearing after it]
    cal_relationship = dict()
    sorted_relationship = dict()
    recall_num = 0
    radius = []
    for i in range(L):
        # Append the current element to every already-seen value's tail.
        for key in cal_relationship:
            cal_relationship[key].append(order_rl[i])
        cal_relationship[order_rl[i].item()] = [i]
        for key in sorted_relationship:
            sorted_relationship[key].append(order_sorted[i])
        sorted_relationship[order_sorted[i].item()] = [i]
    for key in sorted_relationship:
        # Count reference successors of `key` that are also successors of
        # `key` in the prediction (tensor equality via `in`).
        recall_num += len([element for element in sorted_relationship[key][1:] if element in cal_relationship[key][1:]])
        radius.append(abs(sorted_relationship[key][0]-cal_relationship[key][0]))
    #recall_accuracy = recall_num / ((L-1)*L/2.)
    recall_accuracy = recall_num
    radius_mean = sum(radius) / L
    radius.sort()
    return recall_accuracy, radius_mean, radius[-1]
def order_check(idx, idx_sorted):
    """Batch wrapper around order_compare.

    :param idx: (batch, L) predicted orderings
    :param idx_sorted: (batch, L) reference orderings
    :return: (mean recall, mean radius, max radius over the batch,
        max recall, min recall)
    """
    batch_size = idx.shape[0]
    accuracy_recall = [0. for _ in range(batch_size)]
    radius_mean = [0. for _ in range(batch_size)]
    radius_max = [0 for _ in range(batch_size)]
    for order in range(batch_size):
        order_training = idx[order]
        order_sorted = idx_sorted[order]
        recall_accuracy, radius_m, max_radius = order_compare(order_training, order_sorted)
        accuracy_recall[order] = recall_accuracy
        radius_mean[order] = radius_m
        radius_max[order] = max_radius
    return sum(accuracy_recall)/len(accuracy_recall), sum(radius_mean)/len(radius_mean), max(radius_max), max(accuracy_recall), min(accuracy_recall)
    #return accuracy_recall
def orderCompare(order_rl, order_sorted):
    """Variant of order_compare that also counts positional mismatches.

    NOTE(review): near-duplicate of order_compare above; differs only in the
    mis_match counter and in normalizing recall by the number of pairs.

    :return: (positions where the orders differ, normalized recall accuracy,
        mean position offset, max position offset)
    """
    L = len(order_rl)
    cal_relationship = dict()
    sorted_relationship = dict()
    mis_match = 0
    recall_num = 0
    radius = []
    for i in range(L):
        if order_rl[i] != order_sorted[i]: mis_match += 1
        for key in cal_relationship:
            cal_relationship[key].append(order_rl[i])
        cal_relationship[order_rl[i].item()] = [i]
        for key in sorted_relationship:
            sorted_relationship[key].append(order_sorted[i])
        sorted_relationship[order_sorted[i].item()] = [i]
    for key in sorted_relationship:
        recall_num += len([element for element in sorted_relationship[key][1:] if element in cal_relationship[key][1:]])
        radius.append(abs(sorted_relationship[key][0]-cal_relationship[key][0]))
    # Normalize by the total number of ordered pairs, L*(L-1)/2.
    recall_accuracy = recall_num / ((L-1)*L/2.)
    radius_mean = sum(radius) / L
    radius.sort()
    return mis_match, recall_accuracy, radius_mean, radius[-1]
def orderCheck(idx, idx_sorted):
    """Batch wrapper around orderCompare; returns per-instance lists.

    :param idx: (batch, L) predicted orderings
    :param idx_sorted: (batch, L) reference orderings
    :return: lists (mismatch counts, recall accuracies, mean radii, max radii)
    """
    batch_size = idx.shape[0]
    misMatch = [0 for _ in range(batch_size)]
    accuracy_recall = [0. for _ in range(batch_size)]
    radius_mean = [0. for _ in range(batch_size)]
    radius_max = [0 for _ in range(batch_size)]
    for order in range(batch_size):
        order_training = idx[order]
        order_sorted = idx_sorted[order]
        mis_match, recall_accuracy, radius_m, max_radius = orderCompare(order_training, order_sorted)
        misMatch[order] = mis_match
        accuracy_recall[order] = recall_accuracy
        radius_mean[order] = radius_m
        radius_max[order] = max_radius
    return misMatch, accuracy_recall, radius_mean, radius_max
def smart_sort(x, permutation, dim=3):
    """Reorder x along dim 1 according to a per-batch permutation.

    :param x: (d1, d2) tensor when dim == 2, otherwise (d1, d2, d3)
    :param permutation: (d1, d2) integer tensor; output row i of batch b is
        x[b, permutation[b, i]]
    :param dim: dimensionality of x (2 or 3)
    :return: reordered tensor with the same shape as x
    """
    if dim == 2:
        d1, d2 = x.size()
        batch_index = torch.arange(d1).unsqueeze(1).repeat(1, d2).flatten()
        return x[batch_index, permutation.flatten()].view(d1, d2)
    d1, d2, d3 = x.size()
    batch_index = torch.arange(d1).unsqueeze(1).repeat(1, d2).flatten()
    return x[batch_index, permutation.flatten()].view(d1, d2, d3)
def x_sorting(indice, data):
    """Within each run of equal y values, reorder indices by ascending x.

    Assumes data rows are already grouped by y (column 1); each maximal run
    of equal y is re-sorted by x (column 0) and the corresponding entries of
    indice are permuted accordingly.

    NOTE(review): hardcodes .cuda(), so this requires a GPU.

    :param indice: 1-D index tensor aligned with data rows
    :param data: (L, 2) tensor of (x, y) coordinates
    :return: reordered copy of indice (empty tensor on length mismatch)
    """
    L = len(data)
    if len(indice) != L: return torch.empty(0).cuda()
    y_ = data[:,1].view(data.shape[0])
    x_ = data[:,0].view(data.shape[0])
    left, right = 0, 1
    completed_indices = torch.empty(0).cuda()
    while right <= L:
        # A run [left, right) ends at the end of data or where y changes.
        if right == L or y_[left] != y_[right]:
            if right - left > 1:
                # Sort the run by x and permute the matching index slice.
                _, x_indice = torch.sort(x_[left:right], dim=0)
                if len(completed_indices) == 0:
                    completed_indices = smart_sort(indice[left:right].unsqueeze(0), x_indice.unsqueeze(0), 2).squeeze(0)
                else:
                    completed_indices = torch.cat((completed_indices, smart_sort(indice[left:right].unsqueeze(0), x_indice.unsqueeze(0), 2).squeeze(0)), dim=0)
            else:
                # Single-element run: copy through unchanged.
                if len(completed_indices) == 0:
                    completed_indices = indice[left:right]
                else:
                    completed_indices = torch.cat((completed_indices, indice[left:right]), dim=0)
            left = right
        right += 1
    return completed_indices
def deep_sort_x(indices, d):
    """Refine per-batch orderings: within equal-y runs, sort by x.

    Mutates and returns `indices` in place, one batch entry at a time.

    :param indices: (batch, L) ordering tensor
    :param d: (batch, L, 2) coordinate tensor
    """
    d_y_sorting = smart_sort(d, indices)
    batch_size = len(d)
    for i in range(batch_size):
        indices[i] = x_sorting(indices[i], d_y_sorting[i])
    return indices
def level_mismatch_cost(y_s, y):
    """Cosine similarity between sorted levels and per-level means.

    Splits y_s into runs of equal values ("levels"); for each level keeps its
    sorted value and the mean of the corresponding slice of y, then returns
    the cosine similarity of the two resulting vectors.

    NOTE(review): hardcodes .cuda(); returns a pair of empty tensors on a
    length mismatch, a single tensor otherwise (inconsistent arity).

    :param y_s: 1-D sorted y values
    :param y: 1-D y values in the compared order
    :return: (1,) tensor with the cosine similarity
    """
    L = len(y_s)
    if L != len(y):
        print("Warning: lenth mismatch of compared y axis data")
        return torch.empty(0).cuda(), torch.empty(0).cuda()
    cos = torch.nn.CosineSimilarity(dim=0, eps=1e-6)
    left, right = 0, 1
    coordi_sorted = torch.empty(0).cuda()
    coordi = torch.empty(0).cuda()
    while right <= L:
        # A level [left, right) ends at the array end or where y_s changes.
        if right == L or y_s[left] != y_s[right]:
            if left == 0:
                coordi_sorted = y_s[left:(left+1)]
                coordi = y[left:right].mean().unsqueeze(0)
            else:
                coordi_sorted = torch.cat((coordi_sorted, y_s[left:left+1]), dim=0)
                coordi = torch.cat((coordi, y[left:right].mean().unsqueeze(0)), dim=0)
            #cost_mean += torch.sqrt(torch.sum(torch.square(torch.sub(y[left:right], y_s[left:right]))))
            left = right
        right += 1
    return cos(coordi_sorted, coordi).unsqueeze(0).cuda()
def level_sorting(y_sorted, y_):
    """Batch wrapper: level_mismatch_cost per batch entry, concatenated.

    :param y_sorted: (batch, L) sorted y values
    :param y_: (batch, L) y values in the compared order
    :return: (batch,) tensor of cosine-similarity costs
    """
    batch_size = len(y_)
    cost = torch.empty(0).cuda()
    for i in range(batch_size):
        if i == 0:
            cost = level_mismatch_cost(y_sorted[i], y_[i])
        else:
            cost = torch.cat((cost, level_mismatch_cost(y_sorted[i], y_[i])), dim=0)
    return cost
def level_sorting_xy_pairs(indices, d, alpha=0.2, beta=0.8):
    """Score an ordering by weighted x/y cosine similarity after refinement.

    Refines `indices` (within equal-y runs, sort by x; mutated in place),
    then compares the reordered x and y columns against the originals.

    NOTE(review): hardcodes .cuda().

    :param indices: (batch, L) ordering tensor, mutated in place
    :param d: (batch, L, 2) coordinate tensor
    :param alpha: weight for the x-column similarity
    :param beta: weight for the y-column similarity
    :return: ((batch,) cost tensor, refined indices)
    """
    y_ = d[:,:,1].view(d.shape[0], d.shape[1])
    x_ = d[:,:,0].view(d.shape[0], d.shape[1])
    d_y_sorting = smart_sort(d, indices)
    batch_size = len(d)
    for i in range(batch_size):
        indices[i] = x_sorting(indices[i], d_y_sorting[i])
    d_xy_sorting = smart_sort(d, indices)
    y_s = d_xy_sorting[:,:,1].view(d_xy_sorting.shape[0], d_xy_sorting.shape[1])
    x_s = d_xy_sorting[:,:,0].view(d_xy_sorting.shape[0], d_xy_sorting.shape[1])
    cos = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
    cost = alpha * cos(x_s.cuda(), x_.cuda()).cuda() + beta * cos(y_s.cuda(), y_.cuda()).cuda()
    return cost, indices
def topo_sorting(graph):
    """Topologically sort a single DAG given as an adjacency-code matrix.

    Matrix encoding (square tensor): graph[i][i'] == 2 marks node i's own id
    via its column position; graph[u][v] == 1 encodes an edge u -> v and
    graph[v][u] == -1 the reverse direction; 0 means no relation.

    Fix vs. the previous version: the stray debug ``print(result)`` was
    removed so the function no longer pollutes stdout.

    :param graph: (n, n) tensor in the encoding above
    :return: list of node ids in a topological order
    """
    head = []   # rows of source nodes (no -1 entries, i.e. no predecessors)
    tail = []   # ids of sink nodes (exactly n-1 entries <= 0)
    nodes_of_multiPredecessors = []  # ids reached from more than one parent
    visited = dict()  # memo: node id -> already-computed downstream path
    def combine(path1, path2):
        # Merge two downstream paths: interleave ids in ascending order up to
        # the first shared multi-predecessor node (pivot), then recurse on
        # the suffixes past the pivot.
        if len(path1) <= 0 or len(path2) <= 0: return path1 + path2
        result_path = []
        pivot = -1
        pivot_id1 = len(path1)
        pivot_id2 = len(path2)
        for _id1 in range(len(path1)):
            if pivot_id1 < len(path1): break
            if path1[_id1] in nodes_of_multiPredecessors:
                for _id2 in range(len(path2)):
                    if path2[_id2] == path1[_id1]:
                        pivot = path1[_id1]
                        pivot_id1 = _id1
                        pivot_id2 = _id2
        sub_combine = []
        if pivot_id1 < len(path1) and pivot_id2 < len(path2):
            sub_combine = combine(path1[pivot_id1+1:], path2[pivot_id2+1:])
            path1 = path1[:pivot_id1]
            path2 = path2[:pivot_id2]
        # Standard two-pointer merge of the pre-pivot prefixes.
        point1, point2 = 0, 0
        while point1 < len(path1) and point2 < len(path2):
            if path1[point1] < path2[point2]:
                result_path.append(path1[point1])
                point1 += 1
            else:
                result_path.append(path2[point2])
                point2 += 1
        if point1 < len(path1): result_path += path1[point1:]
        elif point2 < len(path2): result_path += path2[point2:]
        if pivot >= 0: result_path.append(pivot)
        return result_path + sub_combine
    def path_finder(node):
        # Depth-first expansion of the given node's row; returns the node id
        # followed by a merged ordering of all its descendants.
        #node_id = np.where(node==2)[0][0]
        node_id = torch.nonzero(node==2).item()
        if node_id in visited:
            return visited[node_id]
        if node_id in tail:
            return []
        sub_paths = []
        #for child_id in np.where(node==1)[0]:
        for child_id in torch.nonzero(node==1):
            # Locate the child's own row (the row whose diagonal marker 2
            # sits in column child_id) and recurse.
            sub_paths.append(path_finder(graph[[(graph[i][child_id]==2).item() for i in range(graph.shape[0])].index(True)]))
        # Pairwise-merge children paths until a single ordering remains.
        while len(sub_paths) > 1:
            combine_path = combine(sub_paths[0], sub_paths[1])
            sub_paths = sub_paths[2:] + [combine_path]
        visited[node_id] = sub_paths[0]
        #if np.where(node==-1)[0].shape[0] > 1:
        if torch.nonzero(node==-1).shape[0] > 1:
            nodes_of_multiPredecessors.append(node_id)
        return [node_id] + sub_paths[0]
    # Classify rows into sources (head) and sinks (tail).
    for idx in range(graph.shape[0]):
        embedding_node = graph[idx]
        if sum([i>=0 for i in embedding_node]) == embedding_node.shape[0]:
            head.append(embedding_node)
        elif sum([i<=0 for i in embedding_node]) == embedding_node.shape[0] - 1:
            tail.append(torch.nonzero(embedding_node==2).item())
    path_collections = []
    for head_node in head:
        path_collections.append(path_finder(head_node)[1:])
    while len(path_collections) > 1:
        path_combination = combine(path_collections[0], path_collections[1])
        path_collections = path_collections[2:] + [path_combination]
    # Sources first, interior ordering next, sinks last.
    result = sorted([torch.nonzero(head[i]==2).item() for i in range(len(head))]) + path_collections[0] + sorted(tail)
    return result
def graph_sorting_DAG(dataset):
    """Topologically sort every DAG in a batch (see topo_sorting's encoding).

    NOTE(review): prints every graph (debug output) and hardcodes .cuda();
    consider removing the prints and parameterizing the device.

    :param dataset: (batch, n, n) tensor of encoded DAG adjacency matrices
    :return: (batch, n) tensor of node orderings
    """
    indices = torch.tensor([[0]*dataset.shape[1] for _ in range(dataset.shape[0])]).cuda()
    batch_size = dataset.shape[0]
    for i in range(batch_size):
        print("treated graph: ")
        print(dataset[i])
        indices[i] = torch.tensor(topo_sorting(dataset[i])).cuda()
    return indices
def tensor_to_string(data):
    """Format each row of a tensor as its numpy repr plus a trailing newline."""
    return [str(row.cpu().numpy()) + "\n" for row in data]
"""
if __name__ == "__main__":
#data = []
#size = 10
#num_samples = 2
#random.seed(0)
order = [i for i in range(size)]
for _ in range(num_samples):
G = nx.gnp_random_graph(size, random.random())
D = None
while True:
if nx.is_connected(G):
D = nx.DiGraph([(u, v) for u, v in G.edges()])
if nx.is_directed_acyclic_graph(D):
break
G = nx.gnp_random_graph(size, random.random())
random.shuffle(order)
mapping = {idx:item for idx, item in enumerate(order)}
DAG = nx.relabel.relabel_nodes(D, mapping)
graph = np.diag([2]*size)
for u, v in DAG.edges():
graph[u][v] = 1
graph[v][u] = -1
data.append(torch.FloatTensor(sorted(graph, key=lambda x : x[0]**2- x[-1]**3)))
dataset = torch.stack(data).cuda()
print(dataset)
print(graph_sorting_DAG(dataset))
idx = torch.tensor([[2, 1, 5, 0, 3, 4], [4, 1, 3, 2, 0, 5]]).cuda()
#print(torch.max(idx) / 3)
#p = 0.
#t = torch.Tensor([i for i in range(int(list(idx.shape)[1]))]).cuda()
#print(t)
#idx_sorted = torch.tensor([i for i in range(idx.shape[1])]).cuda()
#diff = torch.abs(idx - idx_sorted)
#print(diff)
#p = torch.max(diff)
#print(p)
#print(p.item())
#diff = torch.mul(diff, torch.FloatTensor([1.]).cuda())
#print(torch.mean(diff, 1).mean())
#print(round(torch.mean(diff, 1).mean().item(), 2))
#print(idx[:, 2:]>torch.tensor([[5], [3]]).cuda())
#print(idx[:, 2:]>idx[:, 2].view(idx.shape[0], -1))
#k = torch.tensor([torch.sum(idx[:, (i+1):]<idx[:, i].view(idx.shape[0], -1)) for i in range(idx.shape[1]-1)])
print(p)
k = torch.cat([torch.sum(idx[:, (i+1):]>idx[:, i].view(idx.shape[0], -1), dim=1).view(idx.shape[0], -1) for i in range(idx.shape[1]-1)], dim=1)
kk = torch.mul(torch.sum(k, dim=1), torch.FloatTensor([1.]).cuda()).mean()/2.
m = torch.FloatTensor([0.]).cuda()
m = torch.max(m, p)
print("{:.2f}".format(m.item()))
#a = torch.FloatTensor([10]).cuda()
#b = torch.FloatTensor([12]).cuda()
#print(torch.max(a, b))
#print(k)
#L = idx.size()[1]
#idx_sorted = torch.tensor([[1, 3, 2, 0, 4], [4, 0, 3, 1, 2]]).cuda()
#idx_sorted = torch.tensor([0, 1, 2, 3, 4, 5]).cuda()
#difference = idx_sorted - idx
#difference = torch.add(torch.sum(torch.div((torch.negative(torch.abs(difference)) + difference), 2), dim=1), torch.tensor(L*(L-1)/2).cuda())
#print(difference)
idx_sorted = torch.tensor([[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]).cuda()
print(tensor_to_string(idx_sorted))
#print(order_check(idx, idx_sorted))
#misMatch, accuracy_recall, radius_mean, radius_max = orderCheck(idx, idx_sorted)
#accuracy_recall, radius_mean, radius_max, accuracy_recall_max, accuracy_recall_min = order_check(idx, idx_sorted)
#print("mis_match: {:.2f}".format(sum(misMatch)/len(misMatch)))
#print("recall_accuracy: ", accuracy_recall)
#print("maximum recall: ", accuracy_recall_max)
#print("minimum recall: ", accuracy_recall_min)
#print("radius_mean: ", radius_mean)
#radius_max.sort()
#print("radius_max: {:d}".format(radius_max))
#data = torch.tensor([[0.4, 0.3], [0.2, 0.3], [0.3, 0.3], [0.1, 0.5], [0.1, 0.8], [0.5, 0.8], [0.9, 0.9]]).cuda()
#indice = torch.tensor([0, 1, 2, 3, 4, 5, 6]).cuda()
#new_indice = x_sorting(indice, data)
#dataset = torch.tensor([[[0.4, 0.5], [0.3, 0.5], [0.2, 0.3], [0.1, 0.3], [0.9, 0.1]], [[0.1, 0.8], [0.2, 0.4], [0.3, 0.8], [0.5, 0.7], [0.6, 0.4]]]).cuda()
#idx = torch.tensor([[4, 2, 3, 1, 0], [4, 1, 3, 2, 0]]).cuda()
#cost, indices = level_sorting_xy_pairs(idx, dataset)
#print(cost)
#print(indices)
#print(deep_sort_x(idx, dataset))
#y_s = torch.FloatTensor([[0.3, 0.3, 0.4, 0.5, 0.5, 0.6], [0.1, 0.1, 0.1, 0.3, 0.3, 0.7]]).cuda()
#y = torch.FloatTensor([[0.4, 0.3, 0.3, 0.5, 0.6, 0.5], [0.3, 0.3, 0.1, 0.7, 0.1, 0.1]]).cuda()
#y_s = torch.FloatTensor([0.3, 0.3, 0.4, 0.5, 0.5, 0.6]).cuda()
#y = torch.FloatTensor([0.4, 0.3, 0.3, 0.5, 0.6, 0.5]).cuda()
#res = level_mismatch_cost(y_s, y)
#print(res)
#res = level_sorting(y_s, y)
#print(res)
#print(res2)
"""
| 21,760 | 33.486529 | 160 | py |
RESPECT | RESPECT-main/utils/boolmask.py | import torch
import torch.nn.functional as F
def _pad_mask(mask):
# By taking -size % 8, we get 0 if exactly divisible by 8
# and required padding otherwise (i.e. -1 % 8 = 7 pad)
pad = -mask.size(-1) % 8
if pad != 0:
mask = F.pad(mask, [0, pad])
return mask, mask.size(-1) // 8
def _mask_bool2byte(mask):
    # Pack each group of 8 mask entries into one value via bit shifts.
    # NOTE(review): the commented-out lines show the original uint8 version.
    # With dtype torch.bool, sum(-1, dtype=torch.bool) saturates at True and
    # the left shift on bool operands is dubious, so this no longer produces
    # packed byte values -- verify whether this path is still exercised.
    #assert mask.dtype == torch.uint8
    assert mask.dtype == torch.bool
    # assert (mask <= 1).all() # Precondition, disabled for efficiency
    mask, d = _pad_mask(mask)
    #return (mask.view(*mask.size()[:-1], d, 8) << torch.arange(8, out=mask.new())).sum(-1, dtype=torch.uint8)
    return (mask.view(*mask.size()[:-1], d, 8) << torch.arange(8, out=mask.new())).sum(-1, dtype=torch.bool)
def _mask_byte2long(mask):
    # Pack each group of 8 byte values into one int64 (little-endian).
    # NOTE(review): the assert was switched from uint8 to bool (see the
    # commented-out line); with bool input the packed result is questionable
    # for the same reasons as in _mask_bool2byte -- verify.
    #assert mask.dtype == torch.uint8
    assert mask.dtype == torch.bool
    mask, d = _pad_mask(mask)
    # Note this corresponds to a temporary factor 8
    # memory overhead by converting to long before summing
    # Alternatively, aggregate using for loop
    return (mask.view(*mask.size()[:-1], d, 8).long() << (torch.arange(8, dtype=torch.int64, device=mask.device) * 8)).sum(-1)
def mask_bool2long(mask):
    # Pack a boolean mask into int64 words: bool -> byte -> long.
    #assert mask.dtype == torch.uint8
    assert mask.dtype == torch.bool
    return _mask_byte2long(_mask_bool2byte(mask))
def _mask_long2byte(mask, n=None):
if n is None:
n = 8 * mask.size(-1)
return (mask[..., None] >> (torch.arange(8, out=mask.new()) * 8))[..., :n].to(torch.uint8).view(*mask.size()[:-1], -1)[..., :n]
def _mask_byte2bool(mask, n=None):
    """Unpack byte values into booleans (bit i -> entry i), keeping first n."""
    if n is None:
        n = 8 * mask.size(-1)
    # AND each value with the 8 single-bit masks 1, 2, 4, ..., 128 and
    # compare against zero to recover the individual bits.
    return (mask[..., None] & (mask.new_ones(8) << torch.arange(8, out=mask.new()) * 1)).view(*mask.size()[:-1], -1)[..., :n] > 0
def mask_long2bool(mask, n=None):
    """Unpack an int64-packed mask back to booleans: long -> byte -> bool."""
    assert mask.dtype == torch.int64
    return _mask_byte2bool(_mask_long2byte(mask), n=n)
def mask_long_scatter(mask, values, check_unset=True):
    """Set one bit per batch entry in a long-packed boolean mask.

    mask has shape (..., k) of int64 words, each holding 64 bits; for every
    batch entry, bit ``values % 64`` of word ``values // 64`` is set.
    Entries whose value is out of range (e.g. the sentinel -1) leave the
    mask unchanged. Does not support setting multiple values at once
    (unlike normal scatter).

    :param check_unset: assert the target bit was not already set
    """
    assert mask.size()[:-1] == values.size()
    word_idx = torch.arange(mask.size(-1), out=mask.new())
    v = values[..., None]  # broadcast against the word dimension
    # One-hot over words: true only for the word that holds bit `values`;
    # all-false when the value is out of range (nothing gets set).
    hits = (v >= (word_idx * 64)) & (v < ((word_idx + 1) * 64))
    # The % keeps the shift in range; it has no effect when hits is false.
    bit = hits.long() << (v % 64)
    if check_unset:
        # Optional: verify the bit is not already set.
        assert not ((mask & bit) > 0).any()
    return mask | bit
| 2,809 | 37.493151 | 131 | py |
RESPECT | RESPECT-main/utils/lexsort.py | import torch
import numpy as np
def torch_lexsort(keys, dim=-1):
    """Lexicographic argsort over multiple keys (last key is primary).

    Mirrors np.lexsort; dispatches to a GPU implementation for CUDA tensors
    and delegates to numpy on the CPU.
    """
    if not keys[0].is_cuda:
        # CPU path: numpy's lexsort is stable and exact.
        return torch.from_numpy(np.lexsort([k.numpy() for k in keys], axis=dim))
    return _torch_lexsort_cuda(keys, dim)
def _torch_lexsort_cuda(keys, dim=-1):
    """
    Function calculates a lexicographical sort order on GPU, similar to np.lexsort
    Relies heavily on undocumented behavior of torch.sort, namely that when sorting more than
    2048 entries in the sorting dim, it performs a sort using Thrust and it uses a stable sort
    https://github.com/pytorch/pytorch/blob/695fd981924bd805704ecb5ccd67de17c56d7308/aten/src/THC/generic/THCTensorSort.cu#L330
    """
    MIN_NUMEL_STABLE_SORT = 2049  # Minimum number of elements for stable sort
    # Swap axis such that sort dim is last and reshape all other dims to a single (batch) dimension
    reordered_keys = tuple(key.transpose(dim, -1).contiguous() for key in keys)
    flat_keys = tuple(key.view(-1) for key in keys)
    d = keys[0].size(dim)  # Sort dimension size
    numel = flat_keys[0].numel()
    batch_size = numel // d
    # Append the batch id as an extra (most significant) key so entries never
    # move across batch boundaries.
    batch_key = torch.arange(batch_size, dtype=torch.int64, device=keys[0].device)[:, None].repeat(1, d).view(-1)
    flat_keys = flat_keys + (batch_key,)
    # We rely on undocumented behavior that the sort is stable provided that
    # enough elements are sorted; replicate the data until the threshold is
    # reached and add a replica id as yet another key.
    if numel < MIN_NUMEL_STABLE_SORT:
        n_rep = (MIN_NUMEL_STABLE_SORT + numel - 1) // numel  # Ceil
        rep_key = torch.arange(n_rep, dtype=torch.int64, device=keys[0].device)[:, None].repeat(1, numel).view(-1)
        flat_keys = tuple(k.repeat(n_rep) for k in flat_keys) + (rep_key,)
    # Classic lexsort: stable-sort by each key in turn, least significant
    # first, composing the permutations.
    idx = None  # Identity sorting initially
    for k in flat_keys:
        if idx is None:
            _, idx = k.sort(-1)
        else:
            # Order data according to idx and then apply
            # found ordering to current idx (so permutation of permutation)
            # such that we can order the next key according to the current sorting order
            _, idx_ = k[idx].sort(-1)
            idx = idx[idx_]
    # In the end gather only numel and strip of extra sort key
    if numel < MIN_NUMEL_STABLE_SORT:
        idx = idx[:numel]
    # Get only numel (if we have replicated), swap axis back and shape results
    return idx[:numel].view(*reordered_keys[0].size()).transpose(dim, -1) % d
| 2,382 | 41.553571 | 127 | py |
RESPECT | RESPECT-main/utils/beam_search.py | import time
import torch
from typing import NamedTuple
from utils.lexsort import torch_lexsort
def beam_search(*args, **kwargs):
    """Run _beam_search and unpack its results via get_beam_search_results."""
    return get_beam_search_results(*_beam_search(*args, **kwargs))
def get_beam_search_results(beams, final_state):
    """Unpack the per-step beams and final state into search results.

    :return: (scores, solutions, final costs, batch ids, batch_size); all but
        batch_size are None when the search failed (final_state is None)
    """
    beam = beams[-1]  # Final beam
    if final_state is None:
        return None, None, None, None, beam.batch_size
    # First state has no actions/parents and should be omitted when backtracking
    actions = [beam.action for beam in beams[1:]]
    parents = [beam.parent for beam in beams[1:]]
    solutions = final_state.construct_solutions(backtrack(parents, actions))
    return beam.score, solutions, final_state.get_final_cost()[:, 0], final_state.ids.view(-1), beam.batch_size
def _beam_search(state, beam_size, propose_expansions=None,
                 keep_states=False):
    """Core beam-search loop over a problem state.

    :param state: initial problem state (must implement the BatchBeam state
        interface: get_mask, update, all_finished, ids, ...)
    :param beam_size: number of entries to keep per batch instance
    :param propose_expansions: optional callable(beam) -> (parent, action,
        score); defaults to beam.propose_expansions
    :param keep_states: keep the full state in every recorded beam (memory
        heavy) instead of clearing it
    :return: (list of per-step beams, final state or None on failure)
    """
    beam = BatchBeam.initialize(state)
    # Initial state
    beams = [beam if keep_states else beam.clear_state()]
    # Perform decoding steps
    while not beam.all_finished():
        # Use the model to propose and score expansions
        parent, action, score = beam.propose_expansions() if propose_expansions is None else propose_expansions(beam)
        if parent is None:
            return beams, None
        # Expand and update the state according to the selected actions
        beam = beam.expand(parent, action, score=score)
        # Get topk
        beam = beam.topk(beam_size)
        # Collect output of step
        beams.append(beam if keep_states else beam.clear_state())
    # Return the final state separately since beams may not keep state
    return beams, beam.state
# Module-level alias for the builtin super; used explicitly in
# BatchBeam.__getitem__ below.
bypass = super
class BatchBeam(NamedTuple):
    """
    Class that keeps track of a beam for beam search in batch mode.
    Since the beam size of different entries in the batch may vary, the tensors are not (batch_size, beam_size, ...)
    but rather (sum_i beam_size_i, ...), i.e. flattened. This makes some operations a bit cumbersome.
    """
    score: torch.Tensor  # Current heuristic score of each entry in beam (used to select most promising)
    state: None  # To track the state
    parent: torch.Tensor  # Row index into the previous beam (set by expand)
    action: torch.Tensor  # Action taken at the last expansion (set by expand)
    batch_size: int  # Can be used for optimizations if batch_size = 1
    device: None  # Track on which device
    # Indicates for each row to which batch it belongs (0, 0, 0, 1, 1, 2, ...), managed by state
    @property
    def ids(self):
        return self.state.ids.view(-1)  # Need to flat as state has steps dimension
    def __getitem__(self, key):
        # Tensor/slice keys index all member tensors in parallel; integer
        # keys fall through to plain NamedTuple field access.
        if torch.is_tensor(key) or isinstance(key, slice):  # If tensor, idx all tensors by this tensor:
            return self._replace(
                # ids=self.ids[key],
                score=self.score[key] if self.score is not None else None,
                state=self.state[key],
                parent=self.parent[key] if self.parent is not None else None,
                action=self.action[key] if self.action is not None else None
            )
        #return super(BatchBeam, self).__getitem__(key)
        # `bypass` is the module-level alias for the builtin super.
        return bypass(BatchBeam, self).__getitem__(key)
    # Do not use __len__ since this is used by namedtuple internally and should be number of fields
    # def __len__(self):
    #     return len(self.ids)
    @staticmethod
    def initialize(state):
        # Fresh beam with zero scores and no expansion history.
        batch_size = len(state.ids)
        device = state.ids.device
        return BatchBeam(
            score=torch.zeros(batch_size, dtype=torch.float, device=device),
            state=state,
            parent=None,
            action=None,
            batch_size=batch_size,
            device=device
        )
    def propose_expansions(self):
        # Default expansion: every currently feasible (unmasked) action.
        mask = self.state.get_mask()
        # Mask always contains a feasible action
        expansions = torch.nonzero(mask[:, 0, :] == 0)
        parent, action = torch.unbind(expansions, -1)
        return parent, action, None
    def expand(self, parent, action, score=None):
        return self._replace(
            score=score,  # The score is cleared upon expanding as it is no longer valid, or it must be provided
            state=self.state[parent].update(action),  # Pass ids since we replicated state
            parent=parent,
            action=action
        )
    def topk(self, k):
        # Keep the k best-scoring rows per batch instance.
        idx_topk = segment_topk_idx(self.score, k, self.ids)
        return self[idx_topk]
    def all_finished(self):
        return self.state.all_finished()
    def cpu(self):
        return self.to(torch.device('cpu'))
    def to(self, device):
        # Move all member tensors and the state to the given device.
        if device == self.device:
            return self
        return self._replace(
            score=self.score.to(device) if self.score is not None else None,
            state=self.state.to(device),
            parent=self.parent.to(device) if self.parent is not None else None,
            action=self.action.to(device) if self.action is not None else None
        )
    def clear_state(self):
        # Drop the (memory heavy) state, keeping scores/parents/actions.
        return self._replace(state=None)
    def size(self):
        return self.state.ids.size(0)
def segment_topk_idx(x, k, ids):
    """
    Finds the topk per segment of data x given segment ids (0, 0, 0, 1, 1, 2, ...).
    Note that there may be fewer than k elements in a segment so the returned length index can vary.
    x[result], ids[result] gives the sorted elements per segment as well as corresponding segment ids after sorting.
    :param x: 1-D score tensor
    :param k: number of entries to keep per segment
    :param ids: 1-D segment id tensor, each segment a contiguous range
    :return: index tensor selecting the top-k entries of every segment
    """
    assert x.dim() == 1
    assert ids.dim() == 1
    # Since we may have varying beam size per batch entry we cannot reshape to (batch_size, beam_size)
    # And use default topk along dim -1, so we have to be creative
    # Now we have to get the topk per segment which is really annoying :(
    # we use lexsort on (ids, score), create array with offset per id
    # offsets[ids] then gives offsets repeated and only keep for which arange(len) < offsets + k
    splits_ = torch.nonzero(ids[1:] - ids[:-1])
    if len(splits_) == 0:  # Only one group
        _, idx_topk = x.topk(min(k, x.size(0)))
        return idx_topk
    splits = torch.cat((ids.new_tensor([0]), splits_[:, 0] + 1))
    # Make a new array in which we store for each id the offset (start) of the group
    # This way ids does not need to be increasing or adjacent, as long as each group is a single range
    group_offsets = splits.new_zeros((splits.max() + 1,))
    group_offsets[ids[splits]] = splits
    offsets = group_offsets[ids]  # Look up offsets based on ids, effectively repeating for the repetitions per id
    # We want topk so need to sort x descending so sort -x (be careful with unsigned data type!)
    #idx_sorted = torch_lexsort((-(x if x.dtype != torch.uint8 else x.int()).detach(), ids))
    idx_sorted = torch_lexsort((-(x if x.dtype != torch.bool else x.int()).detach(), ids))
    # This will filter first k per group (example k = 2)
    # ids     = [0, 0, 0, 1, 1, 1, 1, 2]
    # splits  = [0, 3, 7]
    # offsets = [0, 0, 0, 3, 3, 3, 3, 7]
    # offs+2  = [2, 2, 2, 5, 5, 5, 5, 9]
    # arange  = [0, 1, 2, 3, 4, 5, 6, 7]
    # filter  = [1, 1, 0, 1, 1, 0, 0, 1]
    # Use filter to get only topk of sorting idx
    return idx_sorted[torch.arange(ids.size(0), out=ids.new()) < offsets + k]
def backtrack(parents, actions):
    """Trace beam parents backwards to recover aligned action sequences.

    :param parents: per-step parent index tensors (latest step last)
    :param actions: per-step action tensors (latest step last)
    :return: actions stacked along the last dim, aligned with the final beam
    """
    # Walk from the last step towards the first, mapping each earlier step's
    # actions through the accumulated chain of parent indices.
    trace = parents[-1]
    aligned = [actions[-1]]
    for step in range(len(parents) - 2, -1, -1):
        aligned.append(actions[step].gather(-1, trace))
        trace = parents[step].gather(-1, trace)
    aligned.reverse()
    return torch.stack(aligned, -1)
class CachedLookup(object):
    """Index a tensor and memoize the most recent result.

    Repeated `__getitem__` calls with an equal index tensor return the cached
    slice instead of re-indexing `orig`. Slices are rejected; non-tensor keys
    fall through to the (absent) base implementation.
    """

    def __init__(self, data):
        self.orig = data
        self.key = None      # last index tensor seen
        self.current = None  # cached self.orig[self.key]

    def __getitem__(self, key):
        assert not isinstance(key, slice), "CachedLookup does not support slicing, " \
                                           "you can slice the result of an index operation instead"
        if torch.is_tensor(key):  # If tensor, idx all tensors by this tensor:
            stale = (
                self.key is None
                or len(key) != len(self.key)
                or bool((key != self.key).any())
            )
            if stale:
                self.key = key
                self.current = self.orig[key]
            return self.current
        return super(CachedLookup, self).__getitem__(key)
| 8,521 | 36.875556 | 117 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/moon_data_exp.py | """
Two moons experiment for visualization
"""
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from tqdm import tqdm
from ssl_lib.algs.builder import gen_ssl_alg
from ssl_lib.models.utils import ema_update
from ssl_lib.consistency.builder import gen_consistency
def gen_model():
    """Build the small MLP classifier (2-D input -> 2 logits) for the two-moons toy task."""
    layers = [
        nn.Linear(2, 128),
        nn.ReLU(),
        nn.Linear(128, 256),
        nn.ReLU(),
        nn.Linear(256, 2),
    ]
    return nn.Sequential(*layers)
def gen_ssl_moon_dataset(seed, num_samples, labeled_sample, noise_factor=0.1):
    """Create a standardized two-moons dataset split into labeled / unlabeled parts.

    Args:
        seed: random seed used both by make_moons and for per-class shuffling.
        num_samples: total number of points (must exceed labeled_sample).
        labeled_sample: number of labeled points (half taken from each class).
        noise_factor: std of the Gaussian noise added by make_moons.

    Returns:
        (labeled_class0, labeled_class1, unlabeled, labels) where `labels`
        covers only the labeled points (zeros for class 0, then ones).
    """
    assert num_samples > labeled_sample
    # BUG FIX: `shuffle` and `noise` are keyword-only in modern scikit-learn;
    # passing them positionally (as before) raises a TypeError there.
    data, label = make_moons(num_samples, shuffle=False, noise=noise_factor, random_state=seed)
    # standardize features to zero mean / unit variance
    data = (data - data.mean(0, keepdims=True)) / data.std(0, keepdims=True)
    l0_idx = (label == 0)
    l1_idx = (label == 1)
    l0_data = data[l0_idx]
    l1_data = data[l1_idx]
    # shuffle within each class so the labeled subset is a random sample
    np.random.seed(seed)
    l0_data = np.random.permutation(l0_data)
    l1_data = np.random.permutation(l1_data)
    labeled_l0 = l0_data[:labeled_sample//2]
    labeled_l1 = l1_data[:labeled_sample//2]
    unlabeled = np.concatenate([
        l0_data[labeled_sample//2:], l1_data[labeled_sample//2:]
    ])
    l0_label = np.zeros(labeled_l0.shape[0])
    l1_label = np.ones(labeled_l1.shape[0])
    label = np.concatenate([l0_label, l1_label])
    return labeled_l0, labeled_l1, unlabeled, label
def scatter_plot_with_confidence(l0_data, l1_data, all_data, model, device, out_dir=None, show=False):
    """Plot the model's class-1 confidence surface over a dense grid.

    Renders twice: once with the unlabeled points overlaid in gray
    ("confidence_with_labeled.png") and once with labeled points only
    ("confidence.png").
    """
    # dense evaluation grid covering the data extent (with a small margin)
    xx, yy = np.meshgrid(
        np.linspace(all_data[:, 0].min() - 0.1, all_data[:, 0].max() + 0.1, 1000),
        np.linspace(all_data[:, 1].min() - 0.1, all_data[:, 1].max() + 0.1, 1000))
    grid = np.stack([xx.ravel(), yy.ravel()], 1).reshape(-1, 2)
    grid_tensor = torch.from_numpy(grid).to(device).float()
    confidence = model(grid_tensor).softmax(1)[:, 1].detach().to("cpu").numpy().reshape(xx.shape)

    def _render(with_unlabeled, filename):
        # shared rendering path for both figures
        plt.contourf(xx, yy, confidence, alpha=0.5, cmap=plt.cm.jet)
        if with_unlabeled:
            plt.scatter(all_data[:, 0], all_data[:, 1], c="gray")
        plt.scatter(l0_data[:, 0], l0_data[:, 1], c="blue")
        plt.scatter(l1_data[:, 0], l1_data[:, 1], c="red")
        plt.xlim(-2, 2)
        plt.ylim(-2, 2)
        plt.tight_layout()
        if out_dir is not None:
            plt.savefig(os.path.join(out_dir, filename))
        if show:
            plt.show()

    _render(True, "confidence_with_labeled.png")
    _render(False, "confidence.png")
def scatter_plot(l0_data, l1_data, unlabeled_data, out_dir=None, show=False):
    """Plot the raw moons data: once including unlabeled points ("labeled_raw_data.png")
    and once with labeled points only ("raw_data.png")."""

    def _render(with_unlabeled, filename):
        # shared rendering path for both figures
        if with_unlabeled:
            plt.scatter(unlabeled_data[:, 0], unlabeled_data[:, 1], c="gray")
        plt.scatter(l0_data[:, 0], l0_data[:, 1], c="blue")
        plt.scatter(l1_data[:, 0], l1_data[:, 1], c="red")
        plt.xlim(-2, 2)
        plt.ylim(-2, 2)
        plt.tight_layout()
        if out_dir is not None:
            plt.savefig(os.path.join(out_dir, filename))
        if show:
            plt.show()

    _render(True, "labeled_raw_data.png")
    _render(False, "raw_data.png")
def fit(cfg):
    """Train the toy MLP on two moons with a consistency-regularization SSL objective.

    Builds the model/optimizer, generates the SSL moons split, runs
    `cfg.iterations` full-batch steps combining supervised cross-entropy with a
    weighted consistency loss (and optional entropy minimization), then plots
    the learned confidence surface.
    """
    torch.manual_seed(cfg.seed)
    if torch.cuda.is_available():
        device = "cuda"
        torch.backends.cudnn.benchmark = True
    else:
        device = "cpu"
    model = gen_model().to(device)
    model.train()
    optimizer = optim.Adam(model.parameters(), cfg.lr)
    # weak augmentation: additive Gaussian noise
    weak_augmentation = lambda x: x + torch.randn_like(x) * cfg.gauss_std
    # set consistency type
    consistency = gen_consistency(cfg.consistency, cfg)
    # set ssl algorithm
    ssl_alg = gen_ssl_alg(
        cfg.alg,
        cfg
    )
    l0_data, l1_data, u_data, label = gen_ssl_moon_dataset(
        cfg.seed, cfg.n_sample, cfg.n_labeled, cfg.noise_factor
    )
    labeled_data = np.concatenate([l0_data, l1_data])
    scatter_plot(l0_data, l1_data, u_data, cfg.out_dir, cfg.vis_data)
    tch_labeled_data = torch.from_numpy(labeled_data).float().to(device)
    tch_unlabeled_data = torch.from_numpy(u_data).float().to(device)
    label = torch.from_numpy(label).long().to(device)
    for i in range(cfg.iterations):
        unlabeled_weak1 = weak_augmentation(tch_unlabeled_data)
        unlabeled_weak2 = weak_augmentation(tch_unlabeled_data)
        all_data = torch.cat([
            tch_labeled_data,
            unlabeled_weak1,
            unlabeled_weak2], 0)
        outputs = model(all_data)
        labeled_logits = outputs[:tch_labeled_data.shape[0]]
        loss = F.cross_entropy(labeled_logits, label)
        # Split the unlabeled predictions back into the two augmented views.
        # BUG FIX: `outputs` is (N, 2); the original chunked along dim=2, which
        # is out of range -- the views are stacked along the batch dim (dim=0).
        # Computed outside the `if` so the entropy term below can use it even
        # when cfg.coef == 0 (the original raised NameError in that case).
        unlabeled_logits, unlabeled_logits_target = torch.chunk(
            outputs[tch_labeled_data.shape[0]:], 2, dim=0)
        if cfg.coef > 0:
            y, targets, mask = ssl_alg(
                stu_preds=unlabeled_logits,
                tea_logits=unlabeled_logits_target.detach(),
                w_data=unlabeled_weak1,
                s_data=unlabeled_weak2,
                stu_forward=model,
                tea_forward=model
            )
            L_consistency = consistency(y, targets, mask)
            loss += cfg.coef * L_consistency
        else:
            L_consistency = torch.zeros_like(loss)
        if cfg.entropy_minimize > 0:
            # entropy minimization on the first unlabeled view
            loss -= cfg.entropy_minimize * (unlabeled_logits.softmax(1) * F.log_softmax(unlabeled_logits, 1)).sum(1).mean()
        print("[{}/{}] loss {} | ssl loss {}".format(
            i+1, cfg.iterations, loss.item(), L_consistency.item()))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    scatter_plot_with_confidence(l0_data, l1_data, all_data, model, device, cfg.out_dir, cfg.vis_data)
if __name__ == "__main__":
    # CLI entry point: parse hyperparameters for the two-moons SSL experiment
    # and launch training via fit().
    # NOTE(review): several help strings contain typos ("avarage",
    # "tempereture"); left as-is since they are runtime strings.
    import argparse
    parser = argparse.ArgumentParser()
    # dataset config
    parser.add_argument("--n_sample", default=1000, type=int, help="number of samples")
    parser.add_argument("--n_labeled", default=10, type=int, help="number of labeled samples")
    parser.add_argument("--noise_factor", default=0.1, type=float, help="std of gaussian noise")
    # optimization config
    parser.add_argument("--iterations", default=1000, type=int, help="number of training iteration")
    parser.add_argument("--lr", default=0.01, type=float, help="learning rate")
    # SSL common config
    parser.add_argument("--alg", default="cr", type=str, help="ssl algorithm, ['ict', 'cr', 'pl', 'vat']")
    parser.add_argument("--coef", default=1, type=float, help="coefficient for consistency loss")
    parser.add_argument("--ema_teacher", action="store_true", help="consistency with mean teacher")
    parser.add_argument("--ema_factor", default=0.999, type=float, help="exponential mean avarage factor")
    parser.add_argument("--entropy_minimize", "-em", default=0, type=float, help="coefficient of entropy minimization")
    parser.add_argument("--threshold", default=None, type=float, help="pseudo label threshold")
    parser.add_argument("--sharpen", default=None, type=float, help="tempereture parameter for sharpening")
    parser.add_argument("--temp_softmax", default=None, type=float, help="tempereture for softmax")
    parser.add_argument("--gauss_std", default=0.1, type=float, help="standard deviation for gaussian noise")
    ## SSL alg parameter
    ### ICT config
    parser.add_argument("--alpha", default=0.1, type=float, help="parameter for beta distribution in ICT")
    ### VAT config
    parser.add_argument("--eps", default=6, type=float, help="norm of virtual adversarial noise")
    parser.add_argument("--xi", default=1e-6, type=float, help="perturbation for finite difference method")
    parser.add_argument("--vat_iter", default=1, type=int, help="number of iteration for power iteration")
    ## consistency config
    parser.add_argument("--consistency", "-consis", default="ce", type=str, help="consistency type, ['ce', 'ms']")
    parser.add_argument("--sinkhorn_tau", default=10, type=float, help="tempereture parameter for sinkhorn distance")
    parser.add_argument("--sinkhorn_iter", default=10, type=int, help="number of iterations for sinkhorn normalization")
    # evaluation config
    parser.add_argument("--weight_average", action="store_true", help="evaluation with weight-averaged model")
    # misc
    parser.add_argument("--out_dir", default="log", type=str, help="output directory")
    parser.add_argument("--seed", default=96, type=int, help="random seed")
    parser.add_argument("--vis_data", action="store_true", help="visualize input data")
    args = parser.parse_args()
    fit(args)
| 8,998 | 37.788793 | 123 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/train_val_test.py | import logging
import numpy, random, time
import torch
import torch.nn.functional as F
import torch.optim as optim
from ssl_lib.algs.builder import gen_ssl_alg
from ssl_lib.algs import utils as alg_utils
from ssl_lib.models import utils as model_utils
from ssl_lib.consistency.builder import gen_consistency
from ssl_lib.models.builder import gen_model
from ssl_lib.datasets.builder import gen_dataloader
from ssl_lib.param_scheduler import scheduler
from ssl_lib.misc.meter import Meter
def evaluation(raw_model, eval_model, loader, device):
    """Evaluate both the raw and the (possibly weight-averaged) model on `loader`.

    Returns (mean raw-model accuracy, mean eval-model accuracy, mean eval-model
    cross-entropy loss), each averaged over batches. Both models are put back
    into train mode before returning.
    """
    raw_model.eval()
    eval_model.eval()
    totals = {"raw_acc": 0.0, "acc": 0.0, "loss": 0.0}
    with torch.no_grad():
        for data, labels in loader:
            data, labels = data.to(device), labels.to(device)
            eval_logits = eval_model(data)
            raw_logits = raw_model(data)
            totals["loss"] += F.cross_entropy(eval_logits, labels).item()
            totals["acc"] += (eval_logits.max(1)[1] == labels).float().mean().item()
            totals["raw_acc"] += (raw_logits.max(1)[1] == labels).float().mean().item()
    num_batches = len(loader)
    raw_model.train()
    eval_model.train()
    return (
        totals["raw_acc"] / num_batches,
        totals["acc"] / num_batches,
        totals["loss"] / num_batches,
    )
def param_update(
    cfg,
    cur_iteration,
    model,
    teacher_model,
    optimizer,
    ssl_alg,
    consistency,
    labeled_data,
    ul_weak_data,
    ul_strong_data,
    labels,
    average_model
):
    """Run one SSL training step and return scalar metrics for logging.

    Computes the supervised loss on `labeled_data` and the consistency loss on
    the weak/strong unlabeled views via `ssl_alg` + `consistency`, then
    backpropagates, applies decoupled weight decay, and EMA-updates the teacher
    and weight-averaged models when enabled.
    """
    start_time = time.time()
    # one forward pass over labeled + weak + strong batches (shared batch stats)
    all_data = torch.cat([labeled_data, ul_weak_data, ul_strong_data], 0)
    forward_func = model.forward
    stu_logits = forward_func(all_data)
    labeled_preds = stu_logits[:labeled_data.shape[0]]
    # split the unlabeled logits back into the weak / strong views (batch dim)
    stu_unlabeled_weak_logits, stu_unlabeled_strong_logits = torch.chunk(stu_logits[labels.shape[0]:], 2, dim=0)
    if cfg.tsa:
        # training signal annealing on the supervised loss
        none_reduced_loss = F.cross_entropy(labeled_preds, labels, reduction="none")
        L_supervised = alg_utils.anneal_loss(
            labeled_preds, labels, none_reduced_loss, cur_iteration+1,
            cfg.iteration, labeled_preds.shape[1], cfg.tsa_schedule)
    else:
        L_supervised = F.cross_entropy(labeled_preds, labels)
    if cfg.coef > 0:
        # get target values
        if teacher_model is not None: # get target values from teacher model
            t_forward_func = teacher_model.forward
            tea_logits = t_forward_func(all_data)
            tea_unlabeled_weak_logits, _ = torch.chunk(tea_logits[labels.shape[0]:], 2, dim=0)
        else:
            t_forward_func = forward_func
            tea_unlabeled_weak_logits = stu_unlabeled_weak_logits
        # calc consistency loss
        # freeze BN running stats while the SSL algorithm re-runs forward passes
        model.update_batch_stats(False)
        y, targets, mask = ssl_alg(
            stu_preds = stu_unlabeled_strong_logits,
            tea_logits = tea_unlabeled_weak_logits.detach(),
            w_data = ul_weak_data,
            s_data = ul_strong_data,
            stu_forward = forward_func,
            tea_forward = t_forward_func
        )
        model.update_batch_stats(True)
        L_consistency = consistency(y, targets, mask, weak_prediction=tea_unlabeled_weak_logits.softmax(1))
    else:
        L_consistency = torch.zeros_like(L_supervised)
        mask = None
    # calc total loss
    coef = scheduler.exp_warmup(cfg.coef, cfg.warmup_iter, cur_iteration+1)
    loss = L_supervised + coef * L_consistency
    if cfg.entropy_minimization > 0:
        # entropy minimization on the weakly augmented unlabeled predictions
        loss -= cfg.entropy_minimization * \
            (stu_unlabeled_weak_logits.softmax(1) * F.log_softmax(stu_unlabeled_weak_logits, 1)).sum(1).mean()
    # update parameters
    cur_lr = optimizer.param_groups[0]["lr"]
    optimizer.zero_grad()
    loss.backward()
    if cfg.weight_decay > 0:
        # decoupled weight decay, scaled by the current learning rate
        decay_coeff = cfg.weight_decay * cur_lr
        model_utils.apply_weight_decay(model.modules(), decay_coeff)
    optimizer.step()
    # update teacher parameters by exponential moving average
    if cfg.ema_teacher:
        model_utils.ema_update(
            teacher_model, model, cfg.ema_teacher_factor,
            cfg.weight_decay * cur_lr if cfg.ema_apply_wd else None,
            cur_iteration if cfg.ema_teacher_warmup else None)
    # update evaluation model's parameters by exponential moving average
    if cfg.weight_average:
        model_utils.ema_update(
            average_model, model, cfg.wa_ema_factor,
            cfg.weight_decay * cur_lr if cfg.wa_apply_wd else None)
    # calculate accuracy for labeled data
    acc = (labeled_preds.max(1)[1] == labels).float().mean()
    return {
        "acc": acc,
        "loss": loss.item(),
        "sup loss": L_supervised.item(),
        "ssl loss": L_consistency.item(),
        "mask": mask.float().mean().item() if mask is not None else 1,
        "coef": coef,
        "sec/iter": (time.time() - start_time)
    }
def main(cfg, logger):
    """Full SSL training loop with validation-based model selection and testing.

    NOTE(review): this function uses `os`, which is only imported inside the
    `if __name__ == "__main__"` guard -- fine when run as a script, but a
    NameError if this module is imported and main() called directly; confirm.
    """
    # set seed
    torch.manual_seed(cfg.seed)
    numpy.random.seed(cfg.seed)
    random.seed(cfg.seed)
    # select device
    if torch.cuda.is_available():
        device = "cuda"
        torch.backends.cudnn.benchmark = True
    else:
        logger.info("CUDA is NOT available")
        device = "cpu"
    # build data loader
    logger.info("load dataset")
    lt_loader, ult_loader, val_loader, test_loader, num_classes, img_size = gen_dataloader(cfg.root, cfg.dataset, True, cfg, logger)
    # set consistency type
    consistency = gen_consistency(cfg.consistency, cfg)
    # set ssl algorithm
    ssl_alg = gen_ssl_alg(cfg.alg, cfg)
    # build student model
    model = gen_model(cfg.model, num_classes, img_size).to(device)
    # build teacher model
    if cfg.ema_teacher:
        teacher_model = gen_model(cfg.model, num_classes, img_size).to(device)
        teacher_model.load_state_dict(model.state_dict())
    else:
        teacher_model = None
    # for evaluation
    if cfg.weight_average:
        average_model = gen_model(cfg.model, num_classes, img_size).to(device)
        average_model.load_state_dict(model.state_dict())
    else:
        average_model = None
    model.train()
    logger.info(model)
    # build optimizer
    if cfg.optimizer == "sgd":
        optimizer = optim.SGD(
            model.parameters(), cfg.lr, cfg.momentum, weight_decay=0, nesterov=True
        )
    elif cfg.optimizer == "adam":
        optimizer = optim.AdamW(
            model.parameters(), cfg.lr, (cfg.momentum, 0.999), weight_decay=0
        )
    else:
        raise NotImplementedError
    # set lr scheduler
    if cfg.lr_decay == "cos":
        lr_scheduler = scheduler.CosineAnnealingLR(optimizer, cfg.iteration)
    elif cfg.lr_decay == "step":
        lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [400000, ], cfg.lr_decay_rate)
    else:
        raise NotImplementedError
    # init meter
    metric_meter = Meter()
    maximum_val_acc = 0
    logger.info("training")
    # iteration count is bounded by the shorter of the two loaders
    for i, (l_data, ul_data) in enumerate(zip(lt_loader, ult_loader)):
        l_aug, labels = l_data
        ul_w_aug, ul_s_aug, _ = ul_data
        params = param_update(
            cfg, i, model, teacher_model, optimizer, ssl_alg,
            consistency, l_aug.to(device), ul_w_aug.to(device),
            ul_s_aug.to(device), labels.to(device),
            average_model
        )
        # moving average for reporting losses and accuracy
        metric_meter.add(params, ignores=["coef"])
        # display losses every cfg.disp iterations
        if ((i+1) % cfg.disp) == 0:
            state = metric_meter.state(
                header = f'[{i+1}/{cfg.iteration}]',
                footer = f'ssl coef {params["coef"]:.4g} | lr {optimizer.param_groups[0]["lr"]:.4g}'
            )
            logger.info(state)
        lr_scheduler.step()
        # validation
        if ((i + 1) % cfg.checkpoint) == 0 or (i+1) == cfg.iteration:
            with torch.no_grad():
                if cfg.weight_average:
                    eval_model = average_model
                else:
                    eval_model = model
                logger.info("validation")
                mean_raw_acc, mean_val_acc, mean_val_loss = evaluation(model, eval_model, val_loader, device)
                logger.info("validation loss %f | validation acc. %f | raw acc. %f", mean_val_loss, mean_val_acc, mean_raw_acc)
                # test
                # test only when validation accuracy improves (model selection)
                if not cfg.only_validation and mean_val_acc > maximum_val_acc:
                    torch.save(eval_model.state_dict(), os.path.join(cfg.out_dir, "best_model.pth"))
                    maximum_val_acc = mean_val_acc
                    logger.info("test")
                    mean_raw_acc, mean_test_acc, mean_test_loss = evaluation(model, eval_model, test_loader, device)
                    logger.info("test loss %f | test acc. %f | raw acc. %f", mean_test_loss, mean_test_acc, mean_raw_acc)
                torch.save(model.state_dict(), os.path.join(cfg.out_dir, "model_checkpoint.pth"))
                torch.save(optimizer.state_dict(), os.path.join(cfg.out_dir, "optimizer_checkpoint.pth"))
    # NOTE(review): `mean_test_acc` is only bound if the test branch above ever
    # ran; this line raises NameError otherwise -- confirm intended.
    logger.info("test accuracy %f", mean_test_acc)
if __name__ == "__main__":
    # Script entry point: parse args, create the output dir, and configure a
    # logger that writes both to stdout and to <out_dir>/console.log.
    import os, sys
    from parser import get_args
    args = get_args()
    os.makedirs(args.out_dir, exist_ok=True)
    # setup logger
    plain_formatter = logging.Formatter(
        "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S"
    )
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    s_handler = logging.StreamHandler(stream=sys.stdout)
    s_handler.setFormatter(plain_formatter)
    s_handler.setLevel(logging.DEBUG)
    logger.addHandler(s_handler)
    f_handler = logging.FileHandler(os.path.join(args.out_dir, "console.log"))
    f_handler.setFormatter(plain_formatter)
    f_handler.setLevel(logging.DEBUG)
    logger.addHandler(f_handler)
    # avoid duplicate records via the root logger
    logger.propagate = False
    logger.info(args)
    main(args, logger)
| 9,950 | 35.054348 | 132 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/train_test.py | import logging
import numpy, random, time, json
import torch
import torch.nn.functional as F
import torch.optim as optim
from ssl_lib.algs.builder import gen_ssl_alg
from ssl_lib.algs import utils as alg_utils
from ssl_lib.models import utils as model_utils
from ssl_lib.consistency.builder import gen_consistency
from ssl_lib.models.builder import gen_model
from ssl_lib.datasets.builder import gen_dataloader
from ssl_lib.param_scheduler import scheduler
from ssl_lib.misc.meter import Meter
def evaluation(raw_model, eval_model, loader, device):
    """Evaluate the raw model and the (possibly weight-averaged) eval model.

    Returns (mean raw accuracy, mean eval accuracy, mean eval cross-entropy
    loss) averaged over batches; both models are restored to train mode.
    """
    raw_model.eval()
    eval_model.eval()
    raw_acc_sum = 0.0
    acc_sum = 0.0
    loss_sum = 0.0
    with torch.no_grad():
        for batch, targets in loader:
            batch, targets = batch.to(device), targets.to(device)
            preds = eval_model(batch)
            raw_preds = raw_model(batch)
            loss_sum += F.cross_entropy(preds, targets).item()
            acc_sum += (preds.max(1)[1] == targets).float().mean().item()
            raw_acc_sum += (raw_preds.max(1)[1] == targets).float().mean().item()
    n = len(loader)
    raw_model.train()
    eval_model.train()
    return raw_acc_sum / n, acc_sum / n, loss_sum / n
def param_update(
    cfg,
    cur_iteration,
    model,
    teacher_model,
    optimizer,
    ssl_alg,
    consistency,
    labeled_data,
    ul_weak_data,
    ul_strong_data,
    labels,
    average_model
):
    """Run one SSL training step and return scalar metrics for logging.

    Computes the supervised loss on `labeled_data` and the consistency loss on
    the weak/strong unlabeled views via `ssl_alg` + `consistency`, then
    backpropagates, applies decoupled weight decay, and EMA-updates the teacher
    and weight-averaged models when enabled.
    """
    start_time = time.time()
    # one forward pass over labeled + weak + strong batches (shared batch stats)
    all_data = torch.cat([labeled_data, ul_weak_data, ul_strong_data], 0)
    forward_func = model.forward
    stu_logits = forward_func(all_data)
    labeled_preds = stu_logits[:labeled_data.shape[0]]
    # split the unlabeled logits back into the weak / strong views (batch dim)
    stu_unlabeled_weak_logits, stu_unlabeled_strong_logits = torch.chunk(stu_logits[labels.shape[0]:], 2, dim=0)
    if cfg.tsa:
        # training signal annealing on the supervised loss
        none_reduced_loss = F.cross_entropy(labeled_preds, labels, reduction="none")
        L_supervised = alg_utils.anneal_loss(
            labeled_preds, labels, none_reduced_loss, cur_iteration+1,
            cfg.iteration, labeled_preds.shape[1], cfg.tsa_schedule)
    else:
        L_supervised = F.cross_entropy(labeled_preds, labels)
    if cfg.coef > 0:
        # get target values
        if teacher_model is not None: # get target values from teacher model
            t_forward_func = teacher_model.forward
            tea_logits = t_forward_func(all_data)
            tea_unlabeled_weak_logits, _ = torch.chunk(tea_logits[labels.shape[0]:], 2, dim=0)
        else:
            t_forward_func = forward_func
            tea_unlabeled_weak_logits = stu_unlabeled_weak_logits
        # calc consistency loss
        # freeze BN running stats while the SSL algorithm re-runs forward passes
        model.update_batch_stats(False)
        # CONSISTENCY FIX: pass the weak/strong views as w_data/s_data, matching
        # every other call site of gen_ssl_alg's algorithms (train_val_test.py,
        # moon_data_exp.py); the previous `data=ul_strong_data` keyword did not
        # match that interface.
        y, targets, mask = ssl_alg(
            stu_preds = stu_unlabeled_strong_logits,
            tea_logits = tea_unlabeled_weak_logits.detach(),
            w_data = ul_weak_data,
            s_data = ul_strong_data,
            stu_forward = forward_func,
            tea_forward = t_forward_func
        )
        model.update_batch_stats(True)
        L_consistency = consistency(y, targets, mask, weak_prediction=tea_unlabeled_weak_logits.softmax(1))
    else:
        L_consistency = torch.zeros_like(L_supervised)
        mask = None
    # calc total loss
    coef = scheduler.exp_warmup(cfg.coef, cfg.warmup_iter, cur_iteration+1)
    loss = L_supervised + coef * L_consistency
    if cfg.entropy_minimization > 0:
        # entropy minimization on the weakly augmented unlabeled predictions
        loss -= cfg.entropy_minimization * \
            (stu_unlabeled_weak_logits.softmax(1) * F.log_softmax(stu_unlabeled_weak_logits, 1)).sum(1).mean()
    # update parameters
    cur_lr = optimizer.param_groups[0]["lr"]
    optimizer.zero_grad()
    loss.backward()
    if cfg.weight_decay > 0:
        # decoupled weight decay, scaled by the current learning rate
        decay_coeff = cfg.weight_decay * cur_lr
        model_utils.apply_weight_decay(model.modules(), decay_coeff)
    optimizer.step()
    # update teacher parameters by exponential moving average
    if cfg.ema_teacher:
        model_utils.ema_update(
            teacher_model, model, cfg.ema_teacher_factor,
            cfg.weight_decay * cur_lr if cfg.ema_apply_wd else None,
            cur_iteration if cfg.ema_teacher_warmup else None)
    # update evaluation model's parameters by exponential moving average
    if cfg.weight_average:
        model_utils.ema_update(
            average_model, model, cfg.wa_ema_factor,
            cfg.weight_decay * cur_lr if cfg.wa_apply_wd else None)
    # calculate accuracy for labeled data
    acc = (labeled_preds.max(1)[1] == labels).float().mean()
    return {
        "acc": acc,
        "loss": loss.item(),
        "sup loss": L_supervised.item(),
        "ssl loss": L_consistency.item(),
        "mask": mask.float().mean().item() if mask is not None else 1,
        "coef": coef,
        "sec/iter": (time.time() - start_time)
    }
def main(cfg, logger):
    """Full SSL training loop that periodically evaluates on the test set and
    reports medians over the last k checkpoints.

    NOTE(review): this function uses `os`, which is only imported inside the
    `if __name__ == "__main__"` guard -- fine when run as a script, but a
    NameError if this module is imported and main() called directly; confirm.
    """
    # set seed
    torch.manual_seed(cfg.seed)
    numpy.random.seed(cfg.seed)
    random.seed(cfg.seed)
    # select device
    if torch.cuda.is_available():
        device = "cuda"
        torch.backends.cudnn.benchmark = True
    else:
        logger.info("CUDA is NOT available")
        device = "cpu"
    # build data loader
    logger.info("load dataset")
    lt_loader, ult_loader, test_loader, num_classes, img_size = gen_dataloader(cfg.root, cfg.dataset, False, cfg, logger)
    # set consistency type
    consistency = gen_consistency(cfg.consistency, cfg)
    # set ssl algorithm
    ssl_alg = gen_ssl_alg(cfg.alg, cfg)
    # build student model
    model = gen_model(cfg.model, num_classes, img_size).to(device)
    # build teacher model
    if cfg.ema_teacher:
        teacher_model = gen_model(cfg.model, num_classes, img_size).to(device)
        teacher_model.load_state_dict(model.state_dict())
    else:
        teacher_model = None
    # for evaluation
    if cfg.weight_average:
        average_model = gen_model(cfg.model, num_classes, img_size).to(device)
        average_model.load_state_dict(model.state_dict())
    else:
        average_model = None
    model.train()
    logger.info(model)
    # build optimizer
    if cfg.optimizer == "sgd":
        optimizer = optim.SGD(
            model.parameters(), cfg.lr, cfg.momentum, weight_decay=0, nesterov=True
        )
    elif cfg.optimizer == "adam":
        optimizer = optim.Adam(
            model.parameters(), cfg.lr, (cfg.momentum, 0.999), weight_decay=0
        )
    else:
        raise NotImplementedError
    # set lr scheduler
    if cfg.lr_decay == "cos":
        lr_scheduler = scheduler.CosineAnnealingLR(optimizer, cfg.iteration)
    elif cfg.lr_decay == "step":
        # TODO: fixed milstones
        lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [400000, ], cfg.lr_decay_rate)
    else:
        raise NotImplementedError
    # init meter
    metric_meter = Meter()
    test_acc_list = []
    raw_acc_list = []
    logger.info("training")
    # iteration count is bounded by the shorter of the two loaders
    for i, (l_data, ul_data) in enumerate(zip(lt_loader, ult_loader)):
        l_aug, labels = l_data
        ul_w_aug, ul_s_aug, _ = ul_data
        params = param_update(
            cfg, i, model, teacher_model, optimizer, ssl_alg,
            consistency, l_aug.to(device), ul_w_aug.to(device),
            ul_s_aug.to(device), labels.to(device),
            average_model
        )
        # moving average for reporting losses and accuracy
        metric_meter.add(params, ignores=["coef"])
        # display losses every cfg.disp iterations
        if ((i+1) % cfg.disp) == 0:
            state = metric_meter.state(
                header = f'[{i+1}/{cfg.iteration}]',
                footer = f'ssl coef {params["coef"]:.4g} | lr {optimizer.param_groups[0]["lr"]:.4g}'
            )
            logger.info(state)
        lr_scheduler.step()
        # periodic test evaluation + checkpointing
        if ((i + 1) % cfg.checkpoint) == 0 or (i+1) == cfg.iteration:
            with torch.no_grad():
                if cfg.weight_average:
                    eval_model = average_model
                else:
                    eval_model = model
                logger.info("test")
                mean_raw_acc, mean_test_acc, mean_test_loss = evaluation(model, eval_model, test_loader, device)
                logger.info("test loss %f | test acc. %f | raw acc. %f", mean_test_loss, mean_test_acc, mean_raw_acc)
                test_acc_list.append(mean_test_acc)
                raw_acc_list.append(mean_raw_acc)
            torch.save(model.state_dict(), os.path.join(cfg.out_dir, "model_checkpoint.pth"))
            torch.save(optimizer.state_dict(), os.path.join(cfg.out_dir, "optimizer_checkpoint.pth"))
    # persist per-checkpoint accuracy histories
    numpy.save(os.path.join(cfg.out_dir, "results"), test_acc_list)
    numpy.save(os.path.join(cfg.out_dir, "raw_results"), raw_acc_list)
    accuracies = {}
    # report medians over the last k checkpoints (standard SSL reporting)
    for i in [1, 10, 20, 50]:
        logger.info("mean test acc. over last %d checkpoints: %f", i, numpy.median(test_acc_list[-i:]))
        logger.info("mean test acc. for raw model over last %d checkpoints: %f", i, numpy.median(raw_acc_list[-i:]))
        accuracies[f"last{i}"] = numpy.median(test_acc_list[-i:])
    with open(os.path.join(cfg.out_dir, "results.json"), "w") as f:
        json.dump(accuracies, f, sort_keys=True)
if __name__ == "__main__":
    # Script entry point: parse args, create the output dir, and configure a
    # logger that writes both to stdout and to <out_dir>/console.log.
    import os, sys
    from parser import get_args
    args = get_args()
    os.makedirs(args.out_dir, exist_ok=True)
    # setup logger
    plain_formatter = logging.Formatter(
        "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S"
    )
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    s_handler = logging.StreamHandler(stream=sys.stdout)
    s_handler.setFormatter(plain_formatter)
    s_handler.setLevel(logging.DEBUG)
    logger.addHandler(s_handler)
    f_handler = logging.FileHandler(os.path.join(args.out_dir, "console.log"))
    f_handler.setFormatter(plain_formatter)
    f_handler.setLevel(logging.DEBUG)
    logger.addHandler(f_handler)
    # avoid duplicate records via the root logger
    logger.propagate = False
    logger.info(args)
main(args, logger) | 10,043 | 35.129496 | 121 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/models/resnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import leaky_relu, conv3x3, BatchNorm2d, param_init, BaseModel
class _Residual(nn.Module):
    """Pre-activation residual block: (BN -> LeakyReLU -> conv3x3) twice.

    With `activate_before_residual`, the input is pre-activated once and that
    result feeds both the residual path and the shortcut (used for the first
    block of a network). The shortcut is a 1x1 strided projection whenever the
    spatial size or channel count changes, otherwise identity.
    """

    def __init__(self, input_channels, output_channels, stride=1, activate_before_residual=False):
        super().__init__()
        if activate_before_residual:
            self.pre_act = nn.Sequential(
                BatchNorm2d(input_channels),
                leaky_relu()
            )
        else:
            self.pre_act = nn.Identity()
        # Build the main path first, then the shortcut, preserving the
        # original module creation/registration order (it fixes RNG init order).
        main_path = nn.Sequential(
            BatchNorm2d(input_channels),
            leaky_relu(),
            conv3x3(input_channels, output_channels, stride),
            BatchNorm2d(output_channels),
            leaky_relu(),
            conv3x3(output_channels, output_channels),
        )
        if stride >= 2 or input_channels != output_channels:
            self.identity = nn.Conv2d(input_channels, output_channels, 1, stride, bias=False)
        else:
            self.identity = nn.Identity()
        self.layer = main_path

    def forward(self, x):
        x = self.pre_act(x)
        return self.identity(x) + self.layer(x)
class ResNet(BaseModel):
    """
    WRN-style ResNet built from pre-activation residual blocks.

    Parameters
    --------
    num_classes: int
        number of classes
    filters: int
        base number of filters (doubled at each scale)
    scales: int
        number of scales (each downsamples by 2 except the first)
    repeat: int
        number of residual blocks per scale
    dropout: float
        dropout ratio before the classifier (None disables dropout)
    """
    def __init__(self, num_classes, filters, scales, repeat, dropout=None, *args, **kwargs):
        super().__init__()
        blocks = [conv3x3(3, 16)]
        in_ch = 16
        for scale in range(scales):
            out_ch = filters << scale
            # first block of each scale changes channels (and stride, except scale 0)
            blocks.append(_Residual(in_ch, out_ch, 2 if scale else 1,
                                    activate_before_residual=(scale == 0)))
            in_ch = out_ch
            blocks.extend(_Residual(in_ch, in_ch) for _ in range(repeat - 1))
        blocks += [BatchNorm2d(in_ch), leaky_relu()]
        self.feature_extractor = nn.Sequential(*blocks)
        head = [] if dropout is None else [nn.Dropout(dropout)]
        head.append(nn.Linear(in_ch, num_classes))
        self.classifier = nn.Sequential(*head)
        param_init(self.modules())
| 2,568 | 31.1125 | 111 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/models/utils.py | import math
import torch.nn as nn
import torch.nn.functional as F
class BaseModel(nn.Module):
    """Shared forward logic: feature extractor -> global average pool -> classifier.

    Subclasses must define `self.feature_extractor` (producing an NCHW feature
    map) and `self.classifier` (mapping pooled features to logits).
    """

    def forward(self, x):
        features = self.feature_extractor(x)
        pooled = features.mean((2, 3))  # global average pooling over H, W
        return self.classifier(pooled)

    def logits_with_feature(self, x):
        """Return (logits, pre-pooling feature map) for one batch."""
        features = self.feature_extractor(x)
        logits = self.classifier(features.mean((2, 3)))
        return logits, features

    def update_batch_stats(self, flag):
        """Toggle running-statistics updates on every BatchNorm2d submodule."""
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.update_batch_stats = flag
def conv3x3(i_c, o_c, stride=1, bias=False):
    """3x3 convolution with padding 1 (spatial size preserved at stride 1)."""
    return nn.Conv2d(i_c, o_c, kernel_size=3, stride=stride, padding=1, bias=bias)
class BatchNorm2d(nn.BatchNorm2d):
    """BatchNorm2d whose running-statistics updates can be toggled via
    `update_batch_stats` (used while SSL algorithms re-run forward passes).

    BUG FIX: `momentum` and `eps` were accepted but never forwarded to
    nn.BatchNorm2d, so the intended defaults of 1e-3 were silently replaced
    by PyTorch's 0.1 / 1e-5. They are now passed through.
    """

    def __init__(self, channels, momentum=1e-3, eps=1e-3):
        super().__init__(channels, eps=eps, momentum=momentum)
        self.update_batch_stats = True

    def forward(self, x):
        if self.update_batch_stats or not self.training:
            return super().forward(x)
        # Training with frozen running stats: normalize with the current batch
        # statistics only, leaving running_mean/running_var untouched.
        return nn.functional.batch_norm(
            x, None, None, self.weight, self.bias, True, self.momentum, self.eps
        )
def leaky_relu():
    """LeakyReLU with the 0.1 negative slope used throughout these models."""
    return nn.LeakyReLU(negative_slope=0.1)
"""
For exponential moving average
"""
def apply_weight_decay(modules, decay_rate):
    """Apply decoupled weight decay in place to Conv2d/Linear weight tensors.

    Biases and normalization parameters are deliberately left untouched.
    """
    decayable = (nn.Conv2d, nn.Linear)
    for module in modules:
        if isinstance(module, decayable):
            module.weight.data -= decay_rate * module.weight.data
def param_init(modules):
    """Initialize conv weights with a scaled normal; linear layers get Xavier
    weights and zero bias. Non-conv/linear modules are skipped."""
    for module in modules:
        if isinstance(module, nn.Conv2d):
            out_channels, _, kernel, _ = module.weight.shape
            std = 1. / math.sqrt(0.5 * kernel * kernel * out_channels)
            nn.init.normal_(module.weight, 0, std)
        elif isinstance(module, nn.Linear):
            nn.init.xavier_normal_(module.weight)
            nn.init.constant_(module.bias, 0)
def __ema(avg, new, factor):
    """Blend two tensors: factor * avg + (1 - factor) * new."""
    return factor * avg + (1 - factor) * new
def __param_update(ema_model, raw_model, factor):
    """EMA-update every trainable parameter of ema_model toward raw_model."""
    for avg_p, raw_p in zip(ema_model.parameters(), raw_model.parameters()):
        avg_p.data = __ema(avg_p.data, raw_p.data, factor)
def __buffer_update(ema_model, raw_model, factor):
    """EMA-update every buffer (e.g. BatchNorm running_mean / running_var)."""
    for avg_b, raw_b in zip(ema_model.buffers(), raw_model.buffers()):
        avg_b.data = __ema(avg_b.data, raw_b.data, factor)
def ema_update(ema_model, raw_model, ema_factor, weight_decay_factor=None, global_step=None):
    """EMA-update ema_model's parameters and buffers toward raw_model.

    When `global_step` is given the factor is warmed up (capped at
    1 - 1/(step+1)); when `weight_decay_factor` is given, decoupled weight
    decay is additionally applied to the averaged model.
    """
    effective_factor = ema_factor
    if global_step is not None:
        effective_factor = min(1 - 1 / (global_step+1), effective_factor)
    __param_update(ema_model, raw_model, effective_factor)
    __buffer_update(ema_model, raw_model, effective_factor)
    if weight_decay_factor is not None:
        apply_weight_decay(ema_model.modules(), weight_decay_factor)
| 2,915 | 30.354839 | 93 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/models/cnn13.py | import torch.nn as nn
from .utils import leaky_relu, conv3x3, BatchNorm2d, BaseModel
class CNN13(BaseModel):
    """
    13-layer CNN

    Parameters
    --------
    num_classes: int
        number of classes
    filters: int
        base number of filters (doubled after the first pooling stage)
    """
    def __init__(self, num_classes, filters, *args, **kwargs):
        super().__init__()

        def block(in_c, out_c):
            # conv -> LeakyReLU -> BN triple used throughout the first stages
            return [conv3x3(in_c, out_c, bias=True), leaky_relu(), BatchNorm2d(out_c)]

        layers = []
        layers += block(3, filters)
        layers += block(filters, filters)
        layers += block(filters, filters)
        layers.append(nn.MaxPool2d(2, 2))
        layers += block(filters, 2*filters)
        layers += block(2*filters, 2*filters)
        layers += block(2*filters, 2*filters)
        layers.append(nn.MaxPool2d(2, 2))
        # final stage: 3x3 valid conv, then two 1x1 bottleneck convs
        layers += [nn.Conv2d(2*filters, 4*filters, 3), leaky_relu(), BatchNorm2d(4*filters)]
        layers += [nn.Conv2d(4*filters, 2*filters, 1, bias=False), leaky_relu(), BatchNorm2d(2*filters)]
        layers += [nn.Conv2d(2*filters, filters, 1, bias=False), leaky_relu(), BatchNorm2d(filters)]
        self.feature_extractor = nn.Sequential(*layers)
        self.classifier = nn.Linear(filters, num_classes)
        # Xavier init for all conv/linear weights, zero biases
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                nn.init.xavier_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
| 1,778 | 30.210526 | 62 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/models/shakenet.py | import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import conv3x3, BatchNorm2d, param_init, BaseModel
class _ShakeShake(nn.Module):
    # Shake-shake combination of two residual branches: deterministic average
    # at eval time, random convex combination during training.
    def __init__(self, branch1, branch2):
        super().__init__()
        self.branch1 = branch1
        self.branch2 = branch2
    def forward(self, x):
        a = self.branch1(x)
        b = self.branch2(x)
        if not self.training:
            # evaluation: plain 50/50 blend of the two branches
            return 0.5 * (a + b)
        # NOTE(review): `a.new([...])` builds a 1-D tensor from the list
        # *values* [batch, 1, 1, ...], not a tensor of that shape — confirm
        # the intended per-sample (batch, 1, ..., 1) mixing coefficients.
        mu = a.new([a.shape[0]] + [1] * (len(a.shape) - 1)).uniform_()
        mixf = a + mu * (b - a)
        # NOTE(review): mu[::1] is an identity slice, so mixb == mixf and the
        # forward/backward decoupling below is a no-op; shake-shake normally
        # draws an independent coefficient for the backward pass — confirm.
        mixb = a + mu[::1] * (b - a)
        # forward value comes from mixf, gradients flow through mixb
        return (mixf - mixb).detach() + mixb
class _SkipBranch(nn.Module):
def __init__(self, branch1, branch2, bn):
super().__init__()
self.branch1 = branch1
self.branch2 = branch2
self.bn = bn
def forward(self, x):
a = self.branch1(x[..., ::2, ::2])
b = self.branch2(x[..., 1::2, 1::2])
x = torch.cat([a, b], 1)
return self.bn(x)
def _branch(filters, channels, stride=1):
    # One shake-shake residual branch:
    # ReLU -> 3x3 conv (channels -> filters, with stride) -> BN -> ReLU
    # -> 3x3 conv (filters -> filters) -> BN.
    return nn.Sequential(
        nn.ReLU(),
        conv3x3(channels, filters, stride),
        BatchNorm2d(filters),
        nn.ReLU(),
        conv3x3(filters, filters),
        BatchNorm2d(filters)
    )
class _Residual(nn.Module):
    """Shake-shake residual block mapping `channels` -> `filters` feature maps.

    The two residual branches are combined by _ShakeShake; the skip path is a
    strided concat-downsample (_SkipBranch) when stride == 2, a 1x1 projection
    when only the channel count changes, and identity otherwise.
    """
    def __init__(self, channels, filters, stride=1):
        super().__init__()
        # bugfix: arguments were passed as _branch(channels, filters, ...),
        # which swapped in/out channels relative to _branch's
        # (filters, channels, stride) signature.
        self.branch = _ShakeShake(
            _branch(filters, channels, stride),
            _branch(filters, channels, stride)
        )
        if stride == 2:
            # Each 1x1 conv consumes the full `channels`-channel input
            # (bugfix: in-channels was channels//2, which cannot consume the
            # `channels`-channel subsampled input) and produces half of
            # `filters`; the concat in _SkipBranch restores `filters`.
            branch1 = nn.Sequential(nn.ReLU(), nn.Conv2d(channels, filters >> 1, 1, bias=False))
            branch2 = nn.Sequential(nn.ReLU(), nn.Conv2d(channels, filters >> 1, 1, bias=False))
            bn = BatchNorm2d(filters)
            self.skip = _SkipBranch(branch1, branch2, bn)
        elif channels != filters:
            # channel-only change: 1x1 projection
            self.skip = nn.Sequential(
                nn.Conv2d(channels, filters, 1, bias=False),
                BatchNorm2d(filters)
            )
        else:
            # bugfix: forward() always uses self.skip, but no skip was defined
            # for the stride-1, same-width case; an empty Sequential is identity.
            self.skip = nn.Sequential()

    def forward(self, x):
        return self.branch(x) + self.skip(x)
class ShakeNet(BaseModel):
    """
    Shake-Shake model
    Parameters
    --------
    num_classes: int
        number of classes
    filters: int
        number of filters
    scales: int
        number of scales
    repeat: int
        number of residual blocks per scale
    dropout: float
        dropout ratio (None indicates dropout is unused)
    """
    def __init__(self, num_classes, filters, scales, repeat, dropout=None, *args, **kwargs):
        super().__init__()
        # stem conv, then `repeat` residual blocks per scale; the first block
        # of every scale after the first downsamples with stride 2
        feature_extractor = [conv3x3(3, 16)]
        channels = 16
        for scale, i in itertools.product(range(scales), range(repeat)):
            if i == 0:
                feature_extractor.append(_Residual(channels, filters << scale, stride = 2 if scale else 1))
            else:
                feature_extractor.append(_Residual(channels, filters << scale))
            channels = filters << scale
        self.feature_extractor = nn.Sequential(*feature_extractor)
        classifier = []
        if dropout is not None:
            classifier.append(nn.Dropout(dropout))
        classifier.append(nn.Linear(channels, num_classes))
        # bugfix: the classifier modules were built but never registered,
        # leaving self.classifier undefined (cf. CNN13.classifier).
        self.classifier = nn.Sequential(*classifier)
        param_init(self.modules())
| 3,250 | 28.026786 | 107 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/param_scheduler/scheduler.py | import torch
import warnings
import math
import torch.optim as optim
def exp_warmup(base_value, max_warmup_iter, cur_step):
    """Exponential warmup schedule proposed in Mean Teacher.

    Returns ``base_value * exp(-5 * (1 - t)**2)`` with
    ``t = cur_step / max_warmup_iter``, saturating at ``base_value`` once
    ``cur_step`` reaches ``max_warmup_iter``.

    Parameters
    -----
    base_value: float
        maximum value
    max_warmup_iter: int
        maximum warmup iteration
    cur_step: int
        current iteration
    """
    if cur_step < max_warmup_iter:
        progress = cur_step / max_warmup_iter
        return base_value * math.exp(-5 * (1 - progress) ** 2)
    return base_value
def linear_warmup(base_value, max_warmup_iter, cur_step):
    """Linear warmup schedule.

    Returns ``base_value * cur_step / max_warmup_iter``, saturating at
    ``base_value`` once ``cur_step`` reaches ``max_warmup_iter``.

    Parameters
    -----
    base_value: float
        maximum value
    max_warmup_iter: int
        maximum warmup iteration
    cur_step: int
        current iteration
    """
    if cur_step < max_warmup_iter:
        return base_value * cur_step / max_warmup_iter
    return base_value
def cosine_decay(base_lr, max_iteration, cur_step):
    """Cosine learning-rate decay with the constants used by FixMatch.

    Returns ``base_lr * cos(7 * pi * cur_step / (16 * max_iteration))``, i.e.
    the rate decays from ``base_lr`` toward ``base_lr * cos(7*pi/16)`` over
    ``max_iteration`` steps.

    Parameters
    -----
    base_lr: float
        maximum learning rate
    max_iteration: int
        total number of iterations
    cur_step: int
        current iteration
    """
    angle = (7 * math.pi * cur_step) / (16 * max_iteration)
    return base_lr * math.cos(angle)
def CosineAnnealingLR(optimizer, max_iteration):
    """Build a LambdaLR scheduler implementing FixMatch-style cosine annealing.

    The multiplicative factor at step ``s`` is
    ``cos(7 * pi * s / (16 * max_iteration))``.
    """
    def lr_factor(cur_step):
        # same curve as cosine_decay, expressed as a multiplier of the base lr
        return math.cos((7 * math.pi * cur_step) / (16 * max_iteration))
    return optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_factor)
| 1,758 | 24.128571 | 132 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/consistency/mean_squared.py | import torch.nn as nn
import torch.nn.functional as F
def mean_squared(y, target, mask=None):
    """Mean-squared consistency loss between softmax(y) and a target
    distribution, averaged per sample and then over the batch; an optional
    per-sample mask weights each sample's contribution."""
    probs = y.softmax(1)
    per_sample = F.mse_loss(probs, target, reduction="none").mean(1)
    if mask is None:
        return per_sample.mean()
    return (mask * per_sample).mean()
class MeanSquared(nn.Module):
    """nn.Module wrapper around :func:`mean_squared`; the target is detached
    so no gradient flows into the teacher."""
    def forward(self, y, target, mask=None, *args, **kwargs):
        detached_target = target.detach()
        return mean_squared(y, detached_target, mask)
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/consistency/cross_entropy.py | import torch.nn as nn
import torch.nn.functional as F
def cross_entropy(y, target, mask=None):
    """Cross-entropy loss supporting hard (1-D class index) or soft (2-D
    probability) targets, with an optional per-sample mask; returns the mean
    over the batch."""
    if target.ndim == 1:
        # hard labels: integer class indices
        per_sample = F.cross_entropy(y, target, reduction="none")
    else:
        # soft labels: full probability distributions
        per_sample = -(target * F.log_softmax(y, 1)).sum(1)
    if mask is None:
        return per_sample.mean()
    return (mask * per_sample).mean()
class CrossEntropy(nn.Module):
    """nn.Module wrapper around :func:`cross_entropy`; the target is detached
    so no gradient flows into the teacher."""
    def forward(self, y, target, mask=None, *args, **kwargs):
        detached_target = target.detach()
        return cross_entropy(y, detached_target, mask)
| 486 | 29.4375 | 61 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/datasets/utils.py | import os
import numpy as np
import torch
from torch.utils.data import Sampler
from torchvision.datasets import SVHN, CIFAR10, CIFAR100, STL10
class InfiniteSampler(Sampler):
    """Yield exactly ``num_sample`` indices over a dataset of ``num_data``
    items, sampling without replacement within each concatenated epoch
    (each epoch is an independent random permutation)."""
    def __init__(self, num_data, num_sample):
        num_epochs = num_sample // num_data + 1
        perms = [torch.randperm(num_data) for _ in range(num_epochs)]
        self.indices = torch.cat(perms).tolist()[:num_sample]

    def __iter__(self):
        return iter(self.indices)

    def __len__(self):
        return len(self.indices)
def get_svhn(root):
    """Download (if needed) SVHN and return (train, test) dicts with
    NHWC float32 images in [0, 255] and int32 labels."""
    train_data = SVHN(root, "train", download=True)
    test_data = SVHN(root, "test", download=True)
    # torchvision stores SVHN as NCHW; convert to NHWC for this code base
    train_data = {"images": np.transpose(train_data.data.astype(np.float32), (0, 2, 3, 1)),
                  "labels": train_data.labels.astype(np.int32)}
    test_data = {"images": np.transpose(test_data.data.astype(np.float32), (0, 2, 3, 1)),
                 "labels": test_data.labels.astype(np.int32)}
    return train_data, test_data
def get_cifar10(root):
    """Download (if needed) CIFAR-10 and return (train, test) dicts with
    NHWC float32 images in [0, 255] and int32 labels."""
    train_data = CIFAR10(root, download=True)
    test_data = CIFAR10(root, False)
    # CIFAR data is already NHWC; only the dtypes are normalized
    train_data = {"images": train_data.data.astype(np.float32),
                  "labels": np.asarray(train_data.targets).astype(np.int32)}
    test_data = {"images": test_data.data.astype(np.float32),
                 "labels": np.asarray(test_data.targets).astype(np.int32)}
    return train_data, test_data
def get_cifar100(root):
    """Download (if needed) CIFAR-100 and return (train, test) dicts with
    NHWC float32 images in [0, 255] and int32 labels."""
    train_data = CIFAR100(root, download=True)
    test_data = CIFAR100(root, False)
    # CIFAR data is already NHWC; only the dtypes are normalized
    train_data = {"images": train_data.data.astype(np.float32),
                  "labels": np.asarray(train_data.targets).astype(np.int32)}
    test_data = {"images": test_data.data.astype(np.float32),
                 "labels": np.asarray(test_data.targets).astype(np.int32)}
    return train_data, test_data
def get_stl10(root):
    """Download (if needed) STL-10 and return (train, unlabeled, test) dicts
    with NHWC float32 images in [0, 255]."""
    train_data = STL10(root, split="train", download=True)
    ul_train_data = STL10(root, split="unlabeled")
    test_data = STL10(root, split="test")
    # torchvision stores STL-10 as NCHW; convert to NHWC for this code base
    train_data = {"images": np.transpose(train_data.data.astype(np.float32), (0, 2, 3, 1)),
                  "labels": train_data.labels}
    ul_train_data = {"images": np.transpose(ul_train_data.data.astype(np.float32), (0, 2, 3, 1)),
                  "labels": ul_train_data.labels}
    test_data = {"images": np.transpose(test_data.data.astype(np.float32), (0, 2, 3, 1)),
                 "labels": test_data.labels}
    return train_data, ul_train_data, test_data
def dataset_split(data, num_data, num_classes, random=False):
    """Split one dataset dict into two.

    Parameters
    -----
    data: dict with keys ["images", "labels"]
        each value is a numpy.array
    num_data: int
        number of samples placed in the first dataset
    num_classes: int
        number of classes
    random: bool
        if True, the first dataset is simply the first ``num_data`` samples;
        if False, it contains ``num_data // num_classes`` samples per class
        (class-balanced), taken in order within each class.

    Returns
    -----
    (dataset1, dataset2): dicts shaped like ``data`` with ``num_data`` and
    ``len(data) - num_data`` samples respectively.
    """
    images = data["images"]
    labels = data["labels"]
    if random:
        # straight prefix/suffix split
        dataset1 = {"images": images[:num_data], "labels": labels[:num_data]}
        dataset2 = {"images": images[num_data:], "labels": labels[num_data:]}
        return dataset1, dataset2
    # class-balanced split: the same number of samples from every class
    per_class = num_data // num_classes
    first_imgs, first_lbls = [], []
    rest_imgs, rest_lbls = [], []
    for c in range(num_classes):
        cls_mask = (labels == c)
        cls_imgs = images[cls_mask]
        cls_lbls = labels[cls_mask]
        first_imgs.append(cls_imgs[:per_class])
        first_lbls.append(cls_lbls[:per_class])
        rest_imgs.append(cls_imgs[per_class:])
        rest_lbls.append(cls_lbls[per_class:])
    dataset1 = {"images": np.concatenate(first_imgs), "labels": np.concatenate(first_lbls)}
    dataset2 = {"images": np.concatenate(rest_imgs), "labels": np.concatenate(rest_lbls)}
    return dataset1, dataset2
def get_zca_normalization_param(images, scale=0.1, eps=1e-10):
    """Compute ZCA whitening parameters from a batch of NHWC images.

    Returns (mean, zca_matrix) over flattened CHW feature vectors; `scale`
    regularizes the covariance (added to its diagonal) and `eps` guards the
    inverse square root of the singular values.
    """
    n_data, height, width, channels = images.shape
    flat = images.transpose(0, 3, 1, 2).reshape(n_data, channels * height * width)
    # regularized feature covariance
    image_cov = np.cov(flat, rowvar=False) + scale * np.eye(channels * height * width)
    U, S, _ = np.linalg.svd(image_cov)
    # ZCA: U diag(1/sqrt(S)) U^T
    zca_decomp = U @ np.diag(1.0 / np.sqrt(S + eps)) @ U.T
    return flat.mean(axis=0), zca_decomp
| 4,664 | 36.620968 | 105 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/datasets/builder.py | import os
import numpy as np
from torch.utils.data import DataLoader
from torchvision import transforms
from . import utils
from . import dataset_class
from ..augmentation.builder import gen_strong_augmentation, gen_weak_augmentation
from ..augmentation.augmentation_pool import numpy_batch_gcn, ZCA, GCN
def __val_labeled_unlabeled_split(cfg, train_data, test_data, num_classes, ul_data=None):
    """Shuffle ``train_data`` (seeded by cfg.seed, mutated in place), carve
    off a validation split of ``cfg.val_ratio``, then split the remainder
    into ``cfg.num_labels`` labeled samples and an unlabeled pool; any extra
    ``ul_data`` is appended to the unlabeled pool."""
    n_train = len(train_data["images"])
    num_validation = int(np.round(n_train * cfg.val_ratio))
    np.random.seed(cfg.seed)
    order = np.random.permutation(n_train)
    for key in ("images", "labels"):
        train_data[key] = train_data[key][order]
    val_data, remainder = utils.dataset_split(train_data, num_validation, num_classes, cfg.random_split)
    l_train_data, ul_train_data = utils.dataset_split(remainder, cfg.num_labels, num_classes)
    if ul_data is not None:
        for key in ("images", "labels"):
            ul_train_data[key] = np.concatenate([ul_train_data[key], ul_data[key]], 0)
    return val_data, l_train_data, ul_train_data
def __labeled_unlabeled_split(cfg, train_data, test_data, num_classes, ul_data=None):
    """Shuffle ``train_data`` (seeded by cfg.seed, mutated in place) and split
    it into ``cfg.num_labels`` labeled samples and an unlabeled pool; any
    extra ``ul_data`` is appended to the unlabeled pool."""
    np.random.seed(cfg.seed)
    order = np.random.permutation(len(train_data["images"]))
    for key in ("images", "labels"):
        train_data[key] = train_data[key][order]
    l_train_data, ul_train_data = utils.dataset_split(train_data, cfg.num_labels, num_classes)
    if ul_data is not None:
        for key in ("images", "labels"):
            ul_train_data[key] = np.concatenate([ul_train_data[key], ul_data[key]], 0)
    return l_train_data, ul_train_data
def gen_dataloader(root, dataset, validation_split, cfg, logger=None):
    """
    generate train, val, and test dataloaders
    Parameters
    --------
    root: str
        root directory
    dataset: str
        dataset name, ['cifar10', 'cifar100', 'svhn', 'stl10']
    validation_split: bool
        if True, return validation loader.
        validation data is made from training data
    cfg: argparse.Namespace or something
    logger: logging.Logger
    """
    # --- load the raw dataset and record its class count / image size ---
    ul_train_data = None
    if dataset == "svhn":
        train_data, test_data = utils.get_svhn(root)
        num_classes = 10
        img_size = 32
    elif dataset == "stl10":
        # STL-10 ships a dedicated unlabeled split
        train_data, ul_train_data, test_data = utils.get_stl10(root)
        num_classes = 10
        img_size = 96
    elif dataset == "cifar10":
        train_data, test_data = utils.get_cifar10(root)
        num_classes = 10
        img_size = 32
    elif dataset == "cifar100":
        train_data, test_data = utils.get_cifar100(root)
        num_classes = 100
        img_size = 32
    else:
        raise NotImplementedError
    # --- split training data into (optional validation) / labeled / unlabeled ---
    if validation_split:
        val_data, l_train_data, ul_train_data = __val_labeled_unlabeled_split(
            cfg, train_data, test_data, num_classes, ul_train_data)
    else:
        l_train_data, ul_train_data = __labeled_unlabeled_split(
            cfg, train_data, test_data, num_classes, ul_train_data)
        val_data = None
    # labeled samples are also fed into the unlabeled stream
    ul_train_data["images"] = np.concatenate([ul_train_data["images"], l_train_data["images"]], 0)
    ul_train_data["labels"] = np.concatenate([ul_train_data["labels"], l_train_data["labels"]], 0)
    if logger is not None:
        logger.info("number of :\n \
            training data: %d\n \
            labeled data: %d\n \
            unlabeled data: %d\n \
            validation data: %d\n \
            test data: %d",
            len(train_data["images"]),
            len(l_train_data["images"]),
            len(ul_train_data["images"]),
            0 if val_data is None else len(val_data["images"]),
            len(test_data["images"]))
    labeled_train_data = dataset_class.LabeledDataset(l_train_data)
    unlabeled_train_data = dataset_class.UnlabeledDataset(ul_train_data)
    # --- compute normalization statistics over all training images ---
    train_data = np.concatenate([
        labeled_train_data.dataset["images"],
        unlabeled_train_data.dataset["images"]
    ], 0)
    if cfg.whiten:
        # per-channel standardization statistics (raw images are in [0, 255])
        mean = train_data.mean((0, 1, 2)) / 255.
        scale = train_data.std((0, 1, 2)) / 255.
    elif cfg.zca:
        # ZCA whitening parameters computed on contrast-normalized images
        mean, scale = utils.get_zca_normalization_param(numpy_batch_gcn(train_data))
    else:
        # from [0, 1] to [-1, 1]
        mean = [0.5, 0.5, 0.5]
        scale = [0.5, 0.5, 0.5]
    # set augmentation
    # RA: RandAugment, WA: Weak Augmentation
    # RandAugment operation table depends on the algorithm family
    randauglist = "fixmatch" if cfg.alg == "pl" else "uda"
    # cfg.wa is a "."-separated t/f string turned into boolean flags that are
    # forwarded to the augmentation builders
    flags = [True if b == "t" else False for b in cfg.wa.split(".")]
    if cfg.labeled_aug == "RA":
        labeled_augmentation = gen_strong_augmentation(
            img_size, mean, scale, flags[0], flags[1], randauglist, cfg.zca)
    elif cfg.labeled_aug == "WA":
        labeled_augmentation = gen_weak_augmentation(img_size, mean, scale, *flags, cfg.zca)
    else:
        raise NotImplementedError
    labeled_train_data.transform = labeled_augmentation
    if cfg.unlabeled_aug == "RA":
        unlabeled_augmentation = gen_strong_augmentation(
            img_size, mean, scale, flags[0], flags[1], randauglist, cfg.zca)
    elif cfg.unlabeled_aug == "WA":
        unlabeled_augmentation = gen_weak_augmentation(img_size, mean, scale, *flags, cfg.zca)
    else:
        raise NotImplementedError
    if logger is not None:
        logger.info("labeled augmentation")
        logger.info(labeled_augmentation)
        logger.info("unlabeled augmentation")
        logger.info(unlabeled_augmentation)
    unlabeled_train_data.weak_augmentation = unlabeled_augmentation
    if cfg.strong_aug:
        # optional second, strongly-augmented view for the unlabeled stream
        strong_augmentation = gen_strong_augmentation(
            img_size, mean, scale, flags[0], flags[1], randauglist, cfg.zca)
        unlabeled_train_data.strong_augmentation = strong_augmentation
        if logger is not None:
            logger.info(strong_augmentation)
    # --- evaluation-time transform: normalization only, no augmentation ---
    if cfg.zca:
        test_transform = transforms.Compose([GCN(), ZCA(mean, scale)])
    else:
        test_transform = transforms.Compose([transforms.Normalize(mean, scale, True)])
    test_data = dataset_class.LabeledDataset(test_data, test_transform)
    # --- build loaders; training loaders draw cfg.iteration batches in total ---
    l_train_loader = DataLoader(
        labeled_train_data,
        cfg.l_batch_size,
        sampler=utils.InfiniteSampler(len(labeled_train_data), cfg.iteration * cfg.l_batch_size),
        num_workers=cfg.num_workers
    )
    ul_train_loader = DataLoader(
        unlabeled_train_data,
        cfg.ul_batch_size,
        sampler=utils.InfiniteSampler(len(unlabeled_train_data), cfg.iteration * cfg.ul_batch_size),
        num_workers=cfg.num_workers
    )
    test_loader = DataLoader(
        test_data,
        1,
        shuffle=False,
        drop_last=False,
        num_workers=cfg.num_workers
    )
    if validation_split:
        validation_data = dataset_class.LabeledDataset(val_data, test_transform)
        val_loader = DataLoader(
            validation_data,
            1,
            shuffle=False,
            drop_last=False,
            num_workers=cfg.num_workers
        )
        return (
            l_train_loader,
            ul_train_loader,
            val_loader,
            test_loader,
            num_classes,
            img_size
        )
    else:
        return (
            l_train_loader,
            ul_train_loader,
            test_loader,
            num_classes,
            img_size
        )
| 7,546 | 33.619266 | 105 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/datasets/dataset_class.py | import torch
class LabeledDataset:
    """Map-style dataset over a dict with numpy ``images`` (N, H, W, C) and
    ``labels``; yields (CHW float tensor scaled to [0, 1], int label), with an
    optional transform applied to the image."""
    def __init__(self, dataset, transform=None):
        self.dataset = dataset
        self.transform = transform

    def __getitem__(self, idx):
        raw = self.dataset["images"][idx]
        # HWC array -> CHW float tensor in [0, 1]
        image = torch.from_numpy(raw).float().permute(2, 0, 1).contiguous() / 255.
        label = int(self.dataset["labels"][idx])
        if self.transform is not None:
            image = self.transform(image)
        return image, label

    def __len__(self):
        return len(self.dataset["images"])
class UnlabeledDataset:
    """Map-style dataset over a dict with numpy ``images`` and ``labels``;
    yields (weak_view, strong_view, label). The strong view falls back to the
    weak augmentation when no strong augmentation is configured."""
    def __init__(self, dataset, weak_augmentation=None, strong_augmentation=None):
        self.dataset = dataset
        self.weak_augmentation = weak_augmentation
        self.strong_augmentation = strong_augmentation

    def __getitem__(self, idx):
        raw = self.dataset["images"][idx]
        # HWC array -> CHW float tensor in [0, 1]
        image = torch.from_numpy(raw).float().permute(2, 0, 1).contiguous() / 255.
        label = int(self.dataset["labels"][idx])
        weak_view = self.weak_augmentation(image)
        if self.strong_augmentation is not None:
            strong_view = self.strong_augmentation(image)
        else:
            strong_view = self.weak_augmentation(image)
        return weak_view, strong_view, label

    def __len__(self):
        return len(self.dataset["images"])
| 1,422 | 29.276596 | 82 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/augmentation/augmentation_pool.py | import random
import torch
import torch.nn.functional as F
import numpy as np
from PIL import ImageOps, ImageEnhance, ImageFilter, Image
"""
For PIL.Image
"""
def autocontrast(x, *args, **kwargs):
    """Maximize image contrast; round-trips through RGB for the PIL op."""
    rgb = x.convert("RGB")
    return ImageOps.autocontrast(rgb).convert("RGBA")
def brightness(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
    """Adjust brightness by a factor in [0.1, max_level + 0.1] scaled by level."""
    factor = (level / magnitude) * max_level + 0.1
    return ImageEnhance.Brightness(x).enhance(factor)
def color(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
    """Adjust color saturation by a factor in [0.1, max_level + 0.1]."""
    factor = (level / magnitude) * max_level + 0.1
    return ImageEnhance.Color(x).enhance(factor)
def contrast(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
    """Adjust contrast by a factor in [0.1, max_level + 0.1] scaled by level."""
    factor = (level / magnitude) * max_level + 0.1
    return ImageEnhance.Contrast(x).enhance(factor)
def equalize(x, *args, **kwargs):
    """Histogram-equalize the image; round-trips through RGB for the PIL op."""
    rgb = x.convert("RGB")
    return ImageOps.equalize(rgb).convert("RGBA")
def identity(x, *args, **kwargs):
    """No-op augmentation: return the image unchanged (extra args ignored)."""
    return x
def invert(x, *args, **kwargs):
    """Invert (negate) the image; round-trips through RGB for the PIL op."""
    rgb = x.convert("RGB")
    return ImageOps.invert(rgb).convert("RGBA")
def posterize(x, level, magnitude=10, max_level=4, *args, **kwargs):
    """Reduce bits per channel; higher level keeps fewer bits (4 - scaled level)."""
    drop = int((level / magnitude) * max_level)
    rgb = x.convert("RGB")
    return ImageOps.posterize(rgb, 4 - drop).convert("RGBA")
def rotate(x, level, magnitude=10, max_level=30, *args, **kwargs):
    """Rotate by up to max_level degrees with a randomly chosen sign."""
    degree = int((level / magnitude) * max_level)
    sign = -1 if random.random() > 0.5 else 1
    return x.rotate(sign * degree)
def sharpness(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
    """Adjust sharpness by a factor in [0.1, max_level + 0.1] scaled by level."""
    factor = (level / magnitude) * max_level + 0.1
    return ImageEnhance.Sharpness(x).enhance(factor)
def shear_x(x, level, magnitude=10, max_level=0.3, *args, **kwargs):
    """Shear horizontally by up to max_level with a randomly chosen sign."""
    shear = (level / magnitude) * max_level
    sign = -1 if random.random() > 0.5 else 1
    return x.transform(x.size, Image.AFFINE, (1, sign * shear, 0, 0, 1, 0))
def shear_y(x, level, magnitude=10, max_level=0.3, *args, **kwargs):
    """Shear vertically by up to max_level with a randomly chosen sign."""
    shear = (level / magnitude) * max_level
    sign = -1 if random.random() > 0.5 else 1
    return x.transform(x.size, Image.AFFINE, (1, 0, 0, sign * shear, 1, 0))
def solarize(x, level, magnitude=10, max_level=256, *args, **kwargs):
    """Invert pixels above a threshold; higher level lowers the threshold."""
    threshold = 256 - int((level / magnitude) * max_level)
    rgb = x.convert("RGB")
    return ImageOps.solarize(rgb, threshold).convert("RGBA")
def translate_x(x, level, magnitude=10, max_level=10, *args, **kwargs):
    """Translate horizontally by up to max_level pixels, random direction."""
    shift = int((level / magnitude) * max_level)
    sign = -1 if random.random() > 0.5 else 1
    return x.transform(x.size, Image.AFFINE, (1, 0, sign * shift, 0, 1, 0))
def translate_y(x, level, magnitude=10, max_level=10, *args, **kwargs):
    """Translate vertically by up to max_level pixels, random direction."""
    shift = int((level / magnitude) * max_level)
    sign = -1 if random.random() > 0.5 else 1
    return x.transform(x.size, Image.AFFINE, (1, 0, 0, 0, 1, sign * shift))
def cutout(x, level, magnitude=10, max_level=20, *args, **kwargs):
    """Gray out a random square patch in place; patch size scales with level.

    Pixels are set to (127, 127, 127, 0); the zero alpha marks the hole for
    later processing. Returns the (mutated) input image.
    """
    size = int((level / magnitude) * max_level)
    if size <= 0:
        return x
    w, h = x.size
    (u0, u1), (l0, l1) = _gen_cutout_coord(h, w, size)
    pixels = x.load()
    for i in range(u0, l0):
        for j in range(u1, l1):
            pixels[i, j] = (127, 127, 127, 0)
    return x
def _gen_cutout_coord(height, width, size):
height_loc = random.randint(0, height - 1)
width_loc = random.randint(0, width - 1)
upper_coord = (max(0, height_loc - size // 2),
max(0, width_loc - size // 2))
lower_coord = (min(height, height_loc + size // 2),
min(width, width_loc + size // 2))
return upper_coord, lower_coord
"""
For torch.Tensor
"""
class TorchCutout:
    """Zero out one random square patch of a (C, H, W) tensor."""
    def __init__(self, size=16):
        self.size = size

    def __call__(self, img):
        h, w = img.shape[-2:]
        (top, left), (bottom, right) = _gen_cutout_coord(h, w, self.size)
        patch_h = bottom - top
        patch_w = right - left
        assert patch_h > 0
        assert patch_w > 0
        # multiplicative mask: ones everywhere except the cutout patch
        mask = torch.ones_like(img)
        mask[:, top:bottom, left:right] = torch.zeros((img.shape[0], patch_h, patch_w))
        return img * mask

    def __repr__(self):
        return f"TorchCutout(size={self.size})"
class GaussianNoise:
    """Add i.i.d. zero-mean Gaussian noise with the given standard deviation."""
    def __init__(self, std=0.15):
        self.std = std

    def __call__(self, x):
        with torch.no_grad():
            noise = torch.randn_like(x) * self.std
            return x + noise

    def __repr__(self):
        return f"GaussianNoise(std={self.std})"
class BatchRandomFlip:
    """Horizontally flip each image of a batch independently.

    An image is flipped when random.random() > flip_prob, i.e. with
    probability 1 - flip_prob (equivalent for the default 0.5).
    """
    def __init__(self, flip_prob=0.5):
        self.p = flip_prob

    def __call__(self, x):
        with torch.no_grad():
            out = []
            for img in x:
                if random.random() > self.p:
                    out.append(torch.flip(img, (-1,)))
                else:
                    out.append(img)
            return torch.stack(out, 0)

    def __repr__(self):
        return f"BatchRandomFlip(flip_prob={self.p})"
class RandomFlip:
    """Horizontally flip a single image.

    The flip happens when random.random() > flip_prob, i.e. with probability
    1 - flip_prob (equivalent for the default 0.5).
    """
    def __init__(self, flip_prob=0.5):
        self.p = flip_prob

    def __call__(self, x):
        if random.random() <= self.p:
            return x
        return torch.flip(x, (-1,))

    def __repr__(self):
        return f"RandomFlip(flip_prob={self.p})"
class BatchRandomCrop:
    """Reflection-pad a batch and take an independent random crop per image."""
    def __init__(self, padding=4):
        self.pad = padding

    def __call__(self, x):
        with torch.no_grad():
            b, _, h, w = x.shape
            padded = F.pad(x, [self.pad] * 4, mode="reflect")
            # one vertical and one horizontal offset per image
            offs_v = torch.randint(0, 1 + self.pad * 2, (b,))
            offs_h = torch.randint(0, 1 + self.pad * 2, (b,))
            crops = [img[..., v:v + h, u:u + w]
                     for img, v, u in zip(padded, offs_v, offs_h)]
            return torch.stack(crops, 0)

    def __repr__(self):
        return f"BatchRandomCrop(padding={self.pad})"
class RandomCrop:
    """Reflection-pad a single (C, H, W) image and take one random crop."""
    def __init__(self, padding=4):
        self.pad = padding

    def __call__(self, x):
        with torch.no_grad():
            _, h, w = x.shape
            padded = F.pad(x[None], [self.pad] * 4, mode="reflect")
            left = random.randint(0, self.pad * 2)
            top = random.randint(0, self.pad * 2)
            return padded[0, :, top:top + h, left:left + w]

    def __repr__(self):
        return f"RandomCrop(padding={self.pad})"
class ZCA:
    """Apply precomputed ZCA whitening (mean vector + decorrelation matrix,
    both numpy arrays over the flattened image) to a (C, H, W) tensor."""
    def __init__(self, mean, scale):
        self.mean = torch.from_numpy(mean).float()
        self.scale = torch.from_numpy(scale).float()

    def __call__(self, x):
        c, h, w = x.shape
        flat = x.reshape(-1)
        whitened = (flat - self.mean) @ self.scale
        return whitened.reshape(c, h, w)

    def __repr__(self):
        return f"ZCA()"
class GCN:
    """global contrast normalization"""
    # Centers a tensor and rescales it so its L2 norm equals `multiplier`.
    def __init__(self, multiplier=55, eps=1e-10):
        self.multiplier = multiplier
        self.eps = eps
    def __call__(self, x):
        # NOTE: subtracts the mean in place, mutating the caller's tensor.
        x -= x.mean()
        norm = x.norm(2)
        # guard against division by ~0 for (near-)constant inputs
        # NOTE(review): x.norm(2) is a 0-dim tensor; boolean-mask assignment
        # on a 0-dim tensor is relied on here — confirm it is supported on
        # the targeted torch version.
        norm[norm < self.eps] = 1
        return self.multiplier * x / norm
    def __repr__(self):
        return f"GCN(multiplier={self.multiplier}, eps={self.eps})"
"""
For numpy.array
"""
def numpy_batch_gcn(images, multiplier=55, eps=1e-10):
    """Global contrast normalization over a batch of NHWC images.

    Each image is mean-centered and rescaled so its L2 norm equals
    ``multiplier``; images with a norm below ``eps`` are left unscaled
    (divided by 1) to avoid division by zero. Returns a float64 array.
    """
    # bugfix: np.float was removed in NumPy 1.24; float64 is the dtype it aliased.
    images = images.astype(np.float64)
    images -= images.mean(axis=(1, 2, 3), keepdims=True)
    per_image_norm = np.sqrt(np.square(images).sum((1, 2, 3), keepdims=True))
    per_image_norm[per_image_norm < eps] = 1
    return multiplier * images / per_image_norm
| 7,397 | 27.344828 | 98 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/augmentation/augmentation_class.py | import torch
import torchvision.transforms as tt
from . import augmentation_pool as aug_pool
from .rand_augment import RandAugment
class ReduceChannelwithNormalize:
    """ Reduce alpha channel of RGBA """
    # Drops the alpha channel of a 4-channel (RGBA) tensor, normalizes the
    # RGB part (ZCA or per-channel mean/scale), and zeroes every pixel whose
    # alpha is 0 (e.g. pixels erased by cutout, which writes alpha 0).
    def __init__(self, mean, scale, zca):
        self.mean = mean
        self.scale = scale
        self.zca = zca
    def __call__(self, tch_img):
        rgb = tch_img[:3]
        # positions where alpha == 0
        i1, i2 = torch.where(tch_img[3] == 0)
        if self.zca:
            # NOTE(review): GCN is applied to the full 4-channel tensor
            # rather than the 3-channel `rgb` slice — confirm intentional.
            rgb = aug_pool.GCN()(tch_img)
            rgb = aug_pool.ZCA(self.mean, self.scale)(rgb)
        else:
            rgb = tt.functional.normalize(rgb, self.mean, self.scale, True)
        rgb[:, i1, i2] = 0
        return rgb
    def __repr__(self):
        return f"ReduceChannelwithNormalize(mean={self.mean}, scale={self.scale})"
class RGB2RGBA:
    """Convert an image to RGBA via its ``convert`` method (PIL-style API)."""
    def __call__(self, x):
        return x.convert("RGBA")

    def __repr__(self):
        return "RGB2RGBA()"
class StrongAugmentation:
    """
    Strong augmentation class
    including RandAugment and Cutout
    """
    def __init__(
        self,
        img_size: int,
        mean: list,
        scale: list,
        flip: bool,
        crop: bool,
        alg: str = "fixmatch",
        zca: bool = False,
        cutout: bool = True,
    ):
        # PIL-stage ops run before ToTensor; tensor-stage ops after it
        augmentations = [tt.ToPILImage()]
        if flip:
            augmentations += [tt.RandomHorizontalFlip(p=0.5)]
        if crop:
            # reflect-padded random crop, padding = 12.5% of the image size
            augmentations += [tt.RandomCrop(img_size, int(img_size*0.125), padding_mode="reflect")]
        augmentations += [
            # alpha channel carries cutout's "hole" marker through RandAugment
            RGB2RGBA(),
            RandAugment(alg=alg),
            tt.ToTensor(),
            # drops alpha and normalizes (ZCA or mean/scale)
            ReduceChannelwithNormalize(mean, scale, zca)
        ]
        if cutout:
            augmentations += [aug_pool.TorchCutout(16)]
        self.augmentations = tt.Compose(augmentations)
    def __call__(self, img):
        return self.augmentations(img)
    def __repr__(self):
        return repr(self.augmentations)
class WeakAugmentation:
    """
    Weak augmentation class
    including horizontal flip, random crop, and gaussian noise
    """
    def __init__(
        self,
        img_size: int,
        mean: list,
        scale: list,
        flip=True,
        crop=True,
        noise=True,
        zca=False
    ):
        # PIL-stage ops run before ToTensor; tensor-stage ops after it
        augmentations = [tt.ToPILImage()]
        if flip:
            augmentations.append(tt.RandomHorizontalFlip())
        if crop:
            # reflect-padded random crop, padding = 12.5% of the image size
            augmentations.append(tt.RandomCrop(img_size, int(img_size*0.125), padding_mode="reflect"))
        augmentations += [tt.ToTensor()]
        if zca:
            # contrast-normalize then ZCA-whiten
            augmentations += [aug_pool.GCN(), aug_pool.ZCA(mean, scale)]
        else:
            augmentations += [tt.Normalize(mean, scale, True)]
        if noise:
            augmentations.append(aug_pool.GaussianNoise())
        self.augmentations = tt.Compose(augmentations)
    def __call__(self, img):
        return self.augmentations(img)
    def __repr__(self):
        return repr(self.augmentations)
| 2,986 | 25.433628 | 102 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/algs/consistency.py | import torch
from .utils import sharpening, tempereture_softmax
class ConsistencyRegularization:
    """
    Base class for consistency-regularization objectives.

    Parameters
    --------
    consistency: str
        consistency objective name
    threshold: float
        confidence threshold used to build the per-sample mask
    sharpen: float
        sharpening temperature applied to the target distribution
    temp_softmax: float
        temperature for temperature-scaled softmax
    """
    def __init__(
        self,
        consistency,
        threshold: float = None,
        sharpen: float = None,
        temp_softmax: float = None
    ):
        self.consistency = consistency
        self.threshold = threshold
        self.sharpen = sharpen
        self.tau = temp_softmax

    def __call__(self, stu_preds, tea_logits, *args, **kwargs):
        confidence_mask = self.gen_mask(tea_logits)
        soft_targets = self.adjust_target(tea_logits)
        return stu_preds, soft_targets, confidence_mask

    def adjust_target(self, targets):
        # precedence: sharpening, then temperature softmax, then plain softmax
        if self.sharpen is not None:
            return sharpening(targets.softmax(1), self.sharpen)
        if self.tau is not None:
            return tempereture_softmax(targets, self.tau)
        return targets.softmax(1)

    def gen_mask(self, targets):
        top_confidence = targets.softmax(1).max(1)[0]
        if not self.threshold:
            # threshold of None or 0 keeps every sample
            return torch.ones_like(top_confidence)
        return (top_confidence >= self.threshold).float()

    def __repr__(self):
        return f"Consistency(threshold={self.threshold}, sharpen={self.sharpen}, tau={self.tau})"
| 1,674 | 26.916667 | 97 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/algs/utils.py | import torch
import torch.nn as nn
def make_pseudo_label(logits, threshold):
    """Return (argmax pseudo-labels, boolean mask of samples whose top
    softmax probability reaches ``threshold``)."""
    probs = logits.softmax(1)
    top_prob, hard_label = probs.max(1)
    confident = top_prob >= threshold
    return hard_label, confident
def sharpening(soft_labels, temp):
    """Sharpen a probability distribution by raising it to the power ``temp``
    and renormalizing along dim 1."""
    powered = soft_labels.pow(temp)
    return powered / powered.abs().sum(1, keepdim=True)
def tempereture_softmax(logits, tau):
    """Temperature-scaled softmax: softmax(logits / tau) along dim 1."""
    scaled = logits / tau
    return scaled.softmax(1)
def mixup(x, y, alpha):
    """Mixup: blend each (x, y) pair with a randomly permuted partner using a
    per-sample Beta(alpha, alpha) coefficient; returns (mixed_x, mixed_y)."""
    batch = x.shape[0]
    perm = torch.randperm(batch)
    lam = torch.distributions.beta.Beta(alpha, alpha).sample((batch, 1)).to(x.device)
    # broadcast the coefficient over spatial dims for image batches
    lam_x = lam[..., None, None] if x.ndim == 4 else lam
    mixed_x = lam_x * x + (1 - lam_x) * x[perm]
    mixed_y = lam * y + (1 - lam) * y[perm]
    return mixed_x, mixed_y
def anneal_loss(logits, labels, loss, global_step, max_iter, num_classes, schedule):
    """Training-signal annealing (TSA): keep the loss only for samples whose
    ground-truth probability is still below a scheduled confidence threshold
    (which rises from 1/num_classes to 1 over training)."""
    tsa_start = 1 / num_classes
    threshold = get_tsa_threshold(
        schedule, global_step, max_iter,
        tsa_start, end=1
    )
    with torch.no_grad():
        probs = logits.softmax(1)
        gt_probs = probs.gather(1, labels[:, None]).squeeze()
        keep = gt_probs < threshold
    return (loss * keep).mean()
def get_tsa_threshold(schedule, global_step, max_iter, start, end):
    """Compute the TSA threshold for the given schedule.

    ``schedule`` is one of "linear", "exp", "log"; the coefficient derived
    from ``global_step / max_iter`` is mapped into [start, end].
    Raises NotImplementedError for unknown schedules.
    """
    import math
    step_ratio = global_step / max_iter
    if schedule == "linear":
        coef = step_ratio
    elif schedule == "exp":
        scale = 5
        arg = (step_ratio - 1) * scale
        # bugfix: plain floats have no .exp(); use math.exp for scalars while
        # keeping support for tensor-valued step ratios
        coef = arg.exp() if torch.is_tensor(arg) else math.exp(arg)
    elif schedule == "log":
        scale = 5
        arg = -step_ratio * scale
        coef = 1 - (arg.exp() if torch.is_tensor(arg) else math.exp(arg))
    else:
        raise NotImplementedError
    return coef * (end - start) + start
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/algs/vat.py | import torch
from .consistency import ConsistencyRegularization
class VAT(ConsistencyRegularization):
    """
    Virtual Adversarial Training https://arxiv.org/abs/1704.03976
    Parameters
    --------
    consistency: str
        consistency objective name
    threshold: float
        threshold to make mask
    sharpen: float
        sharpening temperature for target value
    temp_softmax: float
        temperature for temperature softmax
    objective: function
        objective function
    eps: float
        virtual adversarial noise norm
    xi: float
        perturbation for finite differential method
    n_iter: int
        number of iterations for power method
    """
    def __init__(
        self,
        consistency,
        threshold: float = 1.,
        sharpen: float = None,
        temp_softmax: float = None,
        objective = None,
        eps = 1.0,
        xi = 1e-6,
        n_iter = 1
    ):
        super().__init__(
            consistency,
            threshold,
            sharpen,
            temp_softmax
        )
        self.eps = eps
        self.xi = xi
        self.n_iter = n_iter
        self.obj_func = objective
    def __call__(
        self,
        tea_logits,
        w_data,
        stu_forward,
        *args,
        **kwargs
    ):
        # mask and targets are derived from the (fixed) teacher logits
        mask = self.gen_mask(tea_logits)
        targets = self.adjust_target(tea_logits)
        # power iteration: refine a random direction d toward the input
        # perturbation that most increases the objective
        d = torch.randn_like(w_data)
        d = self.__normalize(d)
        for _ in range(self.n_iter):
            d.requires_grad = True
            # finite-difference probe at distance xi along d
            x_hat = w_data + self.xi * d
            y = stu_forward(x_hat)
            loss = self.obj_func(y, targets)
            d = torch.autograd.grad(loss, d)[0]
            d = self.__normalize(d).detach()
        # evaluate the student at the adversarial point (norm eps along d)
        x_hat = w_data + self.eps * d
        y = stu_forward(x_hat)
        return y, targets, mask
    def __normalize(self, v):
        # two-stage per-sample normalization: first divide by the max |v|
        # entry, then by the L2 norm
        v = v / (1e-12 + self.__reduce_max(v.abs(), range(1, len(v.shape)))) # to avoid overflow by v.pow(2)
        v = v / (1e-6 + v.pow(2).sum(list(range(1, len(v.shape))), keepdim=True)).sqrt()
        return v
    def __reduce_max(self, v, idx_list):
        # max-reduce over every non-batch dimension, keeping dims for broadcast
        for i in idx_list:
            v = v.max(i, keepdim=True)[0]
        return v
    def __repr__(self):
        return f"VAT(threshold={self.threshold}, \
            sharpen={self.sharpen}, \
            tau={self.tau}, \
            eps={self.eps}), \
            xi={self.xi}"
| 2,419 | 26.5 | 108 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/algs/pseudo_label.py | import torch
import torch.nn.functional as F
from .consistency import ConsistencyRegularization
from ..consistency.cross_entropy import CrossEntropy
from .utils import make_pseudo_label, sharpening
class PseudoLabel(ConsistencyRegularization):
    """
    Pseudo-labeling objective: hard argmax labels from the teacher, masked by
    a confidence threshold.

    Parameters
    --------
    consistency: str
        consistency objective name
    threshold: float
        minimum top-probability for a sample to receive a pseudo-label
    sharpen: float
        sharpening temperature for target value
    temp_softmax: float
        temperature for temperature softmax
    """
    def __init__(
        self,
        consistency,
        threshold = 0.95,
        sharpen: float = None,
        temp_softmax: float = None
    ):
        super().__init__(
            consistency,
            threshold,
            sharpen,
            temp_softmax
        )

    def __call__(self, stu_preds, tea_logits, *args, **kwargs):
        pseudo_targets, confidence_mask = make_pseudo_label(tea_logits, self.threshold)
        return stu_preds, pseudo_targets, confidence_mask

    def __repr__(self):
        return f"PseudoLabel(threshold={self.threshold}, sharpen={self.sharpen}, tau={self.tau})"
| 1,136 | 25.44186 | 97 | py |
pytorch-consistency-regularization | pytorch-consistency-regularization-master/ssl_lib/algs/ict.py | import torch
from .consistency import ConsistencyRegularization
from .utils import mixup
class ICT(ConsistencyRegularization):
    """
    Interpolation Consistency Training https://arxiv.org/abs/1903.03825

    Inputs and teacher targets are mixed with the same Beta(alpha, alpha)
    coefficient; the student is trained to be consistent on the mixtures.

    Parameters
    --------
    consistency: str
        consistency objective name
    threshold: float
        threshold to make mask
    sharpen: float
        sharpening temperature for target value
    temp_softmax: float
        temperature for temperature softmax
    alpha: float
        beta distribution parameter
    """
    def __init__(
        self,
        consistency,
        threshold: float = 1.,
        sharpen: float = None,
        temp_softmax: float = None,
        alpha: float = 0.1
    ):
        super().__init__(consistency, threshold, sharpen, temp_softmax)
        self.alpha = alpha

    def __call__(
        self,
        tea_logits,
        w_data,
        stu_forward,
        *args,
        **kwargs
    ):
        conf_mask = self.gen_mask(tea_logits)
        soft_targets = self.adjust_target(tea_logits)
        # Mix inputs and targets with a shared mixup coefficient.
        x_mix, t_mix = mixup(w_data, soft_targets, self.alpha)
        stu_out = stu_forward(x_mix)
        return stu_out, t_mix, conf_mask

    def __repr__(self):
        return f"ICT(threshold={self.threshold}, sharpen={self.sharpen}, tau={self.tau}, alpha={self.alpha})"
| 1,377 | 24.054545 | 109 | py |
rulstm | rulstm-master/FEATEXT/extract_example_obj.py | import torch
import numpy as np
from torch import nn
from pretrainedmodels import bninception
from torchvision import transforms
from glob import glob
from PIL import Image
import lmdb
from tqdm import tqdm
from os.path import basename
# Build bag-of-objects features: one 352-d histogram per frame, where entry c
# accumulates the detection confidences (d[5]) of all boxes whose class id is
# d[0]. Features are written to an LMDB dataset keyed by frame name.
env = lmdb.open('features/obj', map_size=1099511627776)
video_name = 'P01_01_frame_{:010d}.jpg'
detections = np.load('data/sample_obj.npy', allow_pickle=True, encoding='bytes')

for i, dets in enumerate(tqdm(detections, 'Extracting features')):
    feat = np.zeros(352, dtype='float32')
    for d in dets:
        feat[int(d[0])] += d[5]
    key = video_name.format(i + 1)  # frame numbers are 1-indexed
    with env.begin(write=True) as txn:
        # Store the raw float32 bytes explicitly, consistent with the RGB and
        # flow extractors (readers decode the value with np.frombuffer).
        txn.put(key.encode(), feat.tobytes())
| 681 | 26.28 | 80 | py |
rulstm | rulstm-master/FEATEXT/extract_example_rgb.py | import torch
from torch import nn
from pretrainedmodels import bninception
from torchvision import transforms
from glob import glob
from PIL import Image
import lmdb
from tqdm import tqdm
from os.path import basename
from argparse import ArgumentParser
# Extract 1024-d RGB features with a TSN BNInception backbone and store them
# in an LMDB dataset keyed by frame file name.
env = lmdb.open('features/rgb', map_size=1099511627776)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = bninception(pretrained=None)
# TSN checkpoints wrap the backbone as module.base_model.*; strip the prefix
# so the keys match the bare BNInception model
state_dict = torch.load('models/TSN-rgb.pth.tar')['state_dict']
state_dict = {k.replace('module.base_model.','') : v for k,v in state_dict.items()}
model.load_state_dict(state_dict, strict=False)
# drop the classifier: the pooled conv output is the feature vector
model.last_linear = nn.Identity()
model.global_pool = nn.AdaptiveAvgPool2d(1)
model.to(device)
# BNInception was trained with BGR inputs in [0, 255] and mean subtraction
transform = transforms.Compose([
        transforms.Resize([256, 454]),
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x[[2,1,0],...]*255), #to BGR
        transforms.Normalize(mean=[104, 117, 128],
                            std=[1, 1, 1]),
    ])
imgs = sorted(glob('data/sample_rgb/*.jpg'))
model.eval()
for im in tqdm(imgs,'Extracting features'):
    key = basename(im)  # LMDB key is the frame file name
    img = Image.open(im)
    data = transform(img).unsqueeze(0).to(device)
    feat = model(data).squeeze().detach().cpu().numpy()
    with env.begin(write=True) as txn:
        # store the raw float32 bytes; readers decode with np.frombuffer
        txn.put(key.encode(),feat.tobytes())
| 1,291 | 26.489362 | 83 | py |
rulstm | rulstm-master/FEATEXT/extract_example_flow.py | import torch
from torch import nn
from pretrainedmodels import bninception
from torchvision import transforms
from glob import glob
from PIL import Image
import lmdb
from tqdm import tqdm
from os.path import basename
from argparse import ArgumentParser
# Extract optical-flow features with a TSN BNInception backbone whose first
# conv takes a stack of 10 channels (5 consecutive u/v flow pairs), and store
# them in an LMDB dataset keyed by frame name.
env = lmdb.open('features/flow', map_size=1099511627776)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = bninception(pretrained=None)
# replace the stem conv: 10 input channels = 5 stacked (u, v) flow frames
model.conv1_7x7_s2 = nn.Conv2d(10, 64,kernel_size=(7,7), stride=(2,2), padding=(3,3))
# TSN checkpoints wrap the backbone as module.base_model.*; strip the prefix
state_dict = torch.load('models/TSN-flow.pth.tar')['state_dict']
state_dict = {k.replace('module.base_model.','') : v for k,v in state_dict.items()}
model.load_state_dict(state_dict, strict=False)
# drop the classifier: the pooled conv output is the feature vector
model.last_linear = nn.Identity()
model.global_pool = nn.AdaptiveAvgPool2d(1)
model.to(device)
# flow images are single-channel in [0, 255] with 128 meaning zero motion
transform = transforms.Compose([
        transforms.Resize([256, 454]),
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x*255),
        transforms.Normalize(mean=[128],
                            std=[1]),
    ])
imgs = sorted(glob('data/sample_flow/*_u_*.jpg'))
# sliding buffer of the last 5 (u, v) transformed flow pairs (10 tensors)
flow_buffer = []
model.eval()
for im in tqdm(imgs,'Extracting features'):
    key = basename(im).replace('flow_u_','frame_')
    img_u = Image.open(im).convert('L')
    img_v = Image.open(im.replace('_u_','_v_')).convert('L')
    #repeat the first five frames
    for _ in range(1 if len(flow_buffer)>0 else 5):
        flow_buffer.append(transform(img_u))
        flow_buffer.append(transform(img_v))
    # keep at most 10 channels (oldest u/v pair dropped together)
    if len(flow_buffer)>10:
        del flow_buffer[0]
        del flow_buffer[0]
    if len(flow_buffer)==10:
        data = torch.cat(flow_buffer[-10:],0).unsqueeze(0).to(device)
        feat = model(data).squeeze().detach().cpu().numpy()
        with env.begin(write=True) as txn:
            # store the raw float32 bytes; readers decode with np.frombuffer
            txn.put(key.encode(),feat.tobytes())
| 1,787 | 29.827586 | 85 | py |
rulstm | rulstm-master/RULSTM/main.py | """Main training/test program for RULSTM"""
from argparse import ArgumentParser
from dataset import SequenceDataset
from os.path import join
from models import RULSTM, RULSTMFusion
import torch
from torch.utils.data import DataLoader
from torch.nn import functional as F
from utils import topk_accuracy, ValueMeter, topk_accuracy_multiple_timesteps, get_marginal_indexes, marginalize, softmax, topk_recall_multiple_timesteps, tta, predictions_to_json, MeanTopKRecallMeter
from tqdm import tqdm
import numpy as np
import pandas as pd
import json
# Display floats as zero-padded two-decimal values in printed score tables.
pd.options.display.float_format = '{:05.2f}'.format

# Command-line interface.
# FIX: the `choices` list for `mode` previously contained 'test' twice;
# the duplicate entry has been removed (argparse semantics are unchanged,
# the duplicate only cluttered the --help output).
parser = ArgumentParser(description="Training program for RULSTM")
parser.add_argument('mode', type=str, choices=['train', 'validate', 'test', 'validate_json'], default='train',
                    help="Whether to perform training, validation or test.\
                        If test is selected, --json_directory must be used to provide\
                        a directory in which to save the generated jsons.")
parser.add_argument('path_to_data', type=str,
                    help="Path to the data folder, \
                        containing all LMDB datasets")
parser.add_argument('path_to_models', type=str,
                    help="Path to the directory where to save all models")
parser.add_argument('--alpha', type=float, default=0.25,
                    help="Distance between time-steps in seconds")
parser.add_argument('--S_enc', type=int, default=6,
                    help="Number of encoding steps. \
                        If early recognition is performed, \
                        this value is discarded.")
parser.add_argument('--S_ant', type=int, default=8,
                    help="Number of anticipation steps. \
                        If early recognition is performed, \
                        this is the number of frames sampled for each action.")
parser.add_argument('--task', type=str, default='anticipation', choices=[
                    'anticipation', 'early_recognition'], help='Task to tackle: \
                        anticipation or early recognition')
parser.add_argument('--img_tmpl', type=str,
                    default='frame_{:010d}.jpg', help='Template to use to load the representation of a given frame')
parser.add_argument('--modality', type=str, default='rgb',
                    choices=['rgb', 'flow', 'obj', 'fusion'], help = "Modality. Rgb/flow/obj represent single branches, whereas fusion indicates the whole model with modality attention.")
parser.add_argument('--sequence_completion', action='store_true',
                    help='A flag to selec sequence completion pretraining rather than standard training.\
                        If not selected, a valid checkpoint for sequence completion pretraining\
                        should be available unless --ignore_checkpoints is specified')
parser.add_argument('--mt5r', action='store_true')

parser.add_argument('--num_class', type=int, default=2513,
                    help='Number of classes')
parser.add_argument('--hidden', type=int, default=1024,
                    help='Number of hidden units')
parser.add_argument('--feat_in', type=int, default=1024,
                    help='Input size. If fusion, it is discarded (see --feats_in)')
parser.add_argument('--feats_in', type=int, nargs='+', default=[1024, 1024, 352],
                    help='Input sizes when the fusion modality is selected.')
parser.add_argument('--dropout', type=float, default=0.8, help="Dropout rate")

parser.add_argument('--batch_size', type=int, default=128, help="Batch Size")
parser.add_argument('--num_workers', type=int, default=4,
                    help="Number of parallel thread to fetch the data")
parser.add_argument('--lr', type=float, default=0.01, help="Learning rate")
parser.add_argument('--momentum', type=float, default=0.9, help="Momentum")

parser.add_argument('--display_every', type=int, default=10,
                    help="Display every n iterations")
parser.add_argument('--epochs', type=int, default=100, help="Training epochs")
parser.add_argument('--visdom', action='store_true',
                    help="Whether to log using visdom")

parser.add_argument('--ignore_checkpoints', action='store_true',
                    help='If specified, avoid loading existing models (no pre-training)')
parser.add_argument('--resume', action='store_true',
                    help='Whether to resume suspended training')

parser.add_argument('--ek100', action='store_true',
                    help="Whether to use EPIC-KITCHENS-100")
parser.add_argument('--json_directory', type=str, default = None, help = 'Directory in which to save the generated jsons.')

args = parser.parse_args()
if args.mode == 'test' or args.mode=='validate_json':
    # json-producing modes need a directory to write predictions to
    assert args.json_directory is not None
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# the experiment name encodes the key hyper-parameters; it is used both to
# locate checkpoints on disk and as the visdom environment name
if args.task == 'anticipation':
    exp_name = f"RULSTM-{args.task}_{args.alpha}_{args.S_enc}_{args.S_ant}_{args.modality}"
else:
    exp_name = f"RULSTM-{args.task}_{args.alpha}_{args.S_ant}_{args.modality}"
if args.mt5r:
    exp_name += '_mt5r'
if args.sequence_completion:
    exp_name += '_sequence_completion'
if args.visdom:
    # if visdom is required
    # load visdom loggers from torchnet
    from torchnet.logger import VisdomPlotLogger, VisdomSaver
    # define loss and accuracy logger
    visdom_loss_logger = VisdomPlotLogger('line', env=exp_name, opts={
        'title': 'Loss', 'legend': ['training', 'validation']})
    visdom_accuracy_logger = VisdomPlotLogger('line', env=exp_name, opts={
        'title': 'Top5 Acc@1s', 'legend': ['training', 'validation']})
    # define a visdom saver to save the plots
    visdom_saver = VisdomSaver(envs=[exp_name])
def get_loader(mode, override_modality = None):
    """Build a DataLoader over pre-extracted features for the given split.

    mode: 'training', 'validation' or a 'test_*' split name (test splits
        load challenge csvs without labels).
    override_modality: force a single modality regardless of args.modality
        (used by early-recognition fusion to build one loader per branch).
    """
    if override_modality:
        path_to_lmdb = join(args.path_to_data, override_modality)
    elif args.modality == 'fusion':
        # one LMDB dataset per branch
        path_to_lmdb = [join(args.path_to_data, m) for m in ['rgb', 'flow', 'obj']]
    else:
        path_to_lmdb = join(args.path_to_data, args.modality)

    dataset = SequenceDataset(
        path_to_lmdb=path_to_lmdb,
        path_to_csv=join(args.path_to_data, f"{mode}.csv"),
        time_step=args.alpha,
        img_tmpl=args.img_tmpl,
        action_samples=args.S_ant if args.task == 'early_recognition' else None,
        past_features=args.task == 'anticipation',
        sequence_length=args.S_enc + args.S_ant,
        label_type=['verb', 'noun', 'action'] if args.mode != 'train' else 'action',
        challenge='test' in mode,
    )
    return DataLoader(dataset, batch_size=args.batch_size,
                      num_workers=args.num_workers, pin_memory=True,
                      shuffle=mode == 'training')
def get_model():
    """Build the model for the configured modality.

    Returns a single RULSTM branch for rgb/flow/obj, a list of the three
    branches for early-recognition fusion (scores are averaged by the
    caller), or a RULSTMFusion wrapper for anticipation fusion.
    """
    if args.modality != 'fusion': # single branch
        model = RULSTM(args.num_class, args.feat_in, args.hidden,
                       args.dropout, sequence_completion=args.sequence_completion)
        # load checkpoint only if not in sequence completion mode
        # and if the flag --ignore_checkpoints has not been specified
        if args.mode == 'train' and not args.ignore_checkpoints and not args.sequence_completion:
            checkpoint = torch.load(join(
                args.path_to_models, exp_name + '_sequence_completion_best.pth.tar'))['state_dict']
            model.load_state_dict(checkpoint)
    else:
        # fusion: build the three single-modality branches
        rgb_model = RULSTM(args.num_class, args.feats_in[0], args.hidden, args.dropout, return_context = args.task=='anticipation')
        flow_model = RULSTM(args.num_class, args.feats_in[1], args.hidden, args.dropout, return_context = args.task=='anticipation')
        obj_model = RULSTM(args.num_class, args.feats_in[2], args.hidden, args.dropout, return_context = args.task=='anticipation')
        # initialize each branch from its individually trained best checkpoint
        if args.task=='early_recognition' or (args.mode == 'train' and not args.ignore_checkpoints):
            checkpoint_rgb = torch.load(join(args.path_to_models,\
                exp_name.replace('fusion','rgb') +'_best.pth.tar'))['state_dict']
            checkpoint_flow = torch.load(join(args.path_to_models,\
                exp_name.replace('fusion','flow') +'_best.pth.tar'))['state_dict']
            checkpoint_obj = torch.load(join(args.path_to_models,\
                exp_name.replace('fusion','obj') +'_best.pth.tar'))['state_dict']
            rgb_model.load_state_dict(checkpoint_rgb)
            flow_model.load_state_dict(checkpoint_flow)
            obj_model.load_state_dict(checkpoint_obj)
        if args.task == 'early_recognition':
            # early recognition fuses by score averaging: return the branches
            return [rgb_model, flow_model, obj_model]
        model = RULSTMFusion([rgb_model, flow_model, obj_model], args.hidden, args.dropout)
    return model
def load_checkpoint(model, best=False):
    """Restore model weights from the last (or best) checkpoint.

    Returns the (epoch, perf, best_perf) triple stored alongside the weights.
    """
    suffix = '_best.pth.tar' if best else '.pth.tar'
    chk = torch.load(join(args.path_to_models, exp_name + suffix))
    model.load_state_dict(chk['state_dict'])
    return chk['epoch'], chk['perf'], chk['best_perf']
def save_model(model, epoch, perf, best_perf, is_best=False):
    """Checkpoint the model; keep a separate copy for the best epoch."""
    state = {'state_dict': model.state_dict(), 'epoch': epoch,
             'perf': perf, 'best_perf': best_perf}
    torch.save(state, join(args.path_to_models, exp_name + '.pth.tar'))
    if is_best:
        torch.save(state, join(args.path_to_models, exp_name + '_best.pth.tar'))
    if args.visdom:
        # save visdom logs for persitency
        visdom_saver.save()
def log(mode, epoch, loss_meter, accuracy_meter, best_perf=None, green=False):
    """Print one progress line (optionally in green) and mirror the values
    to visdom when enabled."""
    msg = (f"[{mode}] Epoch: {epoch:0.2f}. "
           f"Loss: {loss_meter.value():.2f}. "
           f"Accuracy: {accuracy_meter.value():.2f}% ")
    if best_perf:
        msg += f"[best: {best_perf:0.2f}]%"
    if green:
        msg = '\033[92m' + msg
    # always emit the color reset so a green line does not bleed over
    print(msg + '\033[0m')
    if args.visdom:
        visdom_loss_logger.log(epoch, loss_meter.value(), name=mode)
        visdom_accuracy_logger.log(epoch, accuracy_meter.value(), name=mode)
def get_scores_early_recognition_fusion(models, loaders):
    """Late-fuse per-modality predictions by averaging verb/noun/action
    scores over the branch models.

    Labels/ids (everything past the first three outputs of get_scores) are
    identical across loaders, so those of the last branch are returned.
    """
    summed = [0, 0, 0]  # verb, noun, action score accumulators
    for model, loader in zip(models, loaders):
        outs = get_scores(model, loader)
        for j in range(3):
            summed[j] += outs[j]
    averaged = [s / len(models) for s in summed]
    return averaged + list(outs[3:])
def get_scores(model, loader, challenge=False, include_discarded = False):
    """Run the model over a loader and return verb/noun/action scores.

    Action scores come directly from the network; verb and noun scores are
    obtained by marginalizing the softmaxed action probabilities.

    challenge: if True, never return labels (challenge csvs have none).
    include_discarded: pad scores with zeros for segments the dataset
        discarded (e.g., not enough frames before the action start) so that
        the output covers every annotation id.

    Returns (verb_scores, noun_scores, action_scores, [verb_labels,
    noun_labels, action_labels,] ids); the label arrays are present only
    when labels are available and challenge is False.
    """
    model.eval()
    predictions = []
    labels = []
    ids = []
    with torch.set_grad_enabled(False):
        for batch in tqdm(loader, 'Evaluating...', len(loader)):
            x = batch['past_features' if args.task ==
                      'anticipation' else 'action_features']
            if type(x) == list:
                x = [xx.to(device) for xx in x]
            else:
                x = x.to(device)
            y = batch['label'].numpy()
            ids.append(batch['id'])
            # keep only the last S_ant time-steps (the anticipation horizon)
            preds = model(x).cpu().numpy()[:, -args.S_ant:, :]
            predictions.append(preds)
            labels.append(y)
    action_scores = np.concatenate(predictions)
    labels = np.concatenate(labels)
    ids = np.concatenate(ids)
    actions = pd.read_csv(
        join(args.path_to_data, 'actions.csv'), index_col='id')
    # indexes mapping each verb/noun class to its action classes
    vi = get_marginal_indexes(actions, 'verb')
    ni = get_marginal_indexes(actions, 'noun')
    action_probs = softmax(action_scores.reshape(-1, action_scores.shape[-1]))
    # marginalize action probabilities to obtain verb and noun scores
    verb_scores = marginalize(action_probs, vi).reshape(
        action_scores.shape[0], action_scores.shape[1], -1)
    noun_scores = marginalize(action_probs, ni).reshape(
        action_scores.shape[0], action_scores.shape[1], -1)
    if include_discarded:
        # append zero scores (and the stored labels/ids) for discarded segments
        dlab = np.array(loader.dataset.discarded_labels)
        dislab = np.array(loader.dataset.discarded_ids)
        ids = np.concatenate([ids, dislab])
        num_disc = len(dlab)
        labels = np.concatenate([labels, dlab])
        verb_scores = np.concatenate((verb_scores, np.zeros((num_disc, *verb_scores.shape[1:]))))
        noun_scores = np.concatenate((noun_scores, np.zeros((num_disc, *noun_scores.shape[1:]))))
        action_scores = np.concatenate((action_scores, np.zeros((num_disc, *action_scores.shape[1:]))))
    # labels.max()>0 heuristically detects whether real labels were loaded
    if labels.max()>0 and not challenge:
        return verb_scores, noun_scores, action_scores, labels[:, 0], labels[:, 1], labels[:, 2], ids
    else:
        return verb_scores, noun_scores, action_scores, ids
def trainval(model, loaders, optimizer, epochs, start_epoch, start_best_perf):
    """Training/Validation code.

    Runs the train and validation phases for each epoch, logs progress,
    and checkpoints the model after every epoch (keeping a separate copy
    of the best-performing one).

    loaders: dict with 'training' and 'validation' DataLoaders.
    start_epoch / start_best_perf: allow resuming a suspended run.
    """
    best_perf = start_best_perf # to keep track of the best performing epoch
    for epoch in range(start_epoch, epochs):
        # define training and validation meters
        loss_meter = {'training': ValueMeter(), 'validation': ValueMeter()}
        if args.mt5r:
            accuracy_meter = {'training': MeanTopKRecallMeter(args.num_class), 'validation': MeanTopKRecallMeter(args.num_class)}
        else:
            accuracy_meter = {'training': ValueMeter(), 'validation': ValueMeter()}
        for mode in ['training', 'validation']:
            # enable gradients only if training
            with torch.set_grad_enabled(mode == 'training'):
                if mode == 'training':
                    model.train()
                else:
                    model.eval()
                for i, batch in enumerate(loaders[mode]):
                    x = batch['past_features' if args.task ==
                              'anticipation' else 'action_features']
                    if type(x) == list:
                        x = [xx.to(device) for xx in x]
                    else:
                        x = x.to(device)
                    y = batch['label'].to(device)
                    bs = y.shape[0] # batch size
                    preds = model(x)
                    # take only last S_ant predictions
                    preds = preds[:, -args.S_ant:, :].contiguous()
                    # linearize predictions
                    linear_preds = preds.view(-1, preds.shape[-1])
                    # replicate the labels across timesteps and linearize
                    linear_labels = y.view(-1, 1).expand(-1,
                                                         preds.shape[1]).contiguous().view(-1)
                    # the loss supervises every anticipation time-step jointly
                    loss = F.cross_entropy(linear_preds, linear_labels)
                    # get the predictions for anticipation time = 1s (index -4) (anticipation)
                    # or for the last time-step (100%) (early recognition)
                    # top5 accuracy at 1s
                    idx = -4 if args.task == 'anticipation' else -1
                    # use top-5 for anticipation and top-1 for early recognition
                    k = 5 if args.task == 'anticipation' else 1
                    acc = topk_accuracy(
                        preds[:, idx, :].detach().cpu().numpy(), y.detach().cpu().numpy(), (k,))[0]*100
                    # store the values in the meters to keep incremental averages
                    loss_meter[mode].add(loss.item(), bs)
                    if args.mt5r:
                        accuracy_meter[mode].add(preds[:, idx, :].detach().cpu().numpy(),
                                                 y.detach().cpu().numpy())
                    else:
                        accuracy_meter[mode].add(acc, bs)
                    # if in training mode
                    if mode == 'training':
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()
                    # compute decimal epoch for logging
                    e = epoch + i/len(loaders[mode])
                    # log training during loop
                    # avoid logging the very first batch. It can be biased.
                    if mode == 'training' and i != 0 and i % args.display_every == 0:
                        log(mode, e, loss_meter[mode], accuracy_meter[mode])
                # log at the end of each epoch
                log(mode, epoch+1, loss_meter[mode], accuracy_meter[mode],
                    max(accuracy_meter[mode].value(), best_perf) if mode == 'validation'
                    else None, green=True)
        if best_perf < accuracy_meter['validation'].value():
            best_perf = accuracy_meter['validation'].value()
            is_best = True
        else:
            is_best = False
        # save checkpoint at the end of each train/val epoch
        save_model(model, epoch+1, accuracy_meter['validation'].value(), best_perf,
                   is_best=is_best)
def get_validation_ids():
    """Load the EK100 validation id lists (unseen participants and the tail
    verb/noun/action classes) as pandas Series of ids."""
    def _read_ids(name):
        # each csv is a single unnamed column of ids; squeeze -> Series
        return pd.read_csv(join(args.path_to_data, f"validation_{name}_ids.csv"),
                           names=['id'], squeeze=True)
    return (_read_ids('unseen_participants'), _read_ids('tail_verbs'),
            _read_ids('tail_nouns'), _read_ids('tail_actions'))
def get_many_shot():
    """Get many shot verbs, nouns and actions for class-aware metrics (Mean Top-5 Recall)"""
    # read the list of many shot verbs
    many_shot_verbs = pd.read_csv(
        join(args.path_to_data, 'EPIC_many_shot_verbs.csv'))['verb_class'].values
    # read the list of many shot nouns
    many_shot_nouns = pd.read_csv(
        join(args.path_to_data, 'EPIC_many_shot_nouns.csv'))['noun_class'].values
    # read the list of actions
    actions = pd.read_csv(join(args.path_to_data, 'actions.csv'))
    # an action is "many shot" if at least one
    # between the related verb and noun are many shot
    many_shot_actions = [
        row['id'] for _, row in actions.iterrows()
        if row['verb'] in many_shot_verbs or row['noun'] in many_shot_nouns
    ]
    return many_shot_verbs, many_shot_nouns, many_shot_actions
def main():
    """Entry point: build the model and dispatch on args.mode
    (train / validate / test / validate_json).

    NOTE(review): the `elif args.mode == 'validate'` branch appears twice
    below; the second occurrence is unreachable dead code because the first
    one already matches that mode. Kept as-is in this documentation pass.
    """
    model = get_model()
    # early-recognition fusion returns a list of branch models
    if type(model) == list:
        model = [m.to(device) for m in model]
    else:
        model.to(device)
    if args.mode == 'train':
        loaders = {m: get_loader(m) for m in ['training', 'validation']}
        if args.resume:
            start_epoch, _, start_best_perf = load_checkpoint(model)
        else:
            start_epoch = 0
            start_best_perf = 0
        optimizer = torch.optim.SGD(
            model.parameters(), lr=args.lr, momentum=args.momentum)
        trainval(model, loaders, optimizer, args.epochs,
                 start_epoch, start_best_perf)
    elif args.mode == 'validate':
        if args.task == 'early_recognition' and args.modality == 'fusion':
            loaders = [get_loader('validation', 'rgb'), get_loader('validation', 'flow'), get_loader('validation', 'obj')]
            verb_scores, noun_scores, action_scores, verb_labels, noun_labels, action_labels = get_scores_early_recognition_fusion(model, loaders)
        else:
            epoch, perf, _ = load_checkpoint(model, best=True)
            print(
                f"Loaded checkpoint for model {type(model)}. Epoch: {epoch}. Perf: {perf:0.2f}.")
            loader = get_loader('validation')
            verb_scores, noun_scores, action_scores, verb_labels, noun_labels, action_labels, ids = get_scores(model, loader, include_discarded=args.ek100)
        if not args.ek100:
            # EPIC-55 protocol: Top-1/Top-5 accuracy + many-shot Mean Top-5 Recall
            verb_accuracies = topk_accuracy_multiple_timesteps(
                verb_scores, verb_labels)
            noun_accuracies = topk_accuracy_multiple_timesteps(
                noun_scores, noun_labels)
            action_accuracies = topk_accuracy_multiple_timesteps(
                action_scores, action_labels)
            many_shot_verbs, many_shot_nouns, many_shot_actions = get_many_shot()
            verb_recalls = topk_recall_multiple_timesteps(
                verb_scores, verb_labels, k=5, classes=many_shot_verbs)
            noun_recalls = topk_recall_multiple_timesteps(
                noun_scores, noun_labels, k=5, classes=many_shot_nouns)
            action_recalls = topk_recall_multiple_timesteps(
                action_scores, action_labels, k=5, classes=many_shot_actions)
            all_accuracies = np.concatenate(
                [verb_accuracies, noun_accuracies, action_accuracies, verb_recalls, noun_recalls, action_recalls])
            # reorder rows to match the (verb, noun, action) x metric layout below
            all_accuracies = all_accuracies[[0, 1, 6, 2, 3, 7, 4, 5, 8]]
            indices = [
                ('Verb', 'Top-1 Accuracy'),
                ('Verb', 'Top-5 Accuracy'),
                ('Verb', 'Mean Top-5 Recall'),
                ('Noun', 'Top-1 Accuracy'),
                ('Noun', 'Top-5 Accuracy'),
                ('Noun', 'Mean Top-5 Recall'),
                ('Action', 'Top-1 Accuracy'),
                ('Action', 'Top-5 Accuracy'),
                ('Action', 'Mean Top-5 Recall'),
            ]
            # columns: anticipation times (s) or observed-action percentages
            if args.task == 'anticipation':
                cc = np.linspace(args.alpha*args.S_ant, args.alpha, args.S_ant, dtype=str)
            else:
                cc = [f"{c:0.1f}%" for c in np.linspace(0,100,args.S_ant+1)[1:]]
            scores = pd.DataFrame(all_accuracies*100, columns=cc, index=pd.MultiIndex.from_tuples(indices))
        else:
            # EK100 protocol: overall / unseen / tail Mean Top-5 Recall
            overall_verb_recalls = topk_recall_multiple_timesteps(
                verb_scores, verb_labels, k=5)
            overall_noun_recalls = topk_recall_multiple_timesteps(
                noun_scores, noun_labels, k=5)
            overall_action_recalls = topk_recall_multiple_timesteps(
                action_scores, action_labels, k=5)
            unseen, tail_verbs, tail_nouns, tail_actions = get_validation_ids()
            # boolean masks selecting the unseen-participant / tail subsets
            unseen_bool_idx = pd.Series(ids).isin(unseen).values
            tail_verbs_bool_idx = pd.Series(ids).isin(tail_verbs).values
            tail_nouns_bool_idx = pd.Series(ids).isin(tail_nouns).values
            tail_actions_bool_idx = pd.Series(ids).isin(tail_actions).values
            tail_verb_recalls = topk_recall_multiple_timesteps(
                verb_scores[tail_verbs_bool_idx], verb_labels[tail_verbs_bool_idx], k=5)
            tail_noun_recalls = topk_recall_multiple_timesteps(
                noun_scores[tail_nouns_bool_idx], noun_labels[tail_nouns_bool_idx], k=5)
            tail_action_recalls = topk_recall_multiple_timesteps(
                action_scores[tail_actions_bool_idx], action_labels[tail_actions_bool_idx], k=5)
            unseen_verb_recalls = topk_recall_multiple_timesteps(
                verb_scores[unseen_bool_idx], verb_labels[unseen_bool_idx], k=5)
            unseen_noun_recalls = topk_recall_multiple_timesteps(
                noun_scores[unseen_bool_idx], noun_labels[unseen_bool_idx], k=5)
            unseen_action_recalls = topk_recall_multiple_timesteps(
                action_scores[unseen_bool_idx], action_labels[unseen_bool_idx], k=5)
            all_accuracies = np.concatenate(
                [overall_verb_recalls, overall_noun_recalls, overall_action_recalls, unseen_verb_recalls, unseen_noun_recalls, unseen_action_recalls, tail_verb_recalls, tail_noun_recalls, tail_action_recalls]
            ) #9 x 8
            #all_accuracies = all_accuracies[[0, 1, 6, 2, 3, 7, 4, 5, 8]]
            indices = [
                ('Overall Mean Top-5 Recall', 'Verb'),
                ('Overall Mean Top-5 Recall', 'Noun'),
                ('Overall Mean Top-5 Recall', 'Action'),
                ('Unseen Mean Top-5 Recall', 'Verb'),
                ('Unseen Mean Top-5 Recall', 'Noun'),
                ('Unseen Mean Top-5 Recall', 'Action'),
                ('Tail Mean Top-5 Recall', 'Verb'),
                ('Tail Mean Top-5 Recall', 'Noun'),
                ('Tail Mean Top-5 Recall', 'Action'),
            ]
            if args.task == 'anticipation':
                cc = np.linspace(args.alpha*args.S_ant, args.alpha, args.S_ant, dtype=str)
            else:
                cc = [f"{c:0.1f}%" for c in np.linspace(0,100,args.S_ant+1)[1:]]
            scores = pd.DataFrame(all_accuracies*100, columns=cc, index=pd.MultiIndex.from_tuples(indices))
        print(scores)
        if args.task == 'anticipation':
            # time-to-action metric (only meaningful for anticipation)
            tta_verb = tta(verb_scores, verb_labels)
            tta_noun = tta(noun_scores, noun_labels)
            tta_action = tta(action_scores, action_labels)
            print(
                f"\nMean TtA(5): VERB: {tta_verb:0.2f} NOUN: {tta_noun:0.2f} ACTION: {tta_action:0.2f}")
    # NOTE(review): dead code — this duplicates the 'validate' branch above
    # and can never be reached.
    elif args.mode == 'validate':
        if args.task == 'early_recognition' and args.modality == 'fusion':
            loaders = [get_loader('validation', 'rgb'), get_loader('validation', 'flow'),
                       get_loader('validation', 'obj')]
            verb_scores, noun_scores, action_scores, verb_labels, noun_labels, action_labels = get_scores_early_recognition_fusion(
                model, loaders)
        else:
            epoch, perf, _ = load_checkpoint(model, best=True)
            print(
                f"Loaded checkpoint for model {type(model)}. Epoch: {epoch}. Perf: {perf:0.2f}.")
            loader = get_loader('validation')
            verb_scores, noun_scores, action_scores, verb_labels, noun_labels, action_labels,_ = get_scores(model,
                                                                                                            loader)
    elif 'test' in args.mode:
        # produce challenge json(s); EK100 has a single test set, EPIC-55 two
        if args.ek100:
            mm = ['timestamps']
        else:
            mm = ['seen', 'unseen']
        for m in mm:
            if args.task == 'early_recognition' and args.modality == 'fusion':
                loaders = [get_loader(f"test_{m}", 'rgb'), get_loader(f"test_{m}", 'flow'), get_loader(f"test_{m}", 'obj')]
                discarded_ids = loaders[0].dataset.discarded_ids
                verb_scores, noun_scores, action_scores, ids = get_scores_early_recognition_fusion(model, loaders)
            else:
                loader = get_loader(f"test_{m}")
                epoch, perf, _ = load_checkpoint(model, best=True)
                discarded_ids = loader.dataset.discarded_ids
                print(
                    f"Loaded checkpoint for model {type(model)}. Epoch: {epoch}. Perf: {perf:0.2f}.")
                verb_scores, noun_scores, action_scores, ids = get_scores(model, loader)
            # keep only the scores at 1s anticipation (or 100% observation)
            idx = -4 if args.task == 'anticipation' else -1
            # pad discarded segments with zero scores so every id is covered
            ids = list(ids) + list(discarded_ids)
            verb_scores = np.concatenate((verb_scores, np.zeros((len(discarded_ids), *verb_scores.shape[1:])))) [:,idx,:]
            noun_scores = np.concatenate((noun_scores, np.zeros((len(discarded_ids), *noun_scores.shape[1:])))) [:,idx,:]
            action_scores = np.concatenate((action_scores, np.zeros((len(discarded_ids), *action_scores.shape[1:])))) [:,idx,:]
            actions = pd.read_csv(join(args.path_to_data, 'actions.csv'))
            # map actions to (verb, noun) pairs
            a_to_vn = {a[1]['id']: tuple(a[1][['verb', 'noun']].values)
                       for a in actions.iterrows()}
            preds = predictions_to_json(verb_scores, noun_scores, action_scores, ids, a_to_vn, version = '0.2' if args.ek100 else '0.1', sls=True)
            if args.ek100:
                with open(join(args.json_directory,exp_name+f"_test.json"), 'w') as f:
                    f.write(json.dumps(preds, indent=4, separators=(',',': ')))
            else:
                with open(join(args.json_directory,exp_name+f"_{m}.json"), 'w') as f:
                    f.write(json.dumps(preds, indent=4, separators=(',',': ')))
    elif 'validate_json' in args.mode:
        # same json pipeline as test, but on the validation split
        if args.task == 'early_recognition' and args.modality == 'fusion':
            loaders = [get_loader("validation", 'rgb'), get_loader("validation", 'flow'), get_loader("validation", 'obj')]
            discarded_ids = loaders[0].dataset.discarded_ids
            verb_scores, noun_scores, action_scores, ids = get_scores_early_recognition_fusion(model, loaders)
        else:
            loader = get_loader("validation")
            epoch, perf, _ = load_checkpoint(model, best=True)
            discarded_ids = loader.dataset.discarded_ids
            print(
                f"Loaded checkpoint for model {type(model)}. Epoch: {epoch}. Perf: {perf:0.2f}.")
            verb_scores, noun_scores, action_scores, ids = get_scores(model, loader, challenge=True)
        idx = -4 if args.task == 'anticipation' else -1
        ids = list(ids) + list(discarded_ids)
        verb_scores = np.concatenate((verb_scores, np.zeros((len(discarded_ids), *verb_scores.shape[1:])))) [:,idx,:]
        noun_scores = np.concatenate((noun_scores, np.zeros((len(discarded_ids), *noun_scores.shape[1:])))) [:,idx,:]
        action_scores = np.concatenate((action_scores, np.zeros((len(discarded_ids), *action_scores.shape[1:])))) [:,idx,:]
        actions = pd.read_csv(join(args.path_to_data, 'actions.csv'))
        # map actions to (verb, noun) pairs
        a_to_vn = {a[1]['id']: tuple(a[1][['verb', 'noun']].values)
                   for a in actions.iterrows()}
        preds = predictions_to_json(verb_scores, noun_scores, action_scores, ids, a_to_vn, version = '0.2' if args.ek100 else '0.1', sls=True)
        with open(join(args.json_directory,exp_name+f"_validation.json"), 'w') as f:
            f.write(json.dumps(preds, indent=4, separators=(',',': ')))
# script entry point
if __name__ == '__main__':
    main()
| 30,172 | 46.219092 | 208 | py |
rulstm | rulstm-master/RULSTM/dataset.py | """ Implements a dataset object which allows to read representations from LMDB datasets in a multi-modal fashion
The dataset can sample frames for both the anticipation and early recognition tasks."""
import numpy as np
import lmdb
from tqdm import tqdm
from torch.utils import data
import pandas as pd
def read_representations(frames, env, tran=None):
    """Read a set of representations, given their frame names and an LMDB
    environment, and stack them into a (num_frames, feat_dim) float32 array.

    Applies `tran` to the stacked features if provided.

    Raises:
        KeyError: if a requested frame is missing from the LMDB dataset.
            (Previously the missing name was only printed and np.frombuffer
            then crashed on None with an unrelated TypeError.)
    """
    features = []
    # a single read transaction for all frames (cheaper than one per frame)
    with env.begin() as e:
        for f in frames:
            dd = e.get(f.strip().encode('utf-8'))
            if dd is None:
                raise KeyError(f"Frame '{f}' not found in LMDB dataset")
            # stored values are the raw float32 bytes of the feature vector
            features.append(np.frombuffer(dd, 'float32'))
    features = np.array(features)
    if tran:
        features = tran(features)
    return features
def read_data(frames, env, tran=None):
    """Wrapper around read_representations that also supports a list of
    LMDB environments, for multimodal loading (e.g., RGB + Flow)."""
    if not isinstance(env, list):
        # single environment: plain read
        return read_representations(frames, env, tran)
    # one feature matrix per modality/environment
    return [read_representations(frames, single_env, tran) for single_env in env]
class SequenceDataset(data.Dataset):
    """Dataset of pre-computed frame features stored in LMDB.

    For each annotated action it samples a fixed-length sequence of frames
    preceding the action start (anticipation) and, optionally, a set of
    frames evenly sampled inside the action (early recognition)."""
    def __init__(self, path_to_lmdb, path_to_csv, label_type = 'action',
                time_step = 0.25, sequence_length = 14, fps = 30,
                img_tmpl = "frame_{:010d}.jpg",
                transform = None,
                challenge = False,
                past_features = True,
                action_samples = None):
        """
            Inputs:
                path_to_lmdb: path to the folder containing the LMDB dataset
                path_to_csv: path to training/validation csv
                label_type: which label to return (verb, noun, or action)
                time_step: in seconds
                sequence_length: in time steps
                fps: framerate
                img_tmpl: image template to load the features
                tranform: transformation to apply to each sample
                challenge: allows to load csvs containing only time-stamp for the challenge
                past_features: if past features should be returned
                action_samples: number of frames to be evenly sampled from each action
        """
        # read the csv file (challenge csvs have no label columns)
        if challenge:
            self.annotations = pd.read_csv(path_to_csv, header=None, names=['video','start','end'])
        else:
            self.annotations = pd.read_csv(path_to_csv, header=None, names=['video','start','end','verb','noun','action'])
        self.challenge=challenge
        self.path_to_lmdb = path_to_lmdb
        self.time_step = time_step
        self.past_features = past_features
        self.action_samples = action_samples
        self.fps=fps
        self.transform = transform
        self.label_type = label_type
        self.sequence_length = sequence_length
        self.img_tmpl = img_tmpl
        # NOTE(review): action_samples was already assigned a few lines above;
        # this second assignment is redundant (kept for byte-identity)
        self.action_samples = action_samples
        # initialize some lists
        self.ids = [] # action ids
        self.discarded_ids = [] # list of ids discarded (e.g., if there were no enough frames before the beginning of the action
        self.discarded_labels = [] # list of labels discarded (e.g., if there were no enough frames before the beginning of the action
        self.past_frames = [] # names of frames sampled before each action
        self.action_frames = [] # names of frames sampled from each action
        self.labels = [] # labels of each action
        # populate them
        self.__populate_lists()
        # if a list to datasets has been provided, load all of them
        if isinstance(self.path_to_lmdb, list):
            self.env = [lmdb.open(l, readonly=True, lock=False) for l in self.path_to_lmdb]
        else:
            # otherwise, just load the single LMDB dataset
            self.env = lmdb.open(self.path_to_lmdb, readonly=True, lock=False)
    def __get_frames(self, frames, video):
        """ format file names using the image template """
        frames = np.array(list(map(lambda x: video+"_"+self.img_tmpl.format(x), frames)))
        return frames
    def __populate_lists(self):
        """ Samples a sequence for each action and populates the lists. """
        for _, a in tqdm(self.annotations.iterrows(), 'Populating Dataset', total = len(self.annotations)):
            # sample frames before the beginning of the action
            frames = self.__sample_frames_past(a.start)
            if self.action_samples:
                # sample frames from the action
                # to sample n frames, we first sample n+1 frames with linspace, then discard the first one
                action_frames = np.linspace(a.start, a.end, self.action_samples+1, dtype=int)[1:]
            # check if there were enough frames before the beginning of the action
            if frames.min()>=1: #if the smaller frame is at least 1, the sequence is valid
                self.past_frames.append(self.__get_frames(frames, a.video))
                self.ids.append(a.name)
                # handle whether a list of labels is required (e.g., [verb, noun]), rather than a single action
                if isinstance(self.label_type, list):
                    if self.challenge: # if sampling for the challenge, there are no labels, just add -1
                        self.labels.append(-1)
                    else:
                        # otherwise get the required labels
                        self.labels.append(a[self.label_type].values.astype(int))
                else: #single label version
                    if self.challenge:
                        self.labels.append(-1)
                    else:
                        self.labels.append(a[self.label_type])
                if self.action_samples:
                    self.action_frames.append(self.__get_frames(action_frames, a.video))
            else:
                #if the sequence is invalid, do nothing, but add the id to the discarded_ids list
                self.discarded_ids.append(a.name)
                if isinstance(self.label_type, list):
                    if self.challenge: # if sampling for the challenge, there are no labels, just add -1
                        self.discarded_labels.append(-1)
                    else:
                        # otherwise get the required labels
                        self.discarded_labels.append(a[self.label_type].values.astype(int))
                else: #single label version
                    if self.challenge:
                        self.discarded_labels.append(-1)
                    else:
                        self.discarded_labels.append(a[self.label_type])
    def __sample_frames_past(self, point):
        """Samples frames before the beginning of the action "point" """
        # generate the relative timestamps, depending on the requested sequence_length
        # e.g., 2.  , 1.75, 1.5 , 1.25, 1.  , 0.75, 0.5 , 0.25
        # in this case "2" means, sample 2s before the beginning of the action
        time_stamps = np.arange(self.time_step,self.time_step*(self.sequence_length+1),self.time_step)[::-1]
        # compute the time stamp corresponding to the beginning of the action
        end_time_stamp = point/self.fps
        # subtract time stamps to the timestamp of the last frame
        time_stamps = end_time_stamp-time_stamps
        # convert timestamps to frames
        # use floor to be sure to consider the last frame before the timestamp (important for anticipation!)
        # and never sample any frame after that time stamp
        frames = np.floor(time_stamps*self.fps).astype(int)
        # sometimes there are not enough frames before the beginning of the action
        # in this case, we just pad the sequence with the first frame
        # this is done by replacing all frames smaller than 1
        # with the first frame of the sequence
        if frames.max()>=1:
            frames[frames<1]=frames[frames>=1].min()
        return frames
    def __len__(self):
        # number of valid (non-discarded) action sequences
        return len(self.ids)
    def __getitem__(self, index):
        """ sample a given sequence """
        # get past frames
        past_frames = self.past_frames[index]
        if self.action_samples:
            # get action frames
            action_frames = self.action_frames[index]
        # return a dictionary containing the id of the current sequence
        # this is useful to produce the jsons for the challenge
        out = {'id':self.ids[index]}
        if self.past_features:
            # read representations for past frames
            out['past_features'] = read_data(past_frames, self.env, self.transform)
        # get the label of the current sequence
        label = self.labels[index]
        out['label'] = label
        if self.action_samples:
            # read representations for the action samples
            out['action_features'] = read_data(action_frames, self.env, self.transform)
        return out
| 9,400 | 43.554502 | 134 | py |
rulstm | rulstm-master/RULSTM/models.py | from torch import nn
import torch
from torch.nn.init import normal, constant
import numpy as np
from torch.nn import functional as F
class OpenLSTM(nn.Module):
    """An LSTM wrapper that exposes the hidden AND cell state at every step.

    PyTorch's nn.LSTM only returns the final cell state; RULSTM needs the
    cell vector computed at each intermediate time-step, so this module
    advances the LSTM one step at a time and records both states."""
    def __init__(self, feat_in, feat_out, num_layers=1, dropout=0):
        """
            feat_in: input feature size
            feat_out: output feature size
            num_layers: number of layers
            dropout: dropout probability
        """
        super(OpenLSTM, self).__init__()
        # a plain LSTM; the "open" behaviour comes from how forward drives it
        self.lstm = nn.LSTM(feat_in, feat_out, num_layers=num_layers, dropout=dropout)
    def forward(self, seq):
        """seq: (timesteps, batch, feat_in). Returns two tensors of shape
        (timesteps, num_layers, batch, feat_out): the per-step hidden and
        cell states."""
        hidden_states = []
        cell_states = []
        state = None
        # feed the sequence one time-step at a time, carrying the state forward
        for step in range(seq.shape[0]):
            frame = seq[step, ...].unsqueeze(0)
            if state is None:
                _, state = self.lstm(frame)
            else:
                _, state = self.lstm(frame, state)
            hidden_states.append(state[0])
            cell_states.append(state[1])
        return torch.stack(hidden_states, 0), torch.stack(cell_states, 0)
class RULSTM(nn.Module):
    """Rolling-Unrolling LSTM: a rolling LSTM encodes the observed frames,
    and at each time-step an unrolling LSTM is initialized from the rolling
    state and run forward to anticipate the upcoming action."""
    def __init__(self, num_class, feat_in, hidden, dropout=0.8, depth=1,
                sequence_completion=False, return_context=False):
        """
            num_class: number of classes
            feat_in: number of input features
            hidden: number of hidden units
            dropout: dropout probability
            depth: number of LSTM layers
            sequence_completion: if the network should be arranged for sequence completion pre-training
            return_context: whether to return the Rolling LSTM hidden and cell state (useful for MATT) during forward
        """
        super(RULSTM, self).__init__()
        self.feat_in = feat_in
        self.dropout = nn.Dropout(dropout)
        self.hidden=hidden
        self.rolling_lstm = OpenLSTM(feat_in, hidden, num_layers=depth, dropout=dropout if depth>1 else 0)
        self.unrolling_lstm = nn.LSTM(feat_in, hidden, num_layers=depth, dropout=dropout if depth>1 else 0)
        self.classifier = nn.Sequential(nn.Dropout(dropout), nn.Linear(hidden, num_class))
        self.sequence_completion = sequence_completion
        self.return_context = return_context
    def forward(self, inputs):
        # permute the inputs for compatibility with the LSTM
        # (batch, timesteps, feat) -> (timesteps, batch, feat)
        inputs=inputs.permute(1,0,2)
        # pass the frames through the rolling LSTM
        # and get the hidden (x) and cell (c) states at each time-step
        x, c = self.rolling_lstm(self.dropout(inputs))
        x = x.contiguous() # (timesteps, num_layers, batch, hidden) - per-step hidden states from OpenLSTM
        c = c.contiguous() # (timesteps, num_layers, batch, hidden) - per-step cell states from OpenLSTM
        # accumulate the predictions in a list
        predictions = [] # accumulate the predictions in a list
        # for each time-step
        for t in range(x.shape[0]):
            # get the hidden and cell states at current time-step
            hid = x[t,...]
            cel = c[t,...]
            if self.sequence_completion:
                # take current + future inputs (looks into the future)
                ins = inputs[t:,...]
            else:
                # replicate the current input for the correct number of times (time-steps remaining to the beginning of the action)
                # NOTE(review): the replication count is shape[0]-t+1, one more
                # than the remaining steps - confirm this off-by-one is intended
                ins = inputs[t,...].unsqueeze(0).expand(inputs.shape[0]-t+1,inputs.shape[1],inputs.shape[2]).to(inputs.device)
            # initialize the LSTM and iterate over the inputs
            h_t, (_,_) = self.unrolling_lstm(self.dropout(ins), (hid.contiguous(), cel.contiguous()))
            # get last hidden state
            h_n = h_t[-1,...]
            # append the last hidden state to the list
            predictions.append(h_n)
        # obtain the final prediction tensor by concatenating along dimension 1
        x = torch.stack(predictions,1)
        # apply the classifier to each output feature vector (independently)
        y = self.classifier(x.view(-1,x.size(2))).view(x.size(0), x.size(1), -1)
        if self.return_context:
            # return y and the concatenation of hidden and cell states
            # (the squeeze assumes a single-layer rolling LSTM - depth==1)
            c=c.squeeze().permute(1,0,2)
            return y, torch.cat([x, c],2)
        else:
            return y
class RULSTMFusion(nn.Module):
    """Fuses the predictions of several pre-trained RULSTM branches with
    Modality ATTention (MATT): a small MLP looks at the concatenated hidden
    and cell states of all branches and outputs one weight per branch, used
    to compute a weighted sum of the branch scores."""
    def __init__(self, branches, hidden, dropout=0.8):
        """
            branches: list of pre-trained branches. Each branch should have the "return_context" property to True
            hidden: size of hidden vectors of the branches
            dropout: dropout probability
        """
        super(RULSTMFusion, self).__init__()
        self.branches = nn.ModuleList(branches)
        # each branch contributes a hidden AND a cell vector of size `hidden`
        in_size = 2 * len(self.branches) * hidden
        # MATT: a 3-layer MLP mapping the fused context to one score per branch
        self.MATT = nn.Sequential(nn.Linear(in_size, int(in_size / 4)),
                                  nn.ReLU(),
                                  nn.Dropout(dropout),
                                  nn.Linear(int(in_size / 4), int(in_size / 8)),
                                  nn.ReLU(),
                                  nn.Dropout(dropout),
                                  nn.Linear(int(in_size / 8), len(self.branches)))
    def forward(self, inputs):
        """inputs: tuple containing the inputs to the single branches"""
        branch_scores = []
        branch_contexts = []
        # run every branch on its own modality input
        for branch_idx, branch_input in enumerate(inputs):
            s, c = self.branches[branch_idx](branch_input)
            branch_scores.append(s)
            branch_contexts.append(c)
        # concatenate the contexts and flatten to (batch*timesteps, features)
        fused_context = torch.cat(branch_contexts, 2)
        fused_context = fused_context.view(-1, fused_context.shape[-1])
        # attention weights over branches, softmax-normalized to sum to one
        weights = F.softmax(self.MATT(fused_context), 1)
        # weighted sum of the branch scores
        fused_scores = torch.zeros_like(branch_scores[0])
        for branch_idx in range(len(inputs)):
            s = branch_scores[branch_idx]
            weighted = s.view(-1, s.shape[-1]) * weights[:, branch_idx].unsqueeze(1)
            fused_scores = fused_scores + weighted.view(fused_scores.shape)
        # return the fused scores
        return fused_scores
| 6,687 | 40.540373 | 131 | py |
rulstm | rulstm-master/FasterRCNN/tools/detect_video.py | #!/usr/bin/env python
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Perform inference on all the frames of a video
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import argparse
import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import glob
import logging
import os
import sys
import time
import numpy as np
from caffe2.python import workspace
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.utils.io import cache_url
from detectron.utils.logging import setup_logging
from detectron.utils.timer import Timer
import detectron.core.test_engine as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
import detectron.utils.c2 as c2_utils
import detectron.utils.vis as vis_utils
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
def parse_args():
    """Parse command-line options for end-to-end video inference."""
    parser = argparse.ArgumentParser(description='End-to-end inference')
    parser.add_argument('--cfg', dest='cfg',
                        help='cfg model file (/path/to/model_config.yaml)',
                        default=None, type=str)
    parser.add_argument('--wts', dest='weights',
                        help='weights model file (/path/to/model_weights.pkl)',
                        default=None, type=str)
    parser.add_argument('--top_predictions', dest='top_predictions',
                        help='Number of predictions to store',
                        default=100, type=int)
    parser.add_argument('path_to_video', help='path_to_video', default=None)
    # with no arguments at all, show usage and bail out
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def format_dets(boxes):
    """Flatten per-class detection lists into one (N, 6) array.

    boxes: list indexed by class id, each entry a list of detections
    (x1, y1, x2, y2, score). The output prepends a column with the class
    index shifted down by one, matching the original layout."""
    stacked = []
    for cls_idx, cls_boxes in enumerate(boxes):
        if len(cls_boxes) == 0:
            continue
        cls_boxes = np.array(cls_boxes)
        # first column: shifted class index, replicated for every detection
        label_col = np.ones((len(cls_boxes), 1)) * cls_idx - 1
        stacked.append(np.hstack([label_col, cls_boxes]))
    if len(stacked) == 0:
        # no detections at all: empty array with the expected width
        return np.zeros((0, 6))
    return np.concatenate(stacked)
def main(args):
    """Run Faster R-CNN detection on every frame of a video and save all
    detections to '<video>_detections.npy'."""
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    # skip videos that have already been processed
    if os.path.isfile(args.path_to_video+'_detections.npy'):
        return
    assert not cfg.MODEL.RPN_ONLY, \
        'RPN models are not supported'
    assert not cfg.TEST.PRECOMPUTED_PROPOSALS, \
        'Models that require precomputed proposals are not supported'
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    vid = cv2.VideoCapture(args.path_to_video)
    ret, im = vid.read()
    all_boxes = []
    # iterate over the video frame by frame until reading fails
    while ret:
        timers = defaultdict(Timer)
        t = time.time()
        with c2_utils.NamedCudaScope(0):
            cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
                model, im, None, timers=timers
            )
        # flatten the per-class detections for this frame into one array
        all_boxes.append(format_dets(cls_boxes))
        logger.info('Inference time: {:.3f}s'.format(time.time() - t))
        for k, v in timers.items():
            logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
        ret, im = vid.read()
    # one entry per frame, each an (N, 6) detections array
    np.save(args.path_to_video+'_detections',all_boxes)
if __name__ == '__main__':
    # bootstrap caffe2 and logging, then run detection over the whole video
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    setup_logging(__name__)
    args = parse_args()
    main(args)
| 4,411 | 29.013605 | 78 | py |
chase | chase-master/python/src/example.py | # MLP for Pima Indians Dataset with grid search via sklearn
#import tensorflow as tf
from sklearn.cross_validation import train_test_split, cross_val_predict, cross_val_score
from sklearn.metrics import accuracy_score
import os
os.environ['THEANO_FLAGS']="device=cpu,openmp=True"
import datetime
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
import numpy
# Function to create model, required for KerasClassifier
def create_model(optimizer='rmsprop', init='glorot_uniform'):
    """Build the Keras MLP used for the Pima Indians diabetes task.

    optimizer: optimizer name passed to ``model.compile``
    init: kernel initializer used for every dense layer
    """
    mlp = Sequential()
    # 8 input features -> two hidden ReLU layers -> single sigmoid output
    mlp.add(Dense(12, input_dim=8, kernel_initializer=init, activation='relu'))
    mlp.add(Dense(8, kernel_initializer=init, activation='relu'))
    mlp.add(Dense(1, kernel_initializer=init, activation='sigmoid'))
    mlp.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return mlp
print(datetime.datetime.now())
# fix random seed for reproducibility
seed = 1
numpy.random.seed(seed)
# load pima indians dataset
dataset = numpy.loadtxt("/home/zqz/Work/data/pima-indians-diabetes.data", delimiter=",")
# split into input (X) and output (Y) variables
print(">>>>> n-fold")
X = dataset[:,0:8]
Y = dataset[:,8]
# plain 5-fold cross-validation with fixed hyper-parameters
model = KerasClassifier(build_fn=create_model, epochs=10, batch_size=5,verbose=0)
results = cross_val_score(model, X, Y, cv=5)
print(results.mean())
print(">>>>> grid search")
# hold out 25% of the data for final testing
X_train_data, X_test_data, y_train, y_test = \
    train_test_split(dataset[:,0:8], dataset[:,8],
                     test_size=0.25,
                     random_state=42)
# create model
model = KerasClassifier(build_fn=create_model, verbose=0)
# grid search epochs, batch size and optimizer
# (each list currently holds a single candidate)
optimizers = ['adam']
init = ['uniform']
epochs = [10]
batches = [5]
param_grid = dict(optimizer=optimizers, epochs=epochs, batch_size=batches, init=init)
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=5)
grid_result = grid.fit(X_train_data, y_train)
print("\tcrossfold running...{}".format(datetime.datetime.now()))
#nfold_predictions = cross_val_predict(grid.best_estimator_, X_train_data, y_train, cv=5)
print(cross_val_score(grid.best_estimator_, X_train_data, y_train, cv=5).mean())
#0.69827588 0.63478262 0.61739132 0.68695653 0.62608697
best_param_ann = grid.best_params_
print("\tbest params are:{}".format(best_param_ann))
best_estimator = grid.best_estimator_
# evaluate the best estimator on the held-out 25%
heldout_predictions = best_estimator.predict(X_test_data)
print("\ttesting on the heldout...")
print(accuracy_score(y_test,heldout_predictions))
print(datetime.datetime.now())
#K.clear_session()
# from theano import function, config, shared, tensor
# import numpy
# import time
#
# vlen = 10 * 30 * 768 # 10 x #cores x # threads per core
# iters = 1000
#
# rng = numpy.random.RandomState(22)
# x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
# f = function([], tensor.exp(x))
# print(f.maker.fgraph.toposort())
# t0 = time.time()
# for i in range(iters):
# r = f()
# t1 = time.time()
# print("Looping %d times took %f seconds" % (iters, t1 - t0))
# print("Result is %s" % (r,))
# if numpy.any([isinstance(x.op, tensor.Elemwise) and
# ('Gpu' not in type(x.op).__name__)
# for x in f.maker.fgraph.toposort()]):
# print('Used the cpu')
# else:
# print('Used the gpu')
| 3,449 | 34.204082 | 92 | py |
chase | chase-master/python/src/ml/classifier_dnn.py | import os
from numpy.random import seed
seed(1)
os.environ['PYTHONHASHSEED'] = '0'
os.environ['THEANO_FLAGS'] = "floatX=float64,device=cpu,openmp=True"
# os.environ['THEANO_FLAGS']="openmp=True"
os.environ['OMP_NUM_THREADS'] = '16'
import theano
theano.config.openmp = True
# import tensorflow as tf
# tf.set_random_seed(2)
# single thread
# session_conf = tf.ConfigProto(
# intra_op_parallelism_threads=1,
# inter_op_parallelism_threads=1)
# sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
# K.set_session(sess)
# sess = tf.Session(config=session_conf)
# with sess.as_default():
# print(tf.constant(42).eval())
import datetime
import logging
import sys
import functools
import gensim
import numpy
import random as rn
import pandas as pd
import pickle
from keras.layers import Embedding
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.cross_validation import cross_val_predict, train_test_split, cross_val_score, StratifiedKFold
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import GridSearchCV
from keras.preprocessing import sequence
from ml import util
from ml import nlp
from ml import text_preprocess as tp
from ml import dnn_model_creator as dmc
# maximum number of words (tokens) kept per tweet when padding sequences
MAX_SEQUENCE_LENGTH = 100
# dimensionality of word embedding vectors when trained from scratch
# (previously this constant was assigned twice in a row; the duplicate
# assignment has been removed)
WORD_EMBEDDING_DIM_OUTPUT = 300
# number of parallel jobs used by the grid search
CPUS = 1
def get_word_vocab(tweets, out_folder, normalize_option):
    """Tokenize the tweets, build a word vocabulary, and encode each tweet as
    the list of vocabulary indices of the words it contains.

    Also pickles the vocabulary to ``out_folder`` for later reuse.
    Returns (encoded_tweets, vocab) where vocab maps word -> index."""
    word_vectorizer = CountVectorizer(
        tokenizer=functools.partial(nlp.tokenize, stem_or_lemma=normalize_option),
        preprocessor=tp.strip_hashtags,
        ngram_range=(1, 1),
        stop_words=nlp.stopwords,  # We do better when we keep stopwords
        decode_error='replace',
        max_features=50000,
        min_df=1,
        max_df=0.99
    )
    counts = word_vectorizer.fit_transform(tweets).toarray()
    vocab = {v: i for i, v in enumerate(word_vectorizer.get_feature_names())}
    # persist the vocabulary so the same mapping can be reused at predict time
    pickle.dump(vocab, open(out_folder + "/" + "DNN_WORD_EMBEDDING" + ".pk", "wb"))
    # represent each tweet by the indices of the words that occur in it
    word_embedding_input = [[idx for idx, freq in enumerate(row) if freq != 0]
                            for row in counts]
    return word_embedding_input, vocab
def create_model(model_descriptor: str, max_index=100, wemb_matrix=None, wdist_matrix=None):
    '''Build a DNN starting from one or more word embedding layers.

    model_descriptor: architecture description; a "b_" prefix selects the
        branched topology, "f_" the final concatenated-CNN topology, anything
        else the plain topology
    max_index: vocabulary size (input dimension of the embedding layers)
    wemb_matrix: optional pre-trained word embedding matrix (frozen if given)
    wdist_matrix: optional word-distribution feature matrix (always frozen)
    '''
    if wemb_matrix is None:
        # no pre-trained embeddings: learn them from scratch
        embedding_layers = [Embedding(input_dim=max_index, output_dim=WORD_EMBEDDING_DIM_OUTPUT,
                                      input_length=MAX_SEQUENCE_LENGTH)]
        if wdist_matrix is not None:
            # add a second, frozen channel carrying word-distribution features
            embedding_layers.append(Embedding(input_dim=max_index, output_dim=len(wdist_matrix[0]),
                                              weights=[wdist_matrix],
                                              input_length=MAX_SEQUENCE_LENGTH,
                                              trainable=False))
    else:
        weight_matrix = wemb_matrix
        if wdist_matrix is not None:
            # fuse the pre-trained embeddings with the distribution features
            weight_matrix = util.concat_matrices(wemb_matrix, wdist_matrix)
        # pre-trained weights are kept fixed during training
        embedding_layers = [Embedding(input_dim=max_index, output_dim=len(weight_matrix[0]),
                                      weights=[weight_matrix],
                                      input_length=MAX_SEQUENCE_LENGTH,
                                      trainable=False)]
    if model_descriptor.startswith("b_"):
        model = dmc.create_model_with_branch(embedding_layers, model_descriptor[2:].strip())
    elif model_descriptor.startswith("f_"):
        model = dmc.create_final_model_with_concat_cnn(embedding_layers, model_descriptor)
    else:
        model = dmc.create_model_without_branch(embedding_layers, model_descriptor)
    return model
class MyKerasClassifier(KerasClassifier):
    """Keras classifier wrapper that maps predicted probabilities back to
    the original class labels."""
    def predict(self, x, **kwargs):
        kwargs = self.filter_sk_params(self.model.predict, kwargs)
        probabilities = self.model.predict(x, **kwargs)
        if probabilities.shape[-1] > 1:
            # multi-class output: pick the most probable class
            label_indices = probabilities.argmax(axis=-1)
        else:
            # single sigmoid output: threshold at 0.5
            label_indices = (probabilities > 0.5).astype('int32')
        return self.classes_[label_indices]
# def pretrained_embedding_with_wdist(word_vocab: dict, models: list, expected_emb_dim, randomize_strategy,
# word_dist_scores_file=None):
# # logger.info("\tloading pre-trained embedding model... {}".format(datetime.datetime.now()))
# # logger.info("\tloading complete. {}".format(datetime.datetime.now()))
# word_dist_scores = None
# if word_dist_scores_file is not None:
# print("using word dist features...")
# word_dist_scores = util.read_word_dist_features(word_dist_scores_file)
# expected_emb_dim += 2
#
# randomized_vectors = {}
# matrix = numpy.zeros((len(word_vocab), expected_emb_dim))
# count = 0
# random = 0
# for word, i in word_vocab.items():
# is_in_model = False
# for model in models:
# if word in model.wv.vocab.keys():
# is_in_model = True
# vec = model.wv[word]
# if word_dist_scores is not None:
# vec = util.append_word_dist_features(vec, word, word_dist_scores)
# matrix[i] = vec
# break
#
# if not is_in_model:
# random += 1
# model = models[0]
# if randomize_strategy == 1: # randomly set values following a continuous uniform distribution
# vec = numpy.random.random_sample(expected_emb_dim)
# if word_dist_scores is not None:
# vec = util.append_word_dist_features(vec, word, word_dist_scores)
# matrix[i] = vec
# elif randomize_strategy == 2: # randomly take a vector from the model
# if word in randomized_vectors.keys():
# vec = randomized_vectors[word]
# else:
# max = len(model.wv.vocab.keys()) - 1
# index = rn.randint(0, max)
# word = model.index2word[index]
# vec = model.wv[word]
# randomized_vectors[word] = vec
# if word_dist_scores is not None:
# vec = util.append_word_dist_features(vec, word, word_dist_scores)
# matrix[i] = vec
# count += 1
# if count % 100 == 0:
# print(count)
# models.clear()
# if randomize_strategy != 0:
# print("randomized={}".format(random))
# else:
# print("oov={}".format(random))
# return matrix
def build_pretrained_embedding_matrix(word_vocab: dict, models: list, expected_emb_dim, randomize_strategy
                                      ):
    """Build an embedding matrix for the vocabulary from pre-trained models.

    word_vocab: dict mapping word -> row index in the output matrix
    models: list of gensim-style models, probed in order; the first model
        containing a word supplies its vector
    expected_emb_dim: dimensionality of the embedding vectors
    randomize_strategy: what to do for out-of-vocabulary words --
        0/'0': leave the row as zeros;
        1/'1': sample a random vector from a continuous uniform distribution;
        2/'2': reuse the vector of a randomly chosen in-vocabulary word
        (cached so the same OOV word always maps to the same vector)

    NOTE: ``models`` is cleared before returning to free memory.
    """
    # normalize once so int and str arguments behave identically everywhere
    # (previously the final report compared only against the string '0', so
    # passing the integer 0 wrongly printed "randomized")
    strategy = str(randomize_strategy)
    randomized_vectors = {}
    matrix = numpy.zeros((len(word_vocab), expected_emb_dim))
    count = 0
    random = 0
    for word, i in word_vocab.items():
        is_in_model = False
        for model in models:
            if word in model.wv.vocab.keys():
                is_in_model = True
                matrix[i] = model.wv[word]
                break
        if not is_in_model:
            random += 1
            model = models[0]
            if strategy == '1':  # random vector from a continuous uniform distribution
                matrix[i] = numpy.random.random_sample(expected_emb_dim)
            elif strategy == '2':  # reuse the vector of a random word from the model
                if word in randomized_vectors.keys():
                    vec = randomized_vectors[word]
                else:
                    # rn.randint is inclusive on both ends
                    # (renamed from `max` to avoid shadowing the builtin)
                    max_index = len(model.wv.vocab.keys()) - 1
                    index = rn.randint(0, max_index)
                    vec = model.wv[model.index2word[index]]
                    randomized_vectors[word] = vec
                matrix[i] = vec
        count += 1
        if count % 100 == 0:
            print(count)  # progress indicator
    if strategy != '0':
        print("randomized={}".format(random))
    else:
        print("oov={}".format(random))
    models.clear()
    return matrix
def build_word_dist_matrix(word_vocab: dict,
                           word_dist_scores_file):
    """Build a (vocab_size, 2) matrix of word-distribution features.

    Each row holds the two distribution scores of the word assigned to that
    index, looked up from the scores loaded from ``word_dist_scores_file``."""
    word_dist_scores = util.read_word_dist_features(word_dist_scores_file)
    feature_dim = 2  # two distribution scores per word
    matrix = numpy.zeros((len(word_vocab), feature_dim))
    processed = 0
    for word, row in word_vocab.items():
        matrix[row] = util.build_word_dist_features(word, word_dist_scores)
        processed += 1
        if processed % 100 == 0:
            print(processed)  # progress indicator
    return matrix
def grid_search_dnn(dataset_name, outfolder, model_descriptor: str,
                    cpus, nfold, X_train, y_train, X_test, y_test, X_train_index, X_test_index,
                    embedding_layer_max_index, pretrained_embedding_matrix=None,
                    word_dist_matrix=None,
                    instance_tags_train=None, instance_tags_test=None,
                    accepted_ds_tags: list = None):
    """Grid-search batch size / epochs for the DNN with stratified k-fold CV,
    then produce cross-validated predictions with the best estimator, predict
    the held-out set (if given), and save all scores via util.save_scores."""
    print("\t== Perform ANN ...")
    subfolder = outfolder + "/models"
    # create the model output folder if it does not exist yet
    try:
        os.stat(subfolder)
    except:
        os.mkdir(subfolder)
    # bind the architecture/embedding arguments so Keras can rebuild the model
    create_model_with_args = \
        functools.partial(create_model, max_index=embedding_layer_max_index,
                          wemb_matrix=pretrained_embedding_matrix,
                          wdist_matrix=word_dist_matrix,
                          model_descriptor=model_descriptor)
    # model = MyKerasClassifier(build_fn=create_model_with_args, verbose=0)
    model = KerasClassifier(build_fn=create_model_with_args, verbose=0)
    # model = KerasClassifier(build_fn=create_model_with_args, verbose=0, batch_size=100,
    #                        nb_epoch=10)
    #
    # nfold_predictions = cross_val_predict(model, X_train, y_train, cv=nfold)
    # define the grid search parameters (currently one candidate each)
    batch_size = [100]
    epochs = [10]
    param_grid = dict(batch_size=batch_size, nb_epoch=epochs)
    #it seems that the default gridsearchcv can have problem with stratifiedkfold sometimes, on w and ws dataset when we add "mixed_data"
    fold=StratifiedKFold(n_folds=nfold, y=y_train)
    _classifier = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=cpus,
                               cv=fold)
    #this is the original grid search cv object to replace the above
    #_classifier = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=cpus,
    #                           cv=nfold)
    print("\tfitting model...{}".format(datetime.datetime.now()))
    _classifier.fit(X_train, y_train)
    print("\tcrossfold running...{}".format(datetime.datetime.now()))
    # cross-validated predictions on the training split with the best params
    nfold_predictions = cross_val_predict(_classifier.best_estimator_, X_train, y_train, cv=nfold)
    best_param_ann = _classifier.best_params_
    print("\tdone {}".format(datetime.datetime.now()))
    print("\tbest params for {} model are:{}".format(model_descriptor, best_param_ann))
    best_estimator = _classifier.best_estimator_
    # util.save_classifier_model(best_estimator, ann_model_file)
    # logger.info("testing on development set ....")
    if (X_test is not None):
        print("\tpredicting...{}".format(datetime.datetime.now()))
        heldout_predictions_final = best_estimator.predict(X_test)
        print("\tsaving...{}".format(datetime.datetime.now()))
        util.save_scores(nfold_predictions, y_train, heldout_predictions_final, y_test,
                         X_train_index, X_test_index,
                         model_descriptor, dataset_name,
                         3, outfolder, instance_tags_train, instance_tags_test, accepted_ds_tags)
    else:
        # no held-out set: save only the cross-validation scores
        print("\tsaving...{}".format(datetime.datetime.now()))
        util.save_scores(nfold_predictions, y_train, None, y_test,X_train_index, X_test_index,
                         model_descriptor, dataset_name, 3,
                         outfolder, instance_tags_train, instance_tags_test, accepted_ds_tags)
    # util.print_eval_report(best_param_ann, cv_score_ann, dev_data_prediction_ann,
    #                       time_ann_predict_dev,
    #                       time_ann_train, y_test)
def output_data_stats(X_train_data, y_train):
    """Print basic statistics about the training split: number of instances,
    number of labels, and how many instances carry each label."""
    from collections import Counter  # local import: the file has no collections import
    # Counter preserves first-seen order, so the printed dict matches the
    # output of the previous hand-rolled counting loop exactly
    labels = dict(Counter(y_train))
    print("training instances={}, training labels={}, training label distribution={}".
          format(len(X_train_data), len(y_train), labels))
def gridsearch(input_data_file, dataset_name, sys_out, model_descriptor: str,
               print_scores_per_class,
               word_norm_option,
               randomize_strategy,
               pretrained_embedding_models=None, expected_embedding_dim=None,
               word_dist_features_file=None, use_mixed_data=False):
    """End-to-end pipeline: load the labeled csv, build the vocabulary and
    (optionally) the pre-trained embedding / word-distribution matrices,
    split 75/25 train/test, optionally append the 'mixed' dataset, pad the
    sequences, and hand everything to grid_search_dnn."""
    raw_data = pd.read_csv(input_data_file, sep=',', encoding="utf-8")
    M = get_word_vocab(raw_data.tweet, sys_out, word_norm_option)
    # M=self.feature_scale(M)
    M0 = M[0]
    pretrained_word_matrix = None
    if pretrained_embedding_models is not None:
        pretrained_word_matrix = build_pretrained_embedding_matrix(M[1],
                                                                   pretrained_embedding_models,
                                                                   expected_embedding_dim,
                                                                   randomize_strategy)
    word_dist_matrix = None
    if word_dist_features_file is not None:
        word_dist_matrix = build_word_dist_matrix(M[1],
                                                  word_dist_features_file)
    # split the dataset into two parts, 0.75 for train and 0.25 for testing
    if 'ds' in raw_data.columns:
        col_datasource=raw_data['ds']
    else:
        # fall back to the first column when no 'ds' column is present
        col_datasource=raw_data[raw_data.columns[0]]
    X_train_data, X_test_data, y_train, y_test, ds_train, ds_test, index_train, index_test=\
        train_test_split(M0, raw_data['class'], col_datasource,
                         list(raw_data.index.values),
                         test_size=0.25,
                         random_state=42)
    accepted_ds_tags = None
    if print_scores_per_class:
        accepted_ds_tags = ["w"]
    # using mixed data?
    if use_mixed_data:
        # load the companion mixed dataset sitting next to the input file
        mixed_data_folder=input_data_file[0:input_data_file.rfind("/")]
        mixed_data_file=mixed_data_folder+"/labeled_data_all_mixed.csv"
        mixed_data = pd.read_csv(mixed_data_file, sep=',', encoding="utf-8")
        MX = get_word_vocab(mixed_data.tweet, sys_out, word_norm_option)
        # M=self.feature_scale(M)
        MX0 = MX[0]
        # split the dataset into two parts, 0.75 for train and 0.25 for testing
        MX_X_train_data, MX_X_test_data, MX_y_train, MX_y_test, MX_ds_train, MX_ds_test = \
            train_test_split(MX0, mixed_data['class'],
                             mixed_data['ds'],
                             test_size=0.25,
                             random_state=42)
        # append the mixed-data splits to the main splits
        X_train_data=numpy.concatenate((X_train_data, MX_X_train_data))
        X_test_data = numpy.concatenate((X_test_data, MX_X_test_data))
        y_train = y_train.append(MX_y_train, ignore_index=True) #numpy.concatenate((y_train, MX_y_train))
        y_test = y_test.append(MX_y_test, ignore_index=True)
        ds_train = ds_train.append(MX_ds_train, ignore_index=True)
        ds_test = ds_test.append(MX_ds_test, ignore_index=True)
    y_train = y_train.astype(int)
    y_test = y_test.astype(int)
    # pad/truncate every encoded tweet to the same length
    X_train_data = sequence.pad_sequences(X_train_data, maxlen=MAX_SEQUENCE_LENGTH)
    X_test_data = sequence.pad_sequences(X_test_data, maxlen=MAX_SEQUENCE_LENGTH)
    output_data_stats(X_train_data, y_train)
    # exit(0)
    grid_search_dnn(dataset_name, sys_out, model_descriptor,
                    CPUS, 5,
                    X_train_data,
                    y_train, X_test_data, y_test, index_train, index_test,
                    len(M[1]), pretrained_word_matrix, word_dist_matrix,
                    ds_train, ds_test, accepted_ds_tags)
    print("complete {}".format(datetime.datetime.now()))
def cross_eval_dnn(dataset_name, outfolder, model_descriptor: str,
                   cpus, nfold, X_data, y_data,
                   embedding_layer_max_index, pretrained_embedding_matrix=None,
                   instance_data_source_tags=None, accepted_ds_tags: list = None):
    """Evaluate a DNN (built from *model_descriptor*) via n-fold cross validation.

    Args:
        dataset_name: label used when saving the score report.
        outfolder: output folder; a "models" subfolder is created inside it.
        model_descriptor: textual description of the network layers.
        cpus: unused here; kept for interface compatibility with callers.
        nfold: number of cross-validation folds.
        X_data, y_data: padded input sequences and integer class labels.
        embedding_layer_max_index: vocabulary size for the embedding layer.
        pretrained_embedding_matrix: optional pre-trained word embedding matrix.
        instance_data_source_tags, accepted_ds_tags: optional per-source score
            breakdown, passed through to util.save_scores.
    """
    print("== Perform ANN ...")
    subfolder = outfolder + "/models"
    # Create the model output folder if missing.  os.makedirs(exist_ok=True)
    # replaces the previous bare try/except around os.stat + os.mkdir, which
    # silently swallowed unrelated errors (e.g. permission problems) too.
    os.makedirs(subfolder, exist_ok=True)
    create_model_with_args = \
        functools.partial(create_model, max_index=embedding_layer_max_index,
                          wemb_matrix=pretrained_embedding_matrix,
                          model_descriptor=model_descriptor)
    # model = MyKerasClassifier(build_fn=create_model_with_args, verbose=0)
    model = KerasClassifier(build_fn=create_model_with_args, verbose=0, batch_size=100)
    # NOTE(review): cross_val_predict clones and refits the estimator per
    # fold, so this initial fit only warms up / validates model construction.
    model.fit(X_data, y_data)

    nfold_predictions = cross_val_predict(model, X_data, y_data, cv=nfold)

    util.save_scores(nfold_predictions, y_data, None, None,
                     model_descriptor, dataset_name, 3,
                     outfolder, instance_data_source_tags, accepted_ds_tags)
# util.print_eval_report(best_param_ann, cv_score_ann, dev_data_prediction_ann,
# time_ann_predict_dev,
#
# def cross_fold_eval(input_data_file, dataset_name, sys_out, model_descriptor: str,
# print_scores_per_class,
# word_norm_option,
# randomize_strategy,
# pretrained_embedding_model=None, expected_embedding_dim=None):
# raw_data = pd.read_csv(input_data_file, sep=',', encoding="utf-8")
# M = get_word_vocab(raw_data.tweet, sys_out, word_norm_option)
# # M=self.feature_scale(M)
# M0 = M[0]
#
# pretrained_word_matrix = None
# if pretrained_embedding_model is not None:
# pretrained_word_matrix = pretrained_embedding(M[1], pretrained_embedding_model, expected_embedding_dim,
# randomize_strategy)
#
# # split the dataset into two parts, 0.75 for train and 0.25 for testing
# X_data = M0
# y_data = raw_data['class']
# y_data = y_data.astype(int)
#
# X_data = sequence.pad_sequences(X_data, maxlen=MAX_SEQUENCE_LENGTH)
#
# instance_data_source_column = None
# accepted_ds_tags = None
# if print_scores_per_class:
# instance_data_source_column = pd.Series(raw_data.ds)
# accepted_ds_tags = ["c", "td"]
#
# cross_eval_dnn(dataset_name, sys_out, model_descriptor,
# -1, 5,
# X_data,
# y_data,
# len(M[1]), pretrained_word_matrix,
# instance_data_source_column, accepted_ds_tags)
# print("complete {}".format(datetime.datetime.now()))
##############################################
##############################################
# /home/zqz/Work/data/GoogleNews-vectors-negative300.bin.gz
# 300
if __name__ == "__main__":
    # Command-line entry point.  Arguments are key=value pairs, supplied
    # either as separate argv entries or as one space-separated string, e.g.:
    #   input=... dataset=rm output=... model_desc="..." emb_model=... emb_dim=300
    print("start {}".format(datetime.datetime.now()))
    emb_model = None
    emb_models = None
    emb_dim = None
    params = {}

    sys_argv = sys.argv
    # A single argument is treated as one space-separated parameter string.
    if len(sys.argv) == 2:
        sys_argv = sys.argv[1].split(" ")
    for arg in sys_argv:
        pv = arg.split("=", 1)
        if (len(pv) == 1):
            # Not a key=value pair (e.g. the script name itself) - skip it.
            continue
        params[pv[0]] = pv[1]

    # Mere presence of "scoreperclass" switches per-class scoring on.
    if "scoreperclass" not in params.keys():
        params["scoreperclass"] = False
    else:
        params["scoreperclass"] = True
    # 0 = stemming, 1 = lemmatisation, anything else = no normalisation.
    if "word_norm" not in params.keys():
        params["word_norm"] = 1
    # 0 = ignore OOV; 1 = random uniform init; 2 = random from embedding.
    if "oov_random" not in params.keys():
        params["oov_random"] = 0
    if "emb_model" in params.keys():
        emb_models = []
        print("===> use pre-trained embeddings...")
        model_str = params["emb_model"].split(',')
        for m_s in model_str:
            # ".gensim" files are native gensim KeyedVectors saves; anything
            # else is assumed to be a binary word2vec file.
            gensimFormat = ".gensim" in m_s
            if gensimFormat:
                emb_models.append(gensim.models.KeyedVectors.load(m_s, mmap='r'))
            else:
                emb_models.append(gensim.models.KeyedVectors. \
                                  load_word2vec_format(m_s, binary=True))
        print("<===loaded {} models".format(len(emb_models)))
    if "emb_dim" in params.keys():
        emb_dim = int(params["emb_dim"])
    # NOTE(review): the "gpu" flag only prints a message here; actual device
    # selection is presumably handled elsewhere (e.g. environment variables)
    # - confirm before relying on it.
    if "gpu" in params.keys():
        if params["gpu"] == "1":
            print("using gpu...")
        else:
            print("using cpu...")
    # Optional word-distribution feature file.
    if "wdist" in params.keys():
        wdist_file = params["wdist"]
    else:
        wdist_file = None

    use_mixed_data=False
    print("<<<<<< Using Mixed Data={} >>>>>>>".format(use_mixed_data))
    gridsearch(params["input"],
               params["dataset"],  # dataset name
               params["output"],  # output
               params["model_desc"],  # model descriptor
               params["scoreperclass"],  # print scores per class
               params["word_norm"],  # 0-stemming, 1-lemma, other-do nothing
               params["oov_random"],  # 0-ignore oov; 1-random init by uniform dist; 2-random from embedding
               emb_models,
               emb_dim,
               wdist_file,
               use_mixed_data)
    # K.clear_session()
    # ... code
    sys.exit(0)
# input=/home/zqz/Work/chase/data/ml/ml/rm/labeled_data_all.csv
# output=/home/zqz/Work/chase/output
# dataset=rm
# model_desc="dropout=0.2,conv1d=100-4,maxpooling1d=4,lstm=100-True,gmaxpooling1d,dense=2-softmax"
# emb_model=/home/zz/Work/data/glove.840B.300d.bin.gensim
# emb_dim=300 | 23,117 | 39.629174 | 137 | py |
chase | chase-master/python/src/ml/multiclassifier_dnn.py | import numpy
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline

# Multi-class classification demo: a small feed-forward network evaluated
# with 10-fold cross validation on the classic iris dataset.

# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load dataset (iris.csv is expected in the working directory, no header row:
# four numeric features followed by the class label)
dataframe = pandas.read_csv("iris.csv", header=None)
dataset = dataframe.values
X = dataset[:,0:4].astype(float)
Y = dataset[:,4]
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = np_utils.to_categorical(encoded_Y)

# define baseline model
def baseline_model():
    """Build a 4-8-3 feed-forward classifier: one hidden relu layer and a
    softmax output over the three iris classes."""
    # create model
    model = Sequential()
    model.add(Dense(8, input_dim=4, activation='relu'))
    model.add(Dense(3, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

estimator = KerasClassifier(build_fn=baseline_model, epochs=200, batch_size=5, verbose=0)
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(estimator, X, dummy_y, cv=kfold)
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
| 1,381 | 31.139535 | 89 | py |
chase | chase-master/python/src/ml/dnn_model_creator.py | from keras.engine import Model
from keras.layers import Dropout, GlobalMaxPooling1D, Dense, Conv1D, MaxPooling1D, Bidirectional, Concatenate, Flatten, \
GRU
from keras.layers import LSTM
from keras import backend as K
from keras.models import Sequential
from keras.regularizers import L1L2
def create_regularizer(string):
    """Parse a regularizer descriptor.

    "none" yields no regularizer; otherwise the descriptor is two floats
    joined by an underscore ("l1_l2") and yields the matching L1L2 instance.
    """
    if string == "none":
        return None
    parts = string.split("_")
    return L1L2(float(parts[0]), float(parts[1]))
def create_model_without_branch(embedding_layers, model_descriptor:str):
    """Build a plain (single-branch) sequential model from a layer descriptor.

    The descriptor is a comma-separated list of layer specs of the form
    ``name=p1-p2[-kernel_reg-activity_reg]``, e.g.
    ``dropout=0.2,conv1d=100-4,maxpooling1d=4,lstm=100-True,gmaxpooling1d,dense=2-softmax``.
    The optional 3rd/4th params are regularizer descriptors understood by
    create_regularizer ("none" = no regularizer).

    Returns the compiled keras model (categorical cross-entropy / adam).
    """

    def reg_kwargs(params):
        # Translate optional params[2]/params[3] into keras keyword args.
        # NOTE: the original code silently added *no* layer at all when both
        # descriptors were "none"; now the layer is simply unregularized.
        kwargs = {}
        if len(params) > 2:
            kernel_reg = create_regularizer(params[2])
            activity_reg = create_regularizer(params[3])
            if kernel_reg is not None:
                kwargs["kernel_regularizer"] = kernel_reg
            if activity_reg is not None:
                kwargs["activity_regularizer"] = activity_reg
        return kwargs

    model = Sequential()
    if len(embedding_layers) == 1:
        model.add(embedding_layers[0])
    else:
        concat_embedding_layers(embedding_layers, model)

    for layer_descriptor in model_descriptor.split(","):
        ld = layer_descriptor.split("=")
        layer_name = ld[0]
        params = ld[1].split("-") if len(ld) > 1 else None

        if layer_name == "dropout":
            model.add(Dropout(float(params[0])))
        elif layer_name == "lstm":
            model.add(LSTM(units=int(params[0]),
                           return_sequences=(params[1] == "True"),
                           **reg_kwargs(params)))
        elif layer_name == "gru":
            model.add(GRU(units=int(params[0]),
                          return_sequences=(params[1] == "True"),
                          **reg_kwargs(params)))
        elif layer_name == "bilstm":
            # Bug fix: the original referenced `return_seq` here without
            # assigning it in this branch, raising NameError unless an
            # lstm/gru spec happened to precede the bilstm spec.
            model.add(Bidirectional(LSTM(units=int(params[0]),
                                         return_sequences=(params[1] == "True"))))
        elif layer_name == "conv1d":
            model.add(Conv1D(filters=int(params[0]), kernel_size=int(params[1]),
                             padding='same', activation='relu',
                             **reg_kwargs(params)))
        elif layer_name == "maxpooling1d":
            model.add(MaxPooling1D(pool_size=int(params[0])))
        elif layer_name == "gmaxpooling1d":
            model.add(GlobalMaxPooling1D())
        elif layer_name == "dense":
            if len(params) >= 2:
                model.add(Dense(int(params[0]), activation=params[1],
                                **reg_kwargs(params)))
            else:
                model.add(Dense(int(params[0])))
        elif layer_name == "flatten":
            model.add(Flatten())

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # model.summary()
    return model
def create_final_model_with_concat_cnn(embedding_layers, model_descriptor:str):
    """Build a model whose first stage concatenates several (skip-)CNN branches.

    The descriptor has the form
    ``(conv1d=100-[3,4,5],so),lstm=100-True,gmaxpooling1d,dense=2-softmax``:
    the bracketed list gives the target n-gram sizes of the parallel
    convolution branches, the optional ``,so`` flag restricts branches to
    skip-gram convolutions only, and everything after the closing parenthesis
    describes the layers stacked on top of the concatenation.
    """

    def reg_kwargs(params):
        # Optional params[2]/params[3] are kernel/activity regularizer
        # descriptors ("none" = absent); absent entries add no kwarg.
        # NOTE: the original silently dropped the layer when both were
        # "none"; now the layer is added without regularizers instead.
        kwargs = {}
        if len(params) > 2:
            kernel_reg = create_regularizer(params[2])
            activity_reg = create_regularizer(params[3])
            if kernel_reg is not None:
                kwargs["kernel_regularizer"] = kernel_reg
            if activity_reg is not None:
                kwargs["activity_regularizer"] = activity_reg
        return kwargs

    # n-gram sizes of the parallel convolution branches, e.g. "[3,4,5]".
    target_grams = model_descriptor[model_descriptor.index("[") + 1: model_descriptor.index("]")]
    skip_layers_only = ",so" in model_descriptor

    submodels = []
    for n in target_grams.split(","):
        submodels.extend(create_skipped_conv1d_submodels(embedding_layers, int(n), skip_layers_only))
    submodel_outputs = [model.output for model in submodels]
    x = Concatenate(axis=1)(submodel_outputs) if len(submodel_outputs) > 1 else submodel_outputs[0]
    parallel_layers = Model(inputs=embedding_layers[0].input, outputs=x)

    # Layers following the closing ')' are stacked on top of the branches.
    outter_model_descriptor = model_descriptor[model_descriptor.index(")") + 2:]
    big_model = Sequential()
    big_model.add(parallel_layers)
    for layer_descriptor in outter_model_descriptor.split(","):
        ld = layer_descriptor.split("=")
        layer_name = ld[0]
        params = ld[1].split("-") if len(ld) > 1 else None

        if layer_name == "dropout":
            big_model.add(Dropout(float(params[0])))
        elif layer_name == "lstm":
            big_model.add(LSTM(units=int(params[0]),
                               return_sequences=(params[1] == "True"),
                               **reg_kwargs(params)))
        elif layer_name == "gru":
            big_model.add(GRU(units=int(params[0]),
                              return_sequences=(params[1] == "True"),
                              **reg_kwargs(params)))
        elif layer_name == "bilstm":
            # Bug fix: the original used `return_seq` here without assigning
            # it in this branch (NameError unless an lstm/gru came first).
            big_model.add(Bidirectional(LSTM(units=int(params[0]),
                                             return_sequences=(params[1] == "True"))))
        elif layer_name == "conv1d":
            big_model.add(Conv1D(filters=int(params[0]), kernel_size=int(params[1]),
                                 padding='same', activation='relu',
                                 **reg_kwargs(params)))
        elif layer_name == "maxpooling1d":
            big_model.add(MaxPooling1D(pool_size=int(params[0])))
        elif layer_name == "gmaxpooling1d":
            big_model.add(GlobalMaxPooling1D())
        elif layer_name == "dense":
            if len(params) >= 2:
                big_model.add(Dense(int(params[0]), activation=params[1],
                                    **reg_kwargs(params)))
            else:
                big_model.add(Dense(int(params[0])))
        elif layer_name == "flatten":
            big_model.add(Flatten())

    big_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # big_model.summary()
    return big_model
def create_skipped_conv1d_submodels(embedding_layers, cnn_ks, skip_layer_only:bool):
    """Create one submodel per plain/skip-gram convolution for window size cnn_ks.

    For every decomposition ``cnn_ks = target + skip`` (target >= 2,
    skip >= 1) all valid skip-gram masks from generate_ks_and_masks are
    turned into SkipConv1D layers; unless `skip_layer_only` is set, a plain
    Conv1D of size cnn_ks is included as well.  For cnn_ks == 5 a 3-gram
    dilated convolution is also added, matching the original configuration.

    Bug fix + generalization: the original returned an empty list for
    cnn_ks < 3 (it never wrapped the plain conv into a submodel) and
    supported only cnn_ks in {3, 4, 5}; the decomposition loop now covers
    any kernel size.

    Returns the list of submodels (embedding -> dropout -> conv -> pooling).
    """
    models = []
    conv_layers = []
    if not skip_layer_only:
        conv_layers.append(Conv1D(filters=100, kernel_size=cnn_ks,
                                  padding='same', activation='relu'))
    # All target/skip splits of the window, in increasing target order
    # (e.g. ks=5 -> 2skip3, 3skip2, 4skip1), as in the original code.
    for target in range(2, cnn_ks):
        skip = cnn_ks - target
        real_ks, masks = generate_ks_and_masks(target, skip)
        for mask in masks:
            conv_layers.append(SkipConv1D(filters=100,
                                          kernel_size=int(real_ks), validGrams=mask,
                                          padding='same', activation='relu'))
    if cnn_ks == 5:
        # 3-gram dilated convolution (dilation rate 1), original "3dilate1".
        conv_layers.append(Conv1D(filters=100,
                                  kernel_size=3, dilation_rate=1,
                                  padding='same', activation='relu'))
    add_skipped_conv1d_submodel_other_layers(conv_layers, embedding_layers, models)
    return models
def add_skipped_conv1d_submodel_other_layers(conv_layers, embedding_layers, models:list):
    """Wrap each convolution layer into its own embedding -> dropout ->
    conv -> max-pooling branch and append the submodels to `models` in place."""
    for conv in conv_layers:
        branch = Sequential()
        if len(embedding_layers) == 1:
            branch.add(embedding_layers[0])
        else:
            concat_embedding_layers(embedding_layers, branch)
        branch.add(Dropout(0.2))
        branch.add(conv)
        branch.add(MaxPooling1D(pool_size=4))
        models.append(branch)
# warning: concat embedding layers currently does not work!
def concat_embedding_layers(embedding_layers, big_model):
    """Concatenate several embedding layers along the feature axis and add
    the resulting sub-network to `big_model`.

    NOTE: known to be broken at the moment; also note it hard-codes exactly
    two inputs when constructing the Model below.
    """
    wrappers = []
    for emb in embedding_layers:
        seq = Sequential()
        seq.add(emb)
        wrappers.append(seq)

    outputs = [w.output for w in wrappers]
    merged = Concatenate(axis=2)(outputs) if len(outputs) > 1 else outputs[0]

    parallel = Model(inputs=[embedding_layers[0].input, embedding_layers[1].input],
                     outputs=merged)
    big_model.add(parallel)
def create_model_with_branch(embedding_layers, model_descriptor:str):
    """Build a model with parallel convolution branches plus a stacked tail.

    Descriptor example: ``sub_conv[2,3,4](dropout=0.2,conv1d=100-v,)...``.
    ``[..]`` lists the branch kernel sizes; an optional ``{..}`` section
    lists dilation rates (one extra branch set per rate); an optional
    ``<..>`` section lists skip-gram settings (one SkipConv1D branch per
    generated mask).  Everything after the closing ``)`` describes the
    layers stacked on top of the concatenated branch outputs.
    """
    submod_str_start=model_descriptor.index("sub_conv")
    submod_str_end=model_descriptor.index(")")
    submod_str=model_descriptor[submod_str_start: submod_str_end]
    # Branch kernel sizes, e.g. "2,3,4".
    kernel_str=submod_str[submod_str.index("[")+1: submod_str.index("]")]
    dilation_rates=[]
    if "{" in submod_str:
        dilation_str=submod_str[submod_str.index("{")+1:submod_str.index("}")]
        dilation_rates=dilation_str.split(",")
    skipgrams=[]
    if "<" in submod_str: #skipconv1d
        skipgram_str=submod_str[submod_str.index("<")+1:submod_str.index(">")]
        skipgrams=skipgram_str.split(",")
    # Per-branch layer descriptors (everything after the opening "(").
    submod_layer_descriptor = submod_str[submod_str.index("(")+1:]

    submodels = []
    # One plain branch per kernel size ...
    for ks in kernel_str.split(","):
        submodels.append(create_submodel(embedding_layers, submod_layer_descriptor, ks))
    # ... plus one dilated branch per (dilation rate, kernel size) pair ...
    for dr in dilation_rates:
        for ks in kernel_str.split(","):
            submodels.append(create_submodel(embedding_layers, submod_layer_descriptor, ks, dr))
    # ... plus skip-gram branches (several per kernel size, one per mask).
    for sk in skipgrams:
        for ks in kernel_str.split(","):
            skipconv_submodels=(
                create_submodel_with_skipconv1d(embedding_layers, submod_layer_descriptor, int(ks),int(sk)))
            for sm in skipconv_submodels:
                submodels.append(sm)

    # Concatenate all branch outputs into a single tensor.
    submodel_outputs = [model.output for model in submodels]
    if len(submodel_outputs)>1:
        x = Concatenate(axis=1)(submodel_outputs)
    else:
        x=submodel_outputs[0]

    parallel_layers=Model(inputs=embedding_layers[0].input, outputs=x)
    #print("submodel:")
    #parallel_layers.summary()
    #print("\n")

    # Layers following the closing ')' are stacked on top of the branches.
    outter_model_descriptor=model_descriptor[model_descriptor.index(")")+2:]
    big_model = Sequential()
    big_model.add(parallel_layers)
    for layer_descriptor in outter_model_descriptor.split(","):
        ld=layer_descriptor.split("=")

        layer_name=ld[0]
        params=None
        if len(ld)>1:
            params=ld[1].split("-")

        if layer_name=="dropout":
            big_model.add(Dropout(float(params[0])))
        elif layer_name=="lstm":
            if params[1]=="True":
                return_seq=True
            else:
                return_seq=False
            big_model.add(LSTM(units=int(params[0]), return_sequences=return_seq))
        elif layer_name=="gru":
            if params[1]=="True":
                return_seq=True
            else:
                return_seq=False
            big_model.add(GRU(units=int(params[0]), return_sequences=return_seq))
        elif layer_name=="bilstm":
            if params[1]=="True":
                return_seq=True
            else:
                return_seq=False
            big_model.add(Bidirectional(LSTM(units=int(params[0]), return_sequences=return_seq)))
        elif layer_name=="conv1d":
            if len(params)==2:
                big_model.add(Conv1D(filters=int(params[0]),
                                     kernel_size=int(params[1]), padding='same', activation='relu'))
            elif len(params)==3:
                # Third param is the dilation rate.
                print("dilated cnn")
                big_model.add(Conv1D(filters=int(params[0]),
                                     kernel_size=int(params[1]), dilation_rate=int(params[2]),padding='same', activation='relu'))
        elif layer_name=="maxpooling1d":
            big_model.add(MaxPooling1D(pool_size=int(params[0])))
        elif layer_name=="gmaxpooling1d":
            big_model.add(GlobalMaxPooling1D())
        elif layer_name == "dense":
            if len(params) == 2:
                big_model.add(Dense(int(params[0]), activation=params[1]))
            elif len(params) > 2:
                # Optional params[2]/params[3] are kernel/activity regularizer
                # descriptors understood by create_regularizer.
                # NOTE(review): when both are "none" no layer is added at all
                # - presumably unintended; confirm before changing.
                kernel_reg = create_regularizer(params[2])
                activity_reg = create_regularizer(params[3])

                if kernel_reg is not None and activity_reg is None:
                    big_model.add(Dense(int(params[0]), activation=params[1],
                                        kernel_regularizer=kernel_reg))
                elif activity_reg is not None and kernel_reg is None:
                    big_model.add(Dense(int(params[0]), activation=params[1],
                                        activity_regularizer=activity_reg))
                elif activity_reg is not None and kernel_reg is not None:
                    big_model.add(Dense(int(params[0]), activation=params[1],
                                        activity_regularizer=activity_reg,
                                        kernel_regularizer=kernel_reg))
        elif layer_name=="flatten":
            big_model.add(Flatten())

    big_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    #big_model.summary()
    return big_model
def create_submodel(embedding_layers, submod_layer_descriptor, cnn_ks, cnn_dilation=None):
    """Build one convolution branch (embedding + layers from the descriptor).

    `cnn_ks` supplies the kernel size for any conv1d entry (and the pool
    size for maxpooling1d entries whose param is "v"); `cnn_dilation`, when
    given, makes the convolution dilated.  Descriptor entries without "="
    are ignored.
    """
    branch = Sequential()
    if len(embedding_layers) == 1:
        branch.add(embedding_layers[0])
    else:
        concat_embedding_layers(embedding_layers, branch)

    for descriptor in submod_layer_descriptor.split(","):
        if "=" not in descriptor:
            continue
        name, _, param_str = descriptor.partition("=")
        params = param_str.split("-")

        if name == "dropout":
            branch.add(Dropout(float(params[0])))
        elif name == "lstm":
            branch.add(LSTM(units=int(params[0]),
                            return_sequences=(params[1] == "True")))
        elif name == "gru":
            branch.add(GRU(units=int(params[0]),
                           return_sequences=(params[1] == "True")))
        elif name == "bilstm":
            branch.add(Bidirectional(LSTM(units=int(params[0]),
                                          return_sequences=(params[1] == "True"))))
        elif name == "conv1d":
            if cnn_dilation is None:
                branch.add(Conv1D(filters=int(params[0]),
                                  kernel_size=int(cnn_ks),
                                  padding='same', activation='relu'))
            else:
                branch.add(Conv1D(filters=int(params[0]),
                                  kernel_size=int(cnn_ks),
                                  dilation_rate=int(cnn_dilation),
                                  padding='same', activation='relu'))
        elif name == "maxpooling1d":
            # "v" means: pool over the branch's kernel size.
            pool = int(cnn_ks) if params[0] == "v" else int(params[0])
            branch.add(MaxPooling1D(pool_size=pool))
        elif name == "gmaxpooling1d":
            branch.add(GlobalMaxPooling1D())
        elif name == "dense":
            branch.add(Dense(int(params[0]), activation=params[1]))
    return branch
def generate_ks_and_masks(target_cnn_ks, skip):
    """Enumerate skip-gram masks for a `target_cnn_ks`-gram convolution with
    `skip` skipped positions.

    Returns ``[real_kernel_size, masks]`` where
    ``real_kernel_size = target_cnn_ks + skip`` and each mask is a 0/1 list
    of that length containing a single run of `skip` zeros.  Masks whose
    last position is 0 are discarded (their trailing window positions would
    never contribute).
    """
    real_cnn_ks = target_cnn_ks + skip
    masks = []
    for gap_start in range(1, real_cnn_ks):
        gap_end = min(gap_start + skip, real_cnn_ks)
        mask = ([1] * gap_start
                + [0] * (gap_end - gap_start)
                + [1] * (real_cnn_ks - gap_end))
        if mask[-1] != 0:
            masks.append(mask)
    return [real_cnn_ks, masks]
def create_submodel_with_skipconv1d(embedding_layer, submod_layer_descriptor, target_cnn_ks, skip):
    """Create one submodel per valid skip-gram mask for a convolution of
    `target_cnn_ks` grams with `skip` skipped positions.

    Each submodel stacks `embedding_layer` followed by the layers from
    `submod_layer_descriptor`; any conv1d entry becomes a SkipConv1D using
    one of the masks from generate_ks_and_masks.  Returns the list of
    submodels.
    """
    submodels=[]
    ks_and_masks=generate_ks_and_masks(target_cnn_ks, skip)
    for mask in ks_and_masks[1]:
        model = Sequential()
        model.add(embedding_layer)
        for layer_descriptor in submod_layer_descriptor.split(","):
            # Entries ending in "_" are placeholders and are skipped.
            if layer_descriptor.endswith("_"):
                continue
            ld=layer_descriptor.split("=")

            layer_name=ld[0]
            params=None
            if len(ld)>1:
                params=ld[1].split("-")

            if layer_name=="dropout":
                model.add(Dropout(float(params[0])))
            elif layer_name=="lstm":
                if params[1]=="True":
                    return_seq=True
                else:
                    return_seq=False
                model.add(LSTM(units=int(params[0]), return_sequences=return_seq))
            elif layer_name=="gru":
                if params[1]=="True":
                    return_seq=True
                else:
                    return_seq=False
                model.add(GRU(units=int(params[0]), return_sequences=return_seq))
            elif layer_name=="bilstm":
                if params[1]=="True":
                    return_seq=True
                else:
                    return_seq=False
                model.add(Bidirectional(LSTM(units=int(params[0]), return_sequences=return_seq)))
            elif layer_name=="conv1d":
                # Kernel size and mask come from generate_ks_and_masks, not
                # from the descriptor params (only filters is taken from it).
                model.add(SkipConv1D(filters=int(params[0]),
                                     kernel_size=int(ks_and_masks[0]), validGrams=mask,
                                     padding='same', activation='relu'))
            elif layer_name=="maxpooling1d":
                # "v" means: pool over the real (mask) kernel size.
                size=params[0]
                if size=="v":
                    size=int(ks_and_masks[0])
                else:
                    size=int(params[0])
                model.add(MaxPooling1D(pool_size=size))
            elif layer_name=="gmaxpooling1d":
                model.add(GlobalMaxPooling1D())
            elif layer_name=="dense":
                model.add(Dense(int(params[0]), activation=params[1]))
        submodels.append(model)
    return submodels
def create_lstm_type1(embedding_layer):  # start from simple model
    """Baseline LSTM model: dropout -> LSTM(100) -> global max pooling ->
    dropout -> 2-way softmax."""
    descriptor = ("dropout=0.2,lstm=100-True,gmaxpooling1d,"
                  "dropout=0.2,dense=2-softmax")
    return create_model_without_branch(embedding_layer, descriptor)
def create_model_conv_lstm_type1(embedding_layer):
    """CNN+LSTM model: dropout -> Conv1D(100, k=4) -> max pooling ->
    LSTM(100) -> global max pooling -> 2-way softmax."""
    descriptor = ("dropout=0.2,conv1d=100-4,maxpooling1d=4,"
                  "lstm=100-True,gmaxpooling1d,dense=2-softmax")
    return create_model_without_branch(embedding_layer, descriptor)
# A 1D convolution that skips some entries inside its window (skip-gram CNN).
class SkipConv1D(Conv1D):
    """Conv1D variant whose kernel is element-wise masked so that selected
    positions of the n-gram window are ignored.

    validGrams: list of 0/1 flags, one per kernel position; 0 marks a
    skipped position (e.g. [1,1,0,1] skips the third gram of a 4-gram
    window).  Its length must equal the ``kernel_size`` keyword argument.
    """

    #in the init, let's just add a parameter to tell which grams to skip
    def __init__(self, validGrams, **kwargs):
        #for this example, I'm assuming validGrams is a list
        #it should contain zeros and ones, where 0's go on the skip positions
        #example: [1,1,0,1] will skip the third gram in the window of 4 grams
        assert len(validGrams) == kwargs.get('kernel_size')
        self.validGrams = K.reshape(K.constant(validGrams),(len(validGrams),1,1))
        #the chosen shape matches the dimensions of the kernel
        #the first dimension is the kernel size, the others are input and ouptut channels

        #initialize the regular conv layer:
        super(SkipConv1D,self).__init__(**kwargs)

        #here, the filters, size, etc, go inside kwargs, so you should use them named
        #but you may make them explicit in this __init__ definition
        #if you think it's more comfortable to use it like this

    #in the build method, let's replace the original kernel:
    def build(self, input_shape):
        #build as the original layer:
        super(SkipConv1D,self).build(input_shape)

        #replace the kernel: multiplying by the 0/1 mask zeroes the weights
        #at skipped positions (they remain zero through training because the
        #masked product is what the graph uses).
        self.originalKernel = self.kernel
        self.kernel = self.validGrams * self.originalKernel
| 28,293 | 42.866667 | 158 | py |
nussl | nussl-master/nussl/core/migration.py | import torch
import json
from .. import __version__, STFTParams
from ..separation.base import SeparationException
from ..datasets import transforms as tfm
from ..evaluation import BSSEvalV4, BSSEvalScale
class SafeModelLoader(object):
    """
    Loads a nussl model and populates the metadata with defaults (the string
    'UNAVAILABLE') for any expected keys that are missing.
    """
    # Expected topology of the model's metadata as of nussl version 1.1.3.
    # Values are either a type (the stored value must have that type), a
    # nested dict (validated one level deep), or None ('evaluation' is
    # special-cased via _load_eval; see the note in _load_types about
    # nested None entries such as 'seed').
    _v1_1_3_metadata = {
        'config': {
            'connections': list,
            'modules': dict,
            'name': str,
            'output': list
        },
        'evaluation': None,
        'loss_dictionary': dict,
        'num_channels': int,
        'nussl_version': str,
        'optimizer': {
            'name': str,
            'params': dict
        },
        'sample_rate': int,
        'stft_params': STFTParams,
        'train_dataset': {
            'folder': str,
            'name': str,
            'num_channels': int,
            'sample_rate': int,
            'stft_params': STFTParams,
            'transforms': tfm.Compose
        },
        'trainer.state.epoch_history': dict,
        'trainer.state_dict': {
            'epoch': int,
            'epoch_length': int,
            'max_epochs': int,
            'metrics': dict,
            'output': dict,
            'seed': None
        },
        'val_dataset': {
            'folder': str,
            'name': str,
            'num_channels': int,
            'sample_rate': int,
            'stft_params': STFTParams,
            'transforms': tfm.Compose
        },
    }

    # Currently the newest (and only) schema we validate against.
    expected_metadata = _v1_1_3_metadata

    def __init__(self):
        """Initialize the loader.

        ``current_version`` records the running nussl version; ``eval`` is
        set by :meth:`load` to the expected evaluation format name.
        """
        self.current_version = __version__
        self.eval = None

    def load(self, model_path, device='cpu', expected_eval='BssEvalScale'):
        """Drop-in replacement for torch.load() that sanitizes metadata.

        Args:
            model_path (str): Path to a nussl-saved model.
            device (str): map_location forwarded to torch.load.
            expected_eval (str): Either 'BSSEvalScale' or 'BSSEvalV4'
                (case-insensitive). Missing eval keys are populated in the
                format of the chosen method.

        Returns:
            model_dict (dict): the loaded dict with a validated 'metadata'
            entry ('config' re-serialized as a JSON string).
        """
        model_dict = torch.load(model_path, map_location=device)
        metadata = model_dict['metadata']
        metadata = self._get_moved(metadata, model_dict, model_path)
        self.eval = expected_eval
        metadata = self._validate_and_populate(metadata)
        # 'config' is stored back as a JSON string (the on-disk format).
        metadata['config'] = json.dumps(metadata['config'])
        model_dict['metadata'] = metadata
        return model_dict

    @staticmethod
    def _get_moved(metadata, model_dict, model_path):
        """Migrate keys that older nussl versions stored outside 'metadata'.

        Handles 'nussl_version' (either location), a top-level JSON 'config',
        and a top-level 'transforms' entry (moved under 'train_dataset').
        Raises SeparationException when no version can be found at all.
        """
        model_dict_version = model_dict.get('nussl_version', None)
        metadata_version = metadata.get('nussl_version', None)

        # Prefer the version recorded inside metadata when both exist.
        if metadata_version is not None:
            saved_version = metadata_version
        else:
            saved_version = model_dict_version

        if saved_version is None:
            raise SeparationException(f"Failed loading model. Expected to find "
                                      f"'nussl_version' in {model_path}.")
        metadata['nussl_version'] = saved_version

        if 'config' in model_dict:
            metadata['config'] = json.loads(model_dict['config'])

        if 'transforms' in metadata:
            if 'train_dataset' not in metadata:
                metadata['train_dataset'] = {}
            metadata['train_dataset']['transforms'] = metadata['transforms']

        return metadata

    def _load_eval(self, eval_dict):
        """Helper function to load eval dictionary safely.

        Returns {metric_key: {'mean'/'median'/'std': value-or-'UNAVAILABLE'}}
        for every key the chosen eval method (self.eval) reports.
        """
        if self.eval.lower() == 'bssevalv4':
            keys = BSSEvalV4.keys
        else:
            keys = BSSEvalScale.keys
        stats_keys = ['mean', 'median', 'std']

        result = {}
        for k in keys:
            if k not in eval_dict:
                stats = {s: 'UNAVAILABLE' for s in stats_keys}
            else:
                stats = {}
                for s in stats_keys:
                    if s in eval_dict[k]:
                        stats[s] = eval_dict[k][s]
                    else:
                        stats[s] = 'UNAVAILABLE'
            result[k] = stats

        return result

    @staticmethod
    def _load_types(expected_type, key, val):
        """Safe load for values where the value is a type in self.expected_metadata.

        Returns 'UNAVAILABLE' for missing (None) values; raises
        SeparationException on a type mismatch.
        NOTE(review): when expected_type is None (e.g. the nested 'seed'
        entry) any non-None value raises, since type(val) can never equal
        None - confirm this is the intended behavior.
        """
        if val is not None:
            if type(val) != expected_type:
                raise SeparationException(f'Expected type {expected_type} '
                                          f'for key {key} but got {type(val)}!')
            return val
        else:
            return 'UNAVAILABLE'

    def _validate_and_populate(self, received):
        """Safe load for metadata according to the expected metadata.

        Walks self.expected_metadata: 'evaluation' goes through _load_eval,
        type entries through _load_types, and dict entries are validated one
        level deep (missing sub-keys become 'UNAVAILABLE').  Keys present in
        `received` but not in the schema are dropped.
        """
        result = {}
        for key, expected_val in self.expected_metadata.items():
            val = received.get(key, None)
            if key == 'evaluation':
                eval_dict = received.get('evaluation', {})
                result['evaluation'] = self._load_eval(eval_dict)
            elif type(expected_val) == type:
                result[key] = self._load_types(expected_val, key, val)
            elif type(expected_val) == dict:
                next_dict = {} if val is None else val
                sub_result = {}
                for sub_key, type_ in expected_val.items():
                    sub_val = next_dict.get(sub_key, None)
                    sub_result[sub_key] = self._load_types(type_, sub_key, sub_val)
                result[key] = sub_result
        return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.