repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
sampling_cf | sampling_cf-main/pytorch_models/pop_rec.py | from torch_utils import FloatTensor
class PopRec:
    """Non-parametric popularity baseline: every user gets the same item
    scores, equal to each item's global interaction count."""

    def __init__(self, hyper_params, item_count):
        self.hyper_params = hyper_params
        # Row vector of per-item interaction counts, shape [1, total_items].
        counts = [ item_count[item] for item in range(hyper_params['total_items']) ]
        self.top_items = FloatTensor(counts).unsqueeze(0)

    def __call__(self, data, eval = False):
        # Scores are user-independent; only the batch size matters.
        users = data[0]
        batch_size = users.shape[0]
        return self.top_items.repeat(batch_size, 1)

    def eval(self):
        # No-op: nothing to switch into inference mode.
        pass
| 414 | 28.642857 | 112 | py |
sampling_cf | sampling_cf-main/data_loaders/base.py | import torch
import numpy as np
from collections import defaultdict
from torch.multiprocessing import Process, Queue, Event
class CombinedBase:
    """Shared plumbing for the train/test data loaders.

    Provides per-user history construction, padding, negative sampling,
    and a background process that keeps pre-sampled negatives ready so
    training/evaluation never waits on sampling.

    Subclasses are expected to set `num_users`, `num_items`,
    `num_interactions`, and `batch_size`.
    """
    def __init__(self): pass

    def __len__(self):
        # Number of batches yielded by iteration, which steps through
        # range(0, num_interactions, batch_size), i.e. ceil(N / B).
        # The previous `(N // B) + 1` form over-counted by one whenever
        # num_interactions was exactly divisible by batch_size.
        return (self.num_interactions + self.batch_size - 1) // self.batch_size

    def __del__(self):
        # Best-effort shutdown of the background sampler process (if any).
        try:
            self.p.terminate() ; self.p.join()
        except Exception: pass  # `self.p` may not exist; never raise from __del__

    def make_user_history(self, data):
        """Group interacted item-ids per user from (user, item, rating) triples."""
        user_history = [ [] for _ in range(self.num_users) ]
        for u, i, r in data: user_history[u].append(i)
        return user_history

    def pad(self, arr, max_len = None, pad_with = -1, side = 'right'):
        """Pad (and truncate) each list in `arr` in-place to a common length.

        `pad_with == -1` means "repeat the list's last element" (0 for an
        empty list); any other value is used literally. Length is capped
        at 200; when truncating, the last `seq_len` items are kept.
        """
        seq_len = max_len if max_len is not None else max(map(len, arr))
        seq_len = min(seq_len, 200) # You don't need more than this
        for i in range(len(arr)):
            while len(arr[i]) < seq_len:
                pad_elem = arr[i][-1] if len(arr[i]) > 0 else 0
                pad_elem = pad_elem if pad_with == -1 else pad_with
                if side == 'right': arr[i].append(pad_elem)
                else: arr[i] = [ pad_elem ] + arr[i]
            arr[i] = arr[i][-seq_len:] # Keep last `seq_len` items
        return arr

    def sequential_pad(self, arr, hyper_params):
        # Padding left side so that we can simply take out [:, -1, :] in the output.
        # Pads with `total_items`, which acts as the out-of-vocabulary item id.
        return self.pad(
            arr, max_len = hyper_params['max_seq_len'],
            pad_with = hyper_params['total_items'], side = 'left'
        )

    def scatter(self, batch, tensor_kind, last_dimension):
        """Multi-hot encode `batch` (rows of item-ids) into a
        [len(batch), last_dimension] tensor of the given kind."""
        ret = tensor_kind(len(batch), last_dimension).zero_()
        if not torch.is_tensor(batch):
            if ret.is_cuda: batch = torch.cuda.LongTensor(batch)
            else: batch = torch.LongTensor(batch)
        return ret.scatter_(1, batch, 1)

    # NOTE: is_negative(user, item) is a function which tells
    # if the item is a negative item for the user
    def sample_negatives(self, num_points, num_negs, is_negative):
        """Sample `num_negs` negative item-ids for each of `num_points` points."""
        # Sample all the random numbers you need at once as this is much faster than
        # calling random.randint() once everytime
        random_numbers = np.random.randint(
            self.num_items,
            size = int(num_points * num_negs * 1.5)
        )
        negatives, at = [], 0
        for u in range(num_points):
            temp_negatives = []
            while len(temp_negatives) < num_negs:
                ## Negatives not possible: the pre-sampled pool ran out,
                ## fall back to item 0 to keep the output rectangular.
                if at >= len(random_numbers):
                    temp_negatives.append(0)
                    continue
                random_item = random_numbers[at] ; at += 1
                if is_negative(u, random_item):
                    # allowing duplicates, rare possibility
                    temp_negatives.append(random_item)
            negatives.append(temp_negatives)
        return negatives

    # So that training, GPU copying etc.
    # doesn't have to wait for negative sampling
    def init_background_sampler(self, function):
        """Run `function` in a daemon process, pushing results onto
        `self.result_queue`; consumers call `self.event.set()` after each
        `get()` so the sampler produces the next result."""
        self.event = Event()
        self.result_queue = Queue(maxsize=4)
        def sample(result_queue):
            try:
                while True:
                    result_queue.put(function())
                    self.event.wait()
            except KeyboardInterrupt: pass
        self.p = Process(target = sample, args=(self.result_queue, ))
        self.p.daemon = True ; self.p.start()
class BaseTrainDataset(CombinedBase):
    """Common training-set bookkeeping: per-user histories plus the item
    propensity scores used for PSP metrics."""

    def __init__(self, data, hyper_params):
        self.hyper_params = hyper_params
        self.batch_size = hyper_params['batch_size']
        self.implicit_task = hyper_params['task'] in [ 'implicit', 'sequential' ]
        self.data = data
        self.num_users = hyper_params['total_users']
        self.num_items = hyper_params['total_items']
        ## Making user histories because sequential models require this
        self.user_history = self.make_user_history(data)
        ## Set view of the same histories for O(1) membership checks
        self.user_history_set = [ set(history) for history in self.user_history ]
        ## For computing PSP-metrics
        self.item_propensity = self.get_item_propensity()

    def get_item_count_map(self):
        """Map item-id -> number of training interactions with that item."""
        counts = defaultdict(int)
        for _, item, _ in self.data:
            counts[item] += 1
        return counts

    def get_item_propensity(self, A = 0.55, B = 1.5):
        """Per-item propensity weights (flat array of length `num_items`)."""
        freq_by_item = self.get_item_count_map()
        freqs = np.array([ freq_by_item[item] for item in range(self.num_items) ])
        C = (np.log(len(self.data)) - 1) * np.power(B + 1, A)
        return np.ravel(1.0 + C * np.power(freqs + B, -A))
class BaseTestDataset(CombinedBase):
    """Common evaluation-set bookkeeping: per-user train and test
    histories, with validation interactions folded into the train side."""

    def __init__(self, data, train_data, hyper_params, val_data):
        self.hyper_params = hyper_params
        self.batch_size = hyper_params['batch_size']
        self.implicit_task = hyper_params['task'] in [ 'implicit', 'sequential' ]
        self.data = data
        self.train_data = train_data
        self.num_users = hyper_params['total_users']
        self.num_items = hyper_params['total_items']
        ## Making user histories because sequential models require this
        self.train_user_history = self.make_user_history(train_data)
        if val_data is not None:
            # Validation events count as already-seen when evaluating.
            self.val_user_history = self.make_user_history(val_data)
            for user in range(self.num_users):
                self.train_user_history[user] += self.val_user_history[user]
        self.test_user_history = self.make_user_history(data)
        ## Set views of the histories for O(1) membership checks
        self.train_user_history_set = [ set(h) for h in self.train_user_history ]
        self.test_user_history_set = [ set(h) for h in self.test_user_history ]
| 5,688 | 39.347518 | 98 | py |
sampling_cf | sampling_cf-main/data_loaders/SASRec.py | import torch
import numpy as np
from data_loaders.base import BaseTrainDataset, BaseTestDataset
from torch_utils import LongTensor, is_cuda_available
class TrainDataset(BaseTrainDataset):
    """SASRec training loader.

    Yields (input sequence, next-item targets, per-position negatives).
    Negatives are drawn in a background process (see
    `init_background_sampler`) so training never waits on sampling.
    """
    def __init__(self, data, hyper_params, track_events):
        super(TrainDataset, self).__init__(data, hyper_params)
        self.shuffle_allowed = not track_events
        # Left-padded histories -> tensor of shape [num_users, max_seq_len].
        self.user_history = LongTensor(self.sequential_pad(self.user_history, hyper_params))
        self.num_interactions = self.num_users
        # Pre-sample negatives per user, per sequence position, per negative:
        # reshaped to [num_users, max_seq_len - 1, num_train_negs].
        self.init_background_sampler(
            lambda : torch.LongTensor(self.sample_negatives(
                self.num_users, (self.hyper_params['max_seq_len'] - 1) * self.hyper_params['num_train_negs'],
                lambda point, random_neg: random_neg not in self.user_history_set[point]
            )).view(self.num_users, self.hyper_params['max_seq_len'] - 1, self.hyper_params['num_train_negs'])
        )
    def __iter__(self):
        # Important for optimal and stable performance
        indices = np.arange(self.num_interactions)
        if self.shuffle_allowed:
            np.random.shuffle(indices)
        temp = self.user_history[indices]
        # Take the pre-sampled negatives, then signal the background process
        # (which blocks on `event.wait()`) to produce the next batch.
        negatives = self.result_queue.get()[indices]
        if is_cuda_available: negatives = negatives.cuda()
        self.event.set()
        for u in range(0, self.num_interactions, self.batch_size):
            sequence = temp[u:u+self.batch_size] # self.user_history
            # Next-item prediction: input is all but the last position,
            # target is the sequence shifted by one.
            x = sequence[:, :-1]
            y = sequence[:, 1:]
            neg = negatives[u:u+self.batch_size, :, :]
            yield [ x, y, neg ], y
class TestDataset(BaseTestDataset):
    """SASRec evaluation loader.

    Yields per-user batches of (train history, test positives, sampled
    negatives). With `partial_eval` (validation only) negatives are torch
    tensors for sampled ranking; otherwise numpy arrays for sampled AUC.
    """
    def __init__(self, data, train_data, hyper_params, val_data = None, test_set = False):
        super(TestDataset, self).__init__(data, train_data, hyper_params, val_data)
        self.test_set = test_set
        # Padding for easier scattering
        self.train_user_history_full = list(map(lambda x: LongTensor(x), self.train_user_history))
        self.train_user_history = LongTensor(self.sequential_pad(self.train_user_history, hyper_params))
        self.test_user_history = LongTensor(self.pad(self.test_user_history))
        # Total number of interactions (one evaluation row per user)
        self.num_interactions = self.num_users
        # Sampled ranking only outside the final test set.
        self.partial_eval = (not self.test_set) and self.hyper_params['partial_eval']
        def one_sample():
            # A valid negative is unseen in BOTH train and test history.
            negatives = self.sample_negatives(
                self.num_users, self.hyper_params['num_test_negs'],
                lambda point, random_neg: random_neg not in self.train_user_history_set[point] and \
                    random_neg not in self.test_user_history_set[point]
            )
            if self.partial_eval: negatives = torch.LongTensor(negatives) # Sampled ranking
            else: negatives = np.array(negatives) # Sampled AUC
            return negatives
        self.init_background_sampler(one_sample)
    def __iter__(self):
        # Take pre-sampled negatives and let the background sampler continue.
        negatives = self.result_queue.get() ; self.event.set()
        if self.partial_eval and is_cuda_available: negatives = negatives.cuda()
        for u in range(0, self.num_interactions, self.batch_size):
            train_positive = self.train_user_history[u:u+self.batch_size]
            train_positive_full = self.train_user_history_full[u:u+self.batch_size]
            test_positive = self.test_user_history[u:u+self.batch_size]
            test_positive_set = self.test_user_history_set[u:u+self.batch_size]
            test_negative = negatives[u:u+self.batch_size]
            yield [ train_positive, test_positive if self.partial_eval else None, test_negative ], [
                train_positive_full,
                test_positive_set,
            ]
| 3,743 | 44.108434 | 110 | py |
sampling_cf | sampling_cf-main/data_loaders/SVAE.py | import torch
import numpy as np
from data_loaders.base import BaseTrainDataset, BaseTestDataset
from torch_utils import LongTensor, FloatTensor, is_cuda_available
class TrainDataset(BaseTrainDataset):
    """SVAE training loader.

    For each sequence position the target is a multi-hot vector over the
    next `num_next` items, built via scatter / scatter_add.
    """
    def __init__(self, data, hyper_params, track_events):
        super(TrainDataset, self).__init__(data, hyper_params)
        self.shuffle_allowed = not track_events
        # Left-padded histories -> tensor of shape [num_users, max_seq_len].
        self.user_history = LongTensor(self.sequential_pad(self.user_history, hyper_params))
        self.num_interactions = self.num_users
    def __iter__(self):
        # Important for optimal and stable performance
        indices = np.arange(self.num_interactions)
        if self.shuffle_allowed: np.random.shuffle(indices)
        temp = self.user_history[indices]
        for u in range(0, self.num_interactions, self.batch_size):
            sequence = temp[u:u+self.batch_size]
            x = sequence[:, :-1]
            y = sequence[:, 1:]
            # One-hot of the immediate next item; the extra (num_items+1)-th
            # column absorbs the `num_items` padding id.
            y_true_s = FloatTensor(y.shape[0], y.shape[1], self.num_items+1).zero_().scatter_(
                -1, y.unsqueeze(-1), 1.0
            )
            for k in range(2, self.hyper_params['num_next'] + 1):
                # Also mark the k-th next item; pad the tail with the
                # padding id so shapes line up.
                padding = torch.zeros(y.shape[0], k-1, dtype = torch.int32).fill_(self.num_items)
                if is_cuda_available: padding = padding.cuda()
                positions_to_add = torch.cat([ sequence[:, k:], padding ], dim = -1).unsqueeze(-1)
                y_true_s.scatter_add_(
                    -1, positions_to_add, torch.ones_like(positions_to_add).float()
                )
            # Drop the padding column before handing targets to the model.
            yield [ x, None, None ], [ y_true_s[:, :, :-1], y ]
class TestDataset(BaseTestDataset):
    """SVAE evaluation loader. The model scores all items itself, so the
    sampled negatives are only needed for AUC computation."""
    def __init__(self, data, train_data, hyper_params, val_data = None, test_set = False):
        super(TestDataset, self).__init__(data, train_data, hyper_params, val_data)
        self.test_set = test_set
        # Padding for easier scattering
        self.train_user_history_full = list(map(lambda x: LongTensor(x), self.train_user_history))
        self.train_user_history = LongTensor(self.sequential_pad(self.train_user_history, hyper_params))
        # Total number of interactions (one evaluation row per user)
        self.num_interactions = self.num_users
        # Background sampler; a valid negative is unseen in both the train
        # and the test history of the user.
        self.init_background_sampler(
            lambda : np.array(self.sample_negatives(
                self.num_users, self.hyper_params['num_test_negs'],
                lambda point, random_neg: random_neg not in self.train_user_history_set[point] and \
                    random_neg not in self.test_user_history_set[point]
            ))
        )
    def __iter__(self):
        ## No sampled ranking required as model by default needs to compute score over all items
        ## Will only be used for AUC computation
        negatives = self.result_queue.get() ; self.event.set()
        for u in range(0, self.num_interactions, self.batch_size):
            train_positive = self.train_user_history[u:u+self.batch_size]
            train_positive_full = self.train_user_history_full[u:u+self.batch_size]
            test_positive_set = self.test_user_history_set[u:u+self.batch_size]
            test_negative = negatives[u:u+self.batch_size]
            yield [ train_positive, None, test_negative ], [
                train_positive_full,
                test_positive_set,
            ]
| 3,329 | 43.4 | 104 | py |
sampling_cf | sampling_cf-main/data_loaders/MVAE.py | import numpy as np
from data_loaders.base import BaseTrainDataset, BaseTestDataset
from torch_utils import LongTensor, FloatTensor
class TrainDataset(BaseTrainDataset):
    """MVAE training loader: yields multi-hot user vectors that serve as
    both model input and reconstruction target."""

    def __init__(self, data, hyper_params, track_events):
        super(TrainDataset, self).__init__(data, hyper_params)
        self.shuffle_allowed = not track_events
        # Rectangular history matrix so fancy-indexing by user works.
        self.user_history = np.array(self.pad(self.user_history))
        self.num_interactions = self.num_users

    def __iter__(self):
        # Shuffling users each epoch matters for stable training.
        order = np.arange(self.num_interactions)
        if self.shuffle_allowed:
            np.random.shuffle(order)
        shuffled = self.user_history[order].tolist()
        for start in range(0, self.num_interactions, self.batch_size):
            rows = shuffled[start:start + self.batch_size]
            # Multi-hot [batch, num_items]; input and target coincide.
            x_and_y = self.scatter(rows, FloatTensor, self.num_items)
            yield [ x_and_y, None, None ], x_and_y
class TestDataset(BaseTestDataset):
    """MVAE evaluation loader. The model scores all items itself, so the
    sampled negatives are only used for AUC computation."""
    def __init__(self, data, train_data, hyper_params, val_data = None, test_set = False):
        super(TestDataset, self).__init__(data, train_data, hyper_params, val_data)
        self.test_set = test_set
        # Padding for easier scattering
        self.train_user_history_full = list(map(lambda x: LongTensor(x), self.train_user_history))
        self.train_user_history = self.pad(self.train_user_history)
        # One evaluation row per user.
        self.num_interactions = self.num_users
        # Background sampler; a valid negative is unseen in both the train
        # and the test history of the user.
        self.init_background_sampler(
            lambda : np.array(self.sample_negatives(
                self.num_users, self.hyper_params['num_test_negs'],
                lambda point, random_neg: random_neg not in self.train_user_history_set[point] and \
                    random_neg not in self.test_user_history_set[point]
            ))
        )
    def __iter__(self):
        ## No sampled ranking required as model by default needs to compute score over all items
        ## Will only be used for AUC computation
        negatives = self.result_queue.get() ; self.event.set()
        for u in range(0, self.num_interactions, self.batch_size):
            train_positive_full = self.train_user_history_full[u:u+self.batch_size]
            train_positive = self.train_user_history[u:u+self.batch_size]
            test_positive_set = self.test_user_history_set[u:u+self.batch_size]
            test_negative = negatives[u:u+self.batch_size]
            # Model input is the multi-hot train history of each user.
            yield [ self.scatter(train_positive, FloatTensor, self.num_items), None, test_negative ], [
                train_positive_full,
                test_positive_set,
            ]
| 2,616 | 44.12069 | 104 | py |
sampling_cf | sampling_cf-main/data_loaders/MF.py | import torch
import numpy as np
from data_loaders.base import BaseTrainDataset, BaseTestDataset
from torch_utils import LongTensor, FloatTensor, is_cuda_available
class TrainDataset(BaseTrainDataset):
    """Matrix-factorization training loader over individual (user, item,
    rating) interactions. For implicit tasks, negatives come from a
    background sampler process."""
    def __init__(self, data, hyper_params, track_events):
        super(TrainDataset, self).__init__(data, hyper_params)
        self.shuffle_allowed = not track_events
        # Copying ENTIRE dataset to GPU
        self.users_cpu = list(map(lambda x: x[0], data))
        self.users = LongTensor(self.users_cpu)
        self.items = LongTensor(list(map(lambda x: x[1], data)))
        self.ratings = FloatTensor(list(map(lambda x: x[2], data)))
        self.num_interactions = len(data)
        # One negative set per interaction; a negative is any item absent
        # from that interaction's user history.
        self.init_background_sampler(
            lambda : torch.LongTensor(self.sample_negatives(
                len(self.data), self.hyper_params['num_train_negs'],
                lambda point, random_neg: random_neg not in self.user_history_set[self.users_cpu[point]]
            ))
        )
    def __iter__(self):
        # Important for optimal and stable performance
        indices = np.arange(self.num_interactions)
        if self.shuffle_allowed: np.random.shuffle(indices)
        temp_users = self.users[indices] ; temp_items = self.items[indices] ; temp_ratings = self.ratings[indices]
        if self.implicit_task:
            # Take pre-sampled negatives, then let the sampler continue.
            negatives = self.result_queue.get()[indices]
            if is_cuda_available: negatives = negatives.cuda()
            self.event.set()
        for i in range(0, self.num_interactions, self.batch_size):
            yield [
                temp_users[i:i+self.batch_size],
                temp_items[i:i+self.batch_size].unsqueeze(-1),
                negatives[i:i+self.batch_size] if self.implicit_task else None,
            ], temp_ratings[i:i+self.batch_size]
class TestDataset(BaseTestDataset):
    """Matrix-factorization evaluation loader.

    Implicit task: yields one row per user with train/test positives and
    sampled negatives (torch tensors when `partial_eval` ranking is used,
    numpy arrays for sampled AUC). Explicit task: yields the raw
    (user, item, rating) triples instead.
    """
    def __init__(self, data, train_data, hyper_params, val_data = None, test_set = False):
        super(TestDataset, self).__init__(data, train_data, hyper_params, val_data)
        self.test_set = test_set
        if self.implicit_task:
            # Padding for easier scattering
            self.test_user_history = LongTensor(self.pad(self.test_user_history))
            self.train_user_history = list(map(lambda x: LongTensor(x), self.train_user_history))
            # Copying all user-IDs to GPU
            self.all_users = LongTensor(list(range(self.num_users)))
            # Sampled ranking only outside the final test set.
            self.partial_eval = (not test_set) and hyper_params['partial_eval']
            def one_sample():
                # A valid negative is unseen in both train and test history.
                negatives = self.sample_negatives(
                    self.num_users, self.hyper_params['num_test_negs'],
                    lambda point, random_neg: random_neg not in self.train_user_history_set[point] and \
                        random_neg not in self.test_user_history_set[point]
                )
                if self.partial_eval: negatives = torch.LongTensor(negatives) # Sampled ranking
                else: negatives = np.array(negatives) # Sampled AUC
                return negatives
            self.init_background_sampler(one_sample)
        else:
            self.users = LongTensor(list(map(lambda x: x[0], data)))
            self.items = LongTensor(list(map(lambda x: x[1], data)))
            self.ratings = FloatTensor(list(map(lambda x: x[2], data)))
        # Implicit evaluation iterates users; explicit iterates interactions.
        self.num_interactions = self.num_users if self.implicit_task else len(data)
    def __iter__(self):
        if self.implicit_task:
            # Take pre-sampled negatives and let the sampler continue.
            negatives = self.result_queue.get() ; self.event.set()
            if self.partial_eval and is_cuda_available: negatives = negatives.cuda()
        for u in range(0, self.num_interactions, self.batch_size):
            if self.implicit_task:
                batch = self.all_users[u:u+self.batch_size]
                train_positive = self.train_user_history[u:u+self.batch_size]
                test_positive = self.test_user_history[u:u+self.batch_size]
                test_positive_set = self.test_user_history_set[u:u+self.batch_size]
                test_negative = negatives[u:u+self.batch_size]
                yield [ batch, test_positive if self.partial_eval else None, test_negative ], [
                    train_positive,
                    test_positive_set,
                ]
            else:
                yield [
                    self.users[u:u+self.batch_size],
                    self.items[u:u+self.batch_size].unsqueeze(-1),
                    None,
                ], self.ratings[u:u+self.batch_size]
| 4,575 | 43.862745 | 114 | py |
neuralTPPs | neuralTPPs-master/debug/cumulative_attention.py | import torch as th
import matplotlib.pyplot as plt
from torch import nn
from pprint import pprint
from tqdm import tqdm
from tpp.models.base.enc_dec import EncDecProcess
from tpp.models.encoders.mlp_variable import MLPVariableEncoder
from tpp.models.decoders.self_attention_cm import SelfAttentionCmDecoder
from tpp.models.decoders.mlp_cm import MLPCmDecoder
from tpp.utils.events import get_events, get_window
# Debug script: build a tiny TPP (MLP encoder + cumulative MLP decoder),
# query it on a fixed window, and plot both the cumulative intensity and
# the intensity itself, with event times marked in red.
th.manual_seed(0)
times = th.Tensor([1, 2, 6]).float().reshape(1, -1)
query = th.linspace(start=0.0, end=10.1, steps=10).float().reshape(1, -1)
window_start, window_end = get_window(times=times, window=10.)
events = get_events(
    times=times,
    mask=th.ones_like(times),
    window_start=window_start,
    window_end=window_end)
# Alternative decoder kept for reference:
# dec = SelfAttentionCmDecoder(
#     encoding="temporal",
#     units_mlp=[32, 1],
#     constraint_mlp="nonneg",
#     activation_final_mlp="softplus",
#     attn_activation="sigmoid")
dec = MLPCmDecoder(
    encoding="times_only",
    units_mlp=[32, 1],
    constraint_mlp="nonneg",
    activation_final_mlp="softplus",
    attn_activation="sigmoid")
enc = MLPVariableEncoder(units_mlp=[dec.input_size], encoding="marks_only")
process = EncDecProcess(encoder=enc, decoder=dec)
(_, _, _, artifacts) = process.artifacts(query=query, events=events)
intensity_integrals = artifacts["decoder"]["intensity_integrals"]
#
# Optional training loop, kept for reference:
# optimiser = th.optim.Adam(params=process.parameters())
# for _ in tqdm(range(1000)):
#     optimiser.zero_grad()
#     nll, nll_mask, _ = process.neg_log_likelihood(events=events)
#     nll = nll * nll_mask
#     nll = sum(nll) / sum(nll_mask)
#     nll.backward()
#     optimiser.step()
#     print(float(nll.detach().cpu().numpy()))
# dict(process.named_parameters())
# Plot the cumulative intensity at the unmasked query points.
intensity_integrals = artifacts["decoder"]["intensity_integrals"]
intensity_mask = artifacts["decoder"]["intensity_mask"]
x = query.detach().cpu().numpy()
x = x[intensity_mask != 0]
y = intensity_integrals.detach().cpu().numpy()
y = y[intensity_mask != 0]
plt.figure()
plt.plot(x.reshape(-1), y.reshape(-1))
for t in times.reshape(-1):
    plt.axvline(x=t, color="red")
plt.title("cumulative intensity")
plt.show()
# Plot the intensity itself at the same query points.
intensity, intensity_mask = process.intensity(query=query, events=events)
x = query.detach().cpu().numpy()
x = x[intensity_mask != 0]
y = intensity.detach().cpu().numpy()
y = y[intensity_mask != 0]
plt.plot(x.reshape(-1), y.reshape(-1))
for t in times.reshape(-1):
    plt.axvline(x=t, color="red")
plt.title("intensity")
plt.show()
| 2,491 | 30.544304 | 75 | py |
neuralTPPs | neuralTPPs-master/debug/batchnorm.py | import torch as th
from torch import nn
from tpp.pytorch.layers import NonNegLinear
from tpp.pytorch.layers import BatchNorm1d
def multidim_grad(a, b):
    """Gradient of each final-dim slice of `a` w.r.t. `b`, stacked on a new
    trailing dimension (result shape: b.shape + [a.shape[-1]])."""
    slices = th.split(a, split_size_or_sections=1, dim=-1)
    per_dim = []
    for piece in slices:
        grad = th.autograd.grad(
            outputs=piece,
            inputs=b,
            grad_outputs=th.ones_like(piece),
            retain_graph=True,
            create_graph=True)[0]
        per_dim.append(grad)
    return th.stack(per_dim, dim=-1)
# Sanity check: the custom BatchNorm1d must match torch.nn.BatchNorm1d.
th.manual_seed(0)
dense1 = NonNegLinear(1, 3, bias=True)
batchnorm1 = nn.BatchNorm1d(3)
batchnorm2 = BatchNorm1d(3)
# 2-D input case.
x = th.rand(10, 1)
y1 = batchnorm1(dense1(x))
y2 = batchnorm2(dense1(x))
assert th.allclose(y1, y2, rtol=1.e-3)
# 3-D input case; also check the `normalise_over_final` variant on the
# transposed layout (it should equal the default after transposing back).
x = th.rand(10, 4, 1)
batchnorm1 = nn.BatchNorm1d(4)
batchnorm2 = BatchNorm1d(4)
batchnorm3 = BatchNorm1d(4, normalise_over_final=True)
y1 = batchnorm1(dense1(x))
y2 = batchnorm2(dense1(x))
y3 = batchnorm3(dense1(x).transpose(1, 2))
assert th.allclose(y1, y2, rtol=1.e-3)
assert th.allclose(y1, y3.transpose(1, 2), rtol=1.e-3)
| 1,043 | 22.2 | 59 | py |
neuralTPPs | neuralTPPs-master/debug/layernorm.py | import torch as th
from torch import nn
from tpp.pytorch.layers import LayerNorm
from debug.batchnorm import multidim_grad
# Compare the custom LayerNorm against torch.nn.LayerNorm. This is a
# REPL-style debug script: the bare expressions below exist only for
# interactive inspection of outputs, variances, and gradients.
th.manual_seed(0)
pytorch_norm = nn.LayerNorm(3)
my_norm = LayerNorm(3, use_running_stats=True)
x = th.rand([3]).reshape(1, -1).float().repeat(2, 1)
x.requires_grad = True
pytorch_y = pytorch_norm(x)
my_y = my_norm(x)
# assert th.allclose(pytorch_y, my_y, rtol=1.e-3)
pytorch_y
my_y
pytorch_y.var(-1)
my_y.var(-1)
multidim_grad(pytorch_y, x)
multidim_grad(my_y, x)
x = th.rand(10, 2, 4)
norm1 = nn.LayerNorm(4)
# Shape-preservation check across a grid of input sizes.
for i in range(1,10):
    for j in range(1, 10):
        for k in range(1, 10):
            norm = LayerNorm(k, use_running_stats=True)
            for _ in range(3):
                x = th.rand(i, j, k)
                y = norm(x)
                assert list(x.shape) == list(y.shape)
| 831 | 19.8 | 55 | py |
neuralTPPs | neuralTPPs-master/debug/regression.py | import numpy as np
import torch as th
import torch.nn
import matplotlib.pyplot as plt
from tpp.pytorch.models import MLP
def detach(x: th.Tensor) -> np.ndarray:
    """Detach `x` from the autograd graph, move it to CPU, and return it
    as a NumPy array."""
    tensor_on_cpu = x.detach().cpu()
    return tensor_on_cpu.numpy()
# Debug script: fit an MLP to the intensity of a 1-D process with
# exponentially decaying event kernels. When `cumulative` is True the MLP
# models the *integrated* intensity and the intensity is recovered by
# differentiating the network output w.r.t. its input.
th.manual_seed(0)
x_min, x_max, steps = 0., 100., 3000
alpha = 1.
beta = 1.
n_events = 20
epochs = 1000
cumulative = True
units = [32, 32, 32, 1]
# Pick `n_events` random event times off the grid.
x_train = th.linspace(x_min, x_max, steps=steps)
events = th.randperm(steps)[:n_events]
events = x_train[events]
events = th.sort(events).values
x_train = x_train[x_train > th.min(events)]
# Ground-truth intensity: sum of decaying kernels, one per past event.
y_train = th.zeros_like(x_train)
for e in events:
    delta_t = x_train - e
    intensity_delta = alpha * th.exp(-beta * delta_t)
    intensity_delta[delta_t <= 0] = th.zeros_like(intensity_delta[
        delta_t <= 0])
    y_train = y_train + intensity_delta
# For each grid point, find the time of the most recent preceding event.
prev_times = th.zeros_like(x_train)
for i, t in enumerate(x_train):
    prev_time = t - events
    prev_time[prev_time < 0] = x_max + 1.
    prev_time = th.argmin(prev_time)
    prev_time = events[prev_time]
    prev_times[i] = prev_time
tau = x_train - prev_times  # time since the last event: the model input
# The cumulative model uses a monotone (non-negative weight) network.
activations, constraint, activation_final = "relu", None, None
if cumulative:
    (activations, constraint,
     activation_final) = "gumbel", "nonneg", "parametric_softplus"
mlp = MLP(
    units=units,
    input_shape=1,
    activations=activations,
    constraint=constraint,
    activation_final=activation_final)
optimiser = th.optim.Adam(params=mlp.parameters(), lr=1.e-3)
mse = torch.nn.MSELoss()
tau_r = tau.reshape(-1, 1)
y_train_r = y_train.reshape(-1, 1)
if cumulative:
    # Inputs need gradients so d(output)/d(tau) can be taken below.
    tau_r.requires_grad = True
for i in range(epochs):
    optimiser.zero_grad()
    y_pred = mlp(tau_r)
    if cumulative:
        # Intensity = derivative of the cumulative intensity w.r.t. tau.
        y_pred = th.autograd.grad(
            y_pred, tau_r,
            grad_outputs=th.ones_like(y_pred),
            retain_graph=True,
            create_graph=True)[0]
    loss = mse(y_train_r, y_pred)
    loss = th.sum(loss)
    loss.backward()
    optimiser.step()
    print("epoch: {} loss: {}".format(i, float(loss)))
# Overlay the fitted intensity on the ground truth.
plt.figure(figsize=(18, 4))
plt.plot(x_train, y_train, label="true")
plt.plot(x_train, detach(y_pred), label="pred")
plt.xlabel("time")
plt.ylabel("intensity")
plt.legend()
plt.show()
| 2,239 | 23.086022 | 68 | py |
neuralTPPs | neuralTPPs-master/profiling/r_terms_for_pytorch_profile.py | import torch as th
from tpp.processes.hawkes.r_terms_recursive_v import get_r_terms
from tpp.utils.test import get_test_events_query
def run_test():
    """Build a small random marked-event batch and run `get_r_terms` once;
    intended to be executed under a profiler."""
    marks = 3
    events, query = get_test_events_query(marks=marks)
    beta = th.rand([marks, marks])
    get_r_terms(events=events, beta=beta)

if __name__ == '__main__':
    run_test()
| 343 | 19.235294 | 64 | py |
neuralTPPs | neuralTPPs-master/profiling/get_r_terms_profile.py | import time
import matplotlib.pyplot as plt
import numpy as np
import torch as th
# from tpp.processes.hawkes.r_terms import get_r_terms as naive
from tpp.processes.hawkes.r_terms_recursive import get_r_terms as recursive
from tpp.processes.hawkes.r_terms_recursive_v import get_r_terms as recursive_v
from tpp.utils.test import get_test_events_query
def get_times(batch_size=1, marks=1, seq_len=16, n_queries=16, n_iters=100):
    """Average per-call wall-clock time of the r-term implementations.

    Returns (naive, recursive, recursive_v, recursive_v_cuda) in seconds;
    the naive entry is always None (its timing is commented out) and the
    CUDA entry is None when no GPU is available.
    """
    events, _ = get_test_events_query(
        marks=marks, batch_size=batch_size, max_seq_len=seq_len,
        queries=n_queries)
    beta = th.rand([marks, marks], dtype=th.float32).to(th.device("cpu"))
    # t1 = time.time()
    # for _ in range(n_iters):
    #     naive(events=events, beta=beta)
    # t1 = time.time() - t1
    # t1 = t1 / n_iters
    t2 = time.time()
    for _ in range(n_iters):
        recursive(events=events, beta=beta)
    t2 = time.time() - t2
    t2 = t2 / n_iters
    t3 = time.time()
    for _ in range(n_iters):
        recursive_v(events=events, beta=beta)
    t3 = time.time() - t3
    t3 = t3 / n_iters
    if th.cuda.is_available():
        # Rebuild the inputs on the GPU and time the vectorised version there.
        events, query = get_test_events_query(
            marks=marks, batch_size=batch_size, max_seq_len=seq_len,
            queries=n_queries, device=th.device("cuda"))
        beta = beta.to(th.device("cuda"))
        t4 = time.time()
        for _ in range(n_iters):
            recursive_v(events=events, beta=beta)
        t4 = time.time() - t4
        t4 = t4 / n_iters
    else:
        t4 = None
    return None, t2, t3, t4
def main():
    """Plot r-term query time vs sequence length for a grid of batch sizes."""
    seq_lens = np.arange(3, 6)
    seq_lens = np.power(2, seq_lens)  # sequence lengths 8, 16, 32
    batch_sizes = np.arange(0, 9)
    batch_sizes = np.power(2, batch_sizes)  # batch sizes 1 .. 256
    fig, ax = plt.subplots(
        nrows=3,
        ncols=3,
        figsize=[7, 7])
    # One subplot per batch size.
    for b, a in zip(batch_sizes, ax.flatten()):
        times = {
            seq_len: get_times(
                batch_size=int(b), seq_len=int(seq_len), n_iters=10)
            for seq_len in seq_lens}
        naive, recursive, recursive_v, recursive_v_cuda = zip(*times.values())
        # a.plot(seq_lens, naive, label="naive")
        a.plot(seq_lens, recursive, label="recursive")
        a.plot(seq_lens, recursive_v, label="recursive_v")
        if th.cuda.is_available():
            a.plot(seq_lens, recursive_v_cuda, label="recursive_v_cuda")
        a.legend()
        a.set_xlabel("Sequence length")
        a.set_ylabel("Query time")
        a.set_yscale("log")
        a.set_title("batch size {}".format(b))
    fig.show()

if __name__ == "__main__":
    main()
| 2,568 | 28.872093 | 79 | py |
neuralTPPs | neuralTPPs-master/profiling/get_prev_times_profile.py | import time
import torch as th
import numpy as np
import matplotlib.pyplot as plt
from tpp.utils.events import get_events, get_window
from tpp.utils.history import get_prev_times
from tpp.utils import history_bst
def get_test_events_query(
        batch_size=16, seq_len=16, n_queries=16, device=th.device('cpu'),
        dtype=th.float32):
    """Build a random single-mark event batch plus fixed Hawkes-style
    parameters for profiling.

    Returns (marks, query, events, prev_times, is_event, alpha, beta, mu).
    """
    marks = 1
    padding_id = -1.
    # Uniform random times in (0.01, 1); no value can equal the padding id,
    # so the mask below is all ones.
    times = np.random.uniform(
        low=0.01, high=1., size=[batch_size, seq_len]).astype(np.float32)
    query = np.random.uniform(
        low=0.01, high=1., size=[batch_size, n_queries]).astype(np.float32)
    mask = times != padding_id
    times, query = th.from_numpy(times), th.from_numpy(query)
    times, query = times.type(dtype), query.type(dtype)
    mask = th.from_numpy(mask).type(times.dtype)
    times, query, mask = times.to(device), query.to(device), mask.to(device)
    window_start, window_end = get_window(times=times, window=1.)
    events = get_events(
        times=times,
        mask=mask,
        window_start=window_start,
        window_end=window_end)
    (prev_times, _), is_event, _ = get_prev_times(
        query=query, events=events, allow_window=True)
    # Fixed 1x1 Hawkes parameters (alpha, beta) and baseline mu.
    alpha = th.from_numpy(np.array([[0.1]], dtype=np.float32))
    beta = th.from_numpy(np.array([[1.0]], dtype=np.float32))
    mu = th.from_numpy(np.array([0.05], dtype=np.float32))
    return marks, query, events, prev_times, is_event, alpha, beta, mu
def get_times(batch_size=1, seq_len=16, n_queries=16, n_iters=100):
    """Average per-call time of `get_prev_times`: baseline implementation
    vs the `history_bst` implementation (and the latter on GPU when one
    is available).

    Returns (normal, bst, bst_cuda); `bst_cuda` is None without a GPU.
    """
    (marks, query, events, prev_times,
     is_event, alpha, beta, mu) = get_test_events_query(
        batch_size=batch_size, seq_len=seq_len, n_queries=n_queries)
    t1 = time.time()
    for _ in range(n_iters):
        get_prev_times(query=events.times, events=events)
    t1 = time.time() - t1
    t1 = t1 / n_iters
    t2 = time.time()
    for _ in range(n_iters):
        history_bst.get_prev_times(query=events.times, events=events)
    t2 = time.time() - t2
    t2 = t2 / n_iters
    if th.cuda.is_available():
        # Rebuild the inputs on the GPU and time the BST version there.
        (marks, query, events, prev_times,
         is_event, alpha, beta, mu) = get_test_events_query(
            batch_size=batch_size, seq_len=seq_len, n_queries=n_queries,
            device=th.device("cuda"))
        t3 = time.time()
        for _ in range(n_iters):
            history_bst.get_prev_times(query=events.times, events=events)
        t3 = time.time() - t3
        t3 = t3 / n_iters
    else:
        t3 = None
    return t1, t2, t3
def main():
    """Plot `get_prev_times` timings vs sequence length for several batch sizes."""
    from importlib import reload
    reload(history_bst)  # pick up live edits when re-running interactively
    seq_lens = np.arange(3, 6)
    seq_lens = np.power(2, seq_lens)  # sequence lengths 8, 16, 32
    batch_sizes = np.arange(0, 9)
    batch_sizes = np.power(2, batch_sizes)  # batch sizes 1 .. 256
    fig, ax = plt.subplots(
        nrows=3,
        ncols=3,
        figsize=[7, 7])
    # One subplot per batch size.
    for b, a in zip(batch_sizes, ax.flatten()):
        times = {
            seq_len: get_times(
                batch_size=b, seq_len=seq_len, n_iters=20)
            for seq_len in seq_lens}
        normal_time, bst_time, bst_cuda_time = zip(*times.values())
        a.plot(seq_lens, normal_time, label="normal")
        a.plot(seq_lens, bst_time, label="bst")
        if th.cuda.is_available():
            a.plot(seq_lens, bst_cuda_time, label="bst_cuda_32")
        a.legend()
        a.set_xlabel("Sequence length")
        a.set_ylabel("Query time")
        a.set_yscale("log")
        a.set_title("batch size {}".format(b))
    fig.show()

if __name__ == "__main__":
    main()
| 3,472 | 29.464912 | 76 | py |
neuralTPPs | neuralTPPs-master/scripts/evaluate.py | import json
import numpy as np
import os
import torch as th
from argparse import ArgumentParser, Namespace
from distutils.util import strtobool
from pathlib import Path
from scripts.train import evaluate
from tpp.processes.multi_class_dataset import MultiClassDataset as Dataset
from tpp.utils.data import get_loader
from tpp.utils.plot import log_figures
def parse_args():
    """Parse CLI arguments for evaluating a saved model.

    If `--load-from-dir` is given, arguments stored in that directory's
    `args.json` override the CLI values (printing a warning per change).
    The Hawkes parameters `mu`/`alpha`/`beta` are reshaped into arrays, or
    randomly generated when `--marks` is set explicitly.

    Returns:
        Namespace with all run options; `device` is always CPU here and
        `verbose` is forced to False.
    """
    parser = ArgumentParser(allow_abbrev=False)
    # Model dir
    parser.add_argument("--model-dir", type=str, required=True,
                        help="Directory of the saved model")
    # Run configuration
    parser.add_argument("--seed", type=int, default=0, help="The random seed.")
    # FIX: --hawkes-seed was read below (np.random.seed(args.hawkes_seed))
    # but never registered, raising AttributeError whenever --marks was set.
    parser.add_argument("--hawkes-seed", type=int, default=0,
                        help="The random seed used to generate the Hawkes "
                             "parameters when --marks is set.")
    parser.add_argument("--padding-id", type=float, default=-1.,
                        help="The value used in the temporal sequences to "
                             "indicate a non-event.")
    # Simulator configuration
    parser.add_argument("--mu", type=float, default=[0.05, 0.05],
                        nargs="+", metavar='N',
                        help="The baseline intensity for the data generator.")
    parser.add_argument("--alpha", type=float, default=[0.1, 0.2, 0.2, 0.1],
                        nargs="+", metavar='N',
                        help="The event parameter for the data generator. "
                             "This will be reshaped into a matrix the size of "
                             "[mu,mu].")
    parser.add_argument("--beta", type=float, default=[1.0, 1.0, 1.0, 1.0],
                        nargs="+", metavar='N',
                        help="The decay parameter for the data generator. "
                             "This will be reshaped into a matrix the size of "
                             "[mu,mu].")
    parser.add_argument("--marks", type=int, default=None,
                        help="Generate a process with this many marks. "
                             "Defaults to `None`. If this is set to an "
                             "integer, it will override `alpha`, `beta` and "
                             "`mu` with randomly generated values "
                             "corresponding to the number of requested marks.")
    parser.add_argument("--window", type=int, default=100,
                        help="The window of the simulated process.py. Also "
                             "taken as the window of any parametric Hawkes "
                             "model if chosen.")
    parser.add_argument("--val-size", type=int, default=128,
                        help="The number of unique sequences in each of the "
                             "validation dataset.")
    parser.add_argument("--test-size", type=int, default=128,
                        help="The number of unique sequences in each of the "
                             "test dataset.")
    # Common model hyperparameters
    parser.add_argument("--batch-size", type=int, default=32,
                        help="The batch size to use for parametric model"
                             " training and evaluation.")
    parser.add_argument("--time-scale", type=float, default=1.,
                        help='Time scale used to prevent overflow')
    parser.add_argument("--multi-labels",
                        type=lambda x: bool(strtobool(x)), default=False,
                        help="Whether the likelihood is computed on "
                             "multi-labels events or not")
    # Metrics
    parser.add_argument("--eval-metrics",
                        type=lambda x: bool(strtobool(x)), default=False,
                        help="The model is evaluated using several metrics")
    parser.add_argument("--eval-metrics-per-class",
                        type=lambda x: bool(strtobool(x)), default=False,
                        help="The model is evaluated using several metrics "
                             "per class")
    # Dirs
    parser.add_argument("--load-from-dir", type=str, default=None,
                        help="If not None, load data from a directory")
    parser.add_argument("--plots-dir", type=str,
                        default="~/neural-tpps/plots",
                        help="Directory to save the plots")
    parser.add_argument("--data-dir", type=str, default="~/neural-tpps/data",
                        help="Directory to save the preprocessed data")
    args, _ = parser.parse_known_args()

    if args.marks is None:
        # Explicit parameters: alpha and beta become [marks, marks] matrices.
        args.mu = np.array(args.mu, dtype=np.float32)
        args.alpha = np.array(args.alpha, dtype=np.float32).reshape(
            args.mu.shape * 2)
        args.beta = np.array(args.beta, dtype=np.float32).reshape(
            args.mu.shape * 2)
        args.marks = len(args.mu)
    else:
        # Randomly generated parameters for the requested number of marks.
        np.random.seed(args.hawkes_seed)
        args.mu = np.random.uniform(
            low=0.01, high=0.2, size=[args.marks]).astype(dtype=np.float32)
        args.alpha = np.random.uniform(
            low=0.01, high=0.2, size=[args.marks] * 2).astype(dtype=np.float32)
        args.beta = np.random.uniform(
            low=1.01, high=1.3, size=[args.marks] * 2).astype(dtype=np.float32)
        args.mu /= float(args.marks)
        args.alpha /= float(args.marks)

    if args.load_from_dir is not None:
        args.data_dir = os.path.expanduser(args.data_dir)
        args.save_dir = os.path.join(args.data_dir, args.load_from_dir)
        with open(os.path.join(args.save_dir, 'args.json'), 'r') as fp:
            args_dict_json = json.load(fp)
        args_dict = vars(args)
        print("Warning: overriding some args from json:")
        shared_keys = set(args_dict_json).intersection(set(args_dict))
        for k in shared_keys:
            v1, v2 = args_dict[k], args_dict_json[k]
            is_equal = np.allclose(v1, v2) if isinstance(
                v1, np.ndarray) else v1 == v2
            if not is_equal:
                print(f" {k}: {v1} -> {v2}")
        args_dict.update(args_dict_json)
        args = Namespace(**args_dict)
        args.mu = np.array(args.mu, dtype=np.float32)
        args.alpha = np.array(
            args.alpha, dtype=np.float32).reshape(
            args.mu.shape * 2)
        args.beta = np.array(
            args.beta, dtype=np.float32).reshape(
            args.mu.shape * 2)
    else:
        args.data_dir = os.path.expanduser(args.data_dir)
        args.save_dir = os.path.join(args.data_dir, "None")
        Path(args.save_dir).mkdir(parents=True, exist_ok=True)

    args.device = th.device('cpu')  # evaluation always runs on CPU here
    args.verbose = False
    return args
def main(args: Namespace):
    """Load a saved model, rebuild the test set, dump the diagnostic
    figures, then print the evaluation metrics."""
    loaded_model = th.load(args.model_dir, map_location=args.device)
    test_set = Dataset(
        args=args, size=args.test_size, seed=args.seed, name="test")
    test_loader = get_loader(test_set, args=args, shuffle=False)
    log_figures(
        model=loaded_model,
        test_loader=test_loader,
        args=args,
        save_on_mlflow=False)
    print(evaluate(model=loaded_model, args=args, loader=test_loader))
# Script entry point: parse CLI args then evaluate the saved model.
if __name__ == "__main__":
    parsed_args = parse_args()
    main(args=parsed_args)
| 7,022 | 43.732484 | 79 | py |
neuralTPPs | neuralTPPs-master/scripts/train.py | import mlflow
import mlflow.pytorch
import imageio
import json
import numpy as np
import os
import stat
import time
import torchvision
import torch as th
from torch.optim import Adam
from torch.utils.data import DataLoader
from argparse import Namespace
from copy import deepcopy
from typing import Dict, Tuple, Optional
from pathlib import Path
from tqdm import tqdm
from tpp.utils.events import get_events, get_window
from tpp.utils.mlflow import params_log_dict, get_epoch_str, log_metrics
from tpp.models import get_model
from tpp.models.base.process import Process
from tpp.utils.cli import parse_args
from tpp.utils.metrics import eval_metrics
from tpp.utils.plot import log_figures
from tpp.utils.data import get_loader, load_data
from tpp.utils.logging import get_status
from tpp.utils.lr_scheduler import create_lr_scheduler
from tpp.utils.run import make_deterministic
from tpp.utils.stability import check_tensor
# NOTE(review): presumably works around torchvision builds that lack
# __version__ (read during mlflow.pytorch model logging) — confirm.
torchvision.__version__ = '0.4.0'
def get_loss(
        model: Process,
        batch: Dict[str, th.Tensor],
        args: Namespace,
        eval_metrics: bool = False,
        dynamic_batch_length: bool = True,
) -> Tuple[th.Tensor, th.Tensor, Dict]:
    """Compute the per-sequence negative log likelihood of one batch.

    Args:
        model: Process whose `neg_log_likelihood` is evaluated.
        batch: Padded batch with keys "times", "labels" and "seq_lens".
        args: Uses `padding_id`, `time_scale`, `window` and `multi_labels`.
        eval_metrics: If True, also compute label predictions at the event
            times and return them in `artifacts` under 'y_pred', 'y_true'
            and 'y_pred_mask'.
        dynamic_batch_length: If True, truncate the batch to its longest
            actual sequence before building events.

    Returns:
        loss: Negative log likelihood per sequence, shape [B].
        loss_mask: Mask of valid sequences, shape [B].
        artifacts: Extra model outputs (plus predictions when
            `eval_metrics` is set).
    """
    times, labels = batch["times"], batch["labels"]
    # Binarise label counts into {0,1} indicators.
    labels = (labels != 0).type(labels.dtype)
    if dynamic_batch_length:
        seq_lens = batch["seq_lens"]
        max_seq_len = seq_lens.max()
        times, labels = times[:, :max_seq_len], labels[:, :max_seq_len]
    mask = (times != args.padding_id).type(times.dtype)
    times = times * args.time_scale  # rescale times to prevent overflow
    window_start, window_end = get_window(times=times, window=args.window)
    events = get_events(
        times=times, mask=mask, labels=labels,
        window_start=window_start, window_end=window_end)
    loss, loss_mask, artifacts = model.neg_log_likelihood(events=events)  # [B]
    if eval_metrics:
        events_times = events.get_times()
        log_p, y_pred_mask = model.log_density(
            query=events_times, events=events)  # [B,L,M], [B,L]
        if args.multi_labels:
            # Multi-label: keep full per-mark densities and multi-hot targets.
            y_pred = log_p  # [B,L,M]
            labels = events.labels
        else:
            # Single-label: argmax over marks for both prediction and target.
            y_pred = log_p.argmax(-1).type(log_p.dtype)  # [B,L]
            labels = events.labels.argmax(-1).type(events.labels.dtype)
        artifacts['y_pred'] = y_pred
        artifacts['y_true'] = labels
        artifacts['y_pred_mask'] = y_pred_mask
    return loss, loss_mask, artifacts
def detach(x: th.Tensor):
    """Return `x` as a NumPy array: moved to host memory and cut from the
    autograd graph."""
    host_tensor = x.cpu()
    return host_tensor.detach().numpy()
def evaluate(model: Process, args: Namespace, loader: DataLoader
             ) -> Dict[str, float]:
    """Evaluate a model on a specific dataset.

    Args:
        model: The model to evaluate.
        args: Arguments for evaluation
        loader: The loader corresponding to the dataset to evaluate on.

    Returns:
        Dictionary containing all metrics evaluated and averaged over total
        sequences.
    """
    model.eval()
    t0, epoch_loss, epoch_loss_per_time, n_seqs = time.time(), 0., 0., 0.
    pred_labels, gold_labels, mask_labels = [], [], []
    results, count = {}, 0
    for batch in tqdm(loader) if args.verbose else loader:
        loss, loss_mask, artifacts = get_loss(  # [B]
            model, batch=batch, eval_metrics=args.eval_metrics, args=args,
            # For eval, use padded data for metrics evaluation
            dynamic_batch_length=False)
        # NOTE(review): evaluation is capped at ~val_size sequences even for
        # the train/test loaders, and the batch computed just above is
        # discarded when the cap is hit — confirm this is intended.
        if count == int(args.val_size / args.batch_size):
            break
        count += 1
        loss = loss * loss_mask  # [B]
        epoch_loss += detach(th.sum(loss))
        # Also track the loss normalised by each sequence's observed interval.
        loss_per_time = loss / artifacts["interval"]
        epoch_loss_per_time += detach(th.sum(loss_per_time))
        n_seqs_batch = detach(th.sum(loss_mask))
        n_seqs += n_seqs_batch
        if args.eval_metrics:
            pred_labels.append(detach(artifacts['y_pred']))
            gold_labels.append(detach(artifacts['y_true']))
            mask_labels.append(detach(artifacts['y_pred_mask']))
    if args.eval_metrics:
        results = eval_metrics(
            pred=pred_labels,
            gold=gold_labels,
            mask=mask_labels,
            results=results,
            n_class=args.marks,
            multi_labels=args.multi_labels)
    dur = time.time() - t0
    results["dur"] = dur
    results["loss"] = float(epoch_loss / n_seqs)
    results["loss_per_time"] = float(epoch_loss_per_time / n_seqs)
    return results
def train(
        model: Process,
        args: Namespace,
        loader: DataLoader,
        val_loader: DataLoader,
        test_loader: DataLoader) -> Tuple[Process, dict]:
    """Train a model with early stopping on the validation loss.

    Args:
        model: Model to be trained.
        args: Arguments for training.
        loader: The dataset for training.
        val_loader: The dataset for evaluation.
        test_loader: The dataset for testing

    Returns:
        Tuple of (best model from early stopping, dict of image urls
        collected while logging figures to mlflow).
    """
    if args.include_poisson:
        # Give the Poisson background process its own learning rate; every
        # other sub-process plus the mixing weights alpha share the main one.
        processes = model.processes.keys()
        modules = []
        for p in processes:
            if p != 'poisson':
                modules.append(getattr(model, p))
        optimizer = Adam(
            [{'params': m.parameters()} for m in modules] + [
                {'params': model.alpha}] + [
                {'params': model.poisson.parameters(),
                 'lr': args.lr_poisson_rate_init}
            ], lr=args.lr_rate_init)
    else:
        optimizer = Adam(model.parameters(), lr=args.lr_rate_init)
    lr_scheduler = create_lr_scheduler(optimizer=optimizer, args=args)

    parameters = dict(model.named_parameters())
    lr_wait, cnt_wait, best_loss, best_epoch = 0, 0, 1e9, 0
    best_state = deepcopy(model.state_dict())

    train_dur, val_dur, images_urls = list(), list(), dict()
    images_urls['intensity'] = list()
    images_urls['src_attn'] = list()
    images_urls['tgt_attn'] = list()

    epochs = range(args.train_epochs)
    if args.verbose:
        epochs = tqdm(epochs)
    for epoch in epochs:
        t0, _ = time.time(), model.train()
        if args.lr_scheduler != 'plateau':
            lr_scheduler.step()
        # The loop index was unused; iterate the batches directly.
        for batch in (tqdm(loader) if args.verbose else loader):
            optimizer.zero_grad()
            loss, loss_mask, _ = get_loss(model, batch=batch, args=args)  # [B]
            loss = loss * loss_mask
            loss = th.sum(loss)
            check_tensor(loss)
            loss.backward()
            optimizer.step()
        train_dur.append(time.time() - t0)

        train_metrics = evaluate(model, args=args, loader=loader)
        val_metrics = evaluate(model, args=args, loader=val_loader)
        val_dur.append(val_metrics["dur"])
        if args.lr_scheduler == 'plateau':
            lr_scheduler.step(metrics=val_metrics["loss"])

        new_best = val_metrics["loss"] < best_loss
        if args.loss_relative_tolerance is not None:
            # Only accept a new best when the relative improvement exceeds
            # the numerical tolerance.
            abs_rel_loss_diff = (val_metrics["loss"] - best_loss) / best_loss
            abs_rel_loss_diff = abs(abs_rel_loss_diff)
            above_numerical_tolerance = (abs_rel_loss_diff >
                                         args.loss_relative_tolerance)
            new_best = new_best and above_numerical_tolerance

        if new_best:
            # FIX: previously assigned to a stray name `best_t`; the
            # initialised `best_epoch` was never updated.
            best_loss, best_epoch = val_metrics["loss"], epoch
            cnt_wait, lr_wait = 0, 0
            best_state = deepcopy(model.state_dict())
        else:
            cnt_wait, lr_wait = cnt_wait + 1, lr_wait + 1

        if cnt_wait == args.patience:
            print("Early stopping!")
            break

        # FIX: this previously read the module-level global `parsed_args`,
        # which raises NameError whenever `train` is imported and called
        # from another module (e.g. scripts.evaluate). Use the local `args`.
        if epoch % args.save_model_freq == 0 and args.use_mlflow:
            # Temporarily swap in the best weights so the checkpoint and
            # figures reflect the best model so far, then restore.
            current_state = deepcopy(model.state_dict())
            model.load_state_dict(best_state)
            epoch_str = get_epoch_str(epoch=epoch,
                                      max_epochs=args.train_epochs)
            mlflow.pytorch.log_model(model, "models/epoch_" + epoch_str)
            images_urls = log_figures(
                model=model,
                test_loader=test_loader,
                epoch=epoch,
                args=args,
                images_urls=images_urls)
            model.load_state_dict(current_state)

        lr = optimizer.param_groups[0]['lr']
        train_metrics["lr"] = lr
        if args.include_poisson:
            lr_poisson = optimizer.param_groups[-1]['lr']
        else:
            lr_poisson = lr
        status = get_status(
            args=args, epoch=epoch, lr=lr, lr_poisson=lr_poisson,
            parameters=parameters, train_loss=train_metrics["loss"],
            val_metrics=val_metrics, cnt_wait=cnt_wait)
        print(status)

        if args.use_mlflow and epoch % args.logging_frequency == 0:
            loss_metrics = {
                "lr": train_metrics["lr"],
                "train_loss": train_metrics["loss"],
                "train_loss_per_time": train_metrics["loss_per_time"],
                "valid_loss": val_metrics["loss"],
                "valid_loss_per_time": val_metrics["loss_per_time"]}
            log_metrics(
                model=model,
                metrics=loss_metrics,
                val_metrics=val_metrics,
                args=args,
                epoch=epoch)

    model.load_state_dict(best_state)
    return model, images_urls
def main(args: Namespace):
    """End-to-end run: build loaders, train the model, then evaluate every
    split and (optionally) log everything to mlflow."""
    if args.verbose:
        print(args)
    datasets = load_data(args=args)
    shuffle_flags = {"train": True, "val": False, "test": False}
    loaders = {
        split: get_loader(datasets[split], args=args, shuffle=do_shuffle)
        for split, do_shuffle in shuffle_flags.items()}
    model = get_model(args)
    if args.mu_cheat and "poisson" in model.processes:
        # Initialise the Poisson baseline rate at the generator's true mu.
        poisson = model.processes["poisson"].decoder
        true_mu = th.from_numpy(args.mu).type(
            poisson.mu.dtype).to(poisson.mu.device)
        poisson.mu.data = true_mu
    model, _ = train(
        model, args=args, loader=loaders["train"],
        val_loader=loaders["val"], test_loader=loaders["test"])
    metrics = {
        split: evaluate(model=model, args=args, loader=split_loader)
        for split, split_loader in loaders.items()}
    if args.verbose:
        print(metrics)
    if args.use_mlflow:
        loss_metrics = {
            "train_loss": metrics["train"]["loss"],
            "train_loss_per_time": metrics["train"]["loss_per_time"],
            "valid_loss": metrics["test"]["loss"],
            "valid_loss_per_time": metrics["test"]["loss_per_time"]}
        log_metrics(
            model=model,
            metrics=loss_metrics,
            val_metrics=metrics["test"],
            args=args,
            epoch=args.train_epochs)
        mlflow.pytorch.log_model(model, "models")
# Script entry point: resolve args (possibly overriding from a saved
# args.json), pick the device, make the run deterministic, prepare plot
# directories, start mlflow if requested, then run `main`.
if __name__ == "__main__":
    parsed_args = parse_args()
    if parsed_args.load_from_dir is not None:
        # Re-running on a pre-processed dataset: the args stored alongside
        # the data override whatever was passed on the command line.
        parsed_args.data_dir = os.path.expanduser(parsed_args.data_dir)
        parsed_args.save_dir = os.path.join(parsed_args.data_dir,
                                            parsed_args.load_from_dir)
        with open(os.path.join(parsed_args.save_dir, 'args.json'), 'r') as fp:
            args_dict_json = json.load(fp)
        args_dict = vars(parsed_args)
        print("Warning: overriding some args from json:")
        shared_keys = set(args_dict_json).intersection(set(args_dict))
        for k in shared_keys:
            v1, v2 = args_dict[k], args_dict_json[k]
            is_equal = np.allclose(v1, v2) if isinstance(
                v1, np.ndarray) else v1 == v2
            if not is_equal:
                print(f" {k}: {v1} -> {v2}")
        args_dict.update(args_dict_json)
        parsed_args = Namespace(**args_dict)
        # mu is [M]; alpha and beta are reshaped to [M, M].
        parsed_args.mu = np.array(parsed_args.mu, dtype=np.float32)
        parsed_args.alpha = np.array(
            parsed_args.alpha, dtype=np.float32).reshape(
            parsed_args.mu.shape * 2)
        parsed_args.beta = np.array(
            parsed_args.beta, dtype=np.float32).reshape(
            parsed_args.mu.shape * 2)
    else:
        parsed_args.data_dir = os.path.expanduser(parsed_args.data_dir)
        parsed_args.save_dir = os.path.join(parsed_args.data_dir, "None")
        Path(parsed_args.save_dir).mkdir(parents=True, exist_ok=True)
    cuda = th.cuda.is_available() and not parsed_args.disable_cuda
    if cuda:
        parsed_args.device = th.device('cuda')
    else:
        parsed_args.device = th.device('cpu')
    # check_repo(allow_uncommitted=not parsed_args.use_mlflow)
    make_deterministic(seed=parsed_args.seed)
    # Create paths for plots
    parsed_args.plots_dir = os.path.expanduser(parsed_args.plots_dir)
    Path(parsed_args.plots_dir).mkdir(parents=True, exist_ok=True)
    Path(os.path.join(parsed_args.plots_dir, "src_attn")).mkdir(
        parents=True, exist_ok=True)
    Path(os.path.join(parsed_args.plots_dir, "tgt_attn")).mkdir(
        parents=True, exist_ok=True)
    Path(os.path.join(parsed_args.plots_dir, "intensity")).mkdir(
        parents=True, exist_ok=True)
    if parsed_args.use_mlflow:
        mlflow.set_tracking_uri(parsed_args.remote_server_uri)
        mlflow.set_experiment(parsed_args.experiment_name)
        mlflow.start_run(run_name=parsed_args.run_name)
        params_to_log = params_log_dict(parsed_args)
        mlflow.log_params(params_to_log)
    main(args=parsed_args)
| 13,355 | 33.511628 | 79 | py |
neuralTPPs | neuralTPPs-master/tests/test_nll.py | import numpy as np
import torch as th
from tpp.processes.hawkes import neg_log_likelihood_old as nll_old
from tpp.processes.hawkes import neg_log_likelihood as nll_new
from tpp.utils.keras_preprocessing.sequence import pad_sequences
def test_nll():
    """The batched Hawkes NLL must agree with the per-sequence reference."""
    num_sequences, alpha, mu = 10, 0.7, 0.1
    padding_value, window = -1., 100
    lengths = [np.random.randint(low=1, high=10)
               for _ in range(num_sequences)]
    sequences = [th.sort(th.rand(size=[n])).values for n in lengths]
    # Reference: evaluate the old implementation one sequence at a time.
    reference = th.stack(
        [nll_old(mu=mu, alpha=alpha, points=seq, window=window)
         for seq in sequences], dim=0)
    padded = th.from_numpy(pad_sequences(
        sequences, padding="post", dtype=np.float32, value=padding_value))
    mask = (padded != padding_value).type(padded.dtype)
    batched = nll_new(
        mu=mu, alpha=alpha,
        sequences_padded=padded, sequence_mask=mask,
        window=window)
    assert np.allclose(reference, batched)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_nll()
| 1,095 | 27.102564 | 74 | py |
neuralTPPs | neuralTPPs-master/tests/test_intensity.py | import numpy as np
import torch as th
from tpp.processes.hawkes import intensity_old as intensity_old
from tpp.processes.hawkes import intensity_at_t as intensity_new
from tpp.processes.hawkes import intensity_at_times
from tpp.utils.keras_preprocessing.sequence import pad_sequences
def test_intensity():
    """Batched intensity at a single time must match the per-sequence
    reference implementation."""
    num_sequences, alpha, mu = 10, 0.7, 0.1
    padding_value, query_time = -1., 0.5
    lengths = [np.random.randint(low=1, high=10)
               for _ in range(num_sequences)]
    sequences = [th.sort(th.rand(size=[n])).values for n in lengths]
    # Reference: evaluate the old implementation one sequence at a time.
    reference = th.stack(
        [intensity_old(mu=mu, alpha=alpha, points=seq, t=query_time)
         for seq in sequences], dim=0)
    padded = th.from_numpy(pad_sequences(
        sequences, padding="post", dtype=np.float32, value=padding_value))
    mask = (padded != padding_value).type(padded.dtype)
    batched = intensity_new(
        mu=mu, alpha=alpha,
        sequences_padded=padded, mask=mask, t=query_time)
    assert np.allclose(reference, batched)
def test_intensity_general():
    """Vectorised intensity over padded query times must match looping the
    single-time implementation over every (batch, time) pair.

    Note the query times are padded with -1 as well; the reference loop
    evaluates the intensity at those padding positions too, so the
    comparison also covers them.
    """
    n_seq = 10
    my_alpha = 0.7
    my_mu = 0.1
    pad_id = -1.
    my_seq_sizes = [np.random.randint(low=1, high=10) for _ in range(n_seq)]
    my_time_sizes = [np.random.randint(low=1, high=5) for _ in range(n_seq)]
    my_seqs = [th.sort(th.rand(size=[s])).values for s in my_seq_sizes]
    my_times = [th.sort(th.rand(size=[s])).values for s in my_time_sizes]
    my_seqs_padded = pad_sequences(
        my_seqs, padding="post", dtype=np.float32, value=pad_id)
    my_seqs_padded = th.from_numpy(my_seqs_padded)
    my_times_padded = pad_sequences(
        my_times, padding="post", dtype=np.float32, value=pad_id)
    my_times_padded = th.from_numpy(my_times_padded)
    my_seq_mask = (my_seqs_padded != pad_id).type(
        my_seqs_padded.dtype)  # B x L
    # Reference: evaluate the single-time batched intensity at each query
    # time separately, picking out batch element b every time.
    intensity_1 = [[intensity_new(
        mu=my_mu, alpha=my_alpha,
        sequences_padded=my_seqs_padded, mask=my_seq_mask, t=t)[b]
        for t in times] for b, times in enumerate(my_times_padded)]
    intensity_1 = np.array(intensity_1)
    intensity_2 = intensity_at_times(
        mu=my_mu, alpha=my_alpha,
        sequences_padded=my_seqs_padded, sequence_mask=my_seq_mask,
        times=my_times_padded)
    assert np.allclose(intensity_1, intensity_2)
# Allow running both intensity tests directly as a script.
if __name__ == "__main__":
    test_intensity()
    test_intensity_general()
| 2,487 | 30.897436 | 79 | py |
neuralTPPs | neuralTPPs-master/tests/processes/test_hawkes_fast_custom.py | import torch as th
from tpp.utils.history_bst import get_prev_times
from tpp.processes.hawkes_fast import decoder_fast
from tpp.processes.hawkes_slow import decoder_slow
from tpp.utils.events import get_events, get_window
def get_fast_slow_results():
    """Run the fast and slow Hawkes decoders on one fixed, hand-written
    two-mark example (two real events plus padding) and return both outputs.

    Returns:
        Tuple of (fast results, slow results), each as returned by the
        decoder: (log intensity [B,T,M], intensity integral, mask [B,T],
        artifacts).
    """
    padding_id = -1.
    times = th.Tensor([[1, 2, -1., -1.]]).type(th.float32)
    labels = th.Tensor([[1, 0, 0, 1]]).type(th.long)
    mask = (times != padding_id).type(times.dtype).to(times.device)
    marks = 2
    query = th.Tensor([[2.5, 7.]]).type(th.float32)
    window_start, window_end = get_window(times=times, window=4.)
    beta = th.Tensor([2, 1, 1, 3]).reshape(marks, marks).float()
    alpha = th.Tensor([1, 2, 1, 1]).reshape(marks, marks).float()
    # NOTE(review): the small offset on mu (3.00001 rather than 3) is
    # presumably there to dodge a numerical edge case — confirm.
    mu = th.zeros(size=[marks], dtype=th.float32) + 3.00001
    events = get_events(
        times=times, mask=mask, labels=labels,
        window_start=window_start, window_end=window_end, marks=2)
    (prev_times, _), is_event, _ = get_prev_times(
        query=query, events=events, allow_window=True)
    results_fast = decoder_fast(
        events=events,
        query=query,
        prev_times=prev_times,
        is_event=is_event,
        alpha=alpha,
        beta=beta,
        mu=mu,
        marks=marks)
    results_slow = decoder_slow(
        events=events,
        query=query,
        prev_times=prev_times,
        is_event=is_event,
        alpha=alpha,
        beta=beta,
        mu=mu,
        marks=marks)
    return results_fast, results_slow
def test_fast_intensity():
    """Fast and slow Hawkes decoders must agree on the fixed example."""
    fast, slow = get_fast_slow_results()
    log_int_fast, integral_fast, mask_fast, _ = fast  # [B,T,M], [B,T]
    log_int_slow, integral_slow, mask_slow, _ = slow  # [B,T,M], [B,T]
    assert th.all(mask_fast == mask_slow), "Masks do not match"
    # Compare only the masked (valid) positions.
    assert th.allclose(
        log_int_fast * mask_fast.unsqueeze(dim=-1),
        log_int_slow * mask_slow.unsqueeze(dim=-1),
        atol=1.e-5), "Intensities do not match."
    assert th.allclose(
        integral_fast * mask_fast.unsqueeze(dim=-1),
        integral_slow * mask_slow.unsqueeze(dim=-1),
        atol=1.e-5), "Intensity integrals do not match."
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_fast_intensity()
| 2,388 | 28.493827 | 68 | py |
neuralTPPs | neuralTPPs-master/tests/processes/test_r_terms.py | import torch as th
from tpp.processes.hawkes.r_terms import get_r_terms as get_r_terms_n
from tpp.processes.hawkes.r_terms_recursive import get_r_terms as get_r_terms_r
from tpp.processes.hawkes.r_terms_recursive_v import get_r_terms as get_r_terms_v
from tpp.utils.test import get_test_events_query
def test_r_terms(device=th.device("cpu")):
    """Naive, recursive and vectorised r-term computations must agree on a
    large random batch."""
    th.manual_seed(3)
    n_marks, n_seqs = 12, 23
    longest_seq = 138
    n_queries = 13
    events, _ = get_test_events_query(
        marks=n_marks, batch_size=n_seqs, max_seq_len=longest_seq,
        queries=n_queries, device=device)
    decay = th.rand([n_marks, n_marks], dtype=th.float32).to(device)
    naive = get_r_terms_n(events=events, beta=decay)
    recursive = get_r_terms_r(events=events, beta=decay)
    vectorised = get_r_terms_v(events=events, beta=decay)
    assert th.allclose(
        naive, recursive), (
        "The r term computational approaches do not match.")
    assert th.allclose(
        naive, vectorised), (
        "The r term vector computational approaches do not match.")
# Allow running directly; repeat on CUDA when a GPU is available.
if __name__ == "__main__":
    test_r_terms()
    if th.cuda.is_available():
        test_r_terms(device=th.device("cuda"))
| 1,238 | 33.416667 | 81 | py |
neuralTPPs | neuralTPPs-master/tests/processes/test_hawkes_fast.py | import torch as th
from tpp.utils.history_bst import get_prev_times
from tpp.utils.index import unravel_index
from tpp.utils.test import get_test_events_query
from tpp.processes.hawkes_fast import decoder_fast
from tpp.processes.hawkes_slow import decoder_slow
def get_fast_slow_results(
        queries=1, marks=2, max_seq_len=10, batch_size=1,
        dtype=th.float32):
    """Run the fast and slow Hawkes decoders on one random batch with
    random parameters and return both outputs.

    Args:
        queries: Number of query times per batch element.
        marks: Number of marks.
        max_seq_len: Maximum sequence length of the generated events.
        batch_size: Number of sequences in the batch.
        dtype: Tensor dtype for events and parameters.

    Returns:
        Tuple of (fast results, slow results), each as returned by the
        decoder: (log intensity [B,T,M], intensity integral, mask [B,T],
        artifacts).
    """
    events, query = get_test_events_query(
        marks=marks, batch_size=batch_size, max_seq_len=max_seq_len,
        queries=queries, dtype=dtype)
    alpha = th.rand([marks, marks], dtype=dtype)
    beta = th.rand([marks, marks], dtype=dtype)
    mu = th.rand([marks], dtype=dtype)
    (prev_times, _), is_event, _ = get_prev_times(
        query=query, events=events, allow_window=True)
    results_fast = decoder_fast(
        events=events,
        query=query,
        prev_times=prev_times,
        is_event=is_event,
        alpha=alpha,
        beta=beta,
        mu=mu,
        marks=marks)
    results_slow = decoder_slow(
        events=events,
        query=query,
        prev_times=prev_times,
        is_event=is_event,
        alpha=alpha,
        beta=beta,
        mu=mu,
        marks=marks)
    return results_fast, results_slow
def test_fast_intensity():
    """Fast and slow Hawkes decoders must produce matching masked
    intensities and intensity integrals on a random example.

    Note the integral comparison uses a looser tolerance (1e-3) than the
    pointwise intensities (1e-5).
    """
    results_fast, results_slow = get_fast_slow_results()
    log_int_f, int_itg_f, mask_f, _ = results_fast  # [B,T,M], [B,T]
    log_int_s, int_itg_s, mask_s, _ = results_slow  # [B,T,M], [B,T]
    assert th.all(mask_f == mask_s), "Masks do not match"
    masked_intensity_fast = log_int_f * mask_f.unsqueeze(dim=-1)
    masked_intensity_slow = log_int_s * mask_s.unsqueeze(dim=-1)
    assert th.allclose(
        masked_intensity_fast,
        masked_intensity_slow,
        atol=1.e-5), "Intensities do not match."
    masked_intensity_itg_fast = int_itg_f * mask_f.unsqueeze(dim=-1)
    masked_intensity_itg_slow = int_itg_s * mask_s.unsqueeze(dim=-1)
    # FIX: removed a leftover debugger anchor (`a = 0`) that sat here.
    assert th.allclose(
        masked_intensity_itg_fast,
        masked_intensity_itg_slow,
        atol=1.e-3), (
        "Intensity integrals do not match. "
        "Max abs difference: {} occurs at {}".format(
            th.max(th.abs(masked_intensity_itg_fast -
                          masked_intensity_itg_slow)),
            unravel_index(
                th.argmax(
                    th.abs(masked_intensity_itg_fast -
                           masked_intensity_itg_slow)),
                shape=masked_intensity_itg_fast.shape)))
if __name__ == "__main__":
    # A brute-force seed search (previously left here commented out) was
    # apparently used to find a seed that exposed disagreement between the
    # decoders; 9943 is kept as a fixed regression seed.
    th.manual_seed(9943)
    test_fast_intensity()
| 2,648 | 29.102273 | 68 | py |
neuralTPPs | neuralTPPs-master/tests/processes/test_r_terms_custom.py | import torch as th
from tpp.processes.hawkes.r_terms import get_r_terms as get_r_terms_n
from tpp.processes.hawkes.r_terms_recursive import get_r_terms as get_r_terms_r
from tpp.processes.hawkes.r_terms_recursive_v import get_r_terms as get_r_terms_v
from tpp.utils.events import get_events, get_window
def test_setup():
    """Build a tiny fixed two-mark example: three events, a window of 4,
    and a single query time at 2.5.

    NOTE(review): despite the `test_` prefix this is a fixture helper that
    returns values — pytest will collect it as a test and warn about the
    non-None return; consider renaming (its caller below needs updating
    together with it).
    """
    padding_id = -1.
    times = th.Tensor([[1, 2, 3]]).type(th.float32)
    labels = th.Tensor([[1, 0, 0]]).type(th.long)
    mask = (times != padding_id).type(times.dtype).to(times.device)
    window_start, window_end = get_window(times=times, window=4.)
    events = get_events(
        times=times, mask=mask, labels=labels,
        window_start=window_start, window_end=window_end, marks=2)
    query = th.Tensor([[2.5]]).type(th.float32)
    return events, query
def test_r_terms():
    """All three r-term implementations must agree on the fixed example."""
    th.manual_seed(3)
    events, query = test_setup()
    decay = th.Tensor([[1., 1.], [1., 1.]]).type(th.float32)
    naive = get_r_terms_n(events=events, beta=decay)[0]
    recursive = get_r_terms_r(events=events, beta=decay)[0]
    vectorised = get_r_terms_v(events=events, beta=decay)[0]
    assert th.allclose(
        naive, recursive), (
        "The r term computational approaches do not match.")
    assert th.allclose(
        naive, vectorised), (
        "The r term vector computational approaches do not match.")
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_r_terms()
| 1,451 | 32 | 81 | py |
neuralTPPs | neuralTPPs-master/tests/utils/test_stability.py | import torch as th
from tpp.utils.stability import subtract_exp
def test_stability():
    """`subtract_exp(a, b)` must match the naive `exp(a) - exp(b)` for a
    large-minus-small, small-minus-large and zero-difference case."""
    big, zero = th.tensor(8.1), th.tensor(0.0)
    for lhs, rhs in [(big, zero), (zero, big), (zero, zero)]:
        naive = th.exp(lhs) - th.exp(rhs)
        safe = subtract_exp(lhs, rhs)
        assert th.isclose(naive, safe)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_stability()
| 668 | 26.875 | 62 | py |
neuralTPPs | neuralTPPs-master/tests/utils/test_searchsorted.py | import numpy as np
import torch as th
from tpp.utils.searchsorted import searchsorted
def get_test_data(rows=7, data_cols=9, query_cols=11, padding_id=-1.):
    """Build a batch of sorted, right-padded random sequences plus query
    points.

    Each row is sorted ascending, then its tail (a random length of at
    least one element is kept) is overwritten with `padding_id`.

    Returns:
        Tuple of (ragged list of unpadded rows, padded tensor [rows,
        data_cols], float mask of valid entries, query tensor [rows,
        query_cols]).
    """
    padded = th.sort(th.rand(rows, data_cols).float(), dim=-1).values
    query = th.rand(rows, query_cols).float()
    lengths = th.randint(low=1, high=data_cols, size=[rows]).long()
    for row, keep in zip(padded, lengths):
        row[keep:] = padding_id
    mask = (padded != padding_id).type(padded.dtype)
    ragged = [row[row_mask.bool()] for row, row_mask in zip(padded, mask)]
    return ragged, padded, mask, query
def simple_approach(x, query):
    """Per-row `np.searchsorted` — the unbatched reference implementation."""
    results = []
    for row_values, row_queries in zip(x, query):
        results.append(np.searchsorted(a=row_values, v=row_queries))
    return results
def test_searchsorted():
    """The batched, masked `searchsorted` must agree with a per-row
    `np.searchsorted` reference on random padded data."""
    ragged, padded, mask, query = get_test_data()
    reference = th.stack(simple_approach(x=ragged, query=query), dim=0)
    batched = searchsorted(a=padded, v=query, mask=mask)
    assert th.all(reference == batched)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_searchsorted()
| 1,037 | 30.454545 | 70 | py |
neuralTPPs | neuralTPPs-master/tests/utils/test_to_flat_idxs.py | import torch as th
from tpp.utils.test import get_test_events_query
def test_to_flat_idxs():
    """`events.to_flat_idxs` must map entries of the per-mark (marked) time
    tensors back to the flat time tensor: gathering the flat times at the
    indices reproduces the marked times wherever the mark's mask is on.

    NOTE(review): the generated `query` is unused here.
    """
    events, query = get_test_events_query()
    times_marked = events.get_times(marked=True)
    times = events.get_times()
    masks = events.get_mask(marked=True)
    to_flat_idxs = events.to_flat_idxs
    for time_marked, time, mask, to_flat_idx in zip(
            times_marked, times, masks, to_flat_idxs):
        for tm, m, idx in zip(time_marked, mask, to_flat_idx):
            assert th.all(tm * m == th.take(time, idx) * m)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_to_flat_idxs()
| 586 | 25.681818 | 62 | py |
neuralTPPs | neuralTPPs-master/tests/utils/test_get_previous_times_marked.py | import torch as th
from tpp.utils.test import get_test_events_query
from tpp.utils.history_bst import get_prev_times
from tpp.utils.history_marked_bst import get_prev_times_marked
def test_get_previous_times_marked():
    """Smoke test: `get_prev_times_marked` runs on a small random batch.

    There is no reference implementation compared against here; the test
    only checks that the call completes without error.
    """
    th.random.manual_seed(0)
    events, query = get_test_events_query(
        batch_size=1, max_seq_len=12, queries=7)
    # FIX: removed a leftover debugger anchor (`a = 0`) and the unused
    # binding of the result.
    get_prev_times_marked(query=query, events=events)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_get_previous_times_marked()
| 482 | 24.421053 | 62 | py |
neuralTPPs | neuralTPPs-master/tests/utils/test_get_previous_times.py | import torch as th
from tests.processes.test_hawkes_fast import get_test_setup
from tpp.utils.history import get_prev_times
from tpp.utils.history_bst import get_prev_times as get_prev_times_bst
def test_get_previous_times():
    """The BST-based `get_prev_times` must agree with the linear-scan
    version on masked values and event indicators, both with and without
    window boundaries.

    NOTE(review): `get_test_setup` is imported from
    tests.processes.test_hawkes_fast, which here only defines
    `get_fast_slow_results` — the import looks stale; confirm. The values
    unpacked below (prev_times/is_event are overwritten in the loop;
    marks/alpha/beta/mu are unused) suggest this was copied from another
    test.
    """
    (marks, query, events,
     prev_times, is_event,
     alpha, beta, mu) = get_test_setup()
    for allow_window in [False, True]:
        (prev_times, prev_times_idxs), is_event, mask = get_prev_times(
            query=query, events=events, allow_window=allow_window)
        ((prev_times_bst, prev_times_idxs_bst),
         is_event_bst, mask_bst) = get_prev_times_bst(
            query=query, events=events, allow_window=allow_window)
        assert th.all(mask == mask_bst)
        prev_times_masked = prev_times * mask
        prev_times_masked_bst = prev_times_bst * mask_bst
        assert th.all(prev_times_masked == prev_times_masked_bst)
        # NOTE(review): despite the `idx` names this compares the times
        # again, not `prev_times_idxs`; the index tensors are never
        # checked — presumably a bug in the test.
        prev_times_idx_masked = prev_times * mask.type(prev_times.dtype)
        prev_times_idx_masked_bst = prev_times_bst * mask_bst.type(
            prev_times_bst.dtype)
        assert th.all(prev_times_idx_masked == prev_times_idx_masked_bst)
        is_event_masked = is_event * mask.type(is_event.dtype)
        is_event_masked_bst = is_event_bst * mask.type(is_event_bst.dtype)
        assert th.all(is_event_masked == is_event_masked_bst)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_get_previous_times()
| 1,395 | 35.736842 | 74 | py |
neuralTPPs | neuralTPPs-master/tpp/models/__init__.py | from argparse import Namespace
from torch import nn
from pprint import pprint
from tpp.models.base.enc_dec import EncDecProcess
from tpp.models.base.modular import ModularProcess
from tpp.models.poisson import PoissonProcess
from tpp.models.encoders.base.encoder import Encoder
from tpp.models.encoders.gru import GRUEncoder
from tpp.models.encoders.identity import IdentityEncoder
from tpp.models.encoders.mlp_fixed import MLPFixedEncoder
from tpp.models.encoders.mlp_variable import MLPVariableEncoder
from tpp.models.encoders.stub import StubEncoder
from tpp.models.encoders.self_attention import SelfAttentionEncoder
from tpp.models.decoders.base.decoder import Decoder
from tpp.models.decoders.conditional_poisson import ConditionalPoissonDecoder
from tpp.models.decoders.conditional_poisson_cm import ConditionalPoissonCMDecoder
from tpp.models.decoders.hawkes import HawkesDecoder
from tpp.models.decoders.log_normal_mixture import LogNormalMixtureDecoder
from tpp.models.decoders.mlp_cm import MLPCmDecoder
from tpp.models.decoders.mlp_mc import MLPMCDecoder
from tpp.models.decoders.neural_hawkes import NeuralHawkesDecoder
from tpp.models.decoders.poisson import PoissonDecoder
from tpp.models.decoders.rmtpp import RMTPPDecoder
from tpp.models.decoders.rmtpp_cm import RMTPPCmDecoder
from tpp.models.decoders.self_attention_cm import SelfAttentionCmDecoder
from tpp.models.decoders.self_attention_simple_cm import SelfAttentionCmDecoder as SelfAttentionSimpleCmDecoder
from tpp.models.decoders.self_attention_mc import SelfAttentionMCDecoder
# Registry of encoder constructors, keyed by the name supplied in
# `args.encoder` (see `instantiate_encoder_or_decoder`).
ENCODER_CLASSES = {
    "gru": GRUEncoder,
    "identity": IdentityEncoder,
    "mlp-fixed": MLPFixedEncoder,
    "mlp-variable": MLPVariableEncoder,
    "stub": StubEncoder,
    "selfattention": SelfAttentionEncoder}
# Registry of decoder constructors, keyed by the name supplied in
# `args.decoder`.
DECODER_CLASSES = {
    "conditional-poisson": ConditionalPoissonDecoder,
    "conditional-poisson-cm": ConditionalPoissonCMDecoder,
    "hawkes": HawkesDecoder,
    "log-normal-mixture": LogNormalMixtureDecoder,
    "mlp-cm": MLPCmDecoder,
    "mlp-mc": MLPMCDecoder,
    "neural-hawkes": NeuralHawkesDecoder,
    "poisson": PoissonDecoder,
    "rmtpp": RMTPPDecoder,
    "rmtpp-cm": RMTPPCmDecoder,
    "selfattention-cm": SelfAttentionCmDecoder,
    "selfattention-simple-cm": SelfAttentionSimpleCmDecoder,
    "selfattention-mc": SelfAttentionMCDecoder}
# Sorted name lists, used for error messages listing the valid choices.
ENCODER_NAMES = sorted(list(ENCODER_CLASSES.keys()))
DECODER_NAMES = sorted(list(DECODER_CLASSES.keys()))
# Component-kind -> registry lookup tables.
CLASSES = {"encoder": ENCODER_CLASSES, "decoder": DECODER_CLASSES}
NAMES = {"encoder": ENCODER_NAMES, "decoder": DECODER_NAMES}
def instantiate_encoder_or_decoder(
        args: Namespace, component="encoder") -> nn.Module:
    """Build the encoder or decoder selected in `args` from its registry.

    Constructor keyword arguments are collected from every attribute of
    `args` whose name starts with ``"<component>_"`` (prefix stripped),
    plus the shared `marks` attribute.

    Args:
        args: Parsed arguments holding the component name and its options.
        component: Either "encoder" or "decoder".

    Returns:
        The instantiated component module.

    Raises:
        ValueError: If the requested component name is not registered.
    """
    assert component in {"encoder", "decoder"}
    classes = CLASSES[component]
    prefix = component + "_"

    # Gather "<component>_xyz" attributes as constructor kwargs "xyz".
    kwargs = {}
    for key, value in args.__dict__.items():
        if key.startswith(prefix):
            kwargs[key[len(prefix):]] = value
    kwargs["marks"] = args.marks

    name = args.__dict__[component]
    if name not in classes:
        raise ValueError("Unknown {} class {}. Must be one of {}.".format(
            component, name, NAMES[component]))

    component_instance = classes[name](**kwargs)

    print("Instantiated {} of type {}".format(component, name))
    print("kwargs:")
    pprint(kwargs)
    print()
    return component_instance
def get_model(args: Namespace) -> EncDecProcess:
    """Instantiate the full encoder/decoder point process from `args`.

    NOTE: mutates `args` in place (`decoder_units_mlp` and
    `encoder_units_mlp` are extended), and the decoder must be built before
    the encoder because the encoder's final MLP width is taken from
    `decoder.input_size`.

    Args:
        args: Parsed arguments naming the components and their options.

    Returns:
        The process (optionally combined with a Poisson process) moved to
        `args.device`.
    """
    # The decoder's final MLP layer emits one value per mark.
    args.decoder_units_mlp = args.decoder_units_mlp + [args.marks]
    decoder: Decoder
    decoder = instantiate_encoder_or_decoder(args, component="decoder")
    # Make the encoder's output width match the decoder's expected input.
    if decoder.input_size is not None:
        args.encoder_units_mlp = args.encoder_units_mlp + [decoder.input_size]
    encoder: Encoder
    encoder = instantiate_encoder_or_decoder(args, component="encoder")
    process = EncDecProcess(
        encoder=encoder, decoder=decoder, multi_labels=args.multi_labels)
    if args.include_poisson:
        # Combine the learned process with a homogeneous Poisson term.
        processes = {process.name: process}
        processes.update({"poisson": PoissonProcess(marks=process.marks)})
        process = ModularProcess(
            processes=processes, use_coefficients=args.use_coefficients)
    process = process.to(device=args.device)
    return process
| 4,192 | 36.106195 | 111 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/conditional_poisson_cm.py | import torch as th
from typing import List, Optional, Tuple, Dict
from tpp.models.decoders.base.cumulative import CumulativeDecoder
from tpp.models.base.process import Events
from tpp.pytorch.models import MLP
from tpp.utils.encoding import encoding_size
from tpp.utils.index import take_2_by_2, take_3_by_2
class ConditionalPoissonCMDecoder(CumulativeDecoder):
    """A conditional-Poisson decoder based on the cumulative approach.

    An MLP maps the representation of the most recent event to a constant
    per-mark rate; multiplying by the elapsed time `query - prev_times`
    yields the cumulative intensity.

    NOTE(review): `name` is set to "mlp-cm" in `__init__` although this
    class is registered under "conditional-poisson-cm" — looks copied from
    MLPCmDecoder; confirm before keying anything on the process name.

    Args:
        units_mlp: List of hidden layers sizes, including the output size.
        activation_mlp: Activation functions. Either a list or a string.
        constraint_mlp: Constraint of the network. Either none, nonneg or
            softplus.
        dropout_mlp: Dropout rates, either a list or a float.
        activation_final_mlp: Last activation of the MLP.
        do_zero_subtraction: If `True` the class computes
            Lambda(tau) = Lambda'(tau) - Lambda'(0)
            in order to enforce Lambda(0) = 0. Defaults to `True`.
        model_log_cm: Forwarded to `CumulativeDecoder` — presumably models
            the cumulative intensity in log space; confirm in the base
            class. Defaults to `False`.
        emb_dim: Size of the embeddings (default=2).
        encoding: Way to encode the events: either times_only, or temporal.
            Defaults to times_only.
        time_encoding: Absolute or relative event-time encoding
            (default="relative").
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.
    """
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = "nonneg",
            activation_final_mlp: Optional[str] = "parametric_softplus",
            # Other params
            do_zero_subtraction: Optional[bool] = True,
            model_log_cm: Optional[bool] = False,
            emb_dim: Optional[int] = 2,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        # The cumulative intensity must be non-decreasing in time, so an
        # unconstrained MLP is coerced to non-negative weights.
        if constraint_mlp is None:
            print("Warning! MLP decoder is unconstrained. Setting to `nonneg`")
            constraint_mlp = "nonneg"
        input_size = units_mlp[0]
        super(ConditionalPoissonCMDecoder, self).__init__(
            name="mlp-cm",
            do_zero_subtraction=do_zero_subtraction,
            model_log_cm=model_log_cm,
            input_size=input_size,
            emb_dim=emb_dim,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
        self.mlp = MLP(
            units=units_mlp[1:],
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            # units_mlp in this class also provides the input dimensionality
            # of the mlp
            input_shape=input_size,
            activation_final=activation_final_mlp)
    def cum_intensity(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.Tensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None,
            update_running_stats: Optional[bool] = True
    ) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the cumulative log intensity and a mask

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.
            artifacts: A dictionary of whatever else you might want to return.
            update_running_stats: whether running stats are updated or not.

        Returns:
            intensity_integral: [B,T,M] The cumulative intensities for each
                query time for each mark (class).
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
            artifacts: Some measures.
        """
        # Representation of the most recent event preceding each query.
        history_representations = take_3_by_2(
            representations, index=prev_times_idxs)  # [B,T,D]
        intensity_itg = self.mlp(history_representations)  # [B,T,output_size]
        # The rate is constant between events, so the integral is rate * dt.
        delta_t = query - prev_times  # [B,T]
        delta_t = delta_t.unsqueeze(dim=-1)  # [B,T,1]
        intensity_integrals = intensity_itg * delta_t  # [B,T,M]
        intensity_mask = pos_delta_mask  # [B,T]
        if representations_mask is not None:
            # Invalidate queries conditioned on padded representations.
            history_representations_mask = take_2_by_2(
                representations_mask, index=prev_times_idxs)  # [B,T]
            intensity_mask = intensity_mask * history_representations_mask
        return intensity_integrals, intensity_mask, artifacts
| 5,722 | 41.392593 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/hawkes.py | import torch as th
import torch.nn as nn
from typing import Dict, Optional, Tuple
from tpp.models.decoders.base.decoder import Decoder
from tpp.utils.events import Events
from tpp.utils.nnplus import non_neg_param
from tpp.processes.hawkes_fast import decoder_fast as hawkes_decoder
# from tpp.processes.hawkes_slow import decoder_slow as hawkes_decoder
class HawkesDecoder(Decoder):
    """A parametric Hawkes Process decoder.

    Holds the [M,M] excitation magnitudes `alpha`, the [M,M] decay rates
    `beta` and the [M] base rates `mu`; the intensity itself is computed by
    the `hawkes_decoder` helper.

    Args:
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.
    """
    def __init__(self, marks: Optional[int] = 1, **kwargs):
        super(HawkesDecoder, self).__init__(name="hawkes", marks=marks)
        self.alpha = nn.Parameter(th.Tensor(self.marks, self.marks))
        self.beta = nn.Parameter(th.Tensor(self.marks, self.marks))
        self.mu = nn.Parameter(th.Tensor(self.marks))
        self.reset_parameters()
    def reset_parameters(self):
        # Keep the draw order (alpha, beta, mu) so RNG streams match.
        for parameter in (self.alpha, self.beta, self.mu):
            nn.init.uniform_(parameter)
    def forward(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.LongTensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None
    ) -> Tuple[th.Tensor, th.Tensor, th.Tensor, Dict]:
        # Project excitations and base rates back onto the non-negative
        # orthant before evaluating the closed-form intensity.
        for parameter in (self.alpha, self.mu):
            parameter.data = non_neg_param(parameter.data)
        return hawkes_decoder(
            events=events,
            query=query,
            prev_times=prev_times,
            is_event=is_event,
            alpha=self.alpha,
            beta=self.beta,
            mu=self.mu,
            marks=self.marks)
| 1,860 | 32.836364 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/conditional_poisson.py | import torch as th
from typing import Dict, Optional, Tuple, List
from tpp.models.decoders.base.decoder import Decoder
from tpp.pytorch.models import MLP
from tpp.utils.events import Events
from tpp.utils.index import take_2_by_2, take_3_by_2
from tpp.utils.stability import epsilon, check_tensor
class ConditionalPoissonDecoder(Decoder):
    """A conditional Poisson decoder.

    The intensity is constant between events: an MLP maps the most recent
    event representation to one strictly positive rate per mark, and the
    intensity integral is that rate times the elapsed time.

    Args:
        units_mlp: Layer sizes of the MLP; the first entry is the input
            dimensionality.
        activation_mlp: Activation functions. Either a list or a string.
        dropout_mlp: Dropout rates, either a list or a float.
        constraint_mlp: Constraint of the network. Either none, nonneg or
            softplus.
        activation_final_mlp: Last activation of the MLP.
        marks: The distinct number of marks (classes) for the process.
            Defaults to 1.
    """
    def __init__(
            self,
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = "parametric_softplus",
            marks: Optional[int] = 1,
            **kwargs):
        input_size = units_mlp[0]
        if len(units_mlp) < 2:
            raise ValueError("Units of length at least 2 need to be specified")
        super(ConditionalPoissonDecoder, self).__init__(
            name="conditional-poisson",
            input_size=input_size,
            marks=marks)
        self.mlp = MLP(
            units=units_mlp[1:],
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            # units_mlp in this class also provides the input dimensionality
            # of the mlp
            input_shape=input_size,
            activation_final=activation_final_mlp)
    def forward(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.LongTensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None
    ) -> Tuple[th.Tensor, th.Tensor, th.Tensor, Dict]:
        # Condition on the representation of the most recent event.
        hist_reps = take_3_by_2(
            representations, index=prev_times_idxs)  # [B,T,units_mlp[0]]
        rates = self.mlp(hist_reps)  # [B,T,M]
        # Nudge away from zero so the log below stays finite.
        rates = rates + epsilon(dtype=rates.dtype, device=rates.device)
        deltas = (query - prev_times).unsqueeze(dim=-1)  # [B,T,1]
        intensity_integrals = rates * deltas  # [B,T,M]
        if representations_mask is None:
            intensity_mask = pos_delta_mask  # [B,T]
        else:
            # Invalidate queries conditioned on padded representations.
            intensity_mask = pos_delta_mask * take_2_by_2(
                representations_mask, index=prev_times_idxs)  # [B,T]
        check_tensor(rates, positive=True, strict=True)
        check_tensor(intensity_integrals * intensity_mask.unsqueeze(-1),
                     positive=True)
        return th.log(rates), intensity_integrals, intensity_mask, artifacts
| 3,000 | 36.987342 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/log_normal_mixture.py | import math
import torch as th
import torch.nn as nn
from typing import Dict, Optional, Tuple, List
from tpp.models.decoders.base.decoder import Decoder
from tpp.utils.events import Events
from tpp.utils.index import take_3_by_2, take_2_by_2
from tpp.utils.stability import epsilon, check_tensor
class LogNormalMixtureDecoder(Decoder):
    """Analytic decoder process, uses a closed form for the intensity
    to train the model.
    See https://arxiv.org/pdf/1909.12127.pdf.

    The inter-event time is modelled by a mixture of log-normals; the
    intensity is the hazard f(t) / (1 - F(t)), where the density f is
    obtained from the closed-form CDF via autograd.

    Args:
        n_mixture: Number of log-normal mixture components.
        units_mlp: `units_mlp[0]` is the representation (input) size and
            `units_mlp[1]` the hidden size of the mark head.
        multi_labels: If `True`, mark probabilities use independent
            sigmoids instead of a softmax. Defaults to `False`.
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.
    """
    def __init__(
            self,
            n_mixture: int,
            units_mlp: List[int],
            multi_labels: Optional[bool] = False,
            marks: Optional[int] = 1,
            **kwargs):
        super(LogNormalMixtureDecoder, self).__init__(
            name="log-normal-mixture",
            input_size=units_mlp[0],
            marks=marks,
            **kwargs)
        if len(units_mlp) < 2:
            raise ValueError("Units of length at least 2 need to be specified")
        # Per-component heads: mean, log-std and (unnormalised) weight of
        # the mixture over log inter-event times.
        self.mu = nn.Linear(in_features=units_mlp[0], out_features=n_mixture)
        self.s = nn.Linear(in_features=units_mlp[0], out_features=n_mixture)
        self.w = nn.Linear(in_features=units_mlp[0], out_features=n_mixture)
        # Two-layer mark head producing per-mark probabilities.
        self.marks1 = nn.Linear(
            in_features=units_mlp[0], out_features=units_mlp[1])
        self.marks2 = nn.Linear(
            in_features=units_mlp[1], out_features=marks)
        self.multi_labels = multi_labels
    def forward(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.LongTensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None
    ) -> Tuple[th.Tensor, th.Tensor, th.Tensor, Dict]:
        """Compute the intensities for each query time given event
        representations.

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of window start and
                each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.
            artifacts: A dictionary of whatever else you might want to return.

        Returns:
            log_intensity: [B,T,M] The intensities for each query time for
                each mark (class).
            intensity_integrals: [B,T,M] The integral of the intensity from
                the most recent event to the query time for each mark.
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
            artifacts: A dictionary of whatever else you might want to return.
        """
        # Needed so the density can be taken as d(CDF)/d(query) below.
        query.requires_grad = True
        query_representations = take_3_by_2(
            representations, index=prev_times_idxs)  # [B,T,D]
        delta_t = query - prev_times  # [B,T]
        delta_t = delta_t.unsqueeze(-1)  # [B,T,1]
        # Clamp to strictly positive so log(delta_t) stays finite.
        delta_t = th.relu(delta_t)
        delta_t = delta_t + (delta_t == 0).float() * epsilon(
            dtype=delta_t.dtype, device=delta_t.device)
        mu = self.mu(query_representations)  # [B,T,K]
        std = th.exp(self.s(query_representations))
        w = th.softmax(self.w(query_representations), dim=-1)
        if self.multi_labels:
            p_m = th.sigmoid(
                self.marks2(th.tanh(self.marks1(query_representations))))
        else:
            p_m = th.softmax(
                self.marks2(
                    th.tanh(self.marks1(query_representations))), dim=-1)
        # Mixture-of-log-normals CDF at delta_t (erf form of Phi).
        cum_f = w * 0.5 * (
            1 + th.erf((th.log(delta_t) - mu) / (std * math.sqrt(2))))
        cum_f = th.sum(cum_f, dim=-1)
        # Survival 1 - F, clamped strictly positive for the log/division.
        one_min_cum_f = 1. - cum_f
        one_min_cum_f = th.relu(one_min_cum_f) + epsilon(
            dtype=cum_f.dtype, device=cum_f.device)
        # Density f = dF/dt via autograd (graph kept for training).
        f = th.autograd.grad(
            outputs=cum_f,
            inputs=query,
            grad_outputs=th.ones_like(cum_f),
            retain_graph=True,
            create_graph=True)[0]
        query.requires_grad = False
        f = f + epsilon(dtype=f.dtype, device=f.device)
        # Hazard: lambda(t) = f(t) / (1 - F(t)).
        base_log_intensity = th.log(f / one_min_cum_f)
        marked_log_intensity = base_log_intensity.unsqueeze(
            dim=-1)  # [B,T,1]
        marked_log_intensity = marked_log_intensity + th.log(p_m)  # [B,T,M]
        # Integrated hazard: Lambda(t) = -log(1 - F(t)).
        base_intensity_itg = - th.log(one_min_cum_f)
        marked_intensity_itg = base_intensity_itg.unsqueeze(dim=-1)  # [B,T,1]
        marked_intensity_itg = marked_intensity_itg * p_m  # [B,T,M]
        intensity_mask = pos_delta_mask  # [B,T]
        if representations_mask is not None:
            history_representations_mask = take_2_by_2(
                representations_mask, index=prev_times_idxs)  # [B,T]
            intensity_mask = intensity_mask * history_representations_mask
        artifacts_decoder = {
            "base_log_intensity": base_log_intensity,
            "base_intensity_integral": base_intensity_itg,
            "mark_probability": p_m}
        if artifacts is None:
            artifacts = {'decoder': artifacts_decoder}
        else:
            artifacts['decoder'] = artifacts_decoder
        check_tensor(marked_log_intensity)
        check_tensor(marked_intensity_itg * intensity_mask.unsqueeze(-1),
                     positive=True)
        return (marked_log_intensity,
                marked_intensity_itg,
                intensity_mask,
                artifacts)  # [B,T,M], [B,T,M], [B,T], Dict
| 6,516 | 40.509554 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/self_attention_cm.py | import torch as th
from typing import List, Optional, Tuple, Dict
from tpp.models.decoders.base.cumulative import CumulativeDecoder
from tpp.models.base.process import Events
from tpp.pytorch.models import MLP
from tpp.utils.encoding import encoding_size
from tpp.utils.transformer_utils import TransformerDecoderNetwork
from tpp.utils.transformer_utils import TransformerDecoderLayer
class SelfAttentionCmDecoder(CumulativeDecoder):
    """A self attention decoder based on the new cumulative sigmoid attention.

    Args:
        units_mlp: List of hidden layers sizes, including the output size.
        activation_mlp: Activation functions. Either a list or a string.
        constraint_mlp: Constraint of the network. Either none, nonneg or
            softplus.
        dropout_mlp: Dropout rates, either a list or a float.
        activation_final_mlp: Last activation of the MLP.
        units_rnn: Hidden size of the Transformer.
        layers_rnn: Number of layers in the Transformer.
        n_heads: Number of heads in the Transformer.
        activation_rnn: The non-linearity to use for the Transformer.
        dropout_rnn: Rate of dropout in the Transformer.
        attn_activation: Attention weight activation (default="softmax").
        constraint_rnn: Weight constraint for the Transformer. Coerced to
            "nonneg" if `None`.
        do_zero_subtraction: If `True`, compute
            Lambda(tau) = Lambda'(tau) - Lambda'(0) to enforce
            Lambda(0) = 0. Defaults to `True`.
        model_log_cm: Forwarded to `CumulativeDecoder` — presumably models
            the cumulative intensity in log space; confirm in the base
            class.
        mc_prop_est: Proportion of numbers of samples for the MC method,
            compared to the size of the input. (Default=1.).
        emb_dim: Size of the embeddings (default=4).
        temporal_scaling: Scaling parameter for temporal encoding
        encoding: Way to encode the events: either times_only, marks_only,
            concatenate or temporal_encoding. Defaults to times_only.
        time_encoding: Absolute or relative event-time encoding
            (default="relative").
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.
    """
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = "parametric_softplus",
            # Transformer
            units_rnn: Optional[int] = 16,
            layers_rnn: Optional[int] = 1,
            n_heads: Optional[int] = 1,
            activation_rnn: Optional[str] = "relu",
            dropout_rnn: Optional[float] = 0.,
            attn_activation: Optional[str] = "softmax",
            constraint_rnn: Optional[str] = None,
            # Other params
            do_zero_subtraction: Optional[bool] = True,
            model_log_cm: Optional[bool] = False,
            mc_prop_est: Optional[float] = 1.,
            emb_dim: Optional[int] = 4,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        # The cumulative parameterisation needs non-negative weights so the
        # modelled integral is non-decreasing; coerce unconstrained configs.
        if constraint_rnn is None:
            print("Warning! SA decoder is unconstrained. Setting to `nonneg`")
            constraint_rnn = "nonneg"
        if constraint_mlp is None:
            print("Warning! MLP decoder is unconstrained. Setting to `nonneg`")
            constraint_mlp = "nonneg"
        super(SelfAttentionCmDecoder, self).__init__(
            name="selfattention-cm",
            do_zero_subtraction=do_zero_subtraction,
            model_log_cm=model_log_cm,
            input_size=encoding_size(encoding=encoding, emb_dim=emb_dim),
            mc_prop_est=mc_prop_est,
            emb_dim=emb_dim,
            temporal_scaling=temporal_scaling,
            encoding=encoding,
            marks=marks,
            **kwargs)
        decoder_layer = TransformerDecoderLayer(
            d_model=self.encoding_size,
            nhead=n_heads,
            dim_feedforward=units_rnn,
            dropout=dropout_rnn,
            activation=activation_rnn,
            attn_activation=attn_activation,
            constraint=constraint_rnn,
            normalisation="layernorm_with_running_stats")
        self.transformer_decoder = TransformerDecoderNetwork(
            decoder_layer=decoder_layer,
            num_layers=layers_rnn)
        self.mlp = MLP(
            units=units_mlp,
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            input_shape=self.encoding_size,
            activation_final=activation_final_mlp)
        self.n_heads = n_heads
    def cum_intensity(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.Tensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None,
            update_running_stats: Optional[bool] = True,
    ) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the log_intensity and a mask

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.
            artifacts: A dictionary of whatever else you might want to return.
            update_running_stats: whether running stats are updated or not
                (optional).

        Returns:
            log_intensity: [B,T,M] The intensities for each query time for
                each mark (class).
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
        """
        batch_size, query_length = query.size()
        _, events_length, _ = representations.size()
        query_representations, intensity_mask = self.get_query_representations(
            events=events,
            query=query,
            prev_times=prev_times,
            prev_times_idxs=prev_times_idxs,
            pos_delta_mask=pos_delta_mask,
            is_event=is_event,
            representations=representations,
            representations_mask=representations_mask)  # [B,T,D], [B,T]
        # Build an additive attention mask of shape [B,T,L+1]: memory
        # position j is attendable (0) iff j <= prev_times_idxs for that
        # query, i.e. a query may only attend to events up to and including
        # its most recent one; later positions get -inf.
        memory_mask = th.arange(
            events_length, device=representations.device).repeat(
            batch_size, query_length).reshape(
            batch_size, query_length, events_length)
        items_to_zero = memory_mask <= prev_times_idxs.unsqueeze(-1)
        # Make sure there is at least one zero in the row
        missing_zeros = items_to_zero.sum(-1) == 0
        items_to_zero = items_to_zero | missing_zeros.unsqueeze(-1)
        items_to_neg_inf = ~items_to_zero
        memory_mask = memory_mask.float()
        memory_mask = memory_mask.masked_fill(items_to_zero, float(0.))
        memory_mask = memory_mask.masked_fill(items_to_neg_inf, float('-inf'))
        # Multi-head attention expects the mask tiled once per head:
        # [n_heads*B, T, L+1].
        if self.n_heads > 1:
            memory_mask = memory_mask.repeat(self.n_heads, 1, 1)
        assert list(memory_mask.size()) == [
            self.n_heads * batch_size, query_length, events_length]
        # [B,T,D] -> [T,B,D] and [B,L+1,D] -> [L+1,B,D]
        query_representations = query_representations.transpose(0, 1)
        representations = representations.transpose(0, 1)
        hidden, attn_weights = self.transformer_decoder(
            tgt=query_representations,
            memory=representations,
            memory_mask=memory_mask,
            update_running_stats=update_running_stats
        )  # [T,B,hidden_size], [B,T,L]
        # [T,B,hidden_size] -> [B,T,hidden_size]
        hidden = hidden.transpose(0, 1)
        outputs = self.mlp(hidden)  # [B,L,output_size]
        if artifacts is None:
            artifacts = {'decoder': {"attention_weights": attn_weights}}
        else:
            artifacts['decoder'] = {"attention_weights": attn_weights}
        return outputs, intensity_mask, artifacts
| 8,473 | 40.950495 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/poisson.py | import torch as th
import torch.nn as nn
from typing import Dict, Optional, Tuple
from tpp.models.decoders.base.decoder import Decoder
from tpp.utils.events import Events
from tpp.utils.nnplus import non_neg_param
class PoissonDecoder(Decoder):
    """A homogeneous Poisson process decoder.

    Each mark has a single learnable, history-independent rate `mu`, so the
    intensity integral over (prev_times, query] is simply `mu * delta_t`.

    Args:
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.
    """
    def __init__(self, marks: Optional[int] = 1, **kwargs):
        super(PoissonDecoder, self).__init__(name="poisson", marks=marks)
        self.mu = nn.Parameter(th.Tensor(self.marks))
        self.reset_parameters()
    def reset_parameters(self):
        nn.init.uniform_(self.mu)
    def forward(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.LongTensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None
    ) -> Tuple[th.Tensor, th.Tensor, th.Tensor, Dict]:
        batch_size, n_queries = query.shape
        # Project the rates onto the non-negative orthant.
        self.mu.data = non_neg_param(self.mu.data)
        # Broadcast the per-mark rate to every (batch, query) position.
        rates = self.mu.reshape([1, 1, self.marks]).repeat(
            [batch_size, n_queries, 1])  # [B,T,M]
        deltas = (query - prev_times).unsqueeze(dim=-1)  # [B,T,1]
        intensity_integrals = rates * deltas  # [B,T,M]
        intensities_mask = events.within_window(query)
        return th.log(rates), intensity_integrals, intensities_mask, dict()
| 1,740 | 33.82 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/rmtpp.py | import torch as th
import torch.nn as nn
from typing import Dict, Optional, Tuple
from tpp.models.decoders.base.decoder import Decoder
from tpp.utils.events import Events
from tpp.utils.index import take_3_by_2, take_2_by_2
from tpp.utils.stability import epsilon, subtract_exp, check_tensor
class RMTPPDecoder(Decoder):
    """Analytic decoder process, uses a closed form for the intensity
    to train the model.
    See https://www.kdd.org/kdd2016/papers/files/rpp1081-duA.pdf.

    Expects representations of size `marks + 1`: channel 0 is the scalar
    history term v·h, the remaining `marks` channels are mark logits.

    Args:
        multi_labels: If `True`, mark probabilities use independent
            sigmoids instead of a softmax. Defaults to `False`.
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.
    """
    def __init__(
            self,
            multi_labels: Optional[bool] = False,
            marks: Optional[int] = 1,
            **kwargs):
        super(RMTPPDecoder, self).__init__(
            name="rmtpp",
            input_size=marks+1,
            marks=marks)
        # Learned scalar time-decay weight `w` of the RMTPP intensity.
        self.w = nn.Parameter(th.Tensor(1))
        self.multi_labels = multi_labels
        self.reset_parameters()
    def reset_parameters(self):
        # Small positive init; `w` appears as a divisor in the integral.
        nn.init.uniform_(self.w, b=0.001)
    def forward(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.LongTensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None
    ) -> Tuple[th.Tensor, th.Tensor, th.Tensor, Dict]:
        """Compute the intensities for each query time given event
        representations.

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of window start and
                each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.
            artifacts: A dictionary of whatever else you might want to return.

        Returns:
            log_intensity: [B,T,M] The intensities for each query time for
                each mark (class).
            intensity_integrals: [B,T,M] The integral of the intensity from
                the most recent event to the query time for each mark.
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
            artifacts: A dictionary of whatever else you might want to return.
        """
        query_representations = take_3_by_2(
            representations, index=prev_times_idxs)  # [B,T,D]
        # Channel 0: scalar history influence; channels 1..M: mark logits.
        v_h_t = query_representations[:, :, 0]  # [B,T]
        v_h_m = query_representations[:, :, 1:]  # [B,T,M]
        w_delta_t = self.w * (query - prev_times)  # [B,T]
        # RMTPP closed form: log lambda(t) = v.h + w * (t - t_prev).
        base_log_intensity = v_h_t + w_delta_t  # [B,T]
        if self.multi_labels:
            p_m = th.sigmoid(v_h_m)  # [B,T,M]
        else:
            p_m = th.softmax(v_h_m, dim=-1)  # [B,T,M]
        regulariser = epsilon(dtype=p_m.dtype, device=p_m.device)
        p_m = p_m + regulariser
        marked_log_intensity = base_log_intensity.unsqueeze(
            dim=-1)  # [B,T,1]
        marked_log_intensity = marked_log_intensity + th.log(p_m)  # [B,T,M]
        intensity_mask = pos_delta_mask  # [B,T]
        if representations_mask is not None:
            history_representations_mask = take_2_by_2(
                representations_mask, index=prev_times_idxs)  # [B,T]
            intensity_mask = intensity_mask * history_representations_mask
        # Closed-form integral: (exp(v.h + w*dt) - exp(v.h)) / w.
        exp_1, exp_2 = v_h_t + w_delta_t, v_h_t  # [B,T]
        # Avoid exponentiating to get masked infinity
        exp_1, exp_2 = exp_1 * intensity_mask, exp_2 * intensity_mask  # [B,T]
        # subtract_exp presumably computes exp(exp_1) - exp(exp_2) in a
        # numerically stable way — see tpp.utils.stability; confirm.
        base_intensity_itg = subtract_exp(exp_1, exp_2)
        base_intensity_itg = base_intensity_itg / self.w  # [B,T]
        # Numerical guard against small negative values from the above.
        base_intensity_itg = th.relu(base_intensity_itg)
        marked_intensity_itg = base_intensity_itg.unsqueeze(dim=-1)  # [B,T,1]
        marked_intensity_itg = marked_intensity_itg * p_m  # [B,T,M]
        artifacts_decoder = {
            "base_log_intensity": base_log_intensity,
            "base_intensity_integral": base_intensity_itg,
            "mark_probability": p_m}
        if artifacts is None:
            artifacts = {'decoder': artifacts_decoder}
        else:
            artifacts['decoder'] = artifacts_decoder
        check_tensor(marked_log_intensity)
        check_tensor(marked_intensity_itg * intensity_mask.unsqueeze(-1),
                     positive=True)
        return (marked_log_intensity,
                marked_intensity_itg,
                intensity_mask,
                artifacts)  # [B,T,M], [B,T,M], [B,T], Dict
| 5,709 | 41.61194 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/self_attention_simple_cm.py | import torch as th
from typing import List, Optional, Tuple, Dict
from tpp.models.decoders.base.cumulative import CumulativeDecoder
from tpp.models.base.process import Events
from tpp.pytorch.models import MLP
from tpp.utils.encoding import encoding_size
class SelfAttentionCmDecoder(CumulativeDecoder):
"""A self attention decoder based on the new cumulative sigmoid attention.
Args:
units_mlp: List of hidden layers sizes, including the output size.
activation_mlp: Activation functions. Either a list or a string.
constraint_mlp: Constraint of the network. Either none, nonneg or
softplus.
dropout_mlp: Dropout rates, either a list or a float.
activation_final_mlp: Last activation of the MLP.
units_rnn: Hidden size of the Transformer.
layers_rnn: Number of layers in the Transformer.
num_heads: Number of heads in the Transformer.
activation_rnn: The non-linearity to use for the Transformer.
dropout_rnn: Rate of dropout in the Transformer.
emb_dim: Size of the embeddings (default=2).
temporal_scaling: Scaling parameter for temporal encoding
encoding: Way to encode the events: either times_only, marks_only,
concatenate or temporal_encoding. Defaults to times_only.
marks: The distinct number of marks (classes) for the process. Defaults
to 1.
"""
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = "parametric_softplus",
            # Transformer
            # NOTE(review): the transformer parameters below (units_rnn,
            # layers_rnn, num_heads, activation_rnn, dropout_rnn,
            # attn_activation) are accepted but not used anywhere in this
            # constructor body — possibly vestigial or consumed via
            # **kwargs elsewhere; confirm.
            units_rnn: Optional[int] = 16,
            layers_rnn: Optional[int] = 1,
            num_heads: Optional[int] = 1,
            activation_rnn: Optional[str] = "relu",
            dropout_rnn: Optional[float] = 0.,
            attn_activation: Optional[str] = "softmax",
            # Other params
            mc_prop_est: Optional[float] = 1.,
            emb_dim: Optional[int] = 4,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        super(SelfAttentionCmDecoder, self).__init__(
            name="selfattention-cm",
            input_size=encoding_size(encoding=encoding, emb_dim=emb_dim),
            mc_prop_est=mc_prop_est,
            emb_dim=emb_dim,
            temporal_scaling=temporal_scaling,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
        # Main head mapping encoded queries to the decoder outputs.
        self.mlp = MLP(
            units=units_mlp,
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            input_shape=self.encoding_size,
            activation_final=activation_final_mlp)
        # Single non-negative, bias-free 1->1 linear map (a learned
        # non-negative scalar weight).
        self.mlp1 = MLP(
            units=[1],
            activations=None,
            constraint="nonneg",
            dropout_rates=None,
            input_shape=1,
            activation_final=None,
            use_bias=False)
def cum_intensity(
        self,
        events: Events,
        query: th.Tensor,
        prev_times: th.Tensor,
        prev_times_idxs: th.Tensor,
        pos_delta_mask: th.Tensor,
        is_event: th.Tensor,
        representations: th.Tensor,
        representations_mask: Optional[th.Tensor] = None,
        artifacts: Optional[dict] = None,
        update_running_stats: Optional[bool] = True
) -> Tuple[th.Tensor, th.Tensor, Dict]:
    """Compute the log_intensity and a mask

    Args:
        events: [B,L] Times and labels of events.
        query: [B,T] Times to evaluate the intensity function.
        prev_times: [B,T] Times of events directly preceding queries.
        prev_times_idxs: [B,T] Indexes of times of events directly
            preceding queries. These indexes are of window-prepended
            events.
        pos_delta_mask: [B,T] A mask indicating if the time difference
            `query - prev_times` is strictly positive.
        is_event: [B,T] A mask indicating whether the time given by
            `prev_times_idxs` corresponds to an event or not (a 1 indicates
            an event and a 0 indicates a window boundary).
        representations: [B,L+1,D] Representations of each event.
        representations_mask: [B,L+1] Mask indicating which representations
            are well-defined. If `None`, there is no mask. Defaults to
            `None`.
        artifacts: A dictionary of whatever else you might want to return.
        update_running_stats: whether running stats are updated or not.

    Returns:
        log_intensity: [B,T,M] The intensities for each query time for
            each mark (class).
        intensities_mask: [B,T] Which intensities are valid for further
            computation based on e.g. sufficient history available.
    """
    _, events_length, _ = representations.size()
    batch_size, query_length = query.size()
    # Event times with the window start prepended, aligned with
    # `representations` along dim 1.
    event_times = events.get_times(prepend_window=True)
    query_representations, intensity_mask = self.get_query_representations(
        events=events,
        query=query,
        prev_times=prev_times,
        prev_times_idxs=prev_times_idxs,
        pos_delta_mask=pos_delta_mask,
        is_event=is_event,
        representations=representations,
        representations_mask=representations_mask)  # [B,T,D], [B,T]
    # Build an additive attention mask over the window-prepended events:
    # query t may attend to event j iff j <= prev_times_idxs[b,t].
    memory_mask = th.arange(
        events_length, device=representations.device).repeat(
        batch_size, query_length).reshape(
        batch_size, query_length, events_length)
    items_to_zero = memory_mask <= prev_times_idxs.unsqueeze(-1)
    # items_to_zero[:, :, 0] = events.first_event_on_window
    # Make sure there is at least one zero in the row
    missing_zeros = items_to_zero.sum(-1) == 0
    items_to_zero = items_to_zero | missing_zeros.unsqueeze(dim=-1)
    items_to_neg_inf = ~items_to_zero
    # Additive mask convention: 0 where attention is allowed, -inf where
    # it is blocked.
    memory_mask = memory_mask.float()
    memory_mask = memory_mask.masked_fill(items_to_zero, float(0.))
    memory_mask = memory_mask.masked_fill(items_to_neg_inf, float('-inf'))
    assert list(memory_mask.size()) == [
        batch_size, query_length, events_length]
    # NOTE(review): broadcasting [B,1,T] - [B,L+1,1] yields [B,L+1,T],
    # not [B,T,L+1] as the shape comments below suggest — confirm the
    # intended orientation.
    delta_times = query.unsqueeze(1) - event_times.unsqueeze(-1)
    delta_times = delta_times.unsqueeze(-1)  # [B,T,L+1,D]
    delta_times = self.mlp1(delta_times)
    delta_times = delta_times.squeeze(-1)  # [B,T,L+1]
    # NOTE(review): `attention_coefficients` is computed but never used
    # in the rest of this method.
    attention_coefficients = th.exp(-delta_times)  # [B,T,L+1]
    # [B,T,D] -> [T,B,D] and [B,L+1,D] -> [L+1,B,D]
    tgt = query_representations.transpose(0, 1)  # [T,B,D]
    # NOTE(review): the query representations are immediately replaced by
    # zeros here, so the decoder target carries no query information.
    tgt = th.zeros_like(tgt)
    memory = representations.transpose(0, 1)  # [L+1,B,D]
    # NOTE(review): `tgt_times`, `memory_times` and `prev_times_x` are not
    # defined anywhere in this scope, and `self.transformer_decoder` is
    # never assigned in the visible __init__, so this call raises a
    # NameError/AttributeError as written. Compare with
    # SelfAttentionMCDecoder.log_intensity for the intended call shape.
    hidden, attn_weights = self.transformer_decoder(
        tgt=tgt,
        memory=memory,
        memory_mask=memory_mask,
        tgt_times=tgt_times,
        memory_times=memory_times,
        prev_times=prev_times_x)  # [T,B,hidden_size], [B,T,L]
    # [T,B,hidden_size] -> [B,T,hidden_size]
    hidden = hidden.transpose(0, 1)
    outputs = self.mlp(hidden)  # [B,L,output_size]
    if artifacts is None:
        artifacts = {'decoder': {"attention_weights": attn_weights}}
    else:
        artifacts['decoder'] = {"attention_weights": attn_weights}
    return outputs, intensity_mask, artifacts
| 7,952 | 39.784615 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/mlp_mc.py | import torch as th
import torch.nn.functional as F
from typing import List, Optional, Tuple, Dict
from tpp.models.decoders.base.monte_carlo import MCDecoder
from tpp.models.base.process import Events
from tpp.pytorch.models import MLP
from tpp.utils.encoding import encoding_size
from tpp.utils.index import take_3_by_2
class MLPMCDecoder(MCDecoder):
    """A mlp decoder based on Monte Carlo estimations.

    Args:
        units_mlp: List of hidden layers sizes, including the output size.
        activation_mlp: Activation functions. Either a list or a string.
        constraint_mlp: Constraint of the network. Either `None`, nonneg or
            softplus.
        dropout_mlp: Dropout rates, either a list or a float.
        activation_final_mlp: Last activation of the MLP.
        mc_prop_est: Proportion of numbers of samples for the MC method,
            compared to the size of the input. (Default=1.).
        emb_dim: Size of the embeddings (default=2).
        temporal_scaling: Scaling parameter for temporal encoding
        encoding: Way to encode the events: either times_only, or temporal.
            Defaults to times_only.
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.

    Raises:
        ValueError: If `units_mlp` has fewer than two entries.
    """
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = "parametric_softplus",
            # Other params
            mc_prop_est: Optional[float] = 1.,
            emb_dim: Optional[int] = 2,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        # Validate before indexing: previously the check ran after
        # `units_mlp[0]`, so an empty list raised IndexError instead of
        # the intended ValueError.
        if len(units_mlp) < 2:
            raise ValueError("Units of length at least 2 need to be specified")
        enc_size = encoding_size(encoding=encoding, emb_dim=emb_dim)
        # `units_mlp[0]` is the total MLP input width; the part not taken
        # by the query encoding must be supplied by the encoder
        # representations (see `log_intensity`).
        input_size = units_mlp[0] - enc_size
        super(MLPMCDecoder, self).__init__(
            name="mlp-mc",
            input_size=input_size,
            mc_prop_est=mc_prop_est,
            emb_dim=emb_dim,
            temporal_scaling=temporal_scaling,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
        self.mlp = MLP(
            units=units_mlp[1:],
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            # units_mlp in this class also provides the input dimensionality
            # of the mlp
            input_shape=units_mlp[0],
            activation_final=activation_final_mlp)

    def log_intensity(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.Tensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None
    ) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the log_intensity and a mask

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.
            artifacts: A dictionary of whatever else you might want to return.

        Returns:
            log_intensity: [B,T,M] The intensities for each query time for
                each mark (class).
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
            artifacts: Some measures.
        """
        (query_representations,
         intensity_mask) = self.get_query_representations(
            events=events,
            query=query,
            prev_times=prev_times,
            prev_times_idxs=prev_times_idxs,
            pos_delta_mask=pos_delta_mask,
            is_event=is_event,
            representations=representations,
            representations_mask=representations_mask)  # [B,T,enc_size], [B,T]
        # Representation of the event directly preceding each query.
        history_representations = take_3_by_2(
            representations, index=prev_times_idxs)  # [B,T,D]
        # Concatenate query encoding and history so the MLP sees both.
        hidden = th.cat(
            [query_representations, history_representations],
            dim=-1)  # [B,T,units_mlp[0]]
        outputs = self.mlp(hidden)  # [B,T,output_size]
        return outputs, intensity_mask, artifacts
| 5,563 | 39.911765 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/self_attention_mc.py | import torch as th
import torch.nn.functional as F
from typing import List, Optional, Tuple, Dict
from tpp.models.decoders.base.monte_carlo import MCDecoder
from tpp.models.base.process import Events
from tpp.pytorch.models import MLP
from tpp.utils.encoding import encoding_size
from tpp.utils.transformer_utils import TransformerDecoderNetwork
from tpp.utils.transformer_utils import TransformerDecoderLayer
class SelfAttentionMCDecoder(MCDecoder):
    """A self attention decoder based on Monte Carlo estimations.

    Args:
        units_mlp: List of hidden layers sizes, including the output size.
        activation_mlp: Activation functions. Either a list or a string.
        constraint_mlp: Constraint of the network. Either none, nonneg or
            softplus.
        dropout_mlp: Dropout rates, either a list or a float.
        activation_final_mlp: Last activation of the MLP.
        units_rnn: Hidden size of the Transformer.
        layers_rnn: Number of layers in the Transformer.
        n_heads: Number of heads in the Transformer.
        activation_rnn: The non-linearity to use for the Transformer.
        dropout_rnn: Rate of dropout in the Transformer.
        mc_prop_est: Proportion of numbers of samples for the MC method,
            compared to the size of the input. (Default=1.).
        emb_dim: Size of the embeddings (default=2).
        temporal_scaling: Scaling parameter for temporal encoding
        encoding: Way to encode the events: either times_only, marks_only,
            concatenate or temporal_encoding. Defaults to times_only.
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.
    """
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = "parametric_softplus",
            # Transformer
            units_rnn: Optional[int] = 16,
            layers_rnn: Optional[int] = 1,
            n_heads: Optional[int] = 1,
            activation_rnn: Optional[str] = "relu",
            dropout_rnn: Optional[float] = 0.,
            attn_activation: Optional[str] = "softmax",
            constraint_rnn: Optional[str] = None,
            # Other params
            mc_prop_est: Optional[float] = 1.,
            emb_dim: Optional[int] = 2,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        super(SelfAttentionMCDecoder, self).__init__(
            name="selfattention-mc",
            input_size=encoding_size(encoding=encoding, emb_dim=emb_dim),
            mc_prop_est=mc_prop_est,
            emb_dim=emb_dim,
            temporal_scaling=temporal_scaling,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
        # One attention layer template; the network stacks `layers_rnn`
        # copies of it.
        decoder_layer = TransformerDecoderLayer(
            d_model=self.encoding_size,
            nhead=n_heads,
            dim_feedforward=units_rnn,
            dropout=dropout_rnn,
            activation=activation_rnn,
            attn_activation=attn_activation,
            constraint=constraint_rnn,
            normalisation="layernorm")
        self.transformer_decoder = TransformerDecoderNetwork(
            decoder_layer=decoder_layer,
            num_layers=layers_rnn)
        # Output head mapping decoder hidden states to intensities.
        self.mlp = MLP(
            units=units_mlp,
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            input_shape=self.encoding_size,
            activation_final=activation_final_mlp)
        # Kept so `log_intensity` can tile the attention mask per head.
        self.n_heads = n_heads

    def log_intensity(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.Tensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None
    ) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the log_intensity and a mask

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.
            artifacts: A dictionary of whatever else you might want to return.

        Returns:
            log_intensity: [B,T,M] The intensities for each query time for
                each mark (class).
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
        """
        batch_size, query_length = query.size()
        _, events_length, _ = representations.size()
        query_representations, intensity_mask = self.get_query_representations(
            events=events,
            query=query,
            prev_times=prev_times,
            prev_times_idxs=prev_times_idxs,
            pos_delta_mask=pos_delta_mask,
            is_event=is_event,
            representations=representations,
            representations_mask=representations_mask)  # [B,T,D], [B,T]
        # Additive attention mask over window-prepended events: query t
        # may attend to event j iff j <= prev_times_idxs[b,t] (no peeking
        # at future events).
        memory_mask = th.arange(
            events_length, device=representations.device).repeat(
            batch_size, query_length).reshape(
            batch_size, query_length, events_length)
        items_to_zero = memory_mask <= prev_times_idxs.unsqueeze(-1)
        # Make sure there is at least one zero in the row
        missing_zeros = items_to_zero.sum(-1) == 0
        items_to_zero = items_to_zero | missing_zeros.unsqueeze(-1)
        items_to_neg_inf = ~items_to_zero
        # Additive mask convention: 0 = attend, -inf = blocked.
        memory_mask = memory_mask.float()
        memory_mask = memory_mask.masked_fill(items_to_zero, float(0.))
        memory_mask = memory_mask.masked_fill(items_to_neg_inf, float('-inf'))
        if self.n_heads > 1:
            # Tile the mask per attention head (the attention layer
            # presumably expects [n_heads*B, T, L+1] — matches the assert).
            memory_mask = memory_mask.repeat(self.n_heads, 1, 1)
        assert list(memory_mask.size()) == [
            self.n_heads * batch_size, query_length, events_length]
        # Seq-first layout for the transformer:
        # [B,T,D] -> [T,B,D] and [B,L+1,D] -> [L+1,B,D]
        query_representations = query_representations.transpose(0, 1)
        representations = representations.transpose(0, 1)
        hidden, attn_weights = self.transformer_decoder(
            tgt=query_representations,
            memory=representations,
            memory_mask=memory_mask
        )  # [T,B,hidden_size], [B,T,L]
        # [T,B,hidden_size] -> [B,T,hidden_size]
        hidden = hidden.transpose(0, 1)
        # L2-normalise hidden states before the output head.
        hidden = F.normalize(hidden, dim=-1, p=2)
        outputs = self.mlp(hidden)  # [B,L,output_size]
        if artifacts is None:
            artifacts = {'decoder': {"attention_weights": attn_weights}}
        else:
            artifacts['decoder'] = {"attention_weights": attn_weights}
        return outputs, intensity_mask, artifacts
| 7,944 | 41.945946 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/mlp_cm.py | import torch as th
from typing import List, Optional, Tuple, Dict
from tpp.models.decoders.base.cumulative import CumulativeDecoder
from tpp.models.base.process import Events
from tpp.pytorch.models import MLP
from tpp.utils.encoding import encoding_size
from tpp.utils.index import take_3_by_2
class MLPCmDecoder(CumulativeDecoder):
    """A mlp decoder based on the cumulative approach.

    Args:
        units_mlp: List of hidden layers sizes, including the output size.
        activation_mlp: Activation functions. Either a list or a string.
        constraint_mlp: Constraint of the network. Either none, nonneg or
            softplus.
        dropout_mlp: Dropout rates, either a list or a float.
        activation_final_mlp: Last activation of the MLP.
        mc_prop_est: Proportion of numbers of samples for the MC method,
            compared to the size of the input. (Default=1.).
        do_zero_subtraction: If `True` the class computes
            Lambda(tau) = Lambda'(tau) - Lambda'(0)
            in order to enforce Lambda(0) = 0. Defaults to `True`.
        emb_dim: Size of the embeddings (default=2).
        encoding: Way to encode the events: either times_only, or temporal.
            Defaults to times_only.
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.

    Raises:
        ValueError: If `units_mlp` has fewer than two entries.
    """
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = "nonneg",
            activation_final_mlp: Optional[str] = "parametric_softplus",
            # Other params
            model_log_cm: Optional[bool] = False,
            do_zero_subtraction: Optional[bool] = True,
            emb_dim: Optional[int] = 2,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        # Validate before indexing `units_mlp[0]`; this guard is missing in
        # the original and keeps the error consistent with MLPMCDecoder
        # (ValueError instead of IndexError for a too-short list).
        if len(units_mlp) < 2:
            raise ValueError("Units of length at least 2 need to be specified")
        # A cumulative decoder must be monotone, hence the forced
        # non-negativity constraint.
        if constraint_mlp is None:
            print("Warning! MLP decoder is unconstrained. Setting to `nonneg`")
            constraint_mlp = "nonneg"
        enc_size = encoding_size(encoding=encoding, emb_dim=emb_dim)
        # `units_mlp[0]` is the total MLP input width; the part not taken
        # by the query encoding comes from the encoder representations.
        input_size = units_mlp[0] - enc_size
        super(MLPCmDecoder, self).__init__(
            name="mlp-cm",
            do_zero_subtraction=do_zero_subtraction,
            model_log_cm=model_log_cm,
            input_size=input_size,
            emb_dim=emb_dim,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
        self.mlp = MLP(
            units=units_mlp[1:],
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            # units_mlp in this class also provides the input dimensionality
            # of the mlp
            input_shape=units_mlp[0],
            activation_final=activation_final_mlp)

    def cum_intensity(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.Tensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None,
            update_running_stats: Optional[bool] = True
    ) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the cumulative log intensity and a mask

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.
            artifacts: A dictionary of whatever else you might want to return.
            update_running_stats: whether running stats are updated or not.

        Returns:
            intensity_integral: [B,T,M] The cumulative intensities for each
                query time for each mark (class).
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
            artifacts: Some measures.
        """
        (query_representations,
         intensity_mask) = self.get_query_representations(
            events=events,
            query=query,
            prev_times=prev_times,
            prev_times_idxs=prev_times_idxs,
            pos_delta_mask=pos_delta_mask,
            is_event=is_event,
            representations=representations,
            representations_mask=representations_mask)  # [B,T,enc_size], [B,T]
        # Representation of the event directly preceding each query.
        history_representations = take_3_by_2(
            representations, index=prev_times_idxs)  # [B,T,D]
        # Concatenate query encoding and history so the (monotone) MLP
        # sees both.
        hidden = th.cat(
            [query_representations, history_representations],
            dim=-1)  # [B,T,units_mlp[0]]
        intensity_itg = self.mlp(hidden)  # [B,T,output_size]
        return intensity_itg, intensity_mask, artifacts
| 5,883 | 40.730496 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/rmtpp_cm.py | import torch as th
import torch.nn as nn
from typing import Dict, Optional, Tuple
from tpp.models.decoders.base.cumulative import CumulativeDecoder
from tpp.utils.events import Events
from tpp.utils.index import take_3_by_2, take_2_by_2
from tpp.utils.stability import epsilon
class RMTPPCmDecoder(CumulativeDecoder):
    """Analytic decoder process, uses a closed form for the intensity
    integral. Has a closed form for the intensity but we compute using the
    gradient. This is just a check.

    See https://www.kdd.org/kdd2016/papers/files/rpp1081-duA.pdf.

    Args:
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.
    """
    def __init__(
            self,
            multi_labels: Optional[bool] = False,
            model_log_cm: Optional[bool] = False,
            do_zero_subtraction: Optional[bool] = True,
            marks: Optional[int] = 1,
            **kwargs):
        super(RMTPPCmDecoder, self).__init__(
            name="rmtpp-cm",
            do_zero_subtraction=do_zero_subtraction,
            model_log_cm=model_log_cm,
            input_size=marks+1,
            marks=marks)
        self.multi_labels = multi_labels
        # Scalar decay/growth rate `w` of the RMTPP intensity.
        self.w = nn.Parameter(th.Tensor(1))
        self.reset_parameters()

    def reset_parameters(self):
        # Initialise `w` uniformly in [0, 0.001) — small and non-negative.
        nn.init.uniform_(self.w, b=0.001)

    def cum_intensity(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.Tensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None,
            update_running_stats: Optional[bool] = True
    ) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the cumulative log intensity and a mask

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.
            artifacts: A dictionary of whatever else you might want to return.
            update_running_stats: whether running stats are updated or not.

        Returns:
            intensity_integral: [B,T,M] The cumulative intensities for each
                query time for each mark (class).
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
            artifacts: Some measures.
        """
        query_representations = take_3_by_2(
            representations, index=prev_times_idxs)  # [B,T,D]
        # Channel 0 drives the base intensity; the remaining M channels
        # drive the mark distribution.
        v_h_t = query_representations[:, :, 0]  # [B,T]
        v_h_m = query_representations[:, :, 1:]  # [B,T,M]
        w_delta_t = self.w * (query - prev_times)  # [B,T]
        if self.multi_labels:
            # Independent per-mark probabilities (multi-label setting).
            p_m = th.sigmoid(v_h_m)  # [B,T,M]
        else:
            # Mutually exclusive marks.
            p_m = th.softmax(v_h_m, dim=-1)  # [B,T,M]
        # Keep probabilities strictly positive for downstream logs.
        regulariser = epsilon(dtype=p_m.dtype, device=p_m.device)
        p_m = p_m + regulariser
        intensity_mask = pos_delta_mask  # [B,T]
        if representations_mask is not None:
            history_representations_mask = take_2_by_2(
                representations_mask, index=prev_times_idxs)  # [B,T]
            intensity_mask = intensity_mask * history_representations_mask
        exp_1, exp_2 = w_delta_t, v_h_t  # [B,T]
        # Avoid exponentiating to get masked infinity - seems to induce an
        # error in gradient calculation if we use the numerically stable one.
        exp_1, exp_2 = exp_1 * intensity_mask, exp_2 * intensity_mask  # [B,T]
        # Closed-form RMTPP integral:
        # Lambda(t) = (exp(v_t + w*dt) - exp(v_t)) / w,
        # either in log-space or directly depending on `model_log_cm`.
        if self.model_log_cm:
            base_intensity_itg = -th.log(self.w) + exp_2 + th.log(
                th.exp(exp_1) - 1. + 1e-30)
        else:
            base_intensity_itg = th.exp(exp_1 + exp_2) - th.exp(exp_2)
            base_intensity_itg = base_intensity_itg / self.w  # [B,T]
            # Clamp tiny negative values from floating point error.
            base_intensity_itg = th.relu(base_intensity_itg)
        marked_intensity_itg = base_intensity_itg.unsqueeze(dim=-1)  # [B,T,1]
        # In log space multiplying by p_m becomes an addition.
        if self.model_log_cm:
            marked_intensity_itg = marked_intensity_itg + p_m  # [B,T,M]
        else:
            marked_intensity_itg = marked_intensity_itg * p_m  # [B,T,M]
        artifacts_decoder = {
            "base_intensity_integral": base_intensity_itg,
            "mark_probability": p_m}
        if artifacts is None:
            artifacts = {'decoder': artifacts_decoder}
        else:
            artifacts['decoder'] = artifacts_decoder
        return (marked_intensity_itg,
                intensity_mask, artifacts)  # [B,T,M], [B,T], Dict
| 5,695 | 42.151515 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/neural_hawkes.py | """
Based on the implementation of Xiao Liu on Jan. 31, 2019.
https://github.com/xiao03/nh
"""
from typing import List, Optional, Tuple, Dict
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from tpp.models.decoders.base.monte_carlo import MCDecoder
from tpp.models.base.process import Events
from tpp.pytorch.layers.log import Log
from tpp.pytorch.models import MLP
from tpp.utils.index import take_3_by_2
from tpp.utils.stability import check_tensor
class NeuralHawkesDecoder(MCDecoder):
    """Continuous time LSTM network with decay function.

    Implements the continuous-time LSTM cell of the Neural Hawkes Process:
    seven gates are produced per step (input, forget, candidate, output,
    plus "bar" input/forget for the asymptotic cell state and a decay
    rate), and the cell state decays exponentially between events.
    """
    def __init__(self,
                 # RNN args
                 units_rnn: int,
                 # MLP args
                 units_mlp: List[int],
                 activation_mlp: Optional[str] = "relu",
                 dropout_mlp: Optional[float] = 0.,
                 constraint_mlp: Optional[str] = None,
                 activation_final_mlp: Optional[str] = "parametric_softplus",
                 # Other params
                 mc_prop_est: Optional[float] = 1.,
                 input_size: Optional[int] = None,
                 emb_dim: Optional[int] = 1,
                 temporal_scaling: Optional[float] = 1.,
                 encoding: Optional[str] = "times_only",
                 time_encoding: Optional[str] = "relative",
                 marks: Optional[int] = 1,
                 **kwargs):
        super(NeuralHawkesDecoder, self).__init__(
            name="neural-hawkes",
            mc_prop_est=mc_prop_est,
            input_size=input_size,
            emb_dim=emb_dim,
            temporal_scaling=temporal_scaling,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
        # Parameters
        # Weights produce all 7 gates at once (chunked in `recurrence`).
        self.weight_ih = nn.Parameter(th.Tensor(units_rnn, units_rnn * 7))
        self.weight_hh = nn.Parameter(th.Tensor(units_rnn, units_rnn * 7))
        self.bias = nn.Parameter(th.Tensor(units_rnn * 7))
        self.mlp = MLP(
            units=units_mlp,
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            input_shape=self.encoding_size,
            activation_final=activation_final_mlp)
        self.units_rnn = units_rnn
        self.reset_parameters()

    def reset_parameters(self):
        # Small non-negative init, uniform in [0, 0.001).
        nn.init.uniform_(self.weight_ih, b=0.001)
        nn.init.uniform_(self.weight_hh, b=0.001)
        nn.init.uniform_(self.bias, b=0.001)

    def recurrence(self, emb_event_t, h_d_tm1, c_tm1, c_bar_tm1):
        """One continuous-time LSTM update at an event.

        Returns the new cell state, the new asymptotic ("bar") cell state,
        the output gate and the decay rate used by `decay`.
        """
        gates = (emb_event_t @ self.weight_ih +
                 h_d_tm1 @ self.weight_hh + self.bias)
        # B * 2H
        # Split into the 7 gates along the last dimension.
        (gate_i,
         gate_f,
         gate_z,
         gate_o,
         gate_i_bar,
         gate_f_bar,
         gate_delta) = th.chunk(gates, 7, -1)
        gate_i = th.sigmoid(gate_i)
        gate_f = th.sigmoid(gate_f)
        gate_z = th.tanh(gate_z)
        gate_o = th.sigmoid(gate_o)
        gate_i_bar = th.sigmoid(gate_i_bar)
        gate_f_bar = th.sigmoid(gate_f_bar)
        # Softplus keeps the decay rate positive.
        gate_delta = F.softplus(gate_delta)
        c_t = gate_f * c_tm1 + gate_i * gate_z
        c_bar_t = gate_f_bar * c_bar_tm1 + gate_i_bar * gate_z
        return c_t, c_bar_t, gate_o, gate_delta

    @staticmethod
    def decay(c_t, c_bar_t, o_t, delta_t, duration_t):
        """Decay the cell state towards its asymptote over `duration_t`:
        c(t) = c_bar + (c - c_bar) * exp(-delta * duration).
        """
        c_d_t = c_bar_t + (c_t - c_bar_t) * \
            th.exp(-delta_t * duration_t.view(-1, 1))
        h_d_t = o_t * th.tanh(c_d_t)
        return c_d_t, h_d_t

    def log_intensity(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.Tensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None
    ) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the log_intensity and a mask

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.
            artifacts: A dictionary of whatever else you might want to return.

        Returns:
            log_intensity: [B,T,M] The intensities for each query time for
                each mark (class).
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
        """
        batch_size, query_length = query.size()
        # NOTE(review): `query_representations` is unused below; only the
        # mask from this call is consumed.
        query_representations, intensity_mask = self.get_query_representations(
            events=events,
            query=query,
            prev_times=prev_times,
            prev_times_idxs=prev_times_idxs,
            pos_delta_mask=pos_delta_mask,
            is_event=is_event,
            representations=representations,
            representations_mask=representations_mask)  # [B,T,D], [B,T]
        history_representations = take_3_by_2(
            representations, index=prev_times_idxs)
        # Zero out invalid query/prev times so masked steps stay finite.
        query = query * intensity_mask
        prev_times = prev_times * intensity_mask
        h_seq = th.zeros(
            query_length,
            batch_size,
            self.units_rnn,
            dtype=th.float,
            device=representations.device)
        h_d = th.zeros(
            batch_size,
            self.units_rnn,
            dtype=th.float,
            device=representations.device)
        c_d = th.zeros(
            batch_size,
            self.units_rnn,
            dtype=th.float,
            device=representations.device)
        c_bar = th.zeros(
            batch_size,
            self.units_rnn,
            dtype=th.float,
            device=representations.device)
        # Sequentially update the CT-LSTM state for each query step,
        # decaying the cell over `query - prev_times`.
        for t in range(query_length):
            c, new_c_bar, o_t, delta_t = self.recurrence(
                history_representations[:, t], h_d, c_d, c_bar)
            new_c_d, new_h_d = self.decay(
                c, new_c_bar, o_t, delta_t, query[:, t] - prev_times[:, t])
            # Only carry the new state where the step is valid; masked
            # positions keep the previous state unchanged.
            mask = intensity_mask[:, t].unsqueeze(-1)
            h_d = new_h_d * mask + h_d * (1. - mask)
            c_d = new_c_d * mask + c_d * (1. - mask)
            c_bar = new_c_bar * mask + c_bar * (1. - mask)
            h_seq[t] = h_d
        hidden = h_seq.transpose(0, 1)
        # L2-normalise hidden states before the output head.
        hidden = F.normalize(hidden, dim=-1, p=2)
        outputs = self.mlp(hidden)  # [B,L,output_size]
        # MLP output must be strictly positive before taking the log.
        check_tensor(outputs, positive=True, strict=True)
        log = Log.apply
        outputs = log(outputs)
        return outputs, intensity_mask, artifacts
| 7,444 | 36.225 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/base/variable_history.py | import abc
import torch as th
import torch.nn as nn
from typing import Optional
from tpp.models.base.process import Events
from tpp.models.decoders.base.decoder import Decoder
from tpp.pytorch.models import MLP
from tpp.utils.encoding import SinusoidalEncoding
from tpp.utils.encoding import event_encoder
from tpp.utils.encoding import encoding_size
from tpp.utils.index import take_3_by_2, take_2_by_2
class VariableHistoryDecoder(Decoder, abc.ABC):
    """Variable history decoder. Here, the size H depends on the encoding type.
    It can be either 1, emb_dim or emb_dim+1.

    Args:
        name: The name of the decoder class.
        input_size: The dimensionality of the input required from the encoder.
            Defaults to `None`. This is mainly just for tracking/debugging
            ease.
        emb_dim: Size of the embeddings. Defaults to 1.
        embedding_constraint: Constraint on the label-embedding weights.
            Defaults to `None`.
        temporal_scaling: Scaling parameter for temporal encoding
        encoding: Way to encode the queries: either times_only, marks_only,
            concatenate or temporal. Defaults to times_only
        time_encoding: If "relative", query times are re-expressed as deltas
            from the preceding event time before encoding. Defaults to
            "relative".
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.
    """
    def __init__(
            self,
            name: str,
            input_size: Optional[int] = None,
            emb_dim: Optional[int] = 1,
            embedding_constraint: Optional[str] = None,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        super(VariableHistoryDecoder, self).__init__(
            name=name,
            input_size=input_size,
            marks=marks,
            **kwargs)
        self.emb_dim = emb_dim
        self.encoding = encoding
        self.time_encoding = time_encoding
        self.embedding_constraint = embedding_constraint
        # H: 1, emb_dim or emb_dim + 1 depending on the chosen encoding.
        self.encoding_size = encoding_size(
            encoding=self.encoding, emb_dim=self.emb_dim)
        # Label embedding (single bias-free linear layer), built only for
        # encodings that make use of the marks.
        self.embedding = None
        if encoding in ["marks_only", "concatenate", "temporal_with_labels",
                        "learnable_with_labels"]:
            self.embedding = MLP(
                units=[self.emb_dim],
                activations=None,
                constraint=self.embedding_constraint,
                dropout_rates=0,
                input_shape=self.marks,
                activation_final=None,
                use_bias=False)
        # Time encoding: fixed sinusoidal features ("temporal*"), or a
        # learnable linear map from the scalar time ("learnable*").
        self.temporal_enc = None
        if encoding in ["temporal", "temporal_with_labels"]:
            self.temporal_enc = SinusoidalEncoding(
                emb_dim=self.emb_dim, scaling=temporal_scaling)
        elif encoding in ["learnable", "learnable_with_labels"]:
            self.temporal_enc = MLP(
                units=[self.emb_dim],
                activations=None,
                constraint=self.embedding_constraint,
                dropout_rates=0,
                input_shape=1,
                activation_final=None)
    def get_query_representations(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.Tensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None):
        """Computes the query representations

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.

        Returns:
            query_representations: [B,T,D] Representations of the queries
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
        """
        # Encode queries relative to the preceding event time if requested.
        if self.time_encoding == "relative":
            query = query - prev_times
        labels = events.labels
        # Prepend an all-zero label row for the window-start pseudo-event so
        # that label indexes line up with the window-prepended event indexes
        # in `prev_times_idxs`.
        labels = th.cat(
            (th.zeros(
                size=(labels.shape[0], 1, labels.shape[-1]),
                dtype=labels.dtype, device=labels.device),
             labels), dim=1)
        # Encode (time, label-of-preceding-event) pairs into query features.
        query_representations, representations_mask = event_encoder(
            times=query,
            mask=representations_mask,
            encoding=self.encoding,
            labels=take_3_by_2(labels, index=prev_times_idxs),
            embedding_layer=self.embedding,
            temporal_enc=self.temporal_enc)  # [B,T,D], [B,T]
        # A query is valid only where query > prev_times ...
        intensity_mask = pos_delta_mask  # [B,T]
        if representations_mask is not None:
            # ... and where the preceding event's representation is valid.
            history_representations_mask = take_2_by_2(
                representations_mask, index=prev_times_idxs)  # [B,T]
            intensity_mask = intensity_mask * history_representations_mask
        return query_representations, intensity_mask  # [B,T,D], [B,T]
| 5,694 | 39.105634 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/base/decoder.py | import abc
import torch as th
import torch.nn as nn
from typing import Dict, Optional, Tuple
from tpp.utils.events import Events
class Decoder(nn.Module, abc.ABC):
    """A decoder for a TPP.

    A decoder consumes the representations built by an encoder and produces
    (log) intensities and intensity integrals at arbitrary query times.

    Args:
        name: The name of the decoder class.
        input_size: The dimensionality of the input required from the encoder.
            Defaults to `None`. This is mainly just for tracking/debugging
            ease.
        marks: The distinct number of marks (classes) for the process.
            Defaults to 1.
    """
    def __init__(
            self,
            name: str,
            input_size: Optional[int] = None,
            marks: Optional[int] = 1,
            **kwargs):
        super(Decoder, self).__init__()
        self.name = name
        self.input_size = input_size
        self.marks = marks
        # A decoder that declares an input size must declare a positive one.
        declares_size = self.input_size is not None
        if declares_size and self.input_size <= 0:
            raise ValueError("Representation dimensionality of decoder is 0.")
    @abc.abstractmethod
    def forward(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.Tensor,
            is_event: th.Tensor,
            pos_delta_mask: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None
    ) -> Tuple[th.Tensor, th.Tensor, th.Tensor, Dict]:
        """Compute the intensities at each query time given the event
        representations.

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes (into window-prepended events) of
                the events directly preceding each query.
            is_event: [B,T] Mask, 1 where `prev_times_idxs` points at a real
                event, 0 where it points at a window boundary.
            pos_delta_mask: [B,T] Mask, 1 where `query - prev_times` is
                strictly positive.
            representations: [B,L+1,D] Representations of the window start
                and each event.
            representations_mask: [B,L+1] Mask indicating which
                representations are well-defined. If `None`, there is no
                mask. Defaults to `None`.
            artifacts: A dictionary of whatever else you might want to return.

        Returns:
            log_intensity: [B,T,M] Log-intensity at each query time for each
                mark (class).
            intensity_integrals: [B,T,M] Integral of the intensity from the
                most recent event to each query time, per mark.
            intensities_mask: [B,T] Which intensities are valid for further
                computation (e.g. sufficient history available).
            artifacts: A dictionary of whatever else you might want to return.
        """
        pass
| 3,146 | 37.851852 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/base/cumulative.py | import abc
import torch as th
from typing import Optional, Tuple, Dict
from tpp.models.base.process import Events
from tpp.models.decoders.base.variable_history import VariableHistoryDecoder
from tpp.pytorch.layers.log import Log
from tpp.utils.stability import epsilon, check_tensor, subtract_exp
class CumulativeDecoder(VariableHistoryDecoder, abc.ABC):
    """Decoder based on Cumulative intensity method. Here, the cumulative
    intensity is specified, but its derivative (the intensity itself) is
    obtained by automatic differentiation.

    Args:
        name: The name of the decoder class.
        do_zero_subtraction: If `True` the class computes
            Lambda(tau) = Lambda'(tau) - Lambda'(0)
            in order to enforce Lambda(0) = 0. Defaults to `True`.
        model_log_cm: If `True`, the network output is treated as the *log*
            of the cumulative intensity rather than the cumulative intensity
            itself. Defaults to `False`.
        input_size: The dimensionality of the input required from the encoder.
            Defaults to `None`. This is mainly just for tracking/debugging
            ease.
        emb_dim: Size of the embeddings. Defaults to 1.
        encoding: Way to encode the queries: either times_only, marks_only,
            concatenate or temporal_encoding. Defaults to times_only
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.
    """
    def __init__(self,
                 name: str,
                 do_zero_subtraction: Optional[bool] = True,
                 model_log_cm: Optional[bool] = False,
                 input_size: Optional[int] = None,
                 emb_dim: Optional[int] = 1,
                 encoding: Optional[str] = "times_only",
                 time_encoding: Optional[str] = "relative",
                 marks: Optional[int] = 1,
                 **kwargs):
        super(CumulativeDecoder, self).__init__(
            name=name,
            input_size=input_size,
            emb_dim=emb_dim,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
        self.do_zero_subtraction = do_zero_subtraction
        self.model_log_cm = model_log_cm
    @abc.abstractmethod
    def cum_intensity(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.Tensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None,
            update_running_stats: Optional[bool] = True
    ) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the cumulative log intensity and a mask

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.
            artifacts: A dictionary of whatever else you might want to return.
            update_running_stats: whether running stats are updated or not
                (optional).

        Returns:
            intensity_integral: [B,T,M] The cumulative intensities for each
                query time for each mark (class).
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
            artifacts: Some measures
        """
        pass
    def forward(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.Tensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None
    ) -> Tuple[th.Tensor, th.Tensor, th.Tensor, Dict]:
        """Compute the intensities for each query time given event
        representations.

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.
            artifacts: A dictionary of whatever else you might want to return.

        Returns:
            log_intensity: [B,T,M] The intensities for each query time for
                each mark (class).
            intensity_integrals: [B,T,M] The integral of the intensity from
                the most recent event to the query time for each mark.
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
            artifacts: Some measures
        """
        # Add grads for query to compute derivative
        query.requires_grad = True
        # Cumulative intensity evaluated at the query times. Running stats
        # are not updated here (they are updated in the zero-offset pass
        # below, when enabled).
        intensity_integrals_q, intensity_mask_q, artifacts = \
            self.cum_intensity(
                events=events,
                query=query,
                prev_times=prev_times,
                prev_times_idxs=prev_times_idxs,
                pos_delta_mask=pos_delta_mask,
                is_event=is_event,
                representations=representations,
                representations_mask=representations_mask,
                artifacts=artifacts,
                update_running_stats=False)
        # Remove masked values and add epsilon for stability
        intensity_integrals_q = \
            intensity_integrals_q * intensity_mask_q.unsqueeze(-1)
        # Optional zero substraction
        if self.do_zero_subtraction:
            # Second pass at the left endpoint (query == prev_times) so that
            # Lambda(prev_times) can be subtracted, enforcing Lambda(0) = 0.
            (intensity_integrals_z, intensity_mask_z,
             artifacts_zero) = self.cum_intensity(
                events=events,
                query=prev_times,
                prev_times=prev_times,
                prev_times_idxs=prev_times_idxs,
                pos_delta_mask=pos_delta_mask,
                is_event=is_event,
                representations=representations,
                representations_mask=representations_mask,
                artifacts=artifacts)
            intensity_integrals_z = \
                intensity_integrals_z * intensity_mask_z.unsqueeze(-1)
            # Clamp the offset-corrected value at zero, then re-add the
            # offset so gradients still flow through the zero-point pass.
            intensity_integrals_q = th.clamp(
                intensity_integrals_q - intensity_integrals_z, min=0.
            ) + intensity_integrals_z
            # Small linear-in-t term keeps the integral strictly increasing.
            intensity_integrals_q = intensity_integrals_q + epsilon(
                eps=1e-3,
                dtype=intensity_integrals_q.dtype,
                device=intensity_integrals_q.device) * query.unsqueeze(-1)
            if self.model_log_cm:
                # Network models log Lambda: exp(q) - exp(z), computed stably.
                intensity_integrals = subtract_exp(
                    intensity_integrals_q, intensity_integrals_z)
            else:
                intensity_integrals = \
                    intensity_integrals_q - intensity_integrals_z
            intensity_mask = intensity_mask_q * intensity_mask_z
        else:
            intensity_integrals_q = intensity_integrals_q + epsilon(
                eps=1e-3,
                dtype=intensity_integrals_q.dtype,
                device=intensity_integrals_q.device) * query.unsqueeze(-1)
            intensity_mask = intensity_mask_q
            if self.model_log_cm:
                intensity_integrals = th.exp(intensity_integrals_q)
            else:
                intensity_integrals = intensity_integrals_q
        check_tensor(intensity_integrals * intensity_mask.unsqueeze(-1),
                     positive=True)
        # Compute derivative of the integral w.r.t. the query times via the
        # double-backward trick: the first grad builds the graph of
        # <grad_outputs, dLambda/dquery>; differentiating that w.r.t.
        # grad_outputs then recovers dLambda/dquery elementwise, i.e. the
        # per-mark intensity at each query time.
        grad_outputs = th.zeros_like(intensity_integrals_q, requires_grad=True)
        grad_inputs = th.autograd.grad(
            outputs=intensity_integrals_q,
            inputs=query,
            grad_outputs=grad_outputs,
            retain_graph=True,
            create_graph=True)[0]
        marked_intensity = th.autograd.grad(
            outputs=grad_inputs,
            inputs=grad_outputs,
            grad_outputs=th.ones_like(grad_inputs),
            retain_graph=True,
            create_graph=True)[0]
        query.requires_grad = False
        check_tensor(marked_intensity, positive=True, strict=True)
        log = Log.apply
        if self.model_log_cm:
            # Chain rule for Lambda = exp(g): log lambda = log(dg/dt) + g.
            marked_log_intensity = \
                log(marked_intensity) + intensity_integrals_q
        else:
            marked_log_intensity = log(marked_intensity)
        artifacts_decoder = {
            "intensity_integrals": intensity_integrals,
            "marked_intensity": marked_intensity,
            "marked_log_intensity": marked_log_intensity,
            "intensity_mask": intensity_mask}
        if artifacts is None:
            artifacts = {'decoder': artifacts_decoder}
        else:
            # Preserve any attention weights already stashed by the encoder
            # side of the decoder artifacts.
            if 'decoder' in artifacts:
                if 'attention_weights' in artifacts['decoder']:
                    artifacts_decoder['attention_weights'] = \
                        artifacts['decoder']['attention_weights']
            artifacts['decoder'] = artifacts_decoder
        return (marked_log_intensity,
                intensity_integrals,
                intensity_mask,
                artifacts)  # [B,T,M], [B,T,M], [B,T], Dict
| 10,617 | 41.64257 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/decoders/base/monte_carlo.py | import abc
import torch as th
from typing import Optional, Tuple, Dict
from tpp.models.decoders.base.variable_history import VariableHistoryDecoder
from tpp.models.base.process import Events
from tpp.utils.stability import check_tensor
class MCDecoder(VariableHistoryDecoder, abc.ABC):
    """Decoder based on Monte Carlo method. Here, the intensity is specified,
    but its cumulative function is determined by a Monte Carlo estimation.

    Args:
        name: The name of the decoder class.
        mc_prop_est: Proportion of numbers of samples for the MC method,
            compared to the size of the input. Defaults to 1.
        input_size: The dimensionality of the input required from the encoder.
            Defaults to `None`. This is mainly just for tracking/debugging
            ease.
        emb_dim: Size of the embeddings. Defaults to 1.
        temporal_scaling: Scaling parameter for temporal encoding
        encoding: Way to encode the queries: either times_only, marks_only,
            concatenate or temporal_encoding. Defaults to times_only
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.
    """
    def __init__(self,
                 name: str,
                 mc_prop_est: Optional[float] = 1.,
                 input_size: Optional[int] = None,
                 emb_dim: Optional[int] = 1,
                 temporal_scaling: Optional[float] = 1.,
                 encoding: Optional[str] = "times_only",
                 time_encoding: Optional[str] = "relative",
                 marks: Optional[int] = 1,
                 **kwargs):
        super(MCDecoder, self).__init__(
            name=name,
            input_size=input_size,
            emb_dim=emb_dim,
            temporal_scaling=temporal_scaling,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
        self.mc_prop_est = mc_prop_est
    @abc.abstractmethod
    def log_intensity(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.Tensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None
    ) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the log_intensity and a mask

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.
            artifacts: A dictionary of whatever else you might want to return.

        Returns:
            log_intensity: [B,T,M] The intensities for each query time for
                each mark (class).
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
        """
        pass
    def forward(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.Tensor,
            pos_delta_mask: th.Tensor,
            is_event: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None
    ) -> Tuple[th.Tensor, th.Tensor, th.Tensor, Dict]:
        """Compute the intensities for each query time given event
        representations.

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.
            artifacts: A dictionary of whatever else you might want to return.

        Returns:
            log_intensity: [B,T,M] The intensities for each query time for
                each mark (class).
            intensity_integrals: [B,T,M] The integral of the intensity from
                the most recent event to the query time for each mark.
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
        """
        # Exact log-intensity at the query times themselves.
        marked_log_intensity, intensity_mask, artifacts = self.log_intensity(
            events=events,
            query=query,
            prev_times=prev_times,
            prev_times_idxs=prev_times_idxs,
            pos_delta_mask=pos_delta_mask,
            is_event=is_event,
            representations=representations,
            representations_mask=representations_mask,
            artifacts=artifacts)  # [B,T,M], [B,T], dict
        # Create Monte Carlo samples and sort them
        # NOTE(review): despite the docstring describing `mc_prop_est` as a
        # proportion, it is used directly as the number of MC samples per
        # query -- confirm this is intended.
        n_est = int(self.mc_prop_est)
        # Uniform samples in [prev_times, query] for each (batch, query).
        mc_times_samples = th.rand(
            query.shape[0], query.shape[1], n_est, device=query.device) * \
            (query - prev_times).unsqueeze(-1) + prev_times.unsqueeze(-1)
        mc_times_samples = th.sort(mc_times_samples, dim=-1).values
        # Flatten the (T, N) sample grid so a single log_intensity call
        # evaluates all of it; the history tensors are repeated to match.
        mc_times_samples = mc_times_samples.reshape(
            mc_times_samples.shape[0], -1)  # [B, TxN]
        mc_marked_log_intensity, _, _ = self.log_intensity(
            events=events,
            query=mc_times_samples,
            prev_times=th.repeat_interleave(prev_times, n_est, dim=-1),
            prev_times_idxs=th.repeat_interleave(
                prev_times_idxs, n_est, dim=-1),
            pos_delta_mask=th.repeat_interleave(pos_delta_mask, n_est, dim=-1),
            is_event=th.repeat_interleave(is_event, n_est, dim=-1),
            representations=representations,
            representations_mask=representations_mask)  # [B,TxN,M]
        mc_marked_log_intensity = mc_marked_log_intensity.reshape(
            query.shape[0], query.shape[1], n_est, self.marks)  # [B,T,N,M]
        # Zero out samples belonging to invalid queries before exponentiating.
        mc_marked_log_intensity = mc_marked_log_intensity * \
            intensity_mask.unsqueeze(-1).unsqueeze(-1)  # [B,T,N,M]
        marked_intensity_mc = th.exp(mc_marked_log_intensity)
        # MC estimate: (b - a) * mean(lambda(samples)).
        intensity_integrals = (query - prev_times).unsqueeze(-1) * \
            marked_intensity_mc.sum(-2) / float(n_est)  # [B,T,M]
        check_tensor(marked_log_intensity)
        check_tensor(intensity_integrals * intensity_mask.unsqueeze(-1),
                     positive=True)
        return (marked_log_intensity,
                intensity_integrals,
                intensity_mask,
                artifacts)  # [B,T,M], [B,T,M], [B,T], Dict
| 8,100 | 43.756906 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/encoders/identity.py | import torch as th
from typing import Dict, Optional, Tuple
from tpp.models.encoders.base.variable_history import VariableHistoryEncoder
from tpp.utils.encoding import encoding_size
from tpp.utils.events import Events
class IdentityEncoder(VariableHistoryEncoder):
    """Pass-through encoder: the raw event encodings are handed straight to
    the decoder, i.e. r(t) = rep(l, t).

    Args:
        emb_dim: Size of the embeddings. Defaults to 1.
        embedding_constraint: Constraint on the weights. Either `None`,
            'nonneg' or 'softplus'. Defaults to `None`.
        temporal_scaling: Scaling parameter for temporal encoding.
        encoding: Way to encode the events: either times_only, marks_only,
            concatenate or temporal_encoding. Defaults to times_only.
        marks: The distinct number of marks (classes) for the process.
            Defaults to 1.
    """
    def __init__(
            self,
            # Other args
            emb_dim: Optional[int] = 1,
            embedding_constraint: Optional[str] = None,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        # Output dimensionality is exactly the encoding size, since nothing
        # further is applied on top of the raw event encoding.
        raw_size = encoding_size(encoding=encoding, emb_dim=emb_dim)
        super(IdentityEncoder, self).__init__(
            name="identity",
            output_size=raw_size,
            emb_dim=emb_dim,
            embedding_constraint=embedding_constraint,
            temporal_scaling=temporal_scaling,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
    def forward(self, events: Events) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the (query-time independent) event representations.

        Args:
            events: [B,L] Times and labels of events.

        Returns:
            representations: [B,L+1,M+1] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which
                representations are well-defined.
            artifacts: Empty dict (nothing extra to return).
        """
        reps, reps_mask = self.get_events_representations(
            events=events)  # [B,L+1,enc] [B,L+1]
        artifacts = dict()
        return reps, reps_mask, artifacts  # [B,L+1,D], [B,L+1], Dict
| 2,409 | 37.870968 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/encoders/mlp_variable.py | import torch as th
from typing import Dict, List, Optional, Tuple
from tpp.models.encoders.base.variable_history import VariableHistoryEncoder
from tpp.pytorch.models import MLP
from tpp.utils.events import Events
class MLPVariableEncoder(VariableHistoryEncoder):
    """Variable-history MLP encoder, i.e. r(t) = MLP(rep(l, t)).

    Args:
        units_mlp: List of hidden layers sizes for MLP.
        activation_mlp: MLP activation functions. Either a list or a string.
        dropout_mlp: MLP dropout rates. Either a list or a float.
        constraint_mlp: Weight constraint for the MLP. Either `None`,
            'nonneg' or 'softplus'.
        activation_final_mlp: Activation applied after the last MLP layer.
        emb_dim: Size of the embeddings. Defaults to 1.
        embedding_constraint: Constraint on the embedding weights. Either
            `None`, 'nonneg' or 'softplus'. Defaults to `None`.
        temporal_scaling: Scaling parameter for temporal encoding.
        encoding: Way to encode the events: either times_only, marks_only,
            concatenate or temporal_encoding. Defaults to times_only.
        marks: The distinct number of marks (classes) for the process.
            Defaults to 1.
    """
    def __init__(
            self,
            # MLP args
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = None,
            # Other args
            emb_dim: Optional[int] = 1,
            embedding_constraint: Optional[str] = None,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        super(MLPVariableEncoder, self).__init__(
            name="mlp-variable",
            output_size=units_mlp[-1],
            emb_dim=emb_dim,
            embedding_constraint=embedding_constraint,
            temporal_scaling=temporal_scaling,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
        # The MLP consumes the H-dimensional event encoding produced by the
        # parent class (`self.encoding_size` is set by the super call).
        self.mlp = MLP(
            units=units_mlp,
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            input_shape=self.encoding_size,
            activation_final=activation_final_mlp)
    def forward(self, events: Events) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the (query-time independent) event representations.

        Args:
            events: [B,L] Times and labels of events.

        Returns:
            representations: [B,L+1,M+1] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which
                representations are well-defined.
            artifacts: Empty dict (nothing extra to return).
        """
        event_reps, event_reps_mask = self.get_events_representations(
            events=events)  # [B,L+1,enc] [B,L+1]
        transformed = self.mlp(event_reps)  # [B,L+1,D]
        artifacts = dict()
        return transformed, event_reps_mask, artifacts
| 3,057 | 38.714286 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/encoders/gru.py | from torch import nn
from typing import Optional, List
from tpp.models.encoders.base.recurrent import RecurrentEncoder
from tpp.utils.encoding import encoding_size
class GRUEncoder(RecurrentEncoder):
    """GRU network, based on a variable recurrent encoder.

    Args:
        units_rnn: Hidden size of the GRU.
        layers_rnn: Number of layers in the GRU.
        dropout_rnn: GRU dropout rate.
        units_mlp: List of hidden layers sizes for the MLP head.
        activation_mlp: MLP activation functions. Either a list or a string.
        dropout_mlp: MLP dropout rates.
        constraint_mlp: Weight constraint for the MLP.
        activation_final_mlp: Activation of final layer of MLP.
        emb_dim: Size of the embeddings. Defaults to 1.
        embedding_constraint: Constraint on the weights. Either `None`,
            'nonneg' or 'softplus'. Defaults to `None`.
        temporal_scaling: Scaling parameter for temporal encoding.
        encoding: Way to encode the events: either times_only, marks_only,
            concatenate or temporal_encoding. Defaults to times_only.
        marks: The distinct number of marks (classes) for the process.
            Defaults to 1.
    """
    def __init__(
            self,
            # RNN args
            units_rnn: int,
            layers_rnn: int,
            dropout_rnn: float,
            # MLP args
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = None,
            # Other args
            emb_dim: Optional[int] = 1,
            embedding_constraint: Optional[str] = None,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        # The GRU consumes the raw event encoding; this size is what the
        # parent class will later expose as `self.encoding_size`.
        rnn_input_size = encoding_size(encoding=encoding, emb_dim=emb_dim)
        recurrent_net = nn.GRU(
            input_size=rnn_input_size,
            hidden_size=units_rnn,
            num_layers=layers_rnn,
            dropout=dropout_rnn,
            batch_first=True,
            bidirectional=False)
        super(GRUEncoder, self).__init__(
            name="gru",
            rnn=recurrent_net,
            units_mlp=units_mlp,
            activation=activation_mlp,
            dropout_mlp=dropout_mlp,
            constraint=constraint_mlp,
            activation_final_mlp=activation_final_mlp,
            emb_dim=emb_dim,
            embedding_constraint=embedding_constraint,
            temporal_scaling=temporal_scaling,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
| 2,738 | 38.128571 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/encoders/self_attention.py | import torch as th
import torch.nn.functional as F
from typing import List, Optional, Tuple, Dict
from tpp.models.encoders.base.variable_history import VariableHistoryEncoder
from tpp.pytorch.models import MLP
from tpp.utils.events import Events
from tpp.utils.transformer_utils import TransformerEncoderNetwork
from tpp.utils.transformer_utils import TransformerEncoderLayer
from tpp.utils.transformer_utils import generate_square_subsequent_mask
class SelfAttentionEncoder(VariableHistoryEncoder):
    """Self-attention network, based on a variable history encoder.

    Args:
        units_mlp: List of hidden layers sizes, including the output size.
        activation_mlp: Activation functions. Either a list or a string.
        constraint_mlp: Constraint of the network. Either none, nonneg or
            softplus.
        dropout_mlp: Dropout rates, either a list or a float.
        activation_final_mlp: Last activation of the MLP.
        units_rnn: Hidden size of the Transformer.
        layers_rnn: Number of layers in the Transformer.
        n_heads: Number of heads in the Transformer.
        activation_rnn: The non-linearity to use for the Transformer.
        dropout_rnn: Rate of dropout in the Transformer.
        attn_activation: Activation used inside the attention. Defaults to
            "softmax".
        allow_window_attention: If True, attention allows attendence to the
            window. False otherwise. Defaults to False,
        emb_dim: Size of the embeddings (default=2).
        embedding_constraint: Constraint on the weights. Either `None`,
            'nonneg' or 'softplus'. Defaults to `None`.
        temporal_scaling: Scaling parameter for temporal encoding
        encoding: Way to encode the events: either times_only, marks_only,
            concatenate or temporal_encoding. Defaults to times_only
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.
    """
    def __init__(
            self,
            # MLP
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = None,
            # Transformer
            units_rnn: Optional[int] = 16,
            layers_rnn: Optional[int] = 1,
            n_heads: Optional[int] = 1,
            activation_rnn: Optional[str] = "relu",
            dropout_rnn: Optional[float] = 0.,
            attn_activation: Optional[str] = "softmax",
            # Other
            allow_window_attention: Optional[bool] = False,
            emb_dim: Optional[int] = 2,
            embedding_constraint: Optional[str] = None,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        super(SelfAttentionEncoder, self).__init__(
            name="selfattention",
            output_size=units_mlp[-1],
            emb_dim=emb_dim,
            embedding_constraint=embedding_constraint,
            temporal_scaling=temporal_scaling,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
        # Causal attention mask, built lazily on the first forward pass and
        # cached; rebuilt whenever the sequence length changes.
        self.src_mask = None
        self.allow_window_attention = allow_window_attention
        encoder_layer = TransformerEncoderLayer(
            d_model=self.encoding_size,
            nhead=n_heads,
            dim_feedforward=units_rnn,
            dropout=dropout_rnn,
            activation=activation_rnn,
            attn_activation=attn_activation)
        self.transformer_encoder = TransformerEncoderNetwork(
            encoder_layer=encoder_layer,
            num_layers=layers_rnn)
        self.mlp = MLP(
            units=units_mlp,
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            input_shape=self.encoding_size,
            activation_final=activation_final_mlp)
    def forward(self, events: Events) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the (query time independent) event representations.

        Args:
            events: [B,L] Times and labels of events.

        Returns:
            representations: [B,L+1,M+1] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined.
            artifacts: Dict holding the attention weights under
                artifacts['encoder']['attention_weights'].
        """
        histories, histories_mask = self.get_events_representations(
            events=events)  # [B,L+1,D] [B,L+1]
        # Compute src_mask (causal upper-triangular mask), cached between
        # calls and regenerated when the sequence length changes.
        # NOTE(review): the cache is keyed on length only, not device --
        # confirm inputs always live on a single device.
        if (self.src_mask is None
                or self.src_mask.size(0) != histories.size()[1]):
            src_mask = generate_square_subsequent_mask(
                sz=histories.size()[1],
                device=histories.device)
            self.src_mask = src_mask
            if not self.allow_window_attention:
                # Block every real position (rows 1..) from attending to the
                # window-start token at index 0.
                self.src_mask[1:, 0] = float('-inf')
        # [B,L,D] -> [L,B,D]  (transformer expects sequence-first layout)
        histories = histories.transpose(0, 1)
        hidden, attn_weights = self.transformer_encoder(
            src=histories,
            mask=self.src_mask
        )  # [L,B,hidden_size], [B,L,L]
        # [L,B,hidden_size] -> [B,L,hidden_size]
        hidden = hidden.transpose(0, 1)
        # L2-normalise hidden states along the feature axis before the MLP.
        hidden = F.normalize(hidden, dim=-1, p=2)
        outputs = self.mlp(hidden)  # [B,L,output_size]
        artifacts = {'encoder': {"attention_weights": attn_weights}}
        return outputs, histories_mask, artifacts
| 5,508 | 38.070922 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/encoders/mlp_fixed.py | from typing import List, Optional
from tpp.models.encoders.base.fixed_history import FixedHistoryEncoder
from tpp.pytorch.models import MLP
class MLPFixedEncoder(FixedHistoryEncoder):
    """MLP network using a fixed history encoder.

    Args:
        units_mlp: List of hidden layers sizes.
        activation_mlp: Activation functions. Either a list or a string.
        dropout_mlp: Dropout rates, either a list or a float.
        constraint_mlp: Constraint of the network. Either none, nonneg or
            softplus.
        activation_final_mlp: Last activation of the MLP.
        history_size: The size of each history.
        marks: The distinct number of marks (classes) for the process.
            Defaults to 1.
    """
    def __init__(
            self,
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = None,
            history_size: Optional[int] = 2,
            marks: Optional[int] = 1,
            **kwargs):
        # The network maps a fixed-size history vector to a representation of
        # size units_mlp[-1].
        net = MLP(
            units=units_mlp,
            activations=activation_mlp,
            constraint=constraint_mlp,
            dropout_rates=dropout_mlp,
            input_shape=history_size,
            activation_final=activation_final_mlp)
        super(MLPFixedEncoder, self).__init__(
            name="mlp-fixed",
            net=net,
            output_size=units_mlp[-1],
            history_size=history_size,
            marks=marks,
            **kwargs)
| 1,597 | 34.511111 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/encoders/stub.py | import torch as th
from typing import Dict, Optional, Tuple
from tpp.models.encoders.base.encoder import Encoder
from tpp.utils.events import Events
class StubEncoder(Encoder):
    """An encoder that does nothing. Used for decoders (e.g. the Hawkes
    decoder) that need no encoding.

    Args:
        marks: The distinct number of marks (classes) for the process.
            Defaults to 1.
    """
    def __init__(self, marks: Optional[int] = 1, **kwargs):
        super(StubEncoder, self).__init__(
            name="stub", output_size=0, marks=marks, **kwargs)
    def forward(self, events: Events) -> Tuple[th.Tensor, th.Tensor, Dict]:
        # Empty representations, empty mask, no artifacts.
        representations = th.Tensor()
        representations_mask = th.Tensor()
        return representations, representations_mask, dict()
| 691 | 29.086957 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/encoders/base/encoder.py | import abc
import torch as th
import torch.nn as nn
from typing import Dict, Optional, Tuple
from tpp.utils.events import Events
class Encoder(nn.Module, abc.ABC):
    """Abstract base class for TPP event encoders.

    Args:
        name: The name of the encoder class.
        output_size: The output size (dimensionality) of the
            representations formed by the encoder.
        marks: The distinct number of marks (classes) for the process.
            Defaults to 1.
    """
    def __init__(
            self,
            name: str,
            output_size: int,
            marks: Optional[int] = 1,
            **kwargs):
        super(Encoder, self).__init__()
        self.name = name
        self.output_size = output_size
        self.marks = marks

    @abc.abstractmethod
    def forward(self, events: Events) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the (query time independent) event representations.

        Args:
            events: [B,L] Times and labels of events.

        Returns:
            representations: [B,L+1,D] Representations of each event,
                including the window start.
            representations_mask: [B,L+1] Mask indicating which
                representations are well-defined.
            artifacts: A dictionary of any additional outputs.
        """
| 1,346 | 26.489796 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/encoders/base/variable_history.py | import abc
import torch as th
import torch.nn as nn
from typing import Optional, Tuple
from tpp.utils.events import Events
from tpp.models.encoders.base.encoder import Encoder
from tpp.pytorch.models import LAYER_CLASSES, MLP
from tpp.utils.history import get_prev_times
from tpp.utils.encoding import SinusoidalEncoding, event_encoder, encoding_size
class VariableHistoryEncoder(Encoder, abc.ABC):
    """Variable history encoder. Here, the size H depends on the encoding type.
    It can be either 1, emb_dim or emb_dim+1.

    Args:
        name: The name of the encoder class.
        output_size: The output size (dimensionality) of the representations
            formed by the encoder.
        emb_dim: Size of the embeddings. Defaults to 1.
        embedding_constraint: Constraint on the weights. Either `None`,
            'nonneg' or 'softplus'. Defaults to `None`.
        temporal_scaling: Scaling parameter for temporal encoding.
        encoding: Way to encode the events: either times_only, marks_only,
            concatenate, temporal, learnable, temporal_with_labels or
            learnable_with_labels. Defaults to times_only.
        time_encoding: Whether event times are encoded 'relative' to the
            previous event or kept 'absolute'. Defaults to 'relative'.
        marks: The distinct number of marks (classes) for the process. Defaults
            to 1.
    """
    def __init__(
            self,
            name: str,
            output_size: int,
            emb_dim: Optional[int] = 1,
            embedding_constraint: Optional[str] = None,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        super(VariableHistoryEncoder, self).__init__(
            name=name, output_size=output_size, marks=marks, **kwargs)
        self.emb_dim = emb_dim
        self.encoding = encoding
        self.time_encoding = time_encoding
        self.embedding_constraint = embedding_constraint
        # Size H of each encoded event: 1, emb_dim or emb_dim + 1 depending
        # on the encoding scheme.
        self.encoding_size = encoding_size(
            encoding=self.encoding, emb_dim=self.emb_dim)
        # Label embedding: only built when the encoding consumes the marks.
        self.embedding = None
        if encoding in ["marks_only", "concatenate", "temporal_with_labels",
                        "learnable_with_labels"]:
            embedding_layer_class = nn.Linear
            if self.embedding_constraint is not None:
                # Swap in a weight-constrained linear layer (e.g. nonneg).
                embedding_layer_class = LAYER_CLASSES[
                    self.embedding_constraint]
            self.embedding = embedding_layer_class(
                in_features=self.marks, out_features=self.emb_dim, bias=False)
        # Time features: fixed sinusoidal encoding or a learnable 1-layer MLP.
        self.temporal_enc = None
        if encoding in ["temporal", "temporal_with_labels"]:
            self.temporal_enc = SinusoidalEncoding(
                emb_dim=self.emb_dim, scaling=temporal_scaling)
        elif encoding in ["learnable", "learnable_with_labels"]:
            self.temporal_enc = MLP(
                units=[self.emb_dim],
                activations=None,
                constraint=self.embedding_constraint,
                dropout_rates=0,
                input_shape=1,
                activation_final=None)

    def get_events_representations(
            self, events: Events) -> Tuple[th.Tensor, th.Tensor]:
        """Compute the history vectors.

        Args:
            events: [B,L] Times and labels of events.

        Returns:
            merged_embeddings: [B,L+1,emb_dim] Histories of each event.
            histories_mask: [B,L+1] Mask indicating which histories
                are well-defined.
        """
        times = events.get_times(prepend_window=True)  # [B,L+1]
        histories_mask = events.get_mask(prepend_window=True)  # [B,L+1]
        # Locate, for each time, the directly preceding event (or the window
        # start), so that inter-event deltas can be formed below. Masked
        # values are set to the padding id.
        prev_times, is_event, pos_delta_mask = get_prev_times(
            query=times,
            events=events,
            allow_window=True)  # ([B,L+1],[B,L+1]), [B,L+1], [B,L+1]
        if self.time_encoding == "relative":
            # Replace absolute times with deltas to the previous event, and
            # drop positions with a non-positive delta.
            prev_times, prev_times_idxs = prev_times  # [B,L+1], [B,L+1]
            times = times - prev_times
            histories_mask = histories_mask * pos_delta_mask
        if self.encoding != "marks_only" and self.time_encoding == "relative":
            # Relative times are only meaningful when the preceding index is
            # an actual event rather than the window boundary.
            histories_mask = histories_mask * is_event
        labels = events.labels
        # Prepend an all-zero label row to stand in for the window start.
        labels = th.cat(
            (th.zeros(
                size=(labels.shape[0], 1, labels.shape[-1]),
                dtype=labels.dtype, device=labels.device),
            labels), dim=1)  # [B,L+1,M]
        return event_encoder(
            times=times,
            mask=histories_mask,
            encoding=self.encoding,
            labels=labels,
            embedding_layer=self.embedding,
            temporal_enc=self.temporal_enc)
| 4,746 | 37.593496 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/encoders/base/recurrent.py | import torch as th
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, List, Optional, Tuple
from tpp.models.encoders.base.variable_history import VariableHistoryEncoder
from tpp.pytorch.models import MLP
from tpp.utils.events import Events
class RecurrentEncoder(VariableHistoryEncoder):
    """Base class for recurrent encoders over variable-length histories.

    Args:
        name: The name of the encoder class.
        rnn: The recurrent module applied to the event embeddings.
        units_mlp: Hidden layer sizes of the output MLP; the final entry is
            the encoder's output size.
        activation_mlp: MLP activation function(s), a string or a list.
        dropout_mlp: MLP dropout rate(s), a float or a list.
        constraint_mlp: MLP weight constraint. Either none, nonneg or
            softplus.
        activation_final_mlp: Activation applied after the last MLP layer.
        emb_dim: Size of the embeddings. Defaults to 1.
        embedding_constraint: Constraint on the weights. Either `None`,
            'nonneg' or 'softplus'. Defaults to `None`.
        temporal_scaling: Scaling parameter for temporal encoding.
        encoding: Way to encode the events: either times_only, marks_only,
            concatenate or temporal_encoding. Defaults to times_only.
        time_encoding: 'relative' or 'absolute' event times.
        marks: The distinct number of marks (classes) for the process.
            Defaults to 1.
    """
    def __init__(
            self,
            name: str,
            rnn: nn.Module,
            # MLP args
            units_mlp: List[int],
            activation_mlp: Optional[str] = "relu",
            dropout_mlp: Optional[float] = 0.,
            constraint_mlp: Optional[str] = None,
            activation_final_mlp: Optional[str] = None,
            # Other args
            emb_dim: Optional[int] = 1,
            embedding_constraint: Optional[str] = None,
            temporal_scaling: Optional[float] = 1.,
            encoding: Optional[str] = "times_only",
            time_encoding: Optional[str] = "relative",
            marks: Optional[int] = 1,
            **kwargs):
        super(RecurrentEncoder, self).__init__(
            name=name,
            output_size=units_mlp[-1],
            emb_dim=emb_dim,
            embedding_constraint=embedding_constraint,
            temporal_scaling=temporal_scaling,
            encoding=encoding,
            time_encoding=time_encoding,
            marks=marks,
            **kwargs)
        self.rnn = rnn
        self.mlp = MLP(
            input_shape=self.rnn.hidden_size,
            units=units_mlp,
            activations=activation_mlp,
            dropout_rates=dropout_mlp,
            constraint=constraint_mlp,
            activation_final=activation_final_mlp)

    def forward(self, events: Events) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the (query time independent) event representations.

        Args:
            events: [B,L] Times and labels of events.

        Returns:
            representations: [B,L+1,M+1] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which
                representations are well-defined.
            artifacts: An (empty) dictionary of extra outputs.
        """
        embeddings, embeddings_mask = self.get_events_representations(
            events=events)  # [B,L+1,D] [B,L+1]
        hidden, _ = self.rnn(embeddings)
        # L2-normalise the recurrent states before the output MLP.
        hidden = F.normalize(hidden, p=2, dim=-1)
        representations = self.mlp(hidden)  # [B,L+1,M+1]
        return representations, embeddings_mask, dict()
| 3,414 | 37.370787 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/encoders/base/fixed_history.py | import torch as th
import torch.nn as nn
from typing import Dict, Optional, Tuple
from tpp.utils.events import Events
from tpp.models.encoders.base.encoder import Encoder
from tpp.utils.history import build_histories
class FixedHistoryEncoder(Encoder):
    """An encoder representing each event by a fixed-size history vector.

    Args:
        name: The name of the encoder class.
        net: The network applied to each history vector.
        output_size: The output size (dimensionality) of the
            representations formed by the encoder.
        history_size: Number of past event times per history vector.
            Defaults to 2.
        marks: The distinct number of marks (classes) for the process.
            Defaults to 1.
    """
    def __init__(
            self,
            name: str,
            net: nn.Module,
            output_size: int,
            history_size: Optional[int] = 2,
            marks: Optional[int] = 1,
            **kwargs):
        super(FixedHistoryEncoder, self).__init__(
            name=name, output_size=output_size, marks=marks, **kwargs)
        self.net = net
        self.history_size = history_size

    def get_history_representations(
            self, events: Events) -> Tuple[th.Tensor, th.Tensor]:
        """Build the fixed-size history vector of every event.

        Args:
            events: [B,L] Times and labels of events.

        Returns:
            histories: [B,L+1,H] Histories of each event, with a zeroed
                entry prepended for the window start.
            histories_mask: [B,L+1] Mask indicating which histories
                are well-defined.
        """
        histories = events.times.unsqueeze(dim=-1)  # [B,L,1]
        histories_mask = events.mask  # [B,L]
        batch_size = histories_mask.shape[0]
        if self.history_size > 1:
            # Prepend the H-1 preceding event times to each event's own time.
            h_prev, h_prev_mask = build_histories(
                query=events.times, events=events,
                history_size=self.history_size - 1)  # [B,L,H-1], [B,L]
            histories = th.cat([h_prev, histories], dim=-1)  # [B,L,H]
            histories_mask = histories_mask * h_prev_mask  # [B,L]
        # The window start has no history: prepend a zeroed, masked-out entry.
        window_history = th.zeros(
            [batch_size, 1, self.history_size],
            dtype=histories.dtype,
            device=histories.device)
        window_mask = th.zeros(
            [batch_size, 1],
            dtype=histories_mask.dtype,
            device=histories.device)
        histories = th.cat([window_history, histories], dim=1)  # [B,L+1,H]
        histories_mask = th.cat(
            [window_mask, histories_mask], dim=1)  # [B,L+1]
        return histories, histories_mask  # [B,L+1,H], [B,L+1]

    def forward(self, events: Events) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the (query time independent) event representations.

        Args:
            events: [B,L] Times and labels of events.

        Returns:
            representations: [B,L+1,M+1] Representations of each event.
            representations_mask: [B,L+1] Mask indicating which
                representations are well-defined.
            artifacts: An (empty) dictionary of extra outputs.
        """
        histories, histories_mask = self.get_history_representations(
            events=events)  # [B,L+1,H] [B,L+1]
        return self.net(histories), histories_mask, dict()
| 3,415 | 36.130435 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/base/modular.py | import torch as th
import torch.nn as nn
import torch.nn.functional as F
from tpp.utils.events import Events
from typing import Dict, Optional, Tuple
from tpp.models.base.enc_dec import EncDecProcess
class ModularProcess(EncDecProcess):
    """Build a process out of multiple process instances.

    Args:
        processes: A dict of instances of EncDecProcess with the same number
            of marks.
        use_coefficients: If `True` puts a learnable positive coefficient in
            front of each process, i.e. lambda = sum_i alpha_i lambda_i.
            Defaults to `False`.
        multi_labels: Whether events may carry several labels at once.
            Defaults to `False`.
    """
    def __init__(
            self,
            processes: Dict[str, EncDecProcess],
            use_coefficients: Optional[bool] = False,
            multi_labels: Optional[bool] = False,
            **kwargs):
        name = '_'.join([p.name for p in processes.values()])
        # All component processes must share the same mark vocabulary.
        marks = {p.marks for p in processes.values()}
        if len(marks) > 1:
            raise ValueError("The number of independent marks ({}) is {}. It "
                             "should be 1.".format(marks, len(marks)))
        marks = list(marks)[0]
        super(ModularProcess, self).__init__(
            name=name, marks=marks,
            encoder=None, decoder=None, multi_labels=multi_labels, **kwargs)
        self.processes = processes
        # Register each component so its parameters are tracked by autograd
        # and the optimiser.
        for k, p in self.processes.items():
            self.add_module(k, p)
        self.n_processes = len(processes)
        self.use_coefficients = use_coefficients
        if self.use_coefficients:
            # One mixing logit per component process; softmax-normalised
            # inside `artifacts`.
            self.alpha = nn.Parameter(th.Tensor(self.n_processes))
            self.reset_parameters()

    def reset_parameters(self):
        # Initialise the mixing logits. When a "poisson" component is
        # present it starts with most of the weight (logit 1 vs -2);
        # otherwise fall back to a uniform init.
        if "poisson" in self.processes:
            init_constant = [
                1. if x == "poisson" else -2. for x in self.processes]
            init_constant = th.Tensor(init_constant).to(
                self.alpha.device).type(
                self.alpha.dtype)
            self.alpha.data = init_constant
        else:
            nn.init.uniform_(self.alpha)

    def artifacts(
            self, query: th.Tensor, events: Events
    ) -> Tuple[th.Tensor, th.Tensor, th.Tensor, Dict]:
        """Compute the (log) intensities and intensity integrals at query times
        given events.

        Args:
            query: [B,T] Sequences of query times to evaluate the intensity
                function.
            events: [B,L] Times and labels of events.

        Returns:
            log_intensity: [B,T,M] The log intensities for each query time for
                each mark (class).
            intensity_integral: [B,T,M] The integral of the intensity from
                the most recent event to the query time for each mark.
            intensities_mask: [B,T,M] Which intensities are valid for further
                computation based on e.g. sufficient history available.
            artifacts: A dictionary of whatever else you might want to return.
        """
        # Evaluate every component process on the same queries.
        # [B,T,M], [B,T,M], [B,T], Dict
        artifacts = {
            k: p.artifacts(query=query, events=events)
            for k, p in self.processes.items()}
        log_intensity, intensity_integral, intensity_mask, _ = zip(
            *artifacts.values())
        log_intensity = th.stack(log_intensity, dim=0)  # [P,B,T,M]
        intensities_mask = th.stack(intensity_mask, dim=0)  # [P,B,T,M]
        intensity_integral = th.stack(intensity_integral, dim=0)  # [P,B,T,M]
        if self.use_coefficients:
            # Mix in log space: log sum_p alpha_p lambda_p
            #   = logsumexp_p (log alpha_p + log lambda_p).
            alpha = self.alpha.reshape(-1, 1, 1, 1)  # [P,1,1,1]
            alpha = F.softmax(alpha, dim=0)
            log_alpha = th.log(alpha)
            log_intensity = log_intensity + log_alpha  # [P,B,T,M]
            intensity_integral = alpha * intensity_integral  # [P,B,T,M]
        log_intensity = th.logsumexp(log_intensity, dim=0)  # [B,T,M]
        intensity_integral = th.sum(intensity_integral, dim=0)  # [B,T,M]
        # A combined intensity is valid only where every component's is.
        intensities_mask = th.prod(intensities_mask, dim=0)  # [B,T,M]
        artifacts = {k: v[-1] for k, v in artifacts.items()}
        return log_intensity, intensity_integral, intensities_mask, artifacts

    def encode(self, **kwargs):
        # Encoding is delegated to the component processes.
        pass

    def decode(self, **kwargs):
        # Decoding is delegated to the component processes.
        pass
| 4,297 | 37.035398 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/base/enc_dec.py | import torch as th
from typing import Dict, Optional, Tuple
from tpp.models.decoders.base.decoder import Decoder
from tpp.models.encoders.base.encoder import Encoder
from tpp.models.base.process import Process
from tpp.utils.events import Events
from tpp.utils.history_bst import get_prev_times
from tpp.utils.index import take_2_by_1
from tpp.utils.logical import xor
from tpp.utils.stability import epsilon
class EncDecProcess(Process):
    """A parametric encoder decoder process.

    Args:
        encoder: The encoder, mapping event sequences to representations.
        decoder: The decoder, mapping representations and query times to
            intensities.
        multi_labels: If `True`, events may carry several labels at once and
            the likelihood adds log(1 - p) terms for absent marks. Defaults
            to `False`.
    """
    def __init__(self,
                 encoder: Encoder,
                 decoder: Decoder,
                 multi_labels: Optional[bool] = False,
                 **kwargs):
        # TODO: Fix this hack that allows modular to work.
        if encoder is not None:
            # Encoder and decoder must agree on the number of marks and,
            # when the decoder declares an input size, on the
            # representation dimensionality.
            assert encoder.marks == decoder.marks
            name = '_'.join([encoder.name, decoder.name])
            marks = encoder.marks
            if decoder.input_size is not None:
                assert encoder.output_size == decoder.input_size
        else:
            # ModularProcess builds this class without encoder/decoder and
            # passes name and marks explicitly instead.
            name = kwargs.pop("name")
            marks = kwargs.pop("marks")
        super(EncDecProcess, self).__init__(name=name, marks=marks, **kwargs)
        self.encoder = encoder
        self.decoder = decoder
        self.multi_labels = multi_labels
        if self.encoder is not None:
            self.enc_dec_hidden_size = self.encoder.output_size

    def intensity(
            self, query: th.Tensor, events: Events
    ) -> Tuple[th.Tensor, th.Tensor]:
        """Compute the intensities at query times given events.

        Args:
            query: [B,T] Sequences of query times to evaluate the intensity
                function.
            events: [B,L] Times and labels of events.

        Returns:
            intensity: [B,T,M] The intensities for each query time for each
                mark (class).
            intensity_mask: [B,T,M] Which intensities are valid for further
                computation based on e.g. sufficient history available.
        """
        log_intensity, _, intensity_mask, _ = self.artifacts(
            query=query, events=events)
        return th.exp(log_intensity), intensity_mask

    def log_density(
            self, query: th.Tensor, events: Events
    ) -> Tuple[th.Tensor, th.Tensor]:
        """Compute the log densities at query times given events.

        Args:
            query: [B,T] Sequences of query times to evaluate the intensity
                function.
            events: [B,L] Times and labels of events.

        Returns:
            log_density: [B,T,M] The densities for each query time for each
                mark (class).
            density_mask: [B,T,M] Which intensities are valid for further
                computation based on e.g. sufficient history available.
        """
        # TODO: Intensity integral should be summed over marks.
        log_intensity, intensity_integral, intensity_mask, _ = self.artifacts(
            query=query, events=events)
        # log f(t) = log lambda(t) - integral of the total intensity.
        log_density = log_intensity - intensity_integral.sum(-1).unsqueeze(-1)
        return log_density, intensity_mask

    def neg_log_likelihood(
            self, events: Events) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the negative log likelihood of events.

        Args:
            events: [B,L] Times and labels of events.

        Returns:
            nll: [B] The negative log likelihoods for each sequence.
            nll_mask: [B] Which neg_log_likelihoods are valid for further
                computation based on e.g. at least one element in sequence has
                a contribution.
            artifacts: Other useful items, e.g. the relevant window of the
                sequence.
        """
        # Query at every event time plus the window end.
        events_times = events.get_times(postpend_window=True)  # [B,L+1]
        log_intensity, intensity_integral, intensity_mask, _ = self.artifacts(
            query=events_times, events=events)  # [B,L+1,M], [B,L+1,M], [B,L+1]
        # For the interval normalisation: push masked (invalid) times past
        # the maximum so the min below picks the first valid query index.
        shift = 1. + th.max(events_times) - th.min(events_times)
        shifted_events = events_times + (1 - intensity_mask) * shift
        interval_start_idx = th.min(shifted_events, dim=-1).indices
        interval_start_times = events.get_times(prepend_window=True)
        interval_start_times = take_2_by_1(
            interval_start_times, index=interval_start_idx)
        interval_end_idx = th.max(events_times, dim=-1).indices
        interval_end_times = take_2_by_1(
            events_times, index=interval_end_idx)
        interval = interval_end_times - interval_start_times
        artifacts = {
            "interval_start_times": interval_start_times,
            "interval_end_times": interval_end_times,
            "interval": interval}
        # Split off the window-end query: it contributes only its integral
        # (the "window integral"), not a log-intensity term.
        log_intensity = log_intensity[:, :-1, :]  # [B,L,M]
        intensity_integral = th.sum(intensity_integral, dim=-1)  # [B,L+1]
        window_integral = intensity_integral[:, -1]  # [B]
        intensity_integral = intensity_integral[:, :-1]  # [B,L]
        window_intensity_mask = intensity_mask[:, -1]  # [B]
        intensity_mask = intensity_mask[:, :-1]  # [B,L]
        labels = events.labels  # [B,L,M]
        log_density = (log_intensity
                       - intensity_integral.unsqueeze(dim=-1))  # [B,L,M]
        log_density = log_density * intensity_mask.unsqueeze(dim=-1)  # [B,L,M]
        # Keep only the log density of the mark(s) that actually occurred.
        true_log_density = log_density * labels  # [B,L,M]
        true_log_density_flat = true_log_density.reshape(
            true_log_density.shape[0], -1)  # [B,L*M]
        log_likelihood = th.sum(true_log_density_flat, dim=-1)  # [B]
        if self.multi_labels:
            # Multi-label case: add log(1 - p) for the marks that did NOT
            # occur, clamping for numerical stability.
            eps = epsilon(dtype=log_density.dtype, device=log_density.device)
            log_density = th.clamp(log_density, max=-eps)
            one_min_density = 1. - th.exp(log_density) + eps  # [B,L,M]
            log_one_min_density = th.log(one_min_density)  # [B,L,M]
            log_one_min_density = (log_one_min_density *
                                   intensity_mask.unsqueeze(dim=-1))
            one_min_true_log_density = (1. - labels) * log_one_min_density
            one_min_true_log_density_flat = one_min_true_log_density.reshape(
                one_min_true_log_density.shape[0], -1)  # [B,L*M]
            log_likelihood = log_likelihood + th.sum(
                one_min_true_log_density_flat, dim=-1)  # [B]
        # Skip the extra window integral when the final event falls exactly
        # on the window boundary.
        add_window_integral = 1 - events.final_event_on_window.type(
            log_likelihood.dtype)  # [B]
        window_integral = window_integral * add_window_integral  # [B]
        log_likelihood = log_likelihood - window_integral
        nll = - log_likelihood
        # A sequence contributes only if at least one event term is valid.
        nll_mask = th.sum(intensity_mask, dim=-1)  # [B]
        nll_mask = (nll_mask > 0.).type(nll.dtype)  # [B]
        # The window term must either be well-defined or not needed at all.
        defined_window_integral = window_intensity_mask * add_window_integral
        no_window_integral = 1 - add_window_integral  # [B]
        window_mask = xor(defined_window_integral, no_window_integral)  # [B]
        nll_mask = nll_mask * window_mask
        return nll, nll_mask, artifacts

    def artifacts(
            self, query: th.Tensor, events: Events
    ) -> Tuple[th.Tensor, th.Tensor, th.Tensor, Dict]:
        """Compute the (log) intensities and intensity integrals at query times
        given events.

        Args:
            query: [B,T] Sequences of query times to evaluate the intensity
                function.
            events: [B,L] Times and labels of events.

        Returns:
            log_intensity: [B,T,M] The log intensities for each query time for
                each mark (class).
            intensity_integrals: [B,T,M] The integral of the intensity from
                the most recent event to the query time for each mark.
            intensities_mask: [B,T,M] Which intensities are valid for further
                computation based on e.g. sufficient history available.
            artifacts: A dictionary of whatever else you might want to return.
        """
        representations, representations_mask, artifacts = self.encode(
            events=events)  # [B,L+1,D] [B,L+1], Dict
        # For each query, locate the directly preceding event (or window
        # start) so the decoder can form inter-event deltas.
        prev_times, is_event, pos_delta_mask = get_prev_times(
            query=query,
            events=events,
            allow_window=True)  # ([B,T],[B,T]), [B,T], [B,T]
        prev_times, prev_times_idxs = prev_times  # [B,T], [B,T]
        return self.decode(
            events=events,
            query=query,
            prev_times=prev_times,
            prev_times_idxs=prev_times_idxs,
            is_event=is_event,
            pos_delta_mask=pos_delta_mask,
            representations=representations,
            representations_mask=representations_mask,
            artifacts=artifacts)

    def encode(self, events: Events) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Delegate event encoding to the encoder module."""
        return self.encoder(events=events)

    def decode(
            self,
            events: Events,
            query: th.Tensor,
            prev_times: th.Tensor,
            prev_times_idxs: th.Tensor,
            is_event: th.Tensor,
            pos_delta_mask: th.Tensor,
            representations: th.Tensor,
            representations_mask: Optional[th.Tensor] = None,
            artifacts: Optional[dict] = None
    ) -> Tuple[th.Tensor, th.Tensor, th.Tensor, Dict]:
        """Compute the intensities for each query time given event
        representations.

        Args:
            events: [B,L] Times and labels of events.
            query: [B,T] Times to evaluate the intensity function.
            prev_times: [B,T] Times of events directly preceding queries.
            prev_times_idxs: [B,T] Indexes of times of events directly
                preceding queries. These indexes are of window-prepended
                events.
            pos_delta_mask: [B,T] A mask indicating if the time difference
                `query - prev_times` is strictly positive.
            is_event: [B,T] A mask indicating whether the time given by
                `prev_times_idxs` corresponds to an event or not (a 1 indicates
                an event and a 0 indicates a window boundary).
            representations: [B,L+1,D] Representations of window start and
                each event.
            representations_mask: [B,L+1] Mask indicating which representations
                are well-defined. If `None`, there is no mask. Defaults to
                `None`.
            artifacts: A dictionary of whatever else you might want to return.

        Returns:
            log_intensity: [B,T,M] The intensities for each query time for
                each mark (class).
            intensity_integrals: [B,T,M] The integral of the intensity from
                the most recent event to the query time for each mark.
            intensities_mask: [B,T] Which intensities are valid for further
                computation based on e.g. sufficient history available.
            artifacts: A dictionary of whatever else you might want to return.
        """
        return self.decoder(
            events=events,
            query=query,
            prev_times=prev_times,
            prev_times_idxs=prev_times_idxs,
            is_event=is_event,
            pos_delta_mask=pos_delta_mask,
            representations=representations,
            representations_mask=representations_mask,
            artifacts=artifacts)
| 11,732 | 40.903571 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/models/base/process.py | import abc
import torch as th
import torch.nn as nn
from typing import Dict, Optional, Tuple
from tpp.utils.events import Events
class Process(nn.Module):
    """Abstract base class for parametric point processes.

    Args:
        name: The name of the process.
        marks: The distinct number of marks (classes) for the process.
            Defaults to 1.
    """
    def __init__(self, name: str, marks: Optional[int] = 1, **kwargs):
        super(Process, self).__init__()
        self.name = name
        self.marks = marks

    @abc.abstractmethod
    def intensity(
            self, query: th.Tensor, events: Events
    ) -> Tuple[th.Tensor, th.Tensor]:
        """Compute the intensities at query times given events.

        Args:
            query: [B,T] Sequences of query times to evaluate the intensity
                function.
            events: [B,L] Times and labels of events.

        Returns:
            intensities: [B,T,M] The intensities for each query time for
                each mark (class).
            intensity_mask: [B,T,M] Which intensities are valid for further
                computation based on e.g. sufficient history available.
        """

    @abc.abstractmethod
    def neg_log_likelihood(
            self, events: Events) -> Tuple[th.Tensor, th.Tensor, Dict]:
        """Compute the negative log likelihood of events.

        Args:
            events: [B,L] Times and labels of events.

        Returns:
            nll: [B] The negative log likelihoods for each sequence.
            nll_mask: [B] Which neg_log_likelihoods are valid for further
                computation based on e.g. at least one element in sequence
                has a contribution.
            artifacts: Other useful quantities.
        """
| 1,777 | 28.633333 | 78 | py |
neuralTPPs | neuralTPPs-master/tpp/processes/hawkes_fast.py | import torch as th
from typing import Dict, Optional, Tuple
# from tpp.processes.hawkes.r_terms import get_r_terms as get_r_terms
# from tpp.processes.hawkes.r_terms_recursive import get_r_terms
from tpp.processes.hawkes.r_terms_recursive_v import get_r_terms
from tpp.utils.events import Events
from tpp.utils.history_bst import get_prev_times
from tpp.utils.index import take_3_by_2, take_2_by_2
def decoder_fast(
        events: Events,
        query: th.Tensor,
        prev_times: th.Tensor,
        is_event: th.Tensor,
        alpha: th.Tensor,
        beta: th.Tensor,
        mu: th.Tensor,
        marks: Optional[int] = 1
) -> Tuple[th.Tensor, th.Tensor, th.Tensor, Dict]:
    """Compute the intensities for each query time given event
    representations.

    Args:
        events: [B,L] Times and labels of events.
        query: [B,T] Times to evaluate the intensity function.
        prev_times: [B,T] Times of events directly preceding queries.
        is_event: [B,T] A mask indicating whether the time given by
            `prev_times_idxs` corresponds to an event or not (a 1 indicates
            an event and a 0 indicates a window boundary).
        alpha: [M,M] The alpha matrix for the Hawkes process.
        beta: [M,M] The beta matrix for the Hawkes process.
        mu: [M,M] The mu vector for the Hawkes process.
        marks: The number of marks for the process (default=1).

    Returns:
        log_intensity: [B,T,M] The intensities for each query time for
            each mark (class).
        intensity_integrals: [B,T,M] The integral of the intensity from
            the most recent event to the query time for each mark.
        intensities_mask: [B,T] Which intensities are valid for further
            computation based on e.g. sufficient history available.
    """
    (batch_size, t), seq_len = query.shape, events.times.shape[-1]
    # term_2 = sum_n alpha_m_n exp (-beta_m_n t-t_i_m) (R_m_n_i + 1)
    ((prev_times, prev_times_idxs),
     is_event, mask) = get_prev_times(
        query=query, events=events, allow_window=True)  # [B,T]
    events_labels = th.argmax(events.labels, dim=-1).long()  # [B,L]
    # prev_times_idxs counts the prepended window start, hence the -1 to
    # index the raw [B,L] event labels.
    prev_labels = take_2_by_2(
        events_labels, index=prev_times_idxs - 1)  # [B,T]
    # Recursively accumulated excitation terms R_mn per event.
    r_terms = get_r_terms(events=events, beta=beta)  # [B,L,M,N]
    # The window start carries no accumulated excitation: R = 0.
    window_r_term = th.zeros(
        size=(batch_size, 1, marks, marks),
        dtype=r_terms.dtype,
        device=r_terms.device)
    r_terms = th.cat([window_r_term, r_terms], dim=1)  # [B,L+1,M,N]
    # Gather the R terms of the event preceding each query.
    r_terms_query = r_terms.reshape(
        batch_size, seq_len + 1, -1)  # [B,L+1,M*N]
    r_terms_query = take_3_by_2(
        r_terms_query, index=prev_times_idxs)  # [B,T,M*N]
    r_terms_query = r_terms_query.reshape(
        batch_size, t, marks, marks)  # [B,T,M,N]
    delta_t = query - prev_times  # [B,T]
    # Compute exp( -beta_mn * (t - t_-) )
    arg = delta_t.unsqueeze(dim=-1).unsqueeze(dim=-1)  # [B,T,1,1]
    arg = - beta.reshape(1, 1, marks, marks) * arg  # [B,T,M,N]
    exp_mask = is_event * mask  # [B,T]
    exp_mask = exp_mask.unsqueeze(dim=-1).unsqueeze(dim=-1)  # [B,T,1,1]
    # Zero the exponent where invalid so exp() yields 1 there, then mask
    # the result itself so those positions do not contribute.
    arg = arg * exp_mask  # [B,T,M,N]
    exp = th.exp(arg)  # [B,T,M,N]
    exp_mask = is_event.unsqueeze(dim=-1).unsqueeze(dim=-1)  # [B,T,1,1]
    exp = exp * exp_mask  # [B,T,M,N]
    # One-hot of the previous event's mark along the source dimension N,
    # repeated over the receiving dimension M.
    mark_range = th.arange(
        end=marks, device=events_labels.device, dtype=events_labels.dtype)
    ones = (prev_labels.unsqueeze(dim=-1) ==
            mark_range.unsqueeze(dim=0).unsqueeze(dim=0))  # [B,T,M]
    ones = ones.unsqueeze(dim=2).repeat(1, 1, marks, 1)  # [B,T,M,N]
    ones = ones.type(exp.dtype)
    r_terms_plus_one = r_terms_query + ones
    # lambda_m(t) = mu_m + sum_n alpha_mn exp(-beta_mn dt) (R_mn + 1)
    exp_intensity = exp * r_terms_plus_one
    exp_intensity = alpha.reshape(1, 1, marks, marks) * exp_intensity
    exp_intensity = th.sum(exp_intensity, dim=-1)
    intensity = mu.reshape(1, 1, marks) + exp_intensity  # [B,T,M]
    log_intensity = th.log(intensity)  # [B,T,M]
    # Integral of each decaying kernel from the previous event to the
    # query: (alpha/beta) * (1 - exp(-beta dt)) * (R + 1).
    intensity_integral = 1 - exp  # [B,T,M,N]
    intensity_integral = intensity_integral * exp_mask  # [B,T,M,N]
    intensity_integral = alpha.reshape(
        1, 1, marks, marks) * intensity_integral  # [B,T,M,N]
    intensity_integral = intensity_integral / beta.reshape(
        1, 1, marks, marks)  # [B,T,M,N]
    intensity_integral = intensity_integral * r_terms_plus_one  # [B,T,M,N]
    intensity_integral = th.sum(intensity_integral, dim=-1)  # [B,T,M]
    # term_4 = mu (t - t_i) ti < t
    term_4 = delta_t.unsqueeze(dim=-1) * mu.reshape(1, 1, marks)  # [B,T,M]
    intensity_integral = intensity_integral + term_4  # [B,T,M]
    intensities_mask = events.within_window(query)  # [B,T]
    return log_intensity, intensity_integral, intensities_mask, dict()
| 5,271 | 44.059829 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/processes/hawkes.py | import torch as th
from torch.nn.functional import relu
def intensity_at_t(mu, alpha, sequences_padded, mask, t):
    """Evaluate the Hawkes intensity at a single time t:

        mu + alpha * sum(exp(-(t - s)) for s in points if s < t)

    Args:
        mu: Base rate (float).
        alpha: Excitation weight (float).
        sequences_padded: [B,L] tensor of padded event times.
        mask: [B,L] tensor masking out padding events.
        t: Query time (float).

    Returns:
        [B] tensor with the intensity of each sequence at time t.
    """
    # Only events strictly before t (and not padding) contribute.
    history_mask = (sequences_padded < t).type(sequences_padded.dtype)
    combined_mask = history_mask * mask
    # Clamp future (positive) deltas to zero so exp() maps them to one;
    # the combined mask then removes those terms entirely.
    intensities = th.clamp(sequences_padded - t, max=0.)
    intensities = alpha * th.exp(intensities)
    intensities = combined_mask * intensities
    intensities = th.sum(intensities, dim=-1)
    return intensities + mu
def _zero_positives(x):
return - relu(-x)
def intensity_at_times(mu, alpha, sequences_padded, times, sequence_mask=None):
    """Evaluate the Hawkes intensity at several query times:

        mu + alpha * sum(exp(-(t - s)) for s in points if s < t)

    Args:
        mu: Base rate (float).
        alpha: Excitation weight (float).
        sequences_padded: [B,L] tensor of padded event times.
        times: [B,T] tensor of query times.
        sequence_mask: [B,L] tensor masking out padding events. Defaults
            to all ones.

    Returns:
        [B,T] tensor with the intensity of each sequence at each query
        time.
    """
    if sequence_mask is None:
        sequence_mask = th.ones_like(
            sequences_padded, device=sequences_padded.device)
    sequences_padded = sequences_padded.unsqueeze(dim=1)  # B x 1 x L
    sequence_mask = sequence_mask.unsqueeze(dim=1)  # B x 1 x L
    times = times.unsqueeze(dim=-1)  # B x T x 1
    # Only events strictly before each query contribute. Cast to
    # times.dtype and keep it there; the previous extra .float() call
    # silently downcast float64 masks to float32.
    history_mask = (sequences_padded < times).type(times.dtype)  # B x T x L
    combined_mask = history_mask * sequence_mask  # B x T x L
    intensities = sequences_padded - times  # B x T x L
    # Clamp future (positive) deltas to zero so exp() maps them to one;
    # the combined mask then removes those terms entirely.
    intensities = th.clamp(intensities, max=0.)
    intensities = alpha * th.exp(intensities)  # B x T x L
    intensities = combined_mask * intensities  # B x T x L
    intensities = th.sum(intensities, dim=-1)  # B x T
    intensities += mu  # B x T
    return intensities
def neg_log_likelihood(mu, alpha, sequences_padded, sequence_mask, window):
    """Closed-form negative log likelihood of a univariate Hawkes process
    (unit decay rate) on the observation window [0, window]:

        nll = -sum_i log lambda(t_i) + mu * T + alpha * sum_i (1 - exp(t_i - T))

    Args:
        mu: Base rate (float).
        alpha: Excitation weight (float).
        sequences_padded: [B,L] tensor of padded event times.
        sequence_mask: [B,L] tensor masking out padding events, or `None`
            for all ones.
        window: Length T of the observation window (float).

    Returns:
        [B] tensor with the negative log likelihood of each sequence.
    """
    if sequence_mask is None:
        sequence_mask = th.ones_like(
            sequences_padded, device=sequences_padded.device)
    # Log-intensity evaluated at each (valid) event time.
    event_intensity = intensity_at_times(
        mu=mu, alpha=alpha,
        sequences_padded=sequences_padded,
        sequence_mask=sequence_mask,
        times=sequences_padded)  # B x L
    log_term = th.sum(th.log(event_intensity) * sequence_mask, dim=-1)  # B
    # Compensator: mu*T + alpha * sum_i (1 - exp(t_i - T)).
    n_events = th.sum(sequence_mask, dim=-1)  # B
    decay = th.sum(
        th.exp(sequences_padded - window) * sequence_mask, dim=-1)  # B
    compensator = window * mu + alpha * (n_events - decay)  # B
    return compensator - log_term  # B
def intensity_old(mu, alpha, points, t):
    """Reference (non-batched) Hawkes intensity at time t:

        mu + alpha * sum(exp(-(t - s)) for s in points if s < t)
    """
    past = points[points < t]
    return mu + alpha * th.sum(th.exp(past - t))
def neg_log_likelihood_old(mu, alpha, points, window):
    """Reference (non-vectorised) Hawkes negative log-likelihood.

    Args:
        mu: float, base intensity.
        alpha: float, excitation weight.
        points: 1d tensor of event times.
        window: float, end of the observation window.
    """
    log_terms = [th.log(intensity_old(mu, alpha, points, p)) for p in points]
    event_term = sum(log_terms)
    compensator = (
        window * mu
        + alpha * len(points)
        - alpha * sum(th.exp(points - window)))
    return compensator - event_term
| 3,827 | 27.355556 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/processes/hawkes_slow.py | import torch as th
from typing import Dict, Optional, Tuple
import tpp.utils.batch as bu
from tpp.utils.events import Events
def decoder_slow(
        events: Events,
        query: th.Tensor,
        prev_times: th.Tensor,
        is_event: th.Tensor,
        alpha: th.Tensor,
        beta: th.Tensor,
        mu: th.Tensor,
        marks: Optional[int] = 1
) -> Tuple[th.Tensor, th.Tensor, th.Tensor, Dict]:
    """Compute the intensities for each query time given event
    representations.

    Closed-form exponential-kernel Hawkes decoder ("slow" reference
    implementation, materialising the full [B,T,L,M] interaction tensor).

    Args:
        events: [B,L] Times and labels of events.
        query: [B,T] Times to evaluate the intensity function.
        prev_times: [B,T] Times of events directly preceding queries.
        is_event: [B,T] A mask indicating whether the time given by
            `prev_times_idxs` corresponds to an event or not (a 1 indicates
            an event and a 0 indicates a window boundary).
        alpha: [M,M] The alpha matrix for the Hawkes process.
        beta: [M,M] The beta matrix for the Hawkes process.
        mu: [M,M] The mu vector for the Hawkes process.
        marks: The number of marks for the process (default=1).

    Returns:
        log_intensity: [B,T,M] The intensities for each query time for
            each mark (class).
        intensity_integrals: [B,T,M] The integral of the intensity from
            the most recent event to the query time for each mark.
        intensities_mask: [B,T] Which intensities are valid for further
            computation based on e.g. sufficient history available.
    """
    # TODO: Work mainly in log space, rather than exponentiating
    (b, t), seq_len, m = query.shape, events.times.shape[-1], marks
    mark_vocab = th.arange(marks, device=events.labels.device)  # [M]
    mark_vocab_r = mark_vocab.reshape(1, 1, m)  # [1,1,M]
    # The argmax step is probably redundant due to the mark index that follows
    events_labels = th.argmax(events.labels, dim=-1).long()  # [B,L]
    # One-hot indicator of which class each event belongs to.
    mark_index = events_labels.unsqueeze(dim=-1) == mark_vocab_r  # [B,L,M]
    mark_index = mark_index.type(events.times.dtype)  # [B,L,M]
    # Alpha, beta and gamma=alpha/beta coefficients for each point in
    # history affecting marks of all types
    al_event = th.matmul(mark_index, alpha.transpose(1, 0))  # [B,L,M]
    be_event = th.matmul(mark_index, beta.transpose(1, 0))  # [B,L,M]
    # Compute query time dependent terms
    be_event_r = be_event.reshape(b, 1, seq_len, m)  # [B,1,L,M]
    # Double masked so that we don't end up with infinities after the
    # exponential that we then mask
    def get_masked_exp(times, allow_equal_times=False):
        # exp(-beta (times - t_n)) for history events t_n strictly (or
        # weakly, if allow_equal_times) preceding `times`, zeroed elsewhere.
        arg = bu.batchwise_difference(times, events.times)  # [B,T,L]
        if allow_equal_times:
            mask = (arg >= 0).type(arg.dtype)  # [B,T,L]
        else:
            mask = (arg > 0).type(arg.dtype)  # [B,T,L]
        mask = mask * is_event.unsqueeze(dim=-1)  # [B,T,L]
        mask = mask * events.mask.unsqueeze(dim=1)  # [B,T,L]
        arg = arg.reshape(b, t, seq_len, 1)  # [B,T,L,1]
        arg = - be_event_r * arg  # [B,T,L,M]
        # Mask before exponentiating so masked entries give exp(0)=1,
        # then mask again to zero them out.
        arg = mask.unsqueeze(dim=-1) * arg  # [B,T,L,M]
        exp = th.exp(arg)  # [B,T,L,M]
        exp = mask.unsqueeze(dim=-1) * exp  # [B,T,L,M]
        return exp
    # exp_1 = exp( - beta_mn (t - tn) )    tn < t    [B,T,L,M]
    exp_1_masked = get_masked_exp(times=query)  # [B,T,L,M]
    # term_1 = sum_n al_mn sum_{tn < t} exp( - beta_mn (t - tn) )   [B,T,M]
    # term_2 = sum_n ga_mn sum_{tn < t} exp( - beta_mn (t - tn) )   [B,T,M]
    # ga_mn = al_mn / be_mn
    al_event_r = al_event.reshape(b, 1, seq_len, m)  # [B,1,L,M]
    term_1 = exp_1_masked * al_event_r  # [B,T,L,M]
    term_2 = term_1 / be_event_r  # [B,T,L,M]
    term_1 = th.sum(term_1, dim=2)  # [B,T,M]
    term_2 = th.sum(term_2, dim=2)  # [B,T,M]
    # exp_2 = exp( - beta_mn (ti - tn) )    tn <= ti    [B,T,L,M]
    exp_2_masked = get_masked_exp(
        times=prev_times, allow_equal_times=True)  # [B,T,L,M]
    # term_3 = sum_n ga_mn sum_{tn < ti} exp( - beta_mn (ti - tn))   [B,T,M]
    term_3 = exp_2_masked * al_event_r / be_event_r  # [B,T,L,M]
    term_3 = th.sum(term_3, dim=2)  # [B,T,M]
    # term_4 = mu (t - t_i)    ti < t
    delta_t = query - prev_times  # [B,T]
    mu_r = mu.reshape(1, 1, marks)  # [1,1,M]
    term_4 = delta_t.unsqueeze(dim=-1) * mu_r  # [B,T,M]
    intensity = mu_r + term_1  # [B,T,M]
    log_intensity = th.log(intensity)  # [B,T,M]
    # Integral of the intensity from the previous event to the query time.
    intensity_integral = term_4 + term_3 - term_2  # [B,T,M]
    intensities_mask = events.within_window(query)  # [B,T]
    return log_intensity, intensity_integral, intensities_mask, dict()
| 4,663 | 40.274336 | 78 | py |
neuralTPPs | neuralTPPs-master/tpp/processes/multi_class_dataset.py | import json
import os
import numpy as np
import torch as th
from tick.hawkes import SimuHawkes, HawkesKernelExp
from tqdm import tqdm
from typing import List
from tpp.utils.marked_times import objects_from_events
from tpp.utils.marked_times import pad
from tpp.utils.record import hawkes_seq_to_record
class MultiClassDataset:
    """
    MultiClassDataset: Unmarked Multi-class Hawkes dataset.

    The intensity function for this process is:
        lambda_i(t|tjk) = mu(i) + sum[
            alpha(i, j) * sum[exp(t - tjk) for tjk < t] for j in range(n_nodes)
        ]
    where tjk are timestamps of all events of node j

    Args
        alpha: excitation components of the Hawkes processes
        decay: decay components of the Hawkes processes
        device: device where the generated data is loaded
        mu: base components of the Hawkes process
        n_processes: number of generated Hawkes processes
        padding_id: id of the padded value used to generate the dataset
        seed: seed of the process
        size: size of the dataset
        window: window size of the Hawkes processes
    """
    def __init__(self, args, size, seed, name):
        self.alpha = args.alpha.astype(np.float64)
        self.data_dir = args.data_dir
        self.decay = args.beta.astype(np.float64)
        self.device = args.device
        self.mu = args.mu.astype(np.float64)
        self.n_processes = args.marks
        self.name = name
        self.padding_id = args.padding_id
        self.load_from_dir = args.load_from_dir
        self.seed = seed
        self.size = size
        self.window = args.window
        self.times_dtype = th.float32
        self.labels_dtype = th.float32
        self.verbose = args.verbose
        # Shape checks only matter when we simulate; loading from disk
        # ignores alpha/decay entirely.
        if args.load_from_dir is None:
            assert len(self.alpha.shape) == 2
            assert self.alpha.shape[0] == self.alpha.shape[1]
            assert self.alpha.shape[0] == self.n_processes
            assert len(self.decay.shape) == 2
            assert self.decay.shape[0] == self.decay.shape[1]
            assert self.decay.shape[0] == self.n_processes
        self.raw_objects = self._build_sequences()
        self.times = self.raw_objects["times"]
        self.labels = self.raw_objects["labels"]
        self.lengths = [len(x) for x in self.times]
        self.max_length = max(self.lengths)
        self.lengths = th.Tensor(self.lengths).long().to(self.device)
        self.build_dict_names(args)

    def _build_sequences(self):
        """Load or simulate raw records and drop empty sequences."""
        events = self.load_data()
        # Records may either be event lists directly or dicts wrapping them
        # under an "events" key.
        if "events" in events[0]:
            records = events
            events = [r["events"] for r in records]
        events = [e for e in events if len(e) > 0]
        # times, labels
        raw_objects = objects_from_events(
            events=events,
            marks=self.n_processes,
            labels_dtype=self.labels_dtype,
            verbose=self.verbose,
            device=self.device)
        not_empty = [len(x) > 0 for x in raw_objects["times"]]
        def keep_not_empty(x):
            # Keep only entries whose time sequence is non-empty.
            return [y for y, nonempty in zip(x, not_empty) if nonempty]
        return {k: keep_not_empty(v) for k, v in raw_objects.items()}

    def __getitem__(self, item):
        raw_objects = {k: v[item] for k, v in self.raw_objects.items()}
        seq_len = self.lengths[item]
        result = {
            "raw": raw_objects, "seq_len": seq_len,
            "padding_id": self.padding_id}
        return result

    def __len__(self):
        return len(self.times)

    @staticmethod
    def to_features(batch):
        """
        Casts times and events to PyTorch tensors

        Collate function: pads the variable-length sequences in `batch` into
        stacked [B,L] tensors.
        """
        times = [b["raw"]["times"] for b in batch]
        labels = [b["raw"]["labels"] for b in batch]
        padding_id = batch[0]["padding_id"]
        # The padding value must not collide with a real event time.
        assert padding_id not in th.cat(times)
        padded_times = pad(x=times, value=padding_id)  # [B,L]
        # Pad with zero, not with padding_id so that the embeddings don't fail.
        padded_labels = pad(x=labels, value=0)  # [B,L]
        features = {"times": padded_times, "labels": padded_labels,
                    "seq_lens": th.stack([b["seq_len"] for b in batch])}
        return features

    def load_data(self) -> List:
        """Read records from disk, or simulate them with tick."""
        if self.load_from_dir is not None:
            data_dir = os.path.join(self.data_dir, self.load_from_dir)
            data_path = os.path.join(data_dir, self.name + ".json")
            with open(data_path, "r") as h:
                records = json.load(h)
        else:
            range_size = range(self.size)
            if self.verbose:
                range_size = tqdm(range_size)
            # One simulation per dataset element, with a distinct seed each.
            times_marked = [
                generate_points(
                    n_processes=self.n_processes,
                    mu=self.mu,
                    alpha=self.alpha,
                    decay=self.decay,
                    window=self.window,
                    seed=self.seed + i) for i in range_size]  # D x M x Li
            records = [hawkes_seq_to_record(seq) for seq in times_marked]
        return records

    def build_dict_names(self, args):
        """Write identity label-name/code mapping files to `args.save_dir`."""
        if self.load_from_dir is None:
            # NOTE(review): `codes_to_names` and `names_to_codes` alias the
            # SAME dict object here, and several files below are written
            # twice -- harmless since all mappings are the identity, but
            # worth confirming this is intentional.
            codes_to_names = names_to_codes = {}
            for i in range(self.n_processes):
                codes_to_names[str(i)] = str(i)
                names_to_codes[str(i)] = str(i)
            with open(os.path.join(args.save_dir, 'codes_to_int.json'), 'w'
                      ) as fp:
                json.dump(codes_to_names, fp)
            with open(os.path.join(args.save_dir, 'int_to_codes.json'), 'w'
                      ) as fp:
                json.dump(names_to_codes, fp)
            with open(os.path.join(args.save_dir, 'int_to_codes_to_plot.json'
                                   ), 'w'
                      ) as fp:
                json.dump(names_to_codes, fp)
            with open(os.path.join(args.save_dir, 'codes_to_names.json'), 'w'
                      ) as fp:
                json.dump(codes_to_names, fp)
            with open(os.path.join(args.save_dir, 'names_to_codes.json'), 'w'
                      ) as fp:
                json.dump(names_to_codes, fp)
            with open(os.path.join(args.save_dir, 'int_to_codes.json'), 'w'
                      ) as fp:
                json.dump(names_to_codes, fp)
            with open(os.path.join(args.save_dir, 'codes_to_int.json'), 'w'
                      ) as fp:
                json.dump(names_to_codes, fp)
            with open(os.path.join(args.save_dir, 'int_to_codes_to_plot.json'
                                   ), 'w') as fp:
                json.dump(names_to_codes, fp)
def generate_points(
        n_processes,
        mu,
        alpha,
        decay,
        window,
        seed,
        dt=0.01):
    """
    Generates points of an marked Hawkes processes using the tick library.

    Args:
        n_processes: Number of nodes (marks).
        mu: [M] baseline intensities.
        alpha: [M,M] excitation matrix.
        decay: [M,M] decay matrix.
        window: Simulation end time.
        seed: Random seed for the simulation.
        dt: Time step used to track the intensity.

    Returns:
        List (length M) of arrays of event timestamps, one per node.
    """
    simulator = SimuHawkes(
        n_nodes=n_processes,
        end_time=window,
        verbose=False,
        seed=seed)
    for src in range(n_processes):
        for dst in range(n_processes):
            # tick parameterises the exponential kernel by its integral
            # (`intensity`), hence the alpha / decay division.
            kernel = HawkesKernelExp(
                intensity=alpha[src][dst] / decay[src][dst],
                decay=decay[src][dst])
            simulator.set_kernel(i=src, j=dst, kernel=kernel)
        simulator.set_baseline(src, mu[src])
    simulator.track_intensity(dt)
    simulator.simulate()
    return simulator.timestamps
| 7,324 | 34.216346 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/processes/hawkes/r_terms.py | import torch as th
from tpp.utils.events import Events
def get_r_terms(events: Events, beta: th.Tensor) -> th.Tensor:
    """
    R_{m,n}(i)=sum_{j: t_j^n < t_i^m} exp(- beta_{m,n} (t_i^m - t_j^n))

    Naive (loop-based) reference implementation.

    Returns:
        [B,Li,M,N] The R term for each event. Note, these are only defined
        when there are actually events. See `events.mask` for this
        information.
    """
    batch_size, seq_len = events.times.shape
    marks = beta.shape[0]
    r_terms = th.zeros(
        [batch_size, seq_len, marks, marks],
        dtype=events.times.dtype, device=events.times.device)
    for b in range(batch_size):
        times_b = events.times[b]
        labels_b = events.labels[b]
        mask_b = events.mask[b]
        for i in range(seq_len):
            t_i, m_i = times_b[i], mask_b[i]
            for t_j, label_j, m_j in zip(times_b, labels_b, mask_b):
                # Only strictly earlier events contribute.
                if t_j >= t_i:
                    continue
                contribution = th.exp(-beta[:, label_j] * (t_i - t_j))  # [M]
                contribution = contribution * m_i * m_j
                r_terms[b, i, :, label_j] = (
                    r_terms[b, i, :, label_j] + contribution)
    return r_terms
| 1,208 | 34.558824 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/processes/hawkes/r_terms_recursive_v.py | import time
import torch as th
from tpp.utils.events import Events
def get_r_terms(events: Events, beta: th.Tensor) -> th.Tensor:
    """
    R_{m,n}(i)=sum_{j: t_j^n < t_i^m} exp(- beta_{m,n} (t_i^m - t_j^n))

    Computed using the recursive definition
        R_{m,n}(i) = term_1 + term_2
        R_{m,n}(1) = 0
    term_1
        exp(- beta_{m,n} (t_i^m - t_{i-1}^m)) * R_{m,n}(i - 1)
    term_2
        sum_{j: t_{i-1}^m <= t_j^n <t_i^m} exp(- beta_{m,n} (t_i^m - t_j^n))

    Vectorised over the batch; only the recursion over sequence position
    remains a Python loop.

    Returns:
        [B,L,M,N] The R term for each event. Note, these are only defined when
        there are actually events. See `events.mask` for this information.
    """
    marks, batch_size = events.marks, events.batch_size()
    times, mask = events.times, events.mask  # [B,L]
    # One-hot labels are collapsed to integer class indices here.
    labels = th.argmax(events.labels, dim=-1)  # [B,L]
    seq_len = times.shape[-1]
    # Consecutive-event quantities: current time, previous time and the
    # previous event's label.
    ti, tim1, lim1 = times[:, 1:], times[:, :-1], labels[:, :-1]  # [B,L-1]
    # Both endpoints of each consecutive pair must be real events.
    mi = mask[:, 1:] * mask[:, :-1]  # [B,L-1]
    delta_t_i = ti - tim1  # [B,L-1]
    delta_t_i = delta_t_i.unsqueeze(-1).unsqueeze(-1)  # [B,L-1,1,1]
    arg = - beta.unsqueeze(0).unsqueeze(0) * delta_t_i  # [B,L-1,M,N]
    # Mask before exponentiating (masked entries become exp(0)=1) and again
    # afterwards to zero them out.
    arg = arg * mi.unsqueeze(-1).unsqueeze(-1)  # [B,L-1,M,N]
    exp = th.exp(arg)  # [B,L-1,M,N]
    exp = exp * mi.unsqueeze(-1).unsqueeze(-1)  # [B,L-1,M,N]
    # `ones` is the one-hot indicator of the previous event's class,
    # broadcast over the M axis of the R term.
    mark_range = th.arange(end=marks, device=lim1.device, dtype=lim1.dtype)
    ones = (lim1.unsqueeze(dim=-1) ==
            mark_range.unsqueeze(dim=0).unsqueeze(dim=0))  # [B,L-1,M]
    ones = ones.unsqueeze(dim=2).repeat(1, 1, marks, 1)  # [B,L-1,M,N]
    ones = ones.type(exp.dtype)
    # Base case: R(0) = 0.
    r_terms = [th.zeros(
        batch_size, marks, marks,
        dtype=exp.dtype, device=exp.device)]
    for i in range(1, seq_len):
        # R(i) = exp(-beta dt_i) * (R(i-1) + one_hot(label_{i-1})).
        r_term_i = r_terms[i-1] + ones[:, i-1]
        r_term_i = exp[:, i-1] * r_term_i
        r_terms.append(r_term_i)
    r_terms = th.stack(r_terms, dim=1)
    return r_terms
| 2,081 | 34.896552 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/processes/hawkes/r_terms_recursive.py | import torch as th
from typing import Optional
from tpp.utils.events import Events
def get_r_terms(
        events: Events,
        beta: th.Tensor,
        r_terms: Optional[th.Tensor] = None) -> th.Tensor:
    """
    R_{m,n}(i)=sum_{j: t_j^n < t_i^m} exp(- beta_{m,n} (t_i^m - t_j^n))

    Computed using the recursive definition
        R_{m,n}(i) = term_1 + term_2
        R_{m,n}(1) = 0
    term_1
        exp(- beta_{m,n} (t_i^m - t_{i-1}^m)) * R_{m,n}(i - 1)
    term_2
        sum_{j: t_{i-1}^m <= t_j^n <t_i^m} exp(- beta_{m,n} (t_i^m - t_j^n))

    Implemented by Python-level recursion on the sequence position; the
    `r_terms` argument carries the accumulated prefix between calls.

    Returns:
        [B,Li,M,N] The R term for each event. Note, these are only defined
        when there are actually events. See `events.mask` for this
        information.
    """
    marks, batch_size = events.marks, events.batch_size()
    if r_terms is None:
        # Base case: R(0) = 0.
        r_terms = th.zeros(
            [batch_size, 1, marks, marks],
            dtype=events.times.dtype, device=events.times.device)
    i, seq_len = r_terms.shape[1], events.times.shape[-1]
    if i == seq_len:
        return r_terms
    times, labels, mask = events.times, events.labels, events.mask  # [B,L]
    # NOTE(review): `lim1[b]` is used as an integer class index below; this
    # assumes `events.labels` holds integer labels per event, whereas the
    # vectorised variant argmaxes one-hot labels first -- confirm which
    # label encoding this module receives.
    ti, tim1, lim1 = times[:, i], times[:, i-1], labels[:, i-1]  # [B]
    mi, mim1 = mask[:, i], mask[:, i-1]  # [B]
    rim1 = r_terms[:, i-1]  # [B,M,N]
    delta_t_i = (ti - tim1).unsqueeze(dim=-1).unsqueeze(dim=-1)  # [B,1,1]
    arg = - beta.unsqueeze(dim=0) * delta_t_i  # [B,M,N]
    exp = th.exp(arg)  # [B,M,N]
    # Zero out contributions where either endpoint is not a real event.
    exp = exp * mi.unsqueeze(dim=-1).unsqueeze(dim=-1)  # [B,M,N]
    exp = exp * mim1.unsqueeze(dim=-1).unsqueeze(dim=-1)  # [B,M,N]
    # One-hot indicator of the previous event's class along the N axis.
    ones = th.zeros(
        [batch_size, marks, marks],
        dtype=exp.dtype,
        device=exp.device)  # [B,M,N]
    # TODO: Vectorise this
    for b in range(batch_size):
        ones[b, :, lim1[b]] = 1.
    # R(i) = exp(-beta dt_i) * (R(i-1) + one_hot(label_{i-1})).
    ri = exp * (rim1 + ones)  # [B,M,N]
    r_terms = th.cat([r_terms, ri.unsqueeze(dim=1)], dim=1)  # [B,I,M,N]
    return get_r_terms(events=events, beta=beta, r_terms=r_terms)
| 2,154 | 33.206349 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/lr_scheduler.py | import torch
import torch.optim as optim
from torch.optim.lr_scheduler import _LRScheduler
def create_lr_scheduler(optimizer, args):
    """Construct the learning-rate scheduler named by ``args.lr_scheduler``.

    Supported names: 'plateau', 'step', 'cos', 'milestones', 'findlr',
    'noam', 'clr' and 'calr'. Each branch reads its hyper-parameters from
    the corresponding ``args.lr_scheduler_*`` attributes.

    Raises:
        NotImplementedError: if ``args.lr_scheduler`` is not recognised.
    """
    if not isinstance(optimizer, optim.Optimizer):
        # assume FP16_Optimizer; unwrap the underlying optimizer
        optimizer = optimizer.optimizer
    name = args.lr_scheduler
    if name == 'plateau':
        return torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            # NB This threshold is (not) used so that we only change lr if
            # there is a significant difference.
            threshold=0,
            patience=args.lr_scheduler_patience,
            factor=args.lr_scheduler_gamma)
    if name == 'step':
        return torch.optim.lr_scheduler.StepLR(
            optimizer,
            step_size=args.lr_scheduler_step_size,
            gamma=args.lr_scheduler_gamma)
    if name == 'cos':
        return torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, args.max_epochs)
    if name == 'milestones':
        return torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=args.lr_scheduler_milestones,
            gamma=args.lr_scheduler_gamma)
    if name == 'findlr':
        return FindLR(optimizer, args.max_steps)
    if name == 'noam':
        return NoamLR(optimizer, warmup_steps=args.lr_scheduler_warmup)
    if name == "clr":
        base_lr = args.lr_rate_init
        half_cycle = args.lr_scheduler_step_size
        return torch.optim.lr_scheduler.CyclicLR(
            optimizer,
            base_lr=base_lr * 1.e-2,
            max_lr=base_lr,
            step_size_up=half_cycle,
            step_size_down=half_cycle,
            mode="exp_range",
            cycle_momentum=False,
            gamma=args.lr_scheduler_gamma)
    if name == 'calr':
        return torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=args.lr_scheduler_step_size,
            eta_min=args.lr_rate_init * args.lr_scheduler_gamma)
    raise NotImplementedError("unknown lr_scheduler " + args.lr_scheduler)
class FindLR(_LRScheduler):
    """Exponential LR range-finder schedule.

    Sweeps the learning rate from each group's base LR up to ``max_lr``
    over ``max_steps`` steps.

    inspired by fast.ai @https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html
    """
    def __init__(self, optimizer, max_steps, max_lr=10):
        self.max_steps = max_steps
        self.max_lr = max_lr
        super().__init__(optimizer)

    def get_lr(self):
        # Geometric interpolation between base_lr and max_lr.
        progress = self.last_epoch / (self.max_steps - 1)
        return [base_lr * ((self.max_lr / base_lr) ** progress)
                for base_lr in self.base_lrs]
class NoamLR(_LRScheduler):
    """
    Implements the Noam Learning rate schedule. This corresponds to increasing the learning rate
    linearly for the first ``warmup_steps`` training steps, and decreasing it thereafter proportionally
    to the inverse square root of the step number, scaled by the inverse square root of the
    dimensionality of the model. Time will tell if this is just madness or it's actually important.

    Parameters
    ----------
    warmup_steps: ``int``, required.
        The number of steps to linearly increase the learning rate.
    """
    def __init__(self, optimizer, warmup_steps):
        self.warmup_steps = warmup_steps
        super().__init__(optimizer)

    def get_lr(self):
        # Treat step 0 as step 1 so the schedule is defined from the start.
        step = max(1, self.last_epoch)
        warmup = self.warmup_steps
        scale = warmup ** 0.5 * min(step ** (-0.5), step * warmup ** (-1.5))
        return [base_lr * scale for base_lr in self.base_lrs]
| 4,016 | 38.382353 | 103 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/events.py | import torch as th
from typing import NamedTuple, Optional, Tuple
class Events(NamedTuple):
    """All event information.

    Props:
        times: [B,L] The times of the events.
        times_first: [B] The time of the first event.
        times_first_idx: [B] The index (into [L]) of the first event.
        times_final: [B] The time of the final event.
        times_final_idx: [B] The index (into [L]) of the final event.
        mask: [B,L] The mask indicating which times are defined.
        labels: [B,L] The labels for each time.
        marks: The number of mark classes.
        window_start: [B] The start of the observation window.
        window_end: [B] The end of the observation window.
        first_event_on_window: [B] Boolean indicating if the first event
            lies precisely on the window.
        final_event_on_window: [B] Boolean indicating if the final event
            lies precisely on the window.
    """
    times: th.Tensor  # [B,L]
    times_first: th.Tensor  # [B]
    times_first_idx: th.LongTensor  # [B]
    times_final: th.Tensor  # [B]
    times_final_idx: th.LongTensor  # [B]
    mask: th.Tensor  # [B,L]
    labels: th.LongTensor  # [B,L]
    marks: int  # number of mark classes
    window_start: th.Tensor  # [B]
    window_end: th.Tensor  # [B]
    first_event_on_window: th.Tensor  # [B]
    final_event_on_window: th.Tensor  # [B]

    def batch_size(self):
        """Return B, the number of sequences in the batch."""
        return self.times.shape[0]

    def get_times(
            self,
            prepend_window: Optional[bool] = False,
            postpend_window: Optional[bool] = False):
        """Return event times [B,L(+1)(+1)], optionally bracketed by the
        window start and/or end times."""
        batch_size = self.batch_size()
        times = [self.times]
        # NOTE: repeat((1, 1)) is a no-op here; kept for shape clarity.
        w_reshape, w_repeats = (batch_size, 1), (1, 1)
        window_start = self.window_start.reshape(w_reshape).repeat(w_repeats)
        window_end = self.window_end.reshape(w_reshape).repeat(w_repeats)
        # Consider that each mark has a starting window event, but they're all
        # really the same event.
        if prepend_window:
            times = [window_start] + times
        # Consider that each mark has an ending window event, but they're all
        # really the same event.
        if postpend_window:
            times = times + [window_end]
        return th.cat(times, dim=-1)

    def get_mask(
            self,
            prepend_window: Optional[bool] = False,
            postpend_window: Optional[bool] = False):
        """Return the event mask, extended with 1s for any window boundary
        entries added by `get_times` with the same flags."""
        batch_size = self.batch_size()
        mask = [self.mask]
        o_reshape, o_repeats = (batch_size, 1), (1, 1)
        ones = th.ones_like(
            self.window_start,
            dtype=self.mask.dtype,
            device=self.window_start.device)
        ones = [ones.reshape(o_reshape).repeat(o_repeats)]
        if prepend_window:
            mask = ones + mask
        if postpend_window:
            mask = mask + ones
        return th.cat(mask, dim=-1)

    def within_window(self, query: th.Tensor):
        """Return a [B,T] float mask: 1 where the query time lies in the
        half-open interval (window_start, window_end]."""
        after_window = query > self.window_start.unsqueeze(dim=-1)  # [B,T]
        before_window = query <= self.window_end.unsqueeze(dim=-1)  # [B,T]
        result = after_window & before_window  # [B,T]
        result = result.type(query.dtype)  # [B,T]
        return result
def get_events(
        times: th.Tensor,
        mask: th.Tensor,
        labels: Optional[th.LongTensor] = None,
        window_start: Optional[th.Tensor] = None,
        window_end: Optional[th.Tensor] = None,
        marks: Optional[int] = None
) -> Events:
    """
    Args:
        times: [B,L] The times of the events.
        mask: [B,L] The mask indicating which times are defined.
        labels: [B,L,M] One-hot labels for each time, or, when `marks` is
            given, optionally a [B,L] tensor of integer class indices which
            is one-hot encoded here. If `None`, a single mark with weight
            one is assigned to every event.
        window_start: [B] The start of the observation window. If `None`, this
            will be taken as the first observed event.
        window_end: The end of the observation window. If `None`, this
            will be taken as the final observed event.
        marks: Optional number of mark classes. Required only when `labels`
            is a [B,L] integer tensor; otherwise it is validated against
            (or derived from) `labels.shape[-1]`.

    Returns:
        events: The events named tuple, containing:
            times: [B,L] The times of the events.
            times_first: [B] The time of the first event.
            times_first_idx: [B] The index (into [L]) of the first event.
            times_final: [B] The time of the final event.
            times_final_idx: [B] The index (into [L]) of the final event.
            mask: [B,L] The mask indicating which times are defined.
            labels: [B,L,M] The labels for each time.
            window_start: [B] The start of the observation window.
            window_end: [B] The end of the observation window.
            first_event_on_window: [B] Boolean indicating if the first event
                lies precisely on the window.
            final_event_on_window: [B] Boolean indicating if the final event
                lies precisely on the window.
    """
    masked_times = mask * times  # [B,L]
    times_final, times_final_idx = th.max(masked_times, dim=-1)  # [B]
    # Replace masked-out slots with the final time so the minimum below
    # picks out the first *real* event.
    inverted_mask = 1. - mask  # [B,L]
    masked_time_shift = times_final.unsqueeze(dim=-1) * inverted_mask  # [B,L]
    masked_times_shifted = masked_times + masked_time_shift  # [B,L]
    times_first, times_first_idx = th.min(masked_times_shifted, dim=-1)  # [B]
    if window_start is None:
        window_start = times_first
    if window_end is None:
        window_end = times_final
    first_event_on_window = times_first == window_start
    final_event_on_window = times_final == window_end
    if labels is None:
        # Default: a single mark, every event carrying weight one.
        labels = th.ones(
            size=times.shape,
            dtype=times.dtype,
            device=times.device).unsqueeze(dim=-1)
    elif labels.dim() == 2 and marks is not None:
        # Integer class indices: one-hot encode to [B,L,marks].
        labels = th.nn.functional.one_hot(
            labels.long(), num_classes=marks).type(times.dtype)
    assert len(labels.shape) == 3
    assert times.shape == labels.shape[:-1]
    if marks is not None:
        assert labels.shape[-1] == marks
    marks = labels.shape[-1]
    events = Events(
        times=times,
        times_first=times_first, times_first_idx=times_first_idx,
        times_final=times_final, times_final_idx=times_final_idx,
        mask=mask,
        labels=labels,
        marks=marks,
        window_start=window_start, window_end=window_end,
        first_event_on_window=first_event_on_window,
        final_event_on_window=final_event_on_window)
    return events
def get_window(
        times: th.Tensor, window: float) -> Tuple[th.Tensor, th.Tensor]:
    """Build per-batch window boundary tensors.

    Returns a zero start tensor [B] and, when `window` is not `None`, an
    end tensor [B] filled with `window` (matching the dtype and device of
    `times`); otherwise the end is `None`.
    """
    batch_size = times.shape[0]
    window_start = th.zeros([batch_size]).type(times.dtype).to(times.device)
    if window is None:
        return window_start, None
    window_end = (th.ones([batch_size]) * window).type(
        times.dtype).to(times.device)
    return window_start, window_end
| 6,790 | 37.367232 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/test.py | import torch as th
from tpp.utils.events import get_events, get_window
from tpp.utils.marked_times import pad
def get_test_events_query(
        marks=2,
        batch_size=16,
        max_seq_len=16,
        queries=4,
        padding_id=-1.,
        device=th.device('cpu'), dtype=th.float32):
    """Build a random Events batch plus sorted query times for unit tests.

    Each batch element gets a random-length, chronologically sorted event
    sequence with random integer labels, padded to a common length, and a
    sorted set of `queries` query times drawn from [0, 1).
    """
    seq_lens = th.randint(low=1, high=max_seq_len, size=[batch_size])
    times = [th.rand(size=[seq_len]) for seq_len in seq_lens]
    labels = [th.randint(low=0, high=marks, size=[seq_len])
              for seq_len in seq_lens]
    # Sort each sequence chronologically, keeping labels aligned.
    sort_idx = [th.argsort(x) for x in times]
    times = [x[idx] for x, idx in zip(times, sort_idx)]
    labels = [x[idx] for x, idx in zip(labels, sort_idx)]
    times = pad(times, value=padding_id).type(dtype)
    labels = pad(labels, value=0)
    times, labels = times.to(device), labels.to(device)
    mask = (times != padding_id).type(times.dtype).to(times.device)
    window_start, window_end = get_window(times=times, window=1.)
    # NOTE(review): this passes a `marks` keyword and [B,L] integer labels;
    # `get_events` in tpp/utils/events.py (as reviewed) takes no `marks`
    # argument and asserts [B,L,M] one-hot labels -- confirm the signatures
    # are in sync.
    events = get_events(
        times=times, mask=mask, labels=labels,
        window_start=window_start, window_end=window_end, marks=marks)
    query = th.rand(size=[batch_size, queries])
    query = th.sort(query, dim=-1).values
    query = query.to(device)
    return events, query
| 1,260 | 31.333333 | 70 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/marked_times.py | import torch as th
from torch.nn.functional import one_hot
from tqdm import tqdm
from typing import Dict, List, Optional
from tpp.utils.sequence import pad_sequence
def get_unmasked_tensor(
        x: th.Tensor, mask: th.Tensor) -> List[th.Tensor]:
    """Select the unmasked entries of each row.

    Args:
        x: [B,L] The tensor to subset by the mask.
        mask: [B,L] The mask.

    Returns:
        B tensors [Li], one per row, each holding only the entries whose
        mask is set (so in principle of different lengths).
    """
    result = []
    for row, row_mask in zip(x, mask):
        result.append(row[row_mask.bool()])
    return result
def times_marked_from_times_labels(
        times: List[th.Tensor],
        labels: List[th.Tensor],
        marks: int) -> List[List[th.Tensor]]:
    """Split each sequence of times into per-mark sub-sequences.

    Args:
        times: B x [Li] tensors of non-masked times, each in principle of
            different lengths.
        labels: B x [Li] tensors of non-masked labels, each in principle of
            different lengths.
        marks: The number of marks.

    Returns:
        B x [M x [Li]] Tensors of non-masked times for each mark, each in
        principle of different lengths.
    """
    result = []
    for seq_times, seq_labels in zip(times, labels):
        per_mark = [seq_times[seq_labels == mark] for mark in range(marks)]
        result.append(per_mark)
    return result
def objects_from_events(
        events: List[List[th.Tensor]],
        marks: int,
        times_dtype: Optional[th.dtype] = th.float32,
        labels_dtype: Optional[th.dtype] = th.float32,
        verbose: Optional[bool] = False,
        device=th.device("cpu"),
) -> Dict[str, List[th.Tensor]]:
    """Build sorted time and label tensors from raw event records.

    Args:
        events: D x [Dict(time=12.3, labels=tuple(1, 3))].
        marks: The number of classes, used for the (multi-)one-hot
            encoding of each event's labels.
        times_dtype: Time datatype (default=th.float32).
        labels_dtype: Label datatype (default=th.float32). A float default
            so the labels can be multiplied into losses/embeddings.
        verbose: Show a progress bar while encoding labels.
        device: The device to put the objects on. Defaults to cpu.

    Returns:
        A dictionary of:
            times: B x [L] tensors of times, sorted ascending.
            labels: B x [L,M] tensors of labels, in the same sorted order.
    """
    unsorted_times = [
        th.Tensor([e["time"] for e in record]).to(device).type(times_dtype)
        for record in events]  # [D,L]
    if verbose:
        events = tqdm(events, total=len(events))
    unsorted_labels = [
        th.stack([
            one_hot(
                th.Tensor(e["labels"]).to(device).long(),
                num_classes=marks).sum(0).type(labels_dtype)
            for e in record]) for record in events]  # [D,L,M]
    sort_orders = [th.argsort(t) for t in unsorted_times]  # [D,L]
    times = [
        t[order] for t, order in zip(unsorted_times, sort_orders)]  # [D,L]
    labels = [
        l[order] for l, order in zip(unsorted_labels, sort_orders)]  # [D,L,M]
    return {"times": times, "labels": labels}
def pad(x: List[th.Tensor], value, pad_len: Optional[int] = None):
    """Stack variable-length tensors into a single padded batch tensor.

    Args:
        x: B x [Li] The tensors to stack together post-padding.
        value: The padding value used to bring all tensors to one length.
        pad_len: Target length for every sequence. When `None`, the longest
            sequence length is used. Default: `None`.

    Returns:
        Tensor of size [B, `pad_len`].
    """
    return pad_sequence(
        sequences=x,
        batch_first=True,
        padding_value=value,
        pad_len=pad_len)
| 3,590 | 30.5 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/dtype.py | import numpy as np
import torch as th
# Mapping between torch dtypes and their numpy equivalents. Generalized to
# cover the common numeric dtypes (previously float32 only); extend here as
# needed. NUMPY_TO_TORCH is derived as the exact inverse.
TORCH_TO_NUMPY = {
    th.float32: np.float32,
    th.float64: np.float64,
    th.int32: np.int32,
    th.int64: np.int64,
    th.bool: np.bool_,
}
NUMPY_TO_TORCH = {k: v for v, k in TORCH_TO_NUMPY.items()}
| 146 | 17.375 | 58 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/sequence.py | """
Adapted from torch.nn.utils.rnn.pad_sequence
"""
def pad_sequence(sequences, batch_first=False, padding_value=0, pad_len=None):
    r"""Pad a list of variable length Tensors with ``padding_value``.

    Stacks ``sequences`` (each of size ``L x *``) along a new dimension and
    pads them to a common length ``T``. The output is ``B x T x *`` when
    ``batch_first`` is True and ``T x B x *`` otherwise, where ``B`` is
    ``len(sequences)`` and ``T`` is ``pad_len`` (or the longest sequence
    length when ``pad_len`` is ``None``).

    Adapted from ``torch.nn.utils.rnn.pad_sequence`` with the extra
    ``pad_len`` argument.

    Example:
        >>> from torch.nn.utils.rnn import pad_sequence
        >>> a = torch.ones(25, 300)
        >>> b = torch.ones(22, 300)
        >>> c = torch.ones(15, 300)
        >>> pad_sequence([a, b, c]).size()
        torch.Size([25, 3, 300])

    Arguments:
        sequences (list[Tensor]): list of variable length sequences. All
            are assumed to share trailing dimensions and dtype.
        batch_first (bool, optional): output will be in ``B x T x *`` if
            True, or in ``T x B x *`` otherwise.
        padding_value (float, optional): value for padded elements.
            Default: 0.
        pad_len (int, optional): Length to pad all sequences to. If `None`,
            uses the longest sequence length. Default: `None`.

    Raises:
        ValueError: if ``pad_len`` is smaller than the longest sequence.
    """
    # Trailing dims and dtype are taken from the first sequence.
    trailing_dims = sequences[0].size()[1:]
    longest = max(s.size(0) for s in sequences)
    if pad_len is None:
        pad_len = longest
    elif pad_len < longest:
        raise ValueError(
            "Your padding length ({}) is less than the maximum sequence "
            "length ({}). You will lose information!".format(
                pad_len, longest))
    batch = len(sequences)
    if batch_first:
        out_dims = (batch, pad_len) + trailing_dims
    else:
        out_dims = (pad_len, batch) + trailing_dims
    out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
    for idx, seq in enumerate(sequences):
        length = seq.size(0)
        # use index notation to prevent duplicate references to the tensor
        if batch_first:
            out_tensor[idx, :length, ...] = seq
        else:
            out_tensor[:length, idx, ...] = seq
    return out_tensor
| 2,818 | 37.094595 | 84 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/batch.py | import torch as th
def _batchwise_fn(x, y, f):
    """Apply ``f`` pairwise across the final dimensions of ``x`` and ``y``.

    Args:
        x (th.Tensor): [B1, B2, ... , BN, X] The first tensor.
        y (th.Tensor): [B1, B2, ... , BN, Y] The second tensor.
        f (function): A binary function applied with broadcasting.

    Returns:
        (th.Tensor): [B1, B2, ... , BN, X, Y] tensor with entries
            f(x[..., i], y[..., j]).

    Raises:
        ValueError: if the leading (batch) dimensions of x and y differ.
    """
    if x.shape[:-1] != y.shape[:-1]:
        raise ValueError(
            "Shape of `x` ({}) incompatible with shape of y ({})".format(
                x.shape, y.shape))
    expanded_x = x.unsqueeze(-1)  # [B1, B2, ... , BN, X, 1]
    expanded_y = y.unsqueeze(-2)  # [B1, B2, ... , BN, 1, Y]
    return f(expanded_x, expanded_y)  # [B1, B2, ... , BN, X, Y]


def _product(x, y):
    # Elementwise (broadcasting) product.
    return x * y


def batchwise_product(x, y):
    """Pairwise products: result[..., i, j] = x[..., i] * y[..., j].

    Args:
        x (th.Tensor): [B1, B2, ... , BN, X] The first tensor.
        y (th.Tensor): [B1, B2, ... , BN, Y] The second tensor.

    Returns:
        (th.Tensor): [B1, B2, ... , BN, X, Y] tensor of products.
    """
    return _batchwise_fn(x, y, f=_product)


def _difference(x, y):
    # Elementwise (broadcasting) difference.
    return x - y


def batchwise_difference(x, y):
    """Pairwise differences: result[..., i, j] = x[..., i] - y[..., j].

    Args:
        x (th.Tensor): [B1, B2, ... , BN, X] The first tensor.
        y (th.Tensor): [B1, B2, ... , BN, Y] The second tensor.

    Returns:
        (th.Tensor): [B1, B2, ... , BN, X, Y] tensor of differences.
    """
    return _batchwise_fn(x, y, f=_difference)
| 1,674 | 25.587302 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/history_marked_bst.py | import torch as th
from typing import Optional, Tuple
from tpp.utils.events import Events
from tpp.utils.index import take_2_by_2
from tpp.utils.searchsorted import searchsorted_marked
def get_prev_times_marked(
        query: th.Tensor,
        events: Events,
        allow_window: Optional[bool] = False
) -> Tuple[Tuple[th.Tensor, th.Tensor], th.Tensor, th.Tensor]:
    """For each query, get the event time that directly precedes it. If no
    events precedes it (but the window start does), return the window start.
    Otherwise, mask the value.

    Args:
        query: [B,T] Sequences of query times to evaluate the intensity
            function.
        events: [B,L] Times and labels of events.
        allow_window: If `True`, a previous time can be the window boundary.
            Defaults to `False`.

    Returns:
        `times` is a tuple of a tensor of values [B,M,T] and indices [B,M,T]
            of the largest time value in the sequence that is strictly
            smaller than the query time value, or the window. The index only
            indexes into the (marked) events. If the window is returned, it
            should be dealt with explicitly at encoding/decoding time.
        `is_event` is a tensor [B,M,T] that indicates whether the time
            corresponds to an event or not (1 indicates an event and 0
            indicates a window boundary).
        `mask` is a tensor [B,M,T] that indicates whether the time difference
            to those times was positive or not.
    """
    b, t = query.shape
    event_times = events.get_times(prepend_window=allow_window)  # [B,L]
    event_times_marked = events.get_times(
        marked=True, prepend_window=allow_window)  # [B,M,LM]
    event_mask_marked = events.get_mask(
        marked=True, prepend_window=allow_window)  # [B,M,LM]
    marks = events.marks

    # Per mark: index of the first event time >= query, then step back one
    # slot to get the last event time strictly before the query. A -1 marks
    # queries with no preceding event; those entries are masked out below.
    prev_times_idxs = searchsorted_marked(
        a=event_times_marked, v=query, mask=event_mask_marked) - 1  # [B,M,T]
    prev_times = take_2_by_2(
        event_times_marked.reshape(b * marks, -1),
        prev_times_idxs.reshape(b * marks, -1)).reshape(
        b, marks, -1)  # [B,M,T]

    # Mask queries whose preceding index is out of range (index -1).
    mask = (prev_times_idxs >= 0).type(event_times_marked.dtype)  # [B,M,T]

    if allow_window:
        # If the first event shares a time with the window boundary, ensure
        # that the index returned is the index of the event, rather than the
        # window boundary.
        idx_is_window = (prev_times_idxs == 0).type(
            prev_times_idxs.dtype)  # [B,M,T]
        do_idx_shift = events.first_event_on_window.type(
            idx_is_window.dtype)  # [B]
        idx_shift = idx_is_window * do_idx_shift.reshape(-1, 1, 1)  # [B,M,T]
        prev_times_idxs = prev_times_idxs + idx_shift
        # Index 0 is the window boundary; anything else is a real event.
        is_event = (prev_times_idxs != 0).type(mask.dtype)  # [B,M,T]
    else:
        # BUG FIX: previously the returned index tensor was only bound inside
        # the `allow_window` branch, so calling with allow_window=False (the
        # default) raised a NameError at the return statement. Without a
        # prepended window there is no index shift to apply and every valid
        # previous time is an event.
        is_event = th.ones_like(
            prev_times_idxs,
            device=event_times.device, dtype=event_times.dtype)  # [B,M,T]

    # Queries outside the observation window never have a valid prev time.
    query_within_window = events.within_window(query=query)  # [B,T]
    mask = mask * query_within_window.unsqueeze(dim=1)  # [B,M,T]

    return (prev_times, prev_times_idxs), is_event, mask  # [B,M,T]
| 4,577 | 44.78 | 81 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/record.py | import numpy as np
import torch as th
from typing import Dict, List, Tuple
def hawkes_seq_to_record(seq: List[np.ndarray]):
    """Flatten per-mark event-time arrays into one time-sorted record.

    Args:
        seq: One array of event times per mark; the position of the array in
            `seq` is used as the mark label.

    Returns:
        A list of dicts ``{"time": float, "labels": (int,)}`` sorted by time.
    """
    flat_times = np.concatenate(seq)
    flat_labels = np.concatenate(
        [np.full(len(times_m), mark, dtype=int)
         for mark, times_m in enumerate(seq)])
    order = np.argsort(flat_times)
    return [
        {"time": float(t),
         "labels": (int(m),)}
        for t, m in zip(flat_times[order], flat_labels[order])]
| 443 | 25.117647 | 70 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/multi_head_attention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from typing import Optional, Tuple
class MultiheadAttention(nn.Module):
    r"""Allows the model to jointly attend to information
    from different representation subspaces, optionally with projection
    weights constrained to be non-negative (see ``constraint``).
    See reference: Attention Is All You Need
    .. math::
        \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
        \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
    Args:
        embed_dim: total dimension of the model.
        num_heads: parallel attention heads.
        dropout: a Dropout layer on attn_output_weights. Default: 0.0.
        bias: add bias as module parameter. Default: True.
        add_bias_kv: add bias to the key and value sequences at dim=0.
        add_zero_attn: add a new batch of zeros to the key and
            value sequences at dim=1.
        kdim: total number of features in key. Default: None.
        vdim: total number of features in key. Default: None.
        constraint: None, "nonneg", "softplus" or "sigmoid". Controls how
            the q and output projection weights are forced non-negative
            (see `forward`).
    Note: if kdim and vdim are None, they will be set to embed_dim such that
        query, key, and value have the same number of features.
    Examples::
        >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
        >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
    """
    # TorchScript type annotations for the optional key/value biases.
    __annotations__ = {
        'bias_k': torch._jit_internal.Optional[torch.Tensor],
        'bias_v': torch._jit_internal.Optional[torch.Tensor],
    }
    __constants__ = ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight']

    def __init__(
            self,
            embed_dim,
            num_heads,
            dropout=0.,
            bias=True,
            add_bias_kv=False,
            add_zero_attn=False,
            kdim=None,
            vdim=None,
            constraint=None):
        super(MultiheadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        # When q/k/v share a feature size, one packed [3*E, E] projection is
        # used; otherwise three separate projection matrices are allocated.
        self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.constraint = constraint
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        if self._qkv_same_embed_dim is False:
            self.q_proj_weight = nn.Parameter(torch.Tensor(embed_dim, embed_dim))
            self.k_proj_weight = nn.Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = nn.Parameter(torch.Tensor(embed_dim, self.vdim))
            self.register_parameter('in_proj_weight', None)
        else:
            self.in_proj_weight = nn.Parameter(torch.empty(3 * embed_dim, embed_dim))
            self.register_parameter('q_proj_weight', None)
            self.register_parameter('k_proj_weight', None)
            self.register_parameter('v_proj_weight', None)
        if bias:
            self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            self.bias_k = nn.Parameter(torch.empty(1, 1, embed_dim))
            self.bias_v = nn.Parameter(torch.empty(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init for projection weights, zeros for biases; then, under a
        # constraint, flip the sign of negative entries so training starts
        # from non-negative q/output projections.
        if self._qkv_same_embed_dim:
            xavier_uniform_(self.in_proj_weight)
        else:
            xavier_uniform_(self.q_proj_weight)
            xavier_uniform_(self.k_proj_weight)
            xavier_uniform_(self.v_proj_weight)
        if self.in_proj_bias is not None:
            constant_(self.in_proj_bias, 0.)
            constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            xavier_normal_(self.bias_v)
        if self.constraint is not None:
            # At least try and make sure they start positive.
            # `mask` is -1 where the weight is negative and +1 elsewhere, so
            # multiplying makes every entry non-negative. In the packed case
            # only the q rows (first embed_dim) are flipped; k/v stay as-is.
            if self._qkv_same_embed_dim:
                mask = (self.in_proj_weight < 0).float() * - 1
                mask = mask + (self.in_proj_weight >= 0).float()
                mask[self.embed_dim:] = 1.
                self.in_proj_weight.data = self.in_proj_weight.data * mask
            else:
                mask = (self.q_proj_weight < 0).float() * - 1
                mask = mask + (self.q_proj_weight >= 0).float()
                self.q_proj_weight.data = self.q_proj_weight.data * mask
            mask = (self.out_proj.weight < 0).float() * - 1
            mask = mask + (self.out_proj.weight >= 0).float()
            self.out_proj.weight.data = self.out_proj.weight.data * mask

    def __setstate__(self, state):
        super(MultiheadAttention, self).__setstate__(state)
        # Support loading old MultiheadAttention checkpoints generated by v1.1.0
        if 'self._qkv_same_embed_dim' not in self.__dict__:
            self._qkv_same_embed_dim = True

    def forward(
            self,
            query,
            key,
            value,
            key_padding_mask=None,
            need_weights=True,
            attn_mask=None,
            activation="softmax"):
        # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]
        r"""
        Args:
            query, key, value: map a query and a set of key-value pairs to an output.
                See "Attention Is All You Need" for more details.
            key_padding_mask: if provided, specified padding elements in the key will
                be ignored by the attention. This is an binary mask. When the value is True,
                the corresponding value on the attention layer will be filled with -inf.
            need_weights: output attn_output_weights.
            attn_mask: mask that prevents attention to certain positions. This is an additive mask
                (i.e. the values will be added to the attention layer).
            activation: Activation applied to the attention coefficients.
                'identity', 'softmax' or 'sigmoid'.

        NOTE(review): the original docstring also documented `tgt_times`,
        `memory_times` and `prev_times` arguments for a cumulative attention
        variant; those are not part of this signature — presumably leftovers
        from another version of this module.

        Shape:
            - Inputs:
            - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
              the embedding dimension.
            - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
              the embedding dimension.
            - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
              the embedding dimension.
            - key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length.
            - attn_mask: :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
            - Outputs:
            - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
              E is the embedding dimension.
            - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
              L is the target sequence length, S is the source sequence length.
        """
        q_proj_weight = self.q_proj_weight
        in_proj_weight = self.in_proj_weight
        out_proj_weight = self.out_proj.weight
        if self.constraint is not None:
            if self.constraint == "nonneg":
                # Hard projection onto the non-negative orthant (negatives
                # zeroed, then clamped to a tiny positive value).
                # NOTE(review): this branch also writes the projected
                # weights back into the parameters' `.data` during forward,
                # so the clamping persists across calls — confirm intended.
                if not self._qkv_same_embed_dim:
                    q_proj_weight = self.q_proj_weight * (
                        self.q_proj_weight > 0).float()
                    q_proj_weight = torch.clamp(q_proj_weight, min=1e-30)
                    self.q_proj_weight.data = q_proj_weight
                else:
                    # nn.Parameter(torch.empty(3 * embed_dim, embed_dim))
                    # Only the q rows are constrained; k/v rows keep sign.
                    mask = (self.in_proj_weight > 0).float()
                    mask[self.embed_dim:] = 1.
                    in_proj_weight = self.in_proj_weight * mask
                    in_proj_weight[:self.embed_dim] += 1e-30 * (
                        in_proj_weight[:self.embed_dim] == 0.).float()
                    self.in_proj_weight.data = in_proj_weight
                out_proj_weight = self.out_proj.weight * (
                    self.out_proj.weight > 0).float()
                out_proj_weight = torch.clamp(out_proj_weight, min=1e-30)
                self.out_proj.weight.data = out_proj_weight
            elif self.constraint == "softplus":
                # Soft reparameterisation: the effective weights are
                # softplus(raw weights), leaving the raw parameters free.
                if not self._qkv_same_embed_dim:
                    q_proj_weight = F.softplus(self.q_proj_weight)
                else:
                    # nn.Parameter(torch.empty(3 * embed_dim, embed_dim))
                    mask = torch.zeros_like(self.in_proj_weight).float()
                    mask[self.embed_dim:] = 1.
                    in_proj_weight = self.in_proj_weight * mask + F.softplus(
                        self.in_proj_weight) * (1. - mask)
                out_proj_weight = F.softplus(self.out_proj.weight)
            elif self.constraint == "sigmoid":
                # Reparameterise effective weights into (0, 1) via sigmoid.
                if not self._qkv_same_embed_dim:
                    q_proj_weight = F.sigmoid(self.q_proj_weight)
                else:
                    # nn.Parameter(torch.empty(3 * embed_dim, embed_dim))
                    mask = torch.zeros_like(self.in_proj_weight).float()
                    mask[self.embed_dim:] = 1.
                    in_proj_weight = self.in_proj_weight * mask + F.sigmoid(
                        self.in_proj_weight) * (1. - mask)
                out_proj_weight = F.sigmoid(self.out_proj.weight)
            else:
                raise NotImplementedError
        if not self._qkv_same_embed_dim:
            # Separate q/k/v projection weights.
            return multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, out_proj_weight, self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attn_mask, use_separate_proj_weight=True,
                q_proj_weight=q_proj_weight,
                k_proj_weight=self.k_proj_weight,
                v_proj_weight=self.v_proj_weight,
                activation=activation,
                constraint=self.constraint)
        else:
            # Packed q/k/v projection weight.
            return multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, out_proj_weight, self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attn_mask,
                activation=activation,
                constraint=self.constraint)
def multi_head_attention_forward(
        query,  # type: torch.Tensor
        key,  # type: torch.Tensor
        value,  # type: torch.Tensor
        embed_dim_to_check,  # type: int
        num_heads,  # type: int
        in_proj_weight,  # type: torch.Tensor
        in_proj_bias,  # type: torch.Tensor
        bias_k,  # type: Optional[torch.Tensor]
        bias_v,  # type: Optional[torch.Tensor]
        add_zero_attn,  # type: bool
        dropout_p,  # type: float
        out_proj_weight,  # type: torch.Tensor
        out_proj_bias,  # type: torch.Tensor
        training=True,  # type: bool
        key_padding_mask=None,  # type: Optional[torch.Tensor]
        need_weights=True,  # type: bool
        attn_mask=None,  # type: Optional[torch.Tensor]
        use_separate_proj_weight=False,  # type: bool
        q_proj_weight=None,  # type: Optional[torch.Tensor]
        k_proj_weight=None,  # type: Optional[torch.Tensor]
        v_proj_weight=None,  # type: Optional[torch.Tensor]
        static_k=None,  # type: Optional[torch.Tensor]
        static_v=None,  # type: Optional[torch.Tensor]
        activation="softmax",  # type: Optional[str]
        constraint=None,  # type: Optional[bool]
):
    # type: (...) -> Tuple[torch.Tensor, Optional[torch.Tensor]]
    """Functional multi-head attention, extended from the torch version with
    a configurable attention `activation` and an optional weight `constraint`
    (which additionally passes keys and values through softplus).

    Args:
        query, key, value: map a query and a set of key-value pairs to an output.
            See "Attention Is All You Need" for more details.
        embed_dim_to_check: total dimension of the model.
        num_heads: parallel attention heads.
        in_proj_weight, in_proj_bias: input projection weight and bias.
        bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
        add_zero_attn: add a new batch of zeros to the key and
                       value sequences at dim=1.
        dropout_p: probability of an element to be zeroed.
        out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if is ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is an binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
        need_weights: output attn_output_weights.
        attn_mask: 2D or 3D mask that prevents attention to certain positions. This is an additive mask
            (i.e. the values will be added to the attention layer). A 2D mask will be broadcasted for all
            the batches while a 3D mask allows to specify a different mask for the entries of each batch.
        use_separate_proj_weight: the function accept the proj. weights for query, key,
            and value in different forms. If false, in_proj_weight will be used, which is
            a combination of q_proj_weight, k_proj_weight, v_proj_weight.
        q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
        static_k, static_v: static key and value used for attention operators.
        activation: Activation applied to the attention coefficients. 'identity', 'softmax' or 'sigmoid'.
        constraint: if not None, keys and values are passed through softplus
            before the attention product (keeps them positive).

    Shape:
        Inputs:
        - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
          the embedding dimension.
        - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length.
        - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
          3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
          S is the source sequence length.
        - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.

        Outputs:
        - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
          E is the embedding dimension.
        - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
          L is the target sequence length, S is the source sequence length
          (here returned per head, transposed to :math:`(num_heads, N, L, S)`).
    """
    tgt_len, bsz, embed_dim = query.size()
    assert embed_dim == embed_dim_to_check
    assert key.size() == value.size()

    head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
    scaling = float(head_dim) ** -0.5

    # --- Input projections (packed weight, with fast paths for
    # self-attention and shared key/value) ---------------------------------
    if not use_separate_proj_weight:
        if torch.equal(query, key) and torch.equal(key, value):
            # self-attention
            q, k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
        elif torch.equal(key, value):
            # encoder-decoder attention
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)

            if key is None:
                assert value is None
                k = None
                v = None
            else:
                # This is inline in_proj function with in_proj_weight and in_proj_bias
                _b = in_proj_bias
                _start = embed_dim
                _end = None
                _w = in_proj_weight[_start:, :]
                if _b is not None:
                    _b = _b[_start:]
                k, v = F.linear(key, _w, _b).chunk(2, dim=-1)
        else:
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = F.linear(query, _w, _b)

            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim
            _end = embed_dim * 2
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            k = F.linear(key, _w, _b)

            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim * 2
            _end = None
            _w = in_proj_weight[_start:, :]
            if _b is not None:
                _b = _b[_start:]
            v = F.linear(value, _w, _b)
    else:
        # --- Input projections with separate q/k/v weights ----------------
        q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
        len1, len2 = q_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == query.size(-1)

        k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
        len1, len2 = k_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == key.size(-1)

        v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
        len1, len2 = v_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == value.size(-1)

        if in_proj_bias is not None:
            q = F.linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
            k = F.linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
            v = F.linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
        else:
            q = F.linear(query, q_proj_weight_non_opt, in_proj_bias)
            k = F.linear(key, k_proj_weight_non_opt, in_proj_bias)
            v = F.linear(value, v_proj_weight_non_opt, in_proj_bias)
    # Scale queries by 1/sqrt(head_dim).
    q = q * scaling

    # --- Normalise attn_mask to 3D ----------------------------------------
    if attn_mask is not None:
        if attn_mask.dim() == 2:
            attn_mask = attn_mask.unsqueeze(0)
            if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
                raise RuntimeError('The size of the 2D attn_mask is not correct.')
        elif attn_mask.dim() == 3:
            if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
                raise RuntimeError('The size of the 3D attn_mask is not correct.')
        else:
            raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
        # attn_mask's dim is 3 now.

    # Optional learned bias appended to the key/value sequences.
    if bias_k is not None and bias_v is not None:
        if static_k is None and static_v is None:
            k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = F.pad(attn_mask, (0, 1))
            if key_padding_mask is not None:
                key_padding_mask = F.pad(key_padding_mask, (0, 1))
        else:
            assert static_k is None, "bias cannot be added to static key."
            assert static_v is None, "bias cannot be added to static value."
    else:
        assert bias_k is None
        assert bias_v is None

    # Reshape to (batch*heads, seq_len, head_dim) for batched matmul.
    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if k is not None:
        k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    if v is not None:
        v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)

    if static_k is not None:
        assert static_k.size(0) == bsz * num_heads
        assert static_k.size(2) == head_dim
        k = static_k

    if static_v is not None:
        assert static_v.size(0) == bsz * num_heads
        assert static_v.size(2) == head_dim
        v = static_v

    src_len = k.size(1)

    if key_padding_mask is not None:
        assert key_padding_mask.size(0) == bsz
        assert key_padding_mask.size(1) == src_len

    # Optionally append an all-zero key/value slot.
    if add_zero_attn:
        src_len += 1
        k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
        v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
        if attn_mask is not None:
            attn_mask = F.pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = F.pad(key_padding_mask, (0, 1))

    if constraint is not None:
        # Keep keys and values positive under a constrained parameterisation.
        k = F.softplus(k)
        v = F.softplus(v)

    attn_output_weights = torch.bmm(q, k.transpose(1, 2))
    assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]

    if attn_mask is not None:
        if activation == "identity":
            # With no normalising activation the additive -inf mask is
            # replaced by a multiplicative exp(mask) (exp(-inf) == 0).
            attn_mask = torch.exp(attn_mask)
            attn_output_weights = attn_output_weights * attn_mask
        else:
            attn_output_weights += attn_mask

    if key_padding_mask is not None:
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        attn_output_weights = attn_output_weights.masked_fill(
            key_padding_mask.unsqueeze(1).unsqueeze(2),
            float('-inf'),
        )
        attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)

    # Attention coefficient activation.
    # NOTE(review): with activation="identity" and a key_padding_mask, the
    # -inf fills above are not mapped to 0 by any activation — confirm that
    # identity is never combined with key padding.
    if activation == "identity":
        attn_output_weights = attn_output_weights
    elif activation == "sigmoid":
        attn_output_weights = torch.sigmoid(attn_output_weights)
    elif activation == "softmax":
        attn_output_weights = F.softmax(attn_output_weights, dim=-1)
    else:
        raise Exception("activation either identity, sigmoid or softmax")

    attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training)

    attn_output = torch.bmm(attn_output_weights, v)
    assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
    attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias)

    if need_weights:
        # average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        # return attn_output, attn_output_weights.sum(dim=1) / num_heads
        return attn_output, attn_output_weights.transpose(0, 1)
    else:
        return attn_output, None
| 25,378 | 46.975425 | 117 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/searchsorted.py | import torch as th
from torchsearchsorted import searchsorted as ss
from typing import Optional
def searchsorted(
        a: th.Tensor, v: th.Tensor, mask: Optional[th.Tensor] = None):
    """Batch-wise binary search of `v` into the sorted rows of `a`.

    Args:
        a: [B,L] The row sorted array to tree sort batch-wise.
        v: [B,T] The queries for the tree sort.
        mask: [B,L] A mask for `a` (1 for valid entries, 0 for padding).
            Default is no mask.

    Returns:
        [B,T] Indices such that a[i, j-1] < v[i] <= a[i, j]. Masked
        (padding) positions of `a` can never be returned: every query maps
        at most to the slot just past the last valid entry.
    """
    if mask is not None:
        # Shift each padded entry of `a` above max(v) by a cumulatively
        # increasing amount, so rows stay sorted and queries always land at
        # or before the first padded slot.
        mask_shift = 1 - mask
        mask_shift = th.cumsum(mask_shift, dim=-1)
        min_a, max_v = th.min(a), th.max(v)
        shift_value = max_v - min_a + 1
        mask_shift = mask_shift * shift_value
        a = a + mask_shift
    # `torch.searchsorted` with right=False (the default) returns the
    # leftmost j with v <= a[i, j], i.e. a[i, j-1] < v <= a[i, j] — the same
    # contract as the deprecated `torchsearchsorted` package used before.
    idxs = th.searchsorted(a, v)
    return idxs
def searchsorted_marked(
        a: th.Tensor,
        v: th.Tensor,
        mask: Optional[th.Tensor] = None):
    """Run a batch-wise `searchsorted` independently for every mark.

    Args:
        a: [B,M,L] The row sorted array to tree sort batch-wise.
        v: [B,T] The queries for the tree sort.
        mask: [B,M,L] A mask for `a`. Default is no mask.

    Returns:
        [B,M,T] Indices such that a[i, j-1] < v[i] <= a[i, j].
    """
    (b, n_marks), t = a.shape[:-1], v.shape[-1]
    out = th.zeros(size=(b, n_marks, t), dtype=th.long, device=a.device)
    for mark in range(n_marks):
        mark_mask = mask[:, mark, :] if mask is not None else None
        out[:, mark, :] = searchsorted(a=a[:, mark, :], v=v, mask=mark_mask)
    return out
| 1,750 | 29.719298 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/logging.py | import numpy as np
import pandas as pd
import tabulate
import torch as th
from typing import Optional
from argparse import Namespace
# Remove tabulate's default column padding so the status tables printed by
# `get_status` stay as compact as possible.
tabulate.MIN_PADDING = 0
def _format_key(x, split=".", key_length=5):
x = x.split(split)
x = [y[:key_length] for y in x]
x = split.join(x)
return x
def _format_dict(x):
    """Render `x` as '<short key> <value>' pairs, key-sorted, ' | '-joined."""
    pairs = [
        "{} {:03f}".format(_format_key(key), x[key])
        for key in sorted(x.keys())]
    return " | ".join(pairs)
def get_summary(parameters, formatted=True):
    """Mean of every parameter tensor, optionally rendered as one string.

    Args:
        parameters: Mapping of name -> tensor.
        formatted: If True, return a display string; otherwise the raw dict.
    """
    summary = {}
    for name, tensor in parameters.items():
        summary[name] = np.mean(tensor.cpu().detach().numpy())
    return _format_dict(summary) if formatted else summary
def get_log_dicts(
        args: Namespace,
        epoch: int,
        lr: float,
        lr_poisson: float,
        parameters: dict,
        train_loss: th.float,
        val_metrics: dict,
        cnt_wait: int,
        key_length=5):
    """Assemble the per-epoch logging payload.

    Returns a dict with three sections: "train" (progress/lr/patience),
    "params" (mean parameter values split into encoder/decoder/other, with
    keys abbreviated to `key_length` chars per dotted segment), and
    "metrics" (train/val losses plus optional evaluation metrics).
    """
    train = {
        "epoch": f"{epoch}/{args.train_epochs}",
        "lr": lr,
        "lr_poisson": lr_poisson,
        "patience": f"{cnt_wait}/{args.patience}"}
    # Mean value per parameter, keyed by the module path.
    params = get_summary(parameters, formatted=False)
    # Partition parameters by whether their path goes through the encoder,
    # the decoder, or neither.
    encoder_params = {k: v for k, v in params.items() if ".encoder." in k}
    decoder_params = {k: v for k, v in params.items() if ".decoder." in k}
    other_params = {k: v for k, v in params.items() if k not in
                    set(encoder_params.keys()).union(
                        set(decoder_params.keys()))}
    # Strip the common prefix, then abbreviate the remaining dotted key.
    encoder_params = {
        k.split(".encoder.")[-1]: v for k, v in encoder_params.items()}
    encoder_params = {
        _format_key(k, key_length=key_length): v
        for k, v in encoder_params.items()}
    decoder_params = {
        k.split(".decoder.")[-1]: v for k, v in decoder_params.items()}
    decoder_params = {
        _format_key(k, key_length=key_length): v
        for k, v in decoder_params.items()}
    other_params = {
        _format_key(k, key_length=key_length): v for
        k, v in other_params.items()}
    params = {
        "encoder": encoder_params,
        "decoder": decoder_params,
        "other": other_params}
    metrics = {"train_loss": train_loss,
               "val_loss": val_metrics["loss"]}
    if args.eval_metrics:
        # Multi-label runs report ROC-AUCs; single-label runs report
        # weighted precision/recall/F1/accuracy.
        if args.multi_labels:
            metrics.update({
                "roc_auc_macro": val_metrics["roc_auc_macro"],
                "roc_auc_micro": val_metrics["roc_auc_micro"],
                "roc_auc_weighted": val_metrics["roc_auc_weighted"]})
        else:
            metrics.update({
                "pre_weighted": val_metrics["pre_weighted"],
                "rec_weighted": val_metrics["rec_weighted"],
                "f1_weighted": val_metrics["f1_weighted"],
                "acc_weighted": val_metrics["acc_weighted"]})
    return {"train": train, "params": params, "metrics": metrics}
def get_status(
        args: Namespace,
        epoch: int,
        lr: float,
        lr_poisson: float,
        parameters: dict,
        train_loss: th.float,
        val_metrics: dict,
        cnt_wait: int,
        print_header_freq: Optional[int] = 10,
        key_length: Optional[int] = 5):
    """Format the epoch status as one row of a tabulate grid table.

    The full header is re-emitted every `print_header_freq` epochs; other
    epochs return only the data row (so successive prints line up under the
    last header).
    """
    log_dicts = get_log_dicts(
        args=args, epoch=epoch, lr=lr, lr_poisson=lr_poisson,
        parameters=parameters, train_loss=train_loss, val_metrics=val_metrics,
        cnt_wait=cnt_wait)
    # Parameter column labels become multi-line headers (dots -> newlines)
    # so the table stays narrow.
    params_dict = log_dicts.pop("params")
    for k, v in params_dict.items():
        params_dict[k] = {k1.replace(".", "\n"): v1 for k1, v1 in v.items()}
    params_dict = {
        _format_key(k, key_length=key_length): v
        for k, v in params_dict.items()}
    log_dicts = {
        _format_key(k, key_length=key_length):
            {_format_key(k1, key_length=key_length): v1
             for k1, v1 in v.items()}
        for k, v in log_dicts.items()}
    log_dicts.update(params_dict)
    # Wrap every scalar in a single-element list so each section becomes a
    # one-row DataFrame; concat side-by-side with a two-level column index.
    log_dicts = {
        k: {k1: [v1] for k1, v1 in v.items()} for k, v in log_dicts.items()}
    log_dicts = {k: pd.DataFrame(v) for k, v in log_dicts.items()}
    log_df = pd.concat(log_dicts.values(), axis=1, keys=log_dicts.keys())
    h = list(map('\n'.join, log_df.columns.tolist()))
    msg_str = tabulate.tabulate(
        log_df, headers=h, tablefmt='grid', showindex=False, floatfmt=".4f",
        numalign="center", stralign="center")
    # The last-but-one line of a grid table is the (single) data row;
    # everything above it is the header block.
    msg_split = msg_str.split("\n")
    header = msg_split[:-2]
    record = msg_split[-2]
    msg = record
    if epoch % print_header_freq == 0:
        msg = "\n".join(header + [msg])
    return msg
def get_status_old(
        args: Namespace,
        epoch: int,
        lr: float,
        parameters: dict,
        train_loss: th.float,
        val_metrics: dict,
        cnt_wait: int) -> str:
    """Build the legacy single-line training status string."""
    parts = [
        f"epoch {epoch}/{args.train_epochs} |",
        f" lr: {lr:.4f} | ",
        get_summary(parameters),
        f" | train_loss: {train_loss:.4f}",
        f" | val_loss: {val_metrics['loss']:.4f}"]
    if args.eval_metrics:
        parts.append(f" | precision: {val_metrics['pre_macro']:.4f}")
        parts.append(f" | recall: {val_metrics['rec_macro']:.4f}")
        parts.append(f" | F1: {val_metrics['f1_macro']:.4f}")
    parts.append(f" | patience: {cnt_wait}/{args.patience}")
    return "".join(parts)
| 5,292 | 30.135294 | 78 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/utils.py | import torch as th
def smallest_positive(inputs, dim):
    """
    Args
        inputs: 3d array [B,T,L].
        dim: dimension on which the largest tj lower than t is evaluated.

    Return
        (delta_t, idx_delta_t), is_candidate:
            delta_t: t - tj, where tj is the largest value lower than t
            idx_delta_t: position of the largest tj lower than t
            is_candidate: mask to remove padded points
    """
    positive = inputs > 0  # [B,T,L]
    # Push every non-positive entry above the global maximum, so the min
    # below can only select a strictly positive value (when one exists).
    offset = (th.max(inputs) - th.min(inputs)) * 2  # scalar
    shifted = inputs + offset * (~positive).type(inputs.dtype)  # [B,T,L]
    # A position is valid iff at least one entry along the last axis is
    # strictly positive.
    is_candidate = (positive.sum(dim=2) > 0).type(inputs.dtype)  # [B,T]
    result = th.min(shifted, dim=dim)  # [B,T]
    return result, is_candidate  # [B,T], [B,T]
| 1,196 | 41.75 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/data.py | import os
from torch.utils.data import DataLoader
from argparse import Namespace
from typing import Optional
from tpp.processes.multi_class_dataset import MultiClassDataset as Dataset
def get_loader(
        dataset: Dataset,
        args: Namespace,
        shuffle: Optional[bool] = True) -> DataLoader:
    """Wrap `dataset` in a DataLoader using the batch size from `args`."""
    loader = DataLoader(
        dataset,
        collate_fn=Dataset.to_features,
        batch_size=args.batch_size,
        shuffle=shuffle)
    return loader
def load_data(args: Namespace) -> dict:
    """Build train/val/test datasets, offsetting seeds so splits differ."""
    split_specs = {
        "train": (args.train_size, args.seed),
        "val": (args.val_size, args.seed + args.train_size),
        "test": (args.test_size, args.seed + args.train_size + args.val_size)}
    if args.verbose:
        print("Generating datasets...")
    datasets = {}
    for split, (size, seed) in split_specs.items():
        datasets[split] = Dataset(args=args, size=size, seed=seed, name=split)
    if args.verbose:
        print("Done! Maximum sequence lengths:")
        for split, dataset in datasets.items():
            print("{}: {}".format(split, max(dataset.lengths)))
    return datasets
| 1,153 | 27.146341 | 78 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/run.py | import git
import torch
import numpy as np
def check_repo(allow_uncommitted):
    """Return the current git repository, refusing a dirty working tree.

    Args:
        allow_uncommitted: If truthy, uncommitted changes are tolerated.

    Raises:
        Warning: if the working tree is dirty and `allow_uncommitted` is
            falsy. NOTE(review): raising a `Warning` instance (rather than
            calling `warnings.warn` or raising a regular exception) is
            unusual but kept as-is — callers may rely on this exception
            type.
    """
    repo = git.Repo()
    if repo.is_dirty() and not allow_uncommitted:
        raise Warning("Repo contains uncommitted changes!")
    return repo
def set_seed(seed):
    """Seed NumPy and PyTorch (CPU and every CUDA device) RNGs."""
    for seeder in (np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
def make_deterministic(seed):
    """Seed all RNGs and force cuDNN into fully deterministic mode."""
    set_seed(seed=seed)
    # Deterministic kernels, at the cost of cuDNN autotuning speedups.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
| 485 | 20.130435 | 59 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/history_bst.py | import torch as th
from typing import Optional, Tuple
from tpp.utils.events import Events
from tpp.utils.index import take_2_by_2
from tpp.utils.searchsorted import searchsorted
def get_prev_times(
        query: th.Tensor,
        events: Events,
        allow_window: Optional[bool] = False
) -> Tuple[Tuple[th.Tensor, th.Tensor], th.Tensor, th.Tensor]:
    """For each query, get the event time that directly precedes it. If no
    events precedes it (but the window start does), return the window start.
    Otherwise, mask the value.

    Args:
        query: [B,T] Sequences of query times to evaluate the intensity
            function.
        events: [B,L] Times and labels of events.
        allow_window: If `True`, a previous time can be the window boundary.
            Defaults to `False`.

    Returns:
        `times` is a tuple of tensor of values [B,T] and indices, [B,T] of the
            largest time value in the sequence that is strictly smaller than
            the query time value, or the window. the index only event indexes
            into the events. If the window is returned, it should be dealt with
            explicitly at encoding/decoding time.

        `is_event` is a tensor [B,T] that indicates whether the time
            corresponds to an event or not (a 1 indicates an event and a 0
            indicates a window boundary).

        `mask` is a tensor [B,T] that indicates whether the time difference to
            those times what positive or not.

    """
    # Optionally prepend the window start so it can act as a previous time.
    event_times = events.get_times(prepend_window=allow_window)  # [B,L+1?]
    event_mask = events.get_mask(prepend_window=allow_window)    # [B,L+1?]

    # searchsorted yields the insertion point for each query; stepping back
    # one lands on the last event strictly before it (-1 when none exists).
    prev_times_idxs = searchsorted(
        a=event_times, v=query, mask=event_mask)
    prev_times_idxs = prev_times_idxs - 1

    prev_times = take_2_by_2(event_times, index=prev_times_idxs)  # [B,T]
    # -1 indexes (no preceding time at all) are masked out.
    mask = (prev_times_idxs >= 0).type(event_times.dtype)  # [B,T]

    if allow_window:
        # If the first event shares a time with the window boundary, that the
        # index returned is the index of the event, rather than the window
        # boundary.
        idx_is_window = (prev_times_idxs == 0).type(
            prev_times_idxs.dtype)  # [B,T]
        do_idx_shift = events.first_event_on_window.type(
            idx_is_window.dtype)  # [B]
        idx_shift = idx_is_window * do_idx_shift.reshape(-1, 1)
        prev_times_idxs = prev_times_idxs + idx_shift

        # Check the indexes in case one of the window indexes became an event.
        is_event = (prev_times_idxs != 0).type(mask.dtype)  # [B,T]
    else:
        # Without the prepended window, every retained index is an event.
        is_event = th.ones_like(prev_times_idxs)  # [B,T]

    # Queries outside the observation window are also masked out.
    query_within_window = events.within_window(query=query)  # [B,T]
    mask = mask * query_within_window  # [B,T]

    return (prev_times, prev_times_idxs), is_event, mask  # [B,T]
| 2,991 | 42.362319 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/stability.py | import torch as th
from typing import Optional
def epsilon(eps=1e-30, dtype=th.float32, device=None):
    """Return `eps` as a scalar tensor of the requested dtype and device."""
    return th.tensor(eps, device=device, dtype=dtype)
def epsilon_like(x, eps=1e-3):
    """Return a tensor shaped like `x`, filled with `eps` (same dtype/device)."""
    return th.full_like(x, eps)
def log_sub_exp(
        a: th.Tensor,
        b: th.Tensor,
        regularize: Optional[bool] = True) -> th.Tensor:
    """Compute log(exp(a) - exp(b)) safely.

    Requires `a >= b` elementwise. Factors out the largest exponent so
    that `exp` never overflows; with `regularize`, a tiny epsilon keeps
    the log argument strictly positive when a == b.
    """
    if th.any(a < b):
        raise ValueError(
            "All elements of exponent `a` ({}) must be at least as large "
            "as `b` ({}).".format(a, b))
    shift = th.max(a)
    diff = th.exp(a - shift) - th.exp(b - shift)
    if regularize:
        diff = diff + epsilon(dtype=diff.dtype, device=diff.device)
    return th.log(diff) + shift
def subtract_exp(a: th.Tensor, b: th.Tensor) -> th.Tensor:
    """Compute th.exp(a) - th.exp(b) safely."""
    # Order the exponents so the subtraction inside the log is valid.
    swapped = (b > a).type(a.dtype)
    hi = a + swapped * (b - a)
    lo = b + swapped * (a - b)
    magnitude = th.exp(log_sub_exp(a=hi, b=lo))
    # Restore the sign wherever the operands were swapped.
    return magnitude * th.pow(-1, swapped)
def check_tensor(
        t: th.Tensor,
        positive: Optional[bool] = False,
        strict: Optional[bool] = False):
    """Check if a tensor is valid """
    # NaNs and infinities are never acceptable.
    assert th.isnan(t).sum() == 0
    assert th.isinf(t).sum() == 0
    if positive:
        # `strict` demands > 0 everywhere; otherwise >= 0 is enough.
        bad = (t <= 0.) if strict else (t < 0.)
        assert bad.sum() == 0
| 1,660 | 26.683333 | 76 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/plot.py | import json
import mlflow
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import torch as th
from argparse import Namespace
from matplotlib.figure import Figure
from pathlib import Path
from torch.utils.data import DataLoader
from typing import Dict, List, Optional, Tuple
from tpp.models.base.process import Process
from tpp.models.hawkes import HawkesProcess
from tpp.utils.data import Dataset, get_loader
from tpp.utils.events import get_events, get_window
from tpp.utils.mlflow import get_epoch_str
sns.set(style="white")
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
def get_test_loader(args: Namespace, seed: int) -> DataLoader:
    """Build a single-sequence, unshuffled test loader for plotting."""
    test_set = Dataset(args=args, size=1, seed=seed, name="test")
    return get_loader(test_set, args=args, shuffle=False)
def fig_hawkes(
        intensities: Dict[str, Tuple[np.ndarray, np.ndarray]],
        cumulative_intensities: Dict[str, Tuple[np.ndarray, np.ndarray]],
        event_times: np.ndarray,
        event_labels: np.ndarray,
        class_names: Dict,
        legend: Optional[bool] = True,
        epoch: Optional[int] = None) -> Tuple[Figure, np.ndarray]:
    """Plot a 1D Hawkes process.py for multiple models.

    Args:
        intensities: A dictionary of time, intensity pairs, each of which is a
            2D numpy array [T,M]. The key will be used as the model label in the
            legend.
        cumulative_intensities: A dictionary of time, cumulative_intensities
            pairs, each of which is a
            2D numpy array [T,M]. The key will be used as the model label in
            the legend.
        event_times: A 1D numpy array of the times of true events (of which the
            above intensities should be related to).
        event_labels: A 1D numpy array of the labels of true events (of which
            the above intensities should be related to).
        class_names: Mapping from (int-parsable) class key to display name.
            If `None`, default names y_0, y_1, ... are used.
        legend: If `True` adds a legend to the intensity plot. Defaults to
            `True`.
        epoch: Epoch on which the model is evaluated

    Returns:
        The figure and axes.

    """
    marks = next(iter(intensities.items()))[-1][-1].shape[-1]
    # Fix: build the default names *before* splitting the dict. Previously
    # this None-check ran after `class_names` had already been overwritten
    # with `list(class_names.values())`, so the fallback was unreachable
    # (and a `None` argument would have crashed on `.keys()`).
    if class_names is None:
        class_names = {
            str(m): "$y_{}$".format(m) for m in range(marks)}
    class_keys = list(class_names.keys())
    class_names = list(class_names.values())

    # Three rows per mark: intensity, cumulative intensity, event raster.
    fig, axs = plt.subplots(
        nrows=3 * marks, ncols=1, figsize=(10, 10), sharex=True,
        gridspec_kw={'height_ratios': [4, 4, 1] * marks})
    intensity_axs = [x for i, x in enumerate(axs) if i % 3 == 0]
    cumulative_intensity_axs = [x for i, x in enumerate(axs) if i % 3 == 1]
    event_axs = [x for i, x in enumerate(axs) if i % 3 == 2]

    for model_name, (query_times, model_intensities) in intensities.items():
        for i, class_key in enumerate(class_keys):
            intensity_ax = intensity_axs[i]
            intensity_ax.plot(
                query_times, model_intensities[:, i], label=model_name)
            intensity_ax.set_ylabel(r"$\lambda(t)_{}$".format(i))

    for model_name, (query_times, cum_model_intensities) in cumulative_intensities.items():
        for i, class_key in enumerate(class_keys):
            cum_intensity_ax = cumulative_intensity_axs[i]
            cum_intensity_ax.plot(
                query_times, cum_model_intensities[:, i], label=model_name)
            cum_intensity_ax.set_ylabel(r"$\Lambda(t)_{}$".format(i))

    # One raster row per class, showing only the events of that class.
    for (class_key, class_name, event_ax) in zip(
            class_keys, class_names, event_axs):
        is_event_type_key = event_labels == int(class_key)
        times_key = event_times[is_event_type_key]
        event_ax.scatter(times_key, 0.5 * np.ones_like(times_key))
        event_ax.set_yticks([0.5])
        event_ax.set_yticklabels([class_name])

    event_axs[-1].set_xlabel(r"$t$", fontsize=24)

    if legend:
        intensity_axs[0].legend(loc='upper left')

    if epoch is not None:
        fig.suptitle("Epoch: " + str(epoch), fontsize=16)

    return fig, axs
def filter_by_mask(x, mask, mask_pos_value=1.):
    """Keep only the entries of `x` whose mask equals `mask_pos_value`."""
    keep = mask == mask_pos_value
    return x[keep]
def fig_src_attn_weights(
        attn_weights: th.Tensor,
        event_times: np.ndarray,
        event_labels: np.ndarray,
        idx_times: np.ndarray,
        class_names: List[str],
        epoch: Optional[int] = None):
    """
    Plot inter-events attention coefficients as an annotated matrix.

    Args:
        attn_weights: Attention weights.
        event_times: A 1D numpy array of the times of true events.
        event_labels: A 1D numpy array of the labels of true events.
        idx_times: a 1D numpy array to give the index of each encounter
        class_names: The names of each class as an order list.
        epoch: Epoch on which the model is evaluated.

    Returns:
        fig: The inter-events attention figure.

    """
    n = event_labels.shape[0]
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(n/3, n/3))
    # Keep only the first n+1 rows/cols (window entry + n events).
    m = attn_weights.cpu().detach().numpy()[:n+1, :n+1]
    # Shift event indices by one for the window row, prepend the window (0).
    idx_times = np.insert(idx_times+1, 0, 0)
    m = m[idx_times][:, idx_times]
    ax.matshow(m, cmap=plt.get_cmap("Blues"))
    event_tick_labels = [str(np.around(i, decimals=1)) for i in event_times]
    # X ticks stack index / time / class name; Y ticks use a compact
    # comma-separated form. The leading entries label the window column/row.
    x_event_tick_labels = [
        str(i) + "\n" + x + "\n{}".format(class_names[l])
        for i, (x, l) in enumerate(zip(event_tick_labels, event_labels))]
    x_event_tick_labels = ["idx\ntime\nclass"] + x_event_tick_labels
    y_event_tick_labels = [
        str(i) + "," + x + ",{}".format(class_names[l])
        for i, (x, l) in enumerate(zip(event_tick_labels, event_labels))]
    y_event_tick_labels = ["w-"] + y_event_tick_labels
    ax.set_xticks(ticks=np.arange(n + 1))
    ax.set_yticks(ticks=np.arange(n + 1))
    ax.set_xticklabels(x_event_tick_labels, fontsize=7)
    ax.set_yticklabels(y_event_tick_labels, fontsize=7)
    # NOTE(review): "rums" below is a typo for "sums", but it is a runtime
    # string rendered on the figure, so it is deliberately left unchanged
    # here.
    ax.set_ylabel(
        "Attention coefficient for event/query (i)\n(each row rums to one)")
    ax.set_xlabel("Attention coefficient for key (j)")
    # Overlay each coefficient as text; white on dark cells for contrast.
    for i, row in enumerate(m):
        for j, value in enumerate(row):
            txt = "{:.1f}".format(value)
            color = "white" if value > 0.6 else "black"
            ax.text(j, i, txt, va='center', ha='center',
                    color=color, fontsize=5)
    if epoch is not None:
        fig.suptitle("Epoch: " + str(epoch), fontsize=16)
    return fig
def fig_tgt_attn_weights(
        attn_weights: th.Tensor,
        event_times: np.ndarray,
        event_labels: np.ndarray,
        idx_times: np.ndarray,
        query: np.ndarray,
        class_names: List[str],
        epoch: Optional[int] = None):
    """
    Plot attention coefficients between events and queries, one subplot
    per attended event (plus the window entry on top and the event raster
    at the bottom).

    Args:
        attn_weights: Attention weights.
        event_times: A 1D numpy array of the times of true events.
        event_labels: A 1D numpy array of the labels of true events.
        idx_times: a 1D numpy array to give the index of each encounter
        query: Queries on which attention coefficients are evaluated
        class_names: The names of each class as an order list.
        epoch: Epoch on which the model is evaluated.

    Returns:
        fig: Attention coefficients between events and queries

    """
    n = event_labels.shape[0]
    m = attn_weights.cpu().detach().numpy()
    # Column 0 is the window; event columns are shifted by one.
    idx_times = np.insert(idx_times+1, 0, 0)
    m = m[:, idx_times]
    fig, axs = plt.subplots(
        nrows=n + 2, ncols=1, figsize=(10, 10), sharex='all')
    # Top subplot: attention paid to the window boundary (labelled -1).
    axs[0].plot(query, m[:, 0])
    axs[0].set_ylim(-0.1, 1.1)
    axs[0].set_yticks(ticks=[0.5])
    axs[0].set_yticklabels([-1])
    # One subplot per event, labelled by the event's class name.
    for i, label in enumerate(event_labels):
        axs[i+1].plot(query, m[:, i+1])
        axs[i+1].set_ylim(-0.1, 1.1)
        axs[i+1].set_yticks(ticks=[0.5])
        axs[i+1].set_yticklabels([class_names[label]])
    # Bottom subplot: raster of the true event times.
    axs[-1].scatter(event_times, 0.5 * np.ones_like(event_times))
    axs[-1].set_ylim(-0.1, 1.1)
    axs[-1].set_yticks(list())
    axs[-1].set_xlabel(r"$t$", fontsize=24)
    if epoch is not None:
        fig.suptitle("Epoch: " + str(epoch), fontsize=16)
    return fig
def save_fig(
        fig: Figure,
        name: str,
        args: Namespace,
        dpi: Optional[int] = 300,
        save_on_mlflow: Optional[bool] = True):
    """
    Separate figure to save figures

    Args:
        fig: The figure to be saved
        name: The relative path (with extension) of the figure
        args: Namespace; must provide `plots_dir`
        dpi: DPI for fig saving.
        save_on_mlflow: Whether to save the figure on mlflow

    Returns:
        name: The path and name of the saved figure

    """
    # Strip the extension robustly. The previous `name[:-4]` assumed a
    # three-character extension and silently mangled anything else
    # (e.g. ".jpeg"); splitext yields the same result for ".jpg".
    mlflow_path = os.path.splitext(name)[0]
    name = os.path.join(args.plots_dir, name)
    fig.savefig(fname=name, dpi=dpi, bbox_inches='tight')
    if save_on_mlflow:
        mlflow.log_artifact(name, mlflow_path)
    return name
def plot_attn_weights(
        model_artifacts: Dict,
        event_times: np.ndarray,
        event_labels: np.ndarray,
        idx_times: np.ndarray,
        query: np.ndarray,
        args: Namespace,
        class_names: List[str],
        epoch: Optional[int] = None,
        images_urls: Optional[Dict] = None,
        save_on_mlflow: Optional[bool] = True):
    """
    Save encoder (inter-event) and decoder (event-query) attention figures
    for every model in `model_artifacts`, one figure per attention head.

    Args:
        model_artifacts: Dict mapping model name to its artifacts dict;
            attention weights are read from
            `artifacts["encoder"|"decoder"]["attention_weights"]`.
        event_times: A 1D numpy array of the times of true events.
        event_labels: A 1D numpy array of the labels of true events.
        idx_times: a 1D numpy array to give the index of each encounter
        query: Queries
        args: Namespace; must provide `plots_dir` and `train_epochs`.
        class_names: The names of each class as an order list. If `None`, then
            classes y_0, y_1, ... are used. Default to `None`.
        epoch: Epoch
        images_urls: urls of figures; appended to in place when given.
        save_on_mlflow: Whether to save the figure on mlflow
    """
    if epoch is not None:
        epoch_str = get_epoch_str(epoch=epoch, max_epochs=args.train_epochs)
        src_name = "epoch_" + epoch_str + ".jpg"
        tgt_name = "epoch_" + epoch_str + ".jpg"
    else:
        src_name = "fig.jpg"
        tgt_name = "fig.jpg"
    for k, v in model_artifacts.items():
        # NOTE(review): this inner loop rebinds `m_v` to `v` whenever `v`
        # itself carries encoder attention weights, and the code below then
        # uses whichever `m_v` survived the loop. The intent is presumably
        # to handle both flat and nested artifact dicts — confirm against
        # the artifact structure produced by the models before relying on
        # it.
        for m_k, m_v in v.items():
            if "encoder" in v.keys() \
                    and "attention_weights" in v["encoder"].keys():
                m_v = v
        if "encoder" in m_v.keys() \
                and "attention_weights" in m_v["encoder"].keys():
            # Multi-head: one figure per head in a head_<i> subfolder.
            if len(m_v["encoder"]["attention_weights"]) > 1:
                for i in range(len(m_v["encoder"]["attention_weights"])):
                    fig = fig_src_attn_weights(
                        attn_weights=
                        m_v["encoder"]["attention_weights"][i][0],
                        event_times=event_times,
                        event_labels=event_labels,
                        idx_times=idx_times,
                        class_names=class_names,
                        epoch=epoch)
                    name = os.path.join(
                        "src_attn", k, "head_" + str(i), src_name)
                    Path(os.path.join(
                        args.plots_dir,
                        "src_attn", k, "head_" + str(i))).mkdir(
                        parents=True, exist_ok=True)
                    url = save_fig(fig=fig, name=name, args=args,
                                   save_on_mlflow=save_on_mlflow)
            else:
                fig = fig_src_attn_weights(
                    attn_weights=
                    m_v["encoder"]["attention_weights"][0][0],
                    event_times=event_times,
                    event_labels=event_labels,
                    idx_times=idx_times,
                    class_names=class_names,
                    epoch=epoch)
                name = os.path.join("src_attn", k, src_name)
                Path(os.path.join(args.plots_dir, "src_attn", k)).mkdir(
                    parents=True, exist_ok=True)
                url = save_fig(fig=fig, name=name, args=args,
                               save_on_mlflow=save_on_mlflow)
            # Only the last saved figure's url is recorded per model.
            if images_urls is not None:
                images_urls['src_attn'].append(url)
        if "decoder" in m_v.keys() \
                and "attention_weights" in m_v["decoder"].keys():
            if len(m_v["decoder"]["attention_weights"]) > 1:
                for i in range(len(m_v["decoder"]["attention_weights"])):
                    fig = fig_tgt_attn_weights(
                        attn_weights=
                        m_v["decoder"]["attention_weights"][i][0],
                        event_times=event_times,
                        event_labels=event_labels,
                        idx_times=idx_times,
                        query=query,
                        class_names=class_names,
                        epoch=epoch)
                    name = os.path.join(
                        "tgt_attn", k, "head_" + str(i), tgt_name)
                    Path(os.path.join(
                        args.plots_dir,
                        "tgt_attn", k, "head_" + str(i))).mkdir(
                        parents=True, exist_ok=True)
                    url = save_fig(fig=fig, name=name, args=args,
                                   save_on_mlflow=save_on_mlflow)
            else:
                fig = fig_tgt_attn_weights(
                    attn_weights=
                    m_v["decoder"]["attention_weights"][0][0],
                    event_times=event_times,
                    event_labels=event_labels,
                    idx_times=idx_times,
                    query=query,
                    class_names=class_names,
                    epoch=epoch)
                name = os.path.join("tgt_attn", k, tgt_name)
                Path(os.path.join(args.plots_dir, "tgt_attn", k)).mkdir(
                    parents=True, exist_ok=True)
                url = save_fig(fig=fig, name=name, args=args,
                               save_on_mlflow=save_on_mlflow)
            if images_urls is not None:
                images_urls['tgt_attn'].append(url)
    return images_urls
def log_figures(
        model: Process,
        test_loader: DataLoader,
        args: Namespace,
        epoch: Optional[int] = None,
        images_urls: Optional[dict] = None,
        save_on_mlflow: Optional[bool] = True):
    """Plot and save intensity and attention figures for one test sequence.

    Evaluates `model` (and, for synthetic data, the ground-truth Hawkes
    process built from `args.alpha/beta/mu`) on the longest sequence of the
    first test batch, saves per-head attention figures and the intensity
    figure, and returns `images_urls` with the saved paths appended.
    """
    models = dict()
    models[model.name.replace("_", "-")] = model
    # For synthetic (Hawkes) data, also plot the ground-truth process.
    if args.load_from_dir in [None, "hawkes"]:
        true_model = HawkesProcess(marks=args.marks)
        true_model.alpha.data = th.tensor(args.alpha)
        true_model.beta.data = th.tensor(args.beta)
        true_model.mu.data = th.tensor(args.mu)
        true_model.to(args.device)
        models["ground truth"] = true_model

    # Pick the longest sequence in the first batch, truncated to 20 events.
    batch = next(iter(test_loader))
    times, labels = batch["times"], batch["labels"]
    times, labels = times.to(args.device), labels.to(args.device)
    length = (times != args.padding_id).sum(-1)
    i = th.argmax(length)
    times = times[i][:20].reshape(1, -1)
    labels = labels[i][:20].reshape(1, -1, args.marks)
    mask = (times != args.padding_id).type(times.dtype)
    times = times * args.time_scale
    window_start, window_end = get_window(times=times, window=args.window)
    events = get_events(
        times=times, mask=mask, labels=labels,
        window_start=window_start, window_end=window_end)

    # Query grid over the observation window (500 evaluation points).
    if args.window is not None:
        query = th.linspace(
            start=0.001, end=args.window, steps=500)
    else:
        query = th.linspace(
            start=0.001, end=float(events.window_end[0]), steps=500)
    query = query.reshape(1, -1)
    query = query.to(device=args.device)

    # Flatten one-hot labels into (time, class-index) pairs, drop padding.
    event_times = events.times.cpu().detach().numpy().reshape(-1)
    event_labels = events.labels.cpu().detach().numpy().reshape(
        event_times.shape[0], -1)
    idx_times = np.where(event_labels == 1.)[0]
    event_times = event_times[idx_times]
    event_labels = np.where((event_labels == 1.))[1]

    unpadded = event_times != args.padding_id
    event_times, event_labels = event_times[unpadded], event_labels[unpadded]

    model_intensities = {
        k: m.intensity(query=query, events=events) for k, m in models.items()}
    model_intensities = {
        k: (ints.cpu().detach().numpy()[0], mask.cpu().detach().numpy()[0])
        for k, (ints, mask) in model_intensities.items()}

    # artifacts() returns (…, cumulative intensity, its mask, extras).
    model_artifacts = {k:
        m.artifacts(query=query, events=events) for k, m in models.items()}
    model_cumulative_intensities = {
        k: (v[1], v[2]) for k, v in model_artifacts.items()}
    model_cumulative_intensities = {
        k: (ints.cpu().detach().numpy()[0], mask.cpu().detach().numpy()[0])
        for k, (ints, mask) in model_cumulative_intensities.items()}
    model_artifacts = {k: v[3] for k, v in model_artifacts.items()}

    # Class-id -> display-name mappings written at dataset creation time.
    with open(os.path.join(
            args.save_dir, 'int_to_codes_to_plot.json'), 'r') as h:
        int_to_codes_to_plot = json.load(h)
    with open(os.path.join(
            args.save_dir, 'int_to_codes.json'), 'r') as h:
        int_to_codes = json.load(h)
    with open(os.path.join(args.save_dir, 'codes_to_names.json'), 'r') as h:
        codes_to_names = json.load(h)

    int_to_names_to_plot = {
        k: codes_to_names[v] for k, v in int_to_codes_to_plot.items()}
    int_to_names = {k: codes_to_names[v] for k, v in int_to_codes.items()}

    # Apply the masks and keep only the classes selected for plotting.
    query = query.cpu().detach().numpy()[0]
    model_intensities = {
        k: (filter_by_mask(query, mask=mask),
            filter_by_mask(ints, mask=mask))
        for k, (ints, mask) in model_intensities.items()}
    model_intensities = {
        k: (q, ints[:, [int(i) for i in int_to_names_to_plot.keys()]])
        for k, (q, ints) in model_intensities.items()}

    model_cumulative_intensities = {
        k: (filter_by_mask(query, mask=mask),
            filter_by_mask(ints, mask=mask))
        for k, (ints, mask) in model_cumulative_intensities.items()}
    model_cumulative_intensities = {
        k: (q, ints[:, [int(i) for i in int_to_names_to_plot.keys()]])
        for k, (q, ints) in model_cumulative_intensities.items()}

    images_urls = plot_attn_weights(
        model_artifacts=model_artifacts,
        event_times=event_times,
        event_labels=event_labels,
        idx_times=idx_times,
        query=query,
        args=args,
        class_names=list(int_to_names.values()),
        epoch=epoch,
        images_urls=images_urls)

    f, a = fig_hawkes(
        intensities=model_intensities,
        cumulative_intensities=model_cumulative_intensities,
        event_times=event_times,
        event_labels=event_labels,
        class_names=int_to_names_to_plot,
        epoch=epoch)

    if epoch is not None:
        epoch_str = get_epoch_str(epoch=epoch, max_epochs=args.train_epochs)

    intensity_dir = os.path.join(args.plots_dir, "intensity")
    if epoch is not None:
        plot_path = os.path.join(intensity_dir, "epoch_" + epoch_str + ".jpg")
    else:
        plot_path = intensity_dir + ".jpg"
    f.savefig(plot_path, dpi=300, bbox_inches='tight')

    if save_on_mlflow:
        assert epoch is not None, "Epoch must not be None with mlflow active"
        mlflow.log_artifact(plot_path, "intensity/epoch_" + epoch_str)

    if images_urls is not None:
        images_urls['intensity'].append(plot_path)

    return images_urls
| 19,536 | 38.70935 | 91 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/transformer_utils.py | import copy
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from tpp.pytorch.activations import ACTIVATIONS
from tpp.pytorch.activations import AdaptiveGumbel
from tpp.pytorch.activations import AdaptiveGumbelSoftplus
from tpp.pytorch.layers import LAYER_CLASSES
from tpp.pytorch.layers import BatchNorm1d, LayerNorm
from tpp.utils.multi_head_attention import MultiheadAttention
def _get_clones(module, n):
return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])
def generate_square_subsequent_mask(sz, device):
    """Generate a square mask for the sequence. The masked positions are
    filled with float('-inf'). Unmasked positions are filled with
    float(0.0).
    """
    # Lower-triangular booleans: position i may attend to positions <= i.
    allowed = th.tril(th.ones(sz, sz) == 1).to(device)
    mask = th.zeros(sz, sz, device=device)
    return mask.masked_fill(~allowed, float('-inf'))
class TransformerEncoderNetwork(nn.Module):
    """TransformerEncoder is a stack of N encoder layers

    Args:
        encoder_layer: an instance of the TransformerEncoderLayer() class
            (required).
        num_layers: the number of sub-encoder-layers in the encoder (required).
        norm: the layer normalization component (optional).

    Examples::
        >>> encoder_layer = nn.TransformerEncoderLayer(
                d_model=512, nhead=8)
        >>> transformer_encoder = nn.TransformerEncoder(
                encoder_layer, num_layers=6)
        >>> src = torch.rand(10, 32, 512)
        >>> out = transformer_encoder(src)
    """

    def __init__(self, encoder_layer, num_layers, norm=None):
        super(TransformerEncoderNetwork, self).__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src, mask=None, src_key_padding_mask=None):
        r"""Pass the input through the encoder layers in turn.

        Args:
            src: the sequence to the encoder (required).
            mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch
                (optional).

        Shape:
            see the docs in Transformer class.
        """
        output = src
        for layer in self.layers:
            output = layer(output, src_mask=mask,
                           src_key_padding_mask=src_key_padding_mask)

        if self.norm:
            # NOTE(review): the layers return (src, attn_weights) tuples,
            # so a non-None `norm` would receive a tuple here — presumably
            # `norm` is always None in practice; confirm before supplying
            # one.
            output = self.norm(output)

        return output
class TransformerEncoderLayer(nn.Module):
    """TransformerEncoderLayer is made up of self-attn and feedforward network.
    This standard encoder layer is based on the paper
    "Attention Is All You Need" (Vaswani et al., 2017). Users may modify or
    implement in a different way during application.

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multiheadattention models (required).
        dim_feedforward: the dimension of the
            feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        activation: the activation function of intermediate layer,
            relu or gelu (default=relu).
        attn_activation: activation used on the attention coefficients.

    Examples::
        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
        >>> src = torch.rand(10, 32, 512)
        >>> out = encoder_layer(src)
    """

    def __init__(
            self,
            d_model,
            nhead,
            dim_feedforward,
            dropout,
            activation="relu",
            attn_activation="softmax"):
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        # Position-wise feed-forward sub-block.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = ACTIVATIONS[activation]
        self.attn_activation = attn_activation

    def forward(self, src, src_mask=None, src_key_padding_mask=None):
        """Pass the input through the encoder layer.

        Args:
            src: the sequence to the encoder layer (required). May be a
                (src, attn_weights) tuple from a previous layer.
            src_mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the
                src keys per batch (optional).

        Shape:
            see the docs in Transformer class.
        """
        # A previous layer returns (src, attn_weights); unwrap it.
        if type(src) == tuple:
            src = src[0]
        attn_out, attn_weights = self.self_attn(
            src, src, src,
            attn_mask=src_mask,
            need_weights=True,
            key_padding_mask=src_key_padding_mask,
            activation=self.attn_activation)
        # Residual connection + post-norm around the attention block.
        src = self.norm1(src + self.dropout1(attn_out))
        # Feed-forward block; the hasattr guard keeps old checkpoints
        # (saved without `activation`) loadable.
        if hasattr(self, "activation"):
            hidden = self.activation(self.linear1(src))
        else:  # for backward compatibility
            hidden = F.relu(self.linear1(src))
        ff_out = self.linear2(self.dropout(hidden))
        src = self.norm2(src + self.dropout2(ff_out))
        return src, attn_weights
class TransformerDecoderNetwork(nn.Module):
    """TransformerDecoder is a stack of N decoder layers

    Args:
        decoder_layer: an instance of the TransformerDecoderLayer()
        num_layers: the number of sub-decoder-layers in the decoder.
        norm: the layer normalization component (optional).
    """

    def __init__(self, decoder_layer, num_layers, norm=None):
        super(TransformerDecoderNetwork, self).__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(
            self,
            tgt,
            memory,
            tgt_mask=None,
            memory_mask=None,
            tgt_key_padding_mask=None,
            memory_key_padding_mask=None,
            update_running_stats=True):
        """Pass the inputs (and mask) through the decoder layers in turn.

        Args:
            tgt: the sequence to the decoder (required).
            memory: the sequence from the last layer of the encoder
                (required).
            tgt_mask: the mask for the tgt sequence (optional).
            memory_mask: the mask for the memory sequence (optional).
            tgt_key_padding_mask: the mask for the tgt keys per batch
                (optional).
            memory_key_padding_mask: the mask for the memory keys per batch
                (optional).
            update_running_stats: whether running stats are updated or not
                (optional).

        Shape:
            see the docs in Transformer class.
        """
        output = tgt
        for layer in self.layers:
            output = layer(
                output, memory,
                tgt_mask=tgt_mask,
                memory_mask=memory_mask,
                tgt_key_padding_mask=tgt_key_padding_mask,
                memory_key_padding_mask=memory_key_padding_mask,
                update_running_stats=update_running_stats)

        if self.norm:
            # NOTE(review): the layers return (tgt, attn_weights) tuples,
            # so a non-None `norm` would receive a tuple here — presumably
            # `norm` is always None in practice; confirm before supplying
            # one.
            output = self.norm(output)

        return output
class TransformerDecoderLayer(nn.Module):
    """TransformerDecoderLayer is made up of self-attn, multi-head-attn and
    feedforward network. This standard decoder layer is based on the paper
    "Attention Is All You Need". Ashish Vaswani, Noam Shazeer, Niki Parmar,
    Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
    Illia Polosukhin. 2017. Attention is all you need. In Advances in
    Neural Information Processing Systems, pages 6000-6010. Users may modify
    or implement in a different way during application.

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multiheadattention models (required).
        dim_feedforward: the dimension of the feedforward network model
            (default=2048).
        dropout: the dropout value (default=0.1).
        constraint: optional weight constraint; selects a constrained
            linear-layer class from LAYER_CLASSES for the feed-forward
            block and is forwarded to the attention module.
        activation: the activation function of intermediate layer, relu or gelu
            (default=relu).
        attn_activation: 'identity', 'softmax' or 'sigmoid' to use for the
            attention coefficients.
        normalisation: None, 'layernorm', 'layernorm_with_running_stats'
            or 'batchnorm'; applied before each sub-block (pre-norm).
    """

    def __init__(
            self,
            d_model,
            nhead,
            dim_feedforward=2048,
            dropout=0.1,
            constraint=None,
            activation="relu",
            attn_activation="softmax",
            normalisation=None):
        super(TransformerDecoderLayer, self).__init__()
        self.multihead_attn = MultiheadAttention(
            embed_dim=d_model,
            num_heads=nhead,
            dropout=dropout,
            constraint=constraint)
        self.constraint = constraint
        self.normalisation = normalisation

        # Implementation of Feedforward model. A constraint swaps the plain
        # Linear for a constrained layer class (e.g. non-negative weights).
        self.layer_class = nn.Linear
        if self.constraint is not None:
            self.layer_class = LAYER_CLASSES[self.constraint]

        self.linear1 = self.layer_class(
            d_model, dim_feedforward, bias=True)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = self.layer_class(
            dim_feedforward, d_model, bias=True)

        # norm2 precedes the attention block, norm3 precedes the FFN block.
        if self.normalisation == "layernorm_with_running_stats":
            self.norm2 = LayerNorm(d_model, use_running_stats=True)
            self.norm3 = LayerNorm(d_model, use_running_stats=True)
        elif self.normalisation == "layernorm":
            self.norm2 = nn.LayerNorm(d_model)
            self.norm3 = nn.LayerNorm(d_model)
        elif self.normalisation == "batchnorm":
            self.norm2 = BatchNorm1d(d_model)
            self.norm3 = BatchNorm1d(d_model)

        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        if activation == "gumbel":
            self.activation = AdaptiveGumbel(units=dim_feedforward)
        elif activation == "gumbel_softplus":
            self.activation = AdaptiveGumbelSoftplus(units=dim_feedforward)
        else:
            self.activation = ACTIVATIONS[activation]
        self.attn_activation = attn_activation

    def forward(
            self,
            tgt,
            memory,
            tgt_mask=None,
            memory_mask=None,
            tgt_key_padding_mask=None,
            memory_key_padding_mask=None,
            update_running_stats=True):
        """Pass the inputs (and mask) through the decoder layer.

        Args:
            tgt: the sequence to the decoder layer (required). May be a
                (tgt, attn_weights) tuple from a previous layer.
            memory: the sequence from the last layer of the encoder
                (required).
            tgt_mask: the mask for the tgt sequence (optional).
            memory_mask: the mask for the memory sequence (optional).
            tgt_key_padding_mask: the mask for the tgt keys per batch
                (optional).
            memory_key_padding_mask: the mask for the memory keys per batch
                (optional).
            update_running_stats: whether running stats are updated or not
                (optional).

        Shape:
            see the docs in Transformer class.
        """
        # A previous layer returns (tgt, attn_weights); unwrap it.
        if type(tgt) == tuple:
            tgt = tgt[0]
        # Pre-normalise before the cross-attention block.
        tgt2 = tgt
        if self.normalisation is not None:
            if self.normalisation == "layernorm_with_running_stats":
                tgt2 = self.norm2(
                    tgt2, update_running_stats=update_running_stats)
            else:
                tgt2 = self.norm2(tgt2)
        tgt2, attn_weights = self.multihead_attn(
            tgt2, memory, memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
            activation=self.attn_activation)
        tgt = tgt + self.dropout2(tgt2)

        # Pre-normalise before the feed-forward block.
        tgt2 = tgt
        if self.normalisation is not None:
            if self.normalisation == "layernorm_with_running_stats":
                tgt2 = self.norm3(
                    tgt2, update_running_stats=update_running_stats)
            else:
                # Bug fix: this branch previously reused `self.norm2`
                # (already applied before the attention block above)
                # instead of the dedicated `self.norm3` constructed in
                # __init__ for this site — matching the running-stats
                # branch, which correctly uses norm3.
                tgt2 = self.norm3(tgt2)
        # The hasattr guard keeps old checkpoints (saved without
        # `activation`) loadable.
        if hasattr(self, "activation"):
            tgt2 = self.linear2(
                self.dropout(self.activation(self.linear1(tgt2))))
        else:  # for backward compatibility
            tgt2 = self.linear2(self.dropout(F.relu(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt, attn_weights
| 13,319 | 38.525223 | 81 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/encoding.py | import torch as th
import torch.nn as nn
import math
import numpy as np
from typing import Optional, Callable
class SinusoidalEncoding(nn.Module):
    """Sinusoidal (positional-style) encoding of event times.

    Maps a tensor of times [B,T,1] to [B,T,emb_dim] using interleaved
    sin/cos features over `emb_dim // 2` geometrically spaced timescales,
    as in "Attention Is All You Need".
    """

    def __init__(self, emb_dim, scaling):
        super(SinusoidalEncoding, self).__init__()
        self.emb_dim = emb_dim    # must be even (sin/cos pairs)
        self.scaling = scaling    # multiplies the times before the sinusoids

    def forward(
            self,
            times,
            min_timescale: float = 1.0,
            max_timescale: float = 1e4
    ):
        """
        Adaptation of positional encoding to include temporal information
        """
        assert self.emb_dim % 2 == 0, "hidden size must be a multiple of 2 " \
                                      "with pos_enc, pos_dec"
        num_timescales = self.emb_dim // 2
        # Geometric progression of timescales between min and max.
        # NOTE(review): emb_dim == 2 makes this a division by zero.
        log_timescale_increment = np.log(max_timescale / min_timescale
                                         ) / (num_timescales - 1)
        inv_timescales = (
            min_timescale * th.exp(
                th.arange(
                    num_timescales, dtype=th.float, device=times.device
                ) * -log_timescale_increment))
        # `times.float()` avoids the CPU round-trip the previous
        # `times.type(th.FloatTensor).to(times.device)` incurred for
        # GPU-resident inputs; the result is identical (float32, same
        # device).
        scaled_time = times.float() * inv_timescales.unsqueeze(
            0).unsqueeze(0) * self.scaling
        signal = th.cat([th.sin(scaled_time), th.cos(scaled_time)], dim=2)
        return signal
def encoding_size(encoding, emb_dim):
    """Return the feature dimension produced by `event_encoder`."""
    if encoding == "times_only":
        return 1
    if encoding == "concatenate":
        return emb_dim + 1
    if encoding in ("marks_only", "temporal", "learnable",
                    "temporal_with_labels", "learnable_with_labels"):
        return emb_dim
    raise ValueError("Time encoding not understood")
def event_encoder(
        times: th.Tensor,
        mask: th.Tensor,
        encoding: str,
        labels: Optional[th.Tensor] = None,
        embedding_layer: Optional[Callable[[th.Tensor], th.Tensor]] = None,
        temporal_enc: Optional[Callable[[th.Tensor], th.Tensor]] = None):
    """Representation encoder. Switch to determine inputs.

    Args:
        times: [B,L+1 or T] Delta times of events
        mask: [B,L+1 or T] Mask of event representations
        encoding: Switch. Either times_only, marks_only, concatenate,
                  temporal, learnable, temporal_with_labels or
                  learnable_with_labels
        labels: [B,L+1,D] Representations of the events
        embedding_layer: Optional, embedding layer to encode labels
        temporal_enc: Positional encoding function

    Returns:
        Encoded representations [B,L+1 or T,1 or D or D+1]
        Mask: [B,L+1 or T]

    Raises:
        ValueError: If `encoding` is not one of the supported switches
            (previously an unknown switch silently returned None).
    """
    # Returns only times
    if encoding == "times_only":
        return times.unsqueeze(-1), mask  # [B,L+1 or T,1]

    if encoding in ("temporal", "learnable"):
        embeddings = temporal_enc(times.unsqueeze(-1))  # [B,L+1 or T,D]
        return embeddings, mask

    # All remaining encodings require the label embeddings.
    embeddings = embedding_layer(labels)

    # Returns only representation
    if encoding == "marks_only":
        return embeddings, mask  # [B,L+1,D], [B,L+1]

    # Returns times concatenated to representations
    if encoding == "concatenate":
        merged_representations = th.cat(
            (embeddings, times.unsqueeze(-1)),
            dim=-1)  # [B,L+1,D+1], [B,L+1]
        return merged_representations, mask

    if encoding in ("temporal_with_labels", "learnable_with_labels"):
        # Scale embeddings by sqrt(D) before adding the temporal encoding
        # (as in the transformer recipe).
        representations = embeddings * math.sqrt(
            embeddings.shape[-1])
        merged_embeddings = representations + temporal_enc(
            times.unsqueeze(-1))  # [B,L+1,D]
        return merged_embeddings, mask  # [B,L+1,D], [B,L+1]

    raise ValueError("Time encoding not understood")
| 3,655 | 34.153846 | 78 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/index.py | import torch as th
def take_3_by_2(x: th.Tensor, index: th.LongTensor) -> th.Tensor:
    """Index into a rank 3 tensor with a rank 2 tensor. Specifically, replace
    each index I with the corresponding indexed D-dimensional vector, where I
    specifies the location in L, batch-wise.

    Args:
        x: [B,L,D] The D-dimensional vectors to be indexed.
        index: [B,I] The indexes.

    Returns:
        [B,I,D] The indexed tensor.
    """
    feature_dim = x.shape[-1]
    # Broadcast each [B,I] index across the feature axis so that every
    # component of the selected vector is gathered.
    gather_idxs = index.unsqueeze(dim=-1).expand(
        *index.shape, feature_dim)  # [B,I,D]
    return th.gather(x, dim=1, index=gather_idxs)  # [B,I,D]
def take_3_by_1(x: th.Tensor, index: th.LongTensor) -> th.Tensor:
    """Index a rank 3 tensor with one position per batch element.

    Args:
        x: [B,L,D] The D-dimensional vectors to be indexed.
        index: [B] The indexes.

    Returns:
        [B,D] The indexed tensor.
    """
    expanded = index.unsqueeze(dim=1)  # [B,1]
    return take_3_by_2(x, index=expanded)[:, 0]  # [B,D]
def take_2_by_1(x: th.Tensor, index: th.LongTensor) -> th.Tensor:
    """Index into a rank 2 tensor with a rank 1 tensor: pick one element
    per batch row.

    Args:
        x: [B,D] The D-dimensional vectors to be indexed.
        index: [B] The indexes.

    Returns:
        [B] The indexed tensor.
    """
    picked = th.gather(x, dim=1, index=index.unsqueeze(dim=-1))  # [B,1]
    return picked.squeeze(dim=-1)  # [B]
def take_2_by_2(x: th.Tensor, index: th.LongTensor) -> th.Tensor:
    """Index into a rank 2 tensor with a rank 2 tensor: pick I elements
    per batch row.

    Args:
        x: [B,D] The D-dimensional vectors to be indexed.
        index: [B,I] The indexes.

    Returns:
        [B,I] The indexed tensor.
    """
    return th.gather(x, dim=1, index=index)  # [B,I]
def unravel_index(index, shape):
    """Convert a flat index into a tuple of coordinates for `shape`.

    Works elementwise, so `index` may be a plain int or a tensor.
    """
    coords = []
    remaining = index
    for size in reversed(shape):
        coords.append(remaining % size)
        remaining = remaining // size
    coords.reverse()
    return tuple(coords)
| 2,668 | 32.3625 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/history.py | import torch as th
from typing import Optional, Tuple
from tpp.utils import batch as bu
from tpp.utils.events import Events
from tpp.utils.utils import smallest_positive
from tpp.utils.index import take_2_by_2
from tpp.utils.history_bst import get_prev_times as get_prev_times_bst
def _get_rank(x: th.Tensor) -> int:
return len(x.shape)
def expand_to_rank(x: th.Tensor, rank: int, dim: int = -1) -> th.Tensor:
    """Expand a tensor to a desired rank.

    Args:
        x: The tensor to expand.
        rank: The target rank.
        dim: The dim to expand along. Defaults to `-1`.

    Returns:
        A tensor expanded to the given rank.

    Raises:
        ValueError: If `x` already has more dimensions than `rank`.
    """
    current_rank = len(x.shape)
    if current_rank > rank:
        raise ValueError(
            "Rank of `x` ({}) greater than desired rank ({})".format(
                current_rank, rank))
    # Insert singleton dimensions until the target rank is reached.
    while len(x.shape) < rank:
        x = x.unsqueeze(dim)
    return x
def build_histories(
        query: th.Tensor,
        events: Events,
        history_size: Optional[int] = 1) -> Tuple[th.Tensor, th.Tensor]:
    """Get the set of times corresponding to the 'history' of a query time of
    fixed size.

    Args:
        query: [B,T] The times to create histories for.
        events: [B,L] Times and labels of events to create histories from.
        history_size: The size of each history. Defaults to 1.

    Returns:
        history (th.Tensor): [B,T,H] The history for each query time.
        mask (th.Tensor): [B,T] The mask corresponding to whether a
            particular query can be used or not based on the required size of
            history.

    Raises:
        ValueError: If the batch sizes of `query` and `events` disagree, or
            if `history_size` exceeds the longest event sequence.
    """
    batch_size, max_queries = query.shape
    batch_size_s, max_seq_len = events.times.shape
    if batch_size_s != batch_size:
        raise ValueError(
            "The batch size for `query_times` "
            "({}) does not match the batch size for `sequences` "
            "({}).".format(batch_size, batch_size_s))
    if history_size > max_seq_len:
        raise ValueError(
            "The chosen value for `history_size` "
            "({}) is greater than the size of the largest sequence "
            "({}).".format(history_size, max_seq_len))
    # For each query: value and index of the most recent preceding event,
    # whether it is an event, and whether the time delta was positive.
    ((prev_times, prev_times_idxs),
     is_event, pos_delta_mask) = get_prev_times_bst(
        query=query, events=events)  # ([B,T], [B,T]), [B,T], [B,T]
    # Offsets 1-H..0 select the H events ending at the preceding event.
    relative_history_idxs = th.arange(
        start=1 - history_size, end=1, device=events.times.device)
    # Per-batch offsets so `th.take` can index the flattened [B*L] times.
    batch_idxs_shift = th.arange(
        start=0, end=batch_size_s, device=events.times.device) * max_seq_len
    batch_idxs_shift = batch_idxs_shift.reshape([batch_size, 1, 1])
    history_seq_idxs = prev_times_idxs.reshape([batch_size, max_queries, 1])
    history_seq_idxs = history_seq_idxs + relative_history_idxs
    batch_history_seq_idxs = history_seq_idxs + batch_idxs_shift
    batch_history_seq_idxs = batch_history_seq_idxs.long()  # [B,T,H]
    history = th.take(events.times, batch_history_seq_idxs)
    # A query is usable only if all H of its (un-shifted) history indexes
    # exist, i.e. none of them is negative.
    history_idxs_positive = th.prod(history_seq_idxs >= 0, dim=-1)  # [B,T]
    history_idxs_positive = history_idxs_positive.type(pos_delta_mask.dtype)
    history_mask = pos_delta_mask * history_idxs_positive
    history_mask = history_mask.type(history.dtype)  # [B,T]
    return history, history_mask
def get_prev_times(
        query: th.Tensor,
        events: Events,
        allow_window: Optional[bool] = False
) -> Tuple[Tuple[th.Tensor, th.Tensor], th.Tensor, th.Tensor]:
    """For each query, get the event time that directly precedes it. If no
    events precedes it (but the window start does), return the window start.
    Otherwise, mask the value.

    Args:
        query: [B,T] Sequences of query times to evaluate the intensity
            function.
        events: [B,L] Times and labels of events.
        allow_window: If `True`, a previous time can be the window boundary.
            Defaults to `False`.

    Returns:
        `times` is a tuple of tensor of values [B,T] and indices, [B,T] of the
            largest time value in the sequence that is strictly smaller than
            the query time value, or the window. the index only event indexes
            into the events. If the window is returned, it should be dealt with
            explicitly at encoding/decoding time.
        `is_event` is a tensor [B,T] that indicates whether the time
            corresponds to an event or not (a 1 indicates an event and a 0
            indicates a window boundary).
        `mask` is a tensor [B,T] that indicates whether the time difference to
            those times what positive or not.
    """
    # When `allow_window` is set, index 0 is the window start, so actual
    # events live at indexes 1..L.
    event_times = events.get_times(prepend_window=allow_window)  # [B,L+1]
    batch_size, max_seq_len = event_times.shape
    # Pairwise differences query - event time; masked entries are zeroed.
    time_diffs = bu.batchwise_difference(query, event_times)  # [B,T,L+1]
    event_mask = events.get_mask(prepend_window=allow_window)  # [B,L+1]
    event_mask = event_mask.reshape([batch_size, 1, max_seq_len])  # [B,1,L+1]
    time_diffs = time_diffs * event_mask  # [B,T,L+1]
    # The previous time is the one with the smallest strictly positive
    # difference; `mask` marks queries for which such a time exists.
    smallest_time_diffs, mask = smallest_positive(time_diffs, dim=-1)  # [B,T]
    prev_times_idxs = smallest_time_diffs.indices  # [B,T]
    prev_times = take_2_by_2(event_times, index=prev_times_idxs)  # [B,T]
    if allow_window:
        # If the first event shares a time with the window boundary, ensure
        # the index returned is the index of the event, rather than the window
        # boundary.
        idx_is_window = (prev_times_idxs == 0).type(
            prev_times_idxs.dtype)  # [B,T]
        do_idx_shift = events.first_event_on_window.type(
            idx_is_window.dtype)  # [B]
        idx_shift = idx_is_window * do_idx_shift.reshape(-1, 1)
        prev_times_idxs = prev_times_idxs + idx_shift
        # Check the indexes in case one of the window indexes became an event.
        is_event = (prev_times_idxs != 0).type(mask.dtype)  # [B,T]
    else:
        is_event = th.ones_like(prev_times_idxs)  # [B,T]
    # Invalidate queries outside (window_start, window_end].
    query_above_window = query > events.window_start.reshape(-1, 1)
    query_below_window = query <= events.window_end.reshape(-1, 1)
    query_within_window = query_above_window & query_below_window
    query_within_window = query_within_window.type(mask.dtype)
    mask = mask * query_within_window
    return (prev_times, prev_times_idxs), is_event, mask  # [B,T]
| 6,494 | 38.603659 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/utils/keras_preprocessing/sequence.py | # -*- coding: utf-8 -*-
"""Utilities for preprocessing sequence data.
Copied verbatim from
https://github.com/keras-team/keras-preprocessing/blob/master/keras_preprocessing/sequence.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
def pad_sequences(sequences, maxlen=None, dtype='int32',
                  padding='pre', truncating='pre', value=0.):
    """Pads sequences to the same length.

    This function transforms a list of `num_samples` sequences (lists of
    integers) into a 2D Numpy array of shape `(num_samples, num_timesteps)`.
    `num_timesteps` is either the `maxlen` argument if provided, or the
    length of the longest sequence otherwise.

    Sequences shorter than `num_timesteps` are padded with `value`, and
    sequences longer than `num_timesteps` are truncated so that they fit
    the desired length. The position where padding or truncation happens
    is determined by the arguments `padding` and `truncating`,
    respectively. Pre-padding is the default.

    # Arguments
        sequences: List of lists, where each element is a sequence.
        maxlen: Int, maximum length of all sequences.
        dtype: Type of the output sequences.
            To pad sequences with variable length strings, you can use
            `object`.
        padding: String, 'pre' or 'post':
            pad either before or after each sequence.
        truncating: String, 'pre' or 'post':
            remove values from sequences larger than
            `maxlen`, either at the beginning or at the end of the sequences.
        value: Float or String, padding value.

    # Returns
        x: Numpy array with shape `(len(sequences), maxlen)`

    # Raises
        ValueError: In case of invalid values for `truncating` or `padding`,
            or in case of invalid shape for a `sequences` entry.
    """
    if not hasattr(sequences, '__len__'):
        raise ValueError('`sequences` must be iterable.')
    num_samples = len(sequences)
    lengths = []
    for x in sequences:
        try:
            lengths.append(len(x))
        except TypeError:
            raise ValueError('`sequences` must be a list of iterables. '
                             'Found non-iterable: ' + str(x))
    if maxlen is None:
        maxlen = np.max(lengths)
    # take the sample shape from the first non empty sequence
    # checking for consistency in the main loop below.
    sample_shape = tuple()
    for s in sequences:
        if len(s) > 0:
            sample_shape = np.asarray(s).shape[1:]
            break
    # `np.unicode_` was removed in NumPy 2.0 and on Python 3 `np.str_`
    # already covers unicode strings, so a single check suffices.
    # Likewise `six.string_types` is simply `str` on Python 3.
    is_dtype_str = np.issubdtype(dtype, np.str_)
    if isinstance(value, str) and dtype != object and not is_dtype_str:
        raise ValueError(
            "`dtype` {} is not compatible with `value`'s type: {}\n"
            "You should set `dtype=object` for variable length strings."
            .format(dtype, type(value)))
    x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
    for idx, s in enumerate(sequences):
        if not len(s):
            continue  # empty list/array was found
        if truncating == 'pre':
            trunc = s[-maxlen:]
        elif truncating == 'post':
            trunc = s[:maxlen]
        else:
            raise ValueError('Truncating type "%s" '
                             'not understood' % truncating)
        # check `trunc` has expected shape
        trunc = np.asarray(trunc, dtype=dtype)
        if trunc.shape[1:] != sample_shape:
            raise ValueError('Shape of sample %s of sequence at position %s '
                             'is different from expected shape %s' %
                             (trunc.shape[1:], idx, sample_shape))
        if padding == 'post':
            x[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError('Padding type "%s" not understood' % padding)
    return x
| 4,092 | 35.873874 | 93 | py |
neuralTPPs | neuralTPPs-master/tpp/pytorch/models.py | import torch.nn as nn
from collections.abc import Iterable
from typing import List, Optional
from tpp.pytorch.activations import ParametricSoftplus, AdaptiveGumbel
from tpp.pytorch.activations import AdaptiveGumbelSoftplus
from tpp.pytorch.layers import LAYER_CLASSES
from tpp.pytorch.activations import ACTIVATIONS
class MLP(nn.Module):
    """Neural network which can put constraints on its weights

    Args:
        units: list of sizes of linear layers
        input_shape: shape of the input
        activations: activation functions. Either a string or a list of
            strings (one per layer)
        constraint: constraint on the weights. Either none, nonneg,
            sigmoid or softplus (keys of `LAYER_CLASSES`)
        dropout_rates: Either a float or a list of floats (one per layer)
        activation_final: final activation function name
        use_bias: True if we want to use bias, False otherwise.
    """
    def __init__(
            self,
            units: List[int],
            input_shape: int,
            activations: List[str],
            constraint: str,
            dropout_rates: Optional[float] = None,
            # Fixed annotation: this is an activation *name*, not a float.
            activation_final: Optional[str] = None,
            use_bias=True,
            **kwargs):
        super(MLP, self).__init__()
        self.units = units
        self.activations = activations
        self.constraint = constraint
        self.dropout_rates = dropout_rates
        self.input_shape = input_shape
        self.activation_final = activation_final
        self.net = nn.Sequential()
        self.layers = list()
        self.n_layers = len(units)
        self.use_bias = use_bias
        # Broadcast scalar dropout/activation settings to one per layer.
        if not isinstance(self.dropout_rates, Iterable):
            self.dropout_rates = [self.dropout_rates] * len(self.units)
        if (not isinstance(self.activations, Iterable) or
                isinstance(self.activations, str)):
            self.activations = [self.activations] * len(self.units)
        self.layer_class = nn.Linear
        if self.constraint is not None:
            self.layer_class = LAYER_CLASSES[self.constraint]
        self.units = [self.input_shape] + self.units
        for i in range(len(self.units) - 1):
            final_layer = i == self.n_layers - 1
            in_features, out_features = self.units[i], self.units[i + 1]
            layer_kwargs = dict(
                in_features=in_features,
                out_features=out_features,
                bias=use_bias)
            if self.constraint == "nonneg":
                # NonNegLinear takes an epsilon keeping weights strictly
                # positive. Build the layer once (previously an unconstrained
                # layer was built first and immediately discarded).
                layer_kwargs["eps"] = 1e-30
            layer = self.layer_class(**layer_kwargs)
            self.layers.append(("linear{}".format(i), layer))
            # The final layer uses `activation_final` instead of the
            # per-layer activation.
            activation = self.activations[i]
            if final_layer:
                activation = self.activation_final
            if activation is not None:
                if activation == "parametric_softplus":
                    activation_fn = ParametricSoftplus(units=out_features)
                elif activation == "gumbel":
                    activation_fn = AdaptiveGumbel(units=out_features)
                elif activation == "gumbel_softplus":
                    activation_fn = AdaptiveGumbelSoftplus(units=out_features)
                else:
                    activation_fn = ACTIVATIONS[activation]
                self.layers.append(("activation{}".format(i), activation_fn))
            dropout_rate = self.dropout_rates[i]
            if dropout_rate is not None and dropout_rate > 0.0:
                dropout_fn = nn.Dropout(p=dropout_rate)
                self.layers.append(("dropout{}".format(i), dropout_fn))
        for n, l in self.layers:
            self.net.add_module(n, l)

    def forward(self, inputs):
        """Apply the stacked layers to `inputs`."""
        output = self.net(inputs)
        return output
| 3,855 | 35.377358 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/pytorch/layers/dense.py | import torch as th
import torch.nn.functional as F
from torch import nn
class NonNegLinear(nn.Linear):
    """Linear layer whose weights are kept non-negative.

    At construction, negative initial weights are sign-flipped. On every
    forward pass the stored weights are re-clamped to be at least `eps`
    (persisting the result in the parameter) before the affine map.
    """

    def __init__(self, in_features, out_features, bias=True, eps=0.):
        super(NonNegLinear, self).__init__(in_features, out_features, bias)
        self.eps = eps
        self.positivify_weights()

    def positivify_weights(self):
        """Flip negative weights to their absolute value, in place."""
        self.weight.data = self.weight.data.abs()

    def forward(self, inputs):
        # Zero out any weight that drifted negative during training, then
        # clamp to at least `eps`, persisting the result in the parameter.
        nonneg = self.weight * (self.weight > 0).float()
        self.weight.data = th.clamp(nonneg, min=self.eps)
        return F.linear(inputs, self.weight, self.bias)
class SigmoidLinear(nn.Linear):
    """Linear layer whose effective weights are the sigmoid of the stored
    parameters, constraining them to (0, 1)."""

    def __init__(self, in_features, out_features, bias=True):
        super(SigmoidLinear, self).__init__(in_features, out_features, bias)
        self.positivify_weights()

    def positivify_weights(self):
        """Flip negative initial weights to positive, in place."""
        mask = (self.weight < 0).float() * - 1
        mask = mask + (self.weight >= 0).float()
        self.weight.data = self.weight.data * mask

    def forward(self, inputs):
        # `torch.nn.functional.sigmoid` is deprecated; `th.sigmoid` is the
        # supported, numerically identical API.
        weight = th.sigmoid(self.weight)
        return F.linear(inputs, weight, self.bias)
class SoftPlusLinear(nn.Linear):
    """Linear layer whose effective weights are a softplus of the stored
    parameters, keeping them strictly positive."""

    def __init__(self, in_features, out_features, bias=True,
                 beta=1., threshold=20):
        super(SoftPlusLinear, self).__init__(in_features, out_features, bias)
        self.beta = beta
        self.threshold = threshold
        self.positivify_weights()

    def positivify_weights(self):
        """Flip negative initial weights to positive, in place."""
        self.weight.data = self.weight.data.abs()

    def forward(self, inputs):
        effective_weight = F.softplus(
            self.weight, beta=self.beta, threshold=self.threshold)
        return F.linear(inputs, effective_weight, self.bias)
# Mapping from constraint name (as passed to e.g. `MLP(constraint=...)`)
# to the weight-constrained linear layer class implementing it.
LAYER_CLASSES = {
    "nonneg": NonNegLinear,
    "sigmoid": SigmoidLinear,
    "softplus": SoftPlusLinear}
| 2,008 | 30.888889 | 77 | py |
neuralTPPs | neuralTPPs-master/tpp/pytorch/layers/batchnorm.py | import torch as th
from torch import nn
class BatchNorm1d(nn.BatchNorm1d):
    """BatchNorm1d variant with two extra switches.

    `use_running_estimates`: normalise with the running statistics even in
    training mode (the running buffers are still updated from the batch
    statistics first).
    `normalise_over_final`: for rank-3 inputs, compute statistics over dims
    (0, 1) — i.e. per final feature — instead of the standard (0, 2).
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1,
                 affine=True, track_running_stats=True,
                 use_running_estimates=False,
                 normalise_over_final=False):
        super(BatchNorm1d, self).__init__(
            num_features, eps, momentum, affine, track_running_stats)
        self.use_running_estimates = use_running_estimates
        self.normalise_over_final = normalise_over_final

    def forward(self, inputs):
        """Normalise `inputs` (rank 2 or rank 3; other ranks raise)."""
        self._check_input_dim(inputs)
        input_rank, exponential_average_factor = len(inputs.shape), 0.0
        if self.training and self.track_running_stats:
            if self.num_batches_tracked is not None:
                self.num_batches_tracked += 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(
                        self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum
        if self.use_running_estimates:
            # Snapshot the running stats BEFORE they are updated below, so
            # normalisation uses the pre-update values.
            with th.no_grad():
                running_mean, running_var = self.running_mean, self.running_var
        # calculate running estimates
        if self.training:
            if input_rank == 2:
                # use biased var in train
                mean, var = inputs.mean([0]), inputs.var([0], unbiased=False)
            elif input_rank == 3:
                if self.normalise_over_final:
                    mean = inputs.mean([0, 1])
                    var = inputs.var([0, 1], unbiased=False)
                else:
                    mean = inputs.mean([0, 2])
                    var = inputs.var([0, 2], unbiased=False)
            else:
                raise ValueError("Incorrect input shape.")
            # n = number of elements each statistic was computed over; used
            # to turn the biased batch var into an unbiased running update.
            if self.normalise_over_final and input_rank == 3:
                n = inputs.numel() / inputs.size(2)
            else:
                n = inputs.numel() / inputs.size(1)
            with th.no_grad():
                self.running_mean = (
                    exponential_average_factor * mean
                    + (1 - exponential_average_factor) * self.running_mean)
                # update running_var with unbiased var
                self.running_var = (
                    exponential_average_factor * var * n / (n - 1)
                    + (1 - exponential_average_factor) * self.running_var)
        else:
            mean = self.running_mean
            var = self.running_var
        if self.use_running_estimates:
            mean, var = running_mean, running_var
        # Reshape statistics and affine parameters so they broadcast against
        # the input's rank and chosen normalisation axes.
        if input_rank == 2:
            mean = mean.unsqueeze(dim=0)
            var = var.unsqueeze(dim=0)
            weight = self.weight.unsqueeze(dim=0)
            bias = self.bias.unsqueeze(dim=0)
        elif input_rank == 3:
            if self.normalise_over_final:
                mean = mean.unsqueeze(dim=0).unsqueeze(dim=0)
                var = var.unsqueeze(dim=0).unsqueeze(dim=0)
                weight = self.weight.unsqueeze(dim=0).unsqueeze(dim=0)
                bias = self.bias.unsqueeze(dim=0).unsqueeze(dim=0)
            else:
                mean = mean.unsqueeze(dim=0).unsqueeze(dim=-1)
                var = var.unsqueeze(dim=0).unsqueeze(dim=-1)
                weight = self.weight.unsqueeze(dim=0).unsqueeze(dim=-1)
                bias = self.bias.unsqueeze(dim=0).unsqueeze(dim=-1)
        inputs = (inputs - mean) / th.sqrt(var + self.eps)
        if self.affine:
            inputs = inputs * weight + bias
        return inputs
| 3,663 | 39.711111 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/pytorch/layers/layernorm.py | import torch as th
from torch import nn
from typing import Optional
from tpp.utils.nnplus import non_neg_param
class LayerNorm(nn.LayerNorm):
    """LayerNorm over the final dimension, with optional running statistics.

    When `use_running_stats` is set, scalar running estimates of the mean
    and variance are tracked and used for normalisation, and the affine
    weight is constrained to be non-negative.
    NOTE(review): the elementwise affine transform is only applied when
    `use_running_stats` is True — confirm this asymmetry is intended.
    """

    def __init__(
            self,
            normalized_shape,
            eps=1e-5,
            elementwise_affine=True,
            momentum=.1,
            use_running_stats=False):
        super(LayerNorm, self).__init__(
            normalized_shape, eps, elementwise_affine)
        assert isinstance(normalized_shape, int), (
            "Only implemented this for final layer normalisation")
        self.use_running_stats = use_running_stats
        self.momentum = momentum
        if self.use_running_stats:
            # Scalar running buffers (one value shared over all positions).
            self.register_buffer('running_mean', th.zeros(1))
            self.register_buffer('running_var', th.ones(1))
            self.register_buffer('num_batches_tracked',
                                 th.tensor(0, dtype=th.long))
            self.reset_running_stats()
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
            self.register_parameter('num_batches_tracked', None)

    def reset_running_stats(self):
        """Reset running mean/var and the batch counter to their defaults."""
        if self.use_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
            self.num_batches_tracked.zero_()

    def forward(
            self,
            inputs: th.Tensor,
            update_running_stats: Optional[bool] = True) -> th.Tensor:
        """Normalise over the last dim; optionally update running stats."""
        if self.momentum is None:
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum
        if self.use_running_stats:
            # Snapshot BEFORE the update below so normalisation uses the
            # pre-update running values.
            running_mean, running_var = self.running_mean, self.running_var
        mu = th.mean(inputs, dim=-1, keepdim=True)
        var = th.var(inputs, dim=-1, keepdim=True, unbiased=False)
        if self.training and self.use_running_stats:
            if self.num_batches_tracked is not None:
                self.num_batches_tracked += 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(
                        self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum
            # n = number of positions each per-position statistic covers;
            # used for the unbiased variance correction below.
            n = th.Tensor(list(inputs.shape[:-1]))
            n = th.prod(n)
            if update_running_stats:
                with th.no_grad():
                    self.running_mean = (
                        exponential_average_factor * th.mean(mu)
                        + (1 - exponential_average_factor
                           ) * self.running_mean)
                    # update running_var with unbiased var
                    self.running_var = (
                        exponential_average_factor * th.mean(var) * n / (n - 1)
                        + (1 - exponential_average_factor
                           ) * self.running_var)
        if self.use_running_stats:
            mu, var = running_mean, running_var
        outputs = (inputs - mu) / th.sqrt(var + self.eps)
        if self.use_running_stats:
            # Keep the affine weight non-negative, persisting the projection.
            self.weight.data = non_neg_param(self.weight)
            outputs = self.weight * outputs
            outputs = outputs + self.bias
        return outputs
| 3,327 | 35.571429 | 83 | py |
neuralTPPs | neuralTPPs-master/tpp/pytorch/layers/log.py | import torch as th
class Log(th.autograd.Function):
    """Safe implementation of x ↦ log(x).

    The forward pass is the ordinary logarithm; the backward pass clamps
    the gradient g / x to [-10, 10] so tiny inputs cannot blow it up.
    """

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return th.log(x)

    @staticmethod
    def backward(ctx, grad_output):
        (saved_x,) = ctx.saved_tensors
        grad = grad_output / saved_x
        return grad.clamp(min=-1e1, max=1e1)
| 362 | 21.6875 | 59 | py |
neuralTPPs | neuralTPPs-master/tpp/pytorch/layers/__init__.py | from tpp.pytorch.layers.batchnorm import BatchNorm1d
from tpp.pytorch.layers.dense import LAYER_CLASSES
from tpp.pytorch.layers.dense import NonNegLinear
from tpp.pytorch.layers.dense import SigmoidLinear
from tpp.pytorch.layers.dense import SoftPlusLinear
from tpp.pytorch.layers.layernorm import LayerNorm
| 310 | 33.555556 | 52 | py |
neuralTPPs | neuralTPPs-master/tpp/pytorch/activations/gumbel.py | import math
import torch as th
from torch import nn
from tpp.utils.nnplus import non_neg_param
from tpp.utils.stability import epsilon_like
from tpp.pytorch.activations.softplus import ParametricSoftplus
class AdaptiveGumbel(nn.Module):
    """Adaptive Gumbel activation x ↦ 1 - (1 + α·exp(x))^(-1/α), with a
    learnable, per-unit shape parameter α kept non-negative."""

    def __init__(self, units):
        super(AdaptiveGumbel, self).__init__()
        self.units = units
        self.alpha = nn.Parameter(th.Tensor(self.units))
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.constant_(self.alpha, 1.)

    def forward(self, x):
        if self.units != 1:
            assert x.shape[-1] == self.units, (
                "Final dim of input shape must match num parameters.")
        broadcast_shape = [1] * (len(x.shape) - 1) + [self.units]
        # Project alpha onto the non-negative orthant (persisted in the
        # parameter), then nudge it away from zero for stability.
        self.alpha.data = non_neg_param(self.alpha)
        alpha = self.alpha.reshape(broadcast_shape)
        alpha = alpha + epsilon_like(alpha)
        # Clamp before exponentiating so float32 exp cannot overflow.
        exp_x = th.exp(th.clamp(x, max=math.log(1e30)))
        return 1. - (1. + alpha * exp_x) ** (-1 / alpha)
class AdaptiveGumbelSoftplus(nn.Module):
    """Adaptive Gumbel activation scaled by (1 + parametric softplus),
    sharing the per-unit parameter count of both sub-activations."""

    def __init__(self, units):
        super(AdaptiveGumbelSoftplus, self).__init__()
        self.units = units
        self.gumbel = AdaptiveGumbel(units=self.units)
        self.softplus = ParametricSoftplus(units=self.units)

    def forward(self, x):
        # Gumbel branch is evaluated first, matching the original order.
        return self.gumbel(x) * (1 + self.softplus(x))
def test_gumbel(do_derivative=False):
    """Visual sanity check: plot AdaptiveGumbel (or its derivative).

    Bug fix: the previous version called `gumbel.forward_unsafe(x)`, a
    method `AdaptiveGumbel` does not define, so this always raised
    AttributeError; the "unsafe" comparison was removed.
    """
    x = th.linspace(start=-10., end=100.).reshape(-1, 1)
    x.requires_grad = True
    gumbel = AdaptiveGumbel(units=1)
    gumbel.alpha.data = th.Tensor([1.2])
    y = gumbel(x)
    if do_derivative:
        y = th.autograd.grad(y, x, grad_outputs=th.ones_like(y))[0]
    import matplotlib.pyplot as plt
    plt.plot(x.detach().numpy(), y.detach().numpy(), label="normal")
    if do_derivative:
        plt.title("d/dx act(x)")
    else:
        plt.title("act(x)")
    plt.legend()
    plt.show()
# Manual visual sanity checks (require matplotlib); run this file directly.
if __name__ == "__main__":
    test_gumbel()
    test_gumbel(do_derivative=True)
| 2,272 | 28.519481 | 83 | py |
neuralTPPs | neuralTPPs-master/tpp/pytorch/activations/softplus.py | import torch as th
from torch import nn
from tpp.utils.nnplus import non_neg_param
from tpp.utils.stability import epsilon_like
class MonotonicSoftplus(nn.Module):
    """A version of the softplus that stays monotonic.

    Below `threshold` this is the usual log(1 + exp(beta*x)) / beta; at or
    above it a linear branch (x + 1e-8) takes over. NOTE(review): the
    masked softplus term still contributes log(2)/beta on the linear
    branch, offsetting the output from x there — presumably intentional
    for monotonicity; confirm before changing.
    """

    def __init__(self, beta=1, threshold=20):
        super(MonotonicSoftplus, self).__init__()
        self.beta = beta
        self.threshold = threshold

    def forward(self, x):
        below = (x < self.threshold).float().to(device=x.device)
        # Mask the exponent so exp is never taken of very large inputs.
        softplus_part = th.log(1. + th.exp(x * below * self.beta)) / self.beta
        linear_part = (x + 1e-8) * (1. - below)
        return softplus_part + linear_part
class ParametricSoftplus(nn.Module):
    r"""Applies the element-wise function:
    .. math::
        \text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))
    with a learnable, per-unit :math:`\beta` (initialised to 1).
    SoftPlus is a smooth approximation to the ReLU function and can be used
    to constrain the output of a machine to always be positive.
    """

    def __init__(self, units, threshold=20):
        super(ParametricSoftplus, self).__init__()
        self.units = units
        # One learnable beta per unit; set to 1 in `reset_parameters`.
        self.beta = nn.Parameter(th.Tensor(self.units))
        self.threshold = threshold
        self.softplus = nn.Softplus(beta=1., threshold=self.threshold)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.ones_(self.beta)

    def forward_alt(self, inputs):
        """Alternative logsumexp formulation of the same function (not used
        by `forward`; kept for comparison in `test_softplus`)."""
        # log(1 + exp (x*beta)) / beta
        beta = self.beta
        beta_x = beta * inputs
        zeros = th.zeros_like(inputs)
        exponents = th.stack([zeros, beta_x], dim=-1)
        log_1_exp_xbeta = th.logsumexp(exponents, dim=-1)
        return log_1_exp_xbeta / beta

    def forward(self, inputs):
        if self.units != 1:
            assert inputs.shape[-1] == self.units, (
                "Final dim of input shape must match num parameters.")
        target_shape = [1] * (len(inputs.shape) - 1) + [self.units]
        # `non_neg_param` presumably projects beta to be non-negative; the
        # result is persisted in the parameter, then nudged away from zero
        # via `epsilon_like` so the division below is safe.
        self.beta.data = non_neg_param(self.beta)
        beta = self.beta.reshape(target_shape)
        beta = beta + epsilon_like(beta)
        inputs = beta * inputs
        outputs = self.softplus(inputs)
        outputs = outputs / beta
        return outputs
def test_softplus():
    """Visual sanity check: the stable `forward` and the logsumexp-based
    `forward_alt` should produce coinciding curves."""
    units = 1
    x = th.linspace(start=-10, end=10)
    parametric_softplus = ParametricSoftplus(units=units)
    y = parametric_softplus(x)
    y2 = parametric_softplus.forward_alt(x)
    import matplotlib.pyplot as plt
    plt.plot(x.detach().numpy(), y.detach().numpy(), label="normal")
    # Bug fix: previously `y` was plotted twice, so the "new" curve never
    # actually showed `forward_alt`.
    plt.plot(x.detach().numpy(), y2.detach().numpy(), label="new")
    plt.legend()
    plt.show()
# Manual visual sanity check (requires matplotlib); run this file directly.
if __name__ == "__main__":
    test_softplus()
| 2,617 | 29.8 | 75 | py |
neuralTPPs | neuralTPPs-master/tpp/pytorch/activations/pau_utils.py | from time import time
import numpy as np
import torch
import torch.nn as nn
from numpy.random.mtrand import RandomState
def get_constants_for_inits(name, seed=17):
    """Return initial PAU coefficients for a named initialisation scheme.

    Returns a (numerator, denominator, center) triple: numerator weights
    for [1, x, x^2, ...], denominator weights for (|x|, |x|^2, ...) and
    the expansion center.

    Args:
        name: Name of the initialisation scheme.
        seed: RNG seed, used only by "pade_random".

    Raises:
        ValueError: If `name` is not a known scheme (previously an unknown
            name silently returned None).
    """
    # (numerator: [x, x.pow(1), x.pow(2), x.pow(3), x.pow(4, x.pow(5)], denominator: (x, x.pow(2), center)
    if name == "pade_sigmoid_3":
        return ((1 / 2, 1 / 4, 1 / 20, 1 / 240),
                (0., 1 / 10),
                (0,))
    elif name == "pade_sigmoid_5":
        return ((1 / 2, 1 / 4, 17 / 336, 1 / 224, 0, - 1 / 40320),
                (0., 1 / 10),
                (0,))
    elif name == "pade_softplus":
        return ((np.log(2), 1 / 2, (15 + 8 * np.log(2)) / 120, 1 / 30, 1 / 320),
                (0.01, 1 / 15),
                (0,))
    elif name == "pade_optimized_avg":
        return [(0.15775171, 0.74704865, 0.82560348, 1.61369449, 0.6371632, 0.10474671),
                (0.38940287, 2.19787666, 0.30977883, 0.15976778),
                (0.,)]
    elif name == "pade_optimized_leakyrelu":
        return [(3.35583603e-02, 5.05000375e-01, 1.65343934e+00, 2.01001052e+00, 9.31901999e-01, 1.52424124e-01),
                (3.30847488e-06, 3.98021568e+00, 5.12471206e-07, 3.01830109e-01),
                (0,)]
    elif name == "pade_optimized_leakyrelu2":
        return [(0.1494, 0.8779, 1.8259, 2.4658, 1.6976, 0.4414),
                (0.0878, 3.3983, 0.0055, 0.3488),
                (0,)]
    elif name == "pade_random":
        rng = RandomState(seed)
        return (rng.standard_normal(5), rng.standard_normal(4), (0,))
    elif name == "pade_optmized":  # NOTE: misspelled key kept for callers
        return [(0.0034586860882628158, -0.41459839329894876, 4.562452712166459, -16.314813244428276,
                 18.091669531543833, 0.23550876048241304),
                (3.0849791873233383e-28, 3.2072596311394997e-27, 1.0781647589819156e-28, 11.493453196161223),
                (0,)]
    raise ValueError("Unknown PAU initialisation: {}".format(name))
class PADEACTIVATION(nn.Module):
    """Base Padé Activation Unit (PAU): a learnable rational activation
    P(x)/Q(x) whose coefficients are initialised from a named scheme.

    Subclasses implement `forward`; this base class only holds the
    learnable numerator/denominator coefficient vectors.
    """

    def __init__(self, init_coefficients="pade_optimized_leakyrelu"):
        super(PADEACTIVATION, self).__init__()
        constants_for_inits = get_constants_for_inits(init_coefficients)
        # Orders (coefficient counts) of the numerator and denominator.
        self.n_numerator = len(constants_for_inits[0])
        self.n_denominator = len(constants_for_inits[1])
        self.weight_numerator = nn.Parameter(torch.FloatTensor(constants_for_inits[0]), requires_grad=True)
        self.weight_denominator = nn.Parameter(torch.FloatTensor(constants_for_inits[1]), requires_grad=True)

    def forward(self, x):
        raise NotImplementedError()
class PADEACTIVATION_Function_based(PADEACTIVATION):
    """PAU that evaluates through a custom autograd Function (defaults to
    the pure-python `PADEACTIVATION_F_python`)."""

    def __init__(self, init_coefficients="pade_optimized_leakyrelu", act_func_cls=None):
        super(PADEACTIVATION_Function_based, self).__init__(init_coefficients=init_coefficients)
        if act_func_cls is None:
            act_func_cls = PADEACTIVATION_F_python
        # Bind the autograd Function's `apply` as the activation callable.
        self.activation_function = act_func_cls.apply

    def forward(self, x):
        out = self.activation_function(x, self.weight_numerator, self.weight_denominator)
        return out
class PADEACTIVATION_F_python(torch.autograd.Function):
    """Pure-python autograd implementation of the "safe" PAU
    f(x) = P(x) / Q(x), where P(x) = n0 + n1*x + n2*x^2 + ... and
    Q(x) = 1 + |d0|*|x| + |d1|*|x|^2 + ... — the absolute values keep
    Q(x) >= 1, so the rational function has no poles.

    `backward` re-evaluates the polynomials and returns hand-derived
    gradients w.r.t. the input and both coefficient vectors.
    """

    @staticmethod
    def forward(ctx, input, weight_numerator, weight_denominator):
        ctx.save_for_backward(input, weight_numerator, weight_denominator)
        z = input
        clamped_n = weight_numerator
        # Denominator coefficients enter via their absolute value.
        clamped_d = weight_denominator.abs()
        numerator = z.mul(clamped_n[1]) + clamped_n[0]
        xps = list()  # running powers of z: [z, z^2, z^3, ...]
        # xp = z
        xps.append(z)
        for c_n in clamped_n[2:]:
            xp = xps[-1].mul(z)
            xps.append(xp)
            numerator = numerator + c_n.mul(xp)
        denominator = z.abs() * clamped_d[0] + 1
        for idx, c_d in enumerate(clamped_d[1:]):
            xp = xps[idx + 1].abs()
            denominator = denominator + c_d.mul(xp)
        return numerator.div(denominator)

    @staticmethod
    def backward(ctx, grad_output):
        x, weight_numerator, weight_denominator = ctx.saved_tensors
        clamped_n = weight_numerator  # .clamp(min=0, max=1.)
        clamped_d = weight_denominator.abs()
        # Re-evaluate P, Q and the powers of x exactly as in `forward`.
        numerator = x.mul(clamped_n[1]) + clamped_n[0]
        xps = list()
        # xp = z
        xps.append(x)
        for c_n in clamped_n[2:]:
            xp = xps[-1].mul(x)
            xps.append(xp)
            numerator = numerator + c_n.mul(xp)
        denominator = x.abs() * clamped_d[0] + 1
        for idx, c_d in enumerate(clamped_d[1:]):
            xp = xps[idx + 1].abs()
            denominator = denominator + c_d.mul(xp)
        xps = torch.stack(xps)
        P = numerator
        Q = denominator
        # df/d(numerator coeffs): x^k / Q (with x^0 handled separately).
        dfdn = torch.cat(((1.0 / Q).unsqueeze(dim=0), xps.div(Q)))
        # df/d(denominator coeffs): -P/Q^2 * |x|^k * sign(d_k).
        dfdd_tmp = (-P.div((Q.mul(Q))))
        dfdd = dfdd_tmp.mul(xps[0:clamped_d.size()[0]].abs())
        for idx in range(dfdd.shape[0]):
            dfdd[idx] = dfdd[idx].mul(weight_denominator[idx].sign())
        # df/dx, quotient rule: dfdx1 = P'(x)/Q ...
        dfdx1 = 2.0 * clamped_n[2].mul(xps[0]) + clamped_n[1]
        for idx, xp in enumerate(xps[1:clamped_n.size()[0] - 2]):
            i = (idx + 3)
            dfdx1 += i * clamped_n[i].mul(xp)
        dfdx1 = dfdx1.div(Q)
        # ... and dfdx2 = -P * Q'(x) / Q^2 (sign(x) from d|x|/dx).
        dfdx2 = 2.0 * clamped_d[1].mul(xps[0].abs()) + clamped_d[0]
        for idx, xp in enumerate(xps[1:clamped_d.size()[0] - 1]):
            i = (idx + 3)
            dfdx2 += i * clamped_d[idx + 2].mul(xp.abs())
        dfdx2_ = dfdx2.mul(xps[0].sign())
        dfdx2 = dfdx2_.mul(dfdd_tmp)
        dfdx = dfdx1 + dfdx2
        # Chain rule with the upstream gradient; coefficient grads are
        # reduced over all input dims since the coeffs are shared.
        rdfdn = torch.mul(grad_output, dfdn)
        rdfdd = torch.mul(grad_output, dfdd)
        dfdn = rdfdn
        dfdd = rdfdd
        for _ in range(len(P.shape)):
            dfdn = dfdn.sum(-1)
            dfdd = dfdd.sum(-1)
        dfdx = grad_output.mul(dfdx)
        return dfdx, dfdn, dfdd
def exec_act(x, actv):
    '''Benchmark an activation's forward and backward passes on CUDA.

    Runs ``actv(x)`` and the corresponding backward pass ``n_iters`` times
    each, prints the average wall-clock time per call in microseconds, and
    returns the final activation output as a numpy array.

    Parameters
    ----------
    x : torch.Tensor
        Input tensor; this function synchronizes CUDA, so it assumes the
        work runs on a CUDA device
    actv : callable
        The activation to benchmark

    Returns
    -------
    numpy.ndarray
        ``actv(x)`` detached and moved to the CPU
    '''
    n_iters = 10000
    start = time()
    for _ in range(n_iters):
        new_x = actv(x)
    # Drain queued CUDA kernels before reading the clock.
    torch.cuda.synchronize()
    forward = time() - start
    start = time()
    for _ in range(n_iters):
        # retain_graph so the same graph can be backpropped repeatedly
        (new_x.sum()).backward(retain_graph=True)
    torch.cuda.synchronize()
    backward = time() - start
    # Average microseconds per call = total_seconds * 1e6 / n_iters.
    # Bug fix: the original divided by 1e5 while running only 1e4
    # iterations, underreporting the per-call time by a factor of 10.
    print('Forward: {:.3f} us | Backward {:.3f} us'.format(
        forward * 1e6 / n_iters, backward * 1e6 / n_iters))
    return new_x.cpu().detach().numpy()
| 6,193 | 33.220994 | 113 | py |
neuralTPPs | neuralTPPs-master/tpp/pytorch/activations/__init__.py | from torch import nn
from tpp.pytorch.activations.arctan import Arctan
from tpp.pytorch.activations.gumbel import AdaptiveGumbel
from tpp.pytorch.activations.gumbel import AdaptiveGumbelSoftplus
from tpp.pytorch.activations.pau import PAU
from tpp.pytorch.activations.softplus import MonotonicSoftplus
from tpp.pytorch.activations.softplus import ParametricSoftplus
# Name -> ready-made activation module instance; consumers look activations
# up here by string key.  NOTE(review): each entry is a single shared
# instance.  Most are stateless, but "pau" carries learnable Pade
# coefficients (nn.Parameter), so reusing this dict across models would tie
# their PAU weights together -- confirm that is intended.
ACTIVATIONS = {
    "arctan": Arctan(),
    'celu': nn.CELU(),
    'elu': nn.ELU(),
    'leaky_relu': nn.LeakyReLU(),
    'log_softmax': nn.LogSoftmax(dim=-1),
    'monotonic_softplus': MonotonicSoftplus(),
    'pau': PAU(init_coefficients="pade_sigmoid_3"),
    'relu': nn.ReLU(),
    'relu6': nn.ReLU6(),
    'sigmoid': nn.Sigmoid(),
    'softmax': nn.Softmax(dim=-1),
    'softplus': nn.Softplus(),
    'softsign': nn.Softsign(),
    'tanh': nn.Tanh(),
    'tanhshrink': nn.Tanhshrink()}
| 860 | 30.888889 | 65 | py |
neuralTPPs | neuralTPPs-master/tpp/pytorch/activations/pau.py | """
Taken from
https://github.com/ml-research/pau/blob/master/pau/cuda/python_imp/Pade.py
"""
import torch as th
from tpp.pytorch.activations.pau_utils import PADEACTIVATION_Function_based
class PAU(PADEACTIVATION_Function_based):
    """Pade Activation Unit: a rational activation with learnable
    numerator/denominator coefficients.

    Parameters
    ----------
    init_coefficients : str
        Name of the coefficient preset to initialise from
    monotonic : bool
        When True, requires strictly more than one extra numerator term
        over the denominator (asserted at construction)
    """
    def __init__(
            self,
            init_coefficients="pade_optimized_leakyrelu",
            monotonic=False):
        super(PAU, self).__init__(init_coefficients=init_coefficients)
        self.monotonic = monotonic
        if self.monotonic:
            # The monotonic parameterisation needs numerator order to
            # exceed denominator order by more than one.
            assert self.n_numerator > (self.n_denominator + 1)
    def forward(self, x):
        # Evaluate the rational function with the current coefficients.
        return self.activation_function(
            x, self.weight_numerator, self.weight_denominator)
def test_pau(do_derivative=False):
    """Plot the monotonic PAU over [-10, 10], or its derivative w.r.t. x."""
    inputs = th.linspace(start=-10., end=10.).reshape(-1, 1)
    inputs.requires_grad = True
    activation = PAU(init_coefficients="pade_sigmoid_3", monotonic=True)
    outputs = activation(inputs)
    if do_derivative:
        # Differentiate the activation w.r.t. its input for plotting.
        outputs = th.autograd.grad(
            outputs, inputs, grad_outputs=th.ones_like(outputs))[0]
    import matplotlib.pyplot as plt
    plt.plot(inputs.detach().numpy(), outputs.detach().numpy())
    plt.title("d/dx act(x)" if do_derivative else "act(x)")
    plt.show()
if __name__ == "__main__":
    # Manual smoke test: plot the activation, then its derivative.
    test_pau()
    test_pau(do_derivative=True)
| 1,563 | 26.928571 | 79 | py |
neuralTPPs | neuralTPPs-master/tpp/pytorch/activations/arctan.py | import torch as th
import torch.nn as nn
class Arctan(nn.Module):
    """Element-wise inverse-tangent activation, f(x) = atan(x).

    Outputs lie in the open interval (-pi/2, pi/2).
    """
    def __init__(self):
        super(Arctan, self).__init__()
    def forward(self, x):
        # Tensor-method form of th.atan(x); identical result.
        return x.atan()
| 226 | 16.461538 | 38 | py |
more-or-let | more-or-let-master/pydrobert/mol/callbacks.py | '''Callbacks and callback-related periphery'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from csv import DictReader
from six.moves.cPickle import dump
import numpy as np
from keras.callbacks import Callback
from keras.callbacks import EarlyStopping
from keras.callbacks import History
__author__ = "Sean Robertson"
__email__ = "sdrobert@cs.toronto.edu"
__license__ = "Apache 2.0"
__copyright__ = "Copyright 2017 Sean Robertson"
__all__ = [
'ExtendedHistory',
'ExtendedEarlyStopping',
'RandomStateCheckpoint'
]
# FileNotFoundError only exists in python 3; under python 2 the probe below
# raises NameError and we alias it to IOError so the except clauses in this
# module work on both versions.
try:
    FileNotFoundError()
except NameError:
    FileNotFoundError = IOError
class ExtendedHistory(History):
    '''History callback, enhanced
    This callback keeps track of additional metadata and injects some
    information into the log dictionary.
    ``csv_path`` is used to restore the state of the ExtendedHistory
    from a previous run.
    At the beginning of training, ``ExtendedHistory`` adds the following
    info to the log (set to None if not available):
    prev_monitor : str
        The value stored in 'monitor' from previous epochs
    prev_best: float
        The value stored in 'best' from previous epochs
    prev_mode : str
        The value stored in 'mode' from previous epochs
    prev_patience : int
        The value stored in 'patience' (cast to an int) from the last
        epoch
    prev_min_delta : float
        The value stored in 'min_delta' (cast to a float) from the last
        epoch
    prev_wait : int
        The value stored in 'wait' (cast to an int) from the last epoch
    prev_training_stage : str
        The value stored in 'training_stage' from the last epoch
    training_stage : str
        The current setting of 'training_stage'
    At the end of an epoch, `ExtendedHistory` adds the following info to
    the log (set to None if not available):
    training_stage : str
        The current setting of 'training_stage'
    model_path : str
        The model path to save to, whose value is determined by
        formatting ``model_formatter`` with the values in the current
        log
    Additional entries can be added to the log by setting additional
    keyword arguments at initialization. They will be perpetuated by
    ``csv_path``
    .. warning:: Do not add any additional metadata
    Parameters
    ----------
    csv_path : str, optional
        The file to read history from
    strict : bool
        When reading the CSV, whether to sanity check the CSV
    delimiter : str
        The delimiter used to delimit fields in the csv
    model_formatter : str, optional
        The format string used when (if) writing model files
    training_stage : str
        This run's stage of training
    additional_metadata : Keyword arguments, optional
        Additional entries to be added to the log. They are considered
        static over the course of the experiment.
    '''
    def __init__(
            self, csv_path=None, strict=True, delimiter=',',
            model_formatter=None, training_stage='adam',
            **additional_metadata):
        self.strict = strict
        self.delimiter = delimiter
        self.model_formatter = model_formatter
        self.training_stage = training_stage
        self.additional_metadata = additional_metadata
        self.csv_path = csv_path
        # epoch/history are (re)built in on_train_begin
        self.epoch = None
        self.history = None
    def load_csv_history(self):
        '''Load up the csv history'''
        if self.csv_path:
            # Fold every CSV row into csv_history in nondecreasing epoch
            # order; the last (highest) epoch's values win.
            self.csv_epoch = -float('inf')
            self.csv_history = dict()
            one_row = False
            try:
                with open(self.csv_path, 'r') as file_obj:
                    reader = DictReader(file_obj, delimiter=self.delimiter)
                    for row in reader:
                        one_row = True
                        epoch = int(row.pop('epoch'))
                        if epoch < self.csv_epoch:
                            # out-of-order older row; ignore it
                            continue
                        elif self.strict and epoch == self.csv_epoch:
                            raise ValueError(
                                'Epoch {} occurs twice in csv {}'.format(
                                    epoch, self.csv_path))
                        self.csv_epoch = epoch
                        for key, value in row.items():
                            if value is None and key in self.csv_history:
                                continue  # skip missing entries
                            # cast known-numeric columns back from strings
                            if key in (
                                    'best', 'min_delta', 'loss',
                                    'val_loss', 'acc', 'val_acc', 'lr'):
                                value = float(value)
                            elif key in ('patience', 'wait'):
                                value = int(value)
                            self.csv_history[key] = value
                if not one_row:
                    raise FileNotFoundError()  # pretend the file doesn't exist
                # Strict mode: every keyword passed at init must appear in
                # the CSV with a matching value.
                for key, value in self.additional_metadata.items():
                    if self.strict and key not in self.csv_history:
                        raise ValueError(
                            'The keyword "{}" was present in initialization '
                            'but not in the csv file {}'.format(
                                key, self.csv_path))
                    act_value = self.csv_history[key]
                    if self.strict:
                        try:
                            # numeric comparison with tolerance; fall back
                            # to string equality for non-numeric metadata
                            close = np.isclose(value, act_value)
                        except TypeError:
                            close = (str(value) == str(act_value))
                        if not close:
                            raise ValueError(
                                'Expected "{}" to have the value "{}"; got '
                                '"{}" from csv {}'.format(
                                    key, value, act_value, self.csv_path))
            except FileNotFoundError:
                # no usable CSV: start from the init-time metadata only
                self.csv_history = self.additional_metadata
        else:
            self.csv_epoch = -float('inf')
            self.csv_history = self.additional_metadata
    def get_last_epoch(self):
        '''Get the last recorded epoch'''
        if self.epoch:
            return self.epoch[-1]
        else:
            # nothing recorded in memory yet; consult the CSV
            self.load_csv_history()
            return self.csv_epoch
    def get_last(self, key):
        '''Get the last recorded value of a specific property'''
        if key == 'epoch':
            return self.get_last_epoch()
        if self.epoch and key in self.history:
            # newest non-None in-memory value wins
            for value in self.history[key][::-1]:
                if value is not None:
                    return value
        self.load_csv_history()
        return self.csv_history.get(key)
    def on_train_begin(self, logs=None):
        logs = logs if logs is not None else dict()
        self.epoch = []
        self.history = dict()
        self.load_csv_history()
        # expose the previous run's early-stopping state as prev_* entries
        for key in (
                'monitor', 'best', 'mode', 'patience', 'min_delta', 'wait',
                'training_stage'):
            if key in self.csv_history:
                logs['prev_' + key] = self.csv_history[key]
        logs['training_stage'] = self.training_stage
    def on_epoch_end(self, epoch, logs=None):
        logs = logs if logs is not None else dict()
        if epoch in self.epoch:
            raise ValueError('Epoch {} occurred twice!'.format(epoch))
        # inject all logs but model_path
        if 'training_stage' in self.csv_history or self.training_stage:
            logs['training_stage'] = self.training_stage
        for key, value in self.additional_metadata.items():
            logs[key] = value
        # now handle model_path (None when no formatter is configured,
        # matching the "set to None if not available" contract above)
        if 'model_path' in self.csv_history or self.model_formatter:
            if self.model_formatter:
                logs['model_path'] = self.model_formatter.format(
                    epoch=epoch + 1, **logs)
            else:
                logs['model_path'] = self.model_formatter
        # update history
        self.epoch.append(epoch)
        for key, value in logs.items():
            self.history.setdefault(key, []).append(value)
class ExtendedEarlyStopping(EarlyStopping):
    '''Early stopping, enhanced
    This subclass of ``EarlyStopping`` puts its hyperparameters in the
    logs after every epoch. They include
    - the value monitored ("monitor")
    - the "mode", i.e. how to compare monitored values (one of "min",
      "max", or "auto")
    - the best monitored value seen ("best")
    - the minimum-magnitude difference from best that can be considered
      an improvement ("min_delta")
    - "patience", i.e. the number of epochs to wait without improvement
      before early stopping
    - the number of epochs already waited without improvement ("wait")
    When placed after ``ExtendedHistory`` in a callback list, it can
    recover its hyperparameters from previous epochs at the beginning
    of training. These will clobber whatever is specified here on
    initialization.
    Parameters
    ----------
    monitor : str
    min_delta : float
    patience : int
    verbose : int
        Verbosity mode (``verbose > 0`` is verbose)
    mode : {'min', 'max', 'auto'}
        In "min" mode, training will stop when the quantity monitored
        has stopped decreasing; in "max" mode it will stop when the
        quantity monitored has stopped increasing; in "auto" mode,
        the direction is automatically inferred from the name of the
        monitored quantity.
    reset_on_new_training_stage : bool
        If, on_train_begin, the logged "prev_training_stage" has been
        set and does not match the logged "training_stage", the early
        stopping state will be reset
    Attributes
    ----------
    best : float
    monitor : str
    patience : int
    verbose : int
    min_delta : float
    wait : int
    mode : {'min', 'max'}
    stopped_epoch : int
    reset_on_new_training_stage : bool
    hyperparams : tuple
        A sequence of names of hyperparameters that are stored to logs
        on each epoch end
    '''
    def __init__(
            self, monitor='val_loss', min_delta=0, patience=0, verbose=0,
            mode='auto', reset_on_new_training_stage=True):
        self.reset_on_new_training_stage = reset_on_new_training_stage
        # names copied into logs every epoch and restored on resume
        self.hyperparams = (
            'monitor', 'best', 'mode', 'patience', 'min_delta', 'wait')
        super(ExtendedEarlyStopping, self).__init__(
            monitor=monitor,
            min_delta=min_delta,
            patience=patience,
            verbose=verbose,
            mode=mode,
        )
        # resolve 'auto' into a concrete mode based on the comparison op
        self.mode = 'min' if self.monitor_op == np.less else 'max'
    def on_train_begin(self, logs=None):
        logs = logs if logs is not None else dict()
        super(ExtendedEarlyStopping, self).on_train_begin(logs)
        verb_message = ''
        # Resume only when a full set of prev_* hyperparams is present and
        # we are not starting a new training stage.
        if any(logs.get('prev_' + hp) is None for hp in self.hyperparams):
            verb_message += 'No record of prior early stopping. '
        elif self.reset_on_new_training_stage and (
                logs.get('prev_training_stage') is None or
                logs['prev_training_stage'] != logs.get('training_stage')):
            verb_message += 'New training stage. Resetting early stopping. '
        else:
            verb_message += 'Loading previous early stopping hyperparams. '
            for hyperparam in self.hyperparams:
                setattr(self, hyperparam, logs['prev_' + hyperparam])
            if self.mode == 'min':
                self.monitor_op = np.less
                self.min_delta *= -1  # stored as absolute value
            else:
                self.monitor_op = np.greater
            # patience already exhausted on a previous run: stop immediately
            if self.wait >= self.patience:
                self.stopped_epoch = -1
                self.model.stop_training = True
        if self.verbose > 0:
            for hyperparam in self.hyperparams:
                verb_message += '{}={}, '.format(
                    hyperparam, getattr(self, hyperparam))
            print(verb_message)
    def on_epoch_end(self, epoch, logs=None):
        logs = logs if logs is not None else dict()
        super(ExtendedEarlyStopping, self).on_epoch_end(epoch, logs=logs)
        # publish current hyperparameter state so ExtendedHistory can
        # persist it to CSV
        for hyperparam in self.hyperparams:
            logs[hyperparam] = getattr(self, hyperparam)
        # this is made negative when we monitor with 'min', but we want
        # it to look like our setting, so keep it max
        logs['min_delta'] = abs(self.min_delta)
class RandomStateCheckpoint(Callback):
    '''Pickle the numpy randomizer state at the end of every epoch

    Parameters
    ----------
    rng_formatter : str
        An expression that can be formatted with the logs that will
        dictate where the state should be saved to
    rng : numpy.random.RandomState, optional
        The ``RandomState`` object to save the state from. If not set,
        numpy's global randomizer will be used (not recommended)
    verbose : {0, 1}, optional
        0: quiet. 1: loud
    log_entry : str, optional
        If set, a log entry will be added with the formatted path of
        this epoch's state with this key
    Attributes
    ----------
    rng_formatter : str
    rng : numpy.random.RandomState or None
    verbose : {0, 1}
    log_entry : str or None
    '''
    def __init__(self, rng_formatter, rng=None, verbose=0, log_entry=None):
        super(RandomStateCheckpoint, self).__init__()
        self.rng_formatter = rng_formatter
        self.rng = rng
        self.verbose = verbose
        self.log_entry = log_entry
    def on_epoch_end(self, epoch, logs=None):
        if logs is None:
            logs = dict()
        # resolve this epoch's destination path from the log values
        rng_path = self.rng_formatter.format(epoch=epoch + 1, **logs)
        if self.log_entry:
            logs[self.log_entry] = rng_path
        # fall back to numpy's global randomizer when no rng was given
        source = np.random if self.rng is None else self.rng
        state = source.get_state()
        with open(rng_path, 'wb') as rng_file:
            dump(state, rng_file)
        if self.verbose > 0:
            print('\nEpoch {:05d}: saving rng state to {}'.format(
                epoch + 1, rng_path))
| 14,210 | 37.099196 | 79 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.