code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import json
import logging
import os
import numpy as np
import torch
from lavis.common.dist_utils import is_main_process
from lavis.common.registry import registry
from lavis.tasks.base_task import BaseTask
@registry.register_task("retrieval")
class RetrievalTask(BaseTask):
    """Image-text retrieval task: scores every image/text pair and reports
    recall@{1,5,10} in both retrieval directions."""
    def __init__(self, cfg):
        super().__init__()
        # Run config; forwarded to the model as ``task_cfg`` during scoring.
        self.cfg = cfg
    @classmethod
    def setup_task(cls, cfg):
        # Only the run section of the global config is needed by this task.
        run_cfg = cfg.run_cfg
        return cls(cfg=run_cfg)
    def evaluation(self, model, data_loader, **kwargs):
        """Compute the full similarity matrices and, on the main process
        only, convert them into retrieval metrics."""
        # score_i2t, score_t2i = model.compute_sim_matrix(model, data_loader)
        score_i2t, score_t2i = model.compute_sim_matrix(data_loader, task_cfg=self.cfg)
        if is_main_process():
            eval_result = self._report_metrics(
                score_i2t,
                score_t2i,
                data_loader.dataset.txt2img,
                data_loader.dataset.img2txt,
            )
            logging.info(eval_result)
        else:
            # Non-main ranks return no metrics.
            eval_result = None
        return eval_result
    def after_evaluation(self, val_result, **kwargs):
        # No post-processing needed; pass the metrics through unchanged.
        return val_result
    @staticmethod
    @torch.no_grad()
    def _report_metrics(scores_i2t, scores_t2i, txt2img, img2txt):
        """Turn similarity matrices into recall metrics.
        Args:
            scores_i2t: (num_images, num_texts) similarity scores.
            scores_t2i: (num_texts, num_images) similarity scores.
            txt2img: maps a text index to its ground-truth image index.
            img2txt: maps an image index to its ground-truth text indices.
        """
        # Images->Text
        ranks = np.zeros(scores_i2t.shape[0])
        for index, score in enumerate(scores_i2t):
            inds = np.argsort(score)[::-1]
            # Score
            # Rank of the best-ranked ground-truth caption for this image.
            rank = 1e20
            for i in img2txt[index]:
                tmp = np.where(inds == i)[0][0]
                if tmp < rank:
                    rank = tmp
            ranks[index] = rank
        # Compute metrics
        tr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
        tr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
        tr10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
        # Text->Images
        ranks = np.zeros(scores_t2i.shape[0])
        for index, score in enumerate(scores_t2i):
            inds = np.argsort(score)[::-1]
            ranks[index] = np.where(inds == txt2img[index])[0][0]
        # Compute metrics
        ir1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
        ir5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
        ir10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
        tr_mean = (tr1 + tr5 + tr10) / 3
        ir_mean = (ir1 + ir5 + ir10) / 3
        r_mean = (tr_mean + ir_mean) / 2
        # NOTE(review): agg_metrics equals tr_mean (text-retrieval mean only),
        # not r_mean -- presumably intentional for model selection; confirm.
        agg_metrics = (tr1 + tr5 + tr10) / 3
        eval_result = {
            "txt_r1": tr1,
            "txt_r5": tr5,
            "txt_r10": tr10,
            "txt_r_mean": tr_mean,
            "img_r1": ir1,
            "img_r5": ir5,
            "img_r10": ir10,
            "img_r_mean": ir_mean,
            "r_mean": r_mean,
            "agg_metrics": agg_metrics,
        }
        # Append metrics to the run's evaluate.txt, one JSON object per line.
        with open(
            os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a"
        ) as f:
            f.write(json.dumps(eval_result) + "\n")
        return eval_result | /salesforce-lavis-1.0.2.tar.gz/salesforce-lavis-1.0.2/lavis/tasks/retrieval.py | 0.497803 | 0.212089 | retrieval.py | pypi
import json
import os
import logging
import numpy as np
import torch
from lavis.common.dist_utils import main_process
from lavis.common.registry import registry
from lavis.tasks.base_task import BaseTask
@registry.register_task("multimodal_classification")
class MultimodalClassificationTask(BaseTask):
    """Classification over multimodal inputs; accuracy is the reported metric."""
    def __init__(self):
        super().__init__()
    def valid_step(self, model, samples):
        """Run prediction on one batch and collect per-instance records."""
        results = []
        outputs = model.predict(samples)
        predictions = outputs["predictions"]
        targets = outputs["targets"]
        # argmax over class scores -> predicted label per sample
        predictions = predictions.max(1)[1].cpu().numpy()
        targets = targets.cpu().numpy()
        # NOTE(review): self.inst_id_key is not defined in this class;
        # presumably provided by BaseTask -- confirm.
        indices = samples[self.inst_id_key]
        for pred, tgt, index in zip(predictions, targets, indices):
            if isinstance(index, torch.Tensor):
                index = index.item()
            results.append(
                {
                    self.inst_id_key: index,
                    "prediction": pred.item(),
                    "target": tgt.item(),
                }
            )
        return results
    def after_evaluation(self, val_result, split_name, epoch, **kwargs):
        """Persist per-instance results, then compute accuracy from the file."""
        eval_result_file = self.save_result(
            result=val_result,
            result_dir=registry.get_path("result_dir"),
            filename="{}_epoch{}".format(split_name, epoch),
            remove_duplicate=self.inst_id_key,
        )
        metrics = self._report_metrics(
            eval_result_file=eval_result_file, split_name=split_name
        )
        return metrics
    @main_process
    def _report_metrics(self, eval_result_file, split_name):
        """Compute accuracy from the saved result file (main process only)."""
        results = json.load(open(eval_result_file))
        predictions = np.array([res["prediction"] for res in results])
        targets = np.array([res["target"] for res in results])
        accuracy = (targets == predictions).sum() / targets.shape[0]
        metrics = {"agg_metrics": accuracy, "acc": accuracy}
        log_stats = {split_name: {k: v for k, v in metrics.items()}}
        # Append the split's metrics to evaluate.txt, one JSON object per line.
        with open(
            os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a"
        ) as f:
            f.write(json.dumps(log_stats) + "\n")
        logging.info(metrics)
        return metrics | /salesforce-lavis-1.0.2.tar.gz/salesforce-lavis-1.0.2/lavis/tasks/multimodal_classification.py | 0.607663 | 0.160759 | multimodal_classification.py | pypi
import re
from lavis.common.registry import registry
from lavis.processors.base_processor import BaseProcessor
from lavis.processors.randaugment import RandomAugment
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
import os
from itertools import chain
import numpy as np
import torch
from transformers import GPT2Tokenizer
# Special tokens added to the GPT-2 vocabulary for dialogue modeling:
# sequence delimiters, per-speaker turn markers, and video/caption
# segment-type markers.
SPECIAL_TOKENS_DICT = {
    "bos_token": "<bos>",
    "eos_token": "<eos>",
    "additional_special_tokens": ["<speaker1>", "<speaker2>", "<video>", "<cap>"],
    "pad_token": "<pad>",
}
# Same tokens as a flat list; the order matters -- it is sliced elsewhere
# (e.g. SPECIAL_TOKENS[:-2]) to unpack token ids positionally.
SPECIAL_TOKENS = [
    "<bos>",
    "<eos>",
    "<speaker1>",
    "<speaker2>",
    "<cap>",
    "<video>",
    "<pad>",
]
class GPTVideoFeatureBaseProcessor(BaseProcessor):
    """Base processor holding the names of precomputed feature sets to load.

    Args:
        visual_ft (list[str] | None): visual feature set names
            (sub-directory names on disk); defaults to ["i3d_rgb"].
        audio_ft (list[str] | None): audio feature set names;
            defaults to ["vggish"].
    """

    def __init__(self, visual_ft=None, audio_ft=None):
        # The original code used mutable list literals as defaults, which are
        # shared across all calls; use None as the sentinel instead.
        self.visual_ft = ["i3d_rgb"] if visual_ft is None else visual_ft
        self.audio_ft = ["vggish"] if audio_ft is None else audio_ft
@registry.register_processor("gpt_dialogue")
class GPTDialogueProcessor(BaseProcessor):
    """Converts a video-dialogue annotation into GPT-2 token sequences:
    input ids, per-segment token-type ids, and LM labels."""
    def __init__(self, max_turns=3, use_caption=True):
        # max_turns: number of most recent dialogue turns kept as context.
        self.max_turns = max_turns
        # use_caption: whether to prepend caption+summary text as context.
        self.use_caption = use_caption
        self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
        self.tokenizer.add_special_tokens(SPECIAL_TOKENS_DICT)
    def sample_sequence(self, caption, history, answer):
        """Assemble one training instance from tokenized segments.
        Args:
            caption: token ids of the caption context (possibly empty list).
            history: list of token-id lists, alternating question/answer turns.
            answer: token ids of the target answer.
        """
        # Unpack ids of the first five special tokens; <bos> is unused here.
        bos, eos, speaker1, speaker2, cap = self.tokenizer.convert_tokens_to_ids(
            SPECIAL_TOKENS[:-2]
        )
        instance = {}
        sequence = [caption] + history + [answer]
        # Terminate every segment with <eos>.
        sequence = [s + [eos] for s in sequence]
        instance["input_ids"] = list(chain(*sequence))
        # Caption tokens are typed <cap>; dialogue segments alternate
        # speaker1/speaker2 (segment 0 after the caption is speaker1).
        instance["token_type_ids"] = [cap] * len(sequence[0]) + [
            speaker2 if i % 2 else speaker1
            for i, s in enumerate(sequence[1:])
            for _ in s
        ]
        # Only the final (answer) segment contributes to the LM loss;
        # -1 marks positions to be ignored.
        instance["labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + sequence[-1]
        assert len(instance["input_ids"]) == len(instance["token_type_ids"])
        assert len(instance["token_type_ids"]) == len(instance["labels"])
        for k, v in instance.items():
            instance[k] = torch.Tensor(v).long()
        return instance
    def padding(self, seq, pad_token=-1):
        """Right-pad a batch of sequences; -1 selects the tokenizer's pad id."""
        if pad_token == -1:
            pad_token = self.tokenizer.pad_token_id
        padded_seq = torch.nn.utils.rnn.pad_sequence(
            seq, batch_first=True, padding_value=pad_token
        )
        return padded_seq
    def get_attention_mask(self, seq, pad_token=-1):
        # True wherever the token is not padding.
        if pad_token == -1:
            pad_token = self.tokenizer.pad_token_id
        return seq != pad_token
    def __call__(self, ann):
        """Tokenize one annotation dict (expects keys: "caption", "summary",
        "dialog", "question", "answer")."""
        if self.use_caption:
            caption = " ".join([ann["caption"], ann["summary"]])
            caption = self.tokenizer.encode(caption)
        else:
            caption = []
        dial_history = []
        # Keep only the last ``max_turns`` turns, then the current question.
        for turn in ann["dialog"][-self.max_turns :]:
            dial_history.append(turn["question"])
            dial_history.append(turn["answer"])
        dial_history.append(ann["question"])
        dial_history = [self.tokenizer.encode(t) for t in dial_history]
        answer = self.tokenizer.encode(ann["answer"])
        item = self.sample_sequence(caption, dial_history, answer)
        return item
    @classmethod
    def from_config(cls, cfg=None):
        if cfg is None:
            cfg = OmegaConf.create()
        use_caption = cfg.get("use_caption", True)
        max_turns = cfg.get("max_turns", 3)
        return cls(max_turns=max_turns, use_caption=use_caption)
@registry.register_processor("gpt_video_ft")
class GPTVideoFeatureProcessor(GPTVideoFeatureBaseProcessor):
    """Loads precomputed visual/audio features for a video and tags every
    timestep with the <video> token type."""
    def __init__(self, visual_ft, audio_ft):
        super().__init__(visual_ft, audio_ft)
        self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
        self.tokenizer.add_special_tokens(SPECIAL_TOKENS_DICT)
    def padding(self, seq):
        # Pads feature sequences with 1.0; get_attention_mask below relies
        # on this exact padding value.
        padded_seq = torch.nn.utils.rnn.pad_sequence(
            seq, batch_first=True, padding_value=1.0
        )
        return padded_seq
    def get_attention_mask(self, seq):
        # A timestep is real (non-padded) if any feature dim differs from 1.
        return torch.sum(seq != 1, dim=2) != 0
    def __call__(self, ft_root, vname):
        """Load all feature files for video ``vname``.
        Expected layout: <ft_root>/<feature_name>/<vname>.npy
        """
        all_ft = []
        for ft_name in self.visual_ft:
            ft_path = os.path.join(ft_root, ft_name, vname)
            all_ft.append(np.load(ft_path + ".npy"))
        for ft_name in self.audio_ft:
            ft_path = os.path.join(ft_root, ft_name, vname)
            all_ft.append(np.load(ft_path + ".npy"))
        # Truncate all streams to the shortest one so they align in time.
        min_len = min([len(ft) for ft in all_ft])
        # TODO: use other sampling method (e.g. uniform sampling)
        sampled_ft = [ft[:min_len] for ft in all_ft]
        # Concatenate streams along the feature dimension.
        sampled_ft = np.concatenate(sampled_ft, axis=1)
        item = {}
        item["video_fts"] = torch.Tensor(sampled_ft)
        video_type_token = self.tokenizer.convert_tokens_to_ids("<video>")
        item["token_type_ids"] = torch.Tensor(
            [video_type_token] * len(sampled_ft)
        ).long()
        return item
    @classmethod
    def from_config(cls, cfg=None):
        if cfg is None:
            cfg = OmegaConf.create()
        visual_ft = cfg.get("visual_ft", ["i3d_rgb"])
        audio_ft = cfg.get("audio_ft", ["vggish"])
        return cls(visual_ft=visual_ft, audio_ft=audio_ft) | /salesforce-lavis-1.0.2.tar.gz/salesforce-lavis-1.0.2/lavis/processors/gpt_processors.py | 0.545407 | 0.282066 | gpt_processors.py | pypi
import torch
from lavis.common.registry import registry
from lavis.datasets.data_utils import load_video
from lavis.processors import transforms_video
from lavis.processors.base_processor import BaseProcessor
from lavis.processors.randaugment import VideoRandomAugment
from lavis.processors import functional_video as F
from omegaconf import OmegaConf
from torchvision import transforms
MAX_INT = registry.get("MAX_INT")
class AlproVideoBaseProcessor(BaseProcessor):
    """Shared setup for ALPRO video processors: channel normalization and
    the number of frames to sample."""
    def __init__(self, mean=None, std=None, n_frms=MAX_INT):
        # Defaults match the CLIP image normalization constants.
        if mean is None:
            mean = (0.48145466, 0.4578275, 0.40821073)
        if std is None:
            std = (0.26862954, 0.26130258, 0.27577711)
        self.normalize = transforms_video.NormalizeVideo(mean, std)
        # Frames sampled per video; MAX_INT effectively means "no cap".
        self.n_frms = n_frms
class ToUint8(object):
    """Cast a video tensor to ``torch.uint8`` (values are truncated, not
    rescaled)."""

    def __init__(self):
        pass

    def __call__(self, tensor):
        # Pure dtype cast; callers are responsible for the value range.
        return tensor.to(torch.uint8)

    def __repr__(self):
        return self.__class__.__name__
class ToTHWC(object):
    """Rearrange a video tensor from channel-first to channel-last layout.

    Args:
        clip (torch.tensor, dtype=torch.uint8): Size is (C, T, H, W)
    Return:
        clip (torch.tensor): Size is (T, H, W, C)
    """

    def __init__(self):
        pass

    def __call__(self, tensor):
        # Move the channel axis to the end: (C, T, H, W) -> (T, H, W, C).
        return tensor.permute(1, 2, 3, 0)

    def __repr__(self):
        return self.__class__.__name__
class ResizeVideo(object):
    """Spatially resize every frame of a clip to ``target_size``."""

    def __init__(self, target_size, interpolation_mode="bilinear"):
        self.target_size = target_size
        self.interpolation_mode = interpolation_mode

    def __call__(self, clip):
        """Resize a (C, T, H, W) clip to (C, T, *target_size)."""
        return F.resize(clip, self.target_size, self.interpolation_mode)

    def __repr__(self):
        return f"{self.__class__.__name__}(resize_size={self.target_size})"
@registry.register_processor("alpro_video_train")
class AlproVideoTrainProcessor(AlproVideoBaseProcessor):
    """Training-time video pipeline: random resized crop, horizontal flip,
    RandAugment, then uint8 -> float tensor conversion and normalization."""
    def __init__(
        self,
        image_size=384,
        mean=None,
        std=None,
        min_scale=0.5,
        max_scale=1.0,
        n_frms=MAX_INT,
    ):
        super().__init__(mean=mean, std=std, n_frms=n_frms)
        self.image_size = image_size
        self.transform = transforms.Compose(
            [
                # Video size is (C, T, H, W)
                transforms_video.RandomResizedCropVideo(
                    image_size,
                    scale=(min_scale, max_scale),
                    interpolation_mode="bicubic",
                ),
                transforms_video.RandomHorizontalFlipVideo(),
                ToTHWC(),  # C, T, H, W -> T, H, W, C
                VideoRandomAugment(
                    2,
                    5,
                    augs=[
                        "Identity",
                        "AutoContrast",
                        "Brightness",
                        "Sharpness",
                        "Equalize",
                        "ShearX",
                        "ShearY",
                        "TranslateX",
                        "TranslateY",
                        "Rotate",
                    ],
                ),
                ToUint8(),
                transforms_video.ToTensorVideo(),  # T, H, W, C -> C, T, H, W
                self.normalize,
            ]
        )
    def __call__(self, vpath):
        """
        Args:
            vpath (str): path of the video file to load.
        Returns:
            torch.tensor: video clip after transforms. Size is (C, T, size, size).
        """
        # "headtail" sampling: frames drawn from the beginning and end of
        # the video -- see load_video for the exact semantics.
        clip = load_video(
            video_path=vpath,
            n_frms=self.n_frms,
            height=self.image_size,
            width=self.image_size,
            sampling="headtail",
        )
        return self.transform(clip)
    @classmethod
    def from_config(cls, cfg=None):
        if cfg is None:
            cfg = OmegaConf.create()
        # NOTE(review): config default (256) differs from the __init__
        # default (384) -- presumably intentional, but worth confirming.
        image_size = cfg.get("image_size", 256)
        mean = cfg.get("mean", None)
        std = cfg.get("std", None)
        min_scale = cfg.get("min_scale", 0.5)
        max_scale = cfg.get("max_scale", 1.0)
        n_frms = cfg.get("n_frms", MAX_INT)
        return cls(
            image_size=image_size,
            mean=mean,
            std=std,
            min_scale=min_scale,
            max_scale=max_scale,
            n_frms=n_frms,
        )
@registry.register_processor("alpro_video_eval")
class AlproVideoEvalProcessor(AlproVideoBaseProcessor):
    """Evaluation-time video pipeline: deterministic tensor conversion and
    normalization only (resizing happens during decoding)."""
    def __init__(self, image_size=256, mean=None, std=None, n_frms=MAX_INT):
        super().__init__(mean=mean, std=std, n_frms=n_frms)
        self.image_size = image_size
        # Input video size is (C, T, H, W)
        self.transform = transforms.Compose(
            [
                # frames will be resized during decord loading.
                ToUint8(),  # C, T, H, W
                ToTHWC(),  # T, H, W, C
                transforms_video.ToTensorVideo(),  # C, T, H, W
                self.normalize,  # C, T, H, W
            ]
        )
    def __call__(self, vpath):
        """
        Args:
            vpath (str): path of the video file to load.
        Returns:
            torch.tensor: video clip after transforms. Size is (C, T, size, size).
        """
        clip = load_video(
            video_path=vpath,
            n_frms=self.n_frms,
            height=self.image_size,
            width=self.image_size,
        )
        return self.transform(clip)
    @classmethod
    def from_config(cls, cfg=None):
        if cfg is None:
            cfg = OmegaConf.create()
        image_size = cfg.get("image_size", 256)
        mean = cfg.get("mean", None)
        std = cfg.get("std", None)
        n_frms = cfg.get("n_frms", MAX_INT)
        return cls(image_size=image_size, mean=mean, std=std, n_frms=n_frms) | /salesforce-lavis-1.0.2.tar.gz/salesforce-lavis-1.0.2/lavis/processors/alpro_processors.py | 0.875415 | 0.200871 | alpro_processors.py | pypi
import numbers
import random
from torchvision.transforms import (
RandomCrop,
RandomResizedCrop,
)
import lavis.processors.functional_video as F
# Public API of this module: the video-clip counterparts of the standard
# torchvision image transforms.
__all__ = [
    "RandomCropVideo",
    "RandomResizedCropVideo",
    "CenterCropVideo",
    "NormalizeVideo",
    "ToTensorVideo",
    "RandomHorizontalFlipVideo",
]
class RandomCropVideo(RandomCrop):
    """Randomly crop a video clip to a fixed spatial size; window sampling
    is delegated to the inherited torchvision ``RandomCrop.get_params``."""
    def __init__(self, size):
        # Intentionally does not call super().__init__: only ``size`` is
        # needed, normalized to an (h, w) tuple.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
    def __call__(self, clip):
        """
        Args:
            clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        Returns:
            torch.tensor: randomly cropped/resized video clip.
            size is (C, T, OH, OW)
        """
        # get_params is inherited from torchvision's RandomCrop.
        i, j, h, w = self.get_params(clip, self.size)
        return F.crop(clip, i, j, h, w)
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(size={self.size})"
class RandomResizedCropVideo(RandomResizedCrop):
    """Randomly crop a scale/aspect-jittered window from a clip and resize
    it to ``size``; sampling is delegated to the inherited torchvision
    ``RandomResizedCrop.get_params``."""
    def __init__(
        self,
        size,
        scale=(0.08, 1.0),
        ratio=(3.0 / 4.0, 4.0 / 3.0),
        interpolation_mode="bilinear",
    ):
        # Intentionally does not call super().__init__; normalizes ``size``
        # to an (h, w) tuple and stores the sampling parameters.
        if isinstance(size, tuple):
            if len(size) != 2:
                raise ValueError(
                    f"size should be tuple (height, width), instead got {size}"
                )
            self.size = size
        else:
            self.size = (size, size)
        self.interpolation_mode = interpolation_mode
        self.scale = scale
        self.ratio = ratio
    def __call__(self, clip):
        """
        Args:
            clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        Returns:
            torch.tensor: randomly cropped/resized video clip.
            size is (C, T, H, W)
        """
        i, j, h, w = self.get_params(clip, self.scale, self.ratio)
        return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode)
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})"
class CenterCropVideo:
    """Crop the spatial center of a video clip.

    Args:
        crop_size (int or tuple): target (height, width); an int is
            expanded to a square crop.
    """

    def __init__(self, crop_size):
        if isinstance(crop_size, numbers.Number):
            crop_size = (int(crop_size), int(crop_size))
        self.crop_size = crop_size

    def __call__(self, clip):
        """Return the central (C, T, *crop_size) region of a (C, T, H, W)
        clip."""
        return F.center_crop(clip, self.crop_size)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(crop_size={self.crop_size})"
class NormalizeVideo:
    """
    Normalize the video clip by mean subtraction and division by standard deviation
    Args:
        mean (3-tuple): pixel RGB mean
        std (3-tuple): pixel RGB standard deviation
        inplace (boolean): whether do in-place normalization
    """

    def __init__(self, mean, std, inplace=False):
        self.mean = mean
        self.std = std
        self.inplace = inplace

    def __call__(self, clip):
        # Delegates to the functional implementation; clip is (C, T, H, W).
        return F.normalize(clip, self.mean, self.std, self.inplace)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(mean={self.mean}, std={self.std}, inplace={self.inplace})"
class ToTensorVideo:
    """Cast a uint8 (T, H, W, C) clip to a float (C, T, H, W) tensor with
    values scaled into [0, 1]."""

    def __init__(self):
        pass

    def __call__(self, clip):
        # F.to_tensor performs the dtype check, the permute, and the /255
        # scaling.
        return F.to_tensor(clip)

    def __repr__(self) -> str:
        return self.__class__.__name__
class RandomHorizontalFlipVideo:
    """
    Flip the video clip along the horizonal direction with a given probability
    Args:
        p (float): probability of the clip being flipped. Default value is 0.5
    """
    def __init__(self, p=0.5):
        self.p = p
    def __call__(self, clip):
        """
        Args:
            clip (torch.tensor): Size is (C, T, H, W)
        Return:
            clip (torch.tensor): Size is (C, T, H, W)
        """
        # Flip with probability p; otherwise return the clip untouched.
        if random.random() < self.p:
            clip = F.hflip(clip)
        return clip
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(p={self.p})" | /salesforce-lavis-1.0.2.tar.gz/salesforce-lavis-1.0.2/lavis/processors/transforms_video.py | 0.87289 | 0.220217 | transforms_video.py | pypi
import re
from lavis.common.registry import registry
from lavis.processors.base_processor import BaseProcessor
from lavis.processors.randaugment import RandomAugment
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
class BlipImageBaseProcessor(BaseProcessor):
    """Base for BLIP image processors; sets up per-channel normalization
    (defaults match the CLIP normalization constants)."""
    def __init__(self, mean=None, std=None):
        if mean is None:
            mean = (0.48145466, 0.4578275, 0.40821073)
        if std is None:
            std = (0.26862954, 0.26130258, 0.27577711)
        self.normalize = transforms.Normalize(mean, std)
@registry.register_processor("blip_caption")
class BlipCaptionProcessor(BaseProcessor):
    """Caption text processor: optional prompt prefix plus light cleanup
    (lower-casing, punctuation removal, whitespace collapsing, truncation)."""

    def __init__(self, prompt="", max_words=50):
        self.prompt = prompt
        self.max_words = max_words

    def __call__(self, caption):
        # Prepend the (possibly empty) prompt to the normalized caption.
        return self.prompt + self.pre_caption(caption)

    @classmethod
    def from_config(cls, cfg=None):
        cfg = OmegaConf.create() if cfg is None else cfg
        return cls(prompt=cfg.get("prompt", ""), max_words=cfg.get("max_words", 50))

    def pre_caption(self, caption):
        """Normalize a raw caption string."""
        text = re.sub(
            r"([.!\"()*#:;~])",
            " ",
            caption.lower(),
        )
        text = re.sub(
            r"\s{2,}",
            " ",
            text,
        )
        text = text.rstrip("\n").strip(" ")
        # truncate to at most ``max_words`` space-separated tokens
        words = text.split(" ")
        if len(words) > self.max_words:
            text = " ".join(words[: self.max_words])
        return text
@registry.register_processor("blip_question")
class BlipQuestionProcessor(BaseProcessor):
    """Question text processor: lower-case, strip punctuation, truncate."""

    def __init__(self, max_words=50):
        self.max_words = max_words

    def __call__(self, question):
        return self.pre_question(question)

    @classmethod
    def from_config(cls, cfg=None):
        cfg = OmegaConf.create() if cfg is None else cfg
        return cls(max_words=cfg.get("max_words", 50))

    def pre_question(self, question):
        """Normalize a raw question string."""
        text = re.sub(
            r"([.!\"()*#:;~])",
            "",
            question.lower(),
        )
        text = text.rstrip(" ")
        # truncate to at most ``max_words`` space-separated tokens
        tokens = text.split(" ")
        if len(tokens) > self.max_words:
            text = " ".join(tokens[: self.max_words])
        return text
@registry.register_processor("blip_image_train")
class BlipImageTrainProcessor(BlipImageBaseProcessor):
    """Training-time BLIP image pipeline: random resized crop, horizontal
    flip, RandAugment, tensor conversion, normalization."""
    def __init__(
        self, image_size=384, mean=None, std=None, min_scale=0.5, max_scale=1.0
    ):
        super().__init__(mean=mean, std=std)
        self.transform = transforms.Compose(
            [
                transforms.RandomResizedCrop(
                    image_size,
                    scale=(min_scale, max_scale),
                    interpolation=InterpolationMode.BICUBIC,
                ),
                transforms.RandomHorizontalFlip(),
                # 2 ops per image at magnitude 5, from the listed op set.
                RandomAugment(
                    2,
                    5,
                    isPIL=True,
                    augs=[
                        "Identity",
                        "AutoContrast",
                        "Brightness",
                        "Sharpness",
                        "Equalize",
                        "ShearX",
                        "ShearY",
                        "TranslateX",
                        "TranslateY",
                        "Rotate",
                    ],
                ),
                transforms.ToTensor(),
                self.normalize,
            ]
        )
    def __call__(self, item):
        # item: a PIL image (RandomAugment is configured with isPIL=True).
        return self.transform(item)
    @classmethod
    def from_config(cls, cfg=None):
        if cfg is None:
            cfg = OmegaConf.create()
        image_size = cfg.get("image_size", 384)
        mean = cfg.get("mean", None)
        std = cfg.get("std", None)
        min_scale = cfg.get("min_scale", 0.5)
        max_scale = cfg.get("max_scale", 1.0)
        return cls(
            image_size=image_size,
            mean=mean,
            std=std,
            min_scale=min_scale,
            max_scale=max_scale,
        )
@registry.register_processor("blip_image_eval")
class BlipImageEvalProcessor(BlipImageBaseProcessor):
    """Evaluation-time BLIP image pipeline: deterministic resize, tensor
    conversion, normalization."""
    def __init__(self, image_size=384, mean=None, std=None):
        super().__init__(mean=mean, std=std)
        self.transform = transforms.Compose(
            [
                transforms.Resize(
                    (image_size, image_size), interpolation=InterpolationMode.BICUBIC
                ),
                transforms.ToTensor(),
                self.normalize,
            ]
        )
    def __call__(self, item):
        # item: a PIL image; returns a normalized (C, H, W) tensor.
        return self.transform(item)
    @classmethod
    def from_config(cls, cfg=None):
        if cfg is None:
            cfg = OmegaConf.create()
        image_size = cfg.get("image_size", 384)
        mean = cfg.get("mean", None)
        std = cfg.get("std", None)
        return cls(image_size=image_size, mean=mean, std=std)
@registry.register_processor("blip2_image_train")
class Blip2ImageTrainProcessor(BlipImageBaseProcessor):
    """Training-time BLIP-2 image pipeline: like the BLIP one but without
    RandAugment and with a 364px default size."""
    def __init__(
        self, image_size=364, mean=None, std=None, min_scale=0.5, max_scale=1.0
    ):
        super().__init__(mean=mean, std=std)
        self.transform = transforms.Compose(
            [
                transforms.RandomResizedCrop(
                    image_size,
                    scale=(min_scale, max_scale),
                    interpolation=InterpolationMode.BICUBIC,
                ),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                self.normalize,
            ]
        )
    def __call__(self, item):
        # item: a PIL image; returns a normalized (C, H, W) tensor.
        return self.transform(item)
    @classmethod
    def from_config(cls, cfg=None):
        if cfg is None:
            cfg = OmegaConf.create()
        image_size = cfg.get("image_size", 364)
        mean = cfg.get("mean", None)
        std = cfg.get("std", None)
        min_scale = cfg.get("min_scale", 0.5)
        max_scale = cfg.get("max_scale", 1.0)
        return cls(
            image_size=image_size,
            mean=mean,
            std=std,
            min_scale=min_scale,
            max_scale=max_scale,
        ) | /salesforce-lavis-1.0.2.tar.gz/salesforce-lavis-1.0.2/lavis/processors/blip_processors.py | 0.798933 | 0.177811 | blip_processors.py | pypi
import warnings
import torch
def _is_tensor_video_clip(clip):
if not torch.is_tensor(clip):
raise TypeError("clip should be Tensor. Got %s" % type(clip))
if not clip.ndimension() == 4:
raise ValueError("clip should be 4D. Got %dD" % clip.dim())
return True
def crop(clip, i, j, h, w):
    """Slice the (i:i+h, j:j+w) spatial window out of a (C, T, H, W) clip."""
    if clip.dim() != 4:
        raise ValueError("clip should be a 4D tensor")
    return clip[..., i : i + h, j : j + w]
def resize(clip, target_size, interpolation_mode):
    """Spatially resize a (C, T, H, W) clip to ``target_size`` = (h, w)."""
    if len(target_size) != 2:
        raise ValueError(
            f"target size should be tuple (height, width), instead got {target_size}"
        )
    # The leading dim acts as the batch dim for interpolate.
    return torch.nn.functional.interpolate(
        clip, size=target_size, mode=interpolation_mode, align_corners=False
    )
def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"):
    """
    Do spatial cropping and resizing to the video clip
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
        i (int): i in (i,j) i.e coordinates of the upper left corner.
        j (int): j in (i,j) i.e coordinates of the upper left corner.
        h (int): Height of the cropped region.
        w (int): Width of the cropped region.
        size (tuple(int, int)): height and width of resized clip
    Returns:
        clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    # Crop first, then resize the cropped window.
    return resize(crop(clip, i, j, h, w), size, interpolation_mode)
def center_crop(clip, crop_size):
    """Crop the spatial center of a (C, T, H, W) clip to ``crop_size``."""
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    h, w = clip.size(-2), clip.size(-1)
    th, tw = crop_size
    if h < th or w < tw:
        raise ValueError("height and width must be no smaller than crop_size")
    # Round the offsets so the crop is as centered as possible.
    top = int(round((h - th) / 2.0))
    left = int(round((w - tw) / 2.0))
    return crop(clip, top, left, th, tw)
def to_tensor(clip):
    """
    Convert tensor data type from uint8 to float, divide value by 255.0 and
    permute the dimensions of clip tensor
    Args:
        clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
    Return:
        clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
    """
    # Validation inlined from _is_tensor_video_clip (same exceptions).
    if not torch.is_tensor(clip):
        raise TypeError("clip should be Tensor. Got %s" % type(clip))
    if not clip.ndimension() == 4:
        raise ValueError("clip should be 4D. Got %dD" % clip.dim())
    if not clip.dtype == torch.uint8:
        raise TypeError(
            "clip tensor should have data type uint8. Got %s" % str(clip.dtype)
        )
    return clip.float().permute(3, 0, 1, 2) / 255.0
def normalize(clip, mean, std, inplace=False):
    """
    Args:
        clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
        mean (tuple): pixel RGB mean. Size is (3)
        std (tuple): pixel standard deviation. Size is (3)
        inplace (bool): mutate ``clip`` instead of cloning it first
    Returns:
        normalized clip (torch.tensor): Size is (C, T, H, W)
    """
    # Validation inlined from _is_tensor_video_clip (same exceptions).
    if not torch.is_tensor(clip):
        raise TypeError("clip should be Tensor. Got %s" % type(clip))
    if not clip.ndimension() == 4:
        raise ValueError("clip should be 4D. Got %dD" % clip.dim())
    if not inplace:
        clip = clip.clone()
    mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device)
    std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device)
    # Broadcast the per-channel stats over the (T, H, W) dimensions.
    clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])
    return clip
def hflip(clip):
    """
    Args:
        clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
    Returns:
        flipped clip (torch.tensor): Size is (C, T, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    # Flip along the width (last) dimension.
    return clip.flip(-1) | /salesforce-lavis-1.0.2.tar.gz/salesforce-lavis-1.0.2/lavis/processors/functional_video.py | 0.900612 | 0.551272 | functional_video.py | pypi
import cv2
import numpy as np
import torch
## aug functions
def identity_func(img):
    """No-op augmentation: return the image unchanged."""
    return img
def autocontrast_func(img, cutoff=0):
    """
    same output as PIL.ImageOps.autocontrast
    Per channel, stretch values linearly so that (after optionally ignoring
    ``cutoff`` percent of the histogram mass at each end) the darkest value
    maps to 0 and the brightest to 255.
    """
    n_bins = 256
    def tune_channel(ch):
        n = ch.size
        cut = cutoff * n // 100
        if cut == 0:
            # No clipping requested: use the true min/max of the channel.
            high, low = ch.max(), ch.min()
        else:
            # Find the lowest/highest bins after discarding ``cut`` pixels
            # from each end of the histogram.
            hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
            low = np.argwhere(np.cumsum(hist) > cut)
            low = 0 if low.shape[0] == 0 else low[0]
            high = np.argwhere(np.cumsum(hist[::-1]) > cut)
            high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0]
        if high <= low:
            # Degenerate range: identity lookup table.
            table = np.arange(n_bins)
        else:
            scale = (n_bins - 1) / (high - low)
            offset = -low * scale
            table = np.arange(n_bins) * scale + offset
            table[table < 0] = 0
            table[table > n_bins - 1] = n_bins - 1
        table = table.clip(0, 255).astype(np.uint8)
        return table[ch]
    channels = [tune_channel(ch) for ch in cv2.split(img)]
    out = cv2.merge(channels)
    return out
def equalize_func(img):
    """
    same output as PIL.ImageOps.equalize
    PIL's implementation is different from cv2.equalize
    Per channel, build PIL's cumulative-histogram lookup table instead of
    using cv2.equalizeHist.
    """
    n_bins = 256
    def tune_channel(ch):
        hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
        non_zero_hist = hist[hist != 0].reshape(-1)
        step = np.sum(non_zero_hist[:-1]) // (n_bins - 1)
        if step == 0:
            # Nearly constant channel: leave it unchanged.
            return ch
        n = np.empty_like(hist)
        n[0] = step // 2
        n[1:] = hist[:-1]
        table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8)
        return table[ch]
    channels = [tune_channel(ch) for ch in cv2.split(img)]
    out = cv2.merge(channels)
    return out
def rotate_func(img, degree, fill=(0, 0, 0)):
    """
    like PIL, rotate by degree, not radians
    Rotates about the image center; ``fill`` colors the exposed borders.
    """
    H, W = img.shape[0], img.shape[1]
    center = W / 2, H / 2
    M = cv2.getRotationMatrix2D(center, degree, 1)
    out = cv2.warpAffine(img, M, (W, H), borderValue=fill)
    return out
def solarize_func(img, thresh=128):
    """
    Same output as PIL.ImageOps.solarize: invert all pixel values that are
    >= ``thresh``. (The previous docstring incorrectly said "posterize".)

    Args:
        img (np.ndarray, uint8): input image.
        thresh (int): values below this are kept; the rest are inverted.
    """
    # 256-entry lookup table applied via fancy indexing.
    table = np.array([el if el < thresh else 255 - el for el in range(256)])
    table = table.clip(0, 255).astype(np.uint8)
    out = table[img]
    return out
def color_func(img, factor):
    """
    same output as PIL.ImageEnhance.Color
    Blends the image with its luminance-weighted grayscale version;
    factor=0 gives grayscale, factor=1 gives the original image.
    """
    ## implementation according to PIL definition, quite slow
    # degenerate = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis]
    # out = blend(degenerate, img, factor)
    # M = (
    #     np.eye(3) * factor
    #     + np.float32([0.114, 0.587, 0.299]).reshape(3, 1) * (1. - factor)
    # )[np.newaxis, np.newaxis, :]
    # Single matrix that folds the grayscale blend into one matmul;
    # channel weights are in BGR order (0.114, 0.587, 0.299).
    M = np.float32(
        [[0.886, -0.114, -0.114], [-0.587, 0.413, -0.587], [-0.299, -0.299, 0.701]]
    ) * factor + np.float32([[0.114], [0.587], [0.299]])
    out = np.matmul(img, M).clip(0, 255).astype(np.uint8)
    return out
def contrast_func(img, factor):
    """
    same output as PIL.ImageEnhance.Contrast
    Scales pixel values away from (factor > 1) or toward (factor < 1) the
    luminance-weighted mean of the image.
    """
    gray_mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299]))
    lut = np.array([(v - gray_mean) * factor + gray_mean for v in range(256)])
    lut = lut.clip(0, 255).astype(np.uint8)
    return lut[img]
def brightness_func(img, factor):
    """
    Same output as PIL.ImageEnhance.Brightness: scale all pixel values by
    ``factor`` (0 gives black, 1 the original image). The previous
    docstring incorrectly said "Contrast".
    """
    # 256-entry lookup table applied via fancy indexing.
    table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8)
    out = table[img]
    return out
def sharpness_func(img, factor):
    """
    The differences the this result and PIL are all on the 4 boundaries, the center
    areas are same
    Blends the image with a smoothed version of itself; factor=0 gives the
    smoothed image, factor=1 the original, >1 sharpens.
    """
    # PIL's 3x3 smoothing kernel (center weight 5, sum 13).
    kernel = np.ones((3, 3), dtype=np.float32)
    kernel[1][1] = 5
    kernel /= 13
    degenerate = cv2.filter2D(img, -1, kernel)
    if factor == 0.0:
        out = degenerate
    elif factor == 1.0:
        out = img
    else:
        out = img.astype(np.float32)
        # Blend only the interior; the 1-pixel border keeps original values.
        degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :]
        out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate)
        out = out.astype(np.uint8)
    return out
def shear_x_func(img, factor, fill=(0, 0, 0)):
    """Shear horizontally by ``factor`` via an affine warp; ``fill`` colors
    the exposed borders."""
    H, W = img.shape[0], img.shape[1]
    M = np.float32([[1, factor, 0], [0, 1, 0]])
    out = cv2.warpAffine(
        img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
    ).astype(np.uint8)
    return out
def translate_x_func(img, offset, fill=(0, 0, 0)):
    """
    same output as PIL.Image.transform
    Shift the image left by ``offset`` pixels; ``fill`` colors the border.
    """
    H, W = img.shape[0], img.shape[1]
    M = np.float32([[1, 0, -offset], [0, 1, 0]])
    out = cv2.warpAffine(
        img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
    ).astype(np.uint8)
    return out
def translate_y_func(img, offset, fill=(0, 0, 0)):
    """
    same output as PIL.Image.transform
    Shift the image up by ``offset`` pixels; ``fill`` colors the border.
    """
    H, W = img.shape[0], img.shape[1]
    M = np.float32([[1, 0, 0], [0, 1, -offset]])
    out = cv2.warpAffine(
        img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
    ).astype(np.uint8)
    return out
def posterize_func(img, bits):
    """
    same output as PIL.ImageOps.posterize
    Keep only the top ``bits`` bits of each channel value.
    """
    mask = np.uint8(255 << (8 - bits))
    return np.bitwise_and(img, mask)
def shear_y_func(img, factor, fill=(0, 0, 0)):
    """Shear vertically by ``factor`` via an affine warp; ``fill`` colors
    the exposed borders."""
    H, W = img.shape[0], img.shape[1]
    M = np.float32([[1, 0, 0], [factor, 1, 0]])
    out = cv2.warpAffine(
        img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
    ).astype(np.uint8)
    return out
def cutout_func(img, pad_size, replace=(0, 0, 0)):
    """Erase a randomly centered square (side ~= pad_size) and fill it with
    ``replace``; the box is clipped at the image borders."""
    replace = np.array(replace, dtype=np.uint8)
    H, W = img.shape[0], img.shape[1]
    # Random box center, uniform over the image.
    rh, rw = np.random.random(2)
    pad_size = pad_size // 2
    ch, cw = int(rh * H), int(rw * W)
    x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H)
    y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W)
    out = img.copy()
    out[x1:x2, y1:y2, :] = replace
    return out
### level to args
def enhance_level_to_args(MAX_LEVEL):
    """Map an integer level to the (factor,) argument of the enhance ops
    (Color/Contrast/Brightness/Sharpness); factor ranges over [0.1, 1.9]."""
    def level_to_args(level):
        factor = (level / MAX_LEVEL) * 1.8 + 0.1
        return (factor,)
    return level_to_args
def shear_level_to_args(MAX_LEVEL, replace_value):
    """Map a level to a randomly signed shear factor in [-0.3, 0.3], plus
    the border fill color."""
    def level_to_args(level):
        magnitude = (level / MAX_LEVEL) * 0.3
        # Randomly flip the sign so both shear directions are sampled.
        if np.random.random() > 0.5:
            magnitude = -magnitude
        return (magnitude, replace_value)
    return level_to_args
def translate_level_to_args(translate_const, MAX_LEVEL, replace_value):
    """Map a level to a randomly signed pixel offset in
    [-translate_const, translate_const], plus the border fill color."""
    def level_to_args(level):
        offset = (level / MAX_LEVEL) * float(translate_const)
        # Randomly flip the sign so both directions are sampled.
        if np.random.random() > 0.5:
            offset = -offset
        return (offset, replace_value)
    return level_to_args
def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value):
    """Map a level to the cutout box size (scaled by ``cutout_const``) plus
    the fill color."""
    def level_to_args(level):
        pad = int((level / MAX_LEVEL) * cutout_const)
        return (pad, replace_value)
    return level_to_args
def solarize_level_to_args(MAX_LEVEL):
    """Map a level to the solarize threshold in [0, 256]."""
    def level_to_args(level):
        thresh = int((level / MAX_LEVEL) * 256)
        return (thresh,)
    return level_to_args
def none_level_to_args(level):
    """Ops with no magnitude argument ignore ``level`` entirely."""
    return ()
def posterize_level_to_args(MAX_LEVEL):
    """Map a level to the number of bits kept by posterize (0..4)."""
    def level_to_args(level):
        bits = int((level / MAX_LEVEL) * 4)
        return (bits,)
    return level_to_args
def rotate_level_to_args(MAX_LEVEL, replace_value):
    """Map a level to a randomly signed rotation angle in [-30, 30] degrees,
    plus the border fill color."""
    def level_to_args(level):
        angle = (level / MAX_LEVEL) * 30
        # Randomly flip the sign so both rotation directions are sampled.
        if np.random.random() < 0.5:
            angle = -angle
        return (angle, replace_value)
    return level_to_args
# Op name -> image transform callable. Each op takes an ndarray image plus
# the argument tuple produced by the matching entry in ``arg_dict``.
func_dict = {
    "Identity": identity_func,
    "AutoContrast": autocontrast_func,
    "Equalize": equalize_func,
    "Rotate": rotate_func,
    "Solarize": solarize_func,
    "Color": color_func,
    "Contrast": contrast_func,
    "Brightness": brightness_func,
    "Sharpness": sharpness_func,
    "ShearX": shear_x_func,
    "TranslateX": translate_x_func,
    "TranslateY": translate_y_func,
    "Posterize": posterize_func,
    "ShearY": shear_y_func,
}
# Shared hyper-parameters for the level-mapping closures below.
translate_const = 10      # max translation offset in pixels
MAX_LEVEL = 10            # magnitude levels are interpreted on a 0..10 scale
replace_value = (128, 128, 128)  # grey fill for pixels exposed by warps

# Op name -> closure mapping an integer level (0..MAX_LEVEL) to the
# argument tuple expected by the matching entry in ``func_dict``.
arg_dict = {
    "Identity": none_level_to_args,
    "AutoContrast": none_level_to_args,
    "Equalize": none_level_to_args,
    "Rotate": rotate_level_to_args(MAX_LEVEL, replace_value),
    "Solarize": solarize_level_to_args(MAX_LEVEL),
    "Color": enhance_level_to_args(MAX_LEVEL),
    "Contrast": enhance_level_to_args(MAX_LEVEL),
    "Brightness": enhance_level_to_args(MAX_LEVEL),
    "Sharpness": enhance_level_to_args(MAX_LEVEL),
    "ShearX": shear_level_to_args(MAX_LEVEL, replace_value),
    "TranslateX": translate_level_to_args(translate_const, MAX_LEVEL, replace_value),
    "TranslateY": translate_level_to_args(translate_const, MAX_LEVEL, replace_value),
    "Posterize": posterize_level_to_args(MAX_LEVEL),
    "ShearY": shear_level_to_args(MAX_LEVEL, replace_value),
}
class RandomAugment(object):
    """RandAugment for still images: samples ``N`` ops per call, each
    applied with probability 0.5 at magnitude ``M``.

    Args:
        N (int): number of ops sampled per call (with replacement).
        M (int): magnitude level passed to each op (0..MAX_LEVEL).
        isPIL (bool): if True, convert the incoming PIL image to ndarray.
        augs (list[str] | None): op names to sample from; falsy values
            (None or an empty list, as before) select all ops registered
            in ``arg_dict``.
    """

    def __init__(self, N=2, M=10, isPIL=False, augs=None):
        self.N = N
        self.M = M
        self.isPIL = isPIL
        # ``None`` sentinel instead of the previous mutable default
        # ``augs=[]``, which was shared across all instances. An empty
        # list still falls back to the full op set, exactly as before.
        self.augs = augs if augs else list(arg_dict.keys())

    def get_random_ops(self):
        """Sample N (name, prob, level) triples (with replacement)."""
        sampled_ops = np.random.choice(self.augs, self.N)
        return [(op, 0.5, self.M) for op in sampled_ops]

    def __call__(self, img):
        if self.isPIL:
            img = np.array(img)
        ops = self.get_random_ops()
        for name, prob, level in ops:
            # Each sampled op is independently skipped with probability 0.5.
            if np.random.random() > prob:
                continue
            args = arg_dict[name](level)
            img = func_dict[name](img, *args)
        return img
class VideoRandomAugment(object):
    """RandAugment applied frame-wise to a video tensor.

    Args:
        N (int): number of distinct ops sampled per video (no replacement).
        M (int): magnitude level passed to each op.
        p (float): per-op skip probability (an op is applied when
            random() > p).
        tensor_in_tensor_out (bool): if True, expects a torch tensor of
            shape (b, h, w, c) and returns a float tensor.
        augs (list[str] | None): op names to sample from; falsy values
            (None or an empty list, as before) select all ops registered
            in ``arg_dict``.
    """

    def __init__(self, N=2, M=10, p=0.0, tensor_in_tensor_out=True, augs=None):
        self.N = N
        self.M = M
        self.p = p
        self.tensor_in_tensor_out = tensor_in_tensor_out
        # ``None`` sentinel instead of the previous shared mutable default
        # ``augs=[]``; behavior for callers is unchanged.
        self.augs = augs if augs else list(arg_dict.keys())

    def get_random_ops(self):
        """Sample N distinct (name, level) op pairs."""
        sampled_ops = np.random.choice(self.augs, self.N, replace=False)
        return [(op, self.M) for op in sampled_ops]

    def __call__(self, frames):
        assert (
            frames.shape[-1] == 3
        ), "Expecting last dimension for 3-channels RGB (b, h, w, c)."

        if self.tensor_in_tensor_out:
            frames = frames.numpy().astype(np.uint8)

        num_frames = frames.shape[0]

        # The same sampled op list and the same apply/skip mask object are
        # replicated for every frame, so all frames of one video receive
        # an identical augmentation.
        ops = num_frames * [self.get_random_ops()]
        apply_or_not = num_frames * [np.random.random(size=self.N) > self.p]

        frames = torch.stack(
            list(map(self._aug, frames, ops, apply_or_not)), dim=0
        ).float()

        return frames

    def _aug(self, img, ops, apply_or_not):
        """Apply the selected ops to one frame (ndarray -> torch tensor)."""
        for i, (name, level) in enumerate(ops):
            if not apply_or_not[i]:
                continue
            args = arg_dict[name](level)
            img = func_dict[name](img, *args)
        return torch.from_numpy(img)
if __name__ == "__main__":
    # Quick smoke test of the augmentation pipeline.
    a = RandomAugment()
    # NOTE(review): randn yields float64 values roughly in [-3, 3], while
    # the ops above operate on uint8 images -- presumably this should be
    # np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8); verify
    # before relying on this demo.
    img = np.random.randn(32, 32, 3)
    a(img) | /salesforce-lavis-1.0.2.tar.gz/salesforce-lavis-1.0.2/lavis/processors/randaugment.py | 0.714329 | 0.696087 | randaugment.py | pypi
from lavis.common.registry import registry
from lavis.processors.blip_processors import BlipImageBaseProcessor
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
def _convert_to_rgb(image):
    # Coerce to 3-channel RGB; *image* is expected to be a PIL Image
    # (other modes such as RGBA/L would otherwise break ToTensor shapes).
    return image.convert("RGB")
@registry.register_processor("clip_image_train")
class ClipImageTrainProcessor(BlipImageBaseProcessor):
    """CLIP training-time image processor: random resized crop, RGB
    conversion, tensor conversion and normalization."""

    def __init__(
        self, image_size=224, mean=None, std=None, min_scale=0.9, max_scale=1.0
    ):
        super().__init__(mean=mean, std=std)

        crop = transforms.RandomResizedCrop(
            image_size,
            scale=(min_scale, max_scale),
            interpolation=InterpolationMode.BICUBIC,
        )
        pipeline = [crop, _convert_to_rgb, transforms.ToTensor(), self.normalize]
        self.transform = transforms.Compose(pipeline)

    @classmethod
    def from_config(cls, cfg=None):
        """Build the processor from an (optional) OmegaConf node."""
        cfg = OmegaConf.create() if cfg is None else cfg

        return cls(
            image_size=cfg.get("image_size", 224),
            mean=cfg.get("mean", None),
            std=cfg.get("std", None),
            min_scale=cfg.get("min_scale", 0.9),
            max_scale=cfg.get("max_scale", 1.0),
        )
@registry.register_processor("clip_image_eval")
class ClipImageEvalProcessor(BlipImageBaseProcessor):
    """CLIP eval-time image processor: deterministic resize + center crop
    (no random augmentation), RGB conversion, tensor conversion and
    normalization."""

    def __init__(self, image_size=224, mean=None, std=None):
        super().__init__(mean=mean, std=std)

        self.transform = transforms.Compose(
            [
                transforms.Resize(image_size, interpolation=InterpolationMode.BICUBIC),
                transforms.CenterCrop(image_size),
                _convert_to_rgb,
                transforms.ToTensor(),
                self.normalize,
            ]
        )

    @classmethod
    def from_config(cls, cfg=None):
        """Build the processor from an (optional) OmegaConf node."""
        if cfg is None:
            cfg = OmegaConf.create()

        image_size = cfg.get("image_size", 224)

        mean = cfg.get("mean", None)
        std = cfg.get("std", None)

        return cls(
            image_size=image_size,
            mean=mean,
            std=std,
        ) | /salesforce-lavis-1.0.2.tar.gz/salesforce-lavis-1.0.2/lavis/processors/clip_processors.py | 0.866726 | 0.242329 | clip_processors.py | pypi
import os
from lavis.common.registry import registry
from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from lavis.datasets.datasets.imagefolder_dataset import ImageFolderDataset
@registry.register_builder("imagenet")
class ImageNetBuilder(BaseDatasetBuilder):
    """Builder for ImageNet classification data laid out in ImageFolder
    style (one sub-directory per split)."""

    train_dataset_cls = ImageFolderDataset
    eval_dataset_cls = ImageFolderDataset

    DATASET_CONFIG_DICT = {"default": "configs/datasets/imagenet/defaults.yaml"}

    def _download_ann(self):
        # Annotations are implicit in the folder layout; nothing to fetch.
        pass

    def build(self):
        """Instantiate one ImageFolderDataset per configured split."""
        self.build_processors()

        build_info = self.config.build_info
        vis_info = build_info.get(self.data_type)

        datasets = dict()
        for split in build_info.splits:
            assert split in [
                "train",
                "val",
            ], "Invalid split name {}, must be one of 'train', 'val' and 'test'."

            is_train = split == "train"

            proc_key = "train" if is_train else "eval"
            vis_processor = self.vis_processors[proc_key]
            dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls

            datasets[split] = dataset_cls(
                vis_processor=vis_processor,
                vis_root=os.path.join(vis_info.storage, split),
                classnames=imagenet_classnames,
            )

        return datasets
imagenet_classnames = [
"tench",
"goldfish",
"great white shark",
"tiger shark",
"hammerhead shark",
"electric ray",
"stingray",
"rooster",
"hen",
"ostrich",
"brambling",
"goldfinch",
"house finch",
"junco",
"indigo bunting",
"American robin",
"bulbul",
"jay",
"magpie",
"chickadee",
"American dipper",
"kite (bird of prey)",
"bald eagle",
"vulture",
"great grey owl",
"fire salamander",
"smooth newt",
"newt",
"spotted salamander",
"axolotl",
"American bullfrog",
"tree frog",
"tailed frog",
"loggerhead sea turtle",
"leatherback sea turtle",
"mud turtle",
"terrapin",
"box turtle",
"banded gecko",
"green iguana",
"Carolina anole",
"desert grassland whiptail lizard",
"agama",
"frilled-necked lizard",
"alligator lizard",
"Gila monster",
"European green lizard",
"chameleon",
"Komodo dragon",
"Nile crocodile",
"American alligator",
"triceratops",
"worm snake",
"ring-necked snake",
"eastern hog-nosed snake",
"smooth green snake",
"kingsnake",
"garter snake",
"water snake",
"vine snake",
"night snake",
"boa constrictor",
"African rock python",
"Indian cobra",
"green mamba",
"sea snake",
"Saharan horned viper",
"eastern diamondback rattlesnake",
"sidewinder rattlesnake",
"trilobite",
"harvestman",
"scorpion",
"yellow garden spider",
"barn spider",
"European garden spider",
"southern black widow",
"tarantula",
"wolf spider",
"tick",
"centipede",
"black grouse",
"ptarmigan",
"ruffed grouse",
"prairie grouse",
"peafowl",
"quail",
"partridge",
"african grey parrot",
"macaw",
"sulphur-crested cockatoo",
"lorikeet",
"coucal",
"bee eater",
"hornbill",
"hummingbird",
"jacamar",
"toucan",
"duck",
"red-breasted merganser",
"goose",
"black swan",
"tusker",
"echidna",
"platypus",
"wallaby",
"koala",
"wombat",
"jellyfish",
"sea anemone",
"brain coral",
"flatworm",
"nematode",
"conch",
"snail",
"slug",
"sea slug",
"chiton",
"chambered nautilus",
"Dungeness crab",
"rock crab",
"fiddler crab",
"red king crab",
"American lobster",
"spiny lobster",
"crayfish",
"hermit crab",
"isopod",
"white stork",
"black stork",
"spoonbill",
"flamingo",
"little blue heron",
"great egret",
"bittern bird",
"crane bird",
"limpkin",
"common gallinule",
"American coot",
"bustard",
"ruddy turnstone",
"dunlin",
"common redshank",
"dowitcher",
"oystercatcher",
"pelican",
"king penguin",
"albatross",
"grey whale",
"killer whale",
"dugong",
"sea lion",
"Chihuahua",
"Japanese Chin",
"Maltese",
"Pekingese",
"Shih Tzu",
"King Charles Spaniel",
"Papillon",
"toy terrier",
"Rhodesian Ridgeback",
"Afghan Hound",
"Basset Hound",
"Beagle",
"Bloodhound",
"Bluetick Coonhound",
"Black and Tan Coonhound",
"Treeing Walker Coonhound",
"English foxhound",
"Redbone Coonhound",
"borzoi",
"Irish Wolfhound",
"Italian Greyhound",
"Whippet",
"Ibizan Hound",
"Norwegian Elkhound",
"Otterhound",
"Saluki",
"Scottish Deerhound",
"Weimaraner",
"Staffordshire Bull Terrier",
"American Staffordshire Terrier",
"Bedlington Terrier",
"Border Terrier",
"Kerry Blue Terrier",
"Irish Terrier",
"Norfolk Terrier",
"Norwich Terrier",
"Yorkshire Terrier",
"Wire Fox Terrier",
"Lakeland Terrier",
"Sealyham Terrier",
"Airedale Terrier",
"Cairn Terrier",
"Australian Terrier",
"Dandie Dinmont Terrier",
"Boston Terrier",
"Miniature Schnauzer",
"Giant Schnauzer",
"Standard Schnauzer",
"Scottish Terrier",
"Tibetan Terrier",
"Australian Silky Terrier",
"Soft-coated Wheaten Terrier",
"West Highland White Terrier",
"Lhasa Apso",
"Flat-Coated Retriever",
"Curly-coated Retriever",
"Golden Retriever",
"Labrador Retriever",
"Chesapeake Bay Retriever",
"German Shorthaired Pointer",
"Vizsla",
"English Setter",
"Irish Setter",
"Gordon Setter",
"Brittany dog",
"Clumber Spaniel",
"English Springer Spaniel",
"Welsh Springer Spaniel",
"Cocker Spaniel",
"Sussex Spaniel",
"Irish Water Spaniel",
"Kuvasz",
"Schipperke",
"Groenendael dog",
"Malinois",
"Briard",
"Australian Kelpie",
"Komondor",
"Old English Sheepdog",
"Shetland Sheepdog",
"collie",
"Border Collie",
"Bouvier des Flandres dog",
"Rottweiler",
"German Shepherd Dog",
"Dobermann",
"Miniature Pinscher",
"Greater Swiss Mountain Dog",
"Bernese Mountain Dog",
"Appenzeller Sennenhund",
"Entlebucher Sennenhund",
"Boxer",
"Bullmastiff",
"Tibetan Mastiff",
"French Bulldog",
"Great Dane",
"St. Bernard",
"husky",
"Alaskan Malamute",
"Siberian Husky",
"Dalmatian",
"Affenpinscher",
"Basenji",
"pug",
"Leonberger",
"Newfoundland dog",
"Great Pyrenees dog",
"Samoyed",
"Pomeranian",
"Chow Chow",
"Keeshond",
"brussels griffon",
"Pembroke Welsh Corgi",
"Cardigan Welsh Corgi",
"Toy Poodle",
"Miniature Poodle",
"Standard Poodle",
"Mexican hairless dog (xoloitzcuintli)",
"grey wolf",
"Alaskan tundra wolf",
"red wolf or maned wolf",
"coyote",
"dingo",
"dhole",
"African wild dog",
"hyena",
"red fox",
"kit fox",
"Arctic fox",
"grey fox",
"tabby cat",
"tiger cat",
"Persian cat",
"Siamese cat",
"Egyptian Mau",
"cougar",
"lynx",
"leopard",
"snow leopard",
"jaguar",
"lion",
"tiger",
"cheetah",
"brown bear",
"American black bear",
"polar bear",
"sloth bear",
"mongoose",
"meerkat",
"tiger beetle",
"ladybug",
"ground beetle",
"longhorn beetle",
"leaf beetle",
"dung beetle",
"rhinoceros beetle",
"weevil",
"fly",
"bee",
"ant",
"grasshopper",
"cricket insect",
"stick insect",
"cockroach",
"praying mantis",
"cicada",
"leafhopper",
"lacewing",
"dragonfly",
"damselfly",
"red admiral butterfly",
"ringlet butterfly",
"monarch butterfly",
"small white butterfly",
"sulphur butterfly",
"gossamer-winged butterfly",
"starfish",
"sea urchin",
"sea cucumber",
"cottontail rabbit",
"hare",
"Angora rabbit",
"hamster",
"porcupine",
"fox squirrel",
"marmot",
"beaver",
"guinea pig",
"common sorrel horse",
"zebra",
"pig",
"wild boar",
"warthog",
"hippopotamus",
"ox",
"water buffalo",
"bison",
"ram (adult male sheep)",
"bighorn sheep",
"Alpine ibex",
"hartebeest",
"impala (antelope)",
"gazelle",
"arabian camel",
"llama",
"weasel",
"mink",
"European polecat",
"black-footed ferret",
"otter",
"skunk",
"badger",
"armadillo",
"three-toed sloth",
"orangutan",
"gorilla",
"chimpanzee",
"gibbon",
"siamang",
"guenon",
"patas monkey",
"baboon",
"macaque",
"langur",
"black-and-white colobus",
"proboscis monkey",
"marmoset",
"white-headed capuchin",
"howler monkey",
"titi monkey",
"Geoffroy's spider monkey",
"common squirrel monkey",
"ring-tailed lemur",
"indri",
"Asian elephant",
"African bush elephant",
"red panda",
"giant panda",
"snoek fish",
"eel",
"silver salmon",
"rock beauty fish",
"clownfish",
"sturgeon",
"gar fish",
"lionfish",
"pufferfish",
"abacus",
"abaya",
"academic gown",
"accordion",
"acoustic guitar",
"aircraft carrier",
"airliner",
"airship",
"altar",
"ambulance",
"amphibious vehicle",
"analog clock",
"apiary",
"apron",
"trash can",
"assault rifle",
"backpack",
"bakery",
"balance beam",
"balloon",
"ballpoint pen",
"Band-Aid",
"banjo",
"baluster / handrail",
"barbell",
"barber chair",
"barbershop",
"barn",
"barometer",
"barrel",
"wheelbarrow",
"baseball",
"basketball",
"bassinet",
"bassoon",
"swimming cap",
"bath towel",
"bathtub",
"station wagon",
"lighthouse",
"beaker",
"military hat (bearskin or shako)",
"beer bottle",
"beer glass",
"bell tower",
"baby bib",
"tandem bicycle",
"bikini",
"ring binder",
"binoculars",
"birdhouse",
"boathouse",
"bobsleigh",
"bolo tie",
"poke bonnet",
"bookcase",
"bookstore",
"bottle cap",
"hunting bow",
"bow tie",
"brass memorial plaque",
"bra",
"breakwater",
"breastplate",
"broom",
"bucket",
"buckle",
"bulletproof vest",
"high-speed train",
"butcher shop",
"taxicab",
"cauldron",
"candle",
"cannon",
"canoe",
"can opener",
"cardigan",
"car mirror",
"carousel",
"tool kit",
"cardboard box / carton",
"car wheel",
"automated teller machine",
"cassette",
"cassette player",
"castle",
"catamaran",
"CD player",
"cello",
"mobile phone",
"chain",
"chain-link fence",
"chain mail",
"chainsaw",
"storage chest",
"chiffonier",
"bell or wind chime",
"china cabinet",
"Christmas stocking",
"church",
"movie theater",
"cleaver",
"cliff dwelling",
"cloak",
"clogs",
"cocktail shaker",
"coffee mug",
"coffeemaker",
"spiral or coil",
"combination lock",
"computer keyboard",
"candy store",
"container ship",
"convertible",
"corkscrew",
"cornet",
"cowboy boot",
"cowboy hat",
"cradle",
"construction crane",
"crash helmet",
"crate",
"infant bed",
"Crock Pot",
"croquet ball",
"crutch",
"cuirass",
"dam",
"desk",
"desktop computer",
"rotary dial telephone",
"diaper",
"digital clock",
"digital watch",
"dining table",
"dishcloth",
"dishwasher",
"disc brake",
"dock",
"dog sled",
"dome",
"doormat",
"drilling rig",
"drum",
"drumstick",
"dumbbell",
"Dutch oven",
"electric fan",
"electric guitar",
"electric locomotive",
"entertainment center",
"envelope",
"espresso machine",
"face powder",
"feather boa",
"filing cabinet",
"fireboat",
"fire truck",
"fire screen",
"flagpole",
"flute",
"folding chair",
"football helmet",
"forklift",
"fountain",
"fountain pen",
"four-poster bed",
"freight car",
"French horn",
"frying pan",
"fur coat",
"garbage truck",
"gas mask or respirator",
"gas pump",
"goblet",
"go-kart",
"golf ball",
"golf cart",
"gondola",
"gong",
"gown",
"grand piano",
"greenhouse",
"radiator grille",
"grocery store",
"guillotine",
"hair clip",
"hair spray",
"half-track",
"hammer",
"hamper",
"hair dryer",
"hand-held computer",
"handkerchief",
"hard disk drive",
"harmonica",
"harp",
"combine harvester",
"hatchet",
"holster",
"home theater",
"honeycomb",
"hook",
"hoop skirt",
"gymnastic horizontal bar",
"horse-drawn vehicle",
"hourglass",
"iPod",
"clothes iron",
"carved pumpkin",
"jeans",
"jeep",
"T-shirt",
"jigsaw puzzle",
"rickshaw",
"joystick",
"kimono",
"knee pad",
"knot",
"lab coat",
"ladle",
"lampshade",
"laptop computer",
"lawn mower",
"lens cap",
"letter opener",
"library",
"lifeboat",
"lighter",
"limousine",
"ocean liner",
"lipstick",
"slip-on shoe",
"lotion",
"music speaker",
"loupe magnifying glass",
"sawmill",
"magnetic compass",
"messenger bag",
"mailbox",
"tights",
"one-piece bathing suit",
"manhole cover",
"maraca",
"marimba",
"mask",
"matchstick",
"maypole",
"maze",
"measuring cup",
"medicine cabinet",
"megalith",
"microphone",
"microwave oven",
"military uniform",
"milk can",
"minibus",
"miniskirt",
"minivan",
"missile",
"mitten",
"mixing bowl",
"mobile home",
"ford model t",
"modem",
"monastery",
"monitor",
"moped",
"mortar and pestle",
"graduation cap",
"mosque",
"mosquito net",
"vespa",
"mountain bike",
"tent",
"computer mouse",
"mousetrap",
"moving van",
"muzzle",
"metal nail",
"neck brace",
"necklace",
"baby pacifier",
"notebook computer",
"obelisk",
"oboe",
"ocarina",
"odometer",
"oil filter",
"pipe organ",
"oscilloscope",
"overskirt",
"bullock cart",
"oxygen mask",
"product packet / packaging",
"paddle",
"paddle wheel",
"padlock",
"paintbrush",
"pajamas",
"palace",
"pan flute",
"paper towel",
"parachute",
"parallel bars",
"park bench",
"parking meter",
"railroad car",
"patio",
"payphone",
"pedestal",
"pencil case",
"pencil sharpener",
"perfume",
"Petri dish",
"photocopier",
"plectrum",
"Pickelhaube",
"picket fence",
"pickup truck",
"pier",
"piggy bank",
"pill bottle",
"pillow",
"ping-pong ball",
"pinwheel",
"pirate ship",
"drink pitcher",
"block plane",
"planetarium",
"plastic bag",
"plate rack",
"farm plow",
"plunger",
"Polaroid camera",
"pole",
"police van",
"poncho",
"pool table",
"soda bottle",
"plant pot",
"potter's wheel",
"power drill",
"prayer rug",
"printer",
"prison",
"missile",
"projector",
"hockey puck",
"punching bag",
"purse",
"quill",
"quilt",
"race car",
"racket",
"radiator",
"radio",
"radio telescope",
"rain barrel",
"recreational vehicle",
"fishing casting reel",
"reflex camera",
"refrigerator",
"remote control",
"restaurant",
"revolver",
"rifle",
"rocking chair",
"rotisserie",
"eraser",
"rugby ball",
"ruler measuring stick",
"sneaker",
"safe",
"safety pin",
"salt shaker",
"sandal",
"sarong",
"saxophone",
"scabbard",
"weighing scale",
"school bus",
"schooner",
"scoreboard",
"CRT monitor",
"screw",
"screwdriver",
"seat belt",
"sewing machine",
"shield",
"shoe store",
"shoji screen / room divider",
"shopping basket",
"shopping cart",
"shovel",
"shower cap",
"shower curtain",
"ski",
"balaclava ski mask",
"sleeping bag",
"slide rule",
"sliding door",
"slot machine",
"snorkel",
"snowmobile",
"snowplow",
"soap dispenser",
"soccer ball",
"sock",
"solar thermal collector",
"sombrero",
"soup bowl",
"keyboard space bar",
"space heater",
"space shuttle",
"spatula",
"motorboat",
"spider web",
"spindle",
"sports car",
"spotlight",
"stage",
"steam locomotive",
"through arch bridge",
"steel drum",
"stethoscope",
"scarf",
"stone wall",
"stopwatch",
"stove",
"strainer",
"tram",
"stretcher",
"couch",
"stupa",
"submarine",
"suit",
"sundial",
"sunglasses",
"sunglasses",
"sunscreen",
"suspension bridge",
"mop",
"sweatshirt",
"swim trunks / shorts",
"swing",
"electrical switch",
"syringe",
"table lamp",
"tank",
"tape player",
"teapot",
"teddy bear",
"television",
"tennis ball",
"thatched roof",
"front curtain",
"thimble",
"threshing machine",
"throne",
"tile roof",
"toaster",
"tobacco shop",
"toilet seat",
"torch",
"totem pole",
"tow truck",
"toy store",
"tractor",
"semi-trailer truck",
"tray",
"trench coat",
"tricycle",
"trimaran",
"tripod",
"triumphal arch",
"trolleybus",
"trombone",
"hot tub",
"turnstile",
"typewriter keyboard",
"umbrella",
"unicycle",
"upright piano",
"vacuum cleaner",
"vase",
"vaulted or arched ceiling",
"velvet fabric",
"vending machine",
"vestment",
"viaduct",
"violin",
"volleyball",
"waffle iron",
"wall clock",
"wallet",
"wardrobe",
"military aircraft",
"sink",
"washing machine",
"water bottle",
"water jug",
"water tower",
"whiskey jug",
"whistle",
"hair wig",
"window screen",
"window shade",
"Windsor tie",
"wine bottle",
"airplane wing",
"wok",
"wooden spoon",
"wool",
"split-rail fence",
"shipwreck",
"sailboat",
"yurt",
"website",
"comic book",
"crossword",
"traffic or street sign",
"traffic light",
"dust jacket",
"menu",
"plate",
"guacamole",
"consomme",
"hot pot",
"trifle",
"ice cream",
"popsicle",
"baguette",
"bagel",
"pretzel",
"cheeseburger",
"hot dog",
"mashed potatoes",
"cabbage",
"broccoli",
"cauliflower",
"zucchini",
"spaghetti squash",
"acorn squash",
"butternut squash",
"cucumber",
"artichoke",
"bell pepper",
"cardoon",
"mushroom",
"Granny Smith apple",
"strawberry",
"orange",
"lemon",
"fig",
"pineapple",
"banana",
"jackfruit",
"cherimoya (custard apple)",
"pomegranate",
"hay",
"carbonara",
"chocolate syrup",
"dough",
"meatloaf",
"pizza",
"pot pie",
"burrito",
"red wine",
"espresso",
"tea cup",
"eggnog",
"mountain",
"bubble",
"cliff",
"coral reef",
"geyser",
"lakeshore",
"promontory",
"sandbar",
"beach",
"valley",
"volcano",
"baseball player",
"bridegroom",
"scuba diver",
"rapeseed",
"daisy",
"yellow lady's slipper",
"corn",
"acorn",
"rose hip",
"horse chestnut seed",
"coral fungus",
"agaric",
"gyromitra",
"stinkhorn mushroom",
"earth star fungus",
"hen of the woods mushroom",
"bolete",
"corn cob",
"toilet paper",
] | /salesforce-lavis-1.0.2.tar.gz/salesforce-lavis-1.0.2/lavis/datasets/builders/imagefolder_builder.py | 0.445288 | 0.425486 | imagefolder_builder.py | pypi |
from lavis.datasets.builders.base_dataset_builder import load_dataset_config
from lavis.datasets.builders.caption_builder import (
COCOCapBuilder,
MSRVTTCapBuilder,
MSVDCapBuilder,
VATEXCapBuilder,
)
from lavis.datasets.builders.image_text_pair_builder import (
ConceptualCaption12MBuilder,
ConceptualCaption3MBuilder,
VGCaptionBuilder,
SBUCaptionBuilder,
)
from lavis.datasets.builders.classification_builder import (
NLVRBuilder,
SNLIVisualEntailmentBuilder,
)
from lavis.datasets.builders.imagefolder_builder import ImageNetBuilder
from lavis.datasets.builders.video_qa_builder import MSRVTTQABuilder, MSVDQABuilder
from lavis.datasets.builders.vqa_builder import (
COCOVQABuilder,
OKVQABuilder,
VGVQABuilder,
GQABuilder,
)
from lavis.datasets.builders.retrieval_builder import (
MSRVTTRetrievalBuilder,
DiDeMoRetrievalBuilder,
COCORetrievalBuilder,
Flickr30kBuilder,
)
from lavis.datasets.builders.dialogue_builder import AVSDDialBuilder
from lavis.common.registry import registry
# Public dataset-builder classes re-exported by lavis.datasets.builders.
__all__ = [
    "COCOCapBuilder",
    "COCORetrievalBuilder",
    "COCOVQABuilder",
    "ConceptualCaption12MBuilder",
    "ConceptualCaption3MBuilder",
    "DiDeMoRetrievalBuilder",
    "Flickr30kBuilder",
    "GQABuilder",
    "ImageNetBuilder",
    "MSRVTTCapBuilder",
    "MSRVTTQABuilder",
    "MSRVTTRetrievalBuilder",
    "MSVDCapBuilder",
    "MSVDQABuilder",
    "NLVRBuilder",
    "OKVQABuilder",
    "SBUCaptionBuilder",
    "SNLIVisualEntailmentBuilder",
    "VATEXCapBuilder",
    "VGCaptionBuilder",
    "VGVQABuilder",
    "AVSDDialBuilder",
]
def load_dataset(name, cfg_path=None, vis_path=None, data_type=None):
    """Instantiate all splits of a registered dataset.

    Args:
        name (str): registered builder name, e.g. "coco_caption".
        cfg_path (str | None): path to a dataset config; builder defaults
            are used when None.
        vis_path (str | None): optional override of the visual-data
            storage root.
        data_type (str | None): which data type's storage to override;
            defaults to the builder's configured data type.

    Returns:
        dict: mapping from split name to dataset.

    Example
        >>> dataset = load_dataset("coco_caption", cfg_path=None)
        >>> splits = dataset.keys()
        >>> print([len(dataset[split]) for split in splits])
    """
    cfg = None if cfg_path is None else load_dataset_config(cfg_path)

    builder_cls = registry.get_builder_class(name)
    if builder_cls is None:
        # Check the registry lookup explicitly. Previously a missing
        # builder was detected by catching the TypeError raised when
        # calling None, which also masked TypeErrors raised inside a
        # valid builder's constructor.
        print(
            f"Dataset {name} not found. Available datasets:\n"
            + ", ".join([str(k) for k in dataset_zoo.get_names()])
        )
        exit(1)
    builder = builder_cls(cfg)

    if vis_path is not None:
        if data_type is None:
            # use default data type in the config
            data_type = builder.config.data_type

        assert (
            data_type in builder.config.build_info
        ), f"Invalid data_type {data_type} for {name}."

        builder.config.build_info.get(data_type).storage = vis_path

    dataset = builder.build_datasets()
    return dataset
class DatasetZoo:
    """Catalog of registered dataset builders and their config variants."""

    def __init__(self) -> None:
        builder_map = registry.mapping["builder_name_mapping"]
        self.dataset_zoo = {
            name: list(builder.DATASET_CONFIG_DICT.keys())
            for name, builder in sorted(builder_map.items())
        }

    def get_names(self):
        """Return the (sorted) list of registered dataset names."""
        return list(self.dataset_zoo.keys())
dataset_zoo = DatasetZoo() | /salesforce-lavis-1.0.2.tar.gz/salesforce-lavis-1.0.2/lavis/datasets/builders/__init__.py | 0.514888 | 0.24447 | __init__.py | pypi |
import os
from lavis.common.registry import registry
from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from lavis.datasets.datasets.image_text_pair_datasets import ImageTextPairDataset
from lavis.datasets.datasets.laion_dataset import LaionDataset
@registry.register_builder("conceptual_caption_3m")
class ConceptualCaption3MBuilder(BaseDatasetBuilder):
    # Image-text pair pretraining dataset (Conceptual Captions, 3M scale);
    # training split only.
    train_dataset_cls = ImageTextPairDataset

    DATASET_CONFIG_DICT = {
        "default": "configs/datasets/conceptual_caption/defaults_3m.yaml"
    }
@registry.register_builder("conceptual_caption_12m")
class ConceptualCaption12MBuilder(BaseDatasetBuilder):
    # Image-text pair pretraining dataset (Conceptual Captions, 12M scale);
    # training split only.
    train_dataset_cls = ImageTextPairDataset

    DATASET_CONFIG_DICT = {
        "default": "configs/datasets/conceptual_caption/defaults_12m.yaml"
    }
@registry.register_builder("sbu_caption")
class SBUCaptionBuilder(BaseDatasetBuilder):
    # SBU Captions image-text pair dataset; training split only.
    train_dataset_cls = ImageTextPairDataset

    DATASET_CONFIG_DICT = {"default": "configs/datasets/sbu_caption/defaults.yaml"}
@registry.register_builder("vg_caption")
class VGCaptionBuilder(BaseDatasetBuilder):
    # Visual Genome caption image-text pair dataset; training split only.
    train_dataset_cls = ImageTextPairDataset

    DATASET_CONFIG_DICT = {"default": "configs/datasets/vg/defaults_caption.yaml"}
@registry.register_builder("laion2B_multi")
class Laion2BMultiBuilder(BaseDatasetBuilder):
    # LAION-2B (multilingual) builder; data is streamed from webdataset
    # shards, so there is nothing to download ahead of time.
    train_dataset_cls = LaionDataset

    DATASET_CONFIG_DICT = {"default": "configs/datasets/laion/defaults_2B_multi.yaml"}

    def _download_ann(self):
        # Annotations live inside the webdataset shards; nothing to do.
        pass

    def _download_vis(self):
        # Images live inside the webdataset shards; nothing to do.
        pass

    def build(self):
        self.build_processors()

        build_info = self.config.build_info

        datasets = dict()
        split = "train"  # laion dataset only has train split

        # create datasets
        # [NOTE] return inner_datasets (wds.DataPipeline)
        dataset_cls = self.train_dataset_cls
        datasets[split] = dataset_cls(
            vis_processor=self.vis_processors[split],
            text_processor=self.text_processors[split],
            location=build_info.storage,
        ).inner_dataset

        return datasets | /salesforce-lavis-1.0.2.tar.gz/salesforce-lavis-1.0.2/lavis/datasets/builders/image_text_pair_builder.py | 0.40592 | 0.279202 | image_text_pair_builder.py | pypi
import os
import json
from PIL import Image
from lavis.datasets.datasets.vqa_datasets import VQADataset, VQAEvalDataset
from collections import OrderedDict
class __DisplMixin:
    """Mixin adding a human-readable preview of one VQA dataset item."""

    def displ_item(self, index):
        """Return an OrderedDict summarizing annotation *index* alongside
        the processed sample."""
        sample = self.__getitem__(index)
        ann = self.annotation[index]

        return OrderedDict(
            {
                "file": ann["image"],
                "question": ann["question"],
                "question_id": ann["question_id"],
                "answers": "; ".join(ann["answer"]),
                "image": sample["image"],
            }
        )
class COCOVQADataset(VQADataset, __DisplMixin):
    """COCO VQA training dataset: yields a processed image, a question and
    the weighted list of ground-truth answers."""

    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)

    def __getitem__(self, index):
        ann = self.annotation[index]

        image_path = os.path.join(self.vis_root, ann["image"])
        image = Image.open(image_path).convert("RGB")

        image = self.vis_processor(image)
        question = self.text_processor(ann["question"])

        # Each annotator answer contributes 1/n to its weight, so a
        # repeated answer accumulates weight proportional to its frequency.
        # dict.get replaces the previous `in answer_weight.keys()`
        # membership test + double lookup; accumulation order (and hence
        # float results) is unchanged.
        answer_weight = {}
        for answer in ann["answer"]:
            answer_weight[answer] = answer_weight.get(answer, 0) + 1 / len(
                ann["answer"]
            )

        answers = list(answer_weight.keys())
        weights = list(answer_weight.values())

        return {
            "image": image,
            "text_input": question,
            "answers": answers,
            "weights": weights,
        }
class COCOVQAEvalDataset(VQAEvalDataset, __DisplMixin):
    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        """
        vis_root (string): Root directory of images (e.g. coco/images/)
        ann_root (string): directory to store the annotation file
        """

        self.vis_root = vis_root

        # NOTE(review): these open() handles are never closed explicitly;
        # consider `with open(...) as f: json.load(f)`.
        self.annotation = json.load(open(ann_paths[0]))

        answer_list_path = ann_paths[1]
        if os.path.exists(answer_list_path):
            self.answer_list = json.load(open(answer_list_path))
        else:
            self.answer_list = None

        # Optional extra annotation paths (indices 2 and 3) -- presumably
        # COCO-format question/annotation files for evaluation; left as
        # None when not provided.
        try:
            self.coco_fmt_qust_file = ann_paths[2]
            self.coco_fmt_anno_file = ann_paths[3]
        except IndexError:
            self.coco_fmt_qust_file = None
            self.coco_fmt_anno_file = None

        self.vis_processor = vis_processor
        self.text_processor = text_processor

        self._add_instance_ids()

    def __getitem__(self, index):
        ann = self.annotation[index]

        image_path = os.path.join(self.vis_root, ann["image"])
        image = Image.open(image_path).convert("RGB")

        image = self.vis_processor(image)
        question = self.text_processor(ann["question"])

        return {
            "image": image,
            "text_input": question,
            "question_id": ann["question_id"],
            "instance_id": ann["instance_id"],
        } | /salesforce-lavis-1.0.2.tar.gz/salesforce-lavis-1.0.2/lavis/datasets/datasets/coco_vqa_datasets.py | 0.58818 | 0.267543 | coco_vqa_datasets.py | pypi
from collections import OrderedDict
import json
import os
import torch
from PIL import Image
from lavis.datasets.datasets.vqa_datasets import VQADataset, VQAEvalDataset
class __DisplMixin:
    """Mixin adding a human-readable preview of one A-OKVQA item."""

    def displ_item(self, index):
        """Return an OrderedDict summarizing annotation *index* alongside
        the processed sample."""
        sample = self.__getitem__(index)
        ann = self.annotation[index]

        return OrderedDict(
            {
                "file": ann["image"],
                "question": ann["question"],
                "question_id": ann["question_id"],
                "direct_answers": "; ".join(ann["direct_answers"]),
                "choices": "; ".join(ann["choices"]),
                "correct_choice": ann["choices"][ann["correct_choice_idx"]],
                "image": sample["image"],
            }
        )
class AOKVQADataset(VQADataset, __DisplMixin):
    """A-OKVQA training dataset: yields a processed image, a question and
    the weighted list of direct answers."""

    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)

    def __getitem__(self, index):
        ann = self.annotation[index]

        image_path = os.path.join(self.vis_root, ann["image"])
        image = Image.open(image_path).convert("RGB")

        image = self.vis_processor(image)
        question = self.text_processor(ann["question"])

        answer_key = "direct_answers"

        # Each annotator answer contributes 1/n to its weight, so a
        # repeated answer accumulates weight proportional to its frequency.
        # dict.get replaces the previous `in answer_weight.keys()`
        # membership test + double lookup; accumulation order (and hence
        # float results) is unchanged.
        answer_weight = {}
        for answer in ann[answer_key]:
            answer_weight[answer] = answer_weight.get(answer, 0) + 1 / len(
                ann[answer_key]
            )

        answers = list(answer_weight.keys())
        weights = list(answer_weight.values())

        return {
            "image": image,
            "text_input": question,
            "answers": answers,
            "weights": weights,
        }
class AOKVQAEvalDataset(VQAEvalDataset, __DisplMixin):
    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        """
        vis_root (string): Root directory of images (e.g. coco/images/)
        ann_root (string): directory to store the annotation file
        """

        self.vis_root = vis_root

        # NOTE(review): these open() handles are never closed explicitly;
        # consider `with open(...) as f: json.load(f)`.
        self.annotation = json.load(open(ann_paths[0]))

        answer_list_path = ann_paths[1]
        if os.path.exists(answer_list_path):
            self.answer_list = json.load(open(answer_list_path))
        else:
            self.answer_list = None

        # Optional extra annotation paths (indices 2 and 3); left as None
        # when not provided.
        try:
            self.coco_fmt_qust_file = ann_paths[2]
            self.coco_fmt_anno_file = ann_paths[3]
        except IndexError:
            self.coco_fmt_qust_file = None
            self.coco_fmt_anno_file = None

        self.vis_processor = vis_processor
        self.text_processor = text_processor

        self._add_instance_ids()

    def collater(self, samples):
        # Batch a list of per-item dicts into a single dict; only the
        # images are stacked into a tensor, everything else stays a list.
        (
            image_list,
            question_list,
            question_id_list,
            instance_id_list,
            choices_list,
            correct_choice_idx_list,
            direct_answers_list,
        ) = ([], [], [], [], [], [], [])

        for sample in samples:
            image_list.append(sample["image"])
            question_list.append(sample["text_input"])
            question_id_list.append(sample["question_id"])
            instance_id_list.append(sample["instance_id"])
            choices_list.append(sample["choices"])
            correct_choice_idx_list.append(sample["correct_choice_idx"])
            direct_answers_list.append(sample["direct_answers"])

        return {
            "image": torch.stack(image_list, dim=0),
            "text_input": question_list,
            "question_id": question_id_list,
            "instance_id": instance_id_list,
            "choices": choices_list,
            "correct_choice_idx": correct_choice_idx_list,
            "direct_answers": direct_answers_list,
        }

    def __getitem__(self, index):
        ann = self.annotation[index]

        image_path = os.path.join(self.vis_root, ann["image"])
        image = Image.open(image_path).convert("RGB")

        image = self.vis_processor(image)
        question = self.text_processor(ann["question"])

        choices = ann["choices"]
        # Test-split annotations may omit the labels; fall back to None.
        if "correct_choice_idx" in ann:
            correct_choice_idx = ann["correct_choice_idx"]
        else:
            correct_choice_idx = None

        if "direct_answers" in ann:
            direct_answers = ann["direct_answers"]
        else:
            direct_answers = None

        return {
            "image": image,
            "text_input": question,
            "question_id": ann["question_id"],
            "instance_id": ann["instance_id"],
            "choices": choices,
            "correct_choice_idx": correct_choice_idx,
            "direct_answers": direct_answers,
        } | /salesforce-lavis-1.0.2.tar.gz/salesforce-lavis-1.0.2/lavis/datasets/datasets/aok_vqa_datasets.py | 0.630116 | 0.248968 | aok_vqa_datasets.py | pypi
import os
import random
from collections import OrderedDict
from lavis.datasets.datasets.multimodal_classification_datasets import (
MultimodalClassificationDataset,
)
from PIL import Image
class __DisplMixin:
    """Adds a human-readable single-item dump for NLVR-style datasets."""

    def displ_item(self, index):
        """Return an ordered summary of the raw annotation and the processed sample."""
        sample = self.__getitem__(index)
        ann = self.annotation[index]

        display = OrderedDict()
        display["file_L"] = ann["images"][0]
        display["file_R"] = ann["images"][1]
        display["sentence"] = ann["sentence"]
        display["label"] = ann["label"]
        display["image"] = [sample["image0"], sample["image1"]]
        return display
class NLVRDataset(MultimodalClassificationDataset, __DisplMixin):
    """NLVR2 training dataset: a sentence paired with two images, labeled True/False."""

    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)
        self.class_labels = self._build_class_labels()

    def _build_class_labels(self):
        # String labels as they appear in the annotation files.
        return {"False": 0, "True": 1}

    @staticmethod
    def _flip(samples):
        """Randomly swap the two images as augmentation.

        If the sentence mentions a side, the image swap is only valid when
        the words "left" and "right" are exchanged in the sentence as well.
        """
        sentence = samples["text_input"]
        left_img, right_img = samples["image0"], samples["image1"]

        mentions_side = "left" in sentence or "right" in sentence
        if not mentions_side:
            if random.random() < 0.5:
                left_img, right_img = right_img, left_img
        elif random.random() < 0.5:
            # Swap the side words via a placeholder so the first replace
            # does not clobber the second.
            sentence = (
                sentence.replace("left", "[TEMP_TOKEN]")
                .replace("right", "left")
                .replace("[TEMP_TOKEN]", "right")
            )
            left_img, right_img = right_img, left_img

        samples["text_input"] = sentence
        samples["image0"] = left_img
        samples["image1"] = right_img
        return samples

    def __getitem__(self, index):
        ann = self.annotation[index]

        image0 = Image.open(
            os.path.join(self.vis_root, ann["images"][0])
        ).convert("RGB")
        image1 = Image.open(
            os.path.join(self.vis_root, ann["images"][1])
        ).convert("RGB")

        return self._flip(
            {
                "image0": self.vis_processor(image0),
                "image1": self.vis_processor(image1),
                "text_input": self.text_processor(ann["sentence"]),
                "label": self.class_labels[ann["label"]],
                "instance_id": ann["instance_id"],
            }
        )
class NLVREvalDataset(NLVRDataset):
    """Evaluation variant of NLVRDataset: image/word flipping is disabled."""

    @staticmethod
    def _flip(samples):
        # Identity: evaluation must see samples exactly as annotated.
        return samples
import os
import json
from PIL import Image
from lavis.datasets.datasets.vqa_datasets import VQADataset, VQAEvalDataset
from collections import OrderedDict
class __DisplMixin:
    """Adds a human-readable single-item dump for VQA-style datasets."""

    def displ_item(self, index):
        sample = self.__getitem__(index)
        ann = self.annotation[index]

        display = OrderedDict()
        display["file"] = ann["image"]
        display["question"] = ann["question"]
        display["question_id"] = ann["question_id"]
        # NOTE(review): for GQA, ann["answer"] is a single string, so this
        # join inserts "; " between characters — confirm intended upstream.
        display["answers"] = "; ".join(ann["answer"])
        display["image"] = sample["image"]
        return display
class GQADataset(VQADataset, __DisplMixin):
    """GQA training dataset: each annotation carries a single answer with weight 1."""

    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)

    def __getitem__(self, index):
        ann = self.annotation[index]

        image = Image.open(os.path.join(self.vis_root, ann["image"])).convert("RGB")

        return {
            "image": self.vis_processor(image),
            "text_input": self.text_processor(ann["question"]),
            # GQA has exactly one ground-truth answer; give it full weight.
            "answers": [ann["answer"]],
            "weights": [1],
        }
class GQAEvalDataset(VQAEvalDataset, __DisplMixin):
    """GQA evaluation dataset.

    Args:
        vis_processor: image transform applied to each sample.
        text_processor: text transform applied to each question.
        vis_root (str): root directory of images (e.g. gqa/images/).
        ann_paths (list): [annotation file, optional answer-list file].
    """

    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        self.vis_root = vis_root

        # Bug fix: json.load(open(...)) leaked file handles; use context managers.
        with open(ann_paths[0]) as f:
            self.annotation = json.load(f)

        ## TODO: support inference method == 'ranking'
        answer_list_path = ann_paths[1] if len(ann_paths) > 1 else ""
        if os.path.exists(answer_list_path):
            with open(answer_list_path) as f:
                self.answer_list = json.load(f)
        else:
            self.answer_list = None

        self.vis_processor = vis_processor
        self.text_processor = text_processor

        self._add_instance_ids()

    def __getitem__(self, index):
        ann = self.annotation[index]

        image = Image.open(os.path.join(self.vis_root, ann["image"])).convert("RGB")

        return {
            "image": self.vis_processor(image),
            "text_input": self.text_processor(ann["question"]),
            # answer is a string; test splits may ship without ground truth.
            "answer": ann.get("answer"),
            "question_id": ann["question_id"],
            "instance_id": ann["instance_id"],
        }
import torch
from lavis.datasets.datasets.dialogue_datasets import (
DialogueDataset,
DialogueEvalDataset,
)
class AVSDDialDataset(DialogueDataset):
    """AVSD video-grounded dialogue dataset (training split)."""

    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        """
        vis_root (string): Root directory of video features (e.g. coco/images/)
        ann_paths (list): paths to annotation files
        """
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)

    def __getitem__(self, index):
        ann = self.annotation[index]

        vname = ann["image_id"]
        video = self.vis_processor(self.vis_root, vname)
        dialogue = self.text_processor(ann)

        # "image_id" is kept to stay compatible with the COCO evaluation format
        return {
            "video_fts": video["video_fts"],
            "video_token_type_ids": video["token_type_ids"],
            "input_ids": dialogue["input_ids"],
            "token_type_ids": dialogue["token_type_ids"],
            "labels": dialogue["labels"],
            "image_id": ann["image_id"],
            "instance_id": ann["instance_id"],
        }

    def collater(self, samples):
        """Pad text/video fields and fuse them into one batch.

        The video segment is prepended to the text segment in token-type
        ids, attention mask, and labels; video positions receive label -1
        so the language-model loss ignores them.
        """
        input_ids = [s["input_ids"] for s in samples]
        token_type_ids = [s["token_type_ids"] for s in samples]
        labels = [s["labels"] for s in samples]
        video_fts = [s["video_fts"] for s in samples]
        video_token_type_ids = [s["video_token_type_ids"] for s in samples]

        input_ids = self.text_processor.padding(input_ids)
        # -1 marks padded label positions the loss should ignore.
        labels = self.text_processor.padding(labels, -1)
        video_fts = self.vis_processor.padding(video_fts)

        token_type_ids = self.text_processor.padding(token_type_ids)
        video_token_type_ids = self.text_processor.padding(video_token_type_ids)
        token_type_ids = torch.cat([video_token_type_ids, token_type_ids], dim=1)

        text_mask = self.text_processor.get_attention_mask(input_ids)
        video_mask = self.vis_processor.get_attention_mask(video_fts)
        attn_mask = torch.cat([video_mask, text_mask], dim=1)

        # Video positions contribute no supervision signal.
        video_labels = torch.full(
            (video_fts.size(0), video_fts.size(1)), -1, dtype=torch.long
        )
        labels = torch.cat([video_labels, labels], dim=1)

        return {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "labels": labels,
            "video_fts": video_fts,
            "attn_mask": attn_mask,
        }
class AVSDDialEvalDataset(DialogueEvalDataset):
    """AVSD video-grounded dialogue dataset (val/test splits)."""

    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        """
        vis_root (string): Root directory of video features (e.g. coco/images/)
        ann_paths (list): paths to annotation files
        split (string): val or test
        """
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)

    def __getitem__(self, index):
        ann = self.annotation[index]

        vname = ann["image_id"]
        video = self.vis_processor(self.vis_root, vname)
        dialogue = self.text_processor(ann)

        # "image_id" is kept to stay compatible with the COCO evaluation format
        return {
            "video_fts": video["video_fts"],
            "video_token_type_ids": video["token_type_ids"],
            "input_ids": dialogue["input_ids"],
            "token_type_ids": dialogue["token_type_ids"],
            "labels": dialogue["labels"],
            "image_id": ann["image_id"],
            "instance_id": ann["instance_id"],
        }

    def collater(self, samples):
        """Pad text/video fields and fuse them into a single batch dict.

        Same contract as the training collater: the video segment is
        prepended to the text segment in token types, attention mask, and
        labels; video positions get label -1 so the loss skips them.
        """
        keys = ("input_ids", "token_type_ids", "labels", "video_fts",
                "video_token_type_ids")
        cols = {k: [s[k] for s in samples] for k in keys}

        input_ids = self.text_processor.padding(cols["input_ids"])
        # -1 marks padded label positions ignored by the loss.
        labels = self.text_processor.padding(cols["labels"], -1)
        video_fts = self.vis_processor.padding(cols["video_fts"])

        token_type_ids = self.text_processor.padding(cols["token_type_ids"])
        video_token_type_ids = self.text_processor.padding(
            cols["video_token_type_ids"]
        )
        token_type_ids = torch.cat([video_token_type_ids, token_type_ids], dim=1)

        attn_mask = self.text_processor.get_attention_mask(input_ids)
        video_mask = self.vis_processor.get_attention_mask(video_fts)
        attn_mask = torch.cat([video_mask, attn_mask], dim=1)

        # Video positions contribute no supervision signal.
        video_labels = torch.full(
            (video_fts.size(0), video_fts.size(1)), -1, dtype=torch.long
        )
        labels = torch.cat([video_labels, labels], dim=1)

        return {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "labels": labels,
            "video_fts": video_fts,
            "attn_mask": attn_mask,
        }
import os
from collections import OrderedDict
from lavis.datasets.datasets.base_dataset import BaseDataset
from PIL import Image
class __DisplMixin:
    """Adds a human-readable single-item dump for captioning datasets."""

    def displ_item(self, index):
        sample = self.__getitem__(index)
        ann = self.annotation[index]

        display = OrderedDict()
        display["file"] = ann["image"]
        display["caption"] = ann["caption"]
        display["image"] = sample["image"]
        return display
class CaptionDataset(BaseDataset, __DisplMixin):
    """Image-captioning training dataset.

    Each distinct image_id is mapped to a dense contiguous index in
    first-seen order, so downstream code can use compact ids.
    """

    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        """
        vis_root (string): Root directory of images (e.g. coco/images/)
        ann_paths (list): paths to annotation files
        """
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)

        self.img_ids = {}
        for ann in self.annotation:
            img_id = ann["image_id"]
            if img_id not in self.img_ids:
                self.img_ids[img_id] = len(self.img_ids)

    def __getitem__(self, index):
        # TODO this assumes image input, not general enough
        ann = self.annotation[index]

        image = Image.open(os.path.join(self.vis_root, ann["image"])).convert("RGB")

        return {
            "image": self.vis_processor(image),
            "text_input": self.text_processor(ann["caption"]),
            "image_id": self.img_ids[ann["image_id"]],
        }
class CaptionEvalDataset(BaseDataset, __DisplMixin):
    """Captioning evaluation dataset: images only, no reference captions returned."""

    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        """
        vis_root (string): Root directory of images (e.g. coco/images/)
        ann_paths (list): paths to annotation files
        split (string): val or test
        """
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)

    def __getitem__(self, index):
        ann = self.annotation[index]

        image = Image.open(os.path.join(self.vis_root, ann["image"])).convert("RGB")

        return {
            "image": self.vis_processor(image),
            "image_id": ann["image_id"],
            "instance_id": ann["instance_id"],
        }
import datetime
import logging
import time
from collections import defaultdict, deque
import torch
import torch.distributed as dist
from lavis.common import dist_utils
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt if fmt is not None else "{median:.4f} ({global_avg:.4f})"

    def update(self, value, n=1):
        """Record *value* (typically a batch average) observed over *n* samples."""
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """Sum count/total across workers.

        Warning: does not synchronize the deque!
        """
        if not dist_utils.is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
        dist.barrier()
        dist.all_reduce(t)
        count, total = t.tolist()
        self.count = int(count)
        self.total = total

    @property
    def median(self):
        # torch.median returns the lower median for even-length windows.
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value,
        )
class MetricLogger(object):
    """Aggregate named SmoothedValue meters and print periodic progress
    (step counter, ETA, meter values, timing, CUDA memory) while iterating.
    """

    def __init__(self, delimiter="\t"):
        # Unknown meter names are created lazily with default smoothing.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Record one scalar observation per keyword argument."""
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Expose meters as attributes, e.g. ``logger.loss.avg``.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError(
            "'{}' object has no attribute '{}'".format(type(self).__name__, attr)
        )

    def __str__(self):
        """Windowed (smoothed) view of all meters, joined by the delimiter."""
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append("{}: {}".format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def global_avg(self):
        """Whole-run averages of all meters (not just the smoothing window)."""
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append("{}: {:.4f}".format(name, meter.global_avg))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        # Reduce counts/totals across distributed workers; deques stay local.
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        """Register a meter with a custom window size / format string."""
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from *iterable*, printing progress every *print_freq* steps.

        Tracks data-loading and per-iteration time, estimates the ETA from
        the global average iteration time, and (on CUDA) reports peak memory.
        """
        i = 0
        if not header:
            header = ""
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt="{avg:.4f}")
        data_time = SmoothedValue(fmt="{avg:.4f}")
        # Pad the step counter to the width of the total step count.
        space_fmt = ":" + str(len(str(len(iterable)))) + "d"
        log_msg = [
            header,
            "[{0" + space_fmt + "}/{1}]",
            "eta: {eta}",
            "{meters}",
            "time: {time}",
            "data: {data}",
        ]
        if torch.cuda.is_available():
            log_msg.append("max mem: {memory:.0f}")
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # Time spent waiting for the next batch (data loading).
            data_time.update(time.time() - end)
            yield obj
            # Full iteration time, including the consumer's work after yield.
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                            memory=torch.cuda.max_memory_allocated() / MB,
                        )
                    )
                else:
                    print(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                        )
                    )
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print(
            "{} Total time: {} ({:.4f} s / it)".format(
                header, total_time_str, total_time / len(iterable)
            )
        )
class AttrDict(dict):
    """Dict whose keys are also readable/writable as attributes.

    Aliases the instance ``__dict__`` to the mapping itself, so ``d.x``
    and ``d["x"]`` address the same storage.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__dict__ = self
def setup_logger():
    """Configure root logging: INFO on the main process, WARN on other ranks."""
    level = logging.INFO if dist_utils.is_main_process() else logging.WARN
    logging.basicConfig(
        level=level,
        format="%(asctime)s [%(levelname)s] %(message)s",
        handlers=[logging.StreamHandler()],
    )
import io
import json
import logging
import os
import pickle
import re
import shutil
import urllib
import urllib.error
import urllib.request
from typing import Optional
from urllib.parse import urlparse
import numpy as np
import pandas as pd
import yaml
from iopath.common.download import download
from iopath.common.file_io import file_lock, g_pathmgr
from lavis.common.registry import registry
from torch.utils.model_zoo import tqdm
from torchvision.datasets.utils import (
check_integrity,
download_file_from_google_drive,
extract_archive,
)
def now():
    """Return a compact local timestamp (YYYYMMDDHHM: the last minute digit is dropped)."""
    from datetime import datetime

    stamp = datetime.now().strftime("%Y%m%d%H%M")
    return stamp[:-1]
def is_url(url_or_filename):
    """True if the string parses as an http(s) URL.

    NOTE(review): redefined later in this module by a regex-based variant;
    that later definition is the one callers actually get at import time.
    """
    scheme = urlparse(url_or_filename).scheme
    return scheme in ("http", "https")
def get_cache_path(rel_path):
    """Resolve *rel_path* against the registry's cache_root, expanding ``~``."""
    cache_root = registry.get_path("cache_root")
    return os.path.expanduser(os.path.join(cache_root, rel_path))
def get_abs_path(rel_path):
    """Resolve *rel_path* against the registry's library_root."""
    library_root = registry.get_path("library_root")
    return os.path.join(library_root, rel_path)
def load_json(filename):
    """Parse the JSON file at *filename* and return the resulting object."""
    with open(filename, "r") as handle:
        return json.load(handle)
# The following are adapted from torchvision and vissl
# torchvision: https://github.com/pytorch/vision
# vissl: https://github.com/facebookresearch/vissl/blob/main/vissl/utils/download.py
def makedir(dir_path):
    """Create *dir_path* (and parents) if missing; return True on success.

    Best-effort: any failure is reported to stdout and swallowed.
    """
    try:
        if not g_pathmgr.exists(dir_path):
            g_pathmgr.mkdirs(dir_path)
    except BaseException:
        print(f"Error creating directory: {dir_path}")
        return False
    return True
def get_redirected_url(url: str):
    """
    Given a URL, returns the URL it redirects to or the
    original URL in case of no indirection
    """
    import requests

    with requests.Session() as session:
        with session.get(url, stream=True, allow_redirects=True) as response:
            # response.history is non-empty iff at least one redirect occurred.
            return response.url if response.history else url
def to_google_drive_download_url(view_url: str) -> str:
    """Convert a Google Drive "view" URL into a direct-download URL.

    Example input:
        https://drive.google.com/file/d/137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp/view
    Example output:
        https://drive.google.com/uc?export=download&id=137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp
    """
    parts = view_url.split("/")
    # The id is the path segment immediately before the trailing "view".
    assert parts[-1] == "view"
    return "https://drive.google.com/uc?export=download&id=" + parts[-2]
def download_google_drive_url(url: str, output_path: str, output_file_name: str):
    """
    Download a file from google drive
    Downloading an URL from google drive requires confirmation when
    the file of the size is too big (google drive notifies that
    anti-viral checks cannot be performed on such files)
    """
    import requests

    with requests.Session() as session:

        # First get the confirmation token and append it to the URL
        with session.get(url, stream=True, allow_redirects=True) as response:
            for k, v in response.cookies.items():
                # Drive sets a "download_warning*" cookie when it cannot
                # virus-scan the file; its value is the confirmation token.
                if k.startswith("download_warning"):
                    url = url + "&confirm=" + v

        # Then download the content of the file
        with session.get(url, stream=True, verify=True) as response:
            makedir(output_path)
            path = os.path.join(output_path, output_file_name)
            total_size = int(response.headers.get("Content-length", 0))
            with open(path, "wb") as file:
                # NOTE: this local import shadows the tqdm imported from
                # torch.utils.model_zoo at module top, inside this scope only.
                from tqdm import tqdm

                with tqdm(total=total_size) as progress_bar:
                    for block in response.iter_content(
                        chunk_size=io.DEFAULT_BUFFER_SIZE
                    ):
                        file.write(block)
                        progress_bar.update(len(block))
def _get_google_drive_file_id(url: str) -> Optional[str]:
parts = urlparse(url)
if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None:
return None
match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path)
if match is None:
return None
return match.group("id")
def _urlretrieve(url: str, filename: str, chunk_size: int = 1024) -> None:
    """Stream *url* to *filename* in *chunk_size* blocks, with a progress bar.

    Sends a custom User-Agent because some hosts reject urllib's default one.
    """
    with open(filename, "wb") as fh:
        with urllib.request.urlopen(
            urllib.request.Request(url, headers={"User-Agent": "vissl"})
        ) as response:
            with tqdm(total=response.length) as pbar:
                # Bug fix: response.read returns bytes, so the former ""
                # sentinel could never match and the loop relied solely on
                # the explicit break; use b"" so iter() terminates properly.
                for chunk in iter(lambda: response.read(chunk_size), b""):
                    if not chunk:
                        break
                    # Bug fix: count actual bytes read (the final chunk may
                    # be shorter than chunk_size), so the bar doesn't overrun.
                    pbar.update(len(chunk))
                    fh.write(chunk)
def download_url(
    url: str,
    root: str,
    filename: Optional[str] = None,
    md5: Optional[str] = None,
) -> None:
    """Download a file from a url and place it in root.
    Args:
        url (str): URL to download file from
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under.
        If None, use the basename of the URL.
        md5 (str, optional): MD5 checksum of the download. If None, do not check
    """
    root = os.path.expanduser(root)
    if not filename:
        filename = os.path.basename(url)
    fpath = os.path.join(root, filename)

    makedir(root)

    # check if file is already present locally
    if check_integrity(fpath, md5):
        print("Using downloaded and verified file: " + fpath)
        return

    # expand redirect chain if needed
    url = get_redirected_url(url)

    # check if file is located on Google Drive
    file_id = _get_google_drive_file_id(url)
    if file_id is not None:
        return download_file_from_google_drive(file_id, root, filename, md5)

    # download the file
    try:
        print("Downloading " + url + " to " + fpath)
        _urlretrieve(url, fpath)
    except (urllib.error.URLError, IOError) as e:  # type: ignore[attr-defined]
        # Some servers mis-handle https; retry the plain-http variant once.
        if url[:5] == "https":
            url = url.replace("https:", "http:")
            print(
                "Failed download. Trying https -> http instead."
                " Downloading " + url + " to " + fpath
            )
            _urlretrieve(url, fpath)
        else:
            raise e

    # check integrity of downloaded file
    if not check_integrity(fpath, md5):
        raise RuntimeError("File not found or corrupted.")
def download_and_extract_archive(
    url: str,
    download_root: str,
    extract_root: Optional[str] = None,
    filename: Optional[str] = None,
    md5: Optional[str] = None,
    remove_finished: bool = False,
) -> None:
    """Download *url* into *download_root*, then unpack it under *extract_root*.

    Defaults: extract in place (extract_root = download_root) and name the
    file after the last URL path segment. *md5*, when given, is verified by
    download_url; *remove_finished* deletes the archive after extraction.
    """
    download_root = os.path.expanduser(download_root)
    extract_root = download_root if extract_root is None else extract_root
    filename = filename or os.path.basename(url)

    download_url(url, download_root, filename, md5)

    archive = os.path.join(download_root, filename)
    print("Extracting {} to {}".format(archive, extract_root))
    extract_archive(archive, extract_root, remove_finished)
def cache_url(url: str, cache_dir: str) -> str:
    """
    This implementation downloads the remote resource and caches it locally.
    The resource will only be downloaded if not previously requested.
    """
    parsed_url = urlparse(url)
    dirname = os.path.join(cache_dir, os.path.dirname(parsed_url.path.lstrip("/")))
    makedir(dirname)

    filename = url.split("/")[-1]
    cached = os.path.join(dirname, filename)

    # file_lock guards against concurrent processes fetching the same URL.
    with file_lock(cached):
        if not os.path.isfile(cached):
            logging.info(f"Downloading {url} to {cached} ...")
            cached = download(url, dirname, filename=filename)

    logging.info(f"URL {url} cached in {cached}")
    return cached
# TODO (prigoyal): convert this into RAII-style API
def create_file_symlink(file1, file2):
    """
    Simply create the symlinks for a given file1 to file2.
    Useful during model checkpointing to symlinks to the
    latest successful checkpoint.
    """
    try:
        # Replace any existing link/file at the destination first.
        if g_pathmgr.exists(file2):
            g_pathmgr.rm(file2)
        g_pathmgr.symlink(file1, file2)
    except Exception as e:
        # Best-effort: a failed symlink should not abort checkpointing.
        logging.info(f"Could NOT create symlink. Error: {e}")
def save_file(data, filename, append_to_json=True, verbose=True):
    """
    Common i/o utility to handle saving data to various file formats.
    Supported:
    .pkl, .pickle, .npy, .json, .yaml
    Specifically for .json, users have the option to either append (default)
    or rewrite by passing in Boolean value to append_to_json.
    """
    if verbose:
        # Bug fix: the f-string had no placeholder and always logged a
        # constant message; interpolate the actual target path.
        logging.info(f"Saving data to file: {filename}")
    file_ext = os.path.splitext(filename)[1]
    if file_ext in [".pkl", ".pickle"]:
        with g_pathmgr.open(filename, "wb") as fopen:
            pickle.dump(data, fopen, pickle.HIGHEST_PROTOCOL)
    elif file_ext == ".npy":
        with g_pathmgr.open(filename, "wb") as fopen:
            np.save(fopen, data)
    elif file_ext == ".json":
        # Append vs overwrite only changes the open mode; the write is shared.
        mode = "a" if append_to_json else "w"
        with g_pathmgr.open(filename, mode) as fopen:
            fopen.write(json.dumps(data, sort_keys=True) + "\n")
            fopen.flush()
    elif file_ext == ".yaml":
        with g_pathmgr.open(filename, "w") as fopen:
            dump = yaml.dump(data)
            fopen.write(dump)
            fopen.flush()
    else:
        raise Exception(f"Saving {file_ext} is not supported yet")

    if verbose:
        # Bug fix: same placeholder-less f-string as above.
        logging.info(f"Saved data to file: {filename}")
def load_file(filename, mmap_mode=None, verbose=True, allow_pickle=False):
    """
    Common i/o utility to handle loading data from various file formats.
    Supported:
    .txt, .pkl, .pickle, .npy, .json, .yaml, .csv
    For the npy files, we support reading the files in mmap_mode.
    If the mmap_mode of reading is not successful, we load data without the
    mmap_mode.
    """
    if verbose:
        # Bug fix: the f-string had no placeholder and always logged a
        # constant message; interpolate the actual path.
        logging.info(f"Loading data from file: {filename}")

    file_ext = os.path.splitext(filename)[1]
    if file_ext == ".txt":
        with g_pathmgr.open(filename, "r") as fopen:
            data = fopen.readlines()
    elif file_ext in [".pkl", ".pickle"]:
        with g_pathmgr.open(filename, "rb") as fopen:
            data = pickle.load(fopen, encoding="latin1")
    elif file_ext == ".npy":
        if mmap_mode:
            try:
                with g_pathmgr.open(filename, "rb") as fopen:
                    data = np.load(
                        fopen,
                        allow_pickle=allow_pickle,
                        encoding="latin1",
                        mmap_mode=mmap_mode,
                    )
            except ValueError as e:
                # Bug fix: placeholder-less f-string; report which file failed.
                logging.info(
                    f"Could not mmap {filename}: {e}. Trying without g_pathmgr"
                )
                data = np.load(
                    filename,
                    allow_pickle=allow_pickle,
                    encoding="latin1",
                    mmap_mode=mmap_mode,
                )
                logging.info("Successfully loaded without g_pathmgr")
            except Exception:
                # Final fallback: plain (non-mmap) load through g_pathmgr.
                logging.info("Could not mmap without g_pathmgr. Trying without mmap")
                with g_pathmgr.open(filename, "rb") as fopen:
                    data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1")
        else:
            with g_pathmgr.open(filename, "rb") as fopen:
                data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1")
    elif file_ext == ".json":
        with g_pathmgr.open(filename, "r") as fopen:
            data = json.load(fopen)
    elif file_ext == ".yaml":
        with g_pathmgr.open(filename, "r") as fopen:
            data = yaml.load(fopen, Loader=yaml.FullLoader)
    elif file_ext == ".csv":
        with g_pathmgr.open(filename, "r") as fopen:
            data = pd.read_csv(fopen)
    else:
        raise Exception(f"Reading from {file_ext} is not supported yet")
    return data
def abspath(resource_path: str):
    """
    Make a path absolute, but take into account prefixes like
    "http://" or "manifold://"
    """
    # URI-style paths (scheme://...) are returned untouched.
    if re.match(r"^\w+://", resource_path):
        return resource_path
    return os.path.abspath(resource_path)
def makedir(dir_path):
    """
    Create the directory if it does not exist.
    """
    # NOTE(review): this redefines the makedir() declared earlier in this
    # module; the only difference is logging.info here vs print there. Being
    # the later definition, this is the one callers actually get.
    is_success = False
    try:
        if not g_pathmgr.exists(dir_path):
            g_pathmgr.mkdirs(dir_path)
        # Reached only if the existence check / creation raised nothing.
        is_success = True
    except BaseException:
        logging.info(f"Error creating directory: {dir_path}")
    return is_success
def is_url(input_url):
    """
    Check if an input string is a url. look for http(s):// and ignoring the case
    """
    # NOTE(review): shadows the urlparse-based is_url() defined earlier in
    # this module; this regex variant is the effective definition.
    return re.match(r"^(?:http)s?://", input_url, re.IGNORECASE) is not None
def cleanup_dir(dir):
    """
    Utility for deleting a directory. Useful for cleaning the storage space
    that contains various training artifacts like checkpoints, data etc.
    No-op when the directory does not exist.
    """
    if not os.path.exists(dir):
        return
    logging.info(f"Deleting directory: {dir}")
    shutil.rmtree(dir)
    logging.info(f"Deleted contents of directory: {dir}")
def get_file_size(filename):
    """
    Given a file, get the size of file in MB
    """
    return os.path.getsize(filename) / float(1024**2)
import math
from lavis.common.registry import registry
@registry.register_lr_scheduler("linear_warmup_step_lr")
class LinearWarmupStepLRScheduler:
    """Linear warmup during the first epoch, then exponential step decay per epoch."""

    def __init__(
        self,
        optimizer,
        max_epoch,
        min_lr,
        init_lr,
        decay_rate=1,
        warmup_start_lr=-1,
        warmup_steps=0,
        **kwargs
    ):
        self.optimizer = optimizer
        self.max_epoch = max_epoch
        self.min_lr = min_lr
        self.decay_rate = decay_rate
        self.init_lr = init_lr
        self.warmup_steps = warmup_steps
        # A negative warmup_start_lr means "start directly at init_lr".
        self.warmup_start_lr = init_lr if warmup_start_lr < 0 else warmup_start_lr

    def step(self, cur_epoch, cur_step):
        """Set the optimizer learning rate for the given (epoch, step)."""
        if cur_epoch == 0:
            # Warmup is assumed to fit entirely within the first epoch.
            warmup_lr_schedule(
                step=cur_step,
                optimizer=self.optimizer,
                max_step=self.warmup_steps,
                init_lr=self.warmup_start_lr,
                max_lr=self.init_lr,
            )
        else:
            step_lr_schedule(
                epoch=cur_epoch,
                optimizer=self.optimizer,
                init_lr=self.init_lr,
                min_lr=self.min_lr,
                decay_rate=self.decay_rate,
            )
@registry.register_lr_scheduler("linear_warmup_cosine_lr")
class LinearWarmupCosineLRScheduler:
    """Linear warmup during the first epoch, then cosine decay toward min_lr."""

    def __init__(
        self,
        optimizer,
        max_epoch,
        min_lr,
        init_lr,
        warmup_steps=0,
        warmup_start_lr=-1,
        **kwargs
    ):
        self.optimizer = optimizer
        self.max_epoch = max_epoch
        self.min_lr = min_lr
        self.init_lr = init_lr
        self.warmup_steps = warmup_steps
        # A negative warmup_start_lr means "start directly at init_lr".
        self.warmup_start_lr = init_lr if warmup_start_lr < 0 else warmup_start_lr

    def step(self, cur_epoch, cur_step):
        """Set the optimizer learning rate for the given (epoch, step)."""
        # assuming the warmup iters less than one epoch
        if cur_epoch == 0:
            warmup_lr_schedule(
                step=cur_step,
                optimizer=self.optimizer,
                max_step=self.warmup_steps,
                init_lr=self.warmup_start_lr,
                max_lr=self.init_lr,
            )
        else:
            cosine_lr_schedule(
                epoch=cur_epoch,
                optimizer=self.optimizer,
                max_epoch=self.max_epoch,
                init_lr=self.init_lr,
                min_lr=self.min_lr,
            )
def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
    """Cosine-decay the learning rate from init_lr to min_lr over max_epoch epochs."""
    span = init_lr - min_lr
    lr = span * 0.5 * (1.0 + math.cos(math.pi * epoch / max_epoch)) + min_lr
    for group in optimizer.param_groups:
        group["lr"] = lr
def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr):
    """Linearly ramp the learning rate from init_lr to max_lr over max_step steps."""
    # max(max_step, 1) guards against division by zero; min() caps at max_lr.
    lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max(max_step, 1))
    for group in optimizer.param_groups:
        group["lr"] = lr
def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate):
    """Exponentially decay the learning rate by decay_rate each epoch, floored at min_lr."""
    lr = max(min_lr, init_lr * (decay_rate**epoch))
    for group in optimizer.param_groups:
        group["lr"] = lr
__author__ = "aagrawal"
# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
# (https://github.com/tylin/coco-caption/blob/master/pycocoevalcap/eval.py).
import sys
import re
class VQAEval:
def __init__(self, vqa=None, vqaRes=None, n=2):
    """Accuracy evaluator implementing the official VQA metric.

    vqa: ground-truth helper exposing getQuesIds() and a .qa mapping.
    vqaRes: result helper with predicted answers in a .qa mapping.
    n: number of decimal places used when reporting accuracies.
    """
    self.n = n
    # Per-run outputs, filled by evaluate().
    self.accuracy = {}
    self.evalQA = {}
    self.evalQuesType = {}
    self.evalAnsType = {}
    self.vqa = vqa
    self.vqaRes = vqaRes
    if vqa is not None:
        self.params = {"question_id": vqa.getQuesIds()}
    # Normalization tables below follow the official VQA evaluation code:
    # contractions are restored, number words mapped to digits, and
    # articles dropped before answers are compared.
    self.contractions = {
        "aint": "ain't",
        "arent": "aren't",
        "cant": "can't",
        "couldve": "could've",
        "couldnt": "couldn't",
        "couldn'tve": "couldn't've",
        "couldnt've": "couldn't've",
        "didnt": "didn't",
        "doesnt": "doesn't",
        "dont": "don't",
        "hadnt": "hadn't",
        "hadnt've": "hadn't've",
        "hadn'tve": "hadn't've",
        "hasnt": "hasn't",
        "havent": "haven't",
        "hed": "he'd",
        "hed've": "he'd've",
        "he'dve": "he'd've",
        "hes": "he's",
        "howd": "how'd",
        "howll": "how'll",
        "hows": "how's",
        "Id've": "I'd've",
        "I'dve": "I'd've",
        "Im": "I'm",
        "Ive": "I've",
        "isnt": "isn't",
        "itd": "it'd",
        "itd've": "it'd've",
        "it'dve": "it'd've",
        "itll": "it'll",
        "let's": "let's",
        "maam": "ma'am",
        "mightnt": "mightn't",
        "mightnt've": "mightn't've",
        "mightn'tve": "mightn't've",
        "mightve": "might've",
        "mustnt": "mustn't",
        "mustve": "must've",
        "neednt": "needn't",
        "notve": "not've",
        "oclock": "o'clock",
        "oughtnt": "oughtn't",
        "ow's'at": "'ow's'at",
        "'ows'at": "'ow's'at",
        "'ow'sat": "'ow's'at",
        "shant": "shan't",
        "shed've": "she'd've",
        "she'dve": "she'd've",
        "she's": "she's",
        "shouldve": "should've",
        "shouldnt": "shouldn't",
        "shouldnt've": "shouldn't've",
        "shouldn'tve": "shouldn't've",
        # NOTE(review): this entry maps contraction -> plain form, the
        # reverse of every other entry; inherited from the official code.
        "somebody'd": "somebodyd",
        "somebodyd've": "somebody'd've",
        "somebody'dve": "somebody'd've",
        "somebodyll": "somebody'll",
        "somebodys": "somebody's",
        "someoned": "someone'd",
        "someoned've": "someone'd've",
        "someone'dve": "someone'd've",
        "someonell": "someone'll",
        "someones": "someone's",
        "somethingd": "something'd",
        "somethingd've": "something'd've",
        "something'dve": "something'd've",
        "somethingll": "something'll",
        "thats": "that's",
        "thered": "there'd",
        "thered've": "there'd've",
        "there'dve": "there'd've",
        "therere": "there're",
        "theres": "there's",
        "theyd": "they'd",
        "theyd've": "they'd've",
        "they'dve": "they'd've",
        "theyll": "they'll",
        "theyre": "they're",
        "theyve": "they've",
        "twas": "'twas",
        "wasnt": "wasn't",
        "wed've": "we'd've",
        "we'dve": "we'd've",
        "weve": "we've",
        "werent": "weren't",
        "whatll": "what'll",
        "whatre": "what're",
        "whats": "what's",
        "whatve": "what've",
        "whens": "when's",
        "whered": "where'd",
        "wheres": "where's",
        "whereve": "where've",
        "whod": "who'd",
        "whod've": "who'd've",
        "who'dve": "who'd've",
        "wholl": "who'll",
        "whos": "who's",
        "whove": "who've",
        "whyll": "why'll",
        "whyre": "why're",
        "whys": "why's",
        "wont": "won't",
        "wouldve": "would've",
        "wouldnt": "wouldn't",
        "wouldnt've": "wouldn't've",
        "wouldn'tve": "wouldn't've",
        "yall": "y'all",
        "yall'll": "y'all'll",
        "y'allll": "y'all'll",
        "yall'd've": "y'all'd've",
        "y'alld've": "y'all'd've",
        "y'all'dve": "y'all'd've",
        "youd": "you'd",
        "youd've": "you'd've",
        "you'dve": "you'd've",
        "youll": "you'll",
        "youre": "you're",
        "youve": "you've",
    }
    # Number words -> digit strings ("none" counts as "0").
    self.manualMap = {
        "none": "0",
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
    }
    # Articles removed during answer normalization.
    self.articles = ["a", "an", "the"]
    # NOTE(review): "(?!<=\d)" is a negative lookahead for the literal
    # text "<=" followed by a digit; it was probably meant to be the
    # lookbehind "(?<!\d)". Kept as-is because official VQA accuracy
    # numbers depend on this exact behavior.
    self.periodStrip = re.compile("(?!<=\d)(\.)(?!\d)")
    # Matches a comma used as a thousands separator (digit,digit).
    self.commaStrip = re.compile("(\d)(,)(\d)")
    # Punctuation handled specially by processPunctuation().
    self.punct = [
        ";",
        r"/",
        "[",
        "]",
        '"',
        "{",
        "}",
        "(",
        ")",
        "=",
        "+",
        "\\",
        "_",
        "-",
        ">",
        "<",
        "@",
        "`",
        ",",
        "?",
        "!",
    ]
def evaluate(self, quesIds=None):
    """Compute VQA accuracies for the given question ids.

    Implements the official VQA accuracy metric: the (normalized) predicted
    answer is compared against the ground-truth answers with each annotator
    held out in turn, and ``acc = min(#matching_others / 3, 1)`` is averaged
    over the hold-outs. Results are accumulated overall, per question type,
    and per answer type via the ``setEval*`` / ``setAccuracy`` methods.

    :param quesIds (int array): question ids to evaluate; defaults to every
        question id in ``self.params``.
    """
    # Bug fix: use identity comparison with None instead of ``== None``.
    if quesIds is None:
        quesIds = [quesId for quesId in self.params["question_id"]]
    gts = {}
    res = {}
    for quesId in quesIds:
        gts[quesId] = self.vqa.qa[quesId]
        res[quesId] = self.vqaRes.qa[quesId]

    # =================================================
    # Compute accuracy
    # =================================================
    accQA = []
    accQuesType = {}
    accAnsType = {}
    print("computing accuracy")
    step = 0
    for quesId in quesIds:
        # Normalize the predicted answer: collapse whitespace, strip
        # punctuation, map digits/articles/contractions.
        resAns = res[quesId]["answer"]
        resAns = resAns.replace("\n", " ")
        resAns = resAns.replace("\t", " ")
        resAns = resAns.strip()
        resAns = self.processPunctuation(resAns)
        resAns = self.processDigitArticle(resAns)
        gtAcc = []
        gtAnswers = [ans["answer"] for ans in gts[quesId]["answers"]]
        if len(set(gtAnswers)) > 1:
            # Ground-truth answers are only normalized when annotators
            # disagree, matching the official evaluation code.
            for ansDic in gts[quesId]["answers"]:
                ansDic["answer"] = self.processPunctuation(ansDic["answer"])
        for gtAnsDatum in gts[quesId]["answers"]:
            # Leave-one-out: score against the remaining annotators.
            # NOTE(review): ``item != gtAnsDatum`` compares answer dicts by
            # value, so identical duplicate annotations are all excluded —
            # verify this matches the intended metric.
            otherGTAns = [
                item for item in gts[quesId]["answers"] if item != gtAnsDatum
            ]
            matchingAns = [item for item in otherGTAns if item["answer"] == resAns]
            acc = min(1, float(len(matchingAns)) / 3)
            gtAcc.append(acc)
        quesType = gts[quesId]["question_type"]
        ansType = gts[quesId]["answer_type"]
        avgGTAcc = float(sum(gtAcc)) / len(gtAcc)
        accQA.append(avgGTAcc)
        if quesType not in accQuesType:
            accQuesType[quesType] = []
        accQuesType[quesType].append(avgGTAcc)
        if ansType not in accAnsType:
            accAnsType[ansType] = []
        accAnsType[ansType].append(avgGTAcc)
        self.setEvalQA(quesId, avgGTAcc)
        self.setEvalQuesType(quesId, quesType, avgGTAcc)
        self.setEvalAnsType(quesId, ansType, avgGTAcc)
        # Update the console progress bar every 100 questions.
        if step % 100 == 0:
            self.updateProgress(step / float(len(quesIds)))
        step = step + 1
    self.setAccuracy(accQA, accQuesType, accAnsType)
    print("Done computing accuracy")
def processPunctuation(self, inText):
    """Strip punctuation from *inText* following the official VQA rules.

    A punctuation mark is deleted outright when it abuts a space in the
    text (or when the text contains a digit,digit pattern such as "1,000");
    otherwise it is replaced by a space. Periods that are not part of a
    decimal number are then removed.
    """
    outText = inText
    for p in self.punct:
        if (p + " " in inText or " " + p in inText) or (
            re.search(self.commaStrip, inText) is not None
        ):
            outText = outText.replace(p, "")
        else:
            outText = outText.replace(p, " ")
    # Bug fix: the original passed ``re.UNICODE`` (== 32) as the positional
    # *count* argument of ``Pattern.sub``, silently capping replacements at
    # 32 periods; drop it so every non-decimal period is stripped.
    outText = self.periodStrip.sub("", outText)
    return outText
def processDigitArticle(self, inText):
    """Normalize number words, articles, and contractions in *inText*.

    Lower-cases and splits the text, maps number words to digits via
    ``self.manualMap`` (e.g. "two" -> "2"), drops articles ("a", "an",
    "the"), and restores apostrophes in known contractions
    (e.g. "whos" -> "who's").
    """
    outText = []
    tempText = inText.lower().split()
    for word in tempText:
        # Bug fix: the original used ``manualMap.setdefault(word, word)``,
        # which inserted every unseen word into the shared map as a side
        # effect; ``get`` performs the same lookup without mutating it.
        word = self.manualMap.get(word, word)
        if word not in self.articles:
            outText.append(word)
    for wordId, word in enumerate(outText):
        if word in self.contractions:
            outText[wordId] = self.contractions[word]
    outText = " ".join(outText)
    return outText
def setAccuracy(self, accQA, accQuesType, accAnsType):
    """Populate ``self.accuracy`` with overall and per-type mean accuracies (in %)."""

    def _percent(values):
        # Mean of *values* scaled to a percentage, rounded to self.n places.
        return round(100 * float(sum(values)) / len(values), self.n)

    self.accuracy["overall"] = _percent(accQA)
    self.accuracy["perQuestionType"] = {
        qType: _percent(accs) for qType, accs in accQuesType.items()
    }
    self.accuracy["perAnswerType"] = {
        aType: _percent(accs) for aType, accs in accAnsType.items()
    }
def setEvalQA(self, quesId, acc):
    """Record the accuracy (as a percentage) for a single question id."""
    percent = round(100 * acc, self.n)
    self.evalQA[quesId] = percent
def setEvalQuesType(self, quesId, quesType, acc):
    """Record the accuracy (%) for *quesId* under its question type."""
    bucket = self.evalQuesType.setdefault(quesType, {})
    bucket[quesId] = round(100 * acc, self.n)
def setEvalAnsType(self, quesId, ansType, acc):
    """Record the accuracy (%) for *quesId* under its answer type."""
    bucket = self.evalAnsType.setdefault(ansType, {})
    bucket[quesId] = round(100 * acc, self.n)
def updateProgress(self, progress):
    """Render a 20-character text progress bar on stdout.

    :param progress: fraction complete in [0, 1]. Out-of-range values are
        clamped ("Halt..."/"Done..." status), and non-numeric values are
        reported as an error in the status suffix.
    """
    barLength = 20  # width of the progress bar in characters
    status = ""
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        progress = 0
        status = "error: progress var must be float\r\n"
    if progress < 0:
        progress = 0
        status = "Halt...\r\n"
    if progress >= 1:
        progress = 1
        status = "Done...\r\n"
    block = int(round(barLength * progress))
    # Bug fix: corrected the user-visible typo "Finshed" -> "Finished".
    text = "\rFinished Percent: [{0}] {1}% {2}".format(
        "#" * block + "-" * (barLength - block), int(progress * 100), status
    )
    sys.stdout.write(text)
    sys.stdout.flush()
__author__ = "aagrawal"
__version__ = "0.9"
# Interface for accessing the VQA dataset.
# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
# (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py).
# The following functions are defined:
# VQA - VQA class that loads VQA annotation file and prepares data structures.
# getQuesIds - Get question ids that satisfy given filter conditions.
# getImgIds - Get image ids that satisfy given filter conditions.
# loadQA - Load questions and answers with the specified question ids.
# showQA - Display the specified questions and answers.
# loadRes - Load result file and create result object.
# Help on each function can be accessed by: "help(COCO.function)"
import json
import datetime
import copy
class VQA:
    """Helper for loading and querying VQA annotation/question files.

    Mirrors the MSCOCO Python API: builds lookup tables mapping question
    ids and image ids to annotations so that questions and answers can be
    filtered, loaded, and displayed efficiently.
    """

    def __init__(self, annotation_file=None, question_file=None):
        """
        Constructor of VQA helper class for reading and visualizing questions and answers.
        :param annotation_file (str): location of VQA annotation file
        :param question_file (str): location of VQA question file
        :return:
        """
        # load dataset
        self.dataset = {}
        self.questions = {}
        self.qa = {}        # question_id -> annotation dict
        self.qqa = {}       # question_id -> question dict
        self.imgToQA = {}   # image_id -> list of annotation dicts
        # Bug fix: identity comparison with None instead of ``not x == None``.
        if annotation_file is not None and question_file is not None:
            print("loading VQA annotations and questions into memory...")
            time_t = datetime.datetime.utcnow()
            dataset = json.load(open(annotation_file, "r"))
            questions = json.load(open(question_file, "r"))
            self.dataset = dataset
            self.questions = questions
            self.createIndex()

    def createIndex(self):
        """Build the qa / qqa / imgToQA lookup tables from the raw dataset."""
        print("creating index...")
        imgToQA = {ann["image_id"]: [] for ann in self.dataset["annotations"]}
        qa = {ann["question_id"]: [] for ann in self.dataset["annotations"]}
        qqa = {ann["question_id"]: [] for ann in self.dataset["annotations"]}
        for ann in self.dataset["annotations"]:
            imgToQA[ann["image_id"]] += [ann]
            qa[ann["question_id"]] = ann
        for ques in self.questions["questions"]:
            qqa[ques["question_id"]] = ques
        print("index created!")
        # create class members
        self.qa = qa
        self.qqa = qqa
        self.imgToQA = imgToQA

    def info(self):
        """
        Print information about the VQA annotation file.
        :return:
        """
        # Bug fix: the original read ``self.datset`` (typo), which raised
        # AttributeError whenever info() was called.
        for key, value in self.dataset["info"].items():
            print("%s: %s" % (key, value))

    def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):
        """
        Get question ids that satisfy given filter conditions. default skips that filter
        :param imgIds (int array) : get question ids for given imgs
               quesTypes (str array) : get question ids for given question types
               ansTypes (str array) : get question ids for given answer types
        :return: ids (int array) : integer array of question ids
        """
        imgIds = imgIds if type(imgIds) == list else [imgIds]
        quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
        ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
        if len(imgIds) == len(quesTypes) == len(ansTypes) == 0:
            anns = self.dataset["annotations"]
        else:
            if not len(imgIds) == 0:
                # imgToQA values are lists of annotations, so flattening
                # with sum() is correct here.
                anns = sum(
                    [self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA],
                    [],
                )
            else:
                anns = self.dataset["annotations"]
            anns = (
                anns
                if len(quesTypes) == 0
                else [ann for ann in anns if ann["question_type"] in quesTypes]
            )
            anns = (
                anns
                if len(ansTypes) == 0
                else [ann for ann in anns if ann["answer_type"] in ansTypes]
            )
        ids = [ann["question_id"] for ann in anns]
        return ids

    def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]):
        """
        Get image ids that satisfy given filter conditions. default skips that filter
        :param quesIds (int array) : get image ids for given question ids
               quesTypes (str array) : get image ids for given question types
               ansTypes (str array) : get image ids for given answer types
        :return: ids (int array) : integer array of image ids
        """
        quesIds = quesIds if type(quesIds) == list else [quesIds]
        quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
        ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
        if len(quesIds) == len(quesTypes) == len(ansTypes) == 0:
            anns = self.dataset["annotations"]
        else:
            if not len(quesIds) == 0:
                # Bug fix: ``self.qa`` maps a question id to a single
                # annotation dict (see createIndex), so the original
                # ``sum([...], [])`` flattening raised TypeError; collect
                # the annotations directly instead.
                anns = [self.qa[quesId] for quesId in quesIds if quesId in self.qa]
            else:
                anns = self.dataset["annotations"]
            anns = (
                anns
                if len(quesTypes) == 0
                else [ann for ann in anns if ann["question_type"] in quesTypes]
            )
            anns = (
                anns
                if len(ansTypes) == 0
                else [ann for ann in anns if ann["answer_type"] in ansTypes]
            )
        ids = [ann["image_id"] for ann in anns]
        return ids

    def loadQA(self, ids=[]):
        """
        Load questions and answers with the specified question ids.
        :param ids (int array) : integer ids specifying question ids
        :return: qa (object array) : loaded qa objects
        """
        if type(ids) == list:
            return [self.qa[id] for id in ids]
        elif type(ids) == int:
            return [self.qa[ids]]

    def showQA(self, anns):
        """
        Display the specified annotations.
        :param anns (array of object): annotations to display
        :return: None
        """
        if len(anns) == 0:
            return 0
        for ann in anns:
            quesId = ann["question_id"]
            print("Question: %s" % (self.qqa[quesId]["question"]))
            for ans in ann["answers"]:
                print("Answer %d: %s" % (ans["answer_id"], ans["answer"]))

    def loadRes(self, resFile, quesFile):
        """
        Load result file and return a result object.
        :param resFile (str)  : file name of result file
        :param quesFile (str) : file name of the question file
        :return: res (obj)    : result api object
        """
        res = VQA()
        res.questions = json.load(open(quesFile))
        # Copy dataset-level metadata from the question file so the result
        # object mirrors the ground-truth object's structure.
        res.dataset["info"] = copy.deepcopy(self.questions["info"])
        res.dataset["task_type"] = copy.deepcopy(self.questions["task_type"])
        res.dataset["data_type"] = copy.deepcopy(self.questions["data_type"])
        res.dataset["data_subtype"] = copy.deepcopy(self.questions["data_subtype"])
        res.dataset["license"] = copy.deepcopy(self.questions["license"])
        print("Loading and preparing results...     ")
        time_t = datetime.datetime.utcnow()
        anns = json.load(open(resFile))
        assert type(anns) == list, "results is not an array of objects"
        annsQuesIds = [ann["question_id"] for ann in anns]
        assert set(annsQuesIds) == set(
            self.getQuesIds()
        ), "Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is atleast one question id that does not belong to the question ids in the annotation file."
        for ann in anns:
            quesId = ann["question_id"]
            if res.dataset["task_type"] == "Multiple Choice":
                assert (
                    ann["answer"] in self.qqa[quesId]["multiple_choices"]
                ), "predicted answer is not one of the multiple choices"
            # Propagate image id and type metadata from the ground truth.
            qaAnn = self.qa[quesId]
            ann["image_id"] = qaAnn["image_id"]
            ann["question_type"] = qaAnn["question_type"]
            ann["answer_type"] = qaAnn["answer_type"]
        print(
            "DONE (t=%0.2fs)" % ((datetime.datetime.utcnow() - time_t).total_seconds())
        )
        res.dataset["annotations"] = anns
        res.createIndex()
        return res
import numpy as np
import streamlit as st
import torch
from lavis.models.blip_models.blip_image_text_matching import compute_gradcam
from lavis.processors import load_processor
from PIL import Image
from app import device, load_demo_image
from app.utils import getAttMap, init_bert_tokenizer, load_blip_itm_model
def app():
    """Streamlit page for BLIP image-text matching with a GradCam overlay."""
    # Model selection and loading.
    model_type = st.sidebar.selectbox("Model:", ["BLIP_base", "BLIP_large"])
    if model_type.startswith("BLIP"):
        model = load_blip_itm_model(device, model_type=model_type.split("_")[1])

    vis_processor = load_processor("blip_image_eval").build(image_size=384)

    st.markdown(
        "<h1 style='text-align: center;'>Image Text Matching</h1>",
        unsafe_allow_html=True,
    )

    # Cross-attention layer used for GradCam (1-based in the UI, 0-based internally).
    layer_choices = list(range(1, 12))
    layer_num = (
        st.sidebar.selectbox("Layer number", layer_choices, index=layer_choices.index(7))
        - 1
    )

    file = st.file_uploader("""Try the provided image or upload your own:""")

    col1, col2 = st.columns(2)
    col1.header("Image")
    col2.header("GradCam")

    raw_img = Image.open(file).convert("RGB") if file else load_demo_image()

    # Resize to a fixed display width of 720px, keeping the aspect ratio.
    w, h = raw_img.size
    scale = 720 / w
    resized_image = raw_img.resize((int(w * scale), int(h * scale)))
    col1.image(resized_image, use_column_width=True)

    col3, col4 = st.columns(2)
    col3.header("Text")
    user_question = col3.text_input(
        "Input your sentence!", "a woman sitting on the beach with a dog"
    )
    submit_button = col3.button("Submit")
    col4.header("Matching score")

    if submit_button:
        tokenizer = init_bert_tokenizer()
        img = vis_processor(raw_img).unsqueeze(0).to(device)

        text_processor = load_processor("blip_caption").build()
        qry = text_processor(user_question)

        norm_img = np.float32(resized_image) / 255
        qry_tok = tokenizer(qry, return_tensors="pt").to(device)

        # GradCam over the whole sentence plus the raw ITM output.
        gradcam, output = compute_gradcam(model, img, qry, qry_tok, block_num=layer_num)
        avg_gradcam = getAttMap(norm_img, gradcam[0][1], blur=True)
        col2.image(avg_gradcam, use_column_width=True, clamp=True)

        # Softmax over the ITM logits; index 1 is the "match" probability.
        itm_score = torch.nn.functional.softmax(output, dim=1)
        match_pct = itm_score[0][1].item() * 100
        col4.markdown(
            '<p style="text-align: left; font-size: 25px;">\n{:.3f}%</p>'.format(
                match_pct
            ),
            unsafe_allow_html=True,
        )
import math
import numpy as np
import streamlit as st
from lavis.models.blip_models.blip_image_text_matching import compute_gradcam
from lavis.processors import load_processor
from PIL import Image
from app import device, load_demo_image
from app.utils import getAttMap, init_bert_tokenizer, load_blip_itm_model
def app():
    """Streamlit page that localizes each word of a caption in the image via GradCam."""
    model_type = st.sidebar.selectbox("Model:", ["BLIP_base", "BLIP_large"])

    # Cross-attention layer used for GradCam (1-based in the UI, 0-based internally).
    layer_choices = list(range(1, 12))
    layer_num = (
        st.sidebar.selectbox("Layer number", layer_choices, index=layer_choices.index(7))
        - 1
    )

    st.markdown(
        "<h1 style='text-align: center;'>Text Localization</h1>", unsafe_allow_html=True
    )

    vis_processor = load_processor("blip_image_eval").build(image_size=384)
    text_processor = load_processor("blip_caption")
    tokenizer = init_bert_tokenizer()

    file = st.file_uploader("Try the provided image and text or use your own ones.")
    query = st.text_input(
        "Try a different input.", "A girl playing with her dog on the beach."
    )
    submit_button = st.button("Submit")

    col1, col2 = st.columns(2)

    raw_img = Image.open(file).convert("RGB") if file else load_demo_image()

    col1.header("Image")
    # Resize to a fixed display width of 720px, keeping the aspect ratio.
    w, h = raw_img.size
    scale = 720 / w
    resized_image = raw_img.resize((int(w * scale), int(h * scale)))
    col1.image(resized_image, use_column_width=True)

    col2.header("GradCam")

    if submit_button:
        if model_type.startswith("BLIP"):
            model = load_blip_itm_model(device, model_type=model_type.split("_")[1])

        img = vis_processor(raw_img).unsqueeze(0).to(device)
        qry = text_processor(query)
        qry_tok = tokenizer(qry, return_tensors="pt").to(device)

        norm_img = np.float32(resized_image) / 255
        gradcam, _ = compute_gradcam(model, img, qry, qry_tok, block_num=layer_num)

        # Sentence-level (averaged) GradCam.
        avg_gradcam = getAttMap(norm_img, gradcam[0][1], blur=True)
        col2.image(avg_gradcam, use_column_width=True, clamp=True)

        # One GradCam per token (special tokens excluded), laid out 4 per row.
        num_cols = 4.0
        num_tokens = len(qry_tok.input_ids[0]) - 2
        gradcam_iter = iter(gradcam[0][2:-1])
        token_id_iter = iter(qry_tok.input_ids[0][1:-1])

        for _ in range(int(math.ceil(num_tokens / num_cols))):
            with st.container():
                for col in st.columns(int(num_cols)):
                    token_id = next(token_id_iter, None)
                    if not token_id:
                        break
                    gradcam_img = next(gradcam_iter)
                    word = tokenizer.decode([token_id])
                    gradcam_todraw = getAttMap(norm_img, gradcam_img, blur=True)
                    col.markdown(
                        '<p style="text-align: center; font-size: 25px;">{}</p>'.format(
                            word
                        ),
                        unsafe_allow_html=True,
                    )
                    col.image(gradcam_todraw, use_column_width=True, clamp=True)
import os
import numpy as np
import streamlit as st
import torch
import torch.nn.functional as F
from app import cache_root, device
from app.utils import (
getAttMap,
init_bert_tokenizer,
load_blip_itm_model,
read_img,
resize_img,
)
from lavis.models import load_model
from lavis.processors import load_processor
@st.cache(
    hash_funcs={
        torch.nn.parameter.Parameter: lambda parameter: parameter.data.detach()
        .cpu()
        .numpy()
    },
    allow_output_mutation=True,
)
def load_feat():
    """Load (and st.cache) precomputed BLIP image features for COCO train2014.

    Downloads the feature file into ``assets/`` next to this module on
    first use. Returns ``(path2feat, paths, all_img_feats)`` where
    ``path2feat`` maps image path -> feature tensor, ``paths`` is the
    sorted list of image paths, and ``all_img_feats`` stacks the features
    in the same order on ``device``.
    """
    from lavis.common.utils import download_url

    dirname = os.path.join(os.path.dirname(__file__), "assets")
    filename = "path2feat_coco_train2014.pth"
    filepath = os.path.join(dirname, filename)
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/path2feat_coco_train2014.pth"

    if not os.path.exists(filepath):
        download_url(url=url, root=dirname, filename="path2feat_coco_train2014.pth")

    path2feat = torch.load(filepath)

    # Sort paths so row i of the stacked tensor corresponds to paths[i].
    paths = sorted(path2feat.keys())

    all_img_feats = torch.stack([path2feat[k] for k in paths], dim=0).to(device)

    return path2feat, paths, all_img_feats
@st.cache(
    hash_funcs={
        torch.nn.parameter.Parameter: lambda parameter: parameter.data.detach()
        .cpu()
        .numpy()
    },
    allow_output_mutation=True,
)
def load_feature_extractor_model(device):
    """Load (and st.cache) the base BLIP feature extractor with pretrained weights."""
    ckpt_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base.pth"

    feature_extractor = load_model(
        "blip_feature_extractor", model_type="base", is_eval=True, device=device
    )
    feature_extractor.load_from_pretrained(ckpt_url)

    return feature_extractor
def app():
    """Streamlit page for text-to-image search over COCO train2014.

    Ranks precomputed image features against the query embedding (ITC),
    then optionally re-ranks the top hits with the image-text matching
    head (ITM) and overlays GradCam relevance maps.
    """
    # === layout ===
    model_type = st.sidebar.selectbox("Model:", ["BLIP_base", "BLIP_large"])

    file_root = os.path.join(cache_root, "coco/images/train2014/")

    values = [12, 24, 48]
    default_layer_num = values.index(24)
    num_display = st.sidebar.selectbox(
        "Number of images:", values, index=default_layer_num
    )

    show_gradcam = st.sidebar.selectbox("Show GradCam:", [True, False], index=1)
    itm_ranking = st.sidebar.selectbox("Multimodal re-ranking:", [True, False], index=0)

    # st.title('Multimodal Search')
    st.markdown(
        "<h1 style='text-align: center;'>Multimodal Search</h1>", unsafe_allow_html=True
    )

    # === event ===
    vis_processor = load_processor("blip_image_eval").build(image_size=384)
    text_processor = load_processor("blip_caption")

    user_question = st.text_input(
        "Search query", "A dog running on the grass.", help="Type something to search."
    )
    user_question = text_processor(user_question)
    feature_extractor = load_feature_extractor_model(device)

    # ======= ITC =========
    # Embed the query text once; [0, 0] selects the projected [CLS] feature.
    sample = {"text_input": user_question}

    with torch.no_grad():
        text_feature = feature_extractor.extract_features(
            sample, mode="text"
        ).text_embeds_proj[0, 0]

    path2feat, paths, all_img_feats = load_feat()
    # NOTE(review): Tensor.to() is not in-place, so this line has no effect
    # (load_feat already returns the features on `device`) — verify intent.
    all_img_feats.to(device)
    all_img_feats = F.normalize(all_img_feats, dim=1)

    num_cols = 4
    num_rows = int(num_display / num_cols)

    # Similarity of the query against every cached image feature; keep the
    # top `num_display` image paths in descending order.
    similarities = text_feature @ all_img_feats.T
    indices = torch.argsort(similarities, descending=True)[:num_display]

    top_paths = [paths[ind.detach().cpu().item()] for ind in indices]
    sorted_similarities = [similarities[idx] for idx in indices]
    filenames = [os.path.join(file_root, p) for p in top_paths]

    # ========= ITM and GradCam ==========
    bsz = 4  # max number of images to avoid cuda oom
    if model_type.startswith("BLIP"):
        blip_type = model_type.split("_")[1]
        itm_model = load_blip_itm_model(device, model_type=blip_type)

    tokenizer = init_bert_tokenizer()
    # The same query is repeated for every image in a batch.
    queries_batch = [user_question] * bsz
    queries_tok_batch = tokenizer(queries_batch, return_tensors="pt").to(device)

    num_batches = int(num_display / bsz)

    avg_gradcams = []
    all_raw_images = []
    itm_scores = []

    for i in range(num_batches):
        filenames_in_batch = filenames[i * bsz : (i + 1) * bsz]
        raw_images, images = read_and_process_images(filenames_in_batch, vis_processor)
        gradcam, itm_output = compute_gradcam_batch(
            itm_model, images, queries_batch, queries_tok_batch
        )
        all_raw_images.extend([resize_img(r_img) for r_img in raw_images])
        norm_imgs = [np.float32(r_img) / 255 for r_img in raw_images]

        for norm_img, grad_cam in zip(norm_imgs, gradcam):
            avg_gradcam = getAttMap(norm_img, grad_cam[0], blur=True)
            avg_gradcams.append(avg_gradcam)

        with torch.no_grad():
            itm_score = torch.nn.functional.softmax(itm_output, dim=1)

        itm_scores.append(itm_score)

    # ========= ITM re-ranking =========
    # Column 1 of the ITM output is the "match" probability.
    itm_scores = torch.cat(itm_scores)[:, 1]

    if itm_ranking:
        itm_scores_sorted, indices = torch.sort(itm_scores, descending=True)

        avg_gradcams_sorted = []
        all_raw_images_sorted = []
        for idx in indices:
            avg_gradcams_sorted.append(avg_gradcams[idx])
            all_raw_images_sorted.append(all_raw_images[idx])

        avg_gradcams = avg_gradcams_sorted
        all_raw_images = all_raw_images_sorted

    if show_gradcam:
        images_to_show = iter(avg_gradcams)
    else:
        images_to_show = iter(all_raw_images)

    # Lay the results out in a grid, num_cols images per row.
    for _ in range(num_rows):
        with st.container():
            for col in st.columns(num_cols):
                col.image(next(images_to_show), use_column_width=True, clamp=True)
def read_and_process_images(image_paths, vis_processor):
    """Read images from *image_paths*; return (raw PIL images, stacked tensor on device)."""
    raw_images = [read_img(p) for p in image_paths]
    images_tensors = torch.stack([vis_processor(img) for img in raw_images]).to(device)
    return raw_images, images_tensors
def compute_gradcam_batch(model, visual_input, text_input, tokenized_text, block_num=6):
    """GradCam for a batch of images against a shared text query.

    Runs the ITM head, backpropagates the summed "match" logit, and
    combines the cross-attention maps with their (clamped) gradients at
    layer *block_num* to produce one relevance map per image, averaged
    over attention heads and query tokens.

    Returns ``(gradcam, output)`` where ``output`` is the raw ITM logits.
    """
    # Ask the chosen cross-attention layer to retain its attention maps
    # and gradients during forward/backward.
    model.text_encoder.base_model.base_model.encoder.layer[
        block_num
    ].crossattention.self.save_attention = True

    output = model({"image": visual_input, "text_input": text_input}, match_head="itm")
    # Sum of "match" logits over the batch; backward() populates the
    # attention gradients captured by the layer configured above.
    loss = output[:, 1].sum()
    model.zero_grad()
    loss.backward()
    with torch.no_grad():
        mask = tokenized_text.attention_mask.view(
            tokenized_text.attention_mask.size(0), 1, -1, 1, 1
        )  # (bsz,1,token_len, 1,1)
        # NOTE(review): mask.sum() sums over the whole batch, then 2 is
        # subtracted (presumably for [CLS]/[SEP]) — this looks correct only
        # when all batch entries are the same query; verify against callers.
        token_length = mask.sum() - 2
        token_length = token_length.cpu()
        # grads and cams [bsz, num_head, seq_len, image_patch]
        grads = model.text_encoder.base_model.base_model.encoder.layer[
            block_num
        ].crossattention.self.get_attn_gradients()
        cams = model.text_encoder.base_model.base_model.encoder.layer[
            block_num
        ].crossattention.self.get_attention_map()
        # assume using vit large with 576 num image patch
        # (index 0 is the [CLS] patch, dropped; the rest reshape to 24x24).
        cams = cams[:, :, :, 1:].reshape(visual_input.size(0), 12, -1, 24, 24) * mask
        grads = (
            grads[:, :, :, 1:].clamp(0).reshape(visual_input.size(0), 12, -1, 24, 24)
            * mask
        )
        gradcam = cams * grads
        # [enc token gradcam, average gradcam across token, gradcam for individual token]
        # gradcam = torch.cat((gradcam[0:1,:], gradcam[1:token_length+1, :].sum(dim=0, keepdim=True)/token_length, gradcam[1:, :]))
        # Average over attention heads, then over the real query tokens.
        gradcam = gradcam.mean(1).cpu().detach()
        gradcam = (
            gradcam[:, 1 : token_length + 1, :].sum(dim=1, keepdim=True) / token_length
        )
    return gradcam, output
import plotly.graph_objects as go
import requests
import streamlit as st
import torch
from lavis.models import load_model
from lavis.processors import load_processor
from lavis.processors.blip_processors import BlipCaptionProcessor
from PIL import Image
from app import device, load_demo_image
from app.utils import load_blip_itm_model
from lavis.processors.clip_processors import ClipImageEvalProcessor
@st.cache()
def load_demo_image(img_url=None):
    """Fetch *img_url* (Merlion park photo by default) and return it as an RGB PIL image.

    NOTE: this intentionally overrides the ``load_demo_image`` imported
    from ``app`` above with a page-specific default image.
    """
    default_url = "https://img.atlasobscura.com/yDJ86L8Ou6aIjBsxnlAy5f164w1rjTgcHZcx2yUs4mo/rt:fit/w:1200/q:81/sm:1/scp:1/ar:1/aHR0cHM6Ly9hdGxh/cy1kZXYuczMuYW1h/em9uYXdzLmNvbS91/cGxvYWRzL3BsYWNl/X2ltYWdlcy85MDll/MDRjOS00NTJjLTQx/NzQtYTY4MS02NmQw/MzI2YWIzNjk1ZGVk/MGZhMTJiMTM5MmZi/NGFfUmVhcl92aWV3/X29mX3RoZV9NZXJs/aW9uX3N0YXR1ZV9h/dF9NZXJsaW9uX1Bh/cmssX1NpbmdhcG9y/ZSxfd2l0aF9NYXJp/bmFfQmF5X1NhbmRz/X2luX3RoZV9kaXN0/YW5jZV8tXzIwMTQw/MzA3LmpwZw.jpg"
    source_url = img_url if img_url else default_url
    response = requests.get(source_url, stream=True)
    return Image.open(response.raw).convert("RGB")
@st.cache(
    hash_funcs={
        torch.nn.parameter.Parameter: lambda parameter: parameter.data.detach()
        .cpu()
        .numpy()
    },
    allow_output_mutation=True,
)
def load_model_cache(model_type, device):
    """Load (and st.cache) the feature-extractor model named by *model_type*.

    :param model_type: one of "blip", "albef", "CLIP_ViT-B-32",
        "CLIP_ViT-B-16", "CLIP_ViT-L-14".
    :param device: torch device to place the model on.
    :raises ValueError: if *model_type* is not recognized. (Bug fix: the
        original silently fell through and raised UnboundLocalError on
        ``return model`` instead; the explicit error matches the handling
        in ``app`` below.)
    """
    if model_type == "blip":
        model = load_model(
            "blip_feature_extractor", model_type="base", is_eval=True, device=device
        )
    elif model_type == "albef":
        model = load_model(
            "albef_feature_extractor", model_type="base", is_eval=True, device=device
        )
    elif model_type == "CLIP_ViT-B-32":
        model = load_model(
            "clip_feature_extractor", "ViT-B-32", is_eval=True, device=device
        )
    elif model_type == "CLIP_ViT-B-16":
        model = load_model(
            "clip_feature_extractor", "ViT-B-16", is_eval=True, device=device
        )
    elif model_type == "CLIP_ViT-L-14":
        model = load_model(
            "clip_feature_extractor", "ViT-L-14", is_eval=True, device=device
        )
    else:
        raise ValueError(f"Unknown model type {model_type}")
    return model
def app():
    """Streamlit page for zero-shot image classification.

    Lets the user pick a model (ALBEF / BLIP / CLIP variants) and a scoring
    method, enter up to five candidate category names, and plots the
    normalized class probabilities for the uploaded (or demo) image.
    """
    model_type = st.sidebar.selectbox(
        "Model:",
        ["ALBEF", "BLIP_Base", "CLIP_ViT-B-32", "CLIP_ViT-B-16", "CLIP_ViT-L-14"],
    )

    # "Cosine" scores embedding similarity; "Multimodal" uses the
    # image-text matching head (handled below for BLIP only).
    score_type = st.sidebar.selectbox("Score type:", ["Cosine", "Multimodal"])

    # ===== layout =====
    st.markdown(
        "<h1 style='text-align: center;'>Zero-shot Classification</h1>",
        unsafe_allow_html=True,
    )

    instructions = """Try the provided image or upload your own:"""
    file = st.file_uploader(instructions)

    st.header("Image")
    if file:
        raw_img = Image.open(file).convert("RGB")
    else:
        raw_img = load_demo_image()

    st.image(raw_img)  # , use_column_width=True)

    col1, col2 = st.columns(2)

    col1.header("Categories")

    cls_0 = col1.text_input("category 1", value="merlion")
    cls_1 = col1.text_input("category 2", value="sky")
    cls_2 = col1.text_input("category 3", value="giraffe")
    cls_3 = col1.text_input("category 4", value="fountain")
    cls_4 = col1.text_input("category 5", value="marina bay")

    # Drop empty entries; duplicate names are rejected outright.
    cls_names = [cls_0, cls_1, cls_2, cls_3, cls_4]
    cls_names = [cls_nm for cls_nm in cls_names if len(cls_nm) > 0]
    if len(cls_names) != len(set(cls_names)):
        st.error("Please provide unique class names")
        return

    button = st.button("Submit")

    col2.header("Prediction")

    # ===== event =====
    if button:
        if model_type.startswith("BLIP"):
            text_processor = BlipCaptionProcessor(prompt="A picture of ")
            cls_prompt = [text_processor(cls_nm) for cls_nm in cls_names]

            if score_type == "Cosine":
                vis_processor = load_processor("blip_image_eval").build(image_size=224)
                img = vis_processor(raw_img).unsqueeze(0).to(device)

                feature_extractor = load_model_cache(model_type="blip", device=device)

                sample = {"image": img, "text_input": cls_prompt}

                with torch.no_grad():
                    image_features = feature_extractor.extract_features(
                        sample, mode="image"
                    ).image_embeds_proj[:, 0]
                    text_features = feature_extractor.extract_features(
                        sample, mode="text"
                    ).text_embeds_proj[:, 0]
                    # Similarity sharpened by the model's learned temperature.
                    sims = (image_features @ text_features.t())[
                        0
                    ] / feature_extractor.temp
            else:
                vis_processor = load_processor("blip_image_eval").build(image_size=384)
                img = vis_processor(raw_img).unsqueeze(0).to(device)

                model = load_blip_itm_model(device)

                output = model(img, cls_prompt, match_head="itm")
                # Column 1 of the ITM head output is the "match" logit.
                sims = output[:, 1]

            sims = torch.nn.Softmax(dim=0)(sims)
            # Reversed so the horizontal bar chart lists category 1 on top.
            inv_sims = [sim * 100 for sim in sims.tolist()[::-1]]
        elif model_type.startswith("ALBEF"):
            vis_processor = load_processor("blip_image_eval").build(image_size=224)
            img = vis_processor(raw_img).unsqueeze(0).to(device)

            text_processor = BlipCaptionProcessor(prompt="A picture of ")
            cls_prompt = [text_processor(cls_nm) for cls_nm in cls_names]

            feature_extractor = load_model_cache(model_type="albef", device=device)

            sample = {"image": img, "text_input": cls_prompt}

            with torch.no_grad():
                image_features = feature_extractor.extract_features(
                    sample, mode="image"
                ).image_embeds_proj[:, 0]
                text_features = feature_extractor.extract_features(
                    sample, mode="text"
                ).text_embeds_proj[:, 0]

                # NOTE(review): these look like leftover debug statements —
                # they render raw tensor shapes in the UI; consider removing.
                st.write(image_features.shape)
                st.write(text_features.shape)

                sims = (image_features @ text_features.t())[0] / feature_extractor.temp
            sims = torch.nn.Softmax(dim=0)(sims)
            inv_sims = [sim * 100 for sim in sims.tolist()[::-1]]
        elif model_type.startswith("CLIP"):
            if model_type == "CLIP_ViT-B-32":
                model = load_model_cache(model_type="CLIP_ViT-B-32", device=device)
            elif model_type == "CLIP_ViT-B-16":
                model = load_model_cache(model_type="CLIP_ViT-B-16", device=device)
            elif model_type == "CLIP_ViT-L-14":
                model = load_model_cache(model_type="CLIP_ViT-L-14", device=device)
            else:
                raise ValueError(f"Unknown model type {model_type}")

            if score_type == "Cosine":
                # image_preprocess = ClipImageEvalProcessor(image_size=336)
                image_preprocess = ClipImageEvalProcessor(image_size=224)
                img = image_preprocess(raw_img).unsqueeze(0).to(device)

                sample = {"image": img, "text_input": cls_names}

                with torch.no_grad():
                    clip_features = model.extract_features(sample)

                    image_features = clip_features.image_embeds_proj
                    text_features = clip_features.text_embeds_proj

                    # Standard CLIP scoring: scaled logits through a softmax.
                    sims = (100.0 * image_features @ text_features.T)[0].softmax(dim=-1)
                    inv_sims = sims.tolist()[::-1]
            else:
                st.warning("CLIP does not support multimodal scoring.")
                return

        # Horizontal bar chart of the class probabilities.
        fig = go.Figure(
            go.Bar(
                x=inv_sims,
                y=cls_names[::-1],
                text=["{:.2f}".format(s) for s in inv_sims],
                orientation="h",
            )
        )
        fig.update_traces(
            textfont_size=12,
            textangle=0,
            textposition="outside",
            cliponaxis=False,
        )
        col2.plotly_chart(fig, use_container_width=True)
<div align="center">
<img alt="Logo" src="https://github.com/salesforce/Merlion/raw/main/merlion_logo.svg" width="80%"/>
</div>
<div align="center">
<a href="https://github.com/salesforce/Merlion/actions">
<img alt="Tests" src="https://github.com/salesforce/Merlion/actions/workflows/tests.yml/badge.svg?branch=main"/>
</a>
<a href="https://github.com/salesforce/Merlion/actions">
<img alt="Coverage" src="https://github.com/salesforce/Merlion/raw/badges/coverage.svg"/>
</a>
<a href="https://pypi.python.org/pypi/salesforce-merlion">
<img alt="PyPI Version" src="https://img.shields.io/pypi/v/salesforce-merlion.svg"/>
</a>
<a href="https://opensource.salesforce.com/Merlion/index.html">
<img alt="docs" src="https://github.com/salesforce/Merlion/actions/workflows/docs.yml/badge.svg"/>
</a>
</div>
# Merlion: A Machine Learning Library for Time Series
## Table of Contents
1. [Introduction](#introduction)
1. [Comparison with Related Libraries](#comparison-with-related-libraries)
1. [Installation](#installation)
1. [Documentation](#documentation)
1. [Getting Started](#getting-started)
1. [Anomaly Detection](#anomaly-detection)
1. [Forecasting](#forecasting)
1. [Evaluation and Benchmarking](#evaluation-and-benchmarking)
1. [Technical Report and Citing Merlion](#technical-report-and-citing-merlion)
## Introduction
Merlion is a Python library for time series intelligence. It provides an end-to-end machine learning framework that
includes loading and transforming data, building and training models, post-processing model outputs, and evaluating
model performance. It supports various time series learning tasks, including forecasting, anomaly detection,
and change point detection for both univariate and multivariate time series. This library aims to provide engineers and
researchers a one-stop solution to rapidly develop models for their specific time series needs, and benchmark them
across multiple time series datasets.
Merlion's key features are
- Standardized and easily extensible data loading & benchmarking for a wide range of forecasting and anomaly
detection datasets. This includes transparent support for custom datasets.
- A library of diverse models for anomaly detection, forecasting, and change point detection, all
unified under a shared interface. Models include classic statistical methods, tree ensembles, and deep
learning approaches. Advanced users may fully configure each model as desired.
- Abstract `DefaultDetector` and `DefaultForecaster` models that are efficient, robustly achieve good performance,
and provide a starting point for new users.
- AutoML for automated hyperparameter tuning and model selection.
- Unified API for using a wide range of models to forecast with
[exogenous regressors](https://opensource.salesforce.com/Merlion/tutorials/forecast/3_ForecastExogenous.html).
- Practical, industry-inspired post-processing rules for anomaly detectors that make anomaly scores more interpretable,
while also reducing the number of false positives.
- Easy-to-use ensembles that combine the outputs of multiple models to achieve more robust performance.
- Flexible evaluation pipelines that simulate the live deployment & re-training of a model in production,
and evaluate performance on both forecasting and anomaly detection.
- Native support for visualizing model predictions, including with a clickable visual UI.
- Distributed computation [backend](https://opensource.salesforce.com/Merlion/merlion.spark.html) using PySpark,
which can be used to serve time series applications at industrial scale.
## Comparison with Related Libraries
The table below provides a visual overview of how Merlion's key features compare to other libraries for time series
anomaly detection and/or forecasting.
| | Merlion | Prophet | Alibi Detect | Kats | darts | statsmodels | nixtla | GluonTS | RRCF | STUMPY | Greykite |pmdarima
:--- | :---: | :---:| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :----: | :---:
| Univariate Forecasting | ✅ | ✅| | ✅ | ✅ | ✅ | ✅ | ✅ | | |✅| ✅
| Multivariate Forecasting | ✅ | | | ✅ | ✅ | ✅| ✅ | ✅ | | | | |
| Univariate Anomaly Detection | ✅ | ✅ | ✅ | ✅ | ✅ | | | | ✅ | ✅ | ✅ | ✅ |
| Multivariate Anomaly Detection | ✅ | | ✅ | ✅ | ✅ | | | | ✅ | ✅ | | | |
| Pre Processing | ✅ | | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅
| Post Processing | ✅ | | ✅ | | | | | | | | | |
| AutoML | ✅ | | | ✅ | | | | | | | | ✅ | | ✅
| Ensembles | ✅ | | | ✅ | ✅ | | | | | ✅ | | | |
| Benchmarking | ✅ | | | | ✅ | ✅ | ✅ | | | | ✅ |
| Visualization | ✅ | ✅ | | ✅ | ✅ | | | | | | ✅ |
The following features are new in Merlion 2.0:
| | Merlion | Prophet | Alibi Detect | Kats | darts | statsmodels | nixtla | GluonTS | RRCF | STUMPY | Greykite |pmdarima
:--- | :---: | :---:| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :----: | :---:
| Exogenous Regressors | ✅ | ✅ | | |✅ | ✅ | | | | | ✅ | ✅
| Change Point Detection | ✅ | ✅ | ✅ | ✅ | | | | | | | ✅ |
| Clickable Visual UI | ✅ | | | | | | | | | | |
| Distributed Backend | ✅ | | | | | | ✅ | | | | |
## Installation
Merlion consists of two sub-repos: `merlion` implements the library's core time series intelligence features,
and `ts_datasets` provides standardized data loaders for multiple time series datasets. These loaders load
time series as ``pandas.DataFrame`` s with accompanying metadata.
You can install `merlion` from PyPI by calling ``pip install salesforce-merlion``. You may install from source by
cloning this repo and calling ``pip install Merlion/``, or ``pip install -e Merlion/`` to install in editable mode.
You may install additional dependencies via ``pip install salesforce-merlion[all]``, or by calling
``pip install "Merlion/[all]"`` if installing from source.
Individually, the optional dependencies include ``dashboard`` for a GUI dashboard,
``spark`` for a distributed computation backend with PySpark, and ``deep-learning`` for all deep learning models.
To install the data loading package `ts_datasets`, clone this repo and call ``pip install -e Merlion/ts_datasets/``.
This package must be installed in editable mode (i.e. with the ``-e`` flag) if you don't want to manually specify the
root directory of every dataset when initializing its data loader.
Note the following external dependencies:
1. Some of our forecasting models depend on OpenMP. If using ``conda``, please ``conda install -c conda-forge lightgbm``
before installing our package. This will ensure that OpenMP is configured to work with the ``lightgbm`` package
(one of our dependencies) in your ``conda`` environment. If using Mac, please install [Homebrew](https://brew.sh/)
and call ``brew install libomp`` so that the OpenMP library is available for the model.
2. Some of our anomaly detection models depend on the Java Development Kit (JDK). For Ubuntu, call
``sudo apt-get install openjdk-11-jdk``. For Mac OS, install [Homebrew](<https://brew.sh/>) and call
``brew tap adoptopenjdk/openjdk && brew install --cask adoptopenjdk11``. Also ensure that ``java`` can be found
on your ``PATH``, and that the ``JAVA_HOME`` environment variable is set.
## Documentation
For example code and an introduction to Merlion, see the Jupyter notebooks in
[`examples`](https://github.com/salesforce/Merlion/tree/main/examples), and the guided walkthrough
[here](https://opensource.salesforce.com/Merlion/tutorials.html). You may find detailed API documentation (including the
example code) [here](https://opensource.salesforce.com/Merlion/index.html). The
[technical report](https://arxiv.org/abs/2109.09265) outlines Merlion's overall architecture
and presents experimental results on time series anomaly detection & forecasting for both univariate and multivariate
time series.
## Getting Started
The easiest way to get started is to use the GUI web-based
[dashboard](https://opensource.salesforce.com/Merlion/merlion.dashboard.html).
This dashboard provides a great way to quickly experiment with many models on your own custom datasets.
To use it, install Merlion with the optional ``dashboard`` dependency (i.e.
``pip install salesforce-merlion[dashboard]``), and call ``python -m merlion.dashboard`` from the command line.
You can view the dashboard at http://localhost:8050.
Below, we show some screenshots of the dashboard for both anomaly detection and forecasting.


To help you get started with using Merlion in your own code, we provide below some minimal examples using Merlion
default models for both anomaly detection and forecasting.
### Anomaly Detection
Here, we show the code to replicate the results from the anomaly detection dashboard above.
We begin by importing Merlion’s `TimeSeries` class and the data loader for the Numenta Anomaly Benchmark `NAB`.
We can then divide a specific time series from this dataset into training and testing splits.
```python
from merlion.utils import TimeSeries
from ts_datasets.anomaly import NAB
# Data loader returns pandas DataFrames, which we convert to Merlion TimeSeries
time_series, metadata = NAB(subset="realKnownCause")[3]
train_data = TimeSeries.from_pd(time_series[metadata.trainval])
test_data = TimeSeries.from_pd(time_series[~metadata.trainval])
test_labels = TimeSeries.from_pd(metadata.anomaly[~metadata.trainval])
```
We can then initialize and train Merlion’s `DefaultDetector`, which is an anomaly detection model that
balances performance with efficiency. We also obtain its predictions on the test split.
```python
from merlion.models.defaults import DefaultDetectorConfig, DefaultDetector
model = DefaultDetector(DefaultDetectorConfig())
model.train(train_data=train_data)
test_pred = model.get_anomaly_label(time_series=test_data)
```
Next, we visualize the model's predictions.
```python
from merlion.plot import plot_anoms
import matplotlib.pyplot as plt
fig, ax = model.plot_anomaly(time_series=test_data)
plot_anoms(ax=ax, anomaly_labels=test_labels)
plt.show()
```

Finally, we can quantitatively evaluate the model. The precision and recall come from the fact that the model
fired 3 alarms, with 2 true positives, 1 false negative, and 1 false positive. We also evaluate the mean time
the model took to detect each anomaly that it correctly detected.
```python
from merlion.evaluate.anomaly import TSADMetric
p = TSADMetric.Precision.value(ground_truth=test_labels, predict=test_pred)
r = TSADMetric.Recall.value(ground_truth=test_labels, predict=test_pred)
f1 = TSADMetric.F1.value(ground_truth=test_labels, predict=test_pred)
mttd = TSADMetric.MeanTimeToDetect.value(ground_truth=test_labels, predict=test_pred)
print(f"Precision: {p:.4f}, Recall: {r:.4f}, F1: {f1:.4f}\n"
f"Mean Time To Detect: {mttd}")
```
```
Precision: 0.6667, Recall: 0.6667, F1: 0.6667
Mean Time To Detect: 1 days 10:22:30
```
### Forecasting
Here, we show the code to replicate the results from the forecasting dashboard above.
We begin by importing Merlion’s `TimeSeries` class and the data loader for the `M4` dataset. We can then divide a
specific time series from this dataset into training and testing splits.
```python
from merlion.utils import TimeSeries
from ts_datasets.forecast import M4
# Data loader returns pandas DataFrames, which we convert to Merlion TimeSeries
time_series, metadata = M4(subset="Hourly")[0]
train_data = TimeSeries.from_pd(time_series[metadata.trainval])
test_data = TimeSeries.from_pd(time_series[~metadata.trainval])
```
We can then initialize and train Merlion’s `DefaultForecaster`, which is a forecasting model that balances
performance with efficiency. We also obtain its predictions on the test split.
```python
from merlion.models.defaults import DefaultForecasterConfig, DefaultForecaster
model = DefaultForecaster(DefaultForecasterConfig())
model.train(train_data=train_data)
test_pred, test_err = model.forecast(time_stamps=test_data.time_stamps)
```
Next, we visualize the model’s predictions.
```python
import matplotlib.pyplot as plt
fig, ax = model.plot_forecast(time_series=test_data, plot_forecast_uncertainty=True)
plt.show()
```

Finally, we quantitatively evaluate the model. sMAPE measures the error of the prediction on a scale of 0 to 100
(lower is better), while MSIS evaluates the quality of the 95% confidence band on a scale of 0 to 100 (lower is better).
```python
# Evaluate the model's predictions quantitatively
from scipy.stats import norm
from merlion.evaluate.forecast import ForecastMetric
# Compute the sMAPE of the predictions (0 to 100, smaller is better)
smape = ForecastMetric.sMAPE.value(ground_truth=test_data, predict=test_pred)
# Compute the MSIS of the model's 95% confidence interval (0 to 100, smaller is better)
lb = TimeSeries.from_pd(test_pred.to_pd() + norm.ppf(0.025) * test_err.to_pd().values)
ub = TimeSeries.from_pd(test_pred.to_pd() + norm.ppf(0.975) * test_err.to_pd().values)
msis = ForecastMetric.MSIS.value(ground_truth=test_data, predict=test_pred,
insample=train_data, lb=lb, ub=ub)
print(f"sMAPE: {smape:.4f}, MSIS: {msis:.4f}")
```
```
sMAPE: 4.1944, MSIS: 18.9331
```
## Evaluation and Benchmarking
One of Merlion's key features is an evaluation pipeline that simulates the live deployment
of a model on historical data. This enables you to compare models on the datasets relevant
to them, under the conditions that they may encounter in a production environment. Our
evaluation pipeline proceeds as follows:
1. Train an initial model on recent historical training data (designated as the training split of the time series)
1. At a regular interval (e.g. once per day), retrain the entire model on the most recent data. This can be either the
entire history of the time series, or a more limited window (e.g. 4 weeks).
1. Obtain the model's predictions (anomaly scores or forecasts) for the time series values that occur between
re-trainings. You may customize whether this should be done in batch (predicting all values at once),
streaming (updating the model's internal state after each data point without fully re-training it),
or some intermediate cadence.
1. Compare the model's predictions against the ground truth (labeled anomalies for anomaly detection, or the actual
time series values for forecasting), and report quantitative evaluation metrics.
We provide scripts that allow you to use this pipeline to evaluate arbitrary models on arbitrary datasets.
For example, invoking
```shell script
python benchmark_anomaly.py --dataset NAB_realAWSCloudwatch --model IsolationForest --retrain_freq 1d
```
will evaluate the anomaly detection performance of the `IsolationForest` (retrained once a day) on the
"realAWSCloudwatch" subset of the NAB dataset. Similarly, invoking
```shell script
python benchmark_forecast.py --dataset M4_Hourly --model ETS
```
will evaluate the batch forecasting performance (i.e. no retraining) of `ETS` on the "Hourly" subset of the M4 dataset.
You can find the results produced by running these scripts in the Experiments section of the
[technical report](https://arxiv.org/abs/2109.09265).
## Technical Report and Citing Merlion
You can find more details in our technical report: https://arxiv.org/abs/2109.09265
If you're using Merlion in your research or applications, please cite using this BibTeX:
```
@article{bhatnagar2021merlion,
title={Merlion: A Machine Learning Library for Time Series},
author={Aadyot Bhatnagar and Paul Kassianik and Chenghao Liu and Tian Lan and Wenzhuo Yang
and Rowan Cassius and Doyen Sahoo and Devansh Arpit and Sri Subramanian and Gerald Woo
and Amrita Saha and Arun Kumar Jagota and Gokulakrishnan Gopalakrishnan and Manpreet Singh
and K C Krithika and Sukumar Maddineni and Daeki Cho and Bo Zong and Yingbo Zhou
and Caiming Xiong and Silvio Savarese and Steven Hoi and Huan Wang},
year={2021},
eprint={2109.09265},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
| /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/README.md | 0.934895 | 0.94366 | README.md | pypi |
# Salesforce Open Source Community Code of Conduct
## About the Code of Conduct
Equality is a core value at Salesforce. We believe a diverse and inclusive
community fosters innovation and creativity, and are committed to building a
culture where everyone feels included.
Salesforce open-source projects are committed to providing a friendly, safe, and
welcoming environment for all, regardless of gender identity and expression,
sexual orientation, disability, physical appearance, body size, ethnicity, nationality,
race, age, religion, level of experience, education, socioeconomic status, or
other similar personal characteristics.
The goal of this code of conduct is to specify a baseline standard of behavior so
that people with different social values and communication styles can work
together effectively, productively, and respectfully in our open source community.
It also establishes a mechanism for reporting issues and resolving conflicts.
All questions and reports of abusive, harassing, or otherwise unacceptable behavior
in a Salesforce open-source project may be reported by contacting the Salesforce
Open Source Conduct Committee at ossconduct@salesforce.com.
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of gender
identity and expression, sexual orientation, disability, physical appearance,
body size, ethnicity, nationality, race, age, religion, level of experience, education,
socioeconomic status, or other similar personal characteristics.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy toward other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Personal attacks, insulting/derogatory comments, or trolling
* Public or private harassment
* Publishing, or threatening to publish, others' private information—such as
a physical or electronic address—without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
* Advocating for or encouraging any of the above behaviors
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned with this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project email
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the Salesforce Open Source Conduct Committee
at ossconduct@salesforce.com. All complaints will be reviewed and investigated
and will result in a response that is deemed necessary and appropriate to the
circumstances. The committee is obligated to maintain confidentiality with
regard to the reporter of an incident. Further details of specific enforcement
policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership and the Salesforce Open Source Conduct
Committee.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][contributor-covenant-home],
version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html.
It includes adaptations and additions from [Go Community Code of Conduct][golang-coc],
[CNCF Code of Conduct][cncf-coc], and [Microsoft Open Source Code of Conduct][microsoft-coc].
This Code of Conduct is licensed under the [Creative Commons Attribution 3.0 License][cc-by-3-us].
[contributor-covenant-home]: https://www.contributor-covenant.org (https://www.contributor-covenant.org/)
[golang-coc]: https://golang.org/conduct
[cncf-coc]: https://github.com/cncf/foundation/blob/master/code-of-conduct.md
[microsoft-coc]: https://opensource.microsoft.com/codeofconduct/
[cc-by-3-us]: https://creativecommons.org/licenses/by/3.0/us/ | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/CODE_OF_CONDUCT.md | 0.642208 | 0.832951 | CODE_OF_CONDUCT.md | pypi |
import argparse
import json
import re
from pyspark.sql import SparkSession
from pyspark.sql.types import DateType, FloatType, StructField, StructType
from merlion.spark.dataset import read_dataset, write_dataset, TSID_COL_NAME
from merlion.spark.pandas_udf import anomaly
def _parse_json_arg(parser: argparse.ArgumentParser, raw: str, expected_type: type, desc: str):
    """
    Parse a JSON-encoded command-line option.

    Single quotes are converted to double quotes before decoding, so users may
    write e.g. ['store', 'item'] on the shell without escaping double quotes.

    :param parser: the argument parser, used to report a usage error on failure.
    :param raw: the raw string value of the option.
    :param expected_type: the Python type the decoded JSON value must be an instance of.
    :param desc: description of the expected value, used as the error-message prefix.
    :return: the decoded JSON value.
    """
    try:
        value = json.loads(re.sub("'", '"', raw))
        assert isinstance(value, expected_type)
    except (json.decoder.JSONDecodeError, AssertionError) as e:
        # parser.error() raises SystemExit, so we never fall through to the return below.
        parser.error(f"{desc} Got {raw}.\nCaught {type(e).__name__}({e})")
    else:
        return value


def parse_args():
    """
    Parse & validate command-line arguments for the Spark anomaly detection app.

    :return: a namespace whose ``index_cols`` and ``data_cols`` are Python lists, and
        whose ``model`` is a Python dict (all decoded from their JSON string forms).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--data", required=True, help="Path at which the dataset is stored.")
    parser.add_argument("--output_path", required=True, help="Path at which to save output anomaly scores.")
    parser.add_argument(
        "--train_test_split", required=True, help="First timestamp in the dataset which should be used for testing."
    )
    parser.add_argument("--file_format", default="csv", help="File format of train data & output file.")
    parser.add_argument(
        "--model",
        default=json.dumps({"name": "DefaultDetector"}),
        help="JSON dict specifying the model we wish to use for anomaly detection.",
    )
    parser.add_argument(
        "--index_cols",
        default="[]",
        help="JSON list of columns used to demarcate different time series. For example, if the dataset contains sales "
        'for multiple items at different stores, this could be \'["store", "item"]\'. '
        "If not given, we assume the dataset contains only 1 time series.",
    )
    parser.add_argument(
        "--time_col",
        default=None,
        help="Name of the column containing timestamps. If not given, use the first non-index column.",
    )
    parser.add_argument(
        "--data_cols",
        default="[]",
        help="JSON list of columns to use when modeling the data. If not given, use all non-index, non-time columns.",
    )
    parser.add_argument(
        "--predict_on_train", action="store_true", help="Whether to return the model's prediction on the training data."
    )
    args = parser.parse_args()

    # Decode the JSON-valued options in place. Each call exits with a usage error
    # (via parser.error) if the value is malformed or of the wrong type.
    args.index_cols = _parse_json_arg(parser, args.index_cols, list, "Expected --index_cols to be a JSON list.")
    args.data_cols = _parse_json_arg(parser, args.data_cols, list, "Expected --data_cols to be a JSON list if given.")
    args.model = _parse_json_arg(parser, args.model, dict, "Expected --model to be a JSON dict specifying a Merlion model.")
    return args
def main():
    """
    Entry point: read a dataset with Spark, run anomaly detection on every
    time series it contains (in parallel), and write out the anomaly scores.
    """
    args = parse_args()

    # Load & preprocess the dataset. read_dataset adds a TSID_COL_NAME column
    # identifying each individual time series with a single integer.
    spark = SparkSession.builder.appName("anomaly").getOrCreate()
    df = read_dataset(
        spark=spark,
        file_format=args.file_format,
        path=args.data,
        time_col=args.time_col,
        index_cols=args.index_cols,
        data_cols=args.data_cols,
    )
    if args.time_col is None:
        # Default to the first column of the processed dataframe.
        args.time_col = df.schema.fieldNames()[0]
    args.index_cols = args.index_cols + [TSID_COL_NAME]

    # Output rows carry the index columns, a timestamp, and an anomaly score.
    output_schema = StructType(
        [df.schema[c] for c in args.index_cols]
        + [StructField(args.time_col, DateType()), StructField("anom_score", FloatType())]
    )

    def _detect(pdf):
        # Runs the Merlion model on the pandas DataFrame for one time series.
        return anomaly(
            pdf,
            index_cols=args.index_cols,
            time_col=args.time_col,
            train_test_split=args.train_test_split,
            model=args.model,
            predict_on_train=args.predict_on_train,
        )

    # Use Spark to predict anomaly scores for each time series in parallel.
    anomaly_df = df.groupBy(args.index_cols).applyInPandas(_detect, schema=output_schema)
    write_dataset(df=anomaly_df, time_col=args.time_col, path=args.output_path, file_format=args.file_format)
if __name__ == "__main__":
main() | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/spark_apps/anomaly.py | 0.70028 | 0.536009 | anomaly.py | pypi |
import argparse
import json
import re
from warnings import warn
from pyspark.sql import SparkSession
from pyspark.sql.types import DateType, FloatType, StructField, StructType
from merlion.spark.dataset import create_hier_dataset, read_dataset, write_dataset, TSID_COL_NAME
from merlion.spark.pandas_udf import forecast, reconciliation
def parse_args():
    """
    Parse & validate command-line arguments for the Spark forecasting app.

    :return: a namespace whose JSON-string options have been decoded into Python
        objects: ``time_stamps``, ``index_cols``, and ``data_cols`` become lists,
        ``agg_dict`` and ``model`` become dicts. ``model["target_seq_index"]`` is
        set to the position of ``target_col`` within ``data_cols``, and
        ``hierarchical`` is forced to False when there are no index columns.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_data", required=True, help="Path at which the train data is stored.")
    parser.add_argument("--output_path", required=True, help="Path at which to save output forecasts.")
    parser.add_argument(
        "--time_stamps",
        required=True,
        help='JSON list of times we want to forecast, e.g. \'["2022-01-01 00:00:00", "2020-01-01 00:01:00"]\'.',
    )
    parser.add_argument("--target_col", required=True, help="Name of the column whose value we want to forecast.")
    parser.add_argument(
        "--predict_on_train", action="store_true", help="Whether to return the model's prediction on the training data."
    )
    parser.add_argument("--file_format", default="csv", help="File format of train data & output file.")
    parser.add_argument(
        "--model",
        default=json.dumps({"name": "DefaultForecaster"}),
        help="JSON dict specifying the model we wish to use for forecasting.",
    )
    parser.add_argument(
        "--index_cols",
        default="[]",
        help="JSON list of columns used to demarcate different time series. For example, if the dataset contains sales "
        'for multiple items at different stores, this could be \'["store", "item"]\'. '
        "If not given, we assume the dataset contains only 1 time series.",
    )
    parser.add_argument(
        "--hierarchical",
        action="store_true",
        default=False,
        help="Whether the time series have a hierarchical structure. If true, we aggregate the time series in the "
        "dataset (by summation), in the order specified by index_cols. For example, if index_cols is "
        '\'["store", "item"]\', we first sum the sales of all items within store, and then sum the global '
        "sales of all stores and all items.",
    )
    parser.add_argument(
        "--agg_dict",
        default="{}",
        help="JSON dict indicating how different data columns should be aggregated if working with hierarchical time "
        "series. Keys are column names, values are names of standard aggregations (e.g. sum, mean, max, etc.). "
        "If a column is not specified, it is not aggregated. Note that we always sum the target column, regardless of "
        "whether it is specified. This ensures that hierarchical time series reconciliation works correctly.",
    )
    parser.add_argument(
        "--time_col",
        default=None,
        help="Name of the column containing timestamps. We use the first non-index column if one is not given.",
    )
    parser.add_argument(
        "--data_cols",
        default=None,
        help="JSON list of columns to use when modeling the data."
        "If not given, we do univariate forecasting using only target_col.",
    )
    args = parser.parse_args()
    # Parse time_stamps JSON string. Single quotes are converted to double quotes
    # before decoding, so shell users need not escape double quotes.
    try:
        time_stamps = json.loads(re.sub("'", '"', args.time_stamps))
        assert isinstance(time_stamps, list) and len(time_stamps) > 0
    except (json.decoder.JSONDecodeError, AssertionError) as e:
        parser.error(
            f"Expected --time_stamps to be a non-empty JSON list. Got {args.time_stamps}.\n Caught {type(e).__name__}({e})"
        )
    else:
        args.time_stamps = time_stamps
    # Parse index_cols JSON string; `or []` falls back to an empty list for JSON null/empty values.
    try:
        index_cols = json.loads(re.sub("'", '"', args.index_cols)) or []
        assert isinstance(index_cols, list)
    except (json.decoder.JSONDecodeError, AssertionError) as e:
        parser.error(
            f"Expected --index_cols to be a JSON list. Got {args.index_cols}.\n Caught {type(e).__name__}({e})"
        )
    else:
        args.index_cols = index_cols
    # Parse agg_dict JSON string; `or {}` falls back to an empty dict for JSON null/empty values.
    try:
        agg_dict = json.loads(re.sub("'", '"', args.agg_dict)) or {}
        assert isinstance(agg_dict, dict)
    except (json.decoder.JSONDecodeError, AssertionError) as e:
        parser.error(f"Expected --agg_dict to be a JSON dict. Got {args.agg_dict}.\n Caught {type(e).__name__}({e})")
    else:
        # The target column must always be aggregated by summation, otherwise
        # hierarchical reconciliation (see --agg_dict help text) would be invalid.
        if args.target_col not in agg_dict:
            agg_dict[args.target_col] = "sum"
        elif agg_dict[args.target_col] != "sum":
            warn(
                f'Expected the agg_dict to specify "sum" for target_col {args.target_col}, '
                f'but got {agg_dict[args.target_col]}. Manually changing to "sum".'
            )
            agg_dict[args.target_col] = "sum"
        args.agg_dict = agg_dict
    # Set default data_cols if needed & make sure target_col is in data_cols
    if args.data_cols is None:
        args.data_cols = [args.target_col]
    else:
        try:
            data_cols = json.loads(re.sub("'", '"', args.data_cols))
            assert isinstance(data_cols, list)
        except (json.decoder.JSONDecodeError, AssertionError) as e:
            parser.error(
                f"Expected --data_cols to be a JSON list if given. Got {args.data_cols}.\n"
                f"Caught {type(e).__name__}({e})"
            )
        else:
            args.data_cols = data_cols
        if args.target_col not in args.data_cols:
            parser.error(f"Expected --data_cols {args.data_cols} to contain --target_col {args.target_col}.")
    # Parse JSON string for the model and set the model's target_seq_index
    try:
        model = json.loads(re.sub("'", '"', args.model))
        assert isinstance(model, dict)
    except (json.decoder.JSONDecodeError, AssertionError) as e:
        parser.error(
            f"Expected --model to be a JSON dict specifying a Merlion model. Got {args.model}.\n"
            f"Caught {type(e).__name__}({e})"
        )
    else:
        # target_seq_index tells the model which of the (possibly multivariate)
        # data columns is the one being forecast.
        target_seq_index = {v: i for i, v in enumerate(args.data_cols)}[args.target_col]
        model["target_seq_index"] = target_seq_index
        args.model = model
    # Only do hierarchical forecasting if there are index columns specifying a hierarchy
    args.hierarchical = args.hierarchical and len(args.index_cols) > 0
    return args
def main():
    """
    Entry point: read a dataset with Spark, forecast each time series in
    parallel (optionally as a hierarchical dataset with reconciliation), and
    write out the forecasts.
    """
    args = parse_args()
    # Read the dataset as a Spark DataFrame, and process it.
    # This will add a TSID_COL_NAME column to identify each time series with a single integer.
    spark = SparkSession.builder.appName("forecast").getOrCreate()
    df = read_dataset(
        spark=spark,
        file_format=args.file_format,
        path=args.train_data,
        time_col=args.time_col,
        index_cols=args.index_cols,
        data_cols=args.data_cols,
    )
    if args.time_col is None:
        # Default to the first column of the processed dataframe.
        args.time_col = df.schema.fieldNames()[0]
    args.index_cols = args.index_cols + [TSID_COL_NAME]
    # Convert to a hierarchical dataset if desired. hier_matrix is only bound on
    # this branch; it is consumed by the reconciliation closure further below,
    # which is also only created when args.hierarchical is True.
    if args.hierarchical:
        df, hier_matrix = create_hier_dataset(
            spark=spark, df=df, time_col=args.time_col, index_cols=args.index_cols, agg_dict=args.agg_dict
        )
    # Use spark to generate forecasts for each time series in parallel.
    # Output rows carry the index columns, a timestamp, the forecast value, and its standard error.
    index_fields = [df.schema[c] for c in args.index_cols]
    pred_fields = [
        StructField(args.time_col, DateType()),
        StructField(args.target_col, FloatType()),
        StructField(f"{args.target_col}_err", FloatType()),
    ]
    output_schema = StructType(index_fields + pred_fields)
    forecast_df = df.groupBy(args.index_cols).applyInPandas(
        lambda pdf: forecast(
            pdf,
            index_cols=args.index_cols,
            time_col=args.time_col,
            target_col=args.target_col,
            time_stamps=args.time_stamps,
            model=args.model,
            predict_on_train=args.predict_on_train,
            agg_dict=args.agg_dict,
        ),
        schema=output_schema,
    )
    # Apply hierarchical time series reconciliation if desired. Grouping by the
    # time column gathers all series' forecasts for a single timestamp, which is
    # what reconciliation adjusts; the output schema is unchanged.
    if args.hierarchical:
        forecast_df = forecast_df.groupBy(args.time_col).applyInPandas(
            lambda pdf: reconciliation(pdf, hier_matrix=hier_matrix, target_col=args.target_col), schema=output_schema
        )
    write_dataset(df=forecast_df, time_col=args.time_col, path=args.output_path, file_format=args.file_format)
if __name__ == "__main__":
main() | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/spark_apps/forecast.py | 0.702326 | 0.426411 | forecast.py | pypi |
import logging
from typing import Dict
from copy import copy
from matplotlib.colors import to_rgb
from matplotlib.dates import AutoDateLocator, AutoDateFormatter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from merlion.utils import TimeSeries, UnivariateTimeSeries
logger = logging.getLogger(__name__)
def plot_anoms(ax: plt.Axes, anomaly_labels: TimeSeries):
    """
    Plots anomalies as pink windows on the matplotlib ``Axes`` object ``ax``.
    """
    if anomaly_labels is None:
        return ax
    labels = anomaly_labels.to_pd()
    times, flags = labels.index, labels.values
    # Indices where the anomaly flag flips, bracketed by the first & last indices
    boundaries = np.where(flags[1:] != flags[:-1])[0] + 1
    boundaries = np.concatenate(([0], boundaries, [len(flags) - 1]))
    # Shade every window whose starting point is anomalous
    for start, end in zip(boundaries[:-1], boundaries[1:]):
        if flags[start]:
            ax.axvspan(times[start], times[end], color="#e07070", alpha=0.5)
    return ax
def plot_anoms_plotly(fig, anomaly_labels: TimeSeries):
    """
    Plots anomalies as pink windows on the plotly ``Figure`` object ``fig``.
    """
    if anomaly_labels is None:
        return fig
    labels = anomaly_labels.to_pd()
    times, flags = labels.index, labels.values
    # Indices where the anomaly flag flips, bracketed by the first & last indices
    boundaries = np.where(flags[1:] != flags[:-1])[0] + 1
    boundaries = np.concatenate(([0], boundaries, [len(flags) - 1]))
    # Shade every window whose starting point is anomalous
    for start, end in zip(boundaries[:-1], boundaries[1:]):
        if flags[start]:
            fig.add_vrect(times[start], times[end], line_width=0, fillcolor="#e07070", opacity=0.4)
    return fig
class Figure:
    """
    Class for visualizing predictions of univariate anomaly detection & forecasting models.
    """

    # Default legend labels for the forecast & anomaly-score traces; entries may be
    # overridden via the ``label_alias`` argument of plot() / plot_plotly().
    _default_label_alias = dict(yhat="Forecast", anom="Anomaly Score")

    def __init__(
        self,
        y: UnivariateTimeSeries = None,
        anom: UnivariateTimeSeries = None,
        yhat: UnivariateTimeSeries = None,
        yhat_lb: UnivariateTimeSeries = None,
        yhat_ub: UnivariateTimeSeries = None,
        y_prev: UnivariateTimeSeries = None,
        yhat_prev: UnivariateTimeSeries = None,
        yhat_prev_lb: UnivariateTimeSeries = None,
        yhat_prev_ub: UnivariateTimeSeries = None,
        yhat_color: str = None,
    ):
        """
        :param y: the true value of the time series
        :param anom: anomaly scores returned by a model
        :param yhat: forecast returned by a model
        :param yhat_lb: lower bound on ``yhat`` (if model supports uncertainty estimation)
        :param yhat_ub: upper bound on ``yhat`` (if model supports uncertainty estimation)
        :param y_prev: portion of time series preceding ``y``
        :param yhat_prev: model's forecast of ``y_prev``
        :param yhat_prev_lb: lower bound on ``yhat_prev`` (if model supports uncertainty estimation)
        :param yhat_prev_ub: upper bound on ``yhat_prev`` (if model supports uncertainty estimation)
        :param yhat_color: the color in which to plot the forecast
        """
        # Validate that interdependent arguments are supplied together.
        assert not (anom is not None and y is None), "If `anom` is given, `y` must also be given"
        if yhat is None:
            assert yhat_lb is None and yhat_ub is None, "Can only give `yhat_lb` and `yhat_ub` if `yhat` is given"
        else:
            assert (yhat_lb is None and yhat_ub is None) or (
                yhat_lb is not None and yhat_ub is not None
            ), "Must give both or neither of `yhat_lb` and `yhat_ub`"
        if yhat_prev is None:
            assert (
                yhat_prev_lb is None and yhat_prev_ub is None
            ), "Can only give `yhat_prev_lb` and `yhat_prev_ub` if `yhat_prev` is given"
        else:
            assert (yhat_prev_lb is None and yhat_prev_ub is None) or (
                yhat_prev_lb is not None and yhat_prev_ub is not None
            ), "Must give both or neither of `yhat_prev_lb` and `yhat_prev_ub`"
        self.y = y
        self.anom = anom
        self.yhat = yhat
        # Store the uncertainty band as a single aligned bivariate time series
        # with univariates named "lb" and "ub".
        if yhat_lb is not None and yhat_ub is not None:
            self.yhat_iqr = TimeSeries({"lb": yhat_lb, "ub": yhat_ub}).align()
        else:
            self.yhat_iqr = None
        self.y_prev = y_prev
        self.yhat_prev = yhat_prev
        if yhat_prev_lb is not None and yhat_prev_ub is not None:
            self.yhat_prev_iqr = TimeSeries({"lb": yhat_prev_lb, "ub": yhat_prev_ub}).align()
        else:
            self.yhat_prev_iqr = None
        self.yhat_color = yhat_color if isinstance(yhat_color, str) else "#0072B2"

    @property
    def t0(self):
        """
        :return: First time being plotted.
        """
        ys = [self.anom, self.y, self.yhat, self.y_prev, self.yhat_prev]
        return min(y.index[0] for y in ys if y is not None and len(y) > 0)

    @property
    def tf(self):
        """
        :return: Final time being plotted.
        """
        ys = [self.anom, self.y, self.yhat, self.y_prev, self.yhat_prev]
        return max(y.index[-1] for y in ys if y is not None and len(y) > 0)

    @property
    def t_split(self):
        """
        :return: Time splitting train from test.
        """
        # Prefer the actual previous values over the previous forecast if both exist.
        if self.y_prev is not None:
            return self.y_prev.index[-1]
        if self.yhat_prev is not None:
            return self.yhat_prev.index[-1]
        return None

    def get_y(self):
        """Get all y's (actual values), previous values first when both exist."""
        if self.y is not None and self.y_prev is not None:
            return self.y_prev.concat(self.y)
        elif self.y_prev is not None:
            return self.y_prev
        elif self.y is not None:
            return self.y
        else:
            return None

    def get_yhat(self):
        """Get all yhat's (predicted values), previous forecast first when both exist."""
        if self.yhat is not None and self.yhat_prev is not None:
            return self.yhat_prev.concat(self.yhat)
        elif self.yhat_prev is not None:
            return self.yhat_prev
        elif self.yhat is not None:
            return self.yhat
        else:
            return None

    def get_yhat_iqr(self):
        """Get IQR of predicted values."""
        # NOTE(review): when both exist, the combination uses TimeSeries `+`;
        # presumably this merges/concatenates the two series -- confirm against
        # merlion.utils.TimeSeries.
        if self.yhat_iqr is not None and self.yhat_prev_iqr is not None:
            return self.yhat_prev_iqr + self.yhat_iqr
        elif self.yhat_prev_iqr is not None:
            return self.yhat_prev_iqr
        elif self.yhat_iqr is not None:
            return self.yhat_iqr
        else:
            return None

    def plot(self, title=None, metric_name=None, figsize=(1000, 600), ax=None, label_alias: Dict[str, str] = None):
        """
        Plots the figure in matplotlib.

        :param title: title of the plot.
        :param metric_name: name of the metric (y axis)
        :param figsize: figure size in pixels
        :param ax: matplotlib axes to add the figure to.
        :param label_alias: dict which maps entities in the figure,
            specifically ``y_hat`` and ``anom`` to their label names.

        :return: (fig, ax): matplotlib figure & matplotlib axes
        """
        # determine full label alias
        label_alias = {} if label_alias is None else label_alias
        full_label_alias = copy(self._default_label_alias)
        full_label_alias.update(label_alias)
        # Get the figure. figsize is converted from pixels to inches
        # (assumes matplotlib's 100-dpi default).
        figsize = (figsize[0] / 100, figsize[1] / 100)
        if ax is None:
            fig = plt.figure(facecolor="w", figsize=figsize)
            ax = fig.add_subplot(111)
        else:
            fig = ax.get_figure()
        ax.set_facecolor((0.9, 0.9, 0.9))
        # Get & plot the actual value (if applicable)
        lines = []
        y = self.get_y()
        if y is not None:
            metric_name = y.name if metric_name is None else metric_name
            ln = ax.plot(y.index, y.np_values, c="k", alpha=0.8, lw=1, zorder=1, label=metric_name)
            lines.extend(ln)
        # Dotted line to cordon off previous times from current ones
        t_split = self.t_split
        if t_split is not None:
            ax.axvline(t_split, ls="--", lw=2, c="k")
        # Get & plot the prediction (if applicable)
        yhat = self.get_yhat()
        if yhat is not None:
            metric_name = yhat.name if metric_name is None else metric_name
            yhat_label = full_label_alias.get("yhat")
            ln = ax.plot(yhat.index, yhat.np_values, ls="-", c=self.yhat_color, zorder=0, label=yhat_label)
            lines.extend(ln)
        # Get & plot the uncertainty of the prediction (if applicable)
        iqr = self.get_yhat_iqr()
        if iqr is not None:
            lb, ub = iqr.univariates["lb"], iqr.univariates["ub"]
            ax.fill_between(lb.index, lb.values, ub.values, color=self.yhat_color, alpha=0.2, zorder=2)
        # Plot anomaly scores if desired. Scores go on a secondary y-axis whose
        # limits are expanded above so the score trace stays near the bottom.
        if self.anom is not None and self.y is not None:
            ax2 = ax.twinx()
            anom_vals = self.anom.np_values
            anom_label = full_label_alias.get("anom")
            ln = ax2.plot(self.anom.index, anom_vals, color="r", label=anom_label)
            ax2.set_ylabel(anom_label)
            minval, maxval = min(anom_vals), max(anom_vals)
            delta = maxval - minval
            if delta > 0:
                ax2.set_ylim(minval - delta / 8, maxval + 2 * delta)
            else:
                # Degenerate case: constant anomaly score.
                ax2.set_ylim(minval - 1 / 30, maxval + 1)
            lines.extend(ln)
        # Format the axes before returning the figure
        locator = AutoDateLocator(interval_multiples=False)
        formatter = AutoDateFormatter(locator)
        # Pad the x-limits with a 5% margin on each side of the plotted range.
        ax.set_xlim(self.t0 - (self.tf - self.t0) / 20, self.tf + (self.tf - self.t0) / 20)
        ax.xaxis.set_major_locator(locator)
        ax.xaxis.set_major_formatter(formatter)
        ax.grid(True, which="major", c="gray", ls="-", lw=1, alpha=0.2)
        ax.set_xlabel("Time")
        ax.set_ylabel(metric_name)
        ax.set_title(title if title else metric_name)
        fig.legend(lines, [l.get_label() for l in lines])
        fig.tight_layout()
        return fig, ax

    def plot_plotly(self, title=None, metric_name=None, figsize=(1000, 600), label_alias: Dict[str, str] = None):
        """
        Plots the figure in plotly.

        :param title: title of the plot.
        :param metric_name: name of the metric (y axis)
        :param figsize: figure size in pixels
        :param label_alias: dict which maps entities in the figure,
            specifically ``y_hat`` and ``anom`` to their label names.

        :return: plotly figure.
        """
        # determine full label alias
        label_alias = {} if label_alias is None else label_alias
        full_label_alias = copy(self._default_label_alias)
        full_label_alias.update(label_alias)
        # Translucent version of the forecast color used to shade the uncertainty band.
        error_color = "rgba" + str(tuple(int(x * 255) for x in to_rgb(self.yhat_color)) + (0.2,))
        actual_color = "black"
        anom_color = "red"
        line_width = 2
        traces = []
        y = self.get_y()
        yhat = self.get_yhat()
        iqr = self.get_yhat_iqr()
        if metric_name is None:
            if y is not None:
                metric_name = y.name
            elif yhat is not None:
                metric_name = yhat.name
        # Trace order matters: the invisible lower bound is drawn first so the
        # forecast (and then the upper bound) can shade down to the previous trace
        # via fill="tonexty".
        if iqr is not None:
            lb = iqr.univariates["lb"]
            traces.append(
                go.Scatter(
                    x=lb.index, y=lb.np_values, mode="lines", line=dict(width=0), hoverinfo="skip", showlegend=False
                )
            )
        if yhat is not None:
            fill_mode = "tonexty" if iqr is not None else "none"
            yhat_label = full_label_alias.get("yhat")
            traces.append(
                go.Scatter(
                    name=yhat_label,
                    x=yhat.index,
                    y=yhat.np_values,
                    mode="lines",
                    line=dict(color=self.yhat_color, width=line_width),
                    fillcolor=error_color,
                    fill=fill_mode,
                )
            )
        if iqr is not None:
            ub = iqr.univariates["ub"]
            traces.append(
                go.Scatter(
                    x=ub.index,
                    y=ub.np_values,
                    mode="lines",
                    line=dict(width=0),
                    hoverinfo="skip",
                    showlegend=False,
                    fillcolor=error_color,
                    fill="tonexty",
                )
            )
        if y is not None:
            traces.append(
                go.Scatter(
                    name=y.name, x=y.index, y=y.values, mode="lines", line=dict(color=actual_color, width=line_width)
                )
            )
        # The anomaly-score trace is added later on a secondary y-axis.
        anom_trace = None
        if self.anom is not None:
            anom_label = full_label_alias.get("anom")
            anom_trace = go.Scatter(
                name=anom_label,
                x=self.anom.index,
                y=self.anom.np_values,
                mode="lines",
                line=dict(color=anom_color, width=line_width),
            )
        # Date x-axis with zoom presets (1 week / 1, 6 months / 1 year / all)
        # and a range slider.
        layout = dict(
            showlegend=True,
            width=figsize[0],
            height=figsize[1],
            yaxis=dict(title=metric_name),
            xaxis=dict(
                title="Time",
                type="date",
                rangeselector=dict(
                    buttons=list(
                        [
                            dict(count=7, label="1w", step="day", stepmode="backward"),
                            dict(count=1, label="1m", step="month", stepmode="backward"),
                            dict(count=6, label="6m", step="month", stepmode="backward"),
                            dict(count=1, label="1y", step="year", stepmode="backward"),
                            dict(step="all"),
                        ]
                    )
                ),
                rangeslider=dict(visible=True),
            ),
        )
        title = title if title else metric_name
        if title is not None:
            layout["title"] = title
        fig = make_subplots(
            specs=[[{"secondary_y": anom_trace is not None}]], figure=go.Figure(data=traces, layout=layout)
        )
        # Dotted line to cordon off previous times from current ones.
        if self.t_split is not None:
            fig.add_vline(x=self.t_split, line_dash="dot", line_color="black", line_width=2)
        if anom_trace is not None:
            fig.add_trace(anom_trace, secondary_y=True)
            # Expand the secondary axis limits above so the score trace stays low.
            minval, maxval = min(self.anom.np_values), max(self.anom.np_values)
            delta = maxval - minval
            if delta > 0:
                minval, maxval = minval - delta / 8, maxval + 2 * delta
            else:
                minval, maxval = minval - 1 / 30, maxval + 1
            fig.update_yaxes(title_text=anom_label, range=[minval, maxval], secondary_y=True)
        return fig
class MTSFigure:
    """
    Plotly-based counterpart of `Figure` for visualizing predictions of multivariate
    anomaly detection & forecasting models.
    """

    def __init__(
        self,
        y: TimeSeries = None,
        anom: TimeSeries = None,
        yhat: TimeSeries = None,
        yhat_lb: TimeSeries = None,
        yhat_ub: TimeSeries = None,
        y_prev: TimeSeries = None,
        yhat_prev: TimeSeries = None,
        yhat_prev_lb: TimeSeries = None,
        yhat_prev_ub: TimeSeries = None,
        yhat_color: str = None,
    ):
        """
        :param y: the true value of the time series
        :param anom: anomaly scores returned by a model
        :param yhat: forecast returned by a model
        :param yhat_lb: lower bound on ``yhat`` (if model supports uncertainty estimation)
        :param yhat_ub: upper bound on ``yhat`` (if model supports uncertainty estimation)
        :param y_prev: portion of time series preceding ``y``
        :param yhat_prev: model's forecast of ``y_prev``
        :param yhat_prev_lb: lower bound on ``yhat_prev`` (if model supports uncertainty estimation)
        :param yhat_prev_ub: upper bound on ``yhat_prev`` (if model supports uncertainty estimation)
        :param yhat_color: the color in which to plot the forecast
        """
        # Validate that interdependent arguments are supplied together.
        assert y is not None, "`y` must be given"
        if yhat is None:
            assert yhat_lb is None and yhat_ub is None, "Can only give `yhat_lb` and `yhat_ub` if `yhat` is given"
        else:
            assert (yhat_lb is None and yhat_ub is None) or (
                yhat_lb is not None and yhat_ub is not None
            ), "Must give both or neither of `yhat_lb` and `yhat_ub`"
        if yhat_prev is None:
            assert (
                yhat_prev_lb is None and yhat_prev_ub is None
            ), "Can only give `yhat_prev_lb` and `yhat_prev_ub` if `yhat_prev` is given"
        else:
            assert (yhat_prev_lb is None and yhat_prev_ub is None) or (
                yhat_prev_lb is not None and yhat_prev_ub is not None
            ), "Must give both or neither of `yhat_prev_lb` and `yhat_prev_ub`"
        self.y = y
        self.anom = anom
        self.yhat = yhat
        self.yhat_lb = yhat_lb
        self.yhat_ub = yhat_ub
        self.y_prev = y_prev
        self.yhat_prev = yhat_prev
        self.yhat_prev_lb = yhat_prev_lb
        self.yhat_prev_ub = yhat_prev_ub
        self.yhat_color = yhat_color if isinstance(yhat_color, str) else "#0072B2"

    @property
    def t0(self):
        # Earliest timestamp across all the series being plotted.
        ys = [self.anom, self.y, self.yhat, self.y_prev, self.yhat_prev]
        return min(y.t0 for y in ys if y is not None and len(y) > 0)

    @property
    def tf(self):
        # Latest timestamp across all the series being plotted.
        ys = [self.anom, self.y, self.yhat, self.y_prev, self.yhat_prev]
        return max(y.tf for y in ys if y is not None and len(y) > 0)

    @property
    def t_split(self):
        # Time separating the "previous" (historical) region from the current one.
        # TimeSeries.tf is a Unix timestamp here, hence the unit="s" conversion.
        if self.y_prev is not None:
            return pd.to_datetime(self.y_prev.tf, unit="s")
        if self.yhat_prev is not None:
            return pd.to_datetime(self.yhat_prev.tf, unit="s")
        return None

    @staticmethod
    def _combine_prev(x, x_prev):
        # Joins the "previous" series with the current one when both are present.
        # NOTE(review): relies on TimeSeries.__add__ (x_prev + x); presumably this
        # merges/concatenates the two series -- confirm against merlion.utils.TimeSeries.
        if x is not None and x_prev is not None:
            return x_prev + x
        elif x_prev is not None:
            return x_prev
        elif x is not None:
            return x
        else:
            return None

    def get_y(self):
        """Get all y's (actual values)"""
        return self._combine_prev(self.y, self.y_prev)

    def get_yhat(self):
        """Get all yhat's (predicted values)."""
        return self._combine_prev(self.yhat, self.yhat_prev)

    def get_yhat_iqr(self):
        """Get IQR of predicted values, as a (lower, upper) pair (each may be None)."""
        return self._combine_prev(self.yhat_lb, self.yhat_prev_lb), self._combine_prev(self.yhat_ub, self.yhat_prev_ub)

    @staticmethod
    def _get_layout(title, figsize):
        # Date x-axis with zoom presets (1 week / 1, 6 months / 1 year / all)
        # and a range slider.
        layout = dict(
            showlegend=True,
            xaxis=dict(
                title="Time",
                type="date",
                rangeselector=dict(
                    buttons=list(
                        [
                            dict(count=7, label="1w", step="day", stepmode="backward"),
                            dict(count=1, label="1m", step="month", stepmode="backward"),
                            dict(count=6, label="6m", step="month", stepmode="backward"),
                            dict(count=1, label="1y", step="year", stepmode="backward"),
                            dict(step="all"),
                        ]
                    )
                ),
                rangeslider=dict(visible=True),
            ),
        )
        layout["title"] = title if title else "Untitled"
        if figsize is not None:
            assert len(figsize) == 2, "figsize should be (width, height)."
            layout["width"] = figsize[0]
            layout["height"] = figsize[1]
        return layout

    def plot_plotly(self, title=None, figsize=None):
        """
        Plots the figure in plotly.

        :param title: title of the plot.
        :param figsize: figure size in pixels
        :return: plotly figure.
        """
        anom_color = "red"
        # Translucent version of the forecast color used to shade uncertainty bands.
        error_color = "rgba" + str(tuple(int(x * 255) for x in to_rgb(self.yhat_color)) + (0.2,))
        traces = []
        y = self.get_y()
        yhat = self.get_yhat()
        lb, ub = self.get_yhat_iqr()
        color_list = plotly.colors.qualitative.Dark24
        valid_idx = [i for i in range(len(color_list)) if i not in [3, 12]]  # exclude red to make anom trace clearer
        # For each univariate: actual value, then (optionally) its lower bound,
        # forecast, and upper bound. Trace order matters: fill="tonexty" shades
        # down to the previously added trace.
        for i, name in enumerate(y.names):
            v = y.univariates[name]
            color = color_list[valid_idx[i % len(valid_idx)]]
            traces.append(go.Scatter(name=name, x=v.index, y=v.np_values, mode="lines", line=dict(color=color)))
            if lb is not None and name in lb.names:
                v = lb.univariates[name]
                traces.append(
                    go.Scatter(
                        x=v.index, y=v.np_values, mode="lines", line=dict(width=0), hoverinfo="skip", showlegend=False
                    )
                )
            if yhat is not None and name in yhat.names:
                v = yhat.univariates[name]
                fill_mode = "tonexty" if lb is not None or ub is not None else "none"
                traces.append(
                    go.Scatter(
                        name=f"{name}_forecast",
                        x=v.index,
                        y=v.np_values,
                        mode="lines",
                        line=dict(color=self.yhat_color),
                        fillcolor=error_color,
                        fill=fill_mode,
                    )
                )
            if ub is not None and name in ub.names:
                v = ub.univariates[name]
                traces.append(
                    go.Scatter(
                        x=v.index,
                        y=v.np_values,
                        mode="lines",
                        line=dict(width=0),
                        hoverinfo="skip",
                        showlegend=False,
                        fillcolor=error_color,
                        fill="tonexty",
                    )
                )
        # Anomaly scores (first univariate only) go on a secondary y-axis.
        anom_trace = None
        if self.anom is not None:
            v = self.anom.univariates[self.anom.names[0]]
            anom_trace = go.Scatter(
                name="Anomaly Score", x=v.index, y=v.np_values, mode="lines", line=dict(color=anom_color)
            )
        fig = make_subplots(
            specs=[[{"secondary_y": anom_trace is not None}]], figure=go.Figure(layout=self._get_layout(title, figsize))
        )
        if anom_trace is not None:
            fig.add_trace(anom_trace, secondary_y=True)
            # Expand the secondary axis limits above so the score trace stays low.
            v = self.anom.univariates[self.anom.names[0]]
            minval, maxval = min(v.np_values), max(v.np_values)
            delta = maxval - minval
            if delta > 0:
                minval, maxval = minval - delta / 8, maxval + 2 * delta
            else:
                minval, maxval = minval - 1 / 30, maxval + 1
            fig.update_yaxes(title_text="Anomaly Score", range=[minval, maxval], secondary_y=True)
        for trace in traces:
            fig.add_trace(trace)
        # Dotted line to cordon off previous times from current ones.
        if self.t_split is not None:
            fig.add_vline(x=self.t_split, line_dash="dot", line_color="black", line_width=2)
        return fig
from collections import OrderedDict
import logging
from typing import Sequence
import numpy as np
import scipy.signal
from scipy.stats import norm
from merlion.transform.base import TransformBase, InvertibleTransformBase
from merlion.utils import UnivariateTimeSeries, TimeSeries
logger = logging.getLogger(__name__)
class MovingAverage(InvertibleTransformBase):
    """
    Computes the n_steps-step moving average of the time series, with
    the given relative weights assigned to each time in the moving average
    (default is to take the non-weighted average). Pads the input time
    series to the left with its first value before taking the moving average,
    so the output has the same length and index as the input.
    """

    def __init__(self, n_steps: int = None, weights: Sequence[float] = None):
        """
        :param n_steps: size of the moving-average window. Inferred from
            ``weights`` if only the latter is given.
        :param weights: relative weight of each time in the window; defaults to
            uniform weights of ``1 / n_steps``.
        """
        super().__init__()
        assert (
            n_steps is not None and n_steps > 0
        ) or weights is not None, "Must specify at least one of n_steps or weights for MovingAverage"
        if weights is None:
            weights = np.ones(n_steps) / n_steps
        elif n_steps is None:
            n_steps = len(weights)
        else:
            assert len(weights) == n_steps
        assert len(weights) >= 2, "Must compute a moving average with a window size of at least 2"
        self.n_steps = n_steps
        self.weights = weights

    @property
    def requires_inversion_state(self):
        # Inversion is stateless: _invert reconstructs the input purely from the output.
        return False

    @property
    def _n_pad(self):
        # Amount of left-padding required so the output keeps the input's length.
        return len(self.weights) - 1

    def train(self, time_series: TimeSeries):
        pass

    def __call__(self, time_series: TimeSeries) -> TimeSeries:
        new_vars = OrderedDict()
        for name, var in time_series.items():
            # Left-pad with the first value so the early (partial) windows are defined.
            x = np.concatenate((np.full(self._n_pad, var.np_values[0]), var.np_values))
            # Sliding dot product of the weights with the padded series; the slice
            # trims the partial windows so the output aligns with the input index.
            y = scipy.signal.correlate(x, self.weights, mode="full")[self._n_pad : -self._n_pad]
            new_vars[name] = UnivariateTimeSeries(var.index, y)
        ret = TimeSeries(new_vars, check_aligned=False)
        # Alignment is preserved since every univariate keeps its original index.
        ret._is_aligned = time_series.is_aligned
        return ret

    def _invert(self, time_series: TimeSeries) -> TimeSeries:
        new_vars = OrderedDict()
        for name, var in time_series.items():
            # Reconstruct the padding __call__ introduced: the left pad replays the
            # partial weighted sums of the constant initial value, and the right pad
            # repeats the final value so deconvolve() has enough samples.
            # (NOTE(review): exactness of the left-pad formula assumed -- verify.)
            left_pad = np.cumsum(self.weights[-1:0:-1]) * var.np_values[0]
            right_pad = np.full(self._n_pad, var.np_values[-1])
            y = np.concatenate((left_pad, var.np_values, right_pad))
            # Polynomial deconvolution against the reversed weights inverts the
            # correlation performed in __call__.
            x = scipy.signal.deconvolve(y, self.weights[-1::-1])[0][self._n_pad :]
            new_vars[name] = UnivariateTimeSeries(var.index, x)
        ret = TimeSeries(new_vars, check_aligned=False)
        ret._is_aligned = time_series.is_aligned
        return ret
class MovingPercentile(TransformBase):
    """
    Computes the n-step moving percentile of the time series.

    For datapoints at the start of the time series which are preceded by
    fewer than ``n_steps`` datapoints, the percentile is computed using only the
    available datapoints.
    """

    def __init__(self, n_steps: int, q: float):
        """
        :param q: The percentile to use. Between 0 and 100 inclusive.
        :param n_steps: The number of steps to use.
        """
        super().__init__()
        assert 0 <= q <= 100
        assert 1 <= n_steps
        self.n_steps = int(n_steps)
        self.q = q

    def train(self, time_series: TimeSeries):
        pass

    def __call__(self, time_series: TimeSeries) -> TimeSeries:
        # Apply the moving percentile to each univariate independently.
        transformed = OrderedDict()
        for name, var in time_series.items():
            vals = var.np_values
            # Each output is the q-th percentile of the (up to) n_steps-long
            # window ending at that position; early windows are truncated.
            moving_pct = [
                np.percentile(vals[max(0, end - self.n_steps + 1) : end + 1], self.q) for end in range(len(vals))
            ]
            transformed[name] = UnivariateTimeSeries(var.index, moving_pct)
        out = TimeSeries(transformed, check_aligned=False)
        # Every univariate keeps its original index, so alignment is unchanged.
        out._is_aligned = time_series.is_aligned
        return out
class ExponentialMovingAverage(InvertibleTransformBase):
    r"""
    Computes the exponential moving average (normalized or un-normalized) of the
    time series, with smoothing factor alpha (lower alpha = more smoothing).
    alpha must be between 0 and 1.

    The unnormalized moving average ``y`` of ``x`` is computed as

    .. math::
        \begin{align*}
        y_0 & = x_0 \\
        y_i & = (1 - \alpha) \cdot y_{i-1} + \alpha \cdot x_i
        \end{align*}

    The normalized moving average ``y`` of ``x`` is computed as

    .. math::
        y_i = \frac{x_i + (1 - \alpha) x_{i-1} + \ldots + (1 - \alpha)^i x_0}
        {1 + (1 - \alpha) + \ldots + (1 - \alpha)^i}

    Upper and lower confidence bounds, ``l`` and ``u``, of the exponential moving
    average are computed using the exponential moving standard deviation, ``s``, and ``y`` as

    .. math::
        l_i = y_i + z_{\frac{1}{2} (1-p)} \times s_i \\
        u_i = y_i + z_{\frac{1}{2} (1+p)} \times s_i

    If confidence bounds are included, the returned time series will contain
    the upper and lower bounds as additional univariates. For example if the
    transform is applied to a time series with two univariates "x" and "y",
    the resulting time series will contain univariates with the following names:
    "x", "x_lb", "x_ub", "y", "y_lb", "y_ub".
    """

    def __init__(self, alpha: float, normalize: bool = True, p: float = 0.95, ci: bool = False):
        """
        :param alpha: smoothing factor to use for exponential weighting.
        :param normalize: If True, divide by the decaying adjustment in
            beginning periods.
        :param p: confidence level to use if returning the upper and lower
            bounds of the confidence interval.
        :param ci: If True, return the upper and lower confidence bounds
            of the exponential moving average as well.
        """
        super().__init__()
        self.alpha = alpha
        self.normalize = normalize
        self.p = p
        self.ci = ci

    @property
    def requires_inversion_state(self):
        """
        ``False`` because the exponential moving average is stateless to invert.
        """
        return False

    def train(self, time_series: TimeSeries):
        pass

    def __call__(self, time_series: TimeSeries) -> TimeSeries:
        new_vars = OrderedDict()
        for name, var in time_series.items():
            emw = var.to_pd().ewm(alpha=self.alpha, adjust=self.normalize)
            ema = emw.mean()
            new_vars[name] = UnivariateTimeSeries.from_pd(ema)
            if self.ci:
                ems = emw.std()
                # The first exponential moving std is undefined (NaN); backfill it
                # with the second value so the bounds start out well-defined. Use
                # .iloc: the index is datetime-based, so plain integer keys
                # (``ems[0]``) rely on deprecated positional fallback in pandas.
                if len(ems) > 1:
                    ems.iloc[0] = ems.iloc[1]
                new_vars[f"{name}_lb"] = UnivariateTimeSeries.from_pd(ema + norm.ppf(0.5 * (1 - self.p)) * ems)
                new_vars[f"{name}_ub"] = UnivariateTimeSeries.from_pd(ema + norm.ppf(0.5 * (1 + self.p)) * ems)
        ret = TimeSeries(new_vars, check_aligned=False)
        ret._is_aligned = time_series.is_aligned
        return ret

    def _invert(self, time_series: TimeSeries) -> TimeSeries:
        new_vars = OrderedDict()
        for name, var in time_series.items():
            # check whether variable is an upper or lower confidence bound; only
            # the means are inverted, the bounds are simply dropped
            if isinstance(name, str) and (name.endswith("_lb") or name.endswith("_ub")):
                continue
            t, y = var.index, var.np_values
            # Geometric series formula for (1 - alpha)^0 + ... + (1 - alpha)^i
            # to unnormalize the EWM before inverting it
            if self.normalize:
                weights = 1 - (1 - self.alpha) ** np.arange(1, len(y) + 1)
                y = y * weights / self.alpha
                x = y[1:] - (1 - self.alpha) * y[:-1]
            # Direct inversion of one-step update for unnormalized EWMA
            else:
                x = (y[1:] - (1 - self.alpha) * y[:-1]) / self.alpha
            x = np.concatenate((y[:1], x))
            new_vars[name] = UnivariateTimeSeries(t, x)
        ret = TimeSeries(new_vars, check_aligned=False)
        ret._is_aligned = time_series.is_aligned
        return ret
class DifferenceTransform(InvertibleTransformBase):
    """
    Applies a difference transform to the input time series. We include it
    as a moving average because we can consider the difference transform
    to be a 2-step moving "average" with weights w = [-1, 1].
    """

    def train(self, time_series: TimeSeries):
        pass

    def __call__(self, time_series: TimeSeries) -> TimeSeries:
        # Remember each univariate's initial point so _invert can anchor the
        # cumulative sum that reverses the differencing.
        x0 = {}
        new_vars = OrderedDict()
        for name, var in time_series.items():
            # var[0] is presumably a (timestamp, value) pair -- see the tuple
            # unpacking in _invert. TODO confirm against UnivariateTimeSeries.
            x0[name] = var[0]
            if len(var) <= 1:
                logger.warning(f"Cannot apply a difference transform to a time series of length {len(var)} < 2")
                new_vars[name] = UnivariateTimeSeries([], [])
            else:
                # Drop the first entry of diff(), which has no predecessor.
                new_vars[name] = UnivariateTimeSeries.from_pd(var.diff())[1:]
        self.inversion_state = x0
        ret = TimeSeries(new_vars, check_aligned=False)
        ret._is_aligned = time_series.is_aligned
        return ret

    def _invert(self, time_series: TimeSeries) -> TimeSeries:
        new_vars = OrderedDict()
        for name, var in time_series.items():
            # Prepend the saved initial point, then integrate the differences back up.
            t0, x0 = self.inversion_state[name]
            var = UnivariateTimeSeries([t0], [x0]).concat(var).cumsum()
            new_vars[name] = UnivariateTimeSeries.from_pd(var)
        ret = TimeSeries(new_vars, check_aligned=False)
        ret._is_aligned = time_series.is_aligned
        return ret
class LagTransform(InvertibleTransformBase):
    """
    Applies a lag transform to the input time series. Each x(i) gets mapped
    to x(i) - x(i-k). We include it as a moving average because we can consider
    the lag transform to be a k+1-step moving "average" with weights
    w = [-1, 0,..., 0, 1]. One may optionally left-pad the sequence with the
    first value in the time series.
    """

    def __init__(self, k: int, pad: bool = False):
        """
        :param k: the lag (number of steps back) to difference against.
        :param pad: if True, left-pad the series with its first value so the
            output keeps the input's length and timestamps.
        """
        super().__init__()
        assert k >= 1
        self.k = k
        self.pad = pad

    def train(self, time_series: TimeSeries):
        pass

    def __call__(self, time_series: TimeSeries) -> TimeSeries:
        # Save, per univariate, the timestamps dropped from the output (empty when
        # padding) and the first k values; _invert needs both to reconstruct.
        all_tk, all_xk = {}, {}
        new_vars = OrderedDict()
        for name, var in time_series.items():
            # Apply any x-padding or t-truncating necessary
            t, x = var.index, var.np_values
            all_xk[name] = x[: self.k]
            if self.pad:
                all_tk[name] = t[:0]  # empty index: no timestamps are dropped
                x = np.concatenate((np.full(self.k, x[0]), x))
            else:
                all_tk[name] = t[: self.k]
                t = t[self.k :]
            if len(var) <= self.k and not self.pad:
                logger.warning(
                    f"Cannot apply a {self.k}-lag transform to a time series of length {len(var)} <= {self.k}"
                )
                new_vars[name] = UnivariateTimeSeries([], [])
            else:
                new_vars[name] = UnivariateTimeSeries(t, x[self.k :] - x[: -self.k])
        self.inversion_state = all_tk, all_xk
        return TimeSeries(new_vars)

    def _invert(self, time_series: TimeSeries) -> TimeSeries:
        all_tk, all_xk = self.inversion_state
        new_vars = OrderedDict()
        for name, var in time_series.items():
            tk, xk = all_tk[name], all_xk[name]
            t = tk.union(var.index)
            if len(t) == len(xk) + len(var):  # no padding
                y = np.concatenate((xk, var.np_values))
            elif len(t) == len(var):  # padding
                y = np.asarray(var.values)
                # With padding, the first k outputs were x[i] - x[0]; overwrite
                # them with the saved original values x[:k].
                y[: len(xk)] = xk
            else:
                raise RuntimeError("Something went wrong: inversion state has unexpected size.")
            # Each residue class i, i+k, i+2k, ... telescopes independently, so a
            # k-strided cumulative sum recovers the original values.
            x = np.zeros(len(t))
            for i in range(self.k):
                x[i :: self.k] = np.cumsum(y[i :: self.k])
            new_vars[name] = UnivariateTimeSeries(t, x)
        return TimeSeries(new_vars)

    def compute_lag(self, var: UnivariateTimeSeries) -> UnivariateTimeSeries:
        """Apply the lag transform to a single univariate and return the result."""
        t, x = var.index, var.np_values
        if self.pad:
            x = np.concatenate((np.full(self.k, x[0]), x))
        vals = x[self.k :] - x[: -self.k]
        times = t if self.pad else t[self.k :]
        return UnivariateTimeSeries(times, vals)
from abc import abstractmethod
from typing import Tuple
import numpy as np
import pandas as pd
from merlion.transform.base import Identity, TransformBase
from merlion.transform.bound import LowerUpperClip
from merlion.utils.time_series import UnivariateTimeSeries, TimeSeries
class Anomalize(TransformBase):
    """
    Injects anomalies into a time series with controlled randomness and returns
    the anomalized time series along with the associated anomaly labels.
    """

    def __init__(self, anom_prob: float = 0.01, natural_bounds: Tuple[float, float] = (None, None), **kwargs):
        """
        :param anom_prob: The probability of anomalizing a particular data point.
        :param natural_bounds: Lower and upper natural boundaries within which the
            anomalized time series must stay.
        """
        super().__init__(**kwargs)
        assert 0 <= anom_prob <= 1
        self.anom_prob = anom_prob
        self.natural_bounds = natural_bounds

    @property
    def natural_bounds(self):
        return self.nat_lower, self.nat_upper

    @natural_bounds.setter
    def natural_bounds(self, bounds: Tuple[float, float]):
        # Clip anomalized values to the natural bounds when any are given;
        # otherwise leave them untouched.
        lower, upper = bounds
        if lower is not None or upper is not None:
            self.bound = LowerUpperClip(lower, upper)
        else:
            self.bound = Identity()
        self.nat_lower = lower
        self.nat_upper = upper

    @property
    def is_trained(self) -> bool:
        # Base anomalizers require no training; subclasses may override.
        return True

    def random_is_anom(self):
        """Randomly decide (with probability ``anom_prob``) whether to inject an anomaly."""
        return np.random.uniform() < self.anom_prob

    def __call__(self, time_series: TimeSeries, label_anoms: bool = True) -> TimeSeries:
        """
        :param label_anoms: If True, label injected anomalies with 1, otherwise, do not
            label injected anomalies.
        """
        if not self.is_trained:
            raise RuntimeError(f"Cannot use {type(self).__name__} without training it first!")
        assert time_series.dim <= 2, (
            "anomalize transforms may only be applied to univariate time series "
            "or bivariate time series, in which the second variable is a series "
            "of anomaly labels"
        )
        # A bivariate input carries pre-existing anomaly labels as its second variable.
        if time_series.dim == 2:
            var, prev_label_var = [time_series.univariates[name] for name in time_series.names]
            assert "anom" in prev_label_var.name
        else:
            var, prev_label_var = time_series.univariates[time_series.names[0]], None
        new_var, label_var = self._anomalize_univariate(var)
        if not label_anoms:
            # Zero out the newly injected labels. Use label_var's own length, and fall
            # back to its name when there are no pre-existing labels. (Previously this
            # referenced prev_label_var unconditionally, crashing on univariate inputs.)
            name = label_var.name if prev_label_var is None else prev_label_var.name
            label_var = UnivariateTimeSeries(label_var.time_stamps, [0] * len(label_var), name)
        # combine label univariates: a point is anomalous if it was labeled
        # anomalous before OR an anomaly was injected here
        if prev_label_var is not None:
            labels = []
            for (t1, lab), (t2, prev_lab) in zip(prev_label_var, label_var):
                labels.append(max(lab, prev_lab))
            label_var = UnivariateTimeSeries(label_var.time_stamps, labels, label_var.name)
        # bound result
        return TimeSeries.from_ts_list([self.bound(new_var.to_ts()), label_var.to_ts()])

    @abstractmethod
    def _anomalize_univariate(self, var: UnivariateTimeSeries):
        """Anomalize a single univariate; return ``(anomalized_var, label_var)``."""
        pass
class Shock(Anomalize):
    """
    Injects random spikes or dips into a time series.

    Letting ``y_t`` be a time series, if an anomaly is injected into
    the time series at time ``t``, the anomalous value that gets injected is as follows:

    .. math::
        \\tilde{y}_t &= y_t + \\text{shock} \\\\
        \\begin{split}
        \\text{where } \\space & \\text{shock} = Sign \\times Z\\times \\text{RWSD}_{\\alpha}(y_t), \\\\
        & Z \\sim \\mathrm{Unif}(a,b), \\\\
        & Sign \\text{ is a random sign} \\\\
        \\end{split}

    Additionally, the ``shock`` that is added to ``y_t`` is also applied to
    ``y_t+1``, ... ``y_w-1``, where ``w``, known as the "anomaly width" is
    randomly determined by a random draw from a uniform distribution.
    """

    def __init__(
        self,
        alpha: float = 0.2,
        pos_prob: float = 1.0,
        sd_range: Tuple[float, float] = (3, 6),
        anom_width_range: Tuple[int, int] = (1, 5),
        persist_shock: bool = False,
        **kwargs,
    ):
        """
        :param alpha: The recency weight to use when calculating recency-weighted
            standard deviation.
        :param pos_prob: The probability with which a shock's sign is positive.
        :param sd_range: The range of standard units that is used to create a shock
        :param anom_width_range: The range of anomaly widths.
        :param persist_shock: whether to apply the shock to all successive datapoints.
        """
        super().__init__(**kwargs)
        assert 0.0 <= pos_prob <= 1.0
        self.alpha = alpha
        self.pos_prob = pos_prob
        self.sd_range = sd_range
        self.anom_width_range = anom_width_range
        self.persist_shock = persist_shock

    @property
    def anom_width_range(self):
        return self.width_lower, self.width_upper

    @anom_width_range.setter
    def anom_width_range(self, range: Tuple[int, int]):
        lower, upper = range
        assert 0 < lower <= upper
        self.width_lower = lower
        self.width_upper = upper

    @property
    def sd_range(self):
        return self.sd_lower, self.sd_upper

    @sd_range.setter
    def sd_range(self, range: Tuple[float, float]):
        lower, upper = range
        assert lower <= upper
        self.sd_lower = lower
        self.sd_upper = upper

    def random_sd_units(self):
        """Draw the shock's magnitude in standard units, with a random sign."""
        sign = 1 if np.random.uniform() < self.pos_prob else -1
        return sign * np.random.uniform(self.sd_lower, self.sd_upper)

    def random_anom_width(self):
        """Draw a random anomaly width from the configured (inclusive) range."""
        return np.random.choice(range(self.width_lower, self.width_upper + 1))

    # NOTE: a redundant random_is_anom() override (byte-identical to the inherited
    # Anomalize.random_is_anom) was removed here.

    def train(self, time_series: TimeSeries):
        """
        The `Shock` transform doesn't require training.
        """
        pass

    def _anomalize_univariate(self, var: UnivariateTimeSeries) -> Tuple[UnivariateTimeSeries, UnivariateTimeSeries]:
        # Recency-weighted standard deviation, used to size shocks in "standard units".
        # NOTE(review): the first ewm std is presumably NaN, which would propagate
        # into a shock injected at the very first point -- confirm.
        ems = var.to_pd().ewm(alpha=self.alpha, adjust=False).std(bias=True)
        new_vals, labels = [], []
        anom_width, shock = 0, 0
        for ((t, x), sd) in zip(var, ems):
            if anom_width == 0:
                is_anom = self.random_is_anom()
                if is_anom:
                    # Start a new anomalous window: draw its shock size and the
                    # number of remaining points it covers.
                    shock = self.random_sd_units() * sd
                    anom_width = self.random_anom_width() - 1
                    val = x + shock
                else:
                    # Outside a window: carry the last shock forward only when persisting.
                    val = x + shock * self.persist_shock
            elif anom_width > 0:
                # Continue the current anomalous window.
                is_anom = True
                val = x + shock
                anom_width -= 1
            new_vals.append(val)
            labels.append(is_anom)
        anomalized_var = UnivariateTimeSeries(var.time_stamps, new_vals, var.name)
        labels_var = UnivariateTimeSeries(var.time_stamps, labels, "anomaly")
        return anomalized_var, labels_var
class LevelShift(Shock):
    """
    Injects random level-shift anomalies into a time series.

    A level shift is a sudden, persistent change of level: it behaves like a
    shock that, once applied to ``y_t``, is also applied to every datapoint
    after ``t``.
    """

    def __init__(self, **kwargs):
        """
        :param kwargs: keyword arguments forwarded to `Shock`. ``persist_shock``
            and ``anom_width_range`` are always overridden here.
        """
        # The shift persists indefinitely; the 20 points following it are the
        # ones labeled as anomalous.
        kwargs.update(persist_shock=True, anom_width_range=(20, 20))
        super().__init__(**kwargs)
class TrendChange(Anomalize):
r"""
Injects random trend changes into a time series.
At a high level, the transform tracks the velocity (trend) of a time series
and then, when injecting a trend change at a particular time, it scales
the current velocity by a random factor. The disturbance to the velocity is
persisted to values in the near future, thus emulating a sudden change of trend.
Let, ``(a,b)`` be the scale range. If the first trend change happens at time ``t*``,
it is injected as follows:
.. math::
\tilde{y}_{t^*} = y_{t^*-1} + v_{t^*} + \Delta v_{t^*} \\
\begin{align*}
\text{where } & \Delta v_{t^*} = Sign \times Z \times v_{t^*}, \\
& v_{t^*} = y_{t^*} - y_{t^*-1}
& Z \sim Unif(a,b), \\
& Sign \text{ is a random sign} \\
\end{align*}
Afterward, the trend change is persisted and ``y_t`` (for ``t > t*``) is changed as follows:
.. math::
\tilde{y}_{t} = \tilde{y}_{t-1} + v_t + \beta \times \Delta v_{t^*}
"""
def __init__(
self,
alpha: float = 0.5,
beta: float = 0.95,
pos_prob: float = 0.5,
scale_range: Tuple[float, float] = (0.5, 3.0),
**kwargs,
):
super().__init__(**kwargs)
"""
:param alpha: The recency weight to use when calculating recency-weighted
standard deviation.
:param beta: A parameter controlling the degree of trend change persistence.
:param pos_prob: The probably with which a shock's sign is positive.
:param scale_range: The range of possible values by which a time series's
velocity will be scaled.
"""
assert all(0 <= param <= 1 for param in (alpha, beta, pos_prob))
self.alpha = alpha
self.beta = beta
self.scale_range = scale_range
self.pos_prob = pos_prob
@property
def scale_range(self):
return self.scale_lower, self.scale_upper
@scale_range.setter
def scale_range(self, scale_range: Tuple[float, float]):
lower, upper = scale_range
assert 0 < lower <= upper
self.scale_lower = lower
self.scale_upper = upper
def random_scale(self):
sign = 1 if np.random.uniform() < self.pos_prob else -1
return sign * np.random.uniform(self.scale_lower, self.scale_upper)
def _anomalize_univariate(self, var: UnivariateTimeSeries):
vels = [0] + var.diff()[1:].tolist()
emv = pd.Series(vels).ewm(alpha=self.alpha, adjust=False).mean()
new_vals, labels = [], []
x_prev, v_delta = var.values[0], vels[0]
for v, mv in zip(vels, emv):
is_anom = self.random_is_anom()
v_delta = self.random_scale() * mv if is_anom else self.beta * v_delta
x = x_prev + v + v_delta
new_vals.append(x)
labels.append(is_anom)
x_prev = x
anomalized_var = UnivariateTimeSeries(var.time_stamps, new_vals, var.name)
labels_var = UnivariateTimeSeries(var.time_stamps, labels, "anomaly")
return anomalized_var, labels_var
def train(self, time_series: TimeSeries):
"""
The `TrendChange` transform doesn't require training.
"""
pass | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/merlion/transform/anomalize.py | 0.913353 | 0.575379 | anomalize.py | pypi |
from collections import OrderedDict
import logging
from typing import Iterable, Mapping
import numpy as np
import pandas as pd
import scipy.special
import scipy.stats
from sklearn.preprocessing import StandardScaler
from merlion.transform.base import InvertibleTransformBase, TransformBase
from merlion.utils import UnivariateTimeSeries, TimeSeries
logger = logging.getLogger(__name__)
class AbsVal(TransformBase):
"""
Takes the absolute value of the input time series.
"""
@property
def requires_inversion_state(self):
"""
``False`` because the "pseudo-inverse" is just the identity (i.e. we lose sign information).
"""
return False
@property
def identity_inversion(self):
return True
def train(self, time_series: TimeSeries):
pass
def __call__(self, time_series: TimeSeries) -> TimeSeries:
return TimeSeries(
OrderedDict(
(name, UnivariateTimeSeries(var.index, np.abs(var.np_values))) for name, var in time_series.items()
)
)
class Rescale(InvertibleTransformBase):
"""
Rescales the bias & scale of input vectors or scalars by pre-specified amounts.
"""
def __init__(self, bias=0.0, scale=1.0, normalize_bias=True, normalize_scale=True):
super().__init__()
self.bias = bias
self.scale = scale
self.normalize_bias = normalize_bias
self.normalize_scale = normalize_scale
@property
def requires_inversion_state(self):
"""
``False`` because rescaling operations are stateless to invert.
"""
return False
def train(self, time_series: TimeSeries):
pass
@property
def is_trained(self):
return self.bias is not None and self.scale is not None
def __call__(self, time_series: TimeSeries) -> TimeSeries:
if not self.is_trained:
raise RuntimeError(f"Cannot use {type(self).__name__} without training it first!")
bias = self.bias if isinstance(self.bias, Mapping) else {name: self.bias for name in time_series.names}
scale = self.scale if isinstance(self.scale, Mapping) else {name: self.scale for name in time_series.names}
assert set(time_series.names).issubset(bias.keys()) and set(time_series.names).issubset(scale.keys())
new_vars = OrderedDict()
for name, var in time_series.items():
if self.normalize_bias:
var = var - bias[name]
if self.normalize_scale:
var = var / scale[name]
new_vars[name] = UnivariateTimeSeries.from_pd(var)
ret = TimeSeries(new_vars, check_aligned=False)
ret._is_aligned = time_series._is_aligned
return ret
def _invert(self, time_series: TimeSeries) -> TimeSeries:
if not self.is_trained:
raise RuntimeError(f"Cannot use {type(self).__name__} without training it first!")
bias = self.bias if isinstance(self.bias, Mapping) else {name: self.bias for name in time_series.names}
scale = self.scale if isinstance(self.scale, Mapping) else {name: self.scale for name in time_series.names}
assert set(time_series.names).issubset(bias.keys()) and set(time_series.names).issubset(scale.keys())
new_vars = OrderedDict()
for name, var in time_series.items():
if self.normalize_scale:
var = var * scale[name]
if self.normalize_bias:
var = var + bias[name]
new_vars[name] = UnivariateTimeSeries.from_pd(var)
ret = TimeSeries(new_vars, check_aligned=False)
ret._is_aligned = time_series._is_aligned
return ret
class MeanVarNormalize(Rescale):
"""
A learnable transform that rescales the values of a time series to have
zero mean and unit variance.
"""
def __init__(self, bias=None, scale=None, normalize_bias=True, normalize_scale=True):
super().__init__(bias, scale, normalize_bias, normalize_scale)
def train(self, time_series: TimeSeries):
bias, scale = {}, {}
for name, var in time_series.items():
scaler = StandardScaler().fit(var.np_values.reshape(-1, 1))
bias[name] = float(scaler.mean_)
scale[name] = float(scaler.scale_)
self.bias = bias
self.scale = scale
class MinMaxNormalize(Rescale):
"""
A learnable transform that rescales the values of a time series to be
between zero and one.
"""
def __init__(self, bias=None, scale=None, normalize_bias=True, normalize_scale=True):
super().__init__(bias, scale, normalize_bias, normalize_scale)
def train(self, time_series: TimeSeries):
bias, scale = {}, {}
for name, var in time_series.items():
minval, maxval = var.min(), var.max()
bias[name] = minval
scale[name] = np.maximum(1e-8, maxval - minval)
self.bias = bias
self.scale = scale
class BoxCoxTransform(InvertibleTransformBase):
"""
Applies the Box-Cox power transform to the time series, with power lmbda.
When lmbda is None, we
When lmbda > 0, it is ((x + offset) ** lmbda - 1) / lmbda.
When lmbda == 0, it is ln(lmbda + offset).
"""
def __init__(self, lmbda=None, offset=0.0):
super().__init__()
if lmbda is not None:
if isinstance(lmbda, dict):
assert all(isinstance(x, (int, float)) for x in lmbda.values())
else:
assert isinstance(lmbda, (int, float))
self.lmbda = lmbda
self.offset = offset
@property
def requires_inversion_state(self):
"""
``False`` because the Box-Cox transform does is stateless to invert.
"""
return False
def train(self, time_series: TimeSeries):
if self.lmbda is None:
self.lmbda = {name: scipy.stats.boxcox(var.np_values + self.offset)[1] for name, var in time_series.items()}
logger.info(f"Chose Box-Cox lambda = {self.lmbda}")
elif not isinstance(self.lmbda, Mapping):
self.lmbda = {name: self.lmbda for name in time_series.names}
assert len(self.lmbda) == time_series.dim
def __call__(self, time_series: TimeSeries) -> TimeSeries:
new_vars = OrderedDict()
for name, var in time_series.items():
y = scipy.special.boxcox(var + self.offset, self.lmbda[name])
var = pd.Series(y, index=var.index, name=var.name)
new_vars[name] = UnivariateTimeSeries.from_pd(var)
return TimeSeries(new_vars)
def _invert(self, time_series: TimeSeries) -> TimeSeries:
new_vars = []
for name, var in time_series.items():
lmbda = self.lmbda[name]
if lmbda > 0:
var = (lmbda * var + 1) ** (1 / lmbda)
nanvals = var.isna()
if nanvals.any():
var[nanvals] = 0
else:
var = var.apply(np.exp)
new_vars.append(UnivariateTimeSeries.from_pd(var - self.offset))
return TimeSeries(new_vars) | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/merlion/transform/normalize.py | 0.942876 | 0.469216 | normalize.py | pypi |
from abc import abstractmethod
from copy import deepcopy
from enum import Enum
import inspect
import logging
from merlion.utils import TimeSeries
from merlion.utils.misc import AutodocABCMeta
logger = logging.getLogger(__name__)
class TransformBase(metaclass=AutodocABCMeta):
"""
Abstract class for a callable data pre-processing transform.
Subclasses must override the ``train`` method (:code:`pass` if
no training is required) and ``__call__`` method (to implement
the actual transform).
Subclasses may also support a pseudo inverse transform (possibly using the
implementation-specific ``self.inversion_state``, which should be set
in ``__call__``). If an inversion state is not required, override the
property `requires_inversion_state` to return ``False``.
Due to possible information loss in the forward pass, the inverse transform
may be not be perfect/proper, and calling `TransformBase.invert` will result
in a warning. By default, the inverse transform (implemented in
`TransformBase._invert`) is just the identity.
:ivar inversion_state: Implementation-specific intermediate state that is
used to compute the inverse transform for a particular time series. Only
used if `TransformBase.requires_inversion_state` is ``True``. The
inversion state is destroyed upon calling `TransformBase.invert`,
unless the option the option ``retain_inversion_state=True`` is
specified. This is to prevent potential user error.
.. document private members
.. automethod:: _invert
"""
def __init__(self):
self.inversion_state = None
@property
def proper_inversion(self):
"""
`TransformBase` objects do not support a proper inversion.
"""
return False
@property
def requires_inversion_state(self):
"""
Indicates whether any state ``self.inversion_state`` is required to
invert the transform. Specific to each transform. ``True`` by default.
"""
return True
@property
def identity_inversion(self):
"""
Indicates whether the inverse applied by this transform is just the identity.
"""
return not self.requires_inversion_state
def to_dict(self):
state = {"name": type(self).__name__}
for k in inspect.signature(self.__init__).parameters:
v = getattr(self, k)
state[k] = v.name if isinstance(v, Enum) else deepcopy(v)
return state
@classmethod
def from_dict(cls, state: dict):
return cls(**state)
def __getstate__(self):
return {k: v for k, v in self.to_dict().items() if k != "name"}
def __setstate__(self, state):
self.__init__(**state)
@abstractmethod
def train(self, time_series: TimeSeries):
"""
Sets all trainable parameters of the transform (if any), using the input time series as training data.
"""
raise NotImplementedError
@abstractmethod
def __call__(self, time_series: TimeSeries) -> TimeSeries:
raise NotImplementedError
def invert(self, time_series: TimeSeries, retain_inversion_state=False) -> TimeSeries:
"""
Applies the inverse of this transform on the time series.
:param time_series: The time series on which to apply the inverse
transform.
:param retain_inversion_state: If an inversion state is required, supply
``retain_inversion_state=True`` to retain the inversion state
even after calling this method. Otherwise, the inversion state will
be set to ``None`` after the inversion is applied, to prevent a user
error of accidentally using a stale state.
:return: The (inverse) transformed time series.
"""
if not self.proper_inversion:
logger.warning(
f"Transform {self} is not strictly invertible. Calling invert() is not guaranteed to recover the "
f"original time series exactly!"
)
if self.requires_inversion_state and self.inversion_state is None:
raise RuntimeError(
"Inversion state not set. Please call this transform on an "
"input time series before calling invert(). If you are trying "
"to call invert() a second time, please supply the option "
"`retain_inversion_state=True` to the first call."
)
inverted = self._invert(time_series)
if not retain_inversion_state:
self.inversion_state = None
return inverted
def _invert(self, time_series: TimeSeries) -> TimeSeries:
"""
Helper method which actually performs the inverse transform
(when possible).
:param time_series: Time series to apply the inverse transform to
:return: The (inverse) transformed time series.
"""
return time_series
def __repr__(self):
kwargs = self.to_dict()
name = kwargs.pop("name")
kwargs_str = ", ".join(f"{k}={v}" for k, v in sorted(kwargs.items()))
return f"{name}({kwargs_str})"
class InvertibleTransformBase(TransformBase):
"""
Abstract class for a callable data pre-processing transform with a proper
inverse.
In addition to overriding the ``train`` and ``__call__`` methods, subclasses
*must* also override the `InvertibleTransformBase._invert` method to
implement the actual inverse transform.
:ivar inversion_state: Implementation-specific intermediate state that is
used to compute the inverse transform for a particular time series. Only
used if `TransformBase.requires_inversion_state` is ``True``. The
inversion state is destroyed upon calling `TransformBase.invert`,
unless the option the option ``retain_inversion_state=True`` is
specified. This is to prevent potential user error.
.. document private members
.. automethod:: _invert
"""
@property
def proper_inversion(self):
"""
`InvertibleTransformBase` always supports a proper inversion.
"""
return True
@property
def identity_inversion(self):
return False
@abstractmethod
def _invert(self, time_series: TimeSeries) -> TimeSeries:
raise NotImplementedError
class Identity(InvertibleTransformBase):
"""
The identity transformation. Does nothing.
"""
def __init__(self):
super().__init__()
@property
def requires_inversion_state(self):
"""
``False`` because the identity operation is stateless to invert.
"""
return False
@property
def identity_inversion(self):
return True
def train(self, time_series: TimeSeries):
pass
def __call__(self, time_series: TimeSeries) -> TimeSeries:
return time_series
def _invert(self, time_series: TimeSeries) -> TimeSeries:
return time_series | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/merlion/transform/base.py | 0.919353 | 0.481637 | base.py | pypi |
from collections import OrderedDict
import logging
from typing import Union
import numpy as np
from merlion.transform.base import TransformBase, InvertibleTransformBase
from merlion.utils import UnivariateTimeSeries, TimeSeries
from merlion.utils.resample import (
infer_granularity,
granularity_str_to_seconds,
AlignPolicy,
AggregationPolicy,
MissingValuePolicy,
)
logger = logging.getLogger(__name__)
class TemporalResample(TransformBase):
"""
Defines a policy to temporally resample a time series at a specified granularity. Note that while this transform
does support inversion, the recovered time series may differ from the input due to information loss when resampling.
"""
def __init__(
self,
granularity: Union[str, int, float] = None,
origin: int = None,
trainable_granularity: bool = None,
remove_non_overlapping=True,
aggregation_policy: Union[str, AggregationPolicy] = "Mean",
missing_value_policy: Union[str, MissingValuePolicy] = "Interpolate",
):
"""
Defines a policy to temporally resample a time series.
:param granularity: The granularity at which we want to resample.
:param origin: The time stamp defining the offset to start at.
:param trainable_granularity: Whether we will automatically infer the granularity of the time series.
If ``None`` (default), it will be trainable only if no granularity is explicitly given.
:param remove_non_overlapping: If ``True``, we will only keep the portions
of the univariates that overlap with each other. For example, if we
have 3 univariates which span timestamps [0, 3600], [60, 3660], and
[30, 3540], we will only keep timestamps in the range [60, 3540]. If
``False``, we will keep all timestamps produced by the resampling.
:param aggregation_policy: The policy we will use to aggregate multiple values in a window (downsampling).
:param missing_value_policy: The policy we will use to impute missing values (upsampling).
"""
super().__init__()
self.granularity = granularity
self.origin = origin
self.trainable_granularity = (granularity is None) if trainable_granularity is None else trainable_granularity
self.remove_non_overlapping = remove_non_overlapping
self.aggregation_policy = aggregation_policy
self.missing_value_policy = missing_value_policy
@property
def requires_inversion_state(self):
return False
@property
def proper_inversion(self):
"""
We treat resampling as a proper inversion to avoid emitting warnings.
"""
return True
@property
def granularity(self):
return self._granularity
@granularity.setter
def granularity(self, granularity):
if not isinstance(granularity, (int, float)):
try:
granularity = granularity_str_to_seconds(granularity)
except:
granularity = getattr(granularity, "freqstr", granularity)
self._granularity = granularity
@property
def aggregation_policy(self) -> AggregationPolicy:
return self._aggregation_policy
@aggregation_policy.setter
def aggregation_policy(self, agg: Union[str, AggregationPolicy]):
if isinstance(agg, str):
valid = set(AggregationPolicy.__members__.keys())
if agg not in valid:
raise KeyError(f"{agg} is not a valid aggregation policy. Valid aggregation policies are: {valid}")
agg = AggregationPolicy[agg]
self._aggregation_policy = agg
@property
def missing_value_policy(self) -> MissingValuePolicy:
return self._missing_value_policy
@missing_value_policy.setter
def missing_value_policy(self, mv: Union[str, MissingValuePolicy]):
if isinstance(mv, str):
valid = set(MissingValuePolicy.__members__.keys())
if mv not in valid:
raise KeyError(f"{mv} is not a valid missing value policy. Valid aggregation policies are: {valid}")
mv = MissingValuePolicy[mv]
self._missing_value_policy = mv
def train(self, time_series: TimeSeries):
if self.trainable_granularity:
granularity = infer_granularity(time_series.np_time_stamps)
logger.warning(f"Inferred granularity {granularity}")
self.granularity = granularity
if self.trainable_granularity or self.origin is None:
t0, tf = time_series.t0, time_series.tf
if isinstance(self.granularity, (int, float)):
offset = (tf - t0) % self.granularity
else:
offset = 0
self.origin = t0 + offset
def __call__(self, time_series: TimeSeries) -> TimeSeries:
if self.granularity is None:
logger.warning(
f"Skipping resampling step because granularity is "
f"None. Please either specify a granularity or train "
f"this transformation on a time series."
)
return time_series
return time_series.align(
alignment_policy=AlignPolicy.FixedGranularity,
granularity=self.granularity,
origin=self.origin,
remove_non_overlapping=self.remove_non_overlapping,
aggregation_policy=self.aggregation_policy,
missing_value_policy=self.missing_value_policy,
)
class Shingle(InvertibleTransformBase):
"""
Stacks adjacent observations into a single vector. Downsamples by the
specified stride (less than or equal to the shingle size) if desired.
More concretely, consider an input time series,
.. code-block:: python
TimeSeries(
UnivariateTimeSeries((t1[0], x1[0]), ..., (t1[m], t1[m])),
UnivariateTimeSeries((t2[0], x2[0]), ..., (t2[m], t2[m])),
)
Applying a shingle of size 3 and stride 2 will yield
.. code-block:: python
TimeSeries(
UnivariateTimeSeries((t1[0], x1[0]), (t1[2], x1[2]), ..., (t1[m-2], x1[m-2])),
UnivariateTimeSeries((t1[1], x1[1]), (t1[3], x1[3]), ..., (t1[m-1], x1[m-1])),
UnivariateTimeSeries((t1[2], x1[2]), (t1[4], x1[4]), ..., (t1[m], x1[m])),
UnivariateTimeSeries((t2[0], x2[0]), (t2[2], x2[2]), ..., (t2[m-2], x2[m-2])),
UnivariateTimeSeries((t2[1], x2[1]), (t2[3], x2[3]), ..., (t2[m-1], x2[m-1])),
UnivariateTimeSeries((t2[2], x2[2]), (t2[4], x2[4]), ..., (t2[m], x2[m])),
)
If the length of any univariate is not perfectly divisible by the stride, we
will pad it on the left side with the first value in the univariate.
"""
def __init__(self, size: int = 1, stride: int = 1, multivar_skip=True):
"""
Converts the time series into shingle vectors of the appropriate size.
This converts each univariate into a multivariate time series with
``size`` variables.
:param size: let x(t) = value_t be the value of the time series at
time index t. Then, the output vector for time index t will be
:code:`[x(t - size + 1), ..., x(t - 1), x(t)]`.
:param stride: The stride at which the output vectors are downsampled.
:param multivar_skip: Whether to skip this transform if the transform
is already multivariate.
"""
super().__init__()
assert size >= 0
assert 1 <= stride <= size
self.stride = stride
self.size = size
self.multivar_skip = multivar_skip
def train(self, time_series: TimeSeries):
pass
def __call__(self, time_series: TimeSeries) -> TimeSeries:
if self.multivar_skip and time_series.dim > 1:
self.inversion_state = "skip"
return time_series
new_vars = OrderedDict()
for name, var in time_series.items():
# Left-pad the time series with the first value
x0 = var.np_values[0]
vals = np.concatenate((np.full(self.size - 1, x0), var.np_values))
# Stack adjacent observations into vectors of length self.size,
# and apply any striding desired
i0 = (len(var) - 1) % self.stride
times = var.index[i0 :: self.stride]
all_vals = np.stack([vals[i : len(vals) - self.size + i + 1] for i in range(self.size)])
all_vals = all_vals[:, i0 :: self.stride]
# Convert the stacked values into UnivariateTimeSeries objects
new_vars.update(
OrderedDict([(f"{name}_{i}", UnivariateTimeSeries(times, x)) for i, x in enumerate(all_vals)])
)
# The inversion state is just the timestamps of the univariates before
# shingling occurs, and the name of the original univariate
self.inversion_state = [(name, v.index) for name, v in time_series.items()]
return TimeSeries(new_vars)
def _invert(self, time_series: TimeSeries) -> TimeSeries:
if self.inversion_state == "skip":
return time_series
new_vars = OrderedDict()
for i, (name, time_stamps) in enumerate(self.inversion_state):
vals = []
expected_src_names = [f"{name}_{i}" for i in range(self.size)]
src_names = time_series.names[i * self.size : (i + 1) * self.size]
src = TimeSeries(OrderedDict([(k, time_series.univariates[k]) for k in src_names]))
assert src.is_aligned and src.dim == self.size, (
f"{self} should convert a univariate time series into an "
f"aligned multivariate time series of dim {self.size}, but "
f"something went wrong."
)
assert (
src.names == expected_src_names
), f"Expected univariates named {expected_src_names}, but got {src.names}"
for j, (t, val_vec) in enumerate(src[::-1]):
j0 = j * self.stride
val_vec = val_vec[::-1]
vals.extend(val_vec[len(vals) - j0 :])
vals = vals[len(time_stamps) :: -1][-len(time_stamps) :]
new_vars[name] = UnivariateTimeSeries(time_stamps, vals)
return TimeSeries(new_vars) | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/merlion/transform/resample.py | 0.954457 | 0.566049 | resample.py | pypi |
from collections import OrderedDict
import logging
from typing import List
from merlion.transform.base import TransformBase, InvertibleTransformBase, Identity
from merlion.transform.factory import TransformFactory
from merlion.utils import TimeSeries
logger = logging.getLogger(__name__)
class TransformSequence(InvertibleTransformBase):
"""
Applies a series of data transformations sequentially.
"""
def __init__(self, transforms: List[TransformBase]):
super().__init__()
self.transforms = []
for t in transforms:
assert isinstance(
t, (TransformBase, dict)
), f"Expected all transforms to be instances of TransformBase, or dict, but got {transforms}"
if isinstance(t, dict):
t = TransformFactory.create(**t)
self.transforms.extend(self._extract_nontrivial_transforms(t))
def _extract_nontrivial_transforms(self, transform: TransformBase) -> List[TransformBase]:
if isinstance(transform, type(self)):
transforms = sum([self._extract_nontrivial_transforms(t) for t in transform.transforms], [])
else:
transforms = [transform]
return [t for t in transforms if not isinstance(t, Identity)]
@property
def proper_inversion(self):
"""
A transform sequence is invertible if and only if all the transforms comprising it are invertible.
"""
return all(f.proper_inversion for f in self.transforms)
@property
def identity_inversion(self):
return all(f.identity_inversion for f in self.transforms)
@property
def requires_inversion_state(self):
"""
``False`` because inversion state is held by individual transforms.
"""
return False
def to_dict(self):
return {"name": type(self).__name__, "transforms": [f.to_dict() for f in self.transforms]}
def append(self, transform):
assert isinstance(transform, TransformBase)
self.transforms.append(transform)
@classmethod
def from_dict(cls, state):
return cls([TransformFactory.create(**d) for d in state["transforms"]])
def train(self, time_series: TimeSeries):
for f in self.transforms:
f.train(time_series)
time_series = f(time_series)
def __call__(self, time_series: TimeSeries) -> TimeSeries:
for f in self.transforms:
time_series = f(time_series)
return time_series
def invert(self, time_series: TimeSeries, retain_inversion_state=False) -> TimeSeries:
for f in self.transforms[-1::-1]:
time_series = f.invert(time_series, retain_inversion_state)
return time_series
def _invert(self, time_series: TimeSeries) -> TimeSeries:
logger.warning(
f"_invert() should not be called by a transform of type {type(self).__name__}. Applying the identity.",
stack_info=True,
)
return time_series
def __repr__(self):
return "TransformSequence(\n " + ",\n ".join([repr(f) for f in self.transforms]) + "\n)"
class TransformStack(InvertibleTransformBase):
"""
Applies a set of data transformations individually to an input time series.
Stacks all of the results into a multivariate time series.
"""
def __init__(self, transforms, *, check_aligned=True):
super().__init__()
self.transforms = []
for t in transforms:
assert isinstance(
t, (TransformBase, dict)
), f"Expected all transforms to be instances of TransformBase, or dict, but got {transforms}"
if isinstance(t, dict):
t = TransformFactory.create(**t)
self.transforms.append(t)
self.check_aligned = check_aligned
@property
def proper_inversion(self):
"""
A stacked transform is invertible if and only if at least one of the transforms comprising it are invertible.
"""
return any(f.proper_inversion for f in self.transforms)
@property
def requires_inversion_state(self):
"""
``True`` because the inversion state tells us which stacked transform to invert, and which part of the
output time series to apply that inverse to.
"""
return True
def train(self, time_series: TimeSeries):
for f in self.transforms:
f.train(time_series)
def __call__(self, time_series: TimeSeries) -> TimeSeries:
ts_list = [f(time_series) for f in self.transforms]
# To invert the overall stacked transform, we pick one transform (idx)
# to invert. The outputs of this transform are univariates d0 to df of
# the output time series. We also need to keep track of the names of the
# univariates in the input time series.
if self.proper_inversion:
idx = min(i for i, f in enumerate(self.transforms) if f.proper_inversion)
d0 = sum(ts.dim for ts in ts_list[:idx])
df = d0 + ts_list[idx].dim
self.inversion_state = (idx, d0, df, time_series.names)
else:
self.inversion_state = (0, 0, ts_list[0].dim, time_series.names)
return TimeSeries.from_ts_list(ts_list, check_aligned=self.check_aligned)
def invert(self, time_series: TimeSeries, retain_inversion_state=False) -> TimeSeries:
if self.inversion_state is None:
raise RuntimeError(
"Inversion state not set. Please call this transform on an "
"input time series before calling invert(). If you are trying "
"to call invert() a second time, please supply the option "
"`retain_inversion_state=True` to the first call."
)
idx, d0, df, names = self.inversion_state
ts = TimeSeries(OrderedDict((n, time_series.univariates[n]) for n in time_series.names[d0:df]))
inverted = self.transforms[idx].invert(ts, retain_inversion_state)
assert inverted.dim == len(names)
inverted = TimeSeries(OrderedDict((name, var) for name, var in zip(names, inverted.univariates)))
if not retain_inversion_state:
self.inversion_state = None
return inverted
def _invert(self, time_series: TimeSeries) -> TimeSeries:
logger.warning(
f"_invert() should not be called by a transform of type {type(self).__name__}. Applying the identity.",
stack_info=True,
)
return time_series
def __repr__(self):
return "TransformStack(\n " + ",\n ".join([repr(f) for f in self.transforms]) + "\n)" | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/merlion/transform/sequence.py | 0.946535 | 0.641647 | sequence.py | pypi |
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
# pyspark is an optional dependency; fail fast with an actionable install hint
# instead of a bare ImportError deep inside a call stack.
try:
    import pyspark.sql
    import pyspark.sql.functions as F
    from pyspark.sql.types import DateType, StringType, StructType
except ImportError as e:
    # NOTE: fixed a stray backtick that previously garbled the second install command.
    err = (
        "Try installing Merlion with optional dependencies using `pip install salesforce-merlion[spark]` or "
        "`pip install salesforce-merlion[all]`"
    )
    raise ImportError(str(e) + ". " + err)
TSID_COL_NAME = "__ts_id"
"""
Many functions in this module rely on having a column named `TSID_COL_NAME` being in the dataset.
This column can be added manually using `add_tsid_column`, and its addition is handled automatically by `read_dataset`.
"""
def read_dataset(
    spark: pyspark.sql.SparkSession,
    path: str,
    file_format: str = "csv",
    time_col: str = None,
    index_cols: List[str] = None,
    data_cols: List[str] = None,
) -> pyspark.sql.DataFrame:
    """
    Reads a time series dataset as a pyspark Dataframe.

    :param spark: The current SparkSession.
    :param path: The path at which the dataset is stored.
    :param file_format: The file format the dataset is stored in.
    :param time_col: The name of the column which specifies timestamp. If ``None`` is provided, it is assumed to be the
        first column which is not an index column or pre-specified data column.
    :param index_cols: The columns used to index the various time series in the dataset. If ``None`` is provided, we
        assume the entire dataset is just a single time series.
    :param data_cols: The columns we will use for downstream time series tasks. If ``None`` is provided, we use all
        columns that are not a time or index column.

    :return: A pyspark dataframe with columns ``[time_col, *index_cols, *data_cols, TSID_COL_NAME]`` (in that order).
    """
    # Read the dataset into a pyspark dataframe
    df = spark.read.format(file_format).load(path, inferSchema=True, header=True)
    fields = df.schema.fieldNames()

    # Only keep the index column, data columns, and time column
    index_cols = index_cols or []
    if time_col is None:
        time_col = [c for c in fields if c not in index_cols + (data_cols or [])][0]
    # Use all non-index non-time columns as data columns if data columns are not given
    if data_cols is None or len(data_cols) == 0:
        data_cols = [c for c in fields if c not in index_cols + [time_col]]
    # Validate that each requested data column exists in the dataframe and doesn't overlap
    # with the time/index columns. (Previously the first check was vacuously `col in data_cols`.)
    assert all(col in fields and col not in index_cols + [time_col] for col in data_cols)

    # Get the columns in the right order, convert index columns to string, and get data columns in the right order.
    # Index cols are string because we indicate aggregation with a reserved "__aggregated__" string
    df = df.select(
        F.col(time_col).cast(DateType()).alias(time_col),
        *[F.col(c).cast(StringType()).alias(c) for c in index_cols],
        *data_cols,
    )

    # add TSID_COL_NAME to the end before returning
    return add_tsid_column(spark=spark, df=df, index_cols=index_cols)
def write_dataset(df: pyspark.sql.DataFrame, time_col: str, path: str, file_format: str = "csv"):
    """
    Saves the given dataset to ``path`` in the requested file format.

    :param df: The dataframe to save. The dataframe must have a column `TSID_COL_NAME`
        indexing the time series in the dataset (this column is automatically added by `read_dataset`).
    :param time_col: The name of the column which specifies timestamp.
    :param path: The path to save the dataset at.
    :param file_format: The file format in which to save the dataset.
    """
    # Order rows by (time series ID, timestamp), then drop the internal ID column before writing.
    ordered = df.sort([TSID_COL_NAME, time_col]).drop(TSID_COL_NAME)
    ordered.write.format(file_format).save(path, header=True, mode="overwrite")
def create_hier_dataset(
    spark: pyspark.sql.SparkSession,
    df: pyspark.sql.DataFrame,
    time_col: str = None,
    index_cols: List[str] = None,
    agg_dict: Dict = None,
) -> Tuple[pyspark.sql.DataFrame, np.ndarray]:
    """
    Aggregates the time series in the dataset & appends them to the original dataset.

    :param spark: The current SparkSession.
    :param df: A pyspark dataframe containing all the data. The dataframe must have a column `TSID_COL_NAME`
        indexing the time series in the dataset (this column is automatically added by `read_dataset`).
    :param time_col: The name of the column which specifies timestamp. If ``None`` is provided, it is assumed to be the
        first column which is not an index column or pre-specified data column.
    :param index_cols: The columns used to index the various time series in the dataset. If ``None`` is provided, we
        assume the entire dataset is just a single time series. These columns define the levels of the hierarchy.
        For example, if each time series represents sales and we have ``index_cols = ["store", "item"]``, we will
        first aggregate sales for all items sold at a particular store; then we will aggregate sales for all items at
        all stores.
    :param agg_dict: A dictionary used to specify how different data columns should be aggregated. If a data column
        is not in the dict, we aggregate using sum by default.
    :return: The dataset with additional time series corresponding to each level of the hierarchy, as well as a
        matrix specifying how the hierarchy is constructed.
    """
    # Determine which columns are index vs. data columns.
    # TSID_COL_NAME is managed internally, so it never counts as a user-specified index column.
    index_cols = [] if index_cols is None else index_cols
    index_cols = [c for c in index_cols if c != TSID_COL_NAME]
    extended_index_cols = index_cols + [TSID_COL_NAME]
    if time_col is None:
        # By convention, the first non-index column is the timestamp & the remaining ones are data columns.
        non_index_cols = [c for c in df.schema.fieldNames() if c not in extended_index_cols]
        time_col = non_index_cols[0]
        data_cols = non_index_cols[1:]
    else:
        data_cols = [c for c in df.schema.fieldNames() if c not in extended_index_cols + [time_col]]
    # Create a pandas index enumerating all the (leaf-level) time series in the dataset
    ts_index = df.groupBy(extended_index_cols).count().drop("count").toPandas()
    ts_index = ts_index.set_index(index_cols).sort_index()
    index_schema = StructType([df.schema[c] for c in extended_index_cols])
    # n = number of leaf (base-level) time series; aggregated series get IDs n, n+1, ...
    n = len(ts_index)
    # Add all higher levels of the hierarchy
    full_df = df
    hier_vecs = []
    # Compose the aggregation portions of the SQL select statements below.
    # Default aggregation is SUM unless overridden by agg_dict.
    df.createOrReplaceTempView("df")
    agg_dict = {} if agg_dict is None else agg_dict
    data_col_sql = [f"{agg_dict.get(c, 'sum').upper()}(`{c}`) AS `{c}`" for c in data_cols]
    for k in range(len(index_cols)):
        # Aggregate values of data columns over the last k+1 index column values.
        gb_cols = index_cols[: -(k + 1)]
        gb_col_sql = [f"`{c}`" for c in [time_col] + gb_cols]
        agg = spark.sql(f"SELECT {','.join(gb_col_sql + data_col_sql)} FROM df GROUP BY {','.join(gb_col_sql)};")
        # Add back dummy NA values for the index columns we aggregated over, add a time series ID column,
        # concatenate the aggregated time series to the full dataframe, and compute the hierarchy vector.
        # For the top level of the hierarchy, this is easy as we just sum everything
        if len(gb_cols) == 0:
            dummy = [["__aggregated__"] * len(index_cols) + [n + len(hier_vecs)]]
            full_df = full_df.unionByName(agg.join(spark.createDataFrame(dummy, schema=index_schema)))
            hier_vecs.append(np.ones(n))
            continue
        # For lower levels of the hierarchy, we determine the membership of each grouping to create
        # the appropriate dummy entries and hierarchy vectors.
        dummy = []
        for i, (group, group_idxs) in enumerate(ts_index.groupby(gb_cols).groups.items()):
            group = [group] if len(gb_cols) == 1 else list(group)
            # locs = positional indices of the leaf time series belonging to this aggregation group
            locs = [ts_index.index.get_loc(j) for j in group_idxs]
            dummy.append(group + ["__aggregated__"] * (k + 1) + [n + len(hier_vecs)])
            x = np.zeros(n)
            x[locs] = 1
            hier_vecs.append(x)
        dummy = spark.createDataFrame(dummy, schema=index_schema)
        full_df = full_df.unionByName(agg.join(dummy, on=gb_cols))
    # Create the full hierarchy matrix, and return it along with the updated dataframe.
    # Row k of the matrix expresses time series k as a weighted sum of the n leaf series.
    hier_matrix = np.concatenate([np.eye(n), np.stack(hier_vecs)])
    return full_df, hier_matrix
def add_tsid_column(
    spark: pyspark.sql.SparkSession, df: pyspark.sql.DataFrame, index_cols: List[str]
) -> pyspark.sql.DataFrame:
    """
    Adds the column `TSID_COL_NAME` to the dataframe, which assigns an integer ID to each time series in the dataset.

    :param spark: The current SparkSession.
    :param df: A pyspark dataframe containing all the data.
    :param index_cols: The columns used to index the various time series in the dataset.
    :return: The pyspark dataframe with an additional column `TSID_COL_NAME` added as the last column.
    """
    # Nothing to do if the ID column is already present.
    if TSID_COL_NAME in df.schema.fieldNames():
        return df
    # With no index columns, the whole dataset is a single time series with ID 0.
    if index_cols is None or len(index_cols) == 0:
        return df.join(spark.createDataFrame(pd.DataFrame([0], columns=[TSID_COL_NAME])))
    # Enumerate the distinct index tuples. Tuples containing nulls come last,
    # because these correspond to aggregated time series.
    id_map = df.groupBy(index_cols).count().drop("count").toPandas()
    has_null = id_map.isna().any(axis=1)
    id_map = pd.concat(
        (
            id_map[~has_null].sort_values(by=index_cols, axis=0, ascending=True),
            id_map[has_null].sort_values(by=index_cols, axis=0, ascending=True),
        ),
        axis=0,
    )
    id_map[TSID_COL_NAME] = np.arange(len(id_map))
    # Join the ID assignments back onto the full dataframe; eqNullSafe matches null index values too.
    id_map = spark.createDataFrame(id_map)
    condition = None
    for col in index_cols:
        pred = df[col].eqNullSafe(id_map[col])
        condition = pred if condition is None else condition & pred
    df = df.join(id_map, on=condition)
    # Drop the duplicate index columns introduced by the join.
    for col in index_cols:
        df = df.drop(id_map[col])
    return df
import logging
import traceback
from typing import List, Union
import numpy as np
import pandas as pd
from merlion.models.factory import instantiate_or_copy_model, ModelFactory
from merlion.models.anomaly.base import DetectorBase
from merlion.models.forecast.base import ForecasterBase
from merlion.spark.dataset import TSID_COL_NAME
from merlion.utils import TimeSeries, to_pd_datetime
logger = logging.getLogger(__name__)
def forecast(
    pdf: pd.DataFrame,
    index_cols: List[str],
    time_col: str,
    target_col: str,
    time_stamps: Union[List[int], List[str]],
    model: Union[ForecasterBase, dict],
    predict_on_train: bool = False,
    agg_dict: dict = None,
) -> pd.DataFrame:
    """
    Pyspark pandas UDF for performing forecasting.
    Should be called on a pyspark dataframe grouped by time series ID, i.e. by ``index_cols``.

    :param pdf: The ``pandas.DataFrame`` containing the training data. Should be a single time series.
    :param index_cols: The list of column names used to index all the time series in the dataset. Not used for modeling.
    :param time_col: The name of the column containing the timestamps.
    :param target_col: The name of the column whose value we wish to forecast.
    :param time_stamps: The timestamps at which we would like to obtain a forecast.
    :param model: The model (or model ``dict``) we are using to obtain a forecast.
    :param predict_on_train: Whether to return the model's prediction on the training data.
    :param agg_dict: A dictionary used to specify how different data columns should be aggregated. If a non-target
        data column is not in agg_dict, we do not model it for aggregated time series.
    :return: A ``pandas.DataFrame`` with the forecast & its standard error (NaN if the model doesn't have error bars).
        Columns are ``[*index_cols, time_col, target_col, target_col + "_err"]``.
    """
    # agg_dict is consulted below even if the caller provides None; normalize it up front
    # so `c not in agg_dict` doesn't raise a TypeError for aggregated time series.
    agg_dict = agg_dict or {}
    # If the time series has been aggregated, drop non-target columns which are not explicitly specified in agg_dict.
    if TSID_COL_NAME not in index_cols and TSID_COL_NAME in pdf.columns:
        index_cols = index_cols + [TSID_COL_NAME]
    if (pdf.loc[:, index_cols] == "__aggregated__").any().any():
        data_cols = [c for c in pdf.columns if c not in index_cols + [time_col]]
        pdf = pdf.drop(columns=[c for c in data_cols if c != target_col and c not in agg_dict])

    # Sort the dataframe by time & turn it into a Merlion time series
    pdf = pdf.sort_values(by=time_col)
    ts = TimeSeries.from_pd(pdf.drop(columns=index_cols).set_index(time_col))

    # Create model
    model = instantiate_or_copy_model(model or {"name": "DefaultForecaster"})
    if not isinstance(model, ForecasterBase):
        raise TypeError(f"Expected `model` to be an instance of ForecasterBase, but got {model}.")

    # Train model & run forecast
    try:
        train_pred, train_err = model.train(ts)
        pred, err = model.forecast(time_stamps=time_stamps)
    except Exception:
        # Best-effort fallback: return the mean training value so the UDF still produces rows.
        row0 = pdf.iloc[0]
        idx = ", ".join(f"{k} = {row0[k]}" for k in index_cols)
        logger.warning(
            f"Model {type(model).__name__} threw an exception on ({idx}). Returning the mean training value as a "
            f"placeholder forecast. {traceback.format_exc()}"
        )
        meanval = pdf.loc[:, target_col].mean().item()
        train_err, err = None, None
        train_pred = TimeSeries.from_pd(pd.DataFrame(meanval, index=pdf[time_col], columns=[target_col]))
        pred = TimeSeries.from_pd(pd.DataFrame(meanval, index=to_pd_datetime(time_stamps), columns=[target_col]))

    # Concatenate train & test results if predict_on_train is True
    if predict_on_train:
        if train_pred is not None and pred is not None:
            pred = train_pred + pred
        if train_err is not None and err is not None:
            err = train_err + err

    # Combine forecast & stderr into a single dataframe
    pred = pred.to_pd()
    # .iloc avoids the deprecated positional fallback of Series.__getitem__ with an int key
    dtype = pred.dtypes.iloc[0]
    err = pd.DataFrame(np.full(len(pred), np.nan), index=pred.index, dtype=dtype) if err is None else err.to_pd()
    pred = pd.DataFrame(pred.iloc[:, 0].rename(target_col))
    err = pd.DataFrame(err.iloc[:, 0].rename(f"{target_col}_err"))
    pred_pdf = pd.concat([pred, err], axis=1)

    # Turn the time index into a regular column, and add the index columns back to the prediction
    pred_pdf.index.name = time_col
    pred_pdf.reset_index(inplace=True)
    index_pdf = pd.concat([pdf[index_cols].iloc[:1]] * len(pred_pdf), ignore_index=True)
    return pd.concat((index_pdf, pred_pdf), axis=1)
def anomaly(
    pdf: pd.DataFrame,
    index_cols: List[str],
    time_col: str,
    train_test_split: Union[int, str],
    model: Union[DetectorBase, dict],
    predict_on_train: bool = False,
) -> pd.DataFrame:
    """
    Pyspark pandas UDF for performing anomaly detection.
    Should be called on a pyspark dataframe grouped by time series ID, i.e. by ``index_cols``.

    :param pdf: The ``pandas.DataFrame`` containing the training and testing data. Should be a single time series.
    :param index_cols: The list of column names used to index all the time series in the dataset. Not used for modeling.
    :param time_col: The name of the column containing the timestamps.
    :param train_test_split: The time at which the testing data starts.
    :param model: The model (or model ``dict``) we are using to predict anomaly scores.
    :param predict_on_train: Whether to return the model's prediction on the training data.
    :return: A ``pandas.DataFrame`` with the anomaly scores on the test data.
        Columns are ``[*index_cols, time_col, "anom_score"]``.
    """
    # Treat the internal time series ID column as part of the index if present.
    if TSID_COL_NAME in pdf.columns and TSID_COL_NAME not in index_cols:
        index_cols = index_cols + [TSID_COL_NAME]

    # Convert the time-sorted dataframe into a Merlion time series.
    pdf = pdf.sort_values(by=time_col)
    ts = TimeSeries.from_pd(pdf.drop(columns=index_cols).set_index(time_col))

    # Instantiate the anomaly detection model.
    model = instantiate_or_copy_model(model or {"name": "DefaultDetector"})
    if not isinstance(model, DetectorBase):
        raise TypeError(f"Expected `model` to be an instance of DetectorBase, but got {model}.")

    # Train on data before the split & predict anomaly scores on data after it.
    train, test = ts.bisect(train_test_split, t_in_left=False)
    try:
        train_pred = model.post_rule(model.train(train)).to_pd()
        pred = model.get_anomaly_label(test).to_pd()
    except Exception:
        row0 = pdf.iloc[0]
        idx = ", ".join(f"{k} = {row0[k]}" for k in index_cols)
        logger.warning(
            f"Model {type(model).__name__} threw an exception on ({idx}). {traceback.format_exc()}"
            f"Trying StatThreshold model instead.\n"
        )
        # First fallback: a simple statistical threshold detector with the same threshold rule.
        try:
            model = ModelFactory.create(name="StatThreshold", target_seq_index=0, threshold=model.threshold)
            train_pred = model.post_rule(model.train(train)).to_pd()
            pred = model.get_anomaly_label(test).to_pd()
        except Exception:
            # Last resort: return a constant anomaly score of 0 everywhere.
            logger.warning(
                f"Model StatThreshold threw an exception on ({idx}).{traceback.format_exc()}"
                f"Returning anomaly score = 0 as a placeholder.\n"
            )
            train_pred = pd.DataFrame(0, index=to_pd_datetime(train.time_stamps), columns=["anom_score"])
            pred = pd.DataFrame(0, index=to_pd_datetime(test.time_stamps), columns=["anom_score"])

    if predict_on_train and train_pred is not None:
        pred = pd.concat((train_pred, pred))

    # Move the timestamps into a regular column & prepend the index columns.
    pred.index.name = time_col
    pred.reset_index(inplace=True)
    index_pdf = pd.concat([pdf[index_cols].iloc[:1]] * len(pred), ignore_index=True)
    return pd.concat((index_pdf, pred), axis=1)
def reconciliation(pdf: pd.DataFrame, hier_matrix: np.ndarray, target_col: str):
    """
    Pyspark pandas UDF implementing minimum-trace hierarchical time series reconciliation, as described by
    `Wickramasuriya et al. 2018 <https://robjhyndman.com/papers/mint.pdf>`__.
    Should be called on a pyspark dataframe grouped by timestamp. Pyspark implementation of
    `merlion.utils.hts.minT_reconciliation`.

    :param pdf: A ``pandas.DataFrame`` containing forecasted values & standard errors from ``m`` time series at a single
        timestamp. Each time series should be indexed by `TSID_COL_NAME`. The first ``n`` time series (in order of ID)
        correspond to leaves of the hierarchy, while the remaining ``m - n`` are weighted sums of the first ``n``.
        This dataframe can be produced by calling `forecast` on the dataframe produced by
        `merlion.spark.dataset.create_hier_dataset`.
    :param hier_matrix: A ``m``-by-``n`` matrix describing how the hierarchy is aggregated. The value of the ``k``-th
        time series is ``np.dot(hier_matrix[k], pdf[:n])``. This matrix can be produced by
        `merlion.spark.dataset.create_hier_dataset`.
    :param target_col: The name of the column whose value we wish to forecast.
    :return: A ``pandas.DataFrame`` which replaces the original forecasts & errors with reconciled forecasts & errors.

    .. note::
        Time series reconciliation is skipped if the given timestamp has missing values for any of the
        time series. This can happen for training timestamps if the training time series has missing data and
        `forecast` is called with ``predict_on_train=true``.
    """
    m, n = hier_matrix.shape
    assert len(pdf) <= m >= n
    # Skip reconciliation when any time series is missing a forecast at this timestamp.
    if len(pdf) < m:
        return pdf
    # The first n rows of the hierarchy matrix must select the leaves directly.
    assert (hier_matrix[:n] == np.eye(n)).all()
    pdf = pdf.sort_values(by=TSID_COL_NAME)

    # Build the diagonal error-weight matrix W (m by m). If a series has no standard error,
    # fill it in proportionally to how many leaves it aggregates.
    err_col = f"{target_col}_err"
    agg_coefs = hier_matrix.sum(axis=1)
    errs = pdf[err_col].values if err_col in pdf.columns else np.full(m, np.nan)
    nan_errs = np.isnan(errs)
    if nan_errs.all():
        W = np.diag(agg_coefs)
    else:
        if nan_errs.any():
            errs[nan_errs] = np.nanmean(errs / agg_coefs) * agg_coefs[nan_errs]
        W = np.diag(errs)

    # J selects the n leaf series; U (U.T in the paper) spans the aggregation constraints.
    J = np.concatenate((np.eye(n), np.zeros((n, m - n))), axis=1)
    U = np.concatenate((-hier_matrix[n:], np.eye(m - n)), axis=1)
    # n-by-m projection producing coherent leaf forecasts
    P = J - ((J @ W) @ U.T) @ (np.linalg.pinv(U @ W @ U.T) @ U)

    # Reconciled forecasts for every node in the hierarchy
    rec = hier_matrix @ (P @ pdf[target_col].values)
    if nan_errs.all():
        rec_errs = errs
    else:
        # P * W.diagonal() equals P @ W since W is diagonal, but is cheaper to compute
        rec_errs = hier_matrix @ (P * W.diagonal())  # m by m
        # Row norms are a faster way to get (rec_errs @ rec_errs.T).diagonal()
        rec_errs = np.sqrt(np.sum(rec_errs**2, axis=1))

    # Replace the original forecasts & errors with the reconciled ones
    reconciled = pd.DataFrame(np.stack([rec, rec_errs], axis=1), index=pdf.index, columns=[target_col, err_col])
    return pd.concat((pdf.drop(columns=[target_col, err_col]), reconciled), axis=1)
import logging
from typing import List, Tuple
import numpy as np
from scipy.stats import norm
from scipy.interpolate import PchipInterpolator
from merlion.post_process.base import PostRuleBase
from merlion.utils import TimeSeries, UnivariateTimeSeries
logger = logging.getLogger(__name__)
class AnomScoreCalibrator(PostRuleBase):
    """
    Learns a monotone function which reshapes an input sequence of anomaly scores,
    to follow a standard normal distribution. This makes the anomaly scores from
    many diverse models interpretable as z-scores.
    """

    def __init__(self, max_score: float, abs_score: bool = True, anchors: List[Tuple[float, float]] = None):
        """
        :param max_score: the maximum possible uncalibrated score
        :param abs_score: whether to consider the absolute values of the
            anomaly scores, rather than the raw value.
        :param anchors: a sequence of (x, y) pairs mapping an uncalibrated
            anomaly score to a calibrated anomaly score. Optional, as this
            will be set by `AnomScoreCalibrator.train`.
        """
        self.max_score = max_score
        self.abs_score = abs_score
        self.anchors = anchors

    @property
    def anchors(self):
        """
        :return: a sequence of (x, y) pairs mapping an uncalibrated
            anomaly score to a calibrated anomaly score.
        """
        return self._anchors

    @anchors.setter
    def anchors(self, anchors):
        # Fewer than 2 anchors cannot define an interpolator, so treat that as "untrained".
        if anchors is None or len(anchors) < 2:
            self._anchors = None
            self.interpolator = None
        else:
            self._anchors = anchors
            # Monotone cubic (PCHIP) interpolation preserves the ordering of anomaly scores.
            self.interpolator = PchipInterpolator(*zip(*anchors))

    def train(self, anomaly_scores: TimeSeries, retrain_calibrator=False) -> TimeSeries:
        """
        :param anomaly_scores: `TimeSeries` of raw anomaly scores that we will use
            to train the calibrator.
        :param retrain_calibrator: Whether to re-train the calibrator on a new
            sequence of anomaly scores, if it has already been trained once.
            In practice, we find better results if this is ``False``.
        """
        if self.interpolator is not None and not retrain_calibrator:
            return self(anomaly_scores)
        x = anomaly_scores.to_pd().values[:, 0]
        if self.abs_score:
            x = np.abs(x)
        # Match empirical quantiles of the raw scores to the corresponding quantiles
        # of a standard normal (restricted to z >= 0 when abs_score is True).
        targets = [0, 0, 0.5, 1, 1.5, 2]
        inputs = np.quantile(x, 2 * norm.cdf(targets) - 1).tolist()
        # ub is an upper bound on E[max(X_1, ..., X_n)], for X_i ~ N(0, 1)
        ub = self.expected_max(len(x), ub=True)
        x_max = x.max()
        if self.max_score < x_max:
            logger.warning(
                f"Obtained max score of {x_max:.2f}, but self.max_score "
                f"is only {self.max_score:.2f}. Updating self.max_score "
                f"to {x_max * 2:.2f}."
            )
            self.max_score = x_max * 2
        # Anchor the upper tail: map the observed max near its expected z-score,
        # and extend one more anchor so larger scores still calibrate monotonically.
        if ub > 4:
            targets.append(ub)
            inputs.append(x_max)
            targets.append(ub + 1)
            inputs.append(min(self.max_score, 2 * x_max))
        else:
            targets.append(5)
            inputs.append(min(self.max_score, 2 * x_max))
        targets = np.asarray(targets)
        inputs = np.asarray(inputs)
        # Drop (near-)duplicate input anchors, which PCHIP cannot handle.
        valid = np.concatenate(([True], np.abs(inputs[1:] - inputs[:-1]) > 1e-8))
        self.anchors = list(zip(inputs[valid], targets[valid]))
        return self(anomaly_scores)

    @staticmethod
    def expected_max(n, ub=False):
        """
        :meta private:
        """
        if ub:
            return np.sqrt(2 * np.log(n))
        # Asymptotic approximation of E[max of n standard normals] (Euler–Mascheroni correction)
        g = np.euler_gamma
        return (1 - g) * norm.ppf(1 - 1 / n) + g * norm.ppf(1 - 1 / np.e / n)

    def __call__(self, anomaly_scores: TimeSeries) -> TimeSeries:
        if self.interpolator is None:
            return anomaly_scores
        x = anomaly_scores.to_pd().values[:, 0]
        # Beyond the last anchor, extrapolate linearly using the interpolator's end slope.
        b = self.anchors[-1][0]
        m = self.interpolator.derivative()(self.anchors[-1][0])
        if self.abs_score:
            # Calibrate |x| and re-apply the original sign.
            vals = np.maximum(self.interpolator(np.abs(x)), 0) * np.sign(x)
            idx = np.abs(x) > b
            if idx.any():
                sub = x[idx]
                vals[idx] = np.sign(sub) * ((np.abs(sub) - b) * m + self.interpolator(b))
        else:
            vals = self.interpolator(x)
            idx = x > b
            if idx.any():
                vals[idx] = (x[idx] - b) * m + self.interpolator(b)
        return UnivariateTimeSeries(anomaly_scores.time_stamps, vals, anomaly_scores.names[0]).to_ts()
from bisect import bisect_left
from enum import Enum
from functools import partial
from typing import Tuple, Union
import numpy as np
import pandas as pd
from merlion.evaluate.base import EvaluatorBase, EvaluatorConfig
from merlion.utils import TimeSeries, UnivariateTimeSeries
from merlion.utils.misc import call_with_accepted_kwargs
def scaled_sigmoid(x, scale=2.5):
    """
    Recency-weighted scoring curve used for NAB-style scoring: +1 for x < 0,
    -1 for x > 2, and a scaled tanh ramp in between.

    :meta private:
    """
    ramp = np.tanh(scale * (1 - x)) / np.tanh(scale)
    ramp = ramp.reshape(-1)
    return np.where(x > 2.0, -1.0, np.where(x < 0, 1.0, ramp))
class ScoreType(Enum):
    """
    The algorithm to use to compute true/false positives/negatives. See the technical report
    for more details on each score type. Merlion's preferred default is revised point-adjusted.
    """

    # Each time step is scored independently against the ground truth label.
    Pointwise = 0
    # A single detection inside an anomalous window counts every point of that window as detected.
    PointAdjusted = 1
    # True positives & false negatives are counted per anomalous window rather than per point.
    RevisedPointAdjusted = 2
class TSADScoreAccumulator:
    """
    Accumulator which maintains summary statistics describing an anomaly
    detection algorithm's performance. Can be used to compute many different
    time series anomaly detection metrics.
    """

    def __init__(
        self,
        num_tp_anom=0,
        num_tp_pointwise=0,
        num_tp_point_adj=0,
        num_fn_anom=0,
        num_fn_pointwise=0,
        num_fn_point_adj=0,
        num_fp=0,
        num_tn=0,
        tp_score=0.0,
        fp_score=0.0,
        tp_detection_delays=None,
        tp_anom_durations=None,
        anom_durations=None,
    ):
        # True positive / false negative counts under each scoring convention (see `ScoreType`):
        # whole anomalous windows (anom), individual points (pointwise), and point-adjusted points (point_adj).
        self.num_tp_anom = num_tp_anom
        self.num_tp_pointwise = num_tp_pointwise
        self.num_tp_point_adj = num_tp_point_adj
        self.num_fn_anom = num_fn_anom
        self.num_fn_pointwise = num_fn_pointwise
        self.num_fn_point_adj = num_fn_point_adj
        self.num_fp = num_fp
        self.num_tn = num_tn
        # Recency-weighted credit / penalty totals used by the NAB score.
        self.tp_score = tp_score
        self.fp_score = fp_score
        # Per-anomaly delays & durations (in seconds), used for the time-based metrics below.
        self.tp_detection_delays = tp_detection_delays or []
        self.tp_anom_durations = tp_anom_durations or []
        self.anom_durations = anom_durations or []

    def __add__(self, acc):
        # Merge two accumulators by summing every statistic (ints add; delay/duration lists concatenate).
        kwargs = {k: getattr(self, k) + getattr(acc, k) for k in self.__dict__}
        return TSADScoreAccumulator(**kwargs)

    def precision(self, score_type: ScoreType = ScoreType.RevisedPointAdjusted):
        """Precision = TP / (TP + FP) under the given `ScoreType` (0.0 if no predictions)."""
        if score_type is ScoreType.Pointwise:
            tp, fp = self.num_tp_pointwise, self.num_fp
        elif score_type is ScoreType.PointAdjusted:
            tp, fp = self.num_tp_point_adj, self.num_fp
        elif score_type is ScoreType.RevisedPointAdjusted:
            tp, fp = self.num_tp_anom, self.num_fp
        else:
            raise NotImplementedError(f"Cannot compute precision for score_type={score_type.name}")
        return 0.0 if tp + fp == 0 else tp / (tp + fp)

    def recall(self, score_type: ScoreType = ScoreType.RevisedPointAdjusted):
        """Recall = TP / (TP + FN) under the given `ScoreType` (0.0 if no ground-truth anomalies)."""
        if score_type is ScoreType.Pointwise:
            tp, fn = self.num_tp_pointwise, self.num_fn_pointwise
        elif score_type is ScoreType.PointAdjusted:
            tp, fn = self.num_tp_point_adj, self.num_fn_point_adj
        elif score_type is ScoreType.RevisedPointAdjusted:
            tp, fn = self.num_tp_anom, self.num_fn_anom
        else:
            raise NotImplementedError(f"Cannot compute recall for score_type={score_type.name}")
        return 0.0 if tp + fn == 0 else tp / (tp + fn)

    def f1(self, score_type: ScoreType = ScoreType.RevisedPointAdjusted):
        """
        Harmonic mean of precision & recall. ``score_type`` may also be a
        ``(precision_score_type, recall_score_type)`` pair to mix conventions.
        """
        if isinstance(score_type, tuple) and len(score_type) == 2:
            prec_score_type, rec_score_type = score_type
        else:
            prec_score_type = rec_score_type = score_type
        p = self.precision(prec_score_type)
        r = self.recall(rec_score_type)
        return 0.0 if p == 0 or r == 0 else 2 * p * r / (p + r)

    def f_beta(self, score_type: ScoreType = ScoreType.RevisedPointAdjusted, beta=1.0):
        """
        F-beta score (recall weighted ``beta`` times as much as precision).
        ``score_type`` may also be a ``(precision_score_type, recall_score_type)`` pair.
        """
        if isinstance(score_type, tuple) and len(score_type) == 2:
            prec_score_type, rec_score_type = score_type
        else:
            prec_score_type = rec_score_type = score_type
        p = self.precision(prec_score_type)
        r = self.recall(rec_score_type)
        return 0.0 if p == 0 or r == 0 else (1 + beta**2) * p * r / (beta**2 * p + r)

    def mean_time_to_detect(self):
        """Mean delay (as a ``pd.Timedelta``) between an anomaly's start and its first detection."""
        t = np.mean(self.tp_detection_delays) if self.tp_detection_delays else 0
        return pd.Timedelta(seconds=int(t))

    def mean_detected_anomaly_duration(self):
        """Mean duration (as a ``pd.Timedelta``) of the anomalies which were detected."""
        t = np.mean(self.tp_anom_durations) if self.tp_anom_durations else 0
        return pd.Timedelta(seconds=int(t))

    def mean_anomaly_duration(self):
        """Mean duration (as a ``pd.Timedelta``) of all ground-truth anomalies."""
        t = np.mean(self.anom_durations) if self.anom_durations else 0
        return pd.Timedelta(seconds=int(t))

    def nab_score(self, tp_weight=1.0, fp_weight=0.11, fn_weight=1.0, tn_weight=0.0):
        """
        Computes the NAB score, given the accumulated performance metrics and
        the specified weights for different types of errors. The score is
        described in section II.C of https://arxiv.org/pdf/1510.03336.pdf.
        At a high level, this score is a cost-sensitive, recency-weighted
        accuracy measure for time series anomaly detection.

        NAB uses the following profiles for benchmarking
        (https://github.com/numenta/NAB/blob/master/config/profiles.json):

        - standard (default)
            - tp_weight = 1.0, fp_weight = 0.11, fn_weight = 1.0
        - reward low false positive rate
            - tp_weight = 1.0, fp_weight = 0.22, fn_weight = 1.0
        - reward low false negative rate
            - tp_weight = 1.0, fp_weight = 0.11, fn_weight = 2.0

        Note that tn_weight is ignored.

        :param tp_weight: relative weight of true positives.
        :param fp_weight: relative weight of false positives.
        :param fn_weight: relative weight of false negatives.
        :param tn_weight: relative weight of true negatives. Ignored, but
            included for completeness.
        :return: NAB score
        """
        # null: label everything as negative
        null_score = -(self.num_tp_anom + self.num_fn_anom) * fn_weight
        # perfect: detect all anomalies as early as possible, no false positives
        perfect_score = (self.num_tp_anom + self.num_fn_anom) * tp_weight
        # our score is based on our model's performance; normalize so null -> 0 and perfect -> 1
        score = self.tp_score * tp_weight - self.fp_score * fp_weight - self.num_fn_anom * fn_weight
        return (score - null_score) / (perfect_score - null_score + 1e-8)
def accumulate_tsad_score(
    ground_truth: Union[TimeSeries, UnivariateTimeSeries],
    predict: Union[TimeSeries, UnivariateTimeSeries],
    max_early_sec=None,
    max_delay_sec=None,
    metric=None,
) -> Union[TSADScoreAccumulator, float]:
    """
    Computes the components required to compute multiple different types of
    performance metrics for time series anomaly detection.

    :param ground_truth: A time series indicating whether each time step
        corresponds to an anomaly.
    :param predict: A time series with the anomaly score predicted for each
        time step. Detections correspond to nonzero scores.
    :param max_early_sec: The maximum amount of time (in seconds) the anomaly
        detection is allowed to occur before the actual incidence. If None, no
        early detections are allowed. Note that None is the same as 0.
    :param max_delay_sec: The maximum amount of time (in seconds) the anomaly
        detection is allowed to occur after the start of the actual incident
        (but before the end of the actual incident). If None, we allow any
        detection during the duration of the incident. Note that None differs
        from 0 because 0 means that we only permit detections that are early
        or exactly on time!
    :param metric: A function which takes a `TSADScoreAccumulator` as input and
        returns a ``float``. The `TSADScoreAccumulator` object is returned if
        ``metric`` is ``None``.
    """
    ground_truth = ground_truth.to_ts() if isinstance(ground_truth, UnivariateTimeSeries) else ground_truth
    predict = predict.to_ts() if isinstance(predict, UnivariateTimeSeries) else predict
    assert (
        ground_truth.dim == 1 and predict.dim == 1
    ), "Can only evaluate anomaly scores when ground truth and prediction are single-variable time series."
    ground_truth = ground_truth.univariates[ground_truth.names[0]]
    ts = ground_truth.np_time_stamps
    ys = ground_truth.np_values.astype(bool)
    # Indices where the ground truth label flips: each flip starts a new maximal window
    # of constant label (entirely anomalous or entirely normal).
    i_split = np.where(ys[1:] != ys[:-1])[0] + 1

    predict = predict.univariates[predict.names[0]]
    ts_pred = predict.np_time_stamps
    ys_pred = predict.np_values.astype(bool)

    t = t_prev = ts[0]
    window_is_anomaly = ys[0]
    t0_anomaly, tf_anomaly = None, None
    num_tp_pointwise, num_tp_point_adj, num_tp_anom = 0, 0, 0
    num_fn_pointwise, num_fn_point_adj, num_fn_anom = 0, 0, 0
    num_tn, num_fp = 0, 0
    tp_score, fp_score = 0.0, 0.0
    tp_detection_delays, anom_durations, tp_anom_durations = [], [], []
    for i in [*i_split, -1]:
        # i == -1 is a sentinel for the final window; +1 second ensures the final timestamp
        # falls inside the half-open range [t0, tf).
        t_next = ts[i] + int(i == -1)

        # Determine the boundaries of the window
        # Add buffer if it's anomalous, remove buffer if it's not
        t0, tf = t, t_next
        if window_is_anomaly:
            t0_anomaly, tf_anomaly = t0, tf
            if max_early_sec is not None and max_early_sec > 0:
                t0 = max(t_prev, t - max_early_sec)
            if max_delay_sec is not None and max_delay_sec > 0 and i != -1:
                tf = min(t_next, t + max_delay_sec)
        else:
            if max_delay_sec is not None and max_delay_sec > 0:
                t0 = min(t, t_prev + max_delay_sec)
            if max_early_sec is not None and max_early_sec > 0:
                tf = max(t, t_next - max_early_sec)

        # Select the predicted labels which fall within the (possibly buffered) window;
        # jf >= j0 + 1 guarantees every window covers at least one prediction.
        j0 = bisect_left(ts_pred, t0)
        jf = max(bisect_left(ts_pred, tf), j0 + 1)
        window = ys_pred[j0:jf]

        if window_is_anomaly:
            anom_durations.append(tf_anomaly - t0_anomaly)
            num_tp_pointwise += sum(y != 0 for y in window)
            num_fn_pointwise += sum(y == 0 for y in window)
            if not any(window):
                num_fn_anom += 1
                num_fn_point_adj += len(window)
            # true positives are more beneficial if they occur earlier
            else:
                num_tp_anom += 1
                num_tp_point_adj += len(window)
                t_detect = ts_pred[np.where(window)[0][0] + j0]
                tp_detection_delays.append(t_detect - t0_anomaly)
                tp_anom_durations.append(tf_anomaly - t0_anomaly)
                delay = 0 if tf - t0 == 0 else (t_detect - t0) / (tf - t0)
                tp_score += sum(scaled_sigmoid(delay))
        else:
            # false positives are more severe if they occur later
            # FIXME: false positives can be fired in data spans that are
            #        not present in the original data. Should we still
            #        count these, or should we remove them from the window?
            if any(window):
                t_fp = ts_pred[np.where(window)[0] + j0]
                num_fp += len(t_fp)
                if tf != t0:
                    delays = (t_fp - t0) / (tf - t0)
                else:
                    # Zero-width window: treat detections as maximally late.
                    # (np.inf, not np.infty -- the latter was removed in NumPy 2.0.)
                    delays = np.inf * np.ones(len(t_fp))
                fp_score += sum(scaled_sigmoid(delays))
            # do nothing for true negatives, except count them
            num_tn += sum(window == 0)

        # Advance to the next window; labels strictly alternate between windows.
        t_prev = t
        t = t_next
        window_is_anomaly = not window_is_anomaly

    score_components = TSADScoreAccumulator(
        num_tp_anom=num_tp_anom,
        num_tp_pointwise=num_tp_pointwise,
        num_tp_point_adj=num_tp_point_adj,
        num_fp=num_fp,
        num_fn_anom=num_fn_anom,
        num_fn_pointwise=num_fn_pointwise,
        num_fn_point_adj=num_fn_point_adj,
        num_tn=num_tn,
        tp_score=tp_score,
        fp_score=fp_score,
        tp_detection_delays=tp_detection_delays,
        tp_anom_durations=tp_anom_durations,
        anom_durations=anom_durations,
    )
    if metric is not None:
        return metric(score_components)
    return score_components
class TSADMetric(Enum):
    """
    Enumeration of evaluation metrics for time series anomaly detection.
    For each value, the name is the metric, and the value is a partial
    function of form ``f(ground_truth, predicted, **kwargs)``
    """

    # Average time elapsed between the start of each detected anomaly and the
    # first alarm the model fires for it.
    MeanTimeToDetect = partial(accumulate_tsad_score, metric=TSADScoreAccumulator.mean_time_to_detect)
    # Revised point-adjusted metrics (default)
    F1 = partial(
        accumulate_tsad_score, metric=partial(TSADScoreAccumulator.f1, score_type=ScoreType.RevisedPointAdjusted)
    )
    Precision = partial(
        accumulate_tsad_score, metric=partial(TSADScoreAccumulator.precision, score_type=ScoreType.RevisedPointAdjusted)
    )
    Recall = partial(
        accumulate_tsad_score, metric=partial(TSADScoreAccumulator.recall, score_type=ScoreType.RevisedPointAdjusted)
    )
    # Pointwise metrics (each timestamp is scored independently)
    PointwiseF1 = partial(
        accumulate_tsad_score, metric=partial(TSADScoreAccumulator.f1, score_type=ScoreType.Pointwise)
    )
    PointwisePrecision = partial(
        accumulate_tsad_score, metric=partial(TSADScoreAccumulator.precision, score_type=ScoreType.Pointwise)
    )
    PointwiseRecall = partial(
        accumulate_tsad_score, metric=partial(TSADScoreAccumulator.recall, score_type=ScoreType.Pointwise)
    )
    # Point-adjusted metrics
    PointAdjustedF1 = partial(
        accumulate_tsad_score, metric=partial(TSADScoreAccumulator.f1, score_type=ScoreType.PointAdjusted)
    )
    PointAdjustedPrecision = partial(
        accumulate_tsad_score, metric=partial(TSADScoreAccumulator.precision, score_type=ScoreType.PointAdjusted)
    )
    PointAdjustedRecall = partial(
        accumulate_tsad_score, metric=partial(TSADScoreAccumulator.recall, score_type=ScoreType.PointAdjusted)
    )
    # NAB scores. The "LowFN" variant weights false negatives more heavily
    # (fn_weight=2.0); the "LowFP" variant weights false positives more
    # heavily (fp_weight=0.22, i.e. a smaller discount on the FP penalty).
    NABScore = partial(accumulate_tsad_score, metric=TSADScoreAccumulator.nab_score)
    NABScoreLowFN = partial(accumulate_tsad_score, metric=partial(TSADScoreAccumulator.nab_score, fn_weight=2.0))
    NABScoreLowFP = partial(accumulate_tsad_score, metric=partial(TSADScoreAccumulator.nab_score, fp_weight=0.22))
    # Argus metrics: F-beta scores (beta=2 and beta=5) on the revised
    # point-adjusted counts.
    F2 = partial(
        accumulate_tsad_score,
        metric=partial(TSADScoreAccumulator.f_beta, score_type=ScoreType.RevisedPointAdjusted, beta=2.0),
    )
    F5 = partial(
        accumulate_tsad_score,
        metric=partial(TSADScoreAccumulator.f_beta, score_type=ScoreType.RevisedPointAdjusted, beta=5.0),
    )
class TSADEvaluatorConfig(EvaluatorConfig):
    """
    Configuration object governing a `TSADEvaluator`.
    """

    def __init__(self, max_early_sec: float = None, max_delay_sec: float = None, **kwargs):
        """
        :param max_early_sec: the maximum amount of time (in seconds) by which an
            alarm may precede the true start of an anomaly and still count as
            detecting it.
        :param max_delay_sec: the maximum amount of time (in seconds) after the true
            start of an anomaly within which an alarm must fire for that anomaly to
            count as detected.
        """
        super().__init__(**kwargs)
        self.max_delay_sec = max_delay_sec
        self.max_early_sec = max_early_sec
class TSADEvaluator(EvaluatorBase):
    """
    Simulates the live deployment of an anomaly detection model.
    """

    # Config class paired with this evaluator (checked by EvaluatorBase.__init__).
    config_class = TSADEvaluatorConfig

    def __init__(self, model, config):
        """
        :param model: the anomaly detector to evaluate. Must be a `DetectorBase`.
        :param config: the `TSADEvaluatorConfig` governing the evaluation.
        """
        # NOTE: imported locally rather than at module level, presumably to
        # avoid a circular import.
        from merlion.models.anomaly.base import DetectorBase

        assert isinstance(model, DetectorBase)
        super().__init__(model=model, config=config)

    @property
    def max_early_sec(self):
        # Convenience passthrough to the config.
        return self.config.max_early_sec

    @property
    def max_delay_sec(self):
        # Convenience passthrough to the config.
        return self.config.max_delay_sec

    def _call_model(
        self, time_series: TimeSeries, time_series_prev: TimeSeries, exog_data: TimeSeries = None
    ) -> TimeSeries:
        # Score ``time_series`` conditioned on the historical context
        # ``time_series_prev``, forwarding only the kwargs that the model's
        # ``get_anomaly_score`` signature actually accepts.
        kwargs = dict(time_series=time_series, time_series_prev=time_series_prev, exog_data=exog_data)
        return call_with_accepted_kwargs(self.model.get_anomaly_score, **kwargs)

    def default_retrain_kwargs(self) -> dict:
        # Mid-evaluation retrainings skip re-training the post-rule & calibrator
        # (metric=None, unsup_quantile=None, retrain_calibrator=False).
        # Ensembles additionally need one such config per ensemble member.
        from merlion.models.ensemble.anomaly import DetectorEnsemble, DetectorEnsembleTrainConfig
        no_train = dict(metric=None, unsup_quantile=None, retrain_calibrator=False)
        if isinstance(self.model, DetectorEnsemble):
            train_config = DetectorEnsembleTrainConfig(per_model_train_configs=[no_train] * len(self.model.models))
            return {"post_rule_train_config": no_train, "train_config": train_config}
        return {"post_rule_train_config": no_train}

    def get_predict(
        self,
        train_vals: TimeSeries,
        test_vals: TimeSeries,
        exog_data: TimeSeries = None,
        train_kwargs: dict = None,
        retrain_kwargs: dict = None,
        post_process=True,
    ) -> Tuple[TimeSeries, TimeSeries]:
        """
        Initialize the model by training it on an initial set of train data.
        Simulate real-time anomaly detection by the model, while re-training it
        at the desired frequency.

        :param train_vals: initial training data
        :param test_vals: all data where we want to get the model's predictions
            and compare it to the ground truth
        :param exog_data: any exogenous data (only used for some models)
        :param train_kwargs: dict of keyword arguments we want to use for the
            initial training process. Typically, you will want to provide the
            key "anomaly_labels" here, if you have training data with labeled
            anomalies, as well as the key "post_rule_train_config", if you want
            to use a custom training config for the model's post-rule.
        :param retrain_kwargs: dict of keyword arguments we want to use for all
            subsequent retrainings. Typically, you will not supply any this
            argument.
        :param post_process: whether to apply the model's post-rule on the
            returned results.
        :return: ``(train_result, result)``. ``train_result`` is a `TimeSeries`
            of the model's anomaly scores on ``train_vals``. ``result`` is a
            `TimeSeries` of the model's anomaly scores on ``test_vals``.
        """
        train_result, result = super().get_predict(
            train_vals=train_vals,
            test_vals=test_vals,
            exog_data=exog_data,
            train_kwargs=train_kwargs,
            retrain_kwargs=retrain_kwargs,
        )
        if post_process:
            # Apply the model's post-rule to the raw anomaly scores.
            train_result = self.model.post_rule(train_result)
            # ``result`` is None when no test data was supplied.
            result = None if result is None else self.model.post_rule(result)
        return train_result, result

    def evaluate(
        self, ground_truth: TimeSeries, predict: TimeSeries, metric: TSADMetric = None
    ) -> Union[TSADScoreAccumulator, float]:
        """
        :param ground_truth: `TimeSeries` of ground truth anomaly labels
        :param predict: `TimeSeries` of predicted anomaly scores
        :param metric: the `TSADMetric` we wish to evaluate.
        :return: the value of the evaluation ``metric``, if one is given. A
            `TSADScoreAccumulator` otherwise.
        """
        if metric is not None:
            assert isinstance(metric, TSADMetric)
            # Each TSADMetric's value is a partial f(ground_truth, predict, **kwargs).
            return metric.value(
                ground_truth, predict, max_early_sec=self.max_early_sec, max_delay_sec=self.max_delay_sec
            )
        # No metric given: return the full score accumulator so the caller may
        # compute any metric later.
        return accumulate_tsad_score(
            ground_truth=ground_truth,
            predict=predict,
            metric=None,
            max_early_sec=self.max_early_sec,
            max_delay_sec=self.max_delay_sec,
        )
from enum import Enum
from functools import partial
from typing import List, Union, Tuple
import warnings
import numpy as np
import pandas as pd
from merlion.evaluate.base import EvaluatorBase, EvaluatorConfig
from merlion.models.forecast.base import ForecasterBase
from merlion.utils import TimeSeries, UnivariateTimeSeries
from merlion.utils.resample import to_offset
class ForecastScoreAccumulator:
    """
    Accumulator which maintains summary statistics describing a forecasting
    algorithm's performance. Can be used to compute many different forecasting metrics.
    """

    def __init__(
        self,
        ground_truth: Union[UnivariateTimeSeries, TimeSeries],
        predict: Union[UnivariateTimeSeries, TimeSeries],
        insample: Union[UnivariateTimeSeries, TimeSeries] = None,
        periodicity: int = 1,
        ub: TimeSeries = None,
        lb: TimeSeries = None,
        target_seq_index: int = None,
    ):
        """
        :param ground_truth: ground truth time series
        :param predict: predicted truth time series
        :param insample (optional): time series used for training model. This value is used for computing MASE, MSIS
        :param periodicity (optional): periodicity. m=1 indicates the non-seasonal time series,
            whereas m>1 indicates seasonal time series. This value is used for computing MASE, MSIS.
        :param ub (optional): upper bound of 95% prediction interval. This value is used for computing MSIS
        :param lb (optional): lower bound of 95% prediction interval. This value is used for computing MSIS
        :param target_seq_index (optional): the index of the target sequence, for multivariate.
        """
        # Normalize all inputs to TimeSeries.
        ground_truth = TimeSeries.from_pd(ground_truth)
        predict = TimeSeries.from_pd(predict)
        insample = TimeSeries.from_pd(insample)
        # Restrict the ground truth to the time span covered by the prediction.
        t0, tf = predict.t0, predict.tf
        ground_truth = ground_truth.window(t0, tf, include_tf=True).align()
        if target_seq_index is not None:
            # Multivariate case: score only the designated target univariate.
            ground_truth = ground_truth.univariates[ground_truth.names[target_seq_index]].to_ts()
            if insample is not None:
                insample = insample.univariates[insample.names[target_seq_index]].to_ts()
        else:
            assert ground_truth.dim == 1 and (
                insample is None or insample.dim == 1
            ), "Expected to receive either univariate ground truth time series or non-None target_seq_index"
        self.ground_truth = ground_truth
        # Resample the prediction onto the ground truth's timestamps so the two
        # series are directly comparable point-by-point.
        self.predict = predict.align(reference=ground_truth.time_stamps)
        self.insample = insample
        self.periodicity = periodicity
        self.ub = ub
        self.lb = lb
        self.target_seq_index = target_seq_index

    def check_before_eval(self):
        """Validate that prediction and ground truth are comparable."""
        # Make sure time series is univariate
        assert self.predict.dim == self.ground_truth.dim == 1
        # Make sure the timestamps of preds and targets are identical
        assert self.predict.time_stamps == self.ground_truth.time_stamps

    def mae(self):
        """
        Mean Absolute Error (MAE)

        For ground truth time series :math:`y` and predicted time series :math:`\\hat{y}`
        of length :math:`T`, it is computed as

        .. math:: \\frac{1}{T}\\sum_{t=1}^T{(|y_t - \\hat{y}_t|)}.
        """
        self.check_before_eval()
        predict_values = self.predict.univariates[self.predict.names[0]].np_values
        ground_truth_values = self.ground_truth.univariates[self.ground_truth.names[0]].np_values
        return np.mean(np.abs(ground_truth_values - predict_values))

    def marre(self):
        """
        Mean Absolute Ranged Relative Error (MARRE)

        For ground truth time series :math:`y` and predicted time series :math:`\\hat{y}`
        of length :math:`T`, it is computed as

        .. math:: 100 \\cdot \\frac{1}{T} \\sum_{t=1}^{T} {\\left| \\frac{y_t - \\hat{y}_t} {\\max_t{y_t} -
            \\min_t{y_t}} \\right|}.
        """
        self.check_before_eval()
        predict_values = self.predict.univariates[self.predict.names[0]].np_values
        ground_truth_values = self.ground_truth.univariates[self.ground_truth.names[0]].np_values
        # A constant ground truth would make the range (the divisor) zero.
        assert ground_truth_values.max() > ground_truth_values.min()
        true_range = ground_truth_values.max() - ground_truth_values.min()
        return 100.0 * np.mean(np.abs((ground_truth_values - predict_values) / true_range))

    def rmse(self):
        """
        Root Mean Squared Error (RMSE)

        For ground truth time series :math:`y` and predicted time series :math:`\\hat{y}`
        of length :math:`T`, it is computed as

        .. math:: \\sqrt{\\frac{1}{T}\\sum_{t=1}^T{(y_t - \\hat{y}_t)^2}}.
        """
        self.check_before_eval()
        predict_values = self.predict.univariates[self.predict.names[0]].np_values
        ground_truth_values = self.ground_truth.univariates[self.ground_truth.names[0]].np_values
        return np.sqrt(np.mean((ground_truth_values - predict_values) ** 2))

    def smape(self):
        """
        symmetric Mean Absolute Percentage Error (sMAPE). For ground truth time series :math:`y`
        and predicted time series :math:`\\hat{y}` of length :math:`T`, it is computed as

        .. math::
            200 \\cdot \\frac{1}{T}
            \\sum_{t=1}^{T}{\\frac{\\left| y_t - \\hat{y}_t \\right|}{\\left| y_t \\right|
            + \\left| \\hat{y}_t \\right|}}.
        """
        self.check_before_eval()
        predict_values = self.predict.univariates[self.predict.names[0]].np_values
        ground_truth_values = self.ground_truth.univariates[self.ground_truth.names[0]].np_values
        errors = np.abs(ground_truth_values - predict_values)
        scale = np.abs(ground_truth_values) + np.abs(predict_values)
        # Make sure the divisor is not close to zero at each timestamp
        if (scale < 1e-8).any():
            warnings.warn("Some values very close to 0, sMAPE might not be estimated accurately.")
        # The 1e-8 epsilon guards against division by zero (it slightly biases
        # every term, but negligibly so for non-degenerate data).
        return np.mean(200.0 * errors / (scale + 1e-8))

    def rmspe(self):
        """
        Root Mean Squared Percent Error (RMSPE)

        For ground truth time series :math:`y` and predicted time series :math:`\\hat{y}`
        of length :math:`T`, it is computed as

        .. math:: 100 \\cdot \\sqrt{\\frac{1}{T}\\sum_{t=1}^T\\frac{(y_t - \\hat{y}_t)}{y_t}^2}.
        """
        self.check_before_eval()
        predict_values = self.predict.univariates[self.predict.names[0]].np_values
        ground_truth_values = self.ground_truth.univariates[self.ground_truth.names[0]].np_values
        if (ground_truth_values < 1e-8).any():
            warnings.warn("Some values very close to 0, RMSPE might not be estimated accurately.")
        # NOTE(review): unlike sMAPE/MASE, no epsilon is added to the divisor
        # here, so exact zeros in the ground truth yield inf/nan.
        errors = ground_truth_values - predict_values
        return 100 * np.sqrt(np.mean(np.square(errors / ground_truth_values)))

    def mase(self):
        """
        Mean Absolute Scaled Error (MASE)

        For ground truth time series :math:`y` and predicted time series :math:`\\hat{y}`
        of length :math:`T`. In sample time series :math:`\\hat{x}` of length :math:`N`
        and periodicity :math:`m` it is computed as

        .. math::
            \\frac{1}{T}\\cdot\\frac{\\sum_{t=1}^{T}\\left| y_t
            - \\hat{y}_t \\right|}{\\frac{1}{N-m}\\sum_{t=m+1}^{N}\\left| x_t - x_{t-m} \\right|}.
        """
        self.check_before_eval()
        assert self.insample.dim == 1
        insample_values = self.insample.univariates[self.insample.names[0]].np_values
        predict_values = self.predict.univariates[self.predict.names[0]].np_values
        ground_truth_values = self.ground_truth.univariates[self.ground_truth.names[0]].np_values
        errors = np.abs(ground_truth_values - predict_values)
        # Denominator: mean absolute error of a seasonal-naive forecast (lag m)
        # on the in-sample data. ``scale`` is a numpy scalar, so ``.any()``
        # below reduces to a simple truthiness check of one comparison.
        scale = np.mean(np.abs(insample_values[self.periodicity :] - insample_values[: -self.periodicity]))
        # Make sure the divisor is not close to zero at each timestamp
        if (scale < 1e-8).any():
            warnings.warn("Some values very close to 0, MASE might not be estimated accurately.")
        return np.mean(errors / (scale + 1e-8))

    def msis(self):
        """
        Mean Scaled Interval Score (MSIS)
        This metric evaluates the quality of 95% prediction intervals.

        For ground truth time series :math:`y` and predicted time series :math:`\\hat{y}`
        of length :math:`T`, the lower and upper bounds of the prediction intervals
        :math:`L` and :math:`U`. Given in sample time series :math:`\\hat{x}` of length :math:`N`
        and periodicity :math:`m`, it is computed as

        .. math::
            \\frac{1}{T}\\cdot\\frac{\\sum_{t=1}^{T} (U_t - L_t) + 100 \\cdot (L_t - y_t)[y_t<L_t]
            + 100\\cdot(y_t - U_t)[y_t > U_t]}{\\frac{1}{N-m}\\sum_{t=m+1}^{N}\\left| x_t - x_{t-m} \\right|}.
        """
        self.check_before_eval()
        assert self.insample.dim == 1
        assert self.lb is not None and self.ub is not None
        insample_values = self.insample.univariates[self.insample.names[0]].np_values
        lb_values = self.lb.univariates[self.lb.names[0]].np_values
        ub_values = self.ub.univariates[self.ub.names[0]].np_values
        ground_truth_values = self.ground_truth.univariates[self.ground_truth.names[0]].np_values
        # Interval width plus a penalty of 100x the excursion whenever the
        # ground truth falls outside the [lb, ub] interval.
        errors = (
            np.sum(ub_values - lb_values)
            + 100 * np.sum((lb_values - ground_truth_values)[lb_values > ground_truth_values])
            + 100 * np.sum((ground_truth_values - ub_values)[ground_truth_values > ub_values])
        )
        # Same seasonal-naive scaling as MASE; ``scale`` is a numpy scalar.
        scale = np.mean(np.abs(insample_values[self.periodicity :] - insample_values[: -self.periodicity]))
        # Make sure the divisor is not close to zero at each timestamp
        if (scale < 1e-8).any():
            warnings.warn("Some values very close to 0, MSIS might not be estimated accurately.")
        return errors / (scale + 1e-8) / len(ground_truth_values)
def accumulate_forecast_score(
    ground_truth: TimeSeries,
    predict: TimeSeries,
    insample: TimeSeries = None,
    periodicity=1,
    ub: TimeSeries = None,
    lb: TimeSeries = None,
    metric=None,
    target_seq_index=None,
) -> Union[ForecastScoreAccumulator, float]:
    """
    Accumulate summary statistics comparing ``predict`` against ``ground_truth``,
    and optionally reduce them to a single metric value.

    :param ground_truth: ground truth time series.
    :param predict: predicted time series.
    :param insample: time series the model was trained on (needed for MASE/MSIS).
    :param periodicity: seasonal periodicity ``m`` (used by MASE/MSIS).
    :param ub: upper bound of the 95% prediction interval (used by MSIS).
    :param lb: lower bound of the 95% prediction interval (used by MSIS).
    :param metric: an unbound `ForecastScoreAccumulator` method to apply to the
        accumulated statistics. If ``None``, the accumulator itself is returned.
    :param target_seq_index: index of the target univariate, for multivariate data.
    :return: ``metric(accumulator)`` if a metric is given, else the accumulator.
    """
    stats = ForecastScoreAccumulator(
        ground_truth=ground_truth,
        predict=predict,
        insample=insample,
        periodicity=periodicity,
        ub=ub,
        lb=lb,
        target_seq_index=target_seq_index,
    )
    if metric is None:
        return stats
    return metric(stats)
class ForecastMetric(Enum):
    """
    Enumeration of evaluation metrics for time series forecasting. For each value,
    the name is the metric, and the value is a partial function of form
    ``f(ground_truth, predict, **kwargs)``. Here, ``ground_truth`` is the
    original time series, and ``predict`` is the result returned by a
    `ForecastEvaluator`.
    """

    MAE = partial(accumulate_forecast_score, metric=ForecastScoreAccumulator.mae)
    """
    Mean Absolute Error (MAE) is formulated as:
    .. math::
        \\frac{1}{T}\\sum_{t=1}^T{(|y_t - \\hat{y}_t|)}.
    """
    MARRE = partial(accumulate_forecast_score, metric=ForecastScoreAccumulator.marre)
    """
    Mean Absolute Ranged Relative Error (MARRE) is formulated as:
    .. math::
        100 \\cdot \\frac{1}{T} \\sum_{t=1}^{T} {\\left| \\frac{y_t
        - \\hat{y}_t} {\\max_t{y_t} - \\min_t{y_t}} \\right|}.
    """
    RMSE = partial(accumulate_forecast_score, metric=ForecastScoreAccumulator.rmse)
    """
    Root Mean Squared Error (RMSE) is formulated as:
    .. math::
        \\sqrt{\\frac{1}{T}\\sum_{t=1}^T{(y_t - \\hat{y}_t)^2}}.
    """
    sMAPE = partial(accumulate_forecast_score, metric=ForecastScoreAccumulator.smape)
    """
    symmetric Mean Absolute Percentage Error (sMAPE) is formulated as:
    .. math::
        200 \\cdot \\frac{1}{T}\\sum_{t=1}^{T}{\\frac{\\left| y_t
        - \\hat{y}_t \\right|}{\\left| y_t \\right| + \\left| \\hat{y}_t \\right|}}.
    """
    RMSPE = partial(accumulate_forecast_score, metric=ForecastScoreAccumulator.rmspe)
    """
    Root Mean Square Percent Error is formulated as:
    .. math:: 100 \\cdot \\sqrt{\\frac{1}{T}\\sum_{t=1}^T\\frac{(y_t - \\hat{y}_t)}{y_t}^2}.
    """
    # NOTE: MASE requires the ``insample`` kwarg (see ForecastScoreAccumulator.mase).
    MASE = partial(accumulate_forecast_score, metric=ForecastScoreAccumulator.mase)
    """
    Mean Absolute Scaled Error (MASE) is formulated as:
    .. math::
        \\frac{1}{T}\\cdot\\frac{\\sum_{t=1}^{T}\\left| y_t
        - \\hat{y}_t \\right|}{\\frac{1}{N-m}\\sum_{t=m+1}^{N}\\left| x_t - x_{t-m} \\right|}.
    """
    # NOTE: MSIS requires the ``insample``, ``ub`` & ``lb`` kwargs
    # (see ForecastScoreAccumulator.msis).
    MSIS = partial(accumulate_forecast_score, metric=ForecastScoreAccumulator.msis)
    """
    Mean Scaled Interval Score (MSIS) is formulated as:
    .. math::
        \\frac{1}{T}\\cdot\\frac{\\sum_{t=1}^{T} (U_t - L_t) + 100 \\cdot (L_t - y_t)[y_t<L_t]
        + 100\\cdot(y_t - U_t)[y_t > U_t]}{\\frac{1}{N-m}\\sum_{t=m+1}^{N}\\left| x_t - x_{t-m} \\right|}.
    """
class ForecastEvaluatorConfig(EvaluatorConfig):
    """
    Configuration object governing a `ForecastEvaluator`.
    """

    # ``horizon`` is stored as a pandas timedelta/offset, so it must be
    # serialized the same way as the base class's timedelta keys.
    _timedelta_keys = EvaluatorConfig._timedelta_keys + ["horizon"]

    def __init__(self, horizon: float = None, **kwargs):
        """
        :param horizon: the model's prediction horizon. Each time the model is asked
            for a prediction, it predicts ``horizon`` seconds into the future.
        """
        super().__init__(**kwargs)
        self.horizon = horizon

    @property
    def horizon(self) -> Union[pd.Timedelta, pd.DateOffset, None]:
        """
        :return: how far into the future the model predicts. Falls back to the
            retraining frequency when not set explicitly.
        """
        return self.retrain_freq if self._horizon is None else self._horizon

    @horizon.setter
    def horizon(self, horizon):
        self._horizon = to_offset(horizon)

    @property
    def cadence(self) -> Union[pd.Timedelta, pd.DateOffset, None]:
        """
        :return: how often the model produces a new prediction. Falls back to the
            predictive horizon when not set explicitly (which in turn falls back
            to the retraining frequency).
        """
        return self.horizon if self._cadence is None else self._cadence

    @cadence.setter
    def cadence(self, cadence):
        self._cadence = to_offset(cadence)
class ForecastEvaluator(EvaluatorBase):
    """
    Simulates the live deployment of a forecaster model.
    """

    # Config class paired with this evaluator (checked by EvaluatorBase.__init__).
    config_class = ForecastEvaluatorConfig

    def __init__(self, model, config):
        """
        :param model: the forecaster to evaluate. Must be a `ForecasterBase`.
        :param config: the `ForecastEvaluatorConfig` governing the evaluation.
        """
        assert isinstance(model, ForecasterBase)
        super().__init__(model=model, config=config)

    @property
    def horizon(self):
        # Convenience passthrough to the config.
        return self.config.horizon

    @property
    def cadence(self):
        # Convenience passthrough to the config.
        return self.config.cadence

    def _call_model(
        self,
        time_series: TimeSeries,
        time_series_prev: TimeSeries,
        exog_data: TimeSeries = None,
        return_err: bool = False,
    ) -> Union[Tuple[TimeSeries, TimeSeries], TimeSeries]:
        # Forecast at the timestamps of the target univariate (if one is
        # designated), otherwise at the timestamps of the full time series.
        if self.model.target_seq_index is not None:
            name = time_series.names[self.model.target_seq_index]
            time_stamps = time_series.univariates[name].time_stamps
        else:
            time_stamps = time_series.time_stamps
        forecast, err = self.model.forecast(
            time_stamps=time_stamps, time_series_prev=time_series_prev, exog_data=exog_data
        )
        # Only return the model's error estimate if explicitly requested.
        return (forecast, err) if return_err else forecast

    def evaluate(
        self,
        ground_truth: TimeSeries,
        predict: Union[TimeSeries, List[TimeSeries]],
        metric: ForecastMetric = ForecastMetric.sMAPE,
    ):
        """
        :param ground_truth: the series of test data
        :param predict: the series of predicted values
        :param metric: the evaluation metric.
        """
        if self.model.target_seq_index is not None:
            # Score only the target univariate of a multivariate series.
            name = ground_truth.names[self.model.target_seq_index]
            ground_truth = ground_truth.univariates[name].to_ts()
        if isinstance(predict, TimeSeries):
            if metric is not None:
                return metric.value(ground_truth, predict)
            return accumulate_forecast_score(ground_truth, predict)
        else:
            # ``predict`` is a list of per-window forecasts: report the metric
            # averaged over the non-empty windows, weighted by window length.
            if metric is not None:
                weights = np.asarray([len(p) for p in predict if not p.is_empty()])
                vals = [metric.value(ground_truth, p) for p in predict if not p.is_empty()]
                return np.dot(weights / weights.sum(), vals)
            return [accumulate_forecast_score(ground_truth, p) for p in predict if not p.is_empty()]
from abc import abstractmethod
from typing import Any, List, Tuple, Union
import numpy as np
import pandas as pd
from tqdm import tqdm
from merlion.models.base import ModelBase
from merlion.models.forecast.base import ForecasterBase
from merlion.utils.misc import AutodocABCMeta
from merlion.utils.resample import granularity_str_to_seconds, to_offset
from merlion.utils.time_series import TimeSeries
class EvaluatorConfig(metaclass=AutodocABCMeta):
    """
    Abstract class which defines an evaluator config.
    """

    # Keys whose values are stored internally as pandas timedeltas/offsets and
    # must be converted back to a serializable representation by ``to_dict``.
    _timedelta_keys = ["train_window", "retrain_freq", "cadence"]

    def __init__(self, train_window: float = None, retrain_freq: float = None, cadence: float = None):
        """
        :param train_window: the maximum duration of data we would like to train the model on. ``None`` means no limit.
        :param retrain_freq: the frequency at which we want to re-train the model. ``None`` means we only train the
            model once on the initial training data.
        :param cadence: the frequency at which we want to obtain predictions from the model.
            ``None`` means that we obtain a new prediction at the same frequency as the model's predictive horizon.
            ``0`` means that we obtain a new prediction at every timestamp.
        """
        self.train_window = train_window
        self.retrain_freq = retrain_freq
        self.cadence = cadence

    @property
    def train_window(self) -> Union[pd.Timedelta, pd.DateOffset, None]:
        """
        :return: the maximum duration of data we would like to train the model on. ``None`` means no limit.
        """
        return self._train_window

    @train_window.setter
    def train_window(self, train_window):
        self._train_window = to_offset(train_window)

    @property
    def retrain_freq(self) -> Union[pd.Timedelta, pd.DateOffset, None]:
        """
        :return: the frequency at which we want to re-train the model. ``None`` means we only train the model on the
            initial training data.
        """
        return self._retrain_freq

    @retrain_freq.setter
    def retrain_freq(self, retrain_freq):
        self._retrain_freq = to_offset(retrain_freq)

    @property
    def cadence(self) -> Union[pd.Timedelta, pd.DateOffset]:
        """
        :return: the cadence at which we are having our model produce new predictions. Defaults to the retraining
            frequency if not explicitly provided.
        """
        if self._cadence is None:
            return self.retrain_freq
        return self._cadence

    @cadence.setter
    def cadence(self, cadence):
        self._cadence = to_offset(cadence)

    @property
    def horizon(self) -> pd.DateOffset:
        """
        :return: the horizon our model is predicting into the future. Equal to the prediction cadence by default.
        """
        return self.cadence

    def to_dict(self):
        """
        :return: a dict representation of the config, with ``pd.Timedelta`` values
            converted to seconds and ``pd.DateOffset`` values to frequency strings.
        """
        config_dict = {}
        for key, value in self.__dict__.items():
            # Private attrs (e.g. ``_cadence``) are serialized under their public names.
            k_strip = key.lstrip("_")
            if k_strip in self._timedelta_keys and value is not None:
                # Bug fix: use ``total_seconds()`` instead of ``microseconds / 1e6``.
                # ``Timedelta.microseconds`` is only the sub-second *component*
                # (e.g. 0 for Timedelta("1h")), so the old code failed to
                # round-trip any timedelta config value of one second or more.
                config_dict[k_strip] = value.total_seconds() if isinstance(value, pd.Timedelta) else value.freqstr
            else:
                config_dict[k_strip] = value
        return config_dict
class EvaluatorBase(metaclass=AutodocABCMeta):
    """
    An evaluator simulates the live deployment of a model on historical data.
    It trains a model on an initial time series, and then re-trains that model
    at a specified frequency.
    The `EvaluatorBase.get_predict` method returns the train & test predictions
    of a model, as if it were being trained incrementally on the test data in
    the manner described above.
    Subclasses define slightly different protocols for different tasks, e.g.
    anomaly detection vs. forecasting.
    """

    # Subclasses pair themselves with a task-specific config class.
    config_class = EvaluatorConfig

    def __init__(self, model: ModelBase, config: EvaluatorConfig):
        """
        :param model: the model to evaluate.
        :param config: the evaluation configuration.
        """
        assert isinstance(model, ModelBase)
        assert isinstance(config, self.config_class)
        self.model = model
        self.config = config

    @property
    def train_window(self):
        # Convenience passthrough to the config.
        return self.config.train_window

    @property
    def retrain_freq(self):
        # Convenience passthrough to the config.
        return self.config.retrain_freq

    @property
    def cadence(self):
        # Convenience passthrough to the config.
        return self.config.cadence

    @property
    def horizon(self):
        # Convenience passthrough to the config.
        return self.config.horizon

    @abstractmethod
    def _call_model(
        self, time_series: TimeSeries, time_series_prev: TimeSeries, exog_data: TimeSeries = None
    ) -> TimeSeries:
        # Task-specific: obtain the model's prediction on ``time_series`` given
        # the historical context ``time_series_prev``.
        raise NotImplementedError

    def _train_model(self, train_vals: TimeSeries, **train_kwargs) -> TimeSeries:
        # Hook which subclasses may override to customize training.
        return self.model.train(train_vals, **train_kwargs)

    def default_train_kwargs(self) -> dict:
        # Extra kwargs applied to the *initial* training call; none by default.
        return {}

    def default_retrain_kwargs(self) -> dict:
        # Extra kwargs applied to every *subsequent* re-training; none by default.
        return {}

    @property
    def _concat_result(self):
        """
        In general, concatenate the result of ``get_predict()`` into a single
        `TimeSeries` if the prediction cadence is the same as the predictive
        horizon.
        """
        return self.cadence == self.horizon

    def get_predict(
        self,
        train_vals: TimeSeries,
        test_vals: TimeSeries,
        exog_data: TimeSeries = None,
        train_kwargs: dict = None,
        retrain_kwargs: dict = None,
    ) -> Tuple[Any, Union[TimeSeries, List[TimeSeries]]]:
        """
        Initialize the model by training it on an initial set of train data.
        Get the model's predictions on the test data, retraining the model as
        appropriate.

        :param train_vals: initial training data
        :param test_vals: all data where we want to get the model's predictions and compare it to the ground truth
        :param exog_data: any exogenous data (only used for some models)
        :param train_kwargs: dict of keyword arguments we want to use for the initial training process
        :param retrain_kwargs: dict of keyword arguments we want to use for all subsequent retrainings
        :return: ``(train_result, result)``. ``train_result`` is the output of training the model on ``train_vals``
            (``None`` if ``pretrained`` is ``True``). ``result`` is the model's predictions on ``test_vals``, and is
            specific to each evaluation task.
        """
        # Determine the appropriate training/retraining kwargs
        train_kwargs = {} if train_kwargs is None else train_kwargs
        full_train_kwargs = self.default_train_kwargs()
        full_train_kwargs.update(train_kwargs)
        retrain_kwargs = {} if retrain_kwargs is None else retrain_kwargs
        full_retrain_kwargs = self.default_retrain_kwargs()
        full_retrain_kwargs.update(retrain_kwargs)
        # Forecasters additionally receive the exogenous data at train time.
        if isinstance(self.model, ForecasterBase):
            full_train_kwargs.update(exog_data=exog_data)
            full_retrain_kwargs.update(exog_data=exog_data)
        # Train the initial model (if not pretrained)
        self.model.reset()
        train_result = self._train_model(train_vals, **full_train_kwargs)
        if test_vals is None:
            return train_result, None
        # We will incrementally build up the final result window-by-window, where each window is a time series.
        # t_next is the next time we will re-train the model.
        all_t = test_vals.index
        # ``tf`` is an exclusive end-of-data bound, nudged just past the final timestamp.
        t, tf = all_t[0], all_t[-1] + pd.Timedelta(milliseconds=1)
        t_next = tf if self.retrain_freq is None else t + self.retrain_freq
        result = []
        # Progress bar measured in seconds of (simulated) elapsed data time.
        pbar = tqdm(total=int(granularity_str_to_seconds(tf - t)), desc=type(self).__name__)
        t_prev = t
        while t < tf:
            pbar.update(int(granularity_str_to_seconds(t - t_prev)))
            # Get the train & test data for the current window.
            # Training data = initial train data + all test data before time t,
            # optionally capped to the most recent ``train_window``.
            cur_train, cur_test = test_vals.bisect(t, t_in_left=False)
            cur_train = cur_train if train_vals is None else train_vals + cur_train
            if self.train_window is not None:
                cur_train = cur_train.window(t - self.train_window, t)
            # Restrict the test window to the predictive horizon, but always
            # include at least the next timestamp so progress is guaranteed.
            if self.horizon is not None:
                i = np.searchsorted(all_t, t)
                tf_pred = cur_train.index[-1] + self.horizon
                # NOTE(review): the repeated ``self.horizon is not None`` check is
                # redundant inside this branch, but harmless.
                if self.horizon is not None and i + 1 < len(all_t):
                    tf_pred = max(tf_pred, all_t[i + 1])
                cur_test = cur_test.window(t, tf_pred, include_tf=True)
            # Fully re-train the model when it is time to do so
            if t >= t_next and not cur_train.is_empty() and not cur_test.is_empty():
                self.model.reset()
                self._train_model(cur_train, **full_retrain_kwargs)
                # Schedule the next re-training, ensuring at least one new
                # timestamp elapses before it happens.
                i = np.searchsorted(all_t, t_next)
                if i + 1 < len(all_t):
                    t_next = max(t_next + self.retrain_freq, all_t[i + 1])
                else:
                    t_next = t_next + self.retrain_freq
            # Add this result if there is any result to add
            if not cur_test.is_empty():
                cur_result = self._call_model(time_series=cur_test, time_series_prev=cur_train, exog_data=exog_data)
                result.append(cur_result)
            # Move to the next eval window based on the cadence, again ensuring
            # we advance by at least one timestamp.
            i = np.searchsorted(all_t, t)
            t_prev = t
            if self.cadence is not None and i + 1 < len(all_t):
                t = max(t + self.cadence, all_t[i + 1])
            else:
                t = tf
        pbar.update(int(granularity_str_to_seconds(tf - t_prev)))
        # Concatenate everything together into a single time series if desired
        pbar.close()
        if self._concat_result:
            result = sum(result[1:], result[0])
        return train_result, result

    @abstractmethod
    def evaluate(self, ground_truth, predict, metric):
        """
        Given the ground truth time series & the model's prediction (as produced
        by `EvaluatorBase.get_predict`), compute the specified evaluation
        metric. If no metric is specified, return the appropriate score
        accumulator for the task. Implementation is task-specific.
        """
        raise NotImplementedError
"""Default models for anomaly detection & forecasting that balance speed and performance."""
import logging
from typing import Optional, Tuple
from merlion.models.factory import ModelFactory
from merlion.models.layers import LayeredDetector, LayeredForecaster, LayeredModelConfig
from merlion.models.anomaly.base import DetectorBase
from merlion.models.forecast.base import ForecasterBase
from merlion.utils import TimeSeries
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class DefaultDetectorConfig(LayeredModelConfig):
    """
    Config object for the default anomaly detection model.
    """

    def __init__(self, model=None, granularity: str = None, n_threads: int = 1, **kwargs):
        """
        :param granularity: the granularity at which the input time series should
            be resampled, e.g. "5min", "1h", "1d", etc.
        :param n_threads: how many parallel threads the models which support
            parallelism may use.
        """
        # Record the resampling granularity & threading options, then defer the
        # rest of the initialization to the base config.
        self.n_threads = n_threads
        self.granularity = granularity
        super().__init__(model=model, **kwargs)
        assert isinstance(self.base_model, DetectorBase) or self.base_model is None
class DefaultDetector(LayeredDetector):
    """
    Default anomaly detection model that balances efficiency with performance.
    """

    # Config class paired with this model.
    config_class = DefaultDetectorConfig

    @property
    def _default_post_rule_train_config(self):
        # NOTE: imported locally rather than at module level, presumably to
        # avoid a circular import with the evaluate package.
        from merlion.evaluate.anomaly import TSADMetric
        return dict(metric=TSADMetric.F1, unsup_quantile=None)

    @property
    def granularity(self):
        # Convenience passthrough to the config.
        return self.config.granularity

    def reset(self):
        # Reset the underlying model, if one has been constructed by train().
        if self.model is not None:
            self.model.reset()

    def train(
        self, train_data: TimeSeries, train_config=None, anomaly_labels: TimeSeries = None, post_rule_train_config=None
    ) -> TimeSeries:
        """
        Choose & construct the underlying ensemble based on the dimensionality
        of ``train_data``, then train it.

        :param train_data: the time series to train on.
        :param train_config: forwarded to the underlying ensemble's training.
        :param anomaly_labels: optional labeled anomalies for supervised tuning.
        :param post_rule_train_config: optional config for post-rule training.
        :return: the model's anomaly scores on the training data.
        """
        # Resample the input at the configured granularity before modeling.
        transform_dict = dict(name="TemporalResample", granularity=self.granularity)
        # Multivariate model is ensemble of VAE and RRCF
        n_threads = self.config.n_threads
        if train_data.dim > 1:
            self.model = ModelFactory.create(
                "DetectorEnsemble",
                models=[
                    ModelFactory.create("VAE", transform=transform_dict),
                    # RRCF updates online; parallelism is enabled only when
                    # more than one thread was configured.
                    ModelFactory.create(
                        "RandomCutForest",
                        online_updates=True,
                        parallel=n_threads > 1,
                        thread_pool_size=n_threads,
                        n_estimators=100,
                        max_n_samples=512,
                    ),
                ],
                **self.config.model_kwargs,
            )
        # Univariate model is ETS/RRCF/ZMS ensemble
        else:
            self.model = ModelFactory.create(
                "DetectorEnsemble",
                models=[
                    ModelFactory.create("AutoETS", model=dict(name="ETSDetector"), transform=transform_dict),
                    ModelFactory.create(
                        "RandomCutForest",
                        online_updates=True,
                        parallel=n_threads > 1,
                        thread_pool_size=n_threads,
                        n_estimators=100,
                        max_n_samples=512,
                    ),
                    ModelFactory.create("ZMS", n_lags=3, transform=transform_dict),
                ],
                **self.config.model_kwargs,
            )
        return super().train(
            train_data=train_data,
            anomaly_labels=anomaly_labels,
            train_config=train_config,
            post_rule_train_config=post_rule_train_config,
        )
class DefaultForecasterConfig(LayeredModelConfig):
    """
    Config object for default forecasting model.
    """

    def __init__(
        self,
        model=None,
        max_forecast_steps: int = None,
        target_seq_index: int = None,
        granularity: str = None,
        **kwargs,
    ):
        """
        :param model: the underlying model (if already chosen); must be a `ForecasterBase` if given.
        :param max_forecast_steps: Max # of steps we would like to forecast for.
        :param target_seq_index: The index of the univariate (amongst all univariates in a general multivariate time
            series) whose value we would like to forecast.
        :param granularity: the granularity at which the input time series should be sampled, e.g. "5min", "1d", etc.
        """
        self.granularity = granularity
        super().__init__(
            model=model,
            max_forecast_steps=max_forecast_steps,
            target_seq_index=target_seq_index,
            **kwargs,
        )
        # Any pre-specified base model must be a forecaster.
        base = self.base_model
        assert base is None or isinstance(base, ForecasterBase)
class DefaultForecaster(LayeredForecaster):
    """
    Default forecasting model that balances efficiency with performance.

    Uses a LightGBM forecaster for multivariate data (or whenever exogenous
    regressors are supplied), and AutoETS for univariate data.
    """

    config_class = DefaultForecasterConfig

    @property
    def supports_exog(self):
        # When exog_data is passed, train() falls back to LGBM, which handles exogenous regressors.
        return True

    @property
    def granularity(self):
        # Granularity at which the input time series is resampled (from the config).
        return self.config.granularity

    def reset(self):
        if self.model is not None:
            self.model.reset()

    def train(
        self, train_data: TimeSeries, train_config=None, exog_data=None
    ) -> Tuple[TimeSeries, Optional[TimeSeries]]:
        """
        Chooses & trains the underlying forecaster based on the dimension of the training data.

        :param train_data: a `TimeSeries` of metric values to train the model on.
        :param train_config: additional implementation-specific config options (if needed).
        :param exog_data: a `TimeSeries` of exogenous regressors. Optional.
        :return: the model's forecast (and standard error, if supported) on the training data.
        """
        # Resample the input to the configured granularity before modeling.
        transform_dict = dict(name="TemporalResample", granularity=self.granularity)
        kwargs = dict(transform=transform_dict, **self.config.model_kwargs)
        # LGBM forecaster for multivariate data, AutoETS for univariate data
        if train_data.dim > 1 or exog_data is not None:
            self.model = ModelFactory.create("LGBMForecaster", **kwargs)
        else:
            self.model = ModelFactory.create("AutoETS", **kwargs)
        return super().train(train_data=train_data, train_config=train_config, exog_data=exog_data)
from abc import abstractmethod
import copy
from enum import Enum
import json
import logging
import os
from os.path import abspath, join
from typing import Any, Dict, Optional, Tuple, List
import dill
import pandas as pd
from merlion.transform.base import TransformBase, Identity
from merlion.transform.factory import TransformFactory
from merlion.transform.normalize import Rescale, MeanVarNormalize
from merlion.transform.sequence import TransformSequence
from merlion.utils.time_series import assert_equal_timedeltas, to_pd_datetime, infer_granularity, TimeSeries
from merlion.utils.misc import AutodocABCMeta, ModelConfigMeta
from merlion.utils.resample import to_offset
logger = logging.getLogger(__name__)
class Config(object, metaclass=ModelConfigMeta):
    """
    Abstract class which defines a model config.
    """

    filename = "config.json"
    _default_transform = Identity()

    # Pre-processing transform applied to input time series; set in __init__.
    transform: TransformBase = None
    # Dimension (number of univariates) of the time series; set when the model is trained.
    dim: Optional[int] = None

    def __init__(self, transform: TransformBase = None, **kwargs):
        """
        :param transform: Transformation to pre-process input time series. May be given either as a
            `TransformBase` object or as a JSON-style dict understood by `TransformFactory`;
            if ``None``, a deep copy of the class's default transform is used.
        """
        super().__init__()
        if transform is None:
            self.transform = copy.deepcopy(self._default_transform)
        elif isinstance(transform, dict):
            # Allow the transform to be specified as a serialized (dict) representation.
            self.transform = TransformFactory.create(**transform)
        else:
            self.transform = transform
        # The dimension is unknown until the model sees training data.
        self.dim = None

    def to_dict(self, _skipped_keys=None):
        """
        :return: dict with keyword arguments used to initialize the config class.
        """
        config_dict = {}
        skipped_keys = set() if _skipped_keys is None else _skipped_keys
        for key, value in self.__dict__.items():
            # Prefer the public (un-underscored) name when a matching property exists,
            # so property-backed attributes round-trip through their setters.
            k_strip = key.lstrip("_")
            key = k_strip if hasattr(self, k_strip) else key
            if hasattr(value, "to_dict"):
                value = value.to_dict()
            elif isinstance(value, Enum):
                value = value.name  # Relies on there being an appropriate getter/setter!
            if key not in skipped_keys:
                config_dict[key] = copy.deepcopy(value)
        return config_dict

    @classmethod
    def from_dict(cls, config_dict: Dict[str, Any], return_unused_kwargs=False, dim=None, **kwargs):
        """
        Constructs a `Config` from a Python dictionary of parameters.

        :param config_dict: dict that will be used to instantiate this object.
        :param return_unused_kwargs: whether to return any unused keyword args.
        :param dim: the dimension of the time series. handled as a special case.
        :param kwargs: any additional parameters to set (overriding config_dict).

        :return: `Config` object initialized from the dict.
        """
        config_dict = copy.copy(config_dict)
        # dim is not a constructor parameter, so pop it and set it after construction.
        dim = config_dict.pop("dim", dim)
        config_dict = dict(**config_dict, **kwargs)
        config = cls(**config_dict)
        if dim is not None:
            config.dim = dim
        kwargs = config.get_unused_kwargs(**config_dict)
        if len(kwargs) > 0 and not return_unused_kwargs:
            logger.warning(f"Unused kwargs: {kwargs}", stack_info=True)
        elif return_unused_kwargs:
            return config, kwargs
        return config

    def __reduce__(self):
        # Pickle configs by round-tripping through their dict representation.
        return self.__class__.from_dict, (self.to_dict(),)

    def __copy__(self):
        return self.from_dict(self.to_dict())

    def __deepcopy__(self, memodict={}):
        # to_dict() already deep-copies all values, so a shallow copy suffices.
        return self.__copy__()

    def get_unused_kwargs(self, **kwargs):
        # Kwargs which did not end up as attributes of the config (per to_dict()).
        return {k: v for k, v in kwargs.items() if k not in self.to_dict()}
class NormalizingConfig(Config):
    """
    Model config where the transform must return normalized values. Applies
    additional normalization after the initial data pre-processing transform.
    """

    def __init__(self, normalize: Rescale = None, **kwargs):
        """
        :param normalize: Pre-trained normalization transformation (optional). May also be
            given as a JSON-style dict; if ``None``, a fresh `MeanVarNormalize` is used.
        """
        super().__init__(**kwargs)
        if isinstance(normalize, dict):
            self.normalize = TransformFactory.create(**normalize)
        else:
            self.normalize = MeanVarNormalize() if normalize is None else normalize

    @property
    def full_transform(self):
        """
        Returns the full transform, including the pre-processing step, lags, and
        final mean/variance normalization.
        """
        return TransformSequence([self.transform, self.normalize])

    @property
    def transform(self):
        return self._transform

    @transform.setter
    def transform(self, transform):
        """
        Set the pre-processing transform. Also resets the mean/variance
        normalization, since the new transform could change these, and the
        new mean/variance may need to be re-learned.
        """
        self._transform = transform
        self.normalize = MeanVarNormalize()
class ModelBase(metaclass=AutodocABCMeta):
    """
    Abstract base class for models.
    """

    filename = "model.pkl"
    config_class = Config

    train_data: Optional[TimeSeries] = None
    """
    The data used to train the model.
    """

    def __init__(self, config: Config):
        assert isinstance(config, self.config_class)
        # Copy the config so changes to the model's config don't affect the caller's object.
        self.config = copy.copy(config)
        self.last_train_time = None
        self.timedelta = None
        self.timedelta_offset = pd.to_timedelta(0)
        self.train_data = None

    def reset(self):
        """
        Resets the model's internal state.
        """
        self.__init__(self.config)

    @property
    def base_model(self):
        """
        The base model of a base model is itself.
        """
        return self

    @property
    @abstractmethod
    def require_even_sampling(self) -> bool:
        """
        Whether the model assumes that training data is sampled at a fixed frequency
        """

    @property
    @abstractmethod
    def require_univariate(self) -> bool:
        """
        Whether the model only works with univariate time series.
        """

    @property
    def auto_align(self) -> bool:
        """
        Whether to ensure that all univariates in the training data are aligned.
        """
        return True

    @property
    def _default_train_config(self):
        # Subclasses may override this to supply a default train config object.
        return None

    @property
    def supports_exog(self):
        """
        Whether the model supports exogenous regressors.
        """
        return False

    def __getstate__(self):
        # Deep-copy each attribute so the serialized state is independent of the live model.
        return {k: copy.deepcopy(v) for k, v in self.__dict__.items()}

    def __setstate__(self, state):
        # Only accept keys corresponding to existing attributes, so typos in
        # load()/kwargs surface as errors rather than silently creating attributes.
        for name, value in state.items():
            if hasattr(self, name):
                setattr(self, name, value)
            else:
                raise AttributeError(
                    f"'{type(self).__name__}' object has no attribute '{name}'. "
                    f"'{name}' is an invalid kwarg for the load() method."
                )

    def __reduce__(self):
        # Pickle via (class, (config,), state), so unpickling runs __init__ then __setstate__.
        state_dict = self.__getstate__()
        config = state_dict.pop("config")
        return self.__class__, (config,), state_dict

    @property
    def dim(self):
        # Dimension (number of univariates) of the time series, stored on the config.
        return self.config.dim

    @property
    def transform(self):
        """
        :return: The data pre-processing transform to apply on any time series,
            before giving it to the model.
        """
        # NormalizingConfig exposes full_transform; plain configs just have transform.
        return getattr(self.config, "full_transform", self.config.transform)

    @transform.setter
    def transform(self, transform):
        self.config.transform = transform

    @property
    def timedelta(self):
        """
        :return: the gap (as a ``pandas.Timedelta`` or ``pandas.DateOffset``) between data points in the training data
        """
        return self._timedelta

    @timedelta.setter
    def timedelta(self, timedelta):
        self._timedelta = to_offset(timedelta)

    @property
    def last_train_time(self):
        """
        :return: the last time (as a ``pandas.Timestamp``) that the model was trained on
        """
        return self._last_train_time

    @last_train_time.setter
    def last_train_time(self, last_train_time):
        self._last_train_time = to_pd_datetime(last_train_time)

    @property
    def _pandas_train(self):
        """
        Whether the _train() method requires ``pandas.DataFrame``. If False, we assume it accepts `TimeSeries`.
        """
        return True

    def train_pre_process(self, train_data: TimeSeries) -> TimeSeries:
        """
        Applies pre-processing steps common for training most models.

        :param train_data: the original time series of training data
        :return: the training data, after any necessary pre-processing has been applied
        """
        self.train_data = train_data
        self.config.dim = train_data.dim
        self.transform.train(train_data)
        train_data = self.transform(train_data)

        # Make sure the training data is univariate if needed
        if self.require_univariate and train_data.dim != 1:
            raise RuntimeError(
                f"Transform {self.transform} transforms data into a {train_data.dim}-"
                f"variate time series, but model {type(self).__name__} can "
                f"only handle uni-variate time series. Change the transform or set target_seq_index."
            )

        # Make sure timestamps are equally spaced if needed (e.g. for ARIMA)
        t = train_data.time_stamps
        self.timedelta, self.timedelta_offset = infer_granularity(t, return_offset=True)
        if self.require_even_sampling:
            assert_equal_timedeltas(train_data.univariates[train_data.names[0]], self.timedelta, self.timedelta_offset)
            assert train_data.is_aligned
        self.last_train_time = t[-1]
        return train_data.align() if self.auto_align else train_data

    def transform_time_series(
        self, time_series: TimeSeries, time_series_prev: TimeSeries = None
    ) -> Tuple[TimeSeries, Optional[TimeSeries]]:
        """
        Applies the model's pre-processing transform to ``time_series`` and ``time_series_prev``.

        :param time_series: The time series
        :param time_series_prev: A time series of context, immediately preceding ``time_series``. Optional.

        :return: The transformed ``time_series`` and ``time_series_prev``.
        """
        if time_series_prev is not None and not time_series.is_empty():
            # Transform context + data jointly, then split back at the original boundary,
            # so stateful transforms (e.g. lags) see a contiguous series.
            t0 = time_series.t0
            time_series = time_series_prev + time_series
            time_series_prev, time_series = self.transform(time_series).bisect(t0, t_in_left=False)
        elif time_series_prev is not None:
            time_series_prev = self.transform(time_series_prev)
        else:
            time_series = self.transform(time_series)
        return time_series, time_series_prev

    @abstractmethod
    def train(self, train_data: TimeSeries, train_config=None):
        """
        Trains the model on the specified time series, optionally with some
        additional implementation-specific config options ``train_config``.

        :param train_data: a `TimeSeries` to use as a training set
        :param train_config: additional configurations (if needed)
        """
        raise NotImplementedError

    @abstractmethod
    def _train(self, train_data: pd.DataFrame, train_config=None):
        # Implementation-specific training routine; receives pre-processed data.
        raise NotImplementedError

    @abstractmethod
    def train_post_process(self, train_result):
        # Implementation-specific post-processing of the raw training result.
        raise NotImplementedError

    def _save_state(self, state_dict: Dict[str, Any], filename: str = None, **save_config) -> Dict[str, Any]:
        """
        Saves the model's state to the specified file. If you override this method, please also override
        ``_load_state()``. By default, the model's state dict is just serialized using dill.

        :param state_dict: The state dict to save.
        :param filename: The name of the file to save the model to.
        :param save_config: additional configurations (if needed)
        :return: The state dict to save.
        """
        state_dict.pop("config", None)  # don't save the model's config in binary
        if filename is not None:
            with open(filename, "wb") as f:
                dill.dump(state_dict, f)
        return state_dict

    def save(self, dirname: str, **save_config):
        """
        :param dirname: directory to save the model & its config
        :param save_config: additional configurations (if needed)
        """
        state_dict = self.__getstate__()
        config_dict = self.config.to_dict()

        # create the directory if needed
        os.makedirs(dirname, exist_ok=True)

        # Save the config dict
        with open(join(dirname, self.config_class.filename), "w") as f:
            json.dump(config_dict, f, indent=2, sort_keys=True)

        # Save the model state
        self._save_state(state_dict, abspath(join(dirname, self.filename)), **save_config)

    def _load_state(self, state_dict: Dict[str, Any], **kwargs):
        """
        Loads the model's state from a state dict. Override this method if you have overridden _save_state().
        By default, the model's state dict is loaded from a file (serialized by dill), and the state is set.

        :param state_dict: dict containing the model's state.
        :param kwargs: any additional keyword arguments to set manually in the state dict (after loading it).
        """
        if "config" in state_dict:  # don't re-set the config
            state_dict.pop("config")
        self.__setstate__(state_dict)

    @classmethod
    def _load_state_dict(cls, model_path: str):
        # Deserialize the dill-pickled state dict from disk.
        with open(model_path, "rb") as f:
            state_dict = dill.load(f)
        return state_dict

    @classmethod
    def load(cls, dirname: str, **kwargs):
        """
        :param dirname: directory to load model (and config) from
        :param kwargs: config params to override manually
        :return: `ModelBase` object loaded from file
        """
        # Load the config
        config_path = join(dirname, cls.config_class.filename)
        with open(config_path, "r") as f:
            config_dict = json.load(f)

        # Load the state
        state_dict = cls._load_state_dict(join(dirname, cls.filename))

        return cls._from_config_state_dicts(config_dict, state_dict, **kwargs)

    @classmethod
    def _from_config_state_dicts(cls, config_dict, state_dict, **kwargs):
        """
        Initializes a model from the config and state dictionaries used to
        save it.

        :param config_dict: Dictionary used to initialize the config.
        :param state_dict: Dictionary used to load the model state.
        :param kwargs: config params to override manually

        :return: `ModelBase` object loaded from file
        """
        # Kwargs unused by the config are forwarded to the state loader.
        config, model_kwargs = cls.config_class.from_dict(config_dict, return_unused_kwargs=True, **kwargs)
        model = cls(config=config)
        model._load_state(state_dict, **model_kwargs)

        return model

    def to_bytes(self, **save_config):
        """
        Converts the entire model state and configuration to a single byte object.

        :return: bytes object representing the model.
        """
        return dill.dumps(self._to_serializable_comps(**save_config))

    def _to_serializable_comps(self, **save_config):
        # Bundle (class name, config dict, state dict) for byte-level serialization.
        state_dict = self.__getstate__()
        config_dict = self.config.to_dict()
        state_dict = self._save_state(state_dict, **save_config)
        class_name = self.__class__.__name__
        return class_name, config_dict, state_dict

    @classmethod
    def from_bytes(cls, obj, **kwargs):
        """
        Creates a fully specified model from a byte object

        :param obj: byte object to convert into a model
        :return: ModelBase object loaded from ``obj``
        """
        name, config_dict, state_dict = dill.loads(obj)
        return cls._from_config_state_dicts(config_dict, state_dict, **kwargs)

    def __copy__(self):
        new_model = self.__class__(config=copy.copy(self.config))
        state_dict = self.__getstate__()
        state_dict.pop("config", None)
        new_model.__setstate__(state_dict)
        return new_model

    def __deepcopy__(self, memodict={}):
        # __getstate__ already deep-copies all attributes.
        new_model = self.__class__(config=copy.deepcopy(self.config))
        state_dict = self.__getstate__()
        state_dict.pop("config", None)
        new_model.__setstate__(state_dict)
        return new_model
class MultipleTimeseriesModelMixin(metaclass=AutodocABCMeta):
    """
    Abstract mixin for models supporting training on multiple time series.
    """

    @abstractmethod
    def train_multiple(self, multiple_train_data: List[TimeSeries], train_config=None):
        """
        Trains the model on multiple time series, optionally with some
        additional implementation-specific config options ``train_config``.

        :param multiple_train_data: a list of `TimeSeries` to use as a training set
        :param train_config: additional configurations (if needed)
        """
        raise NotImplementedError
import io
import json
import copy
import logging
from typing import List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from scipy.stats import norm
from abc import abstractmethod
from enum import Enum
try:
import torch
import torch.nn as nn
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
from merlion.models.base import Config, ModelBase
from merlion.plot import Figure
from merlion.transform.base import TransformBase, Identity
from merlion.transform.factory import TransformFactory
from merlion.utils.misc import initializer
from merlion.utils.time_series import to_pd_datetime, to_timestamp, TimeSeries, AggregationPolicy, MissingValuePolicy
logger = logging.getLogger(__name__)
class Optimizer(Enum):
    """
    Optimizers for learning model parameters.
    """

    # Each member's value is the corresponding torch optimizer class, so
    # ``Optimizer.<name>.value(params, ...)`` constructs the optimizer directly.
    Adam = torch.optim.Adam
    AdamW = torch.optim.AdamW
    SGD = torch.optim.SGD
    Adagrad = torch.optim.Adagrad
    RMSprop = torch.optim.RMSprop
class LossFunction(Enum):
    """
    Loss functions for learning model parameters.

    Each member's value is the corresponding torch loss class, so
    ``LossFunction.<name>.value()`` constructs the loss module directly.
    """

    mse = nn.MSELoss  # mean squared error (l2)
    l1 = nn.L1Loss  # mean absolute error (l1)
    huber = nn.HuberLoss
    # Misspelled name kept as the canonical member for backward compatibility
    # with configs that were saved using "guassian_nll".
    guassian_nll = nn.GaussianNLLLoss
    # Correctly-spelled alias: because it has the same value as ``guassian_nll``,
    # the Enum machinery treats it as an alias of that member, so both
    # LossFunction["gaussian_nll"] and LossFunction["guassian_nll"] resolve.
    gaussian_nll = nn.GaussianNLLLoss
class DeepConfig(Config):
    """
    Config object used to define a deep learning (pytorch) model.
    """

    @initializer
    def __init__(
        self,
        batch_size: int = 32,
        num_epochs: int = 10,
        optimizer: Union[str, Optimizer] = Optimizer.Adam,
        loss_fn: Union[str, LossFunction] = LossFunction.mse,
        clip_gradient: Optional[float] = None,
        use_gpu: bool = True,
        ts_encoding: Union[None, str] = "h",
        lr: float = 1e-4,
        weight_decay: float = 0.0,
        valid_fraction: float = 0.2,
        early_stop_patience: Union[None, int] = None,
        **kwargs,
    ):
        """
        :param batch_size: Batch size of a batch for stochastic training of deep models
        :param num_epochs: Total number of epochs for training.
        :param optimizer: The optimizer for learning the parameters of the deep learning models. The value of optimizer
            can be ``Adam``, ``AdamW``, ``SGD``, ``Adagrad``, ``RMSprop``.
        :param loss_fn: Loss function for optimizing deep learning models. The value of loss_fn can be
            ``mse`` for l2 loss, ``l1`` for l1 loss, ``huber`` for huber loss.
        :param clip_gradient: Clipping gradient norm of model parameters before updating. If ``clip_gradient is None``,
            then the gradient will not be clipped.
        :param use_gpu: Whether to use gpu for training deep models. If ``use_gpu = True`` while there is no GPU device,
            the model will use CPU for training instead.
        :param ts_encoding: whether the timestamp should be encoded to a float vector, which can be used
            for training deep learning based time series models; if ``None``, the timestamp is not encoded.
            If not ``None``, it represents the frequency for time features encoding options: [s:secondly, t:minutely,
            h:hourly, d:daily, b:business days, w:weekly, m:monthly]
        :param lr: Learning rate for optimizing deep learning models.
        :param weight_decay: Weight decay (L2 penalty) (default: 0)
        :param valid_fraction: Fraction of validation set to be split from training data
        :param early_stop_patience: Number of epochs with no improvement after which training will be stopped for
            early stopping function. If ``early_stop_patience = None``, the training process will not stop early.
        """
        # @initializer auto-assigns all constructor args to self (the optimizer/loss_fn
        # assignments go through the property setters below, which validate strings).
        super().__init__(**kwargs)

    @property
    def optimizer(self) -> Optimizer:
        return self._optimizer

    @optimizer.setter
    def optimizer(self, optimizer: Union[str, Optimizer]):
        # Accept either an Optimizer enum member or its (case-sensitive) name.
        if isinstance(optimizer, str):
            valid = set(Optimizer.__members__.keys())
            if optimizer not in valid:
                raise KeyError(f"{optimizer} is not a supported optimizer. Valid optimizers are: {valid}")
            optimizer = Optimizer[optimizer]
        self._optimizer = optimizer

    @property
    def loss_fn(self) -> LossFunction:
        return self._loss_fn

    @loss_fn.setter
    def loss_fn(self, loss_fn: Union[str, LossFunction]):
        # Accept either a LossFunction enum member or its (case-sensitive) name.
        if isinstance(loss_fn, str):
            valid = set(LossFunction.__members__.keys())
            if loss_fn not in valid:
                raise KeyError(f"{loss_fn} is not a supported loss function. Valid loss functions are: {valid}")
            loss_fn = LossFunction[loss_fn]
        self._loss_fn = loss_fn
class TorchModel(nn.Module):
    """
    Abstract base class for Pytorch deep learning models
    """

    def __init__(self, config: DeepConfig):
        super().__init__()
        self.config = config

    @abstractmethod
    def forward(self, past, past_timestamp, future_timestamp, *args, **kwargs):
        raise NotImplementedError

    @property
    def device(self):
        """The device on which the model's parameters currently reside."""
        return next(self.parameters()).device
class DeepModelBase(ModelBase):
    """
    Abstract base class for all deep learning models
    """

    config_class = DeepConfig
    deep_model_class = TorchModel

    def __init__(self, config: DeepConfig):
        super().__init__(config)
        # The underlying TorchModel; created lazily by _create_model().
        self.deep_model = None

    def _create_model(self):
        """
        Create and initialize the deep model and the necessary components
        (optimizer, loss function) for training.
        """
        self.deep_model = self.deep_model_class(self.config)

        self.optimizer = self.config.optimizer.value(
            self.deep_model.parameters(),
            lr=self.config.lr,
            weight_decay=self.config.weight_decay,
        )
        self.loss_fn = self.config.loss_fn.value()

        if self.config.use_gpu:
            self.to_gpu()
        else:
            self.to_cpu()

    @abstractmethod
    def _get_batch_model_loss_and_outputs(self, batch):
        """
        Calculate optimizing loss and get the output of the deep_model, given a batch of data
        """
        raise NotImplementedError

    def to_gpu(self):
        """
        Move deep model to GPU
        """
        if torch.cuda.is_available():
            if self.deep_model is not None:
                device = torch.device("cuda")
                self.deep_model = self.deep_model.to(device)
        else:
            # Fall back gracefully when use_gpu is set but no GPU is present.
            logger.warning("GPU not available, using CPU instead")
            self.to_cpu()

    def to_cpu(self):
        """
        Move deep model to CPU
        """
        if self.deep_model is not None:
            device = torch.device("cpu")
            self.deep_model = self.deep_model.to(device)

    def __getstate__(self):
        # Serialize the torch model via its state_dict rather than pickling the
        # nn.Module directly; the optimizer & loss function are re-created on load.
        state = copy.copy(self.__dict__)
        deep_model = state.pop("deep_model", None)
        state.pop("optimizer", None)
        state.pop("loss_fn", None)
        state = copy.deepcopy(state)
        if deep_model is not None:
            state["deep_model_state_dict"] = deep_model.state_dict()
        return state

    def __setstate__(self, state):
        deep_model_state_dict = state.pop("deep_model_state_dict", None)
        super().__setstate__(state)
        if deep_model_state_dict:
            if self.deep_model is None:
                self._create_model()
            # Round-trip the state dict through a buffer so torch.load can remap
            # tensors onto the device where the re-created model lives (map_location).
            buffer = io.BytesIO()
            torch.save(deep_model_state_dict, buffer)
            buffer.seek(0)
            self.deep_model.load_state_dict(torch.load(buffer, map_location=self.deep_model.device))
import copy
import inspect
import logging
from typing import Dict, Tuple, Type, Union
import dill
from merlion.models.base import ModelBase
from merlion.utils import dynamic_import
logger = logging.getLogger(__name__)
# Mapping from a model's short name to the "module:ClassName" path used by
# dynamic_import to lazily load the class (avoids importing every model up front).
import_alias = dict(
    # Default models
    DefaultDetector="merlion.models.defaults:DefaultDetector",
    DefaultForecaster="merlion.models.defaults:DefaultForecaster",
    # Anomaly detection models
    ArimaDetector="merlion.models.anomaly.forecast_based.arima:ArimaDetector",
    DynamicBaseline="merlion.models.anomaly.dbl:DynamicBaseline",
    IsolationForest="merlion.models.anomaly.isolation_forest:IsolationForest",
    # Forecast-based anomaly detection models
    ETSDetector="merlion.models.anomaly.forecast_based.ets:ETSDetector",
    MSESDetector="merlion.models.anomaly.forecast_based.mses:MSESDetector",
    ProphetDetector="merlion.models.anomaly.forecast_based.prophet:ProphetDetector",
    RandomCutForest="merlion.models.anomaly.random_cut_forest:RandomCutForest",
    SarimaDetector="merlion.models.anomaly.forecast_based.sarima:SarimaDetector",
    WindStats="merlion.models.anomaly.windstats:WindStats",
    SpectralResidual="merlion.models.anomaly.spectral_residual:SpectralResidual",
    ZMS="merlion.models.anomaly.zms:ZMS",
    DeepPointAnomalyDetector="merlion.models.anomaly.deep_point_anomaly_detector:DeepPointAnomalyDetector",
    # Multivariate Anomaly Detection models
    AutoEncoder="merlion.models.anomaly.autoencoder:AutoEncoder",
    VAE="merlion.models.anomaly.vae:VAE",
    DAGMM="merlion.models.anomaly.dagmm:DAGMM",
    LSTMED="merlion.models.anomaly.lstm_ed:LSTMED",
    # Change point detection models
    BOCPD="merlion.models.anomaly.change_point.bocpd:BOCPD",
    # Forecasting models
    Arima="merlion.models.forecast.arima:Arima",
    ETS="merlion.models.forecast.ets:ETS",
    MSES="merlion.models.forecast.smoother:MSES",
    Prophet="merlion.models.forecast.prophet:Prophet",
    Sarima="merlion.models.forecast.sarima:Sarima",
    StatThreshold="merlion.models.anomaly.stat_threshold:StatThreshold",
    VectorAR="merlion.models.forecast.vector_ar:VectorAR",
    RandomForestForecaster="merlion.models.forecast.trees:RandomForestForecaster",
    ExtraTreesForecaster="merlion.models.forecast.trees:ExtraTreesForecaster",
    LGBMForecaster="merlion.models.forecast.trees:LGBMForecaster",
    TransformerForecaster="merlion.models.forecast.transformer:TransformerForecaster",
    InformerForecaster="merlion.models.forecast.informer:InformerForecaster",
    AutoformerForecaster="merlion.models.forecast.autoformer:AutoformerForecaster",
    ETSformerForecaster="merlion.models.forecast.etsformer:ETSformerForecaster",
    DeepARForecaster="merlion.models.forecast.deep_ar:DeepARForecaster",
    # Ensembles
    DetectorEnsemble="merlion.models.ensemble.anomaly:DetectorEnsemble",
    ForecasterEnsemble="merlion.models.ensemble.forecast:ForecasterEnsemble",
    # Layers
    SeasonalityLayer="merlion.models.automl.seasonality:SeasonalityLayer",
    AutoETS="merlion.models.automl.autoets:AutoETS",
    AutoProphet="merlion.models.automl.autoprophet:AutoProphet",
    AutoSarima="merlion.models.automl.autosarima:AutoSarima",
)
class ModelFactory:
    """
    Factory which instantiates, loads, and deserializes models by their short name
    (as registered in ``import_alias``).
    """

    @classmethod
    def get_model_class(cls, name: str) -> Type[ModelBase]:
        """
        :param name: short name of the model (a key of ``import_alias``).
        :return: the model class registered under ``name``.
        """
        return dynamic_import(name, import_alias)

    @classmethod
    def create(cls, name, return_unused_kwargs=False, **kwargs) -> Union[ModelBase, Tuple[ModelBase, Dict]]:
        """
        Create a model from its short name and keyword arguments. Kwargs are split
        between the config, the model's constructor, and the model's state.

        :param name: short name of the model (a key of ``import_alias``).
        :param return_unused_kwargs: whether to also return kwargs that were not consumed.
        :param kwargs: config params, constructor params, and/or state attributes to set.
        :return: the model, plus a dict of unused kwargs if ``return_unused_kwargs``.
        """
        model_class = cls.get_model_class(name)
        config, kwargs = model_class.config_class.from_dict(kwargs, return_unused_kwargs=True)

        # initialize the model
        signature = inspect.signature(model_class)
        init_kwargs = {k: v for k, v in kwargs.items() if k in signature.parameters}
        kwargs = {k: v for k, v in kwargs.items() if k not in init_kwargs}
        model = model_class(config=config, **init_kwargs)

        # set model state with remaining kwargs, and return any unused kwargs if desired
        if return_unused_kwargs:
            state = {k: v for k, v in kwargs.items() if hasattr(model, k)}
            model._load_state(state)
            return model, {k: v for k, v in kwargs.items() if k not in state}
        model._load_state(kwargs)
        return model

    @classmethod
    def load(cls, name, model_path, **kwargs) -> ModelBase:
        """
        Load a saved model from disk, or create a fresh one if ``model_path is None``.

        :param name: short name of the model.
        :param model_path: directory the model was saved to, or ``None``.
        :param kwargs: config params to override manually.
        """
        if model_path is None:
            return cls.create(name, **kwargs)
        else:
            model_class = cls.get_model_class(name)
            return model_class.load(model_path, **kwargs)

    @classmethod
    def load_bytes(cls, obj, **kwargs) -> ModelBase:
        """
        Deserialize a model from the bytes produced by ``ModelBase.to_bytes()``.
        The class name is stored as the first element of the pickled tuple.
        """
        name = dill.loads(obj)[0]
        model_class = cls.get_model_class(name)
        return model_class.from_bytes(obj, **kwargs)
def instantiate_or_copy_model(model: Union[dict, ModelBase]):
    """
    Return a usable model object from either an existing model or a config dict.

    :param model: either a `ModelBase` (which is deep-copied, so the caller's object
        is not shared) or a `dict` of keyword args (including ``name``) forwarded
        to `ModelFactory.create`.
    :raises TypeError: if ``model`` is neither a `dict` nor a `ModelBase`.
    """
    if isinstance(model, ModelBase):
        return copy.deepcopy(model)
    elif isinstance(model, dict):
        try:
            return ModelFactory.create(**model)
        except Exception:
            # Log the offending dict for easier debugging, then re-raise
            # with the original traceback intact.
            logger.error(f"Invalid `dict` specifying a model config.\n\nGot {model}")
            raise
    else:
        raise TypeError(f"Expected model to be a `dict` or `ModelBase`. Got {model}")
import logging
import numpy as np
import pandas as pd
from sklearn.ensemble import IsolationForest as skl_IsolationForest
from merlion.models.anomaly.base import DetectorConfig, DetectorBase
from merlion.transform.moving_average import DifferenceTransform
from merlion.transform.sequence import TransformSequence
from merlion.transform.resample import Shingle
from merlion.utils import UnivariateTimeSeries, TimeSeries
logger = logging.getLogger(__name__)
class IsolationForestConfig(DetectorConfig):
    """
    Configuration class for `IsolationForest`.
    """

    # Difference the series, then shingle consecutive pairs of values into 2-d points.
    _default_transform = TransformSequence([DifferenceTransform(), Shingle(size=2, stride=1)])

    def __init__(self, max_n_samples: int = None, n_estimators: int = 100, n_jobs=-1, **kwargs):
        """
        :param max_n_samples: Maximum number of samples to allow the isolation
            forest to train on. Specify ``None`` to use all samples in the
            training data.
        :param n_estimators: number of trees in the isolation forest.
        :param n_jobs: number of parallel jobs sklearn may use when fitting/scoring
            the forest (``-1`` uses all available processors).
        """
        # sklearn interprets max_samples=1.0 as "use all training samples."
        self.max_n_samples = 1.0 if max_n_samples is None else max_n_samples
        self.n_estimators = n_estimators
        self.n_jobs = n_jobs
        # Isolation forest's uncalibrated scores are between 0 and 1
        kwargs["max_score"] = 1.0
        super().__init__(**kwargs)
class IsolationForest(DetectorBase):
    """
    The classic isolation forest algorithm, proposed in
    `Liu et al. 2008 <https://ieeexplore.ieee.org/document/4781136>`_
    """

    config_class = IsolationForestConfig

    def __init__(self, config: IsolationForestConfig):
        super().__init__(config)
        # random_state is fixed so that training is deterministic.
        self.model = skl_IsolationForest(
            max_samples=config.max_n_samples, n_estimators=config.n_estimators, random_state=0, n_jobs=config.n_jobs
        )

    @property
    def require_even_sampling(self) -> bool:
        return False

    @property
    def require_univariate(self) -> bool:
        return False

    def _train(self, train_data: pd.DataFrame, train_config=None) -> pd.DataFrame:
        """
        Fit the isolation forest and return its anomaly scores on the training data.
        """
        times, train_values = train_data.index, train_data.values
        self.model.fit(train_values)
        # score_samples returns values in [-1, 0), where more negative = more anomalous,
        # so negate them to obtain non-negative anomaly scores.
        train_scores = -self.model.score_samples(train_values)
        return pd.DataFrame(train_scores, index=times, columns=["anom_score"])

    def _get_anomaly_score(self, time_series: pd.DataFrame, time_series_prev: pd.DataFrame = None) -> pd.DataFrame:
        """
        Return anomaly scores for ``time_series``. ``time_series_prev`` is accepted for
        interface compatibility but unused, since each (shingled) point is scored independently.
        """
        # Negate the model's score (more negative = more anomalous). ``.values`` is already
        # an ndarray, so no extra copy is needed; name the column for consistency with _train().
        scores = -self.model.score_samples(time_series.values)
        return pd.DataFrame(scores, index=time_series.index, columns=["anom_score"])
import logging
import numpy as np
import pandas as pd
from merlion.models.anomaly.base import DetectorConfig, DetectorBase
from merlion.transform.resample import TemporalResample
from merlion.utils import TimeSeries, UnivariateTimeSeries
logger = logging.getLogger(__name__)
class SpectralResidualConfig(DetectorConfig):
    """
    Config class for `SpectralResidual` anomaly detector.
    """

    _default_transform = TemporalResample(granularity=None)

    def __init__(self, local_wind_sz=21, q=3, estimated_points=5, predicting_points=5, target_seq_index=None, **kwargs):
        r"""
        :param local_wind_sz: Number of previous saliency points to consider when computing the anomaly score
        :param q: Window size of local frequency average computations
        :param estimated_points: Number of padding points to add to the timeseries for saliency map calculations.
        :param predicting_points: Number of points to consider when computing gradient for padding points
        :param target_seq_index: Index of the univariate whose anomalies we want to detect.

        The Saliency Map is computed as follows:

        .. math::
            R(f) &= \log(A(\mathscr{F}(\textbf{x}))) - \left(\frac{1}{q}\right)_{1 \times q}
            * (A(\mathscr{F}(\textbf{x})) \\
            S_m &= \mathscr{F}^{-1} (R(f))

        where :math:`*` is the convolution operator, and :math:`\mathscr{F}` is the Fourier Transform.
        The anomaly scores then are computed as:

        .. math::
            S(x) = \frac{S(x) - \overline{S(\textbf{x})}}{\overline{S(\textbf{x})}}

        where :math:`\textbf{x}` are the last ``local_wind_sz`` points in the timeseries.

        The ``estimated_points`` and ``predicting_points`` parameters control how the end of the
        timeseries is padded with reasonable values, so that the later points sit in the middle of
        averaging windows rather than at the very end.
        """
        self.local_wind_sz = local_wind_sz
        self.q = q
        self.estimated_points = estimated_points
        self.predicting_points = predicting_points
        self.target_seq_index = target_seq_index
        super().__init__(**kwargs)
class SpectralResidual(DetectorBase):
    """
    Spectral Residual Algorithm for Anomaly Detection.

    Spectral Residual Anomaly Detection algorithm based on the algorithm described by
    `Ren et al. (2019) <https://arxiv.org/abs/1906.03821>`__. After taking the frequency spectrum, compute the
    log deviation from the mean. Use inverse fourier transform to obtain the saliency map. Anomaly scores
    for a point in the time series are obtained by comparing the saliency score of the point to the
    average of the previous points.
    """

    config_class = SpectralResidualConfig

    def __init__(self, config: SpectralResidualConfig = None):
        """
        :param config: model configuration. A default `SpectralResidualConfig` is used if omitted.
        """
        super().__init__(SpectralResidualConfig() if config is None else config)
        # Uniform kernel of width q: estimates the local average of the log amplitude spectrum.
        self.q_conv_map = np.ones(self.config.q) / self.config.q
        self.local_wind_sz = self.config.local_wind_sz
        # Un-normalized trailing-window kernel; normalized by the effective window
        # length inside _get_anomaly_score (the first few points see shorter windows).
        self.local_conv_map = np.ones(self.local_wind_sz)
        self.train_data = None

    @property
    def require_even_sampling(self) -> bool:
        # The FFT-based saliency map assumes evenly spaced samples.
        return True

    @property
    def require_univariate(self) -> bool:
        return False

    @property
    def target_seq_index(self) -> int:
        # Index of the univariate being scored; set automatically in _train for 1-d data.
        return self.config.target_seq_index

    def _get_saliency_map(self, values: np.array) -> np.array:
        """Compute the spectral-residual saliency map of ``values`` (same length as input)."""
        transform = np.fft.fft(values)
        log_amps = np.log(np.abs(transform))
        phases = np.angle(transform)
        avg_log_amps = np.convolve(log_amps, self.q_conv_map, mode="same")  # approximation
        # Spectral residual: deviation of the log amplitude from its local average.
        residuals = log_amps - avg_log_amps
        # Back to the time domain, re-using the original phases.
        saliency_map = np.abs(np.fft.ifft(np.exp(residuals + 1j * phases)))
        return saliency_map

    def _compute_grad(self, values: np.array) -> int:
        """Average slope between the final point and each of the preceding ``predicting_points`` points."""
        m = min(self.config.predicting_points, values.shape[0] - 1)
        x_n = values[-1]
        a = x_n - np.copy(values[-m - 1 : -1])
        # values[-m-1] is m steps away from x_n, ..., values[-2] is 1 step away,
        # so each difference is divided by its distance to get a per-step slope.
        b = np.flip(np.arange(1, m + 1))
        averages = a / b
        return np.average(averages)

    def _pad(self, values: np.array) -> np.array:
        """Pad the end of the series with ``estimated_points`` copies of a linearly extrapolated value."""
        grad = self._compute_grad(values)
        m = min(self.config.predicting_points, values.shape[0] - 1)
        item = values[-m] + grad * m
        return np.pad(values, ((0, self.config.estimated_points),), constant_values=item)

    def _get_anomaly_score(self, time_series: pd.DataFrame, time_series_prev: pd.DataFrame = None) -> pd.DataFrame:
        """
        Score the target univariate of ``time_series`` (with ``time_series_prev``
        prepended as context if given) via the spectral residual saliency map.
        """
        i = self.target_seq_index
        if time_series_prev is None:
            values = time_series.values[:, i]
        else:
            values = np.concatenate((time_series_prev.values[:, i], time_series.values[:, i]))
        # Pad so that later points sit in the middle of averaging windows rather than at the end.
        padded_values = self._pad(values) if self.config.estimated_points > 0 else values
        saliency_map = self._get_saliency_map(padded_values)
        if self.config.estimated_points > 0:
            saliency_map = saliency_map[: -self.config.estimated_points]
        # Trailing-window sum of the saliency map, truncated to the original length ...
        average_values = np.convolve(saliency_map, self.local_conv_map, mode="full")[: values.shape[0]]
        # ... normalized by the effective window length (shorter than local_wind_sz at the start).
        a = np.arange(1, average_values.shape[0] + 1)
        a = np.where(a > self.local_wind_sz, self.local_wind_sz, a)
        average_values = (average_values / a)[:-1]
        # Relative deviation of each saliency value from its trailing average; the
        # first point has no history, so it gets score 0. The epsilon avoids division by zero.
        output_values = np.append(np.asarray([0.0]), (saliency_map[1:] - average_values) / (average_values + 1e-8))
        return pd.DataFrame(output_values[-len(time_series) :], index=time_series.index)

    def _train(self, train_data: pd.DataFrame, train_config=None) -> pd.DataFrame:
        """Validate/infer ``target_seq_index`` and return anomaly scores on the training data."""
        dim = train_data.shape[1]
        if dim == 1:
            self.config.target_seq_index = 0
        elif self.target_seq_index is None:
            raise RuntimeError(
                f"Attempting to use the SR algorithm on a {dim}-variable "
                f"time series, but didn't specify a `target_seq_index` "
                f"indicating which univariate is the target."
            )
        assert 0 <= self.target_seq_index < dim, (
            f"Expected `target_seq_index` to be between 0 and {dim} "
            f"(the dimension of the transformed data), but got {self.target_seq_index}"
        )
        return self._get_anomaly_score(train_data)
from abc import abstractmethod
import copy
import inspect
import logging
from typing import Any, Dict, List, Union
import pandas as pd
from scipy.stats import norm
from merlion.models.base import Config, ModelBase, MultipleTimeseriesModelMixin
from merlion.plot import Figure, MTSFigure
from merlion.post_process.calibrate import AnomScoreCalibrator
from merlion.post_process.factory import PostRuleFactory
from merlion.post_process.sequence import PostRuleSequence
from merlion.post_process.threshold import AggregateAlarms, Threshold
from merlion.utils import TimeSeries, UnivariateTimeSeries
from merlion.utils.misc import call_with_accepted_kwargs
logger = logging.getLogger(__name__)
class DetectorConfig(Config):
    """
    Config object used to define an anomaly detection model.
    """

    # Default thresholding rule; deep-copied into each instance that doesn't supply its own.
    _default_threshold = AggregateAlarms(alm_threshold=3.0)

    # Class-level annotations documenting attributes assigned in __init__
    # (subclasses may override some of these as properties, e.g. NoCalibrationDetectorConfig).
    calibrator: AnomScoreCalibrator = None
    threshold: Threshold = None
    enable_calibrator: bool = True
    enable_threshold: bool = True

    def __init__(
        self, max_score: float = 1000, threshold=None, enable_calibrator=True, enable_threshold=True, **kwargs
    ):
        """
        Base class of the object used to configure an anomaly detection model.

        :param max_score: maximum possible uncalibrated anomaly score
        :param threshold: the rule to use for thresholding anomaly scores
        :param enable_threshold: whether to enable the thresholding rule
            when post-processing anomaly scores
        :param enable_calibrator: whether to enable a calibrator which
            automatically transforms all raw anomaly scores to be z-scores
            (i.e. distributed as N(0, 1)).
        """
        super().__init__(**kwargs)
        self.enable_threshold = enable_threshold
        self.enable_calibrator = enable_calibrator
        self.calibrator = AnomScoreCalibrator(max_score=max_score)
        # The threshold may be a rule object, a dict (e.g. deserialized JSON), or omitted.
        if threshold is None:
            self.threshold = copy.deepcopy(self._default_threshold)
        elif isinstance(threshold, dict):
            self.threshold = PostRuleFactory.create(**threshold)
        else:
            self.threshold = threshold

    @property
    def post_rule(self):
        """
        :return: The full post-processing rule. Includes calibration if
            ``enable_calibrator`` is ``True``, followed by thresholding if
            ``enable_threshold`` is ``True``.
        """
        rules = []
        if self.enable_calibrator and self.calibrator is not None:
            rules.append(self.calibrator)
        if self.enable_threshold and self.threshold is not None:
            rules.append(self.threshold)
        return PostRuleSequence(rules)

    @classmethod
    def from_dict(cls, config_dict: Dict[str, Any], return_unused_kwargs=False, calibrator=None, **kwargs):
        """
        Constructs a config from a dict, restoring the (serialized) calibrator after the
        main constructor has run so it isn't overwritten by the default one.
        """
        # Get the calibrator, but we will set it manually after the constructor by putting it in kwargs
        calibrator = config_dict.pop("calibrator", calibrator)
        config, kwargs = super().from_dict(config_dict, return_unused_kwargs=True, **kwargs)
        if calibrator is not None:
            calibrator = PostRuleFactory.create(**calibrator)
            config.calibrator = calibrator
        if len(kwargs) > 0 and not return_unused_kwargs:
            logger.warning(f"Unused kwargs: {kwargs}", stack_info=True)
        elif return_unused_kwargs:
            return config, kwargs
        return config
class NoCalibrationDetectorConfig(DetectorConfig):
    """
    Abstract config object for an anomaly detection model that should never
    perform anomaly score calibration.
    """

    def __init__(self, enable_calibrator=False, **kwargs):
        """
        :param enable_calibrator: ``False`` because this config assumes calibrated outputs from the model.
        """
        super().__init__(enable_calibrator=enable_calibrator, **kwargs)

    @property
    def calibrator(self):
        """
        :return: ``None``
        """
        return None

    @calibrator.setter
    def calibrator(self, calibrator):
        # no-op: the base constructor assigns a calibrator, which this config discards.
        pass

    @property
    def enable_calibrator(self):
        """
        :return: ``False``
        """
        return False

    @enable_calibrator.setter
    def enable_calibrator(self, e):
        # Only False is supported; warn (rather than raise) so deserialization of
        # configs that happen to carry enable_calibrator=True still succeeds.
        if e is not False:
            logger.warning(f"Tried to set enable_calibrator={e}, but only False supported for {type(self).__name__}.")
class DetectorBase(ModelBase):
    """
    Base class for an anomaly detection model.
    """

    config_class = DetectorConfig

    def __init__(self, config: DetectorConfig):
        """
        :param config: model configuration
        """
        super().__init__(config)

    @property
    def _default_post_rule_train_config(self):
        """
        :return: the default config to use when training the post-rule.
        """
        from merlion.evaluate.anomaly import TSADMetric

        t = self.config._default_threshold.alm_threshold
        # self.calibrator is only None if calibration has been manually disabled
        # and the anomaly scores are expected to be calibrated by get_anomaly_score(). If
        # self.config.enable_calibrator, the model will return a calibrated score.
        if self.calibrator is None or self.config.enable_calibrator or t == 0:
            q = None
        # otherwise, choose the quantile corresponding to the given threshold
        else:
            q = 2 * norm.cdf(t) - 1
        return dict(metric=TSADMetric.F1, unsup_quantile=q)

    @property
    def threshold(self):
        """The thresholding rule stored on the model's config."""
        return self.config.threshold

    @threshold.setter
    def threshold(self, threshold):
        self.config.threshold = threshold

    @property
    def calibrator(self):
        """The anomaly-score calibrator stored on the model's config (may be ``None``)."""
        return self.config.calibrator

    @property
    def post_rule(self):
        """The full post-processing rule (calibration and/or thresholding) from the config."""
        return self.config.post_rule

    def train(
        self, train_data: TimeSeries, train_config=None, anomaly_labels: TimeSeries = None, post_rule_train_config=None
    ) -> TimeSeries:
        """
        Trains the anomaly detector (unsupervised) and its post-rule (supervised, if labels are given) on train data.

        :param train_data: a `TimeSeries` of metric values to train the model.
        :param train_config: Additional training configs, if needed. Only required for some models.
        :param anomaly_labels: a `TimeSeries` indicating which timestamps are anomalous. Optional.
        :param post_rule_train_config: The config to use for training the model's post-rule. The model's default
            post-rule train config is used if none is supplied here.

        :return: A `TimeSeries` of the model's anomaly scores on the training data.
        """
        if train_config is None:
            train_config = copy.deepcopy(self._default_train_config)
        train_data = self.train_pre_process(train_data)
        # Some subclasses train on pandas DataFrames, others on TimeSeries objects.
        train_data = train_data.to_pd() if self._pandas_train else train_data
        train_result = call_with_accepted_kwargs(  # For ensembles
            self._train, train_data=train_data, train_config=train_config, anomaly_labels=anomaly_labels
        )
        return self.train_post_process(
            train_result=train_result, anomaly_labels=anomaly_labels, post_rule_train_config=post_rule_train_config
        )

    def train_post_process(
        self, train_result: Union[TimeSeries, pd.DataFrame], anomaly_labels=None, post_rule_train_config=None
    ) -> TimeSeries:
        """
        Converts the train result (anom scores on train data) into a TimeSeries object and trains the post-rule.

        :param train_result: Raw anomaly scores on the training data.
        :param anomaly_labels: a `TimeSeries` indicating which timestamps are anomalous. Optional.
        :param post_rule_train_config: The config to use for training the model's post-rule. The model's default
            post-rule train config is used if none is supplied here.
        """
        anomaly_scores = UnivariateTimeSeries.from_pd(train_result, name="anom_score").to_ts()
        if self.post_rule is not None:
            # Start from the model defaults and overlay any user-supplied post-rule config.
            kwargs = copy.copy(self._default_post_rule_train_config)
            if post_rule_train_config is not None:
                kwargs.update(post_rule_train_config)
            kwargs.update(anomaly_scores=anomaly_scores, anomaly_labels=anomaly_labels)
            call_with_accepted_kwargs(self.post_rule.train, **kwargs)
        return anomaly_scores

    @abstractmethod
    def _train(self, train_data: pd.DataFrame, train_config=None) -> pd.DataFrame:
        # Subclass hook: train on (transformed) data and return raw anomaly scores on it.
        raise NotImplementedError

    @abstractmethod
    def _get_anomaly_score(self, time_series: pd.DataFrame, time_series_prev: pd.DataFrame = None) -> pd.DataFrame:
        # Subclass hook: return raw (pre-post-rule) anomaly scores for (transformed) data.
        raise NotImplementedError

    def get_anomaly_score(self, time_series: TimeSeries, time_series_prev: TimeSeries = None) -> TimeSeries:
        """
        Returns the model's predicted sequence of anomaly scores.

        :param time_series: the `TimeSeries` we wish to predict anomaly scores
            for.
        :param time_series_prev: a `TimeSeries` immediately preceding
            ``time_series``. If given, we use it to initialize the time series
            anomaly detection model. Otherwise, we assume that ``time_series``
            immediately follows the training data.

        :return: a univariate `TimeSeries` of anomaly scores
        """
        # Ensure the dimensions are correct
        assert (
            time_series.dim == self.dim
        ), f"Expected time_series to have dimension {self.dim}, but got {time_series.dim}."
        if time_series_prev is not None:
            assert (
                time_series_prev.dim == self.dim
            ), f"Expected time_series_prev to have dimension {self.dim}, but got {time_series_prev.dim}."

        # Transform the time series
        time_series, time_series_prev = self.transform_time_series(time_series, time_series_prev)
        if self.require_univariate:
            assert time_series.dim == 1, (
                f"{type(self).__name__} model only accepts univariate time series, but time series "
                f"(after transform {self.transform}) has dimension {time_series.dim}."
            )
        time_series = time_series.to_pd()
        if time_series_prev is not None:
            time_series_prev = time_series_prev.to_pd()

        # Get the anomaly scores & ensure the dimensions are correct
        anom_scores = self._get_anomaly_score(time_series, time_series_prev)
        assert anom_scores.shape[1] == 1, f"Expected anomaly scores returned by {type(self)} to be univariate."
        return UnivariateTimeSeries.from_pd(anom_scores, name="anom_score").to_ts()

    def get_anomaly_label(self, time_series: TimeSeries, time_series_prev: TimeSeries = None) -> TimeSeries:
        """
        Returns the model's predicted sequence of anomaly scores, processed
        by any relevant post-rules (calibration and/or thresholding).

        :param time_series: the `TimeSeries` we wish to predict anomaly scores
            for.
        :param time_series_prev: a `TimeSeries` immediately preceding
            ``time_series``. If given, we use it to initialize the time series
            anomaly detection model. Otherwise, we assume that ``time_series``
            immediately follows the training data.

        :return: a univariate `TimeSeries` of anomaly scores, filtered by the
            model's post-rule
        """
        scores = self.get_anomaly_score(time_series, time_series_prev)
        return self.post_rule(scores) if self.post_rule is not None else scores

    def get_figure(
        self,
        time_series: TimeSeries,
        time_series_prev: TimeSeries = None,
        *,
        filter_scores=True,
        plot_time_series_prev=False,
        fig: Figure = None,
        **kwargs,
    ) -> Figure:
        """
        :param time_series: The `TimeSeries` we wish to plot & predict anomaly scores for.
        :param time_series_prev: a `TimeSeries` immediately preceding
            ``time_stamps``. If given, we use it to initialize the time series
            model. Otherwise, we assume that ``time_stamps`` immediately follows
            the training data.
        :param filter_scores: whether to filter the anomaly scores by the
            post-rule before plotting them.
        :param plot_time_series_prev: whether to plot ``time_series_prev`` (and
            the model's fit for it). Only used if ``time_series_prev`` is given.
        :param fig: a `Figure` we might want to add anomaly scores onto.

        :return: a `Figure` of the model's anomaly score predictions.
        """
        f = self.get_anomaly_label if filter_scores else self.get_anomaly_score
        scores = f(time_series, time_series_prev=time_series_prev, **kwargs)
        scores = scores.univariates[scores.names[0]]

        # Get the severity level associated with each value & convert things to
        # numpy arrays as well
        assert time_series.dim == 1, (
            f"Plotting only supported for univariate time series, but got a"
            f"time series of dimension {time_series.dim}"
        )
        time_series = time_series.univariates[time_series.names[0]]

        if fig is None:
            if time_series_prev is not None and plot_time_series_prev:
                k = time_series_prev.names[0]
                time_series_prev = time_series_prev.univariates[k]
            elif not plot_time_series_prev:
                time_series_prev = None
            fig = Figure(y=time_series, y_prev=time_series_prev, anom=scores)
        else:
            if fig.y is None:
                fig.y = time_series
            fig.anom = scores
        return fig

    def plot_anomaly(
        self,
        time_series: TimeSeries,
        time_series_prev: TimeSeries = None,
        *,
        filter_scores=True,
        plot_time_series_prev=False,
        figsize=(1000, 600),
        ax=None,
    ):
        """
        Plots the time series in matplotlib as a line graph, with points in the
        series overlaid as points color-coded to indicate their severity as
        anomalies.

        :param time_series: The `TimeSeries` we wish to plot & predict anomaly scores for.
        :param time_series_prev: a `TimeSeries` immediately preceding
            ``time_series``. Plotted as context if given.
        :param filter_scores: whether to filter the anomaly scores by the
            post-rule before plotting them.
        :param plot_time_series_prev: whether to plot ``time_series_prev`` (and
            the model's fit for it). Only used if ``time_series_prev`` is given.
        :param figsize: figure size in pixels
        :param ax: matplotlib axes to add this plot to

        :return: matplotlib figure & axes
        """
        metric_name = time_series.names[0]
        title = f"{type(self).__name__}: Anomalies in {metric_name}"
        fig = self.get_figure(
            time_series=time_series,
            time_series_prev=time_series_prev,
            filter_scores=filter_scores,
            plot_time_series_prev=plot_time_series_prev,
        )
        return fig.plot(title=title, figsize=figsize, ax=ax)

    def plot_anomaly_plotly(
        self,
        time_series: TimeSeries,
        time_series_prev: TimeSeries = None,
        *,
        filter_scores=True,
        plot_time_series_prev=False,
        figsize=None,
    ):
        """
        Plots the time series in plotly as a line graph, with points in the
        series overlaid as points color-coded to indicate their severity as
        anomalies.

        :param time_series: The `TimeSeries` we wish to plot & predict anomaly scores for.
        :param time_series_prev: a `TimeSeries` immediately preceding
            ``time_series``. Plotted as context if given.
        :param filter_scores: whether to filter the anomaly scores by the
            post-rule before plotting them.
        :param plot_time_series_prev: whether to plot ``time_series_prev`` (and
            the model's fit for it). Only used if ``time_series_prev`` is given.
        :param figsize: figure size in pixels

        :return: plotly figure
        """
        title = f"{type(self).__name__}: Anomalies in Time Series"
        f = self.get_anomaly_label if filter_scores else self.get_anomaly_score
        scores = f(time_series, time_series_prev=time_series_prev)
        fig = MTSFigure(y=time_series, y_prev=time_series_prev, anom=scores)
        return fig.plot_plotly(title=title, figsize=figsize)
class MultipleTimeseriesDetectorMixin(MultipleTimeseriesModelMixin):
    """
    Abstract mixin for anomaly detectors supporting training on multiple time series.
    """

    @abstractmethod
    def train_multiple(
        self,
        multiple_train_data: List[TimeSeries],
        train_config=None,
        anomaly_labels: List[TimeSeries] = None,
        post_rule_train_config=None,
    ) -> List[TimeSeries]:
        """
        Trains the anomaly detector (unsupervised) and its post-rule
        (supervised, if labels are given) on the input multiple time series.

        :param multiple_train_data: a list of `TimeSeries` of metric values to train the model.
        :param anomaly_labels: a list of `TimeSeries` indicating which timestamps are anomalous. Optional.
        :param train_config: Additional training configs, if needed. Only required for some models.
        :param post_rule_train_config: The config to use for training the
            model's post-rule. The model's default post-rule train config is
            used if none is supplied here.

        :return: A list of `TimeSeries` of the model's anomaly scores on the training
            data with each element corresponds to time series from ``multiple_train_data``.
        """
        raise NotImplementedError
import bisect
import copy
import logging
from os.path import abspath, dirname, join, pathsep
import numpy as np
import pandas as pd
from py4j.java_gateway import JavaGateway
from merlion.models.anomaly.base import DetectorConfig, DetectorBase
from merlion.transform.moving_average import DifferenceTransform
from merlion.transform.sequence import TransformSequence
from merlion.transform.resample import Shingle
from merlion.post_process.threshold import AggregateAlarms
from merlion.utils import UnivariateTimeSeries, TimeSeries
from merlion.utils.resample import to_timestamp
logger = logging.getLogger(__name__)
class JVMSingleton:
    """Lazily launches a single py4j ``JavaGateway`` (JVM) and caches it for all callers."""

    _gateway = None

    @classmethod
    def gateway(cls):
        """Return the shared gateway, launching the JVM with the bundled RCF jars on first use."""
        if cls._gateway is None:
            jar_dir = join(dirname(dirname(dirname(abspath(__file__)))), "resources")
            jar_names = ["gson-2.8.9.jar", "randomcutforest-core-1.0.jar", "randomcutforest-serialization-json-1.0.jar"]
            cp = pathsep.join(join(jar_dir, name) for name in jar_names)
            # --add-opens necessary to avoid exceptions in newer Java versions
            opts = ["--add-opens=java.base/java.util=ALL-UNNAMED", "--add-opens=java.base/java.nio=ALL-UNNAMED"]
            cls._gateway = JavaGateway.launch_gateway(classpath=cp, javaopts=opts)
        return cls._gateway
class RandomCutForestConfig(DetectorConfig):
    """
    Configuration class for `RandomCutForest`. Refer to
    https://github.com/aws/random-cut-forest-by-aws/tree/main/Java for
    further documentation and defaults of the Java class.
    """

    # Difference the series, then shingle it into overlapping windows of size 5.
    _default_transform = TransformSequence([DifferenceTransform(), Shingle(size=5, stride=1)])

    def __init__(
        self,
        n_estimators: int = 100,
        parallel: bool = False,
        seed: int = None,
        max_n_samples: int = 512,
        thread_pool_size: int = 1,
        online_updates: bool = False,
        **kwargs
    ):
        """
        :param n_estimators: The number of trees in this forest.
        :param parallel: If true, then the forest will create an internal thread
            pool. Forest updates and traversals will be submitted to this thread
            pool, and individual trees will be updated or traversed in parallel.
            For larger shingle sizes, dimensions, and number of trees,
            parallelization may improve throughput.
            We recommend users benchmark against their target use case.
        :param seed: the random seed
        :param max_n_samples: The number of samples retained by by stream
            samplers in this forest.
        :param thread_pool_size: The number of threads to use in the internal
            thread pool.
        :param online_updates: Whether to update the model while running
            using it to evaluate new data.
        """
        self.n_estimators = n_estimators
        self.parallel = parallel
        self.seed = seed
        self.max_n_samples = max_n_samples
        self.thread_pool_size = thread_pool_size
        self.online_updates = online_updates
        # Force the calibrator's max uncalibrated score to floor(log2(sample size)) + 1.
        # NOTE(review): presumably this is the RCF score's effective upper bound — confirm
        # against the Java library's documentation.
        kwargs["max_score"] = np.floor(np.log2(max_n_samples)) + 1
        super().__init__(**kwargs)

    @property
    def _default_threshold(self):
        # When scores are left uncalibrated, scale the alarm threshold to the raw score range;
        # otherwise use the standard z-score threshold of 3.
        if not self.enable_calibrator:
            return AggregateAlarms(alm_threshold=self.calibrator.max_score / 5)
        return AggregateAlarms(alm_threshold=3.0)

    @property
    def java_params(self):
        """
        :return: dict of kwargs for the Java ``RandomCutForest`` builder, with ``None`` values dropped.
        """
        items = [
            ("numberOfTrees", self.n_estimators),
            ("randomSeed", self.seed),
            ("sampleSize", self.max_n_samples),
            ("threadPoolSize", self.thread_pool_size if self.parallel else None),
            ("parallelExecutionEnabled", self.parallel and self.thread_pool_size is not None),
        ]
        return {k: v for k, v in items if v is not None}
class RandomCutForest(DetectorBase):
    """
    The random cut forest is a refinement of the classic isolation forest
    algorithm. It was proposed in `Guha et al. 2016 <https://proceedings.mlr.press/v48/guha16.pdf>`__.
    """

    config_class = RandomCutForestConfig

    def __init__(self, config: RandomCutForestConfig):
        """
        :param config: model configuration
        """
        super().__init__(config)
        # Java RandomCutForest object; constructed in _train once the data dimension is known.
        self.forest = None

    @property
    def require_even_sampling(self) -> bool:
        return False

    @property
    def require_univariate(self) -> bool:
        return False

    @property
    def online_updates(self) -> bool:
        # Whether the forest keeps updating itself on new points at inference time.
        return self.config.online_updates

    def __getstate__(self):
        # Copy state, remove forest, and then deepcopy
        # (since we can't deepcopy the forest)
        state = copy.copy(self.__dict__)
        forest = state.pop("forest", None)
        state = copy.deepcopy(state)
        # Set the forest in the copied state to the serialized version
        # The transform is specified the config, so don't save it
        RCFSerDe = JVMSingleton.gateway().jvm.com.amazon.randomcutforest.serialize.RandomCutForestSerDe
        state["forest"] = str(RCFSerDe().toJson(forest))
        return state

    def __setstate__(self, state):
        # Remove the serialized forest from the state before setting it
        # Set the forest manually after deserializing it
        RCFSerDe = JVMSingleton.gateway().jvm.com.amazon.randomcutforest.serialize.RandomCutForestSerDe
        forest = RCFSerDe().fromJson(state.pop("forest", None))
        super().__setstate__(state)
        self.forest = forest

    def _forest_predict(self, data: np.ndarray, online_updates: bool):
        """
        Score each row of ``data`` with the Java forest, optionally updating the
        forest with each point after scoring it.

        :param data: (n, d) array of points to score.
        :param online_updates: whether to feed each point back into the forest after scoring.
        :return: 1-d array of n anomaly scores.
        """
        scores = []
        n, d = data.shape
        gateway = JVMSingleton.gateway()
        # Ship the whole array into the JVM as one big-endian byte buffer, which is far
        # cheaper than converting each value individually across the py4j bridge.
        data_bytes = data.astype(dtype=">d").tobytes()
        data_jarray = gateway.new_array(gateway.jvm.double, n * d)
        gateway.jvm.java.nio.ByteBuffer.wrap(data_bytes).asDoubleBuffer().get(data_jarray)
        for i in range(n):
            jpoint = data_jarray[d * i : d * (i + 1)]
            scores.append(self.forest.getAnomalyScore(jpoint))
            if online_updates:
                self.forest.update(jpoint)
        return np.array(scores)

    def _train(self, train_data: pd.DataFrame, train_config=None) -> pd.DataFrame:
        """Build the Java forest for this data's dimension and score the training data."""
        times, train_values = train_data.index, train_data.values
        # Initialize the RRCF, now that we know the dimension of the data
        JRCF = JVMSingleton.gateway().jvm.com.amazon.randomcutforest.RandomCutForest
        forest = JRCF.builder()
        forest = forest.dimensions(train_data.shape[1])
        for k, v in self.config.java_params.items():
            forest = getattr(forest, k)(v)
        self.forest = forest.build()
        # Always update the forest with the training data, regardless of online_updates.
        train_scores = self._forest_predict(train_values, online_updates=True)
        return pd.DataFrame(train_scores, index=times, columns=["anom_score"])

    def _get_anomaly_score(self, time_series: pd.DataFrame, time_series_prev: pd.DataFrame = None) -> pd.DataFrame:
        """
        Score ``time_series``, only applying online updates to points strictly after
        the last timestamp the model has already been trained/updated on.
        """
        if self.last_train_time is None:
            raise RuntimeError("train() must be called before you can invoke get_anomaly_score()")
        # t0 = index of the first point after the last train time.
        t0 = bisect.bisect_right(time_series.index, self.last_train_time)
        if 0 < t0 < len(time_series):
            # Points at or before last_train_time are scored without updates; later points
            # may update the forest if online updates are enabled.
            old = self._forest_predict(time_series.values[:t0], False)
            new = self._forest_predict(time_series.values[t0:], self.online_updates)
            scores = np.concatenate((old, new))
        else:
            scores = self._forest_predict(time_series.values, self.online_updates and t0 > 0)
        if self.online_updates and t0 > 0:
            self.last_train_time = time_series.index[-1]
        return pd.DataFrame(scores, index=time_series.index)
try:
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
from typing import Sequence
import numpy as np
import pandas as pd
from merlion.models.base import NormalizingConfig
from merlion.models.anomaly.base import DetectorBase, DetectorConfig
from merlion.post_process.threshold import AggregateAlarms
from merlion.utils.misc import ProgressBar, initializer
from merlion.models.utils.rolling_window_dataset import RollingWindowDataset
class AutoEncoderConfig(DetectorConfig, NormalizingConfig):
    """
    Configuration class for AutoEncoder. The normalization is inherited from `NormalizingConfig`.
    The input data will be standardized automatically.
    """

    # Reconstruction errors are non-negative, so threshold on the absolute score.
    _default_threshold = AggregateAlarms(alm_threshold=2.5, abs_score=True)

    # @initializer assigns each constructor argument to an attribute of the same name;
    # the AutoEncoder model reads e.g. config.hidden_size even though nothing is
    # assigned explicitly below.
    @initializer
    def __init__(
        self,
        hidden_size: int = 5,
        layer_sizes: Sequence[int] = (25, 10, 5),
        sequence_len: int = 1,
        lr: float = 1e-3,
        batch_size: int = 512,
        num_epochs: int = 50,
        **kwargs
    ):
        """
        :param hidden_size: The latent size
        :param layer_sizes: The hidden layer sizes for the MLP encoder and decoder,
            e.g., (25, 10, 5) for encoder and (5, 10, 25) for decoder
        :param sequence_len: The input series length, e.g., input = [x(t-sequence_len+1)...,x(t-1),x(t)]
        :param lr: The learning rate during training
        :param batch_size: The batch size during training
        :param num_epochs: The number of training epochs
        """
        super().__init__(**kwargs)
class AutoEncoder(DetectorBase):
    """
    The autoencoder-based multivariate time series anomaly detector.
    This detector utilizes an autoencoder to infer the correlations between
    different time series and estimate the joint distribution of the variables
    for anomaly detection.

    - paper: `Pierre Baldi. Autoencoders, Unsupervised Learning, and Deep Architectures. 2012.
      <https://proceedings.mlr.press/v27/baldi12a.html>`_
    """

    config_class = AutoEncoderConfig

    @property
    def require_even_sampling(self) -> bool:
        return False

    @property
    def require_univariate(self) -> bool:
        return False

    def __init__(self, config: AutoEncoderConfig):
        """
        :param config: model configuration
        """
        super().__init__(config)
        self.hidden_size = config.hidden_size
        self.layer_sizes = config.layer_sizes
        self.k = config.sequence_len
        self.lr = config.lr
        self.batch_size = config.batch_size
        self.num_epochs = config.num_epochs
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        # Built lazily in _train, once the data dimension is known.
        self.model = None
        self.data_dim = None

    def _build_model(self, dim):
        """Build the AE module for ``dim``-variate input over windows of length ``self.k``."""
        model = AEModule(input_size=dim * self.k, hidden_size=self.hidden_size, layer_sizes=self.layer_sizes)
        return model

    def _train(self, train_data: pd.DataFrame, train_config=None):
        """
        Train the autoencoder on rolling windows of ``train_data``.

        :return: anomaly scores (reconstruction errors) on the training data.
        """
        self.model = self._build_model(train_data.shape[1]).to(self.device)
        self.data_dim = train_data.shape[1]
        loader = RollingWindowDataset(
            train_data,
            target_seq_index=None,
            shuffle=True,
            flatten=False,
            n_past=self.k,
            n_future=0,
            batch_size=self.batch_size,
        )
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
        bar = ProgressBar(total=self.num_epochs)

        self.model.train()
        for epoch in range(self.num_epochs):
            total_loss = 0
            for i, (batch, _, _, _) in enumerate(loader):
                batch = torch.tensor(batch, dtype=torch.float, device=self.device)
                loss = self.model.loss(batch)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # Accumulate a Python float rather than the loss tensor, so we don't
                # keep per-step tensors (and device memory) alive across the epoch.
                total_loss += loss.item()
            if bar is not None:
                bar.print(epoch + 1, prefix="", suffix="Complete, Loss {:.4f}".format(total_loss / len(train_data)))
        return self._get_anomaly_score(train_data)

    def _get_anomaly_score(self, time_series: pd.DataFrame, time_series_prev: pd.DataFrame = None) -> pd.DataFrame:
        """
        Score ``time_series`` by reconstruction error. If ``time_series_prev`` is given,
        it is prepended as context so the first windows of ``time_series`` are well-defined.
        """
        self.model.eval()
        # FIX: the condition was inverted (`is None`), which attempted to concatenate a
        # None context (raising in pd.concat) and silently ignored the context when it
        # WAS supplied. Concatenate only when time_series_prev is actually provided.
        ts = pd.concat((time_series_prev, time_series)) if time_series_prev is not None else time_series
        loader = RollingWindowDataset(
            ts,
            target_seq_index=None,
            shuffle=False,
            flatten=False,
            n_past=self.k,
            n_future=0,
            batch_size=self.batch_size,
        )
        scores = []
        for y, _, _, _ in loader:
            y = torch.tensor(y, dtype=torch.float, device=self.device)
            scores.append(self.model(y).cpu().data.numpy())
        # The first k-1 points have no complete window; replicate the first score for them.
        scores = np.concatenate([np.ones(self.k - 1) * scores[0][0], *scores])
        return pd.DataFrame(scores[-len(time_series) :], index=time_series.index)
class AEModule(nn.Module):
    """
    The autoencoder module where the encoder and decoder are both MLPs.
    :meta private:
    """

    def __init__(self, input_size, hidden_size, layer_sizes, activation=nn.ReLU, dropout_prob=0.0):
        """
        :param input_size: The input dimension
        :param hidden_size: The latent size of the autoencoder
        :param layer_sizes: The hidden layer sizes for the encoder and decoder
        :param activation: The activation function for the hidden layers
        :param dropout_prob: The dropout rate
        """
        super().__init__()
        # Encoder compresses the flattened window down to the latent size; the decoder
        # mirrors the layer sizes to map it back to the input size.
        self.encoder = MLP(
            input_size=input_size,
            output_size=hidden_size,
            layer_sizes=layer_sizes,
            activation=activation,
            last_layer_activation=activation,
            dropout_prob=dropout_prob,
        )
        self.decoder = MLP(
            input_size=hidden_size,
            output_size=input_size,
            layer_sizes=layer_sizes[::-1],
            activation=activation,
            last_layer_activation=nn.Identity,
            dropout_prob=dropout_prob,
        )
        self.loss_func = nn.MSELoss()

    def _reconstruct(self, x):
        """Flatten ``x`` to (batch, features) and run it through the encoder/decoder pair."""
        flat = torch.flatten(x, start_dim=1)
        return flat, self.decoder(self.encoder(flat))

    def forward(self, x):
        """Per-sample reconstruction error (L2 norm), used as the anomaly score."""
        flat, recon = self._reconstruct(x)
        return torch.norm(flat - recon, dim=1)

    def loss(self, x):
        """Mean-squared reconstruction error over the batch, used as the training objective."""
        flat, recon = self._reconstruct(x)
        return self.loss_func(recon, flat)
class MLP(nn.Module):
    """
    The MLP module used in the encoder and decoder
    :meta private:
    """

    def __init__(self, input_size, output_size, layer_sizes, activation, last_layer_activation, dropout_prob):
        """
        :param input_size: The input dimension
        :param output_size: The output dimension
        :param layer_sizes: The hidden layer sizes
        :param activation: The activation function for the hidden layers
        :param last_layer_activation: The activation function for the last layer
        :param dropout_prob: The dropout rate
        """
        super().__init__()
        # Each hidden layer is Linear -> activation -> Dropout; the output layer is
        # Linear -> last_layer_activation (no dropout).
        dims = [input_size, *layer_sizes]
        modules = []
        for fan_in, fan_out in zip(dims[:-1], dims[1:]):
            modules += [nn.Linear(fan_in, fan_out), activation(), nn.Dropout(p=dropout_prob)]
        modules += [nn.Linear(dims[-1], output_size), last_layer_activation()]
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        return self.layers(x)
import copy
import random
from typing import List
try:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
import numpy as np
import pandas as pd
from merlion.utils import TimeSeries
from merlion.models.base import NormalizingConfig
from merlion.models.anomaly.base import DetectorBase, DetectorConfig, MultipleTimeseriesDetectorMixin
from merlion.post_process.threshold import AggregateAlarms
from merlion.utils.misc import ProgressBar, initializer
from merlion.models.utils.rolling_window_dataset import RollingWindowDataset
class DAGMMConfig(DetectorConfig, NormalizingConfig):
    """
    Configuration class for DAGMM. The normalization is inherited from `NormalizingConfig`.
    The input data will be standardized automatically.
    """

    # Default post-rule: aggregate alarms with threshold 2.5 applied to absolute scores.
    _default_threshold = AggregateAlarms(alm_threshold=2.5, abs_score=True)

    # @initializer automatically assigns every named constructor argument to an instance
    # attribute of the same name, so no explicit ``self.x = x`` lines are needed here.
    @initializer
    def __init__(
        self,
        gmm_k: int = 3,
        hidden_size: int = 5,
        sequence_len: int = 1,
        lambda_energy: float = 0.1,
        lambda_cov_diag: float = 0.005,
        lr: float = 1e-3,
        batch_size: int = 256,
        num_epochs: int = 10,
        **kwargs
    ):
        """
        :param gmm_k: The number of Gaussian distributions
        :param hidden_size: The hidden size of the autoencoder module in DAGMM
        :param sequence_len: The input series length, e.g., input = [x(t-sequence_len+1)...,x(t-1),x(t)]
        :param lambda_energy: The regularization weight for the energy term
        :param lambda_cov_diag: The regularization weight for the covariance diagonal entries
        :param lr: The learning rate during training
        :param batch_size: The batch size during training
        :param num_epochs: The number of training epochs
        :param kwargs: Forwarded to `DetectorConfig` / `NormalizingConfig`.
        """
        super().__init__(**kwargs)
class DAGMM(DetectorBase, MultipleTimeseriesDetectorMixin):
    """
    Deep autoencoding Gaussian mixture model for anomaly detection (DAGMM).
    DAGMM combines an autoencoder with a Gaussian mixture model to model the distribution
    of the reconstruction errors. DAGMM jointly optimizes the parameters of the deep autoencoder
    and the mixture model simultaneously in an end-to-end fashion.

    - paper: `Bo Zong, Qi Song, Martin Renqiang Min, Wei Cheng, Cristian Lumezanu, Daeki Cho and Haifeng Chen.
      Deep Autoencoding Gaussian Mixture Model for Unsupervised Anomaly Detection. 2018.
      <https://openreview.net/forum?id=BJJLHbb0->`_.
    """

    config_class = DAGMMConfig

    def __init__(self, config: DAGMMConfig):
        super().__init__(config)
        self.gmm_k = config.gmm_k
        self.hidden_size = config.hidden_size
        self.sequence_length = config.sequence_len
        self.lambda_energy = config.lambda_energy
        self.lambda_cov_diag = config.lambda_cov_diag
        self.lr = config.lr
        self.batch_size = config.batch_size
        self.num_epochs = config.num_epochs
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.data_dim = -1
        # The model and optimizer are built lazily on the first call to ``_train``,
        # once the input dimension is known.
        self.dagmm, self.optimizer = None, None
        self.train_energy, self._threshold = None, None

    @property
    def require_even_sampling(self) -> bool:
        return False

    @property
    def require_univariate(self) -> bool:
        return False

    @property
    def _default_train_config(self):
        return dict()

    def _build_model(self, dim):
        """Construct the DAGMM module for ``dim``-dimensional input."""
        # Grow the latent size slightly with the input dimension.
        hidden_size = self.hidden_size + int(dim / 20)
        dagmm = DAGMMModule(
            autoencoder=AEModule(n_features=dim, sequence_length=self.sequence_length, hidden_size=hidden_size),
            n_gmm=self.gmm_k,
            # +2 accounts for the cosine & relative-Euclidean reconstruction features
            # appended to the latent code in DAGMMModule.forward.
            latent_dim=hidden_size + 2,
            device=self.device,
        )
        return dagmm

    def _step(self, input_data, max_grad_norm=5):
        """Run one optimization step on a batch; returns (total_loss, energy, recon_error, cov_diag)."""
        enc, dec, z, gamma = self.dagmm(input_data)
        total_loss, sample_energy, recon_error, cov_diag = self.dagmm.loss_func(
            x=input_data,
            recon_x=dec,
            z=z,
            gamma=gamma,
            lambda_energy=self.lambda_energy,
            lambda_cov_diag=self.lambda_cov_diag,
        )
        self.optimizer.zero_grad()
        # Clamp the loss and clip gradients to guard against blow-ups early in training.
        total_loss = torch.clamp(total_loss, max=1e7)
        total_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.dagmm.parameters(), max_grad_norm)
        self.optimizer.step()
        return total_loss, sample_energy, recon_error, cov_diag

    def _train(self, train_data: pd.DataFrame, train_config=None):
        """Train (or continue training) the DAGMM module and return train-set anomaly scores."""
        data_loader = RollingWindowDataset(
            train_data,
            target_seq_index=None,
            shuffle=True,
            flatten=False,
            n_past=self.sequence_length,
            n_future=0,
            batch_size=self.batch_size,
        )
        # Build model/optimizer only once, so repeated calls continue training.
        if self.dagmm is None and self.optimizer is None:
            self.dagmm = self._build_model(train_data.shape[1]).to(self.device)
            self.optimizer = torch.optim.Adam(self.dagmm.parameters(), lr=self.lr)
        self.dagmm.train()
        self.data_dim = train_data.shape[1]
        bar = ProgressBar(total=self.num_epochs)

        for epoch in range(self.num_epochs):
            total_loss, recon_error = 0, 0
            for input_data, _, _, _ in data_loader:
                input_data = torch.tensor(input_data, dtype=torch.float, device=self.device)
                loss, _, error, _ = self._step(input_data)
                total_loss += loss
                recon_error += error
            if bar is not None:
                bar.print(
                    epoch + 1,
                    prefix="",
                    suffix="Complete, Loss {:.4f}, Recon_error: {:.4f}".format(
                        total_loss / len(data_loader), recon_error / len(data_loader)
                    ),
                )
        return self._get_anomaly_score(train_data)

    def _get_anomaly_score(self, time_series: pd.DataFrame, time_series_prev: pd.DataFrame = None) -> pd.DataFrame:
        """Score ``time_series`` by GMM sample energy, averaged over overlapping windows."""
        self.dagmm.eval()
        # BUG FIX: the condition was previously inverted (``is None``), which silently
        # ignored the provided history; prepend ``time_series_prev`` when it is present.
        ts = pd.concat((time_series_prev, time_series)) if time_series_prev is not None else time_series
        data_loader = RollingWindowDataset(
            ts,
            target_seq_index=None,
            shuffle=False,
            flatten=False,
            n_past=self.sequence_length,
            n_future=0,
            batch_size=1,
        )
        # Lattice of per-window energies; row r holds the windows starting at
        # positions congruent to r (mod sequence_length), so each timestamp's
        # final score is the nanmean over all windows that cover it.
        test_energy = np.full((self.sequence_length, ts.shape[0]), np.nan)

        for i, (sequence, _, _, _) in enumerate(data_loader):
            sequence = torch.tensor(sequence, dtype=torch.float, device=self.device)
            enc, dec, z, gamma = self.dagmm(sequence.float())
            sample_energy, _ = self.dagmm.compute_energy(z, size_average=False)
            idx = (i % self.sequence_length, np.arange(i, i + self.sequence_length))
            test_energy[idx] = sample_energy.cpu().data.numpy()

        test_energy = np.nanmean(test_energy, axis=0)
        return pd.DataFrame(test_energy[-len(time_series) :], index=time_series.index)

    def train_multiple(
        self,
        multiple_train_data: List[TimeSeries],
        train_config=None,
        anomaly_labels: List[TimeSeries] = None,
        post_rule_train_config=None,
    ) -> List[TimeSeries]:
        """
        Trains the anomaly detector (unsupervised) and its post-rule
        (supervised, if labels are given) on the input multiple time series.

        :param multiple_train_data: a list of `TimeSeries` of metric values to train the model.
        :param train_config: Additional training config dict with keys:

            * | "n_epochs": ``int`` indicating how many times the model must be
              | trained on the timeseries in ``multiple_train_data``. Defaults to 1.
            * | "shuffle": ``bool`` indicating if the ``multiple_train_data`` collection
              | should be shuffled before every epoch. Defaults to True if "n_epochs" > 1.
        :param anomaly_labels: a list of `TimeSeries` indicating which timestamps are anomalous. Optional.
        :param post_rule_train_config: The config to use for training the
            model's post-rule. The model's default post-rule train config is
            used if none is supplied here.
        :return: A list of `TimeSeries` of the model's anomaly scores on the training
            data with each element corresponds to time series from ``multiple_train_data``.
        """
        if train_config is None:
            train_config = copy.deepcopy(self._default_train_config)
        n_epochs = train_config.pop("n_epochs", 1)
        shuffle = train_config.pop("shuffle", n_epochs > 1)

        if anomaly_labels is not None:
            assert len(multiple_train_data) == len(anomaly_labels)
        else:
            anomaly_labels = [None] * len(multiple_train_data)

        # Work on a copy so in-place shuffling does not mutate the caller's list.
        multiple_train_data = list(multiple_train_data)
        train_scores_list = []
        for _ in range(n_epochs):
            if shuffle:
                random.shuffle(multiple_train_data)
            for train_data, anomaly_series in zip(multiple_train_data, anomaly_labels):
                train_scores_list.append(
                    self.train(
                        train_data=train_data,
                        train_config=train_config,
                        anomaly_labels=anomaly_series,
                        post_rule_train_config=post_rule_train_config
                        # FIXME: the post-rule (calibrator and threshold) is trained individually on each time series
                        #  but ideally it needs to be re-trained on all of the `train_scores_list`
                    )
                )
        return train_scores_list
class AEModule(nn.Module):
    """
    The autoencoder module used in DAGMM.
    :meta private:
    """

    def __init__(self, n_features, sequence_length, hidden_size, activation=nn.Tanh):
        """
        :param n_features: The number of the input features (number of variables)
        :param sequence_length: The length of the input sequence
        :param hidden_size: The latent size
        :param activation: The activation function class for the hidden layers
        """
        super().__init__()
        input_length = n_features * sequence_length
        # Decoder layer widths: powers of two from ~hidden_size up to input_length
        # (exclusive); each intermediate width is repeated so consecutive layer
        # pairs share it. The encoder mirrors the decoder.
        dec_steps = 2 ** np.arange(max(np.ceil(np.log2(hidden_size)), 2), np.log2(input_length))[1:]
        dec_setup = np.concatenate([[hidden_size], dec_steps.repeat(2), [input_length]])
        enc_setup = dec_setup[::-1]

        def build(widths):
            # (in, out) pairs -> Linear + activation; the trailing activation is
            # dropped so the final layer of the stack is a plain Linear.
            modules = []
            for in_dim, out_dim in widths.reshape(-1, 2):
                modules += [nn.Linear(int(in_dim), int(out_dim)), activation()]
            return modules[:-1]

        self.encoder = nn.Sequential(*build(enc_setup))
        self.decoder = nn.Sequential(*build(dec_setup))

    def forward(self, x, return_latent=False):
        # Flatten each window to a vector before encoding; restore the original
        # shape after decoding.
        flat = x.view(x.shape[0], -1).float()
        latent = self.encoder(flat)
        recon_x = self.decoder(latent).view(x.shape)
        return (recon_x, latent) if return_latent else recon_x
class DAGMMModule(nn.Module):
    """
    The DAGMM module used in the DAGMM detector.
    :meta private:
    """

    def __init__(self, autoencoder, n_gmm, latent_dim, device):
        """
        :param autoencoder: The autoencoder model
        :param n_gmm: The number of Gaussian mixtures
        :param latent_dim: The latent dimension
        :param device: CUDA or CPU
        """
        super(DAGMMModule, self).__init__()
        self.add_module("autoencoder", autoencoder)
        self.device = device
        # Estimation network: maps the latent vector z to soft mixture memberships (gamma).
        self.estimation = nn.Sequential(
            *[nn.Linear(latent_dim, 10), nn.Tanh(), nn.Linear(10, n_gmm), nn.Softmax(dim=1)]
        )
        # GMM parameters are registered as buffers so they are saved/moved with the
        # module but are not updated by the optimizer (they are set in compute_gmms).
        self.register_buffer("phi", torch.zeros(n_gmm))
        self.register_buffer("mu", torch.zeros(n_gmm, latent_dim))
        self.register_buffer("cov", torch.zeros(n_gmm, latent_dim, latent_dim))

    @staticmethod
    def relative_euclidean_distance(a, b, dim=1):
        # ||a - b|| / ||a||, clamped to avoid division by zero when ||a|| is tiny.
        return (a - b).norm(2, dim=dim) / torch.clamp(a.norm(2, dim=dim), min=1e-10)

    def forward(self, x):
        """Return (latent code, reconstruction, augmented latent z, mixture memberships gamma)."""
        dec, enc = self.autoencoder(x, return_latent=True)
        a, b = x.view(x.shape[0], -1), dec.view(dec.shape[0], -1)
        # Augment the latent code with two reconstruction-quality features:
        # cosine similarity and relative Euclidean distance between input & reconstruction.
        cos_distance = F.cosine_similarity(a, b, dim=1).unsqueeze(-1)
        euc_distance = DAGMMModule.relative_euclidean_distance(a, b, dim=1).unsqueeze(-1)
        z = torch.cat([enc, euc_distance, cos_distance], dim=1)
        return enc, dec, z, self.estimation(z)

    def compute_gmms(self, z, gamma):
        """Estimate GMM parameters (phi, mu, cov) from latent codes z and memberships gamma."""
        # weights
        sum_gamma = torch.sum(gamma, dim=0)
        phi = sum_gamma / gamma.shape[0]
        # means and covariances
        mu = torch.sum(gamma.unsqueeze(-1) * z.unsqueeze(1), dim=0) / sum_gamma.unsqueeze(-1)
        z_mu = z.unsqueeze(1) - mu.unsqueeze(0)
        z_mu_outer = z_mu.unsqueeze(-1) * z_mu.unsqueeze(-2)
        cov = torch.sum(gamma.unsqueeze(-1).unsqueeze(-1) * z_mu_outer, dim=0) / sum_gamma.unsqueeze(-1).unsqueeze(-1)
        # store these values for prediction
        self.phi, self.mu, self.cov = phi.data, mu.data, cov.data
        return phi, mu, cov

    def compute_energy(self, z, phi=None, mu=None, cov=None, size_average=True, eps=1e-6):
        """
        Compute the GMM sample energy (negative log-likelihood) of latent codes ``z``.

        Falls back to the buffered (phi, mu, cov) when parameters are not supplied.
        Returns ``(sample_energy, cov_diag)`` where ``cov_diag`` regularizes small
        covariance diagonal entries.
        """
        phi = self.phi if phi is None else phi
        mu = self.mu if mu is None else mu
        cov = self.cov if cov is None else cov

        cov_inv, cov_det, cov_diag = [], [], 0
        for i in range(cov.shape[0]):
            # Regularize the covariance with eps*I; use the pseudo-inverse for
            # numerical robustness, and eigenvalues (clipped at eps) for the determinant.
            cov_k = cov[i] + torch.eye(cov.shape[1], device=self.device) * eps
            inv_k = torch.tensor(np.linalg.pinv(cov_k.cpu().data.numpy()), dtype=torch.float, device=self.device)
            cov_inv.append(inv_k.unsqueeze(0))
            eigenvalues = np.linalg.eigvals(cov_k.data.cpu().numpy() * (2 * np.pi))
            determinant = np.prod(np.clip(eigenvalues, a_min=eps, a_max=None))
            cov_det.append(determinant)
            cov_diag += torch.sum(1.0 / cov_k.diag())

        z_mu = z.unsqueeze(1) - mu.unsqueeze(0)
        cov_inv = torch.cat(cov_inv, dim=0)
        cov_det = torch.tensor(cov_det, dtype=torch.float, device=self.device)

        # Log-sum-exp trick: subtract the per-sample max exponent before exp() to
        # avoid overflow, then add it back outside the log.
        exp_term_tmp = -0.5 * torch.sum(torch.sum(z_mu.unsqueeze(-1) * cov_inv.unsqueeze(0), dim=-2) * z_mu, dim=-1)
        max_val = torch.max(exp_term_tmp.clamp(min=0), dim=1, keepdim=True)[0]
        exp_term = torch.exp(exp_term_tmp - max_val)
        sample_energy = -max_val.squeeze() - torch.log(
            torch.sum(phi.unsqueeze(0) * exp_term / (torch.sqrt(cov_det) + eps).unsqueeze(0), dim=1) + eps
        )
        if size_average:
            sample_energy = torch.mean(sample_energy)
        return sample_energy, cov_diag

    def loss_func(self, x, recon_x, z, gamma, lambda_energy, lambda_cov_diag):
        """Total DAGMM loss: reconstruction MSE + weighted sample energy + covariance regularizer."""
        recon_error = torch.mean((x.view(*recon_x.shape) - recon_x) ** 2)
        phi, mu, cov = self.compute_gmms(z, gamma)
        sample_energy, cov_diag = self.compute_energy(z, phi, mu, cov)
        loss = recon_error + lambda_energy * sample_energy + lambda_cov_diag * cov_diag
        return loss, sample_energy, recon_error, cov_diag
from math import log
from typing import List
import numpy as np
import pandas as pd
from merlion.models.base import NormalizingConfig
from merlion.models.anomaly.base import DetectorBase, DetectorConfig
from merlion.transform.base import Identity
from merlion.transform.moving_average import LagTransform
from merlion.transform.normalize import MeanVarNormalize
from merlion.transform.sequence import TransformSequence, TransformStack
from merlion.transform.resample import TemporalResample
from merlion.utils import TimeSeries, UnivariateTimeSeries
class ZMSConfig(DetectorConfig, NormalizingConfig):
    """
    Configuration class for `ZMS` anomaly detection model. The transform of this config is actually a
    pre-processing step, followed by the desired number of lag transforms, and a final mean/variance
    normalization step. This full transform may be accessed as `ZMSConfig.full_transform`. Note that
    the normalization is inherited from `NormalizingConfig`.
    """

    _default_transform = TemporalResample(trainable_granularity=True)

    def __init__(self, base: int = 2, n_lags: int = None, lag_inflation: float = 1.0, **kwargs):
        r"""
        :param base: The base to use for computing exponentially distant lags.
        :param n_lags: The number of lags to be used. If None, n_lags will be
            chosen later as the maximum number of lags possible for the initial
            training set.
        :param lag_inflation: See math below for the precise mathematical role of
            the lag inflation. Consider the lag inflation a measure of distrust
            toward higher lags, If ``lag_inflation`` > 1, the higher the lag
            inflation, the less likely the model is to select a higher lag's z-score
            as the anomaly score.

            .. math::
                \begin{align*}
                \text{Let } \space z_k(x_t) \text{ be the z-score of the } & k\text{-lag at } t, \space \Delta_k(x_t)
                \text{ and } p \text{ be the lag inflation} \\
                & \\
                \text{the anomaly score } z(x_t) & = z_{k^*}(x_t) \\
                \text{where } k^* & = \text{argmax}_k \space | z_k(x_t) | / k^p
                \end{align*}
        """
        assert lag_inflation >= 0.0
        self.base = base
        # Assigning n_lags goes through the property setter below, which also
        # builds ``self.lags`` and resets ``self.normalize``.
        self.n_lags = n_lags
        self.lag_inflation = lag_inflation
        super().__init__(**kwargs)

    @property
    def full_transform(self):
        """
        Returns the full transform, including the pre-processing step, lags, and
        final mean/variance normalization.
        """
        return TransformSequence([self.transform, self.lags, self.normalize])

    def to_dict(self, _skipped_keys=None):
        # self.lags isn't trainable & is set automatically via n_lags
        _skipped_keys = _skipped_keys if _skipped_keys is not None else set()
        return super().to_dict(_skipped_keys.union({"lags"}))

    @property
    def n_lags(self):
        return self._n_lags

    @n_lags.setter
    def n_lags(self, n: int):
        """
        Set the number of lags. Also resets the mean/var normalization, since
        the output dimension (number of lags) will change.
        """
        self._n_lags = n
        # Lags grow exponentially: base**0, base**1, ..., base**(n-1); the leading
        # Identity acts as the zeroth-order (raw value) channel.
        lags = [LagTransform(self.base**k, pad=True) for k in range(n)] if n is not None else []
        self.lags = TransformStack([Identity(), *lags])
        self.normalize = MeanVarNormalize()
class ZMS(DetectorBase):
    r"""
    Multiple Z-Score based Anomaly Detector.

    ZMS is designed to detect spikes, dips, sharp trend changes (up or down)
    relative to historical data. Anomaly scores capture not only magnitude
    but also direction. This lets one distinguish between positive (spike)
    negative (dip) anomalies for example.

    The algorithm builds models of normalcy at multiple exponentially-growing
    time scales. The zeroth order model is just a model of the values seen
    recently. The kth order model is similar except that it models not
    values, but rather their k-lags, defined as x(t)-x(t-k), for k in
    1, 2, 4, 8, 16, etc. The algorithm assigns the maximum absolute z-score
    of all the models of normalcy as the overall anomaly score.

    .. math::
        \begin{align*}
        \text{Let } \space z_k(x_t) \text{ be the z-score of the } & k\text{-lag at } t, \space \Delta_k(x_t)
        \text{ and } p \text{ be the lag inflation} \\
        & \\
        \text{the anomaly score } z(x_t) & = z_{k^*}(x_t) \\
        \text{where } k^* & = \text{argmax}_k \space | z_k(x_t) | / k^p
        \end{align*}
    """

    config_class = ZMSConfig

    @property
    def require_even_sampling(self) -> bool:
        return False

    @property
    def require_univariate(self) -> bool:
        return False

    @property
    def n_lags(self):
        return self.config.n_lags

    @n_lags.setter
    def n_lags(self, n_lags):
        self.config.n_lags = n_lags

    @property
    def lag_scales(self) -> List[int]:
        # transforms[0] is the Identity (zeroth-order) channel; the rest are LagTransforms.
        return [lag.k for lag in self.config.lags.transforms[1:]]

    @property
    def lag_inflation(self):
        return self.config.lag_inflation

    @property
    def adjust_z_scores(self) -> bool:
        # Inflation adjustment only matters when there are multiple lag scales.
        return self.lag_inflation > 0.0 and len(self.lag_scales) > 1

    def train(
        self, train_data: TimeSeries, train_config=None, anomaly_labels: TimeSeries = None, post_rule_train_config=None
    ) -> TimeSeries:
        # If unspecified, use the maximum number of lags representable in the training set.
        if self.n_lags is None:
            self.n_lags = int(log(len(train_data), self.config.base))
        return super().train(train_data, train_config, anomaly_labels, post_rule_train_config)

    def _train(self, train_data: pd.DataFrame, train_config=None) -> pd.DataFrame:
        return self._get_anomaly_score(train_data)

    def _get_anomaly_score(self, time_series: pd.DataFrame, time_series_prev: pd.DataFrame = None) -> pd.DataFrame:
        # After config.full_transform, each column holds the z-score of one lag scale.
        z_scores = time_series.values
        if self.adjust_z_scores:
            # Deflate higher lags' z-scores by k^p before choosing which lag to report
            # (column 0 is the zeroth-order channel and is left untouched).
            adjusted_z_scores = np.hstack(
                (z_scores[:, 0:1], z_scores[:, 1:] / (np.asarray(self.lag_scales) ** self.lag_inflation))
            )
            # Per the class docstring, selection is by magnitude |z_k| / k^p.
            # BUG FIX: the argmax was previously taken over the *signed* adjusted scores,
            # so a large negative z-score (a dip) could never be selected. We select by
            # absolute value, but still report the signed z-score to preserve direction.
            lag_args = np.argmax(np.abs(adjusted_z_scores), axis=1)
            scores = z_scores[np.arange(len(z_scores)), lag_args]
        else:
            scores = np.nanmax(np.abs(z_scores), axis=1)

        return pd.DataFrame(scores, index=time_series.index, columns=["anom_score"])
from typing import Sequence
import numpy as np
import pandas as pd
try:
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
from merlion.models.base import NormalizingConfig
from merlion.models.anomaly.base import DetectorBase, DetectorConfig
from merlion.post_process.threshold import AggregateAlarms
from merlion.utils.misc import ProgressBar, initializer
from merlion.models.utils.rolling_window_dataset import RollingWindowDataset
class VAEConfig(DetectorConfig, NormalizingConfig):
    """
    Configuration class for VAE. The normalization is inherited from `NormalizingConfig`.
    The input data will be standardized automatically.
    """

    # Default post-rule: aggregate alarms with threshold 2.5 applied to absolute scores.
    _default_threshold = AggregateAlarms(alm_threshold=2.5, abs_score=True)

    # @initializer automatically assigns every named constructor argument to an instance
    # attribute of the same name, so no explicit ``self.x = x`` lines are needed here.
    @initializer
    def __init__(
        self,
        encoder_hidden_sizes: Sequence[int] = (25, 10, 5),
        decoder_hidden_sizes: Sequence[int] = (5, 10, 25),
        latent_size: int = 5,
        sequence_len: int = 1,
        kld_weight: float = 1.0,
        dropout_rate: float = 0.0,
        num_eval_samples: int = 10,
        lr: float = 1e-3,
        batch_size: int = 1024,
        num_epochs: int = 10,
        **kwargs
    ):
        """
        :param encoder_hidden_sizes: The hidden layer sizes of the MLP encoder
        :param decoder_hidden_sizes: The hidden layer sizes of the MLP decoder
        :param latent_size: The latent size
        :param sequence_len: The input series length, e.g., input = [x(t-sequence_len+1)...,x(t-1),x(t)]
        :param kld_weight: The regularization weight for the KL divergence term
        :param dropout_rate: The dropout rate for the encoder and decoder
        :param num_eval_samples: The number of sampled latent variables during prediction
        :param lr: The learning rate during training
        :param batch_size: The batch size during training
        :param num_epochs: The number of training epochs
        :param kwargs: Forwarded to `DetectorConfig` / `NormalizingConfig`.
        """
        super().__init__(**kwargs)
class VAE(DetectorBase):
    """
    The VAE-based multivariate time series anomaly detector.
    This detector utilizes a variational autoencoder to infer the correlations between
    different time series and estimate the distribution of the reconstruction errors
    for anomaly detection.

    - paper: `Diederik P Kingma and Max Welling. Auto-Encoding Variational Bayes. 2013.
      <https://arxiv.org/abs/1312.6114>`_
    """

    config_class = VAEConfig

    def __init__(self, config: VAEConfig):
        super().__init__(config)
        self.k = config.sequence_len
        self.encoder_hidden_sizes = config.encoder_hidden_sizes
        self.decoder_hidden_sizes = config.decoder_hidden_sizes
        self.latent_size = config.latent_size
        self.activation = nn.ReLU
        self.kld_weight = config.kld_weight
        self.dropout_rate = config.dropout_rate
        self.num_eval_samples = config.num_eval_samples
        self.batch_size = config.batch_size
        self.num_epochs = config.num_epochs
        self.lr = config.lr
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        # The model is built lazily in ``_train`` once the input dimension is known.
        self.model = None
        self.data_dim = None

    @property
    def require_even_sampling(self) -> bool:
        return False

    @property
    def require_univariate(self) -> bool:
        return False

    def _build_model(self, dim):
        """Construct the (unconditional) CVAE over flattened windows of size dim * k."""
        model = CVAE(
            x_dim=dim * self.k,
            c_dim=0,  # no conditioning variable is used
            encoder_hidden_sizes=self.encoder_hidden_sizes,
            decoder_hidden_sizes=self.decoder_hidden_sizes,
            latent_size=self.latent_size,
            dropout_rate=self.dropout_rate,
            activation=self.activation,
        )
        return model

    def _train(self, train_data: pd.DataFrame, train_config=None) -> pd.DataFrame:
        """Train the VAE with the standard ELBO (MSE reconstruction + weighted KLD)."""
        self.model = self._build_model(train_data.shape[1]).to(self.device)
        self.data_dim = train_data.shape[1]

        loader = RollingWindowDataset(
            train_data,
            target_seq_index=None,
            shuffle=True,
            flatten=True,
            n_past=self.k,
            n_future=0,
            batch_size=self.batch_size,
        )
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
        loss_func = nn.MSELoss()
        bar = ProgressBar(total=self.num_epochs)

        self.model.train()
        for epoch in range(self.num_epochs):
            total_loss = 0
            for i, (batch, _, _, _) in enumerate(loader):
                x = torch.tensor(batch, dtype=torch.float, device=self.device)
                recon_x, mu, log_var, _ = self.model(x, None)
                recon_loss = loss_func(x, recon_x)
                # Analytic KL divergence between N(mu, sigma) and the standard normal prior.
                kld_loss = -0.5 * torch.mean(torch.sum(1 + log_var - mu**2 - log_var.exp(), dim=1), dim=0)
                loss = recon_loss + kld_loss * self.kld_weight
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                total_loss += loss
            if bar is not None:
                bar.print(epoch + 1, prefix="", suffix="Complete, Loss {:.4f}".format(total_loss / len(train_data)))

        return self._get_anomaly_score(train_data)

    def _get_anomaly_score(self, time_series: pd.DataFrame, time_series_prev: pd.DataFrame = None) -> pd.DataFrame:
        """Score each timestamp by the L1 error between the window and its mean reconstruction."""
        self.model.eval()
        # BUG FIX: the condition was previously inverted (``is None``), which silently
        # ignored the provided history; prepend ``time_series_prev`` when it is present.
        ts = pd.concat((time_series_prev, time_series)) if time_series_prev is not None else time_series
        loader = RollingWindowDataset(
            ts,
            target_seq_index=None,
            shuffle=False,
            flatten=True,
            n_past=self.k,
            n_future=0,
            batch_size=self.batch_size,
        )
        ys, rs = [], []
        for y, _, _, _ in loader:
            ys.append(y)
            y = torch.tensor(y, dtype=torch.float, device=self.device)
            r = np.zeros(y.shape)
            # Average multiple stochastic reconstructions to reduce sampling noise.
            for _ in range(self.num_eval_samples):
                recon_y, _, _, _ = self.model(y, None)
                r += recon_y.cpu().data.numpy()
            r /= self.num_eval_samples
            rs.append(r)

        scores = np.zeros((ts.shape[0],), dtype=float)
        test_scores = np.sum(np.abs(np.concatenate(rs) - np.concatenate(ys)), axis=1)
        # The first k-1 timestamps have no complete window; pad with the first score.
        scores[self.k - 1 :] = test_scores
        scores[: self.k - 1] = test_scores[0]
        return pd.DataFrame(scores[-len(time_series) :], index=time_series.index)
class CVAE(nn.Module):
    """
    Conditional variational autoencoder.

    - paper: `Kihyuk Sohn and Honglak Lee and Xinchen Yan.
      Learning Structured Output Representation using Deep Conditional Generative Models. 2015.
      <https://papers.nips.cc/paper/2015/hash/8d55a249e6baa5c06772297520da2051-Abstract.html>`_

    :meta private:
    """

    def __init__(
        self,
        x_dim,
        c_dim,
        encoder_hidden_sizes,
        decoder_hidden_sizes,
        latent_size,
        dropout_rate=0.0,
        activation=nn.ReLU,
    ):
        """
        :param x_dim: The input variable dimension
        :param c_dim: The conditioned variable dimension
        :param encoder_hidden_sizes: The hidden layer sizes for the encoder
        :param decoder_hidden_sizes: The hidden layer sizes for the decoder
        :param latent_size: The latent size for both the encoder and decoder
        :param dropout_rate: The dropout rate
        :param activation: The activation functions for the hidden layers
        """
        super().__init__()
        self.encoder = Encoder(x_dim, c_dim, encoder_hidden_sizes, latent_size, dropout_rate, activation)
        self.decoder = Decoder(x_dim, c_dim, decoder_hidden_sizes, latent_size, dropout_rate, activation)

    def forward(self, x, c):
        """Encode, sample a latent via reparameterization, decode; returns (recon_x, mu, log_var, z)."""
        mu, log_var = self.encoder(x, c)
        latent = self.reparameterize(mu, log_var)
        return self.decoder(latent, c), mu, log_var, latent

    def reparameterize(self, mu, log_var):
        # z = mu + sigma * eps with eps ~ N(0, I); keeps sampling differentiable w.r.t. (mu, log_var).
        sigma = torch.exp(0.5 * log_var)
        return mu + torch.randn_like(sigma) * sigma

    def inference(self, z, c=None):
        """Decode a latent sample directly (generation without encoding)."""
        return self.decoder(z, c)
class Encoder(nn.Module):
    """
    The encoder for the conditional VAE model.

    Produces the mean and log-variance of the approximate posterior over the latent.

    :meta private:
    """

    def __init__(self, x_dim, c_dim, hidden_sizes, latent_size, dropout_rate, activation):
        """
        :param x_dim: The input variable dimension
        :param c_dim: The conditioned variable dimension
        :param hidden_sizes: The hidden layer sizes
        :param latent_size: The latent size
        :param dropout_rate: The dropout rate
        :param activation: The activation function for the hidden layers
        """
        super().__init__()
        assert len(hidden_sizes) > 0, "hidden sizes cannot be empty"
        self.mlp = build_hidden_layers(x_dim + c_dim, hidden_sizes, dropout_rate, activation)
        self.linear_means = nn.Linear(hidden_sizes[-1], latent_size)
        self.linear_vars = nn.Linear(hidden_sizes[-1], latent_size)
        self._init_log_var_weights()

    def _init_log_var_weights(self):
        # Keep the log-variance head's initial outputs near zero (small weights, zero bias).
        nn.init.uniform_(self.linear_vars.weight, -0.01, 0.01)
        nn.init.constant_(self.linear_vars.bias, 0)

    def forward(self, x, c):
        """Return (means, log_vars) for input ``x``, optionally conditioned on ``c``."""
        inputs = x if c is None else torch.cat([x, c], dim=-1)
        hidden = self.mlp(inputs)
        return self.linear_means(hidden), self.linear_vars(hidden)
class Decoder(nn.Module):
    """
    The decoder for the conditional VAE model.

    Maps a latent sample (optionally concatenated with a condition) back to input space.

    :meta private:
    """

    def __init__(self, x_dim, c_dim, hidden_sizes, latent_size, dropout_rate, activation):
        """
        :param x_dim: The input variable dimension
        :param c_dim: The conditioned variable dimension
        :param hidden_sizes: The hidden layer sizes
        :param latent_size: The latent size
        :param dropout_rate: The dropout rate
        :param activation: The activation function for the hidden layers
        """
        super().__init__()
        assert len(hidden_sizes) > 0, "hidden sizes cannot be empty"
        self.mlp = build_hidden_layers(latent_size + c_dim, hidden_sizes, dropout_rate, activation)
        self.output_layer = nn.Linear(hidden_sizes[-1], x_dim)

    def forward(self, z, c):
        """Decode latent ``z`` (conditioned on ``c`` when given) into input space."""
        inputs = z if c is None else torch.cat([z, c], dim=-1)
        return self.output_layer(self.mlp(inputs))
def build_hidden_layers(input_size, hidden_sizes, dropout_rate, activation):
    """
    Build a ``Sequential`` stack of ``Linear -> activation -> Dropout`` stages,
    one per entry of ``hidden_sizes``.

    :meta private:
    """
    sizes = [input_size] + list(hidden_sizes)
    modules = []
    for in_dim, out_dim in zip(sizes[:-1], sizes[1:]):
        modules += [nn.Linear(in_dim, out_dim), activation(), nn.Dropout(dropout_rate)]
    return torch.nn.Sequential(*modules)
from typing import Sequence
import numpy as np
import pandas as pd
try:
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
from merlion.models.base import NormalizingConfig
from merlion.models.anomaly.base import DetectorBase, DetectorConfig
from merlion.post_process.threshold import AggregateAlarms
from merlion.utils.misc import ProgressBar, initializer
from merlion.models.utils.rolling_window_dataset import RollingWindowDataset
class LSTMEDConfig(DetectorConfig, NormalizingConfig):
    """
    Configuration class for LSTM-encoder-decoder. The normalization is inherited from `NormalizingConfig`.
    The input data will be standardized automatically.
    """

    # Default post-rule: aggregate alarms with threshold 2.5 applied to absolute scores.
    _default_threshold = AggregateAlarms(alm_threshold=2.5, abs_score=True)

    # @initializer automatically assigns every named constructor argument to an instance
    # attribute of the same name, so no explicit ``self.x = x`` lines are needed here.
    @initializer
    def __init__(
        self,
        hidden_size: int = 5,
        sequence_len: int = 20,
        n_layers: Sequence[int] = (1, 1),
        dropout: Sequence[int] = (0, 0),
        lr: float = 1e-3,
        batch_size: int = 256,
        num_epochs: int = 10,
        **kwargs
    ):
        """
        :param hidden_size: The hidden state size of the LSTM modules
        :param sequence_len: The input series length, e.g., input = [x(t-sequence_len+1)...,x(t-1),x(t)]
        :param n_layers: The number of layers for the LSTM encoder and decoder. ``n_layer`` has two values, i.e.,
            ``n_layer[0]`` is the number of encoder layers and ``n_layer[1]`` is the number of decoder layers.
        :param dropout: The dropout rate for the LSTM encoder and decoder. ``dropout`` has two values, i.e.,
            ``dropout[0]`` is the dropout rate for the encoder and ``dropout[1]`` is the dropout rate for the decoder.
        :param lr: The learning rate during training
        :param batch_size: The batch size during training
        :param num_epochs: The number of training epochs
        :param kwargs: Forwarded to `DetectorConfig` / `NormalizingConfig`.
        """
        super().__init__(**kwargs)
class LSTMED(DetectorBase):
    """
    The LSTM-encoder-decoder-based multivariate time series anomaly detector.
    The time series representation is modeled by an encoder-decoder network where
    both encoder and decoder are LSTMs. The distribution of the reconstruction error
    is estimated for anomaly detection.
    """

    config_class = LSTMEDConfig

    def __init__(self, config: LSTMEDConfig):
        super().__init__(config)
        self.num_epochs = config.num_epochs
        self.batch_size = config.batch_size
        self.lr = config.lr
        self.hidden_size = config.hidden_size
        self.sequence_length = config.sequence_len
        self.n_layers = config.n_layers
        self.dropout = config.dropout
        assert (
            len(self.n_layers) == 2
        ), "Param n_layers should contain two values: (num_layers for LSTM encoder, num_layers for LSTM decoder)"
        assert len(self.n_layers) == len(self.dropout), "Param dropout should contain two values"

        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        # The model is built lazily in ``_train`` once the input dimension is known.
        self.lstmed = None
        self.data_dim = None

    @property
    def require_even_sampling(self) -> bool:
        return False

    @property
    def require_univariate(self) -> bool:
        return False

    def _build_model(self, dim):
        """Construct the LSTM encoder-decoder module for ``dim``-dimensional input."""
        return LSTMEDModule(dim, self.hidden_size, self.n_layers, self.dropout, self.device)

    def _train(self, train_data: pd.DataFrame, train_config=None) -> pd.DataFrame:
        """Train the encoder-decoder to reconstruct sliding windows; return train-set scores."""
        train_loader = RollingWindowDataset(
            train_data,
            target_seq_index=None,
            shuffle=True,
            flatten=False,
            n_past=self.sequence_length,
            n_future=0,
            batch_size=self.batch_size,
        )
        self.data_dim = train_data.shape[1]
        self.lstmed = self._build_model(train_data.shape[1]).to(self.device)
        optimizer = torch.optim.Adam(self.lstmed.parameters(), lr=self.lr)
        loss_func = torch.nn.MSELoss(reduction="sum")
        bar = ProgressBar(total=self.num_epochs)

        self.lstmed.train()
        for epoch in range(self.num_epochs):
            total_loss = 0
            for batch, _, _, _ in train_loader:
                batch = torch.tensor(batch, dtype=torch.float, device=self.device)
                output = self.lstmed(batch)
                loss = loss_func(output, batch)
                self.lstmed.zero_grad()
                loss.backward()
                optimizer.step()
                total_loss += loss
            if bar is not None:
                bar.print(epoch + 1, prefix="", suffix="Complete, Loss {:.4f}".format(total_loss / len(train_loader)))

        return self._get_anomaly_score(train_data)

    def _get_anomaly_score(self, time_series: pd.DataFrame, time_series_prev: pd.DataFrame = None) -> pd.DataFrame:
        """Score timestamps by mean L1 reconstruction error, averaged over overlapping windows."""
        self.lstmed.eval()
        # BUG FIX: the condition was previously inverted (``is None``), which silently
        # ignored the provided history; prepend ``time_series_prev`` when it is present.
        ts = pd.concat((time_series_prev, time_series)) if time_series_prev is not None else time_series
        data_loader = RollingWindowDataset(
            ts,
            target_seq_index=None,
            shuffle=False,
            flatten=False,
            n_past=self.sequence_length,
            n_future=0,
            batch_size=self.batch_size,
        )
        scores, outputs = [], []
        for idx, (batch, _, _, _) in enumerate(data_loader):
            batch = torch.tensor(batch, dtype=torch.float, device=self.device)
            output = self.lstmed(batch)
            error = nn.L1Loss(reduction="none")(output, batch)
            score = np.mean(error.view(-1, ts.shape[1]).data.cpu().numpy(), axis=1)
            scores.append(score.reshape(batch.shape[0], self.sequence_length))

        # Lattice of per-window scores; each timestamp's final score is the nanmean
        # over all sliding windows that cover it.
        scores = np.concatenate(scores)
        lattice = np.full((self.sequence_length, ts.shape[0]), np.nan)
        for i, score in enumerate(scores):
            lattice[i % self.sequence_length, i : i + self.sequence_length] = score
        scores = np.nanmean(lattice, axis=0)
        return pd.DataFrame(scores[-len(time_series) :], index=time_series.index)
class LSTMEDModule(nn.Module):
    """
    LSTM encoder-decoder module. The encoder compresses an input window into its
    final hidden state; the decoder reconstructs the window back-to-front from
    that state.

    :meta private:
    """

    def __init__(self, n_features, hidden_size, n_layers, dropout, device):
        """
        :param n_features: The input feature dimension
        :param hidden_size: The LSTM hidden size
        :param n_layers: The number of LSTM layers (index 0: encoder, 1: decoder)
        :param dropout: The dropout rate (index 0: encoder, 1: decoder)
        :param device: CUDA or CPU
        """
        super().__init__()
        self.n_features = n_features
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.dropout = dropout
        self.device = device

        def build_lstm(side):
            # side 0 configures the encoder, side 1 the decoder
            return nn.LSTM(
                n_features,
                hidden_size,
                batch_first=True,
                num_layers=n_layers[side],
                bias=True,
                dropout=dropout[side],
            )

        self.encoder = build_lstm(0)
        self.decoder = build_lstm(1)
        self.output_layer = nn.Linear(hidden_size, n_features)

    def init_hidden_state(self, batch_size):
        # Zero-initialized (h, c) pair for the encoder
        shape = (self.n_layers[0], batch_size, self.hidden_size)
        return tuple(torch.zeros(shape).to(self.device) for _ in range(2))

    def forward(self, x, return_latent=False):
        # Run the encoder over the whole window; keep only its final (h, c) state.
        _, enc_hidden = self.encoder(x.float(), self.init_hidden_state(x.shape[0]))
        # Decode the window back-to-front, seeded with the encoder's final state.
        dec_hidden = enc_hidden
        output = torch.zeros(x.shape).to(self.device)
        for step in range(x.shape[1] - 1, -1, -1):
            output[:, step, :] = self.output_layer(dec_hidden[0][0, :])
            # Teacher-force with the ground truth while training; feed back the
            # model's own reconstruction at inference time.
            nxt = x[:, step].float() if self.training else output[:, step]
            _, dec_hidden = self.decoder(nxt.unsqueeze(1), dec_hidden)
        return (output, enc_hidden[1][-1]) if return_latent else output
import copy
import logging
from typing import List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from merlion.models.anomaly.base import DetectorBase
from merlion.models.forecast.base import ForecasterBase
from merlion.plot import Figure
from merlion.utils import TimeSeries
from merlion.utils.misc import AutodocABCMeta
logger = logging.getLogger(__name__)
class ForecastingDetectorBase(ForecasterBase, DetectorBase, metaclass=AutodocABCMeta):
    """
    Base class for a forecast-based anomaly detector.
    """
    @property
    def _default_post_rule_train_config(self):
        # Default config used to train the post-rule (threshold) on anomaly scores.
        from merlion.evaluate.anomaly import TSADMetric
        return dict(metric=TSADMetric.F1, unsup_quantile=None)
    def forecast_to_anom_score(
        self, time_series: TimeSeries, forecast: TimeSeries, stderr: Optional[TimeSeries]
    ) -> pd.DataFrame:
        """
        Compare a model's forecast to a ground truth time series, in order to compute anomaly scores. By default, we
        compute a z-score if model uncertainty (``stderr``) is given, or the residuals if there is no model uncertainty.
        :param time_series: the ground truth time series.
        :param forecast: the model's forecasted values for the time series
        :param stderr: the standard errors of the model's forecast
        :return: Anomaly scores based on the difference between the ground truth values and the model's forecast.
        """
        if len(forecast) == 0:
            return pd.DataFrame(columns=["anom_score"])
        # Only compare ground truth values at timestamps the model actually forecasted
        time_series = time_series.align(reference=forecast.time_stamps)
        time_series = time_series.univariates[time_series.names[self.target_seq_index]]
        times = time_series.index
        y = time_series.np_values
        yhat = forecast.univariates[forecast.names[0]].np_values
        if stderr is None:
            # No model uncertainty: the anomaly score is the raw residual
            return pd.DataFrame(y - yhat, index=times, columns=["anom_score"])
        else:
            sigma = stderr.univariates[stderr.names[0]].np_values
            if np.isnan(sigma).all():
                # No usable uncertainty estimates at all; fall back to raw residuals
                sigma = 1
            else:
                # Impute missing standard errors with the mean standard error
                sigma[np.isnan(sigma)] = np.mean(sigma)
            # z-score; the 1e-8 guards against division by zero
            return pd.DataFrame((y - yhat) / (sigma + 1e-8), index=times, columns=["anom_score"])
    def train(
        self,
        train_data: TimeSeries,
        train_config=None,
        exog_data: TimeSeries = None,
        anomaly_labels=None,
        post_rule_train_config=None,
    ) -> TimeSeries:
        """
        Trains the underlying forecaster on ``train_data``, converts its in-sample
        forecast into anomaly scores, and trains the post-rule on those scores.
        :param train_data: the training time series.
        :param train_config: model-specific training config; defaults to a copy of the model's default config.
        :param exog_data: optional exogenous data; only used by models which support it.
        :param anomaly_labels: optional ground-truth anomaly labels used to train the post-rule.
        :param post_rule_train_config: config used to train the post-rule.
        :return: the model's anomaly scores on the training data.
        """
        if train_config is None:
            train_config = copy.deepcopy(self._default_train_config)
        train_data, exog_data = self.train_pre_process(train_data, exog_data=exog_data, return_exog=True)
        # Some models train on pandas DataFrames rather than TimeSeries objects
        if self._pandas_train:
            train_data = train_data.to_pd()
            exog_data = None if exog_data is None else exog_data.to_pd()
        if exog_data is None:
            train_result = self._train(train_data=train_data, train_config=train_config)
        else:
            train_result = self._train_with_exog(train_data=train_data, train_config=train_config, exog_data=exog_data)
        return self.train_post_process(
            train_result, anomaly_labels=anomaly_labels, post_rule_train_config=post_rule_train_config
        )
    def train_post_process(
        self,
        train_result: Tuple[Union[TimeSeries, pd.DataFrame], Optional[Union[TimeSeries, pd.DataFrame]]],
        anomaly_labels=None,
        post_rule_train_config=None,
    ) -> TimeSeries:
        """
        Converts the forecaster's raw training output (in-sample forecast & stderr)
        into anomaly scores, then trains the detector's post-rule on those scores.
        :return: the model's anomaly scores on the training data.
        """
        # A (forecast, stderr) tuple means the forecaster's post-processing hasn't run yet
        if isinstance(train_result, tuple) and len(train_result) == 2:
            train_pred, train_err = ForecasterBase.train_post_process(self, train_result)
            # Compare the forecast against the data in the same (transformed or raw) space
            train_data = self.train_data if self.invert_transform else self.transform(self.train_data)
            train_result = self.forecast_to_anom_score(train_data, train_pred, train_err)
        return DetectorBase.train_post_process(
            self, train_result, anomaly_labels=anomaly_labels, post_rule_train_config=post_rule_train_config
        )
    def get_anomaly_score(
        self, time_series: TimeSeries, time_series_prev: TimeSeries = None, exog_data: TimeSeries = None
    ) -> TimeSeries:
        """
        Forecasts the values of ``time_series`` and converts the difference between
        the forecast and the actual values into anomaly scores.
        :return: a univariate time series of anomaly scores.
        """
        if not self.invert_transform:
            time_series, _ = self.transform_time_series(time_series, time_series_prev)
        forecast, err = self.forecast(time_series.time_stamps, time_series_prev=time_series_prev, exog_data=exog_data)
        # Make sure stderr & forecast are of the appropriate lengths
        assert err is None or len(forecast) == len(err), (
            f"Expected forecast & standard error of forecast to have the same "
            f"length, but len(forecast) = {len(forecast)}, len(err) = {len(err)}"
        )
        assert len(forecast) == len(
            time_series
        ), f"forecast() returned a forecast with length {len(forecast)}, but expected length {len(time_series)}"
        return TimeSeries.from_pd(self.forecast_to_anom_score(time_series, forecast, err))
    def _get_anomaly_score(self, time_series: pd.DataFrame, time_series_prev: pd.DataFrame = None) -> pd.DataFrame:
        # Anomaly scores are derived from forecasts (see get_anomaly_score above),
        # so the detector-style hook is intentionally unsupported here.
        raise NotImplementedError("_get_anomaly_score() should not be called from a forecast-based anomaly detector.")
    def get_anomaly_label(
        self, time_series: TimeSeries, time_series_prev: TimeSeries = None, exog_data: TimeSeries = None
    ) -> TimeSeries:
        """
        Computes anomaly scores and filters them through the model's post-rule
        (e.g. calibration + thresholding), if one is set.
        :return: a univariate time series of post-processed anomaly scores.
        """
        scores = self.get_anomaly_score(time_series, time_series_prev, exog_data=exog_data)
        return self.post_rule(scores) if self.post_rule is not None else scores
    def get_figure(
        self,
        *,
        time_series: TimeSeries = None,
        time_stamps: List[int] = None,
        time_series_prev: TimeSeries = None,
        exog_data: TimeSeries = None,
        plot_anomaly=True,
        filter_scores=True,
        plot_forecast=False,
        plot_forecast_uncertainty=False,
        plot_time_series_prev=False,
    ) -> Figure:
        """
        :param time_series: the time series over whose timestamps we wish to make a forecast. Exactly one of
            ``time_series`` or ``time_stamps`` should be provided.
        :param time_stamps: Either a ``list`` of timestamps we wish to forecast for, or the number of steps (``int``)
            we wish to forecast for. Exactly one of ``time_series`` or ``time_stamps`` should be provided.
        :param time_series_prev: a time series immediately preceding ``time_series``. If given, we use it to initialize
            the forecaster's state. Otherwise, we assume that ``time_series`` immediately follows the training data.
        :param exog_data: A time series of exogenous variables. Exogenous variables are known a priori, and they are
            independent of the variable being forecasted. ``exog_data`` must include data for all of ``time_stamps``;
            if ``time_series_prev`` is given, it must include data for all of ``time_series_prev.time_stamps`` as well.
            Optional. Only supported for models which inherit from `ForecasterExogBase`.
        :param plot_anomaly: Whether to plot the model's predicted anomaly scores.
        :param filter_scores: whether to filter the anomaly scores by the post-rule before plotting them.
        :param plot_forecast: Whether to plot the model's forecasted values.
        :param plot_forecast_uncertainty: whether to plot uncertainty estimates (the inter-quartile range) for forecast
            values. Not supported for all models.
        :param plot_time_series_prev: whether to plot ``time_series_prev`` (and the model's fit for it). Only used if
            ``time_series_prev`` is given.
        :return: a `Figure` of the model's anomaly score predictions and/or forecast.
        """
        assert not (
            time_series is None and time_stamps is None
        ), "Must provide at least one of time_series or time_stamps"
        fig = None
        # A forecast plot is implicitly required when uncertainty is requested,
        # or when anomaly scores are not being plotted at all
        plot_forecast = plot_forecast or plot_forecast_uncertainty or not plot_anomaly
        if plot_forecast or time_series is None:
            fig = ForecasterBase.get_figure(
                self,
                time_series=time_series,
                time_stamps=time_stamps,
                exog_data=exog_data,
                time_series_prev=time_series_prev,
                plot_forecast_uncertainty=plot_forecast_uncertainty,
                plot_time_series_prev=plot_time_series_prev,
            )
        if time_series is None or not plot_anomaly:
            return fig
        # Overlay the anomaly scores on top of the forecast figure (if any)
        return DetectorBase.get_figure(
            self,
            time_series=time_series,
            time_series_prev=time_series_prev,
            exog_data=exog_data,
            plot_time_series_prev=plot_time_series_prev,
            filter_scores=filter_scores,
            fig=fig,
        )
    def plot_anomaly(
        self,
        time_series: TimeSeries,
        time_series_prev: TimeSeries = None,
        exog_data: TimeSeries = None,
        *,
        filter_scores=True,
        plot_forecast=False,
        plot_forecast_uncertainty=False,
        plot_time_series_prev=False,
        figsize=(1000, 600),
        ax=None,
    ):
        """
        Plots the time series in matplotlib as a line graph, with points in the
        series overlaid as points color-coded to indicate their severity as
        anomalies. Optionally allows you to overlay the model's forecast & the
        model's uncertainty in its forecast (if applicable).
        :param time_series: the time series whose anomaly scores we wish to plot.
        :param time_series_prev: a time series immediately preceding ``time_series``. If given, we use it to initialize
            the forecaster's state. Otherwise, we assume that ``time_series`` immediately follows the training data.
        :param exog_data: A time series of exogenous variables. Exogenous variables are known a priori, and they are
            independent of the variable being forecasted. ``exog_data`` must include data for all of ``time_stamps``;
            if ``time_series_prev`` is given, it must include data for all of ``time_series_prev.time_stamps`` as well.
            Optional. Only supported for models which inherit from `ForecasterExogBase`.
        :param filter_scores: whether to filter the anomaly scores by the post-rule before plotting them.
        :param plot_forecast: Whether to plot the model's forecast, in addition to the anomaly scores.
        :param plot_forecast_uncertainty: whether to plot uncertainty estimates (the inter-quartile range) for forecast
            values. Not supported for all models.
        :param plot_time_series_prev: whether to plot ``time_series_prev`` (and the model's fit for it). Only used if
            ``time_series_prev`` is given.
        :param figsize: figure size in pixels
        :param ax: matplotlib axis to add this plot to
        :return: matplotlib figure & axes
        """
        metric_name = time_series.names[0]
        fig = self.get_figure(
            time_series=time_series,
            time_series_prev=time_series_prev,
            exog_data=exog_data,
            filter_scores=filter_scores,
            plot_anomaly=True,
            plot_forecast=plot_forecast,
            plot_forecast_uncertainty=plot_forecast_uncertainty,
            plot_time_series_prev=plot_time_series_prev,
        )
        title = f"{type(self).__name__}: Anomalies in {metric_name}"
        if plot_forecast:
            title += " (Forecast Overlaid)"
        return fig.plot(title=title, figsize=figsize, ax=ax)
    def plot_anomaly_plotly(
        self,
        time_series: TimeSeries,
        time_series_prev: TimeSeries = None,
        exog_data: TimeSeries = None,
        *,
        filter_scores=True,
        plot_forecast=False,
        plot_forecast_uncertainty=False,
        plot_time_series_prev=False,
        figsize=(1000, 600),
    ):
        """
        Plots the time series in plotly as a line graph, with points in the
        series overlaid as points color-coded to indicate their severity as
        anomalies. Optionally allows you to overlay the model's forecast & the
        model's uncertainty in its forecast (if applicable).
        :param time_series: the time series whose anomaly scores we wish to plot.
        :param time_series_prev: a time series immediately preceding ``time_series``. If given, we use it to initialize
            the forecaster's state. Otherwise, we assume that ``time_series`` immediately follows the training data.
        :param exog_data: A time series of exogenous variables. Exogenous variables are known a priori, and they are
            independent of the variable being forecasted. ``exog_data`` must include data for all of ``time_stamps``;
            if ``time_series_prev`` is given, it must include data for all of ``time_series_prev.time_stamps`` as well.
            Optional. Only supported for models which inherit from `ForecasterExogBase`.
        :param filter_scores: whether to filter the anomaly scores by the post-rule before plotting them.
        :param plot_forecast: Whether to plot the model's forecast, in addition to the anomaly scores.
        :param plot_forecast_uncertainty: whether to plot uncertainty estimates (the inter-quartile range) for forecast
            values. Not supported for all models.
        :param plot_time_series_prev: whether to plot ``time_series_prev`` (and the model's fit for it). Only used if
            ``time_series_prev`` is given.
        :param figsize: figure size in pixels
        :return: plotly figure
        """
        metric_name = time_series.names[0]
        fig = self.get_figure(
            time_series=time_series,
            time_series_prev=time_series_prev,
            exog_data=exog_data,
            filter_scores=filter_scores,
            plot_forecast=plot_forecast,
            plot_anomaly=True,
            plot_forecast_uncertainty=plot_forecast_uncertainty,
            plot_time_series_prev=plot_time_series_prev,
        )
        title = f"{type(self).__name__}: Anomalies in {metric_name}"
        if plot_forecast:
            title += " (Forecast Overlaid)"
        return fig.plot_plotly(title=title, metric_name=metric_name, figsize=figsize)
    def plot_forecast(
        self,
        *,
        time_series: TimeSeries = None,
        time_stamps: List[int] = None,
        time_series_prev: TimeSeries = None,
        exog_data: TimeSeries = None,
        plot_forecast_uncertainty=False,
        plot_time_series_prev=False,
        figsize=(1000, 600),
        ax=None,
    ):
        """
        Plots the model's forecast (without anomaly scores) in matplotlib.
        Parameters are as in `get_figure`.
        :return: matplotlib figure & axes
        """
        fig = self.get_figure(
            time_series=time_series,
            time_stamps=time_stamps,
            time_series_prev=time_series_prev,
            exog_data=exog_data,
            plot_forecast_uncertainty=plot_forecast_uncertainty,
            plot_time_series_prev=plot_time_series_prev,
            plot_anomaly=False,
            plot_forecast=True,
        )
        title = f"{type(self).__name__}: Forecast of {self.target_name}"
        return fig.plot(title=title, metric_name=self.target_name, figsize=figsize, ax=ax)
    def plot_forecast_plotly(
        self,
        *,
        time_series: TimeSeries = None,
        time_stamps: List[int] = None,
        time_series_prev: TimeSeries = None,
        exog_data: TimeSeries = None,
        plot_forecast_uncertainty=False,
        plot_time_series_prev=False,
        figsize=(1000, 600),
    ):
        """
        Plots the model's forecast (without anomaly scores) in plotly.
        Parameters are as in `get_figure`.
        :return: plotly figure
        """
        fig = self.get_figure(
            time_series=time_series,
            time_stamps=time_stamps,
            time_series_prev=time_series_prev,
            exog_data=exog_data,
            plot_forecast_uncertainty=plot_forecast_uncertainty,
            plot_time_series_prev=plot_time_series_prev,
            plot_anomaly=False,
            plot_forecast=True,
        )
        title = f"{type(self).__name__}: Forecast of {self.target_name}"
        return fig.plot_plotly(title=title, metric_name=self.target_name, figsize=figsize)
import bisect
import copy
from enum import Enum
import logging
from typing import List, Tuple, Union
import warnings
import numpy as np
import pandas as pd
import scipy.sparse
from scipy.special import logsumexp
from scipy.stats import norm
from tqdm import tqdm
from merlion.models.anomaly.base import NoCalibrationDetectorConfig, DetectorBase
from merlion.models.anomaly.forecast_based.base import ForecastingDetectorBase
from merlion.models.forecast.base import ForecasterConfig
from merlion.plot import Figure
from merlion.post_process.threshold import AggregateAlarms
from merlion.utils.conj_priors import ConjPrior, MVNormInvWishart, BayesianMVLinReg
from merlion.utils.time_series import TimeSeries, UnivariateTimeSeries, to_pd_datetime
logger = logging.getLogger(__name__)
class ChangeKind(Enum):
    """
    Enum representing the kinds of changes points we would like to detect.
    Enum values correspond to the Bayesian `ConjPrior` class used to detect each sort of change point.
    """
    # NOTE: member order matters — BOCPD._train() iterates over the non-Auto
    # members when choosing a change kind automatically.
    Auto = None
    """
    Automatically choose the Bayesian conjugate prior we would like to use.
    """
    LevelShift = MVNormInvWishart
    """
    Model data points with a normal distribution, to detect level shifts.
    """
    TrendChange = BayesianMVLinReg
    """
    Model data points as a linear function of time, to detect trend changes.
    """
class _PosteriorBeam:
    """
    Utility class to track the posterior beam in the dynamic programming for BOCPD.
    Each beam tracks one run-length hypothesis: the conjugate posterior for that
    run, and the joint log probability P(r_t = run_length, x_{1:t}).
    """
    def __init__(self, run_length: int, posterior: ConjPrior, cp_prior: float, logp: float):
        self.run_length: int = run_length
        self.posterior: ConjPrior = posterior
        self.cp_prior = cp_prior
        # joint probability P(r_t = self.run_length, x_{1:t})
        self.logp = logp
    def update(self, x):
        # On entry, self.logp is log P(r_{t-1} = self.run_length, x_{1:t-1}).
        # A single (timestamp, value) pair counts as one observation; anything
        # else is a batch whose length is its number of observations.
        is_single = isinstance(x, tuple) and len(x) == 2
        n_obs = 1 if is_single else len(x)
        # Single points may have a cheaper explicit update, when the prior implements one.
        if is_single:
            method = getattr(self.posterior, "posterior_explicit", self.posterior.posterior)
        else:
            method = self.posterior.posterior
        # logp_x is log P(x_t); the posterior is updated in the same call
        logp_x, self.posterior = method(x, log=True, return_updated=True)
        self.run_length += n_obs
        # P(r_t, x_{1:t}) = P(r_{t-1}, x_{1:t-1}) * P(x_t) * (1 - cp_prior)
        self.logp += sum(logp_x) + n_obs * np.log1p(-self.cp_prior)
class BOCPDConfig(ForecasterConfig, NoCalibrationDetectorConfig):
    """
    Config class for `BOCPD` (Bayesian Online Change Point Detection).
    """
    _default_threshold = AggregateAlarms(alm_threshold=norm.ppf((1 + 0.5) / 2), min_alm_in_window=1)
    """
    Default threshold is for a >=50% probability that a point is a change point.
    """
    def __init__(
        self,
        change_kind: Union[str, ChangeKind] = ChangeKind.Auto,
        cp_prior=1e-2,
        lag=None,
        min_likelihood=1e-16,
        max_forecast_steps=None,
        **kwargs,
    ):
        """
        :param change_kind: the kind of change points we would like to detect
        :param cp_prior: prior belief probability of how frequently changepoints occur
        :param lag: the maximum amount of delay/lookback (in number of steps) allowed for detecting change points.
            If ``lag`` is ``None``, we will consider the entire history. Note: we do not recommend ``lag = 0``.
        :param min_likelihood: we will discard any hypotheses whose probability of being a change point is
            lower than this threshold. Lower values improve accuracy at the cost of time and space complexity.
        :param max_forecast_steps: the maximum number of steps the model is allowed to forecast. Ignored.
        """
        # change_kind goes through the property setter below, which validates strings
        self.change_kind = change_kind
        self.min_likelihood = min_likelihood
        self.cp_prior = cp_prior  # Kats checks [0.001, 0.002, 0.005, 0.01, 0.02]
        self.lag = lag
        super().__init__(max_forecast_steps=max_forecast_steps, **kwargs)
    @property
    def change_kind(self) -> ChangeKind:
        """
        :return: the kind of change points we would like to detect
        """
        return self._change_kind
    @change_kind.setter
    def change_kind(self, change_kind: Union[str, ChangeKind]):
        # Accept either a ChangeKind member or its string name
        if isinstance(change_kind, str):
            valid_kinds = set(ChangeKind.__members__.keys())
            if change_kind not in valid_kinds:
                raise KeyError(f"{change_kind} is not a valid change kind. Valid change kinds are: {valid_kinds}")
            change_kind = ChangeKind[change_kind]
        self._change_kind = change_kind
class BOCPD(ForecastingDetectorBase):
"""
Bayesian online change point detection algorithm described by
`Adams & MacKay (2007) <https://arxiv.org/abs/0710.3742>`__.
At a high level, this algorithm models the observed data using Bayesian conjugate priors. If an observed value
deviates too much from the current posterior distribution, it is likely a change point, and we should start
modeling the time series from that point forwards with a freshly initialized Bayesian conjugate prior.
The ``get_anomaly_score()`` method returns a z-score corresponding to the probability of each point being
a change point. The ``forecast()`` method returns the predicted values (and standard error) of the underlying
piecewise model on the relevant data.
"""
config_class = BOCPDConfig
def __init__(self, config: BOCPDConfig = None):
config = BOCPDConfig() if config is None else config
super().__init__(config)
self.posterior_beam: List[_PosteriorBeam] = []
self.train_timestamps: List[float] = []
self.full_run_length_posterior = scipy.sparse.dok_matrix((0, 0), dtype=float)
self.pw_model: List[Tuple[pd.Timestamp, ConjPrior]] = []
@property
def _pandas_train(self):
return False
@property
def _online_model(self) -> bool:
return True
@property
def require_even_sampling(self) -> bool:
return False
@property
def require_univariate(self) -> bool:
return False
@property
def last_train_time(self):
return None if len(self.train_timestamps) == 0 else to_pd_datetime(self.train_timestamps[-1])
@last_train_time.setter
def last_train_time(self, t):
pass
@property
def n_seen(self):
"""
:return: the number of data points seen so far
"""
return self.full_run_length_posterior.get_shape()[0]
@property
def change_kind(self) -> ChangeKind:
"""
:return: the kind of change points we would like to detect
"""
return self.config.change_kind
@property
def cp_prior(self) -> float:
"""
:return: prior belief probability of how frequently changepoints occur
"""
return self.config.cp_prior
@property
def lag(self) -> int:
"""
:return: the maximum amount of delay allowed for detecting change points. A higher lag can increase
recall, but it may decrease precision.
"""
return self.config.lag
@property
def min_likelihood(self) -> float:
"""
:return: we will not consider any hypotheses (about whether a particular point is a change point)
with likelihood lower than this threshold
"""
return self.config.min_likelihood
def _create_posterior(self, logp: float) -> _PosteriorBeam:
posterior = self.change_kind.value()
return _PosteriorBeam(run_length=0, posterior=posterior, cp_prior=self.cp_prior, logp=logp)
def _get_anom_scores(self, time_stamps: List[Union[int, float]]) -> TimeSeries:
# Convert sparse posterior matrix to a form where it's fast to access its diagonals
with warnings.catch_warnings():
warnings.simplefilter("ignore")
posterior = scipy.sparse.dia_matrix(self.full_run_length_posterior)
# Compute the MAP probability that each point is a change point.
# full_run_length_posterior[i, r] = P[run length = r at time t_i]
i_0 = bisect.bisect_left(self.train_timestamps, time_stamps[0])
i_f = bisect.bisect_right(self.train_timestamps, time_stamps[-1])
probs = np.zeros(i_f - i_0)
n_lag = None if self.lag is None else self.lag + 1
for i_prob, i_posterior in enumerate(range(max(i_0, 1), i_f)):
probs[i_prob] = posterior.diagonal(-i_posterior)[:n_lag].max()
# Convert P[changepoint] to z-score units, and align it to the right time stamps
scores = norm.ppf((1 + probs) / 2)
ts = UnivariateTimeSeries(time_stamps=self.train_timestamps[i_0:i_f], values=scores, name="anom_score").to_ts()
return ts.align(reference=time_stamps)
def _update_model(self, timestamps):
# Figure out where the changepoints are in the data
changepoints = self.threshold.to_simple_threshold()(self._get_anom_scores(timestamps))
changepoints = changepoints.to_pd().iloc[:, 0]
cp_times = changepoints[changepoints != 0].index
# Remove every sub-model that takes effect after the first timestamp provided.
self.pw_model = [(t0, model) for t0, model in self.pw_model if t0 < changepoints.index[0]]
# Update the final piece of the existing model (if there is one)
t0 = changepoints.index[0] if len(self.pw_model) == 0 else self.pw_model[-1][0]
tf = changepoints.index[-1] if len(cp_times) == 0 else cp_times[0]
train_data = self.transform(self.train_data)
data = train_data.window(t0, tf, include_tf=len(cp_times) == 0)
if len(data) > 0:
if len(self.pw_model) == 0:
self.pw_model.append((t0, self.change_kind.value(data)))
else:
self.pw_model[-1] = (t0, self.change_kind.value(data))
# Build a piecewise model by using the data between each subsequent change point
t0 = tf
for tf in cp_times[1:]:
data = train_data.window(t0, tf)
if len(data) > 0:
self.pw_model.append((t0, self.change_kind.value(data)))
t0 = tf
if t0 < changepoints.index[-1]:
_, data = train_data.bisect(t0, t_in_left=False)
self.pw_model.append((t0, self.change_kind.value(data)))
def train_pre_process(
self, train_data: TimeSeries, exog_data: TimeSeries = None, return_exog=False
) -> Union[TimeSeries, Tuple[TimeSeries, Union[TimeSeries, None]]]:
# BOCPD doesn't _require_ target_seq_index to be specified, but train_pre_process() does.
if self.target_seq_index is None and train_data.dim > 1:
self.config.target_seq_index = 0
logger.warning(
f"Received a {train_data.dim}-variate time series, but `target_seq_index` was not "
f"specified. Setting `target_seq_index = 0` so the `forecast()` method will work."
)
ret = super().train_pre_process(train_data, exog_data=exog_data, return_exog=return_exog)
# We manually update self.train_data in update(), so do nothing here
self.train_data = None
return ret
def _forecast(
self, time_stamps: List[int], time_series_prev: pd.DataFrame = None, return_prev=False
) -> Tuple[pd.DataFrame, pd.DataFrame]:
time_stamps = to_pd_datetime(time_stamps)
if return_prev and time_series_prev is not None:
time_stamps = time_series_prev.index.union(time_stamps)
# Initialize output accumulators
pred_full, err_full = None, None
# Split the time stamps based on which model piece should be used
j = 0
i = bisect.bisect_left([t0 for t0, model in self.pw_model], time_stamps[j], hi=len(self.pw_model) - 1)
for i, (t0, posterior) in enumerate(self.pw_model[i:], i):
# Stop forecasting if we've finished with all the input timestamps
if j >= len(time_stamps):
break
# If this is the last piece, use it to forecast the rest of the timestamps
if i == len(self.pw_model) - 1:
pred, err = posterior.forecast(time_stamps[j:])
# Otherwise, predict until the next piece takes over
else:
t_next = self.pw_model[i + 1][0]
j_next = bisect.bisect_left(time_stamps, t_next)
pred, err = posterior.forecast(time_stamps[j:j_next])
j = j_next
# Accumulate results
pred_full = pred if pred_full is None else pred_full + pred
err_full = err if err_full is None else err_full + err
pred = pred_full.univariates[pred_full.names[self.target_seq_index]].to_pd()
err = err_full.univariates[err_full.names[self.target_seq_index]].to_pd()
pred[pred.isna() | np.isinf(pred)] = 0
err[err.isna() | np.isinf(err)] = 0
return pd.DataFrame(pred), pd.DataFrame(err)
def update(self, time_series: TimeSeries):
"""
Updates the BOCPD model's internal state using the time series values provided.
:param time_series: time series whose values we are using to update the internal state of the model
:return: anomaly score associated with each point (based on the probability of it being a change point)
"""
# Only update on the portion of the time series after the last training timestamp
time_stamps = time_series.time_stamps
if self.last_train_time is not None:
_, time_series = time_series.bisect(self.last_train_time, t_in_left=True)
# Update the training data accumulated so far
if self.train_data is None:
self.train_data = time_series
else:
self.train_data = self.train_data + time_series
# Align the time series & expand the array storing the full posterior distribution of run lengths
time_series = time_series.align()
n_seen, T = self.n_seen, len(time_series)
self.full_run_length_posterior = scipy.sparse.block_diag(
(self.full_run_length_posterior, scipy.sparse.dok_matrix((T, T), dtype=float)), format="dok"
)
# Compute the minimum log likelihood threshold that we consider.
min_ll = -np.inf if self.min_likelihood is None or self.min_likelihood <= 0 else np.log(self.min_likelihood)
if self.change_kind is ChangeKind.TrendChange:
min_ll = min_ll * time_series.dim
min_ll = min_ll + np.log(self.cp_prior)
# Iterate over the time series
for i, (t, x) in enumerate(tqdm(time_series, desc="BOCPD Update", disable=(T == 0))):
# Update posterior beams
for post in self.posterior_beam:
post.update((t, x))
# Calculate posterior probability that this is change point with
# P_changepoint = \sum_{r_{t-1}} P(r_{t-1}, x_{1:t-1}) * P(x_t) * cp_prior
# After the updates, post.logp = log P(r_t, x_{1:t})
# = log P(r_{t-1}, x_1{1:t-1}) + log P(x_t) - log(1 - cp_prior)
# So we can just add log(cp_prior) - log(1 - cp_prior) to each of the logp's
if len(self.posterior_beam) == 0:
cp_logp = 0
else:
cp_delta = np.log(self.cp_prior) - np.log1p(-self.cp_prior)
cp_logp = logsumexp([post.logp + cp_delta for post in self.posterior_beam])
self.posterior_beam.append(self._create_posterior(logp=cp_logp))
# P(x_{1:t}) = \sum_{r_t} P(r_t, x_{1:t})
evidence = logsumexp([post.logp for post in self.posterior_beam])
# P(r_t) = P(r_t, x_{1:t}) / P(x_{1:t})
run_length_dist_0 = {post.run_length: post.logp - evidence for post in self.posterior_beam}
# Remove posterior beam candidates whose run length probability is too low
run_length_dist, to_remove = {}, {}
for r, logp in run_length_dist_0.items():
if logp < min_ll and r > 2: # allow at least 2 updates for each change point hypothesis
to_remove[r] = logp
else:
run_length_dist[r] = logp
# Re-normalize all remaining probabilities to sum to 1
self.posterior_beam = [post for post in self.posterior_beam if post.run_length not in to_remove]
if len(to_remove) > 0:
excess_p = np.exp(logsumexp(list(to_remove.values()))) # log P[to_remove]
for post in self.posterior_beam:
post.logp -= np.log1p(-excess_p)
run_length_dist[post.run_length] -= np.log1p(-excess_p)
# Update the full posterior distribution of run-length at each time, up to the desired lag
run_length_dist = [(r, logp) for r, logp in run_length_dist.items()]
if len(run_length_dist) > 0:
all_r, all_logp_r = zip(*run_length_dist)
self.full_run_length_posterior[n_seen + i, all_r] = np.exp(all_logp_r)
# Add this timestamp to the list of timestamps we've trained on
self.train_timestamps.append(t)
# Update the predictive model if there is any new data
if len(time_series) > 0:
if self.lag is None:
n = len(self.train_timestamps)
else:
n = T + self.lag
self._update_model(self.train_timestamps[-n:])
# Return the anomaly scores
return self._get_anom_scores(time_stamps)
def _train(self, train_data: TimeSeries, train_config=None) -> TimeSeries:
# If not automatically detecting the change kind, train as normal
if self.change_kind is not ChangeKind.Auto:
return self.update(time_series=train_data)
# Otherwise, evaluate all change kinds as options
candidates = []
for change_kind in [ck for ck in ChangeKind if ck is not ChangeKind.Auto]:
candidate = copy.deepcopy(self)
candidate.config.change_kind = change_kind
train_scores = candidate._train(train_data, train_config=train_config)
nll = -logsumexp([p.logp for p in candidate.posterior_beam]).item()
n_params = sum(model.n_params for t, model in candidate.pw_model)
aicc = 2 * n_params + 2 * nll + (2 * n_params * (n_params + 1)) / max(1, len(train_scores) - n_params - 1)
logger.info(
f"Change kind {change_kind.name} has AICc {aicc:.3f} "
f"(NLL={nll:.3f}, n_params={n_params}, n_data={len(train_scores)})."
)
candidates.append((aicc, candidate, train_scores))
# Choose the model with the best log likelihood
i_best = np.argmin([candidate[0] for candidate in candidates])
log_likelihood, best, train_scores = candidates[i_best]
self.__setstate__(best.__getstate__())
logger.info(f"Using change kind {self.change_kind.name} because it has the best AICc.")
return train_scores
def get_anomaly_score(
self, time_series: TimeSeries, time_series_prev: TimeSeries = None, exog_data: TimeSeries = None
) -> TimeSeries:
return DetectorBase.get_anomaly_score(self, time_series, time_series_prev)
def _get_anomaly_score(self, time_series: pd.DataFrame, time_series_prev: pd.DataFrame = None) -> pd.DataFrame:
if time_series_prev is not None:
self.update(TimeSeries.from_pd(time_series_prev))
return self.update(TimeSeries.from_pd(time_series)).to_pd()
    def get_figure(self, *, time_series: TimeSeries = None, **kwargs) -> Figure:
        """
        :param time_series: if given, the transformed series is first folded into the
            model's online state via ``update()`` before the figure is produced, so the
            plot reflects this data.
        :return: a `Figure` visualizing the model, as produced by the parent class.
        """
        if time_series is not None:
            # Keep the online model state in sync with the series being plotted.
            self.update(self.transform(time_series))
        return super().get_figure(time_series=time_series, **kwargs)
from abc import abstractmethod
from enum import Enum, auto
import logging
from typing import Any, Iterator, List, Optional, Tuple, Union
import numpy as np
from scipy.signal import argrelmax
from scipy.stats import norm
import statsmodels.api as sm
from statsmodels.tsa.exponential_smoothing.ets import ETSModel
from merlion.models.automl.base import AutoMLMixIn
from merlion.models.base import ModelBase
from merlion.models.layers import LayeredModelConfig
from merlion.transform.resample import TemporalResample
from merlion.utils import TimeSeries, UnivariateTimeSeries
from merlion.utils.misc import AutodocABCMeta
logger = logging.getLogger(__name__)
class PeriodicityStrategy(Enum):
    """
    Strategy to choose the seasonality if multiple candidates are detected.
    See `SeasonalityLayer.detect_seasonality` for how each strategy is applied.
    """

    ACF = auto()
    """
    Select the seasonality value with the highest autocorrelation (z-score).
    """

    Min = auto()
    """
    Select the minimum seasonality among the detected candidates.
    """

    Max = auto()
    """
    Select the maximum seasonality among the detected candidates.
    """

    All = auto()
    """
    Use all seasonalities. Only valid for models which support multiple seasonalities.
    """
class SeasonalityModel(metaclass=AutodocABCMeta):
    """
    Mix-in that provides a simple interface for setting the seasonality in a model.
    Extend this class to implement custom behavior for seasonality processing.
    """

    @abstractmethod
    def set_seasonality(self, theta, train_data: UnivariateTimeSeries):
        """
        Implement this method to do any model-specific adjustments on the seasonality that was provided by
        `SeasonalityLayer`.

        :param theta: Seasonality processed by `SeasonalityLayer`.
        :param train_data: Training data (or numpy array representing the target univariate)
            for any model-specific adjustments you might want to make.
        """
        raise NotImplementedError
class SeasonalityConfig(LayeredModelConfig):
    """
    Config object for an automatic seasonality detection layer.
    """

    _default_transform = TemporalResample()

    def __init__(
        self, model, periodicity_strategy=PeriodicityStrategy.ACF, pval: float = 0.05, max_lag: int = None, **kwargs
    ):
        """
        :param periodicity_strategy: Strategy to choose the seasonality if multiple candidates are detected.
        :param pval: p-value for deciding whether a detected seasonality is statistically significant.
        :param max_lag: max lag considered for seasonality detection.
        """
        # Assign through the property so the strategy is validated/normalized up front.
        self.periodicity_strategy = periodicity_strategy
        assert 0 < pval < 1
        self.max_lag = max_lag
        self.pval = pval
        super().__init__(model=model, **kwargs)

    @property
    def multi_seasonality(self):
        """
        :return: Whether the model supports multiple seasonalities. ``False`` unless explicitly overridden.
        """
        return False

    @property
    def periodicity_strategy(self) -> PeriodicityStrategy:
        """
        :return: Strategy to choose the seasonality if multiple candidates are detected.
        """
        return self._periodicity_strategy

    @periodicity_strategy.setter
    def periodicity_strategy(self, p: Union[PeriodicityStrategy, str]):
        # Accept either an enum member or its (case-insensitive) name.
        if not isinstance(p, PeriodicityStrategy):
            valid = {name.lower(): name for name in PeriodicityStrategy.__members__}
            assert p.lower() in valid, f"Unsupported PeriodicityStrategy {p}. Supported values: {valid.values()}"
            p = PeriodicityStrategy[valid[p.lower()]]
        # "All" only makes sense when the wrapped model can consume several seasonalities.
        if p is PeriodicityStrategy.All and not self.multi_seasonality:
            raise ValueError(
                "Periodicity strategy All is not supported for a model which does not support multiple seasonalities."
            )
        self._periodicity_strategy = p
class SeasonalityLayer(AutoMLMixIn):
    """
    Seasonality layer that automatically determines the seasonality of your data. Can be used directly on
    any model that implements the `SeasonalityModel` class. The algorithmic idea is from the
    `theta method <https://github.com/Mcompetitions/M4-methods/blob/master/4Theta%20method.R>`__. We find a set of
    multiple candidate seasonalities, and we return the best one(s) based on the `PeriodicityStrategy`.
    """

    config_class = SeasonalityConfig

    @property
    def require_even_sampling(self) -> bool:
        # ACF-based detection tolerates uneven sampling; no resampling is forced here.
        return False

    @property
    def require_univariate(self):
        # Univariate input is only required when no target sequence index is configured.
        return getattr(self.config, "target_seq_index", None) is None

    @property
    def multi_seasonality(self):
        """
        :return: Whether the model supports multiple seasonalities.
        """
        return self.config.multi_seasonality

    @property
    def periodicity_strategy(self):
        """
        :return: Strategy to choose the seasonality if multiple candidates are detected.
        """
        return self.config.periodicity_strategy

    @property
    def pval(self):
        """
        :return: p-value for deciding whether a detected seasonality is statistically significant.
        """
        return self.config.pval

    @property
    def max_lag(self):
        """
        :return: max_lag for seasonality detection
        """
        return self.config.max_lag

    @staticmethod
    def detect_seasonality(
        x: np.array,
        max_lag: int = None,
        pval: float = 0.05,
        periodicity_strategy: PeriodicityStrategy = PeriodicityStrategy.ACF,
    ) -> List[int]:
        """
        Helper method to detect the seasonality of a time series.

        :param x: The numpy array of values whose seasonality we want to detect. Must be univariate & flattened.
        :param periodicity_strategy: Strategy to choose the seasonality if multiple candidates are detected.
        :param pval: p-value for deciding whether a detected seasonality is statistically significant.
        :param max_lag: max lag considered for seasonality detection.
        :return: list of detected seasonalities (lags), filtered per ``periodicity_strategy``;
            ``[1]`` (no seasonality) if nothing significant is found.
        """
        # Use the residuals of an ETS model fit to the data, to handle any trend. This makes the ACF more robust.
        # For each candidate seasonality, the ACF we assign is the higher of the raw ACF and residual ACF.
        candidate2score = {}
        y = x if len(x) < 10 else ETSModel(x, error="add", trend="add").fit(disp=False).resid
        # Iterate over [raw] only if the ETS fit was skipped (x is y); otherwise [raw, residuals].
        # Note: the loop variable deliberately shadows the parameter ``x``.
        for x in [x] if x is y else [x, y]:
            # compute max lag & ACF function
            # (max_lag is computed at most once: after the first iteration it is no longer None)
            max_lag = max(min(int(10 * np.log10(len(x))), len(x) - 1), 40) if max_lag is None else max_lag
            xacf = sm.tsa.acf(x, nlags=max_lag, fft=False)
            xacf[np.isnan(xacf)] = 0

            # select the local maximum points with acf > 0, and smaller than 1/2 the length of the time series
            xacf = xacf[: np.ceil(len(x) / 2).astype(int)]
            candidates = np.intersect1d(np.where(xacf > 0), argrelmax(xacf)[0])
            if len(candidates) > 0:
                # filter out potential harmonics by applying peak-finding on the peaks of the ACF
                if len(candidates) > 1:
                    candidates_idx = []
                    # argrelmax misses endpoints, so check the first & last peaks explicitly
                    if xacf[candidates[0]] > xacf[candidates[1]]:
                        candidates_idx += [0]
                    candidates_idx += argrelmax(xacf[candidates])[0].tolist()
                    if xacf[candidates[-1]] > xacf[candidates[-2]]:
                        candidates_idx += [-1]
                    candidates = candidates[candidates_idx]

                # statistical test if ACF is significant with respect to a normal distribution
                # (drop lag 0; Bartlett's formula for the cumulative ACF variance)
                xacf = xacf[1:]
                xacf_var = np.cumsum(np.concatenate(([1], 2 * xacf[:-1] ** 2))) / len(x)
                z_scores = xacf / np.sqrt(xacf_var)
                candidates = candidates[z_scores[candidates - 1] > norm.ppf(1 - pval / 2)]
                # keep the best z-score seen for each candidate across raw & residual passes
                for c in candidates.tolist():
                    candidate2score[c] = max(candidate2score.get(c, -np.inf), z_scores[c - 1])

        # sort the candidates by z-score and choose the desired candidates based on periodicity strategy
        candidates = sorted(candidate2score.keys(), key=lambda c: candidate2score[c], reverse=True)
        for c in candidates:
            logger.info(f"Detected seas = {c:3d} with z-score = {candidate2score[c]:5.2f}.")
        if len(candidates) == 0:
            candidates = [1]
        if periodicity_strategy is PeriodicityStrategy.ACF:
            candidates = [candidates[0]]
        elif periodicity_strategy is PeriodicityStrategy.Min:
            candidates = [min(candidates)]
        elif periodicity_strategy is PeriodicityStrategy.Max:
            candidates = [max(candidates)]
        elif periodicity_strategy is PeriodicityStrategy.All:
            candidates = candidates
        else:
            raise ValueError(f"Periodicity strategy {periodicity_strategy} not supported.")
        return candidates

    def set_theta(self, model, theta, train_data: TimeSeries = None):
        # theta is the detected seasonality; delegate to the model's SeasonalityModel hook.
        model.set_seasonality(theta, train_data.univariates[self.target_name])

    def evaluate_theta(
        self, thetas: Iterator, train_data: TimeSeries, train_config=None, exog_data: TimeSeries = None
    ) -> Tuple[Any, Optional[ModelBase], Optional[Tuple[TimeSeries, Optional[TimeSeries]]]]:
        # If multiple seasonalities are supported, return a list of all detected seasonalities
        # No model is trained here, so the model & train result slots are None.
        return (list(thetas) if self.config.multi_seasonality else next(thetas)), None, None

    def generate_theta(self, train_data: TimeSeries) -> Iterator:
        """
        :param train_data: training data whose target univariate is analyzed.
        :return: iterator over candidate seasonalities, ordered per the periodicity strategy.
        """
        x = train_data.univariates[self.target_name].np_values
        candidates = self.detect_seasonality(
            x=x, max_lag=self.max_lag, pval=self.pval, periodicity_strategy=self.periodicity_strategy
        )
        # Only log when something other than the trivial seasonality (1) was detected.
        if candidates[: None if self.config.multi_seasonality else 1] != [1]:
            logger.info(f"Automatically detect the periodicity is {candidates}")
        return iter(candidates)
from collections import OrderedDict
import logging
from typing import Union, Iterator, Tuple
import numpy as np
import pandas as pd
from merlion.models.forecast.ets import ETS
from merlion.models.automl.base import InformationCriterion, ICConfig, ICAutoMLForecaster
from merlion.models.automl.search import GridSearch
from merlion.models.automl.seasonality import PeriodicityStrategy, SeasonalityConfig, SeasonalityLayer
from merlion.utils import TimeSeries
logger = logging.getLogger(__name__)
class AutoETSConfig(SeasonalityConfig, ICConfig):
    """
    Configuration class for `AutoETS`. Acts as a wrapper around an `ETS` model, automatically detecting
    the hyperparameters ``seasonal_periods``, ``error``, ``trend``, ``damped_trend`` and ``seasonal``.
    """

    # This is adapted from ets.R from forecast package
    def __init__(
        self,
        model: Union[ETS, dict] = None,
        auto_seasonality: bool = True,
        auto_error: bool = True,
        auto_trend: bool = True,
        auto_seasonal: bool = True,
        auto_damped: bool = True,
        periodicity_strategy: PeriodicityStrategy = PeriodicityStrategy.ACF,
        information_criterion: InformationCriterion = InformationCriterion.AIC,
        additive_only: bool = False,
        allow_multiplicative_trend: bool = False,
        restrict: bool = True,
        **kwargs,
    ):
        """
        :param auto_seasonality: Whether to automatically detect the seasonality.
        :param auto_error: Whether to automatically detect the error components.
        :param auto_trend: Whether to automatically detect the trend components.
        :param auto_seasonal: Whether to automatically detect the seasonal components.
        :param auto_damped: Whether to automatically detect the damped trend components.
        :param additive_only: If True, the search space will only consider additive models.
        :param allow_multiplicative_trend: If True, models with multiplicative trend are allowed in the search space.
        :param restrict: If True, the models with infinite variance will not be allowed in the search space.
        """
        if model is None:
            model = dict(name="ETS")
        super().__init__(
            model=model,
            periodicity_strategy=periodicity_strategy,
            information_criterion=information_criterion,
            **kwargs,
        )
        # Flags controlling which ETS components are auto-detected.
        self.auto_seasonality = auto_seasonality
        self.auto_error = auto_error
        self.auto_trend = auto_trend
        self.auto_seasonal = auto_seasonal
        self.auto_damped = auto_damped
        # Restrictions applied to the hyperparameter search space.
        self.additive_only = additive_only
        self.allow_multiplicative_trend = allow_multiplicative_trend
        self.restrict = restrict
class AutoETS(ICAutoMLForecaster, SeasonalityLayer):
    """
    Wrapper around an `ETS` model, which automatically detects
    the hyperparameters ``seasonal_periods``, ``error``, ``trend``, ``damped_trend`` and ``seasonal``.
    """

    config_class = AutoETSConfig

    def __init__(self, config: AutoETSConfig):
        super().__init__(config)

    def generate_theta(self, train_data: TimeSeries) -> Iterator:
        """
        Generate the candidate thetas. Each theta is one parameter combination
        ``{error, trend, seasonal, damped, m}`` from a grid search, where invalid
        combinations (per the config's restrictions) are excluded.

        :param train_data: the training data whose target univariate determines the search space.
        :return: an iterator of candidate hyperparameter dicts.
        """
        y = train_data.univariates[self.target_name].np_values

        # check the size of y
        n_samples = y.shape[0]
        if n_samples <= 3:
            # BUG FIX: `information_criterion` is a read-only property on ICAutoMLForecaster,
            # so assigning `self.information_criterion = ...` raises AttributeError.
            # Assign through the config, whose ICConfig property has a setter.
            self.config.information_criterion = InformationCriterion.AIC

        # auto-detect seasonality if desired, otherwise just get it from seasonal order
        if self.config.auto_seasonality:
            candidate_m = SeasonalityLayer.generate_theta(self, train_data=train_data)
            m, _, _ = SeasonalityLayer.evaluate_theta(self, thetas=candidate_m, train_data=train_data)
        else:
            if self.model.config.seasonal_periods is None:
                m = 1
            else:
                m = max(1, self.model.config.seasonal_periods)

        # set the parameters ranges for error, trend, damped_trend and seasonal
        # (multiplicative components are only valid for strictly positive data)
        if np.any(y <= 0):
            E_range = ["add"]
            T_range = ["add", None]
        else:
            E_range = ["add", "mul"]
            if self.config.allow_multiplicative_trend:
                T_range = ["add", "mul", None]
            else:
                T_range = ["add", None]

        # no seasonal component if the period is trivial or there is less than one full period of data
        if m <= 1 or y.shape[0] <= m:
            m = 1
            S_range = [None]
        elif np.any(y <= 0):
            S_range = ["add"]
        else:
            S_range = ["add", "mul"]
        D_range = [True, False]

        # pin any component the user chose not to auto-detect to its configured value
        if not self.config.auto_error:
            E_range = [self.model.config.error]
        if not self.config.auto_trend:
            T_range = [self.model.config.trend]
        if not self.config.auto_seasonal:
            S_range = [self.model.config.seasonal]
        if not self.config.auto_damped:
            D_range = [self.model.config.damped_trend]

        # Construct a grid search object
        param_values = OrderedDict(error=E_range, trend=T_range, seasonal=S_range, damped=D_range, m=[m])
        # A damped trend requires a trend component to damp.
        restrictions = [dict(trend=None, damped=True)]
        if self.config.additive_only:
            restrictions.extend([dict(error="mul"), dict(trend="mul"), dict(seasonal="mul")])
        if self.config.restrict:
            # exclude combinations with infinite variance
            restrictions.append(dict(error="add", trend="mul"))
            restrictions.append(dict(error="add", seasonal="mul"))
            restrictions.append(dict(error="mul", trend="mul", seasonal="add"))
        return iter(GridSearch(param_values=param_values, restrictions=restrictions))

    def set_theta(self, model, theta, train_data: TimeSeries = None):
        """
        Apply a candidate theta (error/trend/damped/seasonal/m) to the given ETS model's config.
        """
        m = theta["m"]
        model.config.error = theta["error"]
        model.config.trend = theta["trend"]
        model.config.damped_trend = theta["damped"]
        # no seasonal component for a trivial period
        model.config.seasonal = None if m <= 1 else theta["seasonal"]
        model.config.seasonal_periods = m

    def _model_name(self, theta):
        """
        :return: a human-readable description of the candidate model, used for logging.
        """
        return f"ETS(err={theta['error']},trend={theta['trend']},seas={theta['seasonal']},damped={theta['damped']})"

    def get_ic(self, model, train_data: pd.DataFrame, train_result: Tuple[pd.DataFrame, pd.DataFrame]) -> float:
        """
        :return: the configured information criterion (AIC/BIC/AICc), read off the fitted
            statsmodels results object of the underlying ETS model.
        :raises ValueError: if the configured criterion is not supported by ETS.
        """
        ic = self.config.information_criterion.name
        if ic in ["AIC", "BIC", "AICc"]:
            return getattr(model.base_model.model, ic.lower())
        else:
            raise ValueError(f"{type(self.model).__name__} doesn't support information criterion {ic}")
from abc import abstractmethod
from copy import deepcopy
from enum import Enum, auto
import logging
from typing import Any, Iterator, Optional, Tuple, Union
import time
import pandas as pd
from merlion.models.layers import Config, ModelBase, LayeredModel, ForecasterBase
from merlion.utils import TimeSeries
from merlion.utils.misc import AutodocABCMeta
logger = logging.getLogger(__name__)
class AutoMLMixIn(LayeredModel, metaclass=AutodocABCMeta):
    """
    Abstract base class which converts `LayeredModel` into an AutoML model.
    """

    @property
    def _pandas_train(self):
        # The AutoML layer works on `TimeSeries`; conversion to pandas (if required by the
        # underlying model) happens only at the point of delegation below.
        return False

    def _train_with_exog(self, train_data: TimeSeries, train_config=None, exog_data: TimeSeries = None):
        """
        Generates a set of candidate models and picks the best one.

        :param train_data: the data to train on.
        :param train_config: the train config of the underlying model (optional).
        :param exog_data: optional exogenous data, forwarded to theta evaluation & training.
        """
        # NOTE: generate_theta/evaluate_theta receive the training data directly;
        # train_pre_process() is deliberately not called here. The original training data
        # remains available via model.train_data.
        candidate_thetas = self.generate_theta(train_data)
        theta, model, train_result = self.evaluate_theta(candidate_thetas, train_data, exog_data=exog_data)
        if model is not None:
            # evaluate_theta already produced a fully trained model — adopt it directly.
            self.model = model
            return train_result
        # Otherwise, apply the winning theta to our underlying model and train it ourselves,
        # converting to pandas first if that model trains on DataFrames.
        self.set_theta(self.model, theta, train_data)
        if self.model._pandas_train:
            train_data = train_data.to_pd()
            exog_data = None if exog_data is None else exog_data.to_pd()
        if exog_data is None:
            return self.model._train(train_data, train_config=train_config)
        return self.model._train_with_exog(train_data, train_config=train_config, exog_data=exog_data)

    def _train(self, train_data: TimeSeries, train_config=None):
        # Training without exogenous data is just the exog path with exog_data=None.
        return self._train_with_exog(train_data, train_config=train_config, exog_data=None)

    @abstractmethod
    def generate_theta(self, train_data: TimeSeries) -> Iterator:
        r"""
        :param train_data: Pre-processed training data to use for generation of hyperparameters :math:`\theta`

        Returns an iterator of hyperparameter candidates for consideration with the underlying model.
        """
        raise NotImplementedError

    @abstractmethod
    def evaluate_theta(
        self, thetas: Iterator, train_data: TimeSeries, train_config=None, exog_data: TimeSeries = None
    ) -> Tuple[Any, Optional[ModelBase], Optional[Tuple[TimeSeries, Optional[TimeSeries]]]]:
        r"""
        :param thetas: Iterator of the hyperparameter candidates
        :param train_data: Pre-processed training data
        :param train_config: Training configuration

        Return the optimal hyperparameter, as well as optionally a model and result of the training procedure.
        """
        raise NotImplementedError

    @abstractmethod
    def set_theta(self, model, theta, train_data: TimeSeries = None):
        r"""
        :param model: Underlying base model to which the new theta is applied
        :param theta: Hyperparameter to apply
        :param train_data: Pre-processed training data (Optional)

        Sets the hyperparameter on the provided ``model``. This is used to apply the :math:`\theta` to the model,
        since this behavior is custom to every model. Oftentimes in internal implementations, ``model`` is the
        optimal model.
        """
        raise NotImplementedError
class InformationCriterion(Enum):
    AIC = auto()
    r"""
    Akaike information criterion. Computed as

    .. math::
        \mathrm{AIC} = 2k - 2\mathrm{ln}(L)

    where k is the number of parameters, and L is the model's likelihood.
    """

    BIC = auto()
    r"""
    Bayesian information criterion. Computed as

    .. math::
        k \mathrm{ln}(n) - 2 \mathrm{ln}(L)

    where n is the sample size, k is the number of parameters, and L is the model's likelihood.
    """

    AICc = auto()
    r"""
    Akaike information criterion with correction for small sample size. Computed as

    .. math::
        \mathrm{AICc} = \mathrm{AIC} + \frac{2k^2 + 2k}{n - k - 1}

    where n is the sample size, and k is the number of parameters.
    """
class ICConfig(Config):
    """
    Mix-in to add an information criterion parameter to a model config.
    """

    def __init__(self, information_criterion: InformationCriterion = InformationCriterion.AIC, **kwargs):
        """
        :param information_criterion: information criterion to select the best model.
        """
        super().__init__(**kwargs)
        self.information_criterion = information_criterion

    @property
    def information_criterion(self):
        """
        :return: the `InformationCriterion` used to select the best model.
        """
        return self._information_criterion

    @information_criterion.setter
    def information_criterion(self, ic: Union[InformationCriterion, str]):
        # Accept either an enum member, or its name as a case-insensitive string.
        if isinstance(ic, InformationCriterion):
            self._information_criterion = ic
            return
        valid = {name.lower(): name for name in InformationCriterion.__members__}
        assert ic.lower() in valid, f"Unsupported InformationCriterion {ic}. Supported values: {valid.values()}"
        self._information_criterion = InformationCriterion[valid[ic.lower()]]
class ICAutoMLForecaster(AutoMLMixIn, ForecasterBase, metaclass=AutodocABCMeta):
    """
    AutoML model which uses an information criterion to determine which model parameters are best.
    """

    config_class = ICConfig

    @property
    def information_criterion(self):
        """
        :return: the `InformationCriterion` used to rank candidate models.
        """
        return self.config.information_criterion

    @abstractmethod
    def get_ic(
        self, model, train_data: pd.DataFrame, train_result: Tuple[pd.DataFrame, Optional[pd.DataFrame]]
    ) -> float:
        """
        Returns the information criterion of the model based on the given training data & the model's train result.

        :param model: One of the models being tried. Must be trained.
        :param train_data: The target sequence of the training data as a ``pandas.DataFrame``.
        :param train_result: The result of calling ``model._train()``.
        :return: The information criterion evaluating the model's goodness of fit.
        """
        raise NotImplementedError

    @abstractmethod
    def _model_name(self, theta) -> str:
        """
        :return: a string describing the current model.
        """
        raise NotImplementedError

    def evaluate_theta(
        self, thetas: Iterator, train_data: TimeSeries, train_config=None, exog_data: TimeSeries = None
    ) -> Tuple[Any, ModelBase, Tuple[TimeSeries, Optional[TimeSeries]]]:
        """
        Train one candidate model per theta, score each with the configured information
        criterion, and return the (theta, model, train result) of the lowest-IC candidate.
        """
        best = None
        # Convert to pandas once up front if the underlying model trains on DataFrames.
        pandas_train = self.model._pandas_train
        train = train_data.to_pd() if pandas_train else train_data
        exog = exog_data.to_pd() if exog_data is not None and pandas_train else exog_data
        target = pd.DataFrame(train[self.model.target_name])
        for theta in thetas:
            # Start timer & fit a fresh copy of the underlying model with this theta
            t0 = time.time()
            candidate = deepcopy(self.model)
            self.set_theta(candidate, theta, train_data)
            if exog_data is None:
                result = candidate._train(train, train_config=train_config)
            else:
                result = candidate._train_with_exog(train, train_config=train_config, exog_data=exog)
            elapsed = time.time() - t0
            ic = float(self.get_ic(model=candidate, train_data=target, train_result=result))
            logger.debug(f"{self._model_name(theta)}: {self.information_criterion.name}={ic:.3f}, Time={elapsed:.2f}s")
            # Keep this candidate if it beats the best seen so far (lower IC is better)
            curr = {"theta": theta, "model": candidate, "train_result": result, "ic": ic}
            if best is None:
                best = curr
                logger.debug("First best model found (%.3f)" % ic)
            current_ic = best["ic"]
            if ic < current_ic:
                logger.debug("New best model found (%.3f < %.3f)" % (ic, current_ic))
                best = curr

        # Return best model after post-processing its train result
        theta, model, train_result = best["theta"], best["model"], best["train_result"]
        logger.info(f"Best model: {self._model_name(theta)}")
        return theta, model, train_result
from copy import copy, deepcopy
import logging
from typing import Any, Iterator, Optional, Tuple, Union
import numpy as np
from merlion.models.automl.seasonality import PeriodicityStrategy, SeasonalityConfig, SeasonalityLayer
from merlion.models.forecast.sarima import Sarima
from merlion.models.utils import autosarima_utils
from merlion.transform.resample import TemporalResample
from merlion.utils import TimeSeries, UnivariateTimeSeries
logger = logging.getLogger(__name__)
# FIXME: convert to information criterion version
class AutoSarimaConfig(SeasonalityConfig):
    """
    Configuration class for `AutoSarima`. Acts as a wrapper around a `Sarima` model, which automatically detects
    the seasonality, (seasonal) differencing order, and (seasonal) AR/MA orders. If a non-numeric value is given
    for any of the relevant entries of the order or seasonal order, that parameter is detected automatically.

    .. note::

        The automatic selection of AR, MA, seasonal AR, and seasonal MA parameters is implemented in a coupled way.
        The user must specify all of these parameters explicitly to avoid automatic selection.
    """

    _default_transform = TemporalResample()

    def __init__(
        self,
        model: Union[Sarima, dict] = None,
        auto_seasonality: bool = True,
        periodicity_strategy: PeriodicityStrategy = PeriodicityStrategy.ACF,
        auto_pqPQ: bool = True,
        auto_d: bool = True,
        auto_D: bool = True,
        maxiter: int = None,
        max_k: int = 100,
        max_dur: float = 3600,
        approximation: bool = None,
        approx_iter: int = None,
        **kwargs,
    ):
        """
        :param auto_seasonality: Whether to automatically detect the seasonality.
        :param periodicity_strategy: Periodicity Detection Strategy.
        :param auto_pqPQ: Whether to automatically choose AR/MA orders ``p, q`` and seasonal AR/MA orders ``P, Q``.
        :param auto_d: Whether to automatically choose the difference order ``d``.
        :param auto_D: Whether to automatically choose the seasonal difference order ``D``.
        :param maxiter: The maximum number of iterations to perform
        :param max_k: Maximum number of models considered in the stepwise search
        :param max_dur: Maximum training time considered in the stepwise search
        :param approximation: Whether to use ``approx_iter`` iterations (instead
            of ``maxiter``) to speed up computation. If ``None``, we use
            approximation mode when the training data is too long (>150), or when
            the length off the period is too high (``periodicity > 12``).
        :param approx_iter: The number of iterations to perform in approximation mode
        """
        if model is None:
            model = dict(name="Sarima", transform=dict(name="Identity"))
        super().__init__(model=model, periodicity_strategy=periodicity_strategy, **kwargs)

        def _all_numeric(*vals):
            # A non-numeric entry (e.g. a placeholder string) forces auto-detection.
            return all(isinstance(v, (int, float)) for v in vals)

        p, d, q = self.order
        P, D, Q, m = self.seasonal_order
        self.auto_seasonality = auto_seasonality or not _all_numeric(m)
        self.auto_pqPQ = auto_pqPQ or not _all_numeric(p, q, P, Q)
        self.auto_d = auto_d or not _all_numeric(d)
        self.auto_D = auto_D or not _all_numeric(D)
        self.maxiter = maxiter
        self.max_k = max_k
        self.max_dur = max_dur
        self.approximation = approximation
        self.approx_iter = approx_iter

    @property
    def order(self):
        """
        :return: the ``(p, d, q)`` order of the underlying `Sarima` model.
        """
        return self.model.order

    @property
    def seasonal_order(self):
        """
        :return: the ``(P, D, Q, m)`` seasonal order of the underlying `Sarima` model.
        """
        return self.model.seasonal_order
class AutoSarima(SeasonalityLayer):
    """
    `Sarima` wrapper which automatically detects the seasonality, (seasonal) differencing
    orders, and (seasonal) AR/MA orders, using a stepwise search adapted from the R
    ``forecast`` package's ``auto.arima``.
    """

    config_class = AutoSarimaConfig

    @property
    def supports_exog(self):
        return True

    def _generate_sarima_parameters(self, train_data: TimeSeries) -> dict:
        """
        Compute everything the stepwise search needs: the seasonality ``m``, the
        differencing orders ``d``/``D`` (via statistical tests), the starting and maximum
        AR/MA orders, and assorted search settings. Returned as a single dict consumed by
        ``generate_theta`` and ``evaluate_theta``.
        """
        y = train_data.univariates[self.target_name].np_values
        order = list(self.config.order)
        seasonal_order = list(self.config.seasonal_order)
        approximation = self.config.approximation
        maxiter = self.config.maxiter
        approx_iter = self.config.approx_iter
        max_k = self.config.max_k
        max_dur = self.config.max_dur

        # These should be set in config
        stationary = False
        seasonal_test = "seas"
        method = "lbfgs"
        test = "kpss"
        stepwise = True
        max_d = 2
        start_p = 2
        max_p = 5
        start_q = 2
        max_q = 5
        max_D = 1
        start_P = 1
        max_P = 2
        start_Q = 1
        max_Q = 2
        relative_improve = 0
        trend = None
        information_criterion = "aic"

        # auto-detect seasonality if desired, otherwise just get it from seasonal order
        if self.config.auto_seasonality:
            candidate_m = super().generate_theta(train_data=train_data)
            m, _, _ = super().evaluate_theta(thetas=candidate_m, train_data=train_data)
        else:
            m = max(1, seasonal_order[-1])

        # adjust max p,q,P,Q start p,q,P,Q so the search space fits the amount of data
        n_samples = len(y)
        max_p = int(min(max_p, np.floor(n_samples / 3)))
        max_q = int(min(max_q, np.floor(n_samples / 3)))
        max_P = int(min(max_P, np.floor(n_samples / 3 / m)))
        max_Q = int(min(max_Q, np.floor(n_samples / 3 / m)))
        start_p = min(start_p, max_p)
        start_q = min(start_q, max_q)
        # BUG FIX: start_P was previously capped by max_Q (the seasonal MA bound) instead
        # of max_P, unlike the three parallel lines above/below. Benign today only because
        # the hard-coded max_P and max_Q happen to be computed identically.
        start_P = min(start_P, max_P)
        start_Q = min(start_Q, max_Q)

        # set the seasonal differencing order with statistical test
        D = None if self.config.auto_D else seasonal_order[1]
        D = 0 if m == 1 else D
        xx = y.copy()
        if stationary:
            D = 0
        elif D is None:
            D = autosarima_utils.nsdiffs(xx, m=m, max_D=max_D, test=seasonal_test)
            # back off one order if differencing consumes the whole series
            if D > 0:
                dx = autosarima_utils.diff(xx, differences=D, lag=m)
                if dx.shape[0] == 0:
                    D = D - 1
        dx = autosarima_utils.diff(xx, differences=D, lag=m) if D > 0 else xx
        logger.info(f"Seasonal difference order is {str(D)}")

        # set the differencing order by estimating the number of orders
        # it would take in order to make the time series stationary
        d = autosarima_utils.ndiffs(dx, alpha=0.05, max_d=max_d, test=test) if self.config.auto_d else order[1]
        if stationary:
            d = 0
        if d > 0:
            dx = autosarima_utils.diff(dx, differences=d, lag=1)
        logger.info(f"Difference order is {str(d)}")

        # pqPQ is an indicator about whether need to automatically select
        # AR, MA, seasonal AR and seasonal MA parameters
        pqPQ = not self.config.auto_pqPQ

        # automatically detect whether to use approximation method and the periodicity
        if approximation is None:
            approximation = (y.shape[0] > 150) or (m > 12)

        # check the size of y
        n_samples = y.shape[0]
        if n_samples <= 3:
            information_criterion = "aic"

        # non-seasonal orders must stay below one full period
        if m > 1:
            if max_P > 0:
                max_p = min(max_p, m - 1)
            if max_Q > 0:
                max_q = min(max_q, m - 1)

        # include a constant term when the total differencing order is at most 1
        if (d + D) in (0, 1):
            trend = "c"

        # with very little data, start from the smallest models
        if n_samples < 10:
            start_p = min(start_p, 1)
            start_q = min(start_q, 1)
            start_P = start_Q = 0

        # seed p, q, P, Q vals
        p = min(start_p, max_p)
        q = min(start_q, max_q)
        P = min(start_P, max_P)
        Q = min(start_Q, max_Q)
        refititer = maxiter

        return_dict = dict(
            y=y,
            p=p,
            d=d,
            q=q,
            P=P,
            D=D,
            Q=Q,
            m=m,
            dx=dx,
            pqPQ=pqPQ,
            max_p=max_p,
            max_d=max_d,
            max_q=max_q,
            max_P=max_P,
            max_D=max_D,
            max_Q=max_Q,
            trend=trend,
            method=method,
            maxiter=maxiter,
            information_criterion=information_criterion,
            relative_improve=relative_improve,
            approximation=approximation,
            max_k=max_k,
            max_dur=max_dur,
            approx_iter=approx_iter,
            refititer=refititer,
            stepwise=stepwise,
            order=order,
            seasonal_order=seasonal_order,
        )
        return return_dict

    def generate_theta(self, train_data: TimeSeries) -> Iterator:
        """
        generate [action, theta]. action is an indicator for stepwise search (stepwise) of
        p, q, P, Q, trend parameters or use a predefined parameter combination (pqPQ).
        theta is a list of parameter combination [order, seasonal_order, trend].
        """
        val_dict = self._generate_sarima_parameters(train_data)
        y = val_dict["y"]
        pqPQ = val_dict["pqPQ"]
        order = val_dict["order"]
        seasonal_order = val_dict["seasonal_order"]
        d = val_dict["d"]
        D = val_dict["D"]
        m = val_dict["m"]
        dx = val_dict["dx"]
        stepwise = val_dict["stepwise"]
        action = None
        trend = None

        if np.max(y) == np.min(y):
            # input time-series is completely constant: degenerate zero model
            order = [0, 0, 0]
            seasonal_order = [0, 0, 0, 0]
        elif pqPQ:
            # user fixed p, q, P, Q: just plug in the detected differencing & period
            action = "pqPQ"
            order[1] = d
            seasonal_order[1] = D
            seasonal_order[3] = m
            if m == 1:
                seasonal_order = [0, 0, 0, m]
        elif np.max(dx) == np.min(dx):
            # differenced series is constant: pure differencing model, no AR/MA terms
            order = [0, 0, 0]
            seasonal_order = (0, 0, 0, m) if m == 1 else (0, D, 0, m)
        elif stepwise:
            action = "stepwise"
        return iter([{"action": action, "theta": [order, seasonal_order, trend], "val_dict": val_dict}])

    def evaluate_theta(
        self, thetas: Iterator, train_data: TimeSeries, train_config=None, exog_data: TimeSeries = None
    ) -> Tuple[Any, Optional[Sarima], Optional[Tuple[TimeSeries, Optional[TimeSeries]]]]:
        """
        Run the selected action (stepwise search, or a single fit of the fixed orders) and
        return the best theta along with a trained `Sarima` model and its train result.
        """
        # preprocess
        train_config = copy(train_config) if train_config is not None else {}
        if exog_data is not None:
            train_config["exog"] = exog_data.to_pd()

        # read from val_dict
        theta_value = next(thetas)
        val_dict = theta_value["val_dict"]
        y = val_dict["y"]
        method = val_dict["method"]
        maxiter = val_dict["maxiter"]
        information_criterion = val_dict["information_criterion"]
        approximation = val_dict["approximation"]
        approx_iter = val_dict["approx_iter"]

        # use zero model to automatically detect the optimal maxiter
        if maxiter is None:
            maxiter = autosarima_utils.detect_maxiter_sarima_model(**val_dict)

        if theta_value["action"] == "stepwise":
            # remember the full iteration budget for the final (non-approximate) refit
            refititer = maxiter
            if approximation:
                if approx_iter is None:
                    maxiter = max(int(maxiter / 5), 1)
                else:
                    maxiter = approx_iter
                logger.info(f"Fitting models using approximations(approx_iter is {str(maxiter)}) to speed things up")
            train_config["maxiter"] = maxiter

            # stepwise search
            stepwise_search = autosarima_utils._StepwiseFitWrapper(**{**val_dict, **train_config})
            filtered_models_ics = stepwise_search.stepwisesearch()
            if approximation:
                logger.debug("Now re-fitting the best model(s) without approximations...")
                if len(filtered_models_ics) > 0:
                    best_model_theta = filtered_models_ics[0][1]
                    best_model_fit = autosarima_utils._refit_sarima_model(
                        filtered_models_ics[0][0],
                        filtered_models_ics[0][2],
                        method,
                        maxiter,
                        refititer,
                        information_criterion,
                    )
                    logger.info(f"Best model: {autosarima_utils._model_name(best_model_fit.model)}")
                else:
                    raise ValueError("Could not successfully fit a viable SARIMA model")
            else:
                if len(filtered_models_ics) > 0:
                    best_model_fit = filtered_models_ics[0][0]
                    best_model_theta = filtered_models_ics[0][1]
                    logger.info(f"Best model: {autosarima_utils._model_name(best_model_fit.model)}")
                else:
                    raise ValueError("Could not successfully fit a viable SARIMA model")
        elif theta_value["action"] == "pqPQ":
            best_model_theta = theta_value["theta"]
            order, seasonal_order, trend = theta_value["theta"]
            if seasonal_order[3] == 1:
                seasonal_order = [0, 0, 0, 0]
            best_model_fit, fit_time, ic = autosarima_utils._fit_sarima_model(
                y=y,
                order=order,
                seasonal_order=seasonal_order,
                trend=trend,
                method=method,
                maxiter=maxiter,
                information_criterion=information_criterion,
                **train_config,
            )
        else:
            # degenerate/constant cases: no fitting here; the AutoML layer trains the model
            return theta_value["theta"], None, None

        # FIXME: model._last_val is not set correctly here
        model = deepcopy(self.model)
        self.set_theta(model, best_model_theta, train_data)
        model.model = best_model_fit
        name = model.target_name
        times = train_data.univariates[name].index
        yhat = model.model.fittedvalues
        # standard error from the fitted residual variance (last param of the SARIMA fit)
        err = [np.sqrt(model.model.params[-1])] * len(yhat)
        train_result = (
            UnivariateTimeSeries(times, yhat, name).to_ts(),
            UnivariateTimeSeries(times, err, f"{name}_err").to_ts(),
        )
        return best_model_theta, model, train_result

    def set_theta(self, model, theta, train_data: TimeSeries = None):
        """
        Apply the chosen ``[order, seasonal_order, trend]`` to the underlying Sarima config.
        """
        order, seasonal_order, trend = theta
        model.config.order = order
        model.config.seasonal_order = seasonal_order
        # NOTE(review): `trend` is unpacked but not persisted on the config — presumably the
        # Sarima config has no trend parameter; confirm before relying on the trend choice.
from collections import OrderedDict
import logging
from typing import Iterator, Tuple, Union
import numpy as np
import pandas as pd
from scipy.stats import norm
from merlion.models.automl.base import InformationCriterion, ICConfig, ICAutoMLForecaster
from merlion.models.automl.search import GridSearch
from merlion.models.automl.seasonality import PeriodicityStrategy, SeasonalityConfig, SeasonalityLayer
from merlion.models.forecast.prophet import Prophet
from merlion.utils import TimeSeries
logger = logging.getLogger(__name__)
class AutoProphetConfig(SeasonalityConfig, ICConfig):
    """
    Configuration for automatic `Prophet` modeling: combines seasonality detection
    (from `SeasonalityConfig`) with information-criterion-based hyperparameter
    selection (from `ICConfig`).
    """

    def __init__(
        self,
        model: Union[Prophet, dict] = None,
        periodicity_strategy: Union[PeriodicityStrategy, str] = PeriodicityStrategy.All,
        information_criterion: InformationCriterion = InformationCriterion.AIC,
        **kwargs,
    ):
        """
        :param model: the underlying model (or a dict specifying it); defaults to a plain Prophet.
        :param periodicity_strategy: how to choose among detected periodicities.
        :param information_criterion: the criterion used to rank candidate models.
        """
        # Default to a bare Prophet spec when no model is given.
        if model is None:
            model = dict(name="Prophet")
        super().__init__(
            model=model,
            periodicity_strategy=periodicity_strategy,
            information_criterion=information_criterion,
            **kwargs,
        )

    @property
    def multi_seasonality(self):
        """
        :return: ``True`` because Prophet supports multiple seasonality.
        """
        return True
class AutoProphet(ICAutoMLForecaster, SeasonalityLayer):
    """
    `Prophet` with automatic seasonality detection. Automatically detects and adds
    additional seasonalities that the existing Prophet may not detect (e.g. hourly),
    and chooses the seasonality mode by minimizing the configured information criterion.
    """

    config_class = AutoProphetConfig

    @property
    def supports_exog(self):
        # The underlying Prophet model accepts exogenous regressors.
        return True

    def generate_theta(self, train_data: TimeSeries) -> Iterator:
        """
        Yield candidate hyperparameter settings: the grid of (detected seasonalities,
        seasonality mode) pairs.
        """
        seas = list(super().generate_theta(train_data))
        modes = ["additive", "multiplicative"]
        # seas is wrapped in an outer list so the whole seasonality list is one grid value.
        return iter(GridSearch(param_values=OrderedDict(seas=[seas], seasonality_mode=modes)))

    def set_theta(self, model, theta, train_data: TimeSeries = None):
        """
        Apply a candidate theta: let the seasonality layer pick & apply the seasonalities,
        then set the seasonality mode on both the config and the live Prophet model.
        """
        seasonalities, seasonality_mode = theta["seas"], theta["seasonality_mode"]
        seasonalities, _, _ = SeasonalityLayer.evaluate_theta(self, thetas=iter(seasonalities), train_data=train_data)
        SeasonalityLayer.set_theta(self, model=model, theta=seasonalities, train_data=train_data)
        model.base_model.config.seasonality_mode = seasonality_mode
        model.base_model.model.seasonality_mode = seasonality_mode

    def _model_name(self, theta) -> str:
        # Human-readable candidate name used by AutoML logging.
        return f"Prophet({','.join(f'{k}={v}' for k, v in theta.items())})"

    def get_ic(self, model, train_data: pd.DataFrame, train_result: Tuple[pd.DataFrame, pd.DataFrame]) -> float:
        """
        Compute the configured information criterion from in-sample predictions.

        :param model: the trained model (wrapping a Prophet ``base_model``).
        :param train_data: the data the model was trained on.
        :param train_result: tuple of (prediction, stderr) on the training data.
        :return: the information criterion value (lower is better).
        :raises ValueError: if the configured information criterion is unsupported.
        """
        pred, stderr = train_result
        n = len(train_data)
        # Gaussian log-likelihood of the standardized in-sample residuals.
        # NOTE(review): this omits the -log(stderr) Jacobian term of the Gaussian
        # density; stderr varies by candidate, so confirm this is intentional.
        log_like = norm.logpdf((pred.values - train_data.values) / stderr.values).sum()
        # Count free parameters; the "trend" entry is excluded from the count.
        n_params = sum(len(v.flatten()) for k, v in model.base_model.model.params.items() if k != "trend")
        ic_id = self.config.information_criterion
        if ic_id is InformationCriterion.AIC:
            # log_like is already a scalar; the original applied a redundant second .sum() here.
            return 2 * n_params - 2 * log_like
        elif ic_id is InformationCriterion.BIC:
            return n_params * np.log(n) - 2 * log_like
        elif ic_id is InformationCriterion.AICc:
            return 2 * n_params - 2 * log_like + (2 * n_params * (n_params + 1)) / max(1, n - n_params - 1)
        else:
            raise ValueError(f"{type(self.model).__name__} doesn't support information criterion {ic_id.name}")
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
class TimeFeature:
    """
    Base class for callable time-feature encoders. Subclasses map a
    ``pd.DatetimeIndex`` to a numeric feature vector.
    """

    def __init__(self):
        pass

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Subclasses override this with the actual encoding.
        pass

    def __repr__(self):
        return type(self).__name__ + "()"
class SecondOfMinute(TimeFeature):
    """
    Encodes the second of the minute as a value in [-0.5, 0.5].
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Seconds run 0..59; scale to [0, 1], then center at zero.
        scaled = index.second / 59.0
        return scaled - 0.5
class MinuteOfHour(TimeFeature):
    """
    Encodes the minute of the hour as a value in [-0.5, 0.5].
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Minutes run 0..59; scale to [0, 1], then center at zero.
        scaled = index.minute / 59.0
        return scaled - 0.5
class HourOfDay(TimeFeature):
    """
    Encodes the hour of the day as a value in [-0.5, 0.5].
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Hours run 0..23; scale to [0, 1], then center at zero.
        scaled = index.hour / 23.0
        return scaled - 0.5
class DayOfWeek(TimeFeature):
    """
    Encodes the day of the week as a value in [-0.5, 0.5].
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Weekdays run 0 (Monday)..6 (Sunday); scale to [0, 1], then center at zero.
        scaled = index.dayofweek / 6.0
        return scaled - 0.5
class DayOfMonth(TimeFeature):
    """
    Encodes the day of the month as a value in [-0.5, 0.5].
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Days run 1..31; shift to zero-based before scaling and centering.
        zero_based = index.day - 1
        return zero_based / 30.0 - 0.5
class DayOfYear(TimeFeature):
    """
    Encodes the day of the year as a value in [-0.5, 0.5].
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Days run 1..365/366; shift to zero-based before scaling and centering.
        zero_based = index.dayofyear - 1
        return zero_based / 365.0 - 0.5
class MonthOfYear(TimeFeature):
    """
    Encodes the month of the year as a value in [-0.5, 0.5].
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # Months run 1..12; shift to zero-based before scaling and centering.
        zero_based = index.month - 1
        return zero_based / 11.0 - 0.5
class WeekOfYear(TimeFeature):
    """
    Encodes the ISO week of the year as a value in [-0.5, 0.5].
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # ISO weeks run 1..52/53; shift to zero-based before scaling and centering.
        week = index.isocalendar().week
        return (week - 1) / 52.0 - 0.5
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
    """
    :param freq_str: Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
    :return: a list of time features that will be appropriate for the given frequency string.
    :raises RuntimeError: if the frequency is not one of the supported granularities.
    """
    # Map each pandas offset type to the time features that vary at that granularity;
    # coarser frequencies need fewer features.
    features_by_offsets = {
        offsets.YearEnd: [],
        offsets.QuarterEnd: [MonthOfYear],
        offsets.MonthEnd: [MonthOfYear],
        offsets.Week: [DayOfMonth, WeekOfYear],
        offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
        offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
        offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
        offsets.Minute: [
            MinuteOfHour,
            HourOfDay,
            DayOfWeek,
            DayOfMonth,
            DayOfYear,
        ],
        offsets.Second: [
            SecondOfMinute,
            MinuteOfHour,
            HourOfDay,
            DayOfWeek,
            DayOfMonth,
            DayOfYear,
        ],
    }
    offset = to_offset(freq_str)
    # Find the first offset type the parsed frequency is an instance of.
    matched = next(
        (classes for offset_type, classes in features_by_offsets.items() if isinstance(offset, offset_type)), None
    )
    if matched is not None:
        return [cls() for cls in matched]
    supported_freq_msg = f"""
    Unsupported frequency {freq_str}
    The following frequencies are supported:
        Y - yearly
            alias: A
        M - monthly
        W - weekly
        D - daily
        B - business days
        H - hourly
        T - minutely
            alias: min
        S - secondly
    """
    raise RuntimeError(supported_freq_msg)
def get_time_features(dates: pd.DatetimeIndex, ts_encoding: str = "h"):
    """
    Convert a pandas ``DatetimeIndex`` into a numerical feature matrix for training.

    :param dates: the timestamps to encode.
    :param ts_encoding: frequency string selecting which features to compute.
    :return: array of shape ``(len(dates), n_features)``.
    """
    # Each feature produces one row; stack then transpose to (time, feature).
    columns = [feature(dates) for feature in time_features_from_frequency_str(ts_encoding)]
    return np.vstack(columns).transpose(1, 0)
import logging
import math
import numpy as np
from typing import Optional, Union
import pandas as pd
from merlion.utils.time_series import TimeSeries, to_pd_datetime
from merlion.models.utils.time_features import get_time_features
logger = logging.getLogger(__name__)
class RollingWindowDataset:
    def __init__(
        self,
        data: Union[TimeSeries, pd.DataFrame],
        target_seq_index: Optional[int],
        n_past: int,
        n_future: int,
        exog_data: Union[TimeSeries, pd.DataFrame] = None,
        shuffle: bool = False,
        ts_index: bool = False,
        batch_size: Optional[int] = 1,
        flatten: bool = True,
        ts_encoding: Union[None, str] = None,
        valid_fraction: float = 0.0,
        validation: Union[bool, None] = False,
        seed: int = 0,
    ):
        """
        A rolling window dataset which returns ``(past, future)`` windows for the whole time series.
        If ``ts_index=True`` is used, a batch size of 1 is employed, and each window returned by the dataset is
        ``(past, future)``, where ``past`` and ``future`` are both `TimeSeries` objects.
        If ``ts_index=False`` is used (default option, more efficient), each window returned by the dataset is
        ``(past_np, past_time, future_np, future_time)``:
        - ``past_np`` is a numpy array with shape ``(batch_size, n_past * dim)`` if ``flatten`` is ``True``, otherwise
          ``(batch_size, n_past, dim)``.
        - ``past_time`` is a numpy array of times with shape ``(batch_size, n_past)``
        - ``future_np`` is a numpy array with shape ``(batch_size, dim)`` if ``target_seq_index`` is ``None``
          (autoregressive prediction), or shape ``(batch_size, n_future)`` if ``target_seq_index`` is specified.
        - ``future_time`` is a numpy array of times with shape ``(batch_size, n_future)``
        :param data: time series data in the format of TimeSeries or pandas DataFrame with DatetimeIndex
        :param target_seq_index: The index of the univariate (amongst all univariates in a general multivariate time
            series) whose value we would like to use for the future labeling. If ``target_seq_index = None``, it implies
            that all the sequences are required for the future labeling. In this case, we set ``n_future = 1`` and
            use the time series for 1-step autoregressive prediction.
        :param n_past: number of steps for past
        :param n_future: number of steps for future. If ``target_seq_index = None``, we manually set ``n_future = 1``.
        :param exog_data: exogenous data to as inputs for the model, but not as outputs to predict.
            We assume the future values of exogenous variables are known a priori at test time.
        :param shuffle: whether the windows of the time series should be shuffled.
        :param ts_index: keep original TimeSeries internally for all the slicing, and output TimeSeries.
            by default, Numpy array will handle the internal data workflow and Numpy array will be the output.
        :param batch_size: the number of windows to return in parallel. If ``None``, return the whole dataset.
        :param flatten: whether the output time series arrays should be flattened to 2 dimensions.
        :param ts_encoding: whether the timestamp should be encoded to a float vector, which can be used
            for training deep learning based time series models; if ``None``, the timestamp is not encoded.
            If not ``None``, it represents the frequency for time features encoding options:[s:secondly, t:minutely,
            h:hourly, d:daily, b:business days, w:weekly, m:monthly]
        :param valid_fraction: Fraction of validation set split from training data. if ``valid_fraction = 0``
            or ``valid_fraction = 1``, we iterate over the entire dataset.
        :param validation: Whether the data is from the validation set or not. if ``validation = None``, we iterate over
            the entire dataset.
        :param seed: random seed used when shuffling window order.
        """
        assert isinstance(
            data, (TimeSeries, pd.DataFrame)
        ), "RollingWindowDataset expects to receive TimeSeries or pd.DataFrame data "
        if isinstance(data, TimeSeries):
            data = data.align()
            self.dim = data.dim
            if exog_data is not None:
                assert isinstance(exog_data, TimeSeries), "Expected exog_data to be TimeSeries if data is TimeSeries"
                # Align the exogenous series to the endogenous timestamps.
                exog_data = exog_data.align(reference=data.time_stamps)
        else:
            assert isinstance(data.index, pd.DatetimeIndex)
            if exog_data is not None:
                if isinstance(exog_data, TimeSeries):
                    exog_data = exog_data.align(reference=data.index).to_pd()
                assert isinstance(exog_data.index, pd.DatetimeIndex) and list(exog_data.index) == list(data.index)
            assert ts_index is False, "Only TimeSeries data support ts_index = True "
            self.dim = data.shape[1]
        if ts_index and batch_size != 1:
            logger.warning("Setting batch_size = 1 because ts_index=True.")
            batch_size = 1
        self.batch_size = batch_size
        self.n_past = n_past
        self.shuffle = shuffle
        self.flatten = flatten
        self.ts_encoding = ts_encoding
        self.target_seq_index = target_seq_index
        self.n_future = n_future
        self.ts_index = ts_index
        if ts_index:
            # TimeSeries-backed storage: slicing happens directly on the TimeSeries objects.
            self.data = data.concat(exog_data, axis=1) if exog_data is not None else data
            self.target = (
                data if self.target_seq_index is None else data.univariates[data.names[target_seq_index]].to_ts()
            )
            self.timestamp = to_pd_datetime(data.np_time_stamps)
        else:
            df = data.to_pd() if isinstance(data, TimeSeries) else data
            # FIX: convert exog_data (the original converted `data`, feeding the
            # endogenous values back in as the exogenous features).
            exog_df = exog_data.to_pd() if isinstance(exog_data, TimeSeries) else exog_data
            if exog_data is not None:
                # When forecasting (n_future > 0), pair each time step with the *next*
                # step's exogenous values, since future exog values are known a priori;
                # the final (unknown) step is padded with NaN and never enters a past window.
                # FIX: the original computed this shifted array and then unconditionally
                # overwrote self.data with the unshifted concatenation, leaving this dead.
                if n_future > 0:
                    exog_vals = np.concatenate((exog_df.values[1:], np.full((1, exog_df.shape[1]), np.nan)))
                else:
                    exog_vals = exog_df.values
                self.data = np.concatenate((df.values, exog_vals), axis=1)
            else:
                self.data = df.values
            self.timestamp = df.index
            self.target = df.values if self.target_seq_index is None else df.values[:, target_seq_index]
        if self.ts_encoding:
            # Replace raw timestamps with numeric time-feature encodings.
            self.timestamp = get_time_features(self.timestamp, self.ts_encoding)
        self._seed = seed
        self._valid = validation
        self.valid_fraction = valid_fraction
        # Determine the train/validation split over window indices.
        if valid_fraction <= 0.0 or valid_fraction >= 1.0 or (self.validation is None):
            n_train = self.n_windows
        else:
            n_train = self.n_windows - math.ceil(self.n_windows * self.valid_fraction)
        data_indices = np.arange(self.n_windows)
        # Optionally shuffle the window order (deterministically, with the given seed)
        # before splitting into train/validation indices.
        if shuffle:
            data_indices = np.random.RandomState(seed).permutation(data_indices)
        self.train_indices = data_indices[:n_train]
        self.valid_indices = data_indices[n_train:]

    @property
    def validation(self):
        """
        If set ``False``, we only provide access to the training windows; if set ``True``,
        we only provide access to the validation windows. if set``None``, we iterate over
        the entire dataset.
        """
        return self._valid

    @validation.setter
    def validation(self, valid: bool):
        self._valid = valid

    @property
    def seed(self):
        """
        Random seed used to perturb the training data.
        """
        return self._seed

    @seed.setter
    def seed(self, seed: int):
        """
        Set the random seed used to perturb the training data.
        """
        self._seed = seed

    @property
    def n_windows(self):
        """
        Total number of sliding windows in the dataset.
        """
        return len(self.data) - self.n_past - self.n_future + 1

    @property
    def n_valid(self):
        """
        Number of sliding windows in the validation set.
        """
        return len(self.valid_indices)

    @property
    def n_train(self):
        """
        Number of sliding windows in the training set.
        """
        return len(self.train_indices)

    @property
    def n_points(self):
        """
        Number of windows accessible under the current ``validation`` setting.
        """
        n_train, n_valid = self.n_train, self.n_valid
        return n_train + n_valid if self.validation is None else n_valid if self.validation else n_train

    def __len__(self):
        # Number of batches; a single batch covers everything when batch_size is None.
        return int(np.ceil(self.n_points / self.batch_size)) if self.batch_size is not None else 1

    def __iter__(self):
        batch = []
        # Choose which window indices to visit, and in what order.
        if self.validation is None:
            order = sorted(np.concatenate((self.train_indices, self.valid_indices)))
            if self.shuffle:
                order = np.random.RandomState(self.seed).permutation(order)
        elif self.validation:
            order = self.valid_indices
        elif self.shuffle and self.batch_size is not None:
            order = np.random.RandomState(self.seed).permutation(self.train_indices)
        else:
            order = self.train_indices
        for i in order:
            batch.append(self[i])
            if self.batch_size is not None and len(batch) >= self.batch_size:
                yield self.collate_batch(batch)
                batch = []
        # Flush the final partial batch, if any.
        if len(batch) > 0:
            yield self.collate_batch(batch)

    def collate_batch(self, batch):
        """
        Combine a list of per-window tuples into a single batched tuple
        (or pass through the lone TimeSeries window when ``ts_index=True``).
        """
        if self.ts_index:
            return batch[0]
        # TODO: allow output shape to be specified as class parameter
        past, past_ts, future, future_ts = zip(*batch)
        past = np.stack(past)
        past_ts = np.stack(past_ts)
        if self.flatten:
            past = past.reshape((len(batch), -1))
            past_ts = past_ts.reshape((len(batch), -1)) if self.ts_encoding else past_ts
        if future is not None:
            future = np.stack(future)
            future = future.reshape((len(batch), -1)) if self.flatten else future
            future_ts = np.stack(future_ts)
            if self.flatten and self.ts_encoding:
                future_ts = future_ts.reshape((len(batch), -1))
        else:
            future, future_ts = None, None
        return past, past_ts, future, future_ts

    def __getitem__(self, idx):
        # Guard against indexing into the wrong split.
        if self.validation is None:
            assert 0 <= idx < self.n_points
        elif self.validation:
            assert idx in self.valid_indices
        else:
            assert idx in self.train_indices
        past_start = idx
        past_end = past_start + self.n_past
        future_start = past_end
        future_end = future_start + self.n_future
        past = self.data[past_start:past_end]
        past_timestamp = self.timestamp[past_start:past_end]
        future = self.target[future_start:future_end]
        future_timestamp = self.timestamp[future_start:future_end]
        return (past, future) if self.ts_index else (past, past_timestamp, future, future_timestamp)
import os
import math
import random
import numpy as np
try:
import torch
import torch.nn as nn
import torch.fft as fft
import torch.nn.functional as F
from einops import rearrange, reduce, repeat
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
from merlion.models.utils.nn_modules.layers import GrowthLayer, FourierLayer, LevelLayer, DampingLayer, MLPLayer
class EncoderLayer(nn.Module):
    """
    One ETSformer encoder layer. Decomposes the running residual into a seasonal
    component (Fourier attention) and a growth component (exponential-smoothing
    attention), then updates the per-channel level from the de-seasonalized signal.
    """

    def __init__(
        self,
        d_model,
        nhead,
        c_out,
        seq_len,
        pred_len,
        k,
        dim_feedforward=None,
        dropout=0.1,
        activation="sigmoid",
        layer_norm_eps=1e-5,
        output_attention=False,
    ):
        super().__init__()
        self.d_model = d_model
        self.nhead = nhead
        self.c_out = c_out
        self.seq_len = seq_len
        self.pred_len = pred_len
        # Default feedforward width follows the usual 4x d_model transformer convention.
        dim_feedforward = dim_feedforward or 4 * d_model
        self.dim_feedforward = dim_feedforward
        self.growth_layer = GrowthLayer(d_model, nhead, dropout=dropout, output_attention=output_attention)
        self.seasonal_layer = FourierLayer(d_model, pred_len, k=k, output_attention=output_attention)
        self.level_layer = LevelLayer(d_model, c_out, dropout=dropout)
        # Implementation of Feedforward model
        self.ff = MLPLayer(d_model, dim_feedforward, dropout=dropout, activation=activation)
        self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, res, level, attn_mask=None):
        """
        :param res: running residual, shape (batch, seq_len, d_model).
        :param level: running level estimate.
        :return: (updated residual, updated level, growth, season, season attn, growth attn).
        """
        # Extract the season first; `season` carries pred_len extra extrapolated steps,
        # which are sliced off when subtracting within the input window.
        season, season_attn = self._season_block(res)
        res = res - season[:, : -self.pred_len]
        # Then estimate growth on the de-seasonalized residual. `growth` has one extra
        # leading step (the smoother's initial state), hence the [:, 1:] alignment.
        growth, growth_attn = self._growth_block(res)
        res = self.norm1(res - growth[:, 1:])
        # Feedforward sub-layer with residual connection.
        res = self.norm2(res + self.ff(res))
        # Update the level from the de-seasonalized signal and the aligned growth.
        level = self.level_layer(level, growth[:, :-1], season[:, : -self.pred_len])
        return res, level, growth, season, season_attn, growth_attn

    def _growth_block(self, x):
        # Growth extraction with dropout on the output.
        x, growth_attn = self.growth_layer(x)
        return self.dropout1(x), growth_attn

    def _season_block(self, x):
        # Season extraction with dropout on the output.
        x, season_attn = self.seasonal_layer(x)
        return self.dropout2(x), season_attn
class Encoder(nn.Module):
    """
    Stack of ETSformer encoder layers. Threads the residual and level through the
    stack while collecting every layer's growth/season components and attentions.
    """

    def __init__(self, layers):
        super().__init__()
        self.layers = nn.ModuleList(layers)

    def forward(self, res, level, attn_mask=None):
        growths, seasons = [], []
        season_attns, growth_attns = [], []
        for blk in self.layers:
            res, level, growth, season, season_attn, growth_attn = blk(res, level, attn_mask=None)
            growths.append(growth)
            seasons.append(season)
            season_attns.append(season_attn)
            growth_attns.append(growth_attn)
        return level, growths, seasons, season_attns, growth_attns
class DecoderLayer(nn.Module):
    """
    One ETSformer decoder layer: extrapolates the growth from a single encoder layer
    into the forecast horizon via damping, and slices out the seasonal forecast.
    """

    def __init__(self, d_model, nhead, c_out, pred_len, dropout=0.1, output_attention=False):
        super().__init__()
        self.d_model = d_model
        self.nhead = nhead
        self.c_out = c_out
        self.pred_len = pred_len
        self.output_attention = output_attention
        # Damped-trend extrapolation of the final growth state over the horizon.
        self.growth_damping = DampingLayer(pred_len, nhead, dropout=dropout, output_attention=output_attention)
        self.dropout1 = nn.Dropout(dropout)

    def forward(self, growth, season):
        # Extrapolate from the last growth step only.
        horizon_growth, damping = self.growth_damping(growth[:, -1:])
        horizon_growth = self.dropout1(horizon_growth)
        # The seasonal forecast is the extrapolated tail already produced upstream.
        horizon_season = season[:, -self.pred_len :]
        return horizon_growth, horizon_season, (damping if self.output_attention else None)
class Decoder(nn.Module):
    """
    ETSformer decoder: sums each layer's growth/season horizon components and
    projects the totals from model dimension to output dimension.
    """

    def __init__(self, layers):
        super().__init__()
        first = layers[0]
        self.d_model = first.d_model
        self.c_out = first.c_out
        self.pred_len = first.pred_len
        self.nhead = first.nhead
        self.layers = nn.ModuleList(layers)
        self.pred = nn.Linear(self.d_model, self.c_out)

    def forward(self, growths, seasons):
        growth_parts, season_parts = [], []
        growth_dampings = []
        for idx, layer in enumerate(self.layers):
            g_hor, s_hor, damping = layer(growths[idx], seasons[idx])
            growth_parts.append(g_hor)
            season_parts.append(s_hor)
            growth_dampings.append(damping)
        # Aggregate across layers, then project to the output dimension.
        return self.pred(sum(growth_parts)), self.pred(sum(season_parts)), growth_dampings
try:
import torch
import torch.nn as nn
import torch.fft as fft
import torch.nn.functional as F
from einops import rearrange, reduce, repeat
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
import numpy as np
import math
from math import sqrt
import os
from scipy.fftpack import next_fast_len
from merlion.models.utils.nn_modules.blocks import ExponentialSmoothing, conv1d_fft
class ConvLayer(nn.Module):
    """
    Downsampling block: circular 1D convolution -> batch norm -> ELU -> strided
    max-pool, roughly halving the temporal length.
    """

    def __init__(self, c_in):
        super(ConvLayer, self).__init__()
        self.downConv = nn.Conv1d(
            in_channels=c_in, out_channels=c_in, kernel_size=3, padding=2, padding_mode="circular"
        )
        self.norm = nn.BatchNorm1d(c_in)
        self.activation = nn.ELU()
        self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        # (batch, time, channels) -> (batch, channels, time) for Conv1d.
        out = self.downConv(x.permute(0, 2, 1))
        out = self.activation(self.norm(out))
        out = self.maxPool(out)
        # Back to (batch, time, channels).
        return out.transpose(1, 2)
class MLPLayer(nn.Module):
    """
    Position-wise two-layer feedforward block (no bias terms):
    Linear -> activation -> dropout -> Linear -> dropout.
    """

    def __init__(self, d_model, dim_feedforward, dropout=0.1, activation="sigmoid"):
        super().__init__()
        self.linear1 = nn.Linear(d_model, dim_feedforward, bias=False)
        self.dropout1 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model, bias=False)
        self.dropout2 = nn.Dropout(dropout)
        # Resolve the activation by name from the top-level torch namespace.
        self.activation = getattr(torch, activation)

    def forward(self, x):
        hidden = self.dropout1(self.activation(self.linear1(x)))
        return self.dropout2(self.linear2(hidden))
class AutoCorrelationLayer(nn.Module):
    """
    Multi-head wrapper around an auto-correlation mechanism: projects queries, keys,
    and values into per-head subspaces, applies the inner correlation, and projects
    the merged heads back to ``d_model``.
    """

    def __init__(self, correlation, d_model, n_heads, d_keys=None, d_values=None):
        super(AutoCorrelationLayer, self).__init__()
        # Per-head dimensions default to an even split of d_model.
        d_keys = d_keys or (d_model // n_heads)
        d_values = d_values or (d_model // n_heads)
        self.inner_correlation = correlation
        self.query_projection = nn.Linear(d_model, d_keys * n_heads)
        self.key_projection = nn.Linear(d_model, d_keys * n_heads)
        self.value_projection = nn.Linear(d_model, d_values * n_heads)
        self.out_projection = nn.Linear(d_values * n_heads, d_model)
        self.n_heads = n_heads

    def forward(self, queries, keys, values, attn_mask):
        batch, q_len, _ = queries.shape
        k_len = keys.shape[1]
        heads = self.n_heads
        # Split projections into per-head chunks: (B, L, H, d_head).
        q = self.query_projection(queries).view(batch, q_len, heads, -1)
        k = self.key_projection(keys).view(batch, k_len, heads, -1)
        v = self.value_projection(values).view(batch, k_len, heads, -1)
        out, attn = self.inner_correlation(q, k, v, attn_mask)
        # Merge heads back and project to d_model.
        out = out.view(batch, q_len, -1)
        return self.out_projection(out), attn
# Attention Layers
class AttentionLayer(nn.Module):
    """
    Multi-head wrapper around an attention mechanism: projects queries, keys, and
    values into per-head subspaces, applies the inner attention, and projects the
    merged heads back to ``d_model``.
    """

    def __init__(self, attention, d_model, n_heads, d_keys=None, d_values=None):
        super(AttentionLayer, self).__init__()
        # Per-head dimensions default to an even split of d_model.
        d_keys = d_keys or (d_model // n_heads)
        d_values = d_values or (d_model // n_heads)
        self.inner_attention = attention
        self.query_projection = nn.Linear(d_model, d_keys * n_heads)
        self.key_projection = nn.Linear(d_model, d_keys * n_heads)
        self.value_projection = nn.Linear(d_model, d_values * n_heads)
        self.out_projection = nn.Linear(d_values * n_heads, d_model)
        self.n_heads = n_heads

    def forward(self, queries, keys, values, attn_mask):
        batch, q_len, _ = queries.shape
        k_len = keys.shape[1]
        heads = self.n_heads
        # Split projections into per-head chunks: (B, L, H, d_head).
        q = self.query_projection(queries).view(batch, q_len, heads, -1)
        k = self.key_projection(keys).view(batch, k_len, heads, -1)
        v = self.value_projection(values).view(batch, k_len, heads, -1)
        out, attn = self.inner_attention(q, k, v, attn_mask)
        # Merge heads back and project to d_model.
        out = out.view(batch, q_len, -1)
        return self.out_projection(out), attn
# layers from ETS
class GrowthLayer(nn.Module):
    """
    Estimates the growth (trend) component by exponentially smoothing the first
    differences of the input sequence, independently per attention head.
    """

    def __init__(self, d_model, nhead, d_head=None, dropout=0.1, output_attention=False):
        super().__init__()
        self.d_head = d_head or (d_model // nhead)
        self.d_model = d_model
        self.nhead = nhead
        self.output_attention = output_attention
        # Learnable initial state, prepended before differencing.
        self.z0 = nn.Parameter(torch.randn(self.nhead, self.d_head))
        self.in_proj = nn.Linear(self.d_model, self.d_head * self.nhead)
        self.es = ExponentialSmoothing(self.d_head, self.nhead, dropout=dropout)
        self.out_proj = nn.Linear(self.d_head * self.nhead, self.d_model)
        assert self.d_head * self.nhead == self.d_model, "d_model must be divisible by nhead"

    def forward(self, inputs):
        """
        :param inputs: shape: (batch, seq_len, dim)
        :return: shape: (batch, seq_len + 1, dim) — one extra leading step comes from
            re-prepending the smoother's initial value ``v0``.
        """
        b, t, d = inputs.shape
        values = self.in_proj(inputs).view(b, t, self.nhead, -1)
        # Prepend the learned initial state, then take first differences along time.
        values = torch.cat([repeat(self.z0, "h d -> b 1 h d", b=b), values], dim=1)
        values = values[:, 1:] - values[:, :-1]
        # Exponentially smooth the differences, then re-prepend the smoother's v0.
        out = self.es(values)
        out = torch.cat([repeat(self.es.v0, "1 1 h d -> b 1 h d", b=b), out], dim=1)
        # Merge heads and project back to d_model.
        out = rearrange(out, "b t h d -> b t (h d)")
        out = self.out_proj(out)
        if self.output_attention:
            return out, self.es.get_exponential_weight(t)[1]
        return out, None
class FourierLayer(nn.Module):
    """
    Seasonal component extractor: keeps the top-k dominant frequencies of the input
    (via real FFT) and extrapolates them ``pred_len`` steps beyond the input window.
    """

    def __init__(self, d_model, pred_len, k=None, low_freq=1, output_attention=False):
        super().__init__()
        self.d_model = d_model
        self.pred_len = pred_len
        # k: number of dominant frequencies to keep per (batch, channel).
        self.k = k
        # low_freq: number of lowest frequency bins (incl. DC) to discard.
        self.low_freq = low_freq
        self.output_attention = output_attention

    def forward(self, x):
        """x: (b, t, d)"""
        if self.output_attention:
            # Slower explicit-DFT path that also returns attention weights.
            return self.dft_forward(x)
        b, t, d = x.shape
        x_freq = fft.rfft(x, dim=1)
        # Drop the low-frequency bins; for even t also drop the Nyquist bin.
        if t % 2 == 0:
            x_freq = x_freq[:, self.low_freq : -1]
            f = fft.rfftfreq(t)[self.low_freq : -1]
        else:
            x_freq = x_freq[:, self.low_freq :]
            f = fft.rfftfreq(t)[self.low_freq :]
        # Keep only the k strongest frequencies (per batch element and channel).
        x_freq, index_tuple = self.topk_freq(x_freq)
        f = repeat(f, "f -> b f d", b=x_freq.size(0), d=x_freq.size(2))
        f = rearrange(f[index_tuple], "b f d -> b f () d").to(x_freq.device)
        return self.extrapolate(x_freq, f, t), None

    def extrapolate(self, x_freq, f, t):
        # Add conjugate frequencies so the reconstructed signal is real-valued.
        x_freq = torch.cat([x_freq, x_freq.conj()], dim=1)
        f = torch.cat([f, -f], dim=1)
        # Evaluate t + pred_len time steps, i.e. extrapolate past the input window.
        t_val = rearrange(torch.arange(t + self.pred_len, dtype=torch.float), "t -> () () t ()").to(x_freq.device)
        amp = rearrange(x_freq.abs() / t, "b f d -> b f () d")
        phase = rearrange(x_freq.angle(), "b f d -> b f () d")
        # Sum of cosines: amplitude * cos(2*pi*f*t + phase), summed over frequencies.
        x_time = amp * torch.cos(2 * math.pi * f * t_val + phase)
        return reduce(x_time, "b f t d -> b t d", "sum")

    def topk_freq(self, x_freq):
        # Select the k largest-magnitude frequency bins along the frequency axis.
        values, indices = torch.topk(x_freq.abs(), self.k, dim=1, largest=True, sorted=True)
        mesh_a, mesh_b = torch.meshgrid(torch.arange(x_freq.size(0)), torch.arange(x_freq.size(2)))
        index_tuple = (mesh_a.unsqueeze(1), indices, mesh_b.unsqueeze(1))
        x_freq = x_freq[index_tuple]
        return x_freq, index_tuple

    def dft_forward(self, x):
        # Explicit DFT/IDFT matrices so the operation can be expressed as attention.
        T = x.size(1)
        dft_mat = fft.fft(torch.eye(T))
        i, j = torch.meshgrid(torch.arange(self.pred_len + T), torch.arange(T))
        omega = np.exp(2 * math.pi * 1j / T)
        # NOTE(review): mixes a NumPy complex scalar with torch index tensors here;
        # verify this path on the current torch/NumPy versions.
        idft_mat = (np.power(omega, i * j) / T).cfloat()
        x_freq = torch.einsum("ft,btd->bfd", [dft_mat, x.cfloat()])
        # Keep the positive-frequency half, dropping the low-frequency bins.
        if T % 2 == 0:
            x_freq = x_freq[:, self.low_freq : T // 2]
        else:
            x_freq = x_freq[:, self.low_freq : T // 2 + 1]
        _, indices = torch.topk(x_freq.abs(), self.k, dim=1, largest=True, sorted=True)
        indices = indices + self.low_freq
        # Include the mirrored negative frequencies.
        indices = torch.cat([indices, -indices], dim=1)
        dft_mat = repeat(dft_mat, "f t -> b f t d", b=x.shape[0], d=x.shape[-1])
        idft_mat = repeat(idft_mat, "t f -> b t f d", b=x.shape[0], d=x.shape[-1])
        mesh_a, mesh_b = torch.meshgrid(torch.arange(x.size(0)), torch.arange(x.size(2)))
        # Mask both matrices down to the selected frequencies only.
        dft_mask = torch.zeros_like(dft_mat)
        dft_mask[mesh_a, indices, :, mesh_b] = 1
        dft_mat = dft_mat * dft_mask
        idft_mask = torch.zeros_like(idft_mat)
        idft_mask[mesh_a, :, indices, mesh_b] = 1
        idft_mat = idft_mat * idft_mask
        # attn maps input steps to (input + horizon) output steps.
        attn = torch.einsum("bofd,bftd->botd", [idft_mat, dft_mat]).real
        return torch.einsum("botd,btd->bod", [attn, x]), rearrange(attn, "b o t d -> b d o t")
class LevelLayer(nn.Module):
    """
    Updates the per-channel level by exponentially smoothing the de-seasonalized
    signal, feeding the predicted growth as an auxiliary input to the smoother.
    """

    def __init__(self, d_model, c_out, dropout=0.1):
        super().__init__()
        self.d_model = d_model
        self.c_out = c_out
        # One smoothing head per output channel; growth enters via aux_values.
        self.es = ExponentialSmoothing(1, self.c_out, dropout=dropout, aux=True)
        self.growth_pred = nn.Linear(self.d_model, self.c_out)
        self.season_pred = nn.Linear(self.d_model, self.c_out)

    def forward(self, level, growth, season):
        """
        :param level: current level, shape (batch, time, c_out).
        :param growth: growth representation, shape (batch, time, d_model).
        :param season: season representation, shape (batch, time, d_model).
        :return: updated level, shape (batch, time, c_out).
        """
        b, t, _ = level.shape
        # Project growth/season from d_model to one scalar per output channel.
        # (The original re-applied the same .view() to each tensor a second time —
        # redundant no-ops, removed.)
        growth = self.growth_pred(growth).view(b, t, self.c_out, 1)
        season = self.season_pred(season).view(b, t, self.c_out, 1)
        level = level.view(b, t, self.c_out, 1)
        # Smooth the de-seasonalized level with growth as the auxiliary term.
        out = self.es(level - season, aux_values=growth)
        out = rearrange(out, "b t h d -> b t (h d)")
        return out
class DampingLayer(nn.Module):
    """
    Extrapolates the final growth state across the forecast horizon using learned
    per-head damping factors (damped-trend extrapolation with cumulative weights).
    """

    def __init__(self, pred_len, nhead, dropout=0.1, output_attention=False):
        super().__init__()
        self.pred_len = pred_len
        self.nhead = nhead
        self.output_attention = output_attention
        # Raw parameter; the usable factor is its sigmoid (see the property below).
        self._damping_factor = nn.Parameter(torch.randn(1, nhead))
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Tile the single growth step across all pred_len horizon steps.
        x = repeat(x, "b 1 d -> b t d", t=self.pred_len)
        b, t, d = x.shape
        exponents = torch.arange(self.pred_len).to(self._damping_factor.device) + 1
        exponents = exponents.view(self.pred_len, 1)
        # Cumulative damped weights: sum_{i<=step} factor^i for each horizon step.
        factors = self.damping_factor**exponents
        factors = factors.cumsum(dim=0)
        x = x.view(b, t, self.nhead, -1)
        x = self.dropout(x) * factors.unsqueeze(-1)
        x = x.view(b, t, d)
        return (x, factors) if self.output_attention else (x, None)

    @property
    def damping_factor(self):
        # Sigmoid keeps each factor in (0, 1) so the extrapolated trend decays.
        return torch.sigmoid(self._damping_factor)
try:
import torch
import torch.nn as nn
import torch.nn.functional as F
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
import math
class EncoderLayer(nn.Module):
    """
    Standard transformer encoder layer: self-attention with a residual connection,
    followed by a position-wise feedforward block built from 1x1 convolutions,
    each sub-layer wrapped in layer norm.
    """

    def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation="relu"):
        super(EncoderLayer, self).__init__()
        # Feedforward width defaults to the usual 4x d_model.
        d_ff = d_ff or 4 * d_model
        self.attention = attention
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)
        self.activation = F.relu if activation == "relu" else F.gelu

    def forward(self, x, attn_mask=None):
        # Self-attention sub-layer with residual connection.
        attn_out, attn = self.attention(x, x, x, attn_mask=attn_mask)
        x = self.norm1(x + self.dropout(attn_out))
        # Position-wise feedforward, applied over the channel dimension via 1x1 convs.
        ff = self.dropout(self.activation(self.conv1(x.transpose(-1, 1))))
        ff = self.dropout(self.conv2(ff).transpose(-1, 1))
        return self.norm2(x + ff), attn
class Encoder(nn.Module):
    """
    Stack of transformer encoder layers, optionally interleaved with downsampling
    conv layers (Informer-style distillation), with an optional final norm.
    """

    def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
        super(Encoder, self).__init__()
        self.attn_layers = nn.ModuleList(attn_layers)
        self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None
        self.norm = norm_layer

    def forward(self, x, attn_mask=None):
        # x [B, L, D]
        attns = []
        if self.conv_layers is None:
            for attn_layer in self.attn_layers:
                x, attn = attn_layer(x, attn_mask=attn_mask)
                attns.append(attn)
        else:
            # Each conv layer downsamples the output of the corresponding attention
            # layer; the final attention layer runs without a following conv.
            for attn_layer, conv_layer in zip(self.attn_layers, self.conv_layers):
                x, attn = attn_layer(x, attn_mask=attn_mask)
                x = conv_layer(x)
                attns.append(attn)
            x, attn = self.attn_layers[-1](x)
            attns.append(attn)
        if self.norm is not None:
            x = self.norm(x)
        return x, attns
class DecoderLayer(nn.Module):
    """
    Standard transformer decoder layer: masked self-attention, cross-attention over
    the encoder memory, and a 1x1-conv feedforward block, each with residual + norm.
    """

    def __init__(self, self_attention, cross_attention, d_model, d_ff=None, dropout=0.1, activation="relu"):
        super(DecoderLayer, self).__init__()
        # Feedforward width defaults to the usual 4x d_model.
        d_ff = d_ff or 4 * d_model
        self.self_attention = self_attention
        self.cross_attention = cross_attention
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)
        self.activation = F.relu if activation == "relu" else F.gelu

    def forward(self, x, cross, x_mask=None, cross_mask=None):
        # Masked self-attention over decoder inputs.
        self_out = self.self_attention(x, x, x, attn_mask=x_mask)[0]
        x = self.norm1(x + self.dropout(self_out))
        # Cross-attention over the encoder memory.
        cross_out = self.cross_attention(x, cross, cross, attn_mask=cross_mask)[0]
        x = self.norm2(x + self.dropout(cross_out))
        # Position-wise feedforward via 1x1 convolutions.
        ff = self.dropout(self.activation(self.conv1(x.transpose(-1, 1))))
        ff = self.dropout(self.conv2(ff).transpose(-1, 1))
        return self.norm3(x + ff)
class Decoder(nn.Module):
    """Stack of decoder layers with an optional final norm and output projection."""

    def __init__(self, layers, norm_layer=None, projection=None):
        super().__init__()
        self.layers = nn.ModuleList(layers)
        self.norm = norm_layer
        self.projection = projection

    def forward(self, x, cross, x_mask=None, cross_mask=None):
        for layer in self.layers:
            x = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask)
        if self.norm is not None:
            x = self.norm(x)
        return x if self.projection is None else self.projection(x)
try:
import torch
import torch.nn as nn
import torch.nn.functional as F
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
import math
class PositionalEmbedding(nn.Module):
    """Sinusoidal positional encoding, precomputed once up to ``max_len`` positions."""

    def __init__(self, d_model, max_len=5000):
        """
        :param d_model: embedding dimension (even indices get sin, odd indices get cos).
        :param max_len: maximum sequence length supported by the precomputed table.
        """
        super(PositionalEmbedding, self).__init__()
        # Compute the positional encodings once in log space. The original code set
        # the misspelled attribute ``pe.require_grad = False`` (a silent no-op);
        # gradients are already excluded because ``pe`` is a non-parameter buffer.
        pe = torch.zeros(max_len, d_model).float()
        position = torch.arange(0, max_len).float().unsqueeze(1)
        div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer("pe", pe.unsqueeze(0))

    def forward(self, x):
        """Return encodings for the first ``x.size(1)`` positions, shape (1, L, d_model)."""
        return self.pe[:, : x.size(1)]
class TokenEmbedding(nn.Module):
    """Embed raw series values into ``d_model`` channels with a length-preserving circular Conv1d."""

    def __init__(self, c_in, d_model):
        super(TokenEmbedding, self).__init__()
        # Bug fix: the original chose padding via a *string* comparison on
        # torch.__version__ ("1.10.0" < "1.5.0" lexicographically), which selects the
        # pre-1.5 padding of 2 on modern torch and changes the output length. Compare
        # numeric (major, minor) instead.
        try:
            major, minor = (int(v) for v in torch.__version__.split("+")[0].split(".")[:2])
            padding = 1 if (major, minor) >= (1, 5) else 2
        except ValueError:
            padding = 1  # unparsable dev versions are assumed to be modern
        self.tokenConv = nn.Conv1d(
            in_channels=c_in, out_channels=d_model, kernel_size=3, padding=padding, padding_mode="circular", bias=False
        )
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode="fan_in", nonlinearity="leaky_relu")

    def forward(self, x):
        # (B, L, c_in) -> (B, L, d_model)
        x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)
        return x
class FixedEmbedding(nn.Module):
    """Embedding lookup with frozen sinusoidal weights (a non-trainable ``nn.Embedding``)."""

    def __init__(self, c_in, d_model):
        super(FixedEmbedding, self).__init__()
        # The original also set the misspelled ``w.require_grad = False`` (a silent
        # no-op); the Parameter below is created with requires_grad=False, which is
        # what actually freezes the table.
        w = torch.zeros(c_in, d_model).float()
        position = torch.arange(0, c_in).float().unsqueeze(1)
        div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
        w[:, 0::2] = torch.sin(position * div_term)
        w[:, 1::2] = torch.cos(position * div_term)
        self.emb = nn.Embedding(c_in, d_model)
        self.emb.weight = nn.Parameter(w, requires_grad=False)

    def forward(self, x):
        # detach() keeps the lookup out of the autograd graph entirely
        return self.emb(x).detach()
class TemporalEmbedding(nn.Module):
    """Sum of calendar-feature embeddings (month/day/weekday/hour, plus minute when freq='t')."""

    def __init__(self, d_model, embed_type="fixed", freq="h"):
        super().__init__()
        # cardinalities of each calendar field (day/month sized generously for 1-based values)
        minute_size, hour_size, weekday_size, day_size, month_size = 4, 24, 7, 32, 13
        Embed = FixedEmbedding if embed_type == "fixed" else nn.Embedding
        if freq == "t":
            self.minute_embed = Embed(minute_size, d_model)
        self.hour_embed = Embed(hour_size, d_model)
        self.weekday_embed = Embed(weekday_size, d_model)
        self.day_embed = Embed(day_size, d_model)
        self.month_embed = Embed(month_size, d_model)

    def forward(self, x):
        # column layout: 0=month, 1=day, 2=weekday, 3=hour, 4=minute (when present)
        x = x.long()
        parts = [
            self.hour_embed(x[:, :, 3]),
            self.weekday_embed(x[:, :, 2]),
            self.day_embed(x[:, :, 1]),
            self.month_embed(x[:, :, 0]),
        ]
        if hasattr(self, "minute_embed"):
            parts.append(self.minute_embed(x[:, :, 4]))
        total = parts[0]
        for part in parts[1:]:
            total = total + part
        return total
class TimeFeatureEmbedding(nn.Module):
    """Linear projection of real-valued time features into ``d_model`` dimensions."""

    def __init__(self, d_model, embed_type="timeF", freq="h"):
        super().__init__()
        # number of time features produced for each pandas-style frequency code
        freq_map = {"h": 4, "t": 5, "s": 6, "m": 1, "a": 1, "w": 2, "d": 3, "b": 3}
        self.embed = nn.Linear(freq_map[freq], d_model, bias=False)

    def forward(self, x):
        return self.embed(x)
class DataEmbedding(nn.Module):
    """Value + temporal + positional embeddings, summed and passed through dropout."""

    def __init__(self, c_in, d_model, embed_type="fixed", freq="h", dropout=0.1):
        super().__init__()
        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)
        self.position_embedding = PositionalEmbedding(d_model=d_model)
        if embed_type == "timeF":
            self.temporal_embedding = TimeFeatureEmbedding(d_model=d_model, embed_type=embed_type, freq=freq)
        else:
            self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type, freq=freq)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, x_mark):
        # x carries the series values; x_mark carries the timestamp features
        out = self.value_embedding(x) + self.temporal_embedding(x_mark) + self.position_embedding(x)
        return self.dropout(out)
class DataEmbeddingWoPos(nn.Module):
    """Data embedding without positional encoding: value + temporal embeddings, then dropout."""

    def __init__(self, c_in, d_model, embed_type="fixed", freq="h", dropout=0.1):
        super(DataEmbeddingWoPos, self).__init__()
        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)
        # NOTE(review): constructed but never used in forward() — presumably kept for
        # state-dict/API parity with DataEmbedding; confirm before removing.
        self.position_embedding = PositionalEmbedding(d_model=d_model)
        self.temporal_embedding = (
            TemporalEmbedding(d_model=d_model, embed_type=embed_type, freq=freq)
            if embed_type != "timeF"
            else TimeFeatureEmbedding(d_model=d_model, embed_type=embed_type, freq=freq)
        )
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, x_mark):
        # positional embedding intentionally omitted (see class name)
        x = self.value_embedding(x) + self.temporal_embedding(x_mark)
        return self.dropout(x)
class ETSEmbedding(nn.Module):
    """Causal conv embedding: left-pad by 2 so each output position only sees inputs t-2..t."""

    def __init__(self, c_in, d_model, dropout=0.1):
        super().__init__()
        self.conv = nn.Conv1d(in_channels=c_in, out_channels=d_model, kernel_size=3, padding=2, bias=False)
        self.dropout = nn.Dropout(p=dropout)
        nn.init.kaiming_normal_(self.conv.weight)

    def forward(self, x):
        # conv over (B, c_in, L); dropping the final 2 outputs makes the kernel causal
        out = self.conv(x.permute(0, 2, 1))
        out = out[..., :-2].transpose(1, 2)
        return self.dropout(out)
import os
import math
import numpy as np
try:
import torch
import torch.nn as nn
import torch.fft as fft
import torch.nn.functional as F
from einops import rearrange, reduce, repeat
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
from math import sqrt
from scipy.fftpack import next_fast_len
class AutoCorrelation(nn.Module):
    """
    AutoCorrelation Mechanism with the following two phases:
    (1) period-based dependencies discovery
    (2) time delay aggregation
    This block can replace the self-attention family mechanism seamlessly.
    """

    def __init__(self, mask_flag=True, factor=1, scale=None, attention_dropout=0.1, output_attention=False):
        """
        :param mask_flag: stored but not used by forward() — kept for interface parity
            with the attention classes below.
        :param factor: controls how many delays are aggregated (top_k = factor * log(L)).
        :param scale: stored but unused here; kept for interface parity.
        :param attention_dropout: dropout rate (the module is created but not applied
            in this class's methods).
        :param output_attention: if True, forward() also returns the correlation map.
        """
        super(AutoCorrelation, self).__init__()
        self.factor = factor
        self.scale = scale
        self.mask_flag = mask_flag
        self.output_attention = output_attention
        self.dropout = nn.Dropout(attention_dropout)

    def time_delay_agg_training(self, values, corr):
        """
        SpeedUp version of Autocorrelation (a batch-normalization style design)
        This is for the training phase.

        values/corr: [B, H, C, L] (per the dims read below). The top-k delays are
        chosen once for the whole batch (scores averaged over batch/head/channel).
        """
        head = values.shape[1]
        channel = values.shape[2]
        length = values.shape[3]
        # find top k delays by mean correlation
        top_k = int(self.factor * math.log(length))
        mean_value = torch.mean(torch.mean(corr, dim=1), dim=1)
        index = torch.topk(torch.mean(mean_value, dim=0), top_k, dim=-1)[1]
        weights = torch.stack([mean_value[:, index[i]] for i in range(top_k)], dim=-1)
        # update corr: softmax over the selected delays yields the blending weights
        tmp_corr = torch.softmax(weights, dim=-1)
        # aggregation: roll the series by each selected delay and blend
        tmp_values = values
        delays_agg = torch.zeros_like(values).float()
        for i in range(top_k):
            pattern = torch.roll(tmp_values, -int(index[i]), -1)
            delays_agg = delays_agg + pattern * (
                tmp_corr[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length)
            )
        return delays_agg

    def time_delay_agg_inference(self, values, corr):
        """
        SpeedUp version of Autocorrelation (a batch-normalization style design)
        This is for the inference phase.

        Unlike the training variant, each batch element gets its own top-k delays;
        shifted series are read by gather() on a doubled (wrap-around) copy.
        """
        batch = values.shape[0]
        head = values.shape[1]
        channel = values.shape[2]
        length = values.shape[3]
        # index init: base positions 0..L-1 for every (batch, head, channel)
        init_index = (
            torch.arange(length)
            .unsqueeze(0)
            .unsqueeze(0)
            .unsqueeze(0)
            .repeat(batch, head, channel, 1)
            .to(values.device)
        )
        # find top k delays per batch element
        top_k = int(self.factor * math.log(length))
        mean_value = torch.mean(torch.mean(corr, dim=1), dim=1)
        weights, delay = torch.topk(mean_value, top_k, dim=-1)
        # update corr
        tmp_corr = torch.softmax(weights, dim=-1)
        # aggregation: doubling the series makes index + delay always in range
        tmp_values = values.repeat(1, 1, 1, 2)
        delays_agg = torch.zeros_like(values).float()
        for i in range(top_k):
            tmp_delay = init_index + delay[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length)
            pattern = torch.gather(tmp_values, dim=-1, index=tmp_delay)
            delays_agg = delays_agg + pattern * (
                tmp_corr[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length)
            )
        return delays_agg

    def time_delay_agg_full(self, values, corr):
        """
        Standard version of Autocorrelation

        Top-k delays are chosen independently for every (batch, head, channel) slot.
        """
        batch = values.shape[0]
        head = values.shape[1]
        channel = values.shape[2]
        length = values.shape[3]
        # index init
        init_index = (
            torch.arange(length)
            .unsqueeze(0)
            .unsqueeze(0)
            .unsqueeze(0)
            .repeat(batch, head, channel, 1)
            .to(values.device)
        )
        # find top k
        top_k = int(self.factor * math.log(length))
        weights, delay = torch.topk(corr, top_k, dim=-1)
        # update corr
        tmp_corr = torch.softmax(weights, dim=-1)
        # aggregation
        tmp_values = values.repeat(1, 1, 1, 2)
        delays_agg = torch.zeros_like(values).float()
        for i in range(top_k):
            tmp_delay = init_index + delay[..., i].unsqueeze(-1)
            pattern = torch.gather(tmp_values, dim=-1, index=tmp_delay)
            delays_agg = delays_agg + pattern * (tmp_corr[..., i].unsqueeze(-1))
        return delays_agg

    def forward(self, queries, keys, values, attn_mask):
        """
        queries/keys/values: (B, L, H, E) / (B, S, H, E) / (B, S, H, D).
        attn_mask is accepted for interface compatibility but is not used.
        Returns (aggregated values, correlation map or None).
        """
        B, L, H, E = queries.shape
        _, S, _, D = values.shape
        # align key/value length to the query length (zero-pad or truncate)
        if L > S:
            zeros = torch.zeros_like(queries[:, : (L - S), :]).float()
            values = torch.cat([values, zeros], dim=1)
            keys = torch.cat([keys, zeros], dim=1)
        else:
            values = values[:, :L, :, :]
            keys = keys[:, :L, :, :]

        # period-based dependencies: correlation per delay computed in the frequency
        # domain (product with the conjugate spectrum, then inverse FFT)
        q_fft = torch.fft.rfft(queries.permute(0, 2, 3, 1).contiguous(), dim=-1)
        k_fft = torch.fft.rfft(keys.permute(0, 2, 3, 1).contiguous(), dim=-1)
        res = q_fft * torch.conj(k_fft)
        corr = torch.fft.irfft(res, dim=-1)

        # time delay agg (batch-shared delays while training, per-sample at inference)
        if self.training:
            V = self.time_delay_agg_training(values.permute(0, 2, 3, 1).contiguous(), corr).permute(0, 3, 1, 2)
        else:
            V = self.time_delay_agg_inference(values.permute(0, 2, 3, 1).contiguous(), corr).permute(0, 3, 1, 2)

        if self.output_attention:
            return (V.contiguous(), corr.permute(0, 3, 1, 2))
        else:
            return (V.contiguous(), None)
# Attention building blocks
class TriangularCausalMask:
    """Boolean causal mask of shape [B, 1, L, L]; True marks the (strictly upper-triangular) blocked positions."""

    def __init__(self, B, L, device="cpu"):
        with torch.no_grad():
            ones = torch.ones([B, 1, L, L], dtype=torch.bool)
            self._mask = torch.triu(ones, diagonal=1).to(device)

    @property
    def mask(self):
        return self._mask
class ProbMask:
    """Causal mask for ProbSparse attention, gathered at the selected top-u query rows."""

    def __init__(self, B, H, L, index, scores, device="cpu"):
        # full strictly-upper-triangular mask over (L, S), S = scores.shape[-1]
        _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1)
        _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1])
        # keep only the mask rows of the queries selected per batch/head (via ``index``)
        indicator = _mask_ex[torch.arange(B)[:, None, None], torch.arange(H)[None, :, None], index, :].to(device)
        self._mask = indicator.view(scores.shape).to(device)

    @property
    def mask(self):
        return self._mask
class FullAttention(nn.Module):
    """Standard scaled dot-product attention over (B, L, H, E) queries/keys/values."""

    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):
        """
        :param mask_flag: apply a causal mask (built on the fly if none is supplied).
        :param factor: unused here; kept for interface parity with ProbAttention.
        :param scale: score scaling; defaults to 1/sqrt(E).
        :param attention_dropout: dropout applied to the attention weights.
        :param output_attention: if True, also return the attention matrix.
        """
        super().__init__()
        self.scale = scale
        self.mask_flag = mask_flag
        self.output_attention = output_attention
        self.dropout = nn.Dropout(attention_dropout)

    def forward(self, queries, keys, values, attn_mask):
        B, L, H, E = queries.shape
        scale = self.scale or 1.0 / sqrt(E)

        scores = torch.einsum("blhe,bshe->bhls", queries, keys)
        if self.mask_flag:
            if attn_mask is None:
                attn_mask = TriangularCausalMask(B, L, device=queries.device)
            scores.masked_fill_(attn_mask.mask, -np.inf)

        weights = self.dropout(torch.softmax(scale * scores, dim=-1))
        out = torch.einsum("bhls,bshd->blhd", weights, values).contiguous()
        return (out, weights) if self.output_attention else (out, None)
class ProbAttention(nn.Module):
    """
    ProbSparse attention (Informer): computes full scores only for the top-u most
    "active" queries (u ~ factor * ln(L)), approximating full attention cheaply.
    """

    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):
        """
        :param mask_flag: apply the causal ProbMask inside _update_context.
        :param factor: sampling factor c, so u = c * ln(L).
        :param scale: score scaling; defaults to 1/sqrt(D).
        :param attention_dropout: dropout module (created; not applied in these methods).
        :param output_attention: if True, also return a dense attention matrix
            (uniform rows except for the selected queries).
        """
        super(ProbAttention, self).__init__()
        self.factor = factor
        self.scale = scale
        self.mask_flag = mask_flag
        self.output_attention = output_attention
        self.dropout = nn.Dropout(attention_dropout)

    def _prob_QK(self, Q, K, sample_k, n_top):  # n_top: c*ln(L_q)
        """Score every query on ``sample_k`` random keys, then compute full scores for the ``n_top`` sparsest queries."""
        # Q [B, H, L, D]
        B, H, L_K, E = K.shape
        _, _, L_Q, _ = Q.shape

        # calculate the sampled Q_K
        K_expand = K.unsqueeze(-3).expand(B, H, L_Q, L_K, E)
        index_sample = torch.randint(L_K, (L_Q, sample_k))  # real U = U_part(factor*ln(L_k))*L_q
        K_sample = K_expand[:, :, torch.arange(L_Q).unsqueeze(1), index_sample, :]
        Q_K_sample = torch.matmul(Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze()

        # find the Top_k query with sparisty measurement:
        # max score minus mean score — large values mean a peaked (informative) distribution
        M = Q_K_sample.max(-1)[0] - torch.div(Q_K_sample.sum(-1), L_K)
        M_top = M.topk(n_top, sorted=False)[1]

        # use the reduced Q to calculate Q_K
        Q_reduce = Q[torch.arange(B)[:, None, None], torch.arange(H)[None, :, None], M_top, :]  # factor*ln(L_q)
        Q_K = torch.matmul(Q_reduce, K.transpose(-2, -1))  # factor*ln(L_q)*L_k

        return Q_K, M_top

    def _get_initial_context(self, V, L_Q):
        """Default context for unselected queries: mean of V (unmasked) or cumulative sum (causal)."""
        B, H, L_V, D = V.shape
        if not self.mask_flag:
            # V_sum = V.sum(dim=-2)
            V_sum = V.mean(dim=-2)
            contex = V_sum.unsqueeze(-2).expand(B, H, L_Q, V_sum.shape[-1]).clone()
        else:  # use mask
            assert L_Q == L_V  # requires that L_Q == L_V, i.e. for self-attention only
            contex = V.cumsum(dim=-2)
        return contex

    def _update_context(self, context_in, V, scores, index, L_Q, attn_mask):
        """Overwrite the context rows of the selected queries with their true attention output."""
        B, H, L_V, D = V.shape

        if self.mask_flag:
            attn_mask = ProbMask(B, H, L_Q, index, scores, device=V.device)
            scores.masked_fill_(attn_mask.mask, -np.inf)

        attn = torch.softmax(scores, dim=-1)  # nn.Softmax(dim=-1)(scores)

        context_in[torch.arange(B)[:, None, None], torch.arange(H)[None, :, None], index, :] = torch.matmul(
            attn, V
        ).type_as(context_in)
        if self.output_attention:
            # dense attention map: uniform 1/L_V rows, with true rows for selected queries
            attns = (torch.ones([B, H, L_V, L_V]) / L_V).type_as(attn).to(attn.device)
            attns[torch.arange(B)[:, None, None], torch.arange(H)[None, :, None], index, :] = attn
            return (context_in, attns)
        else:
            return (context_in, None)

    def forward(self, queries, keys, values, attn_mask):
        """queries/keys/values: (B, L, H, D); returns (context, attention-or-None)."""
        B, L_Q, H, D = queries.shape
        _, L_K, _, _ = keys.shape

        # work in (B, H, L, D) layout
        queries = queries.transpose(2, 1)
        keys = keys.transpose(2, 1)
        values = values.transpose(2, 1)

        U_part = self.factor * np.ceil(np.log(L_K)).astype("int").item()  # c*ln(L_k)
        u = self.factor * np.ceil(np.log(L_Q)).astype("int").item()  # c*ln(L_q)

        # never sample/select more than the actual sequence lengths
        U_part = U_part if U_part < L_K else L_K
        u = u if u < L_Q else L_Q

        scores_top, index = self._prob_QK(queries, keys, sample_k=U_part, n_top=u)

        # add scale factor
        scale = self.scale or 1.0 / sqrt(D)
        if scale is not None:
            scores_top = scores_top * scale
        # get the context
        context = self._get_initial_context(values, L_Q)
        # update the context with selected top_k queries
        context, attn = self._update_context(context, values, scores_top, index, L_Q, attn_mask)

        return context.contiguous(), attn
class SeasonalLayernorm(nn.Module):
    """
    Special designed layernorm for the seasonal part (Autoformer): after the usual
    LayerNorm, subtract the per-channel temporal mean so the output is zero-mean over time.
    """

    def __init__(self, channels):
        super().__init__()
        self.layernorm = nn.LayerNorm(channels)

    def forward(self, x):
        normed = self.layernorm(x)
        return normed - normed.mean(dim=1, keepdim=True)
class MovingAverageBlock(nn.Module):
    """
    Moving average over the time dimension with edge replication, used to extract
    the trend of a series (Autoformer).
    """

    def __init__(self, kernel_size, stride):
        super().__init__()
        self.kernel_size = kernel_size
        self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0)

    def forward(self, x):
        # replicate the first/last time steps so pooling preserves the input length
        # (for odd kernels with stride 1)
        pad = (self.kernel_size - 1) // 2
        front = x[:, :1, :].expand(-1, pad, -1)
        back = x[:, -1:, :].expand(-1, pad, -1)
        padded = torch.cat([front, x, back], dim=1)
        return self.avg(padded.permute(0, 2, 1)).permute(0, 2, 1)
class SeriesDecomposeBlock(nn.Module):
    """
    Series decomposition block (Autoformer): split a series into
    (seasonal residual, trend) via a moving average.
    """

    def __init__(self, kernel_size):
        super().__init__()
        self.moving_avg = MovingAverageBlock(kernel_size, stride=1)

    def forward(self, x):
        trend = self.moving_avg(x)
        return x - trend, trend
def conv1d_fft(f, g, dim=-1):
    """FFT-based correlation of ``f`` with ``g`` along ``dim``, zero-padded to a fast transform length."""
    n_f, n_g = f.size(dim), g.size(dim)
    fast_len = next_fast_len(n_f + n_g - 1)

    # multiply by the conjugate spectrum, i.e. correlate rather than convolve
    spec = fft.rfft(f, fast_len, dim=dim) * fft.rfft(g, fast_len, dim=dim).conj()
    full = fft.irfft(spec, fast_len, dim=dim).roll((-1,), dims=(dim,))

    # keep the last n_f samples of the rolled circular result
    idx = torch.as_tensor(range(fast_len - n_f, fast_len)).to(full.device)
    return full.index_select(dim, idx)
class ExponentialSmoothing(nn.Module):
    """
    Learnable exponential smoothing (per-head smoothing coefficient
    alpha = sigmoid(_smoothing_weight)); the exponentially-weighted sum over time
    is evaluated as an FFT convolution via conv1d_fft.
    """

    def __init__(self, dim, nhead, dropout=0.1, aux=False):
        super().__init__()
        # raw (pre-sigmoid) smoothing parameter, one per head
        self._smoothing_weight = nn.Parameter(torch.randn(nhead, 1))
        # learnable initial state v0
        self.v0 = nn.Parameter(torch.randn(1, 1, nhead, dim))
        self.dropout = nn.Dropout(dropout)
        if aux:
            # only created when aux=True; forward() with aux_values requires it
            self.aux_dropout = nn.Dropout(dropout)

    def forward(self, values, aux_values=None):
        # values: (batch, time, head, dim) per the unpacking below
        b, t, h, d = values.shape

        init_weight, weight = self.get_exponential_weight(t)
        # EMA: exponentially-decaying weights applied via FFT convolution,
        # plus the decayed contribution of the initial state v0
        output = conv1d_fft(self.dropout(values), weight, dim=1)
        output = init_weight * self.v0 + output

        if aux_values is not None:
            # NOTE(review): requires aux=True at construction, otherwise
            # self.aux_dropout does not exist
            aux_weight = weight / (1 - self.weight) * self.weight
            aux_output = conv1d_fft(self.aux_dropout(aux_values), aux_weight)
            output = output + aux_output

        return output

    def get_exponential_weight(self, T):
        """Return (init_weight, weight): decay applied to v0 and to each time step, shaped (1, T, h, 1)."""
        # Generate array [0, 1, ..., T-1]
        powers = torch.arange(T, dtype=torch.float, device=self.weight.device)

        # (1 - \alpha) * \alpha^t, for all t = T-1, T-2, ..., 0]
        weight = (1 - self.weight) * (self.weight ** torch.flip(powers, dims=(0,)))

        # \alpha^t for all t = 1, 2, ..., T
        init_weight = self.weight ** (powers + 1)

        return rearrange(init_weight, "h t -> 1 t h 1"), rearrange(weight, "h t -> 1 t h 1")

    @property
    def weight(self):
        # per-head smoothing coefficient alpha in (0, 1)
        return torch.sigmoid(self._smoothing_weight)
try:
import torch
import torch.nn as nn
import torch.nn.functional as F
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
from merlion.models.utils.nn_modules.blocks import SeriesDecomposeBlock, MovingAverageBlock
class EncoderLayer(nn.Module):
    """
    Autoformer encoder layer with the progressive decomposition architecture:
    attention and feed-forward blocks are each followed by a series decomposition,
    and only the seasonal part is propagated (the trend is discarded here).
    """

    def __init__(self, attention, d_model, d_ff=None, moving_avg=25, dropout=0.1, activation="relu"):
        super().__init__()
        d_ff = d_ff or 4 * d_model
        self.attention = attention
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1, bias=False)
        self.decomp1 = SeriesDecomposeBlock(moving_avg)
        self.decomp2 = SeriesDecomposeBlock(moving_avg)
        self.dropout = nn.Dropout(dropout)
        self.activation = F.relu if activation == "relu" else F.gelu

    def forward(self, x, attn_mask=None):
        attn_out, attn = self.attention(x, x, x, attn_mask=attn_mask)
        x, _ = self.decomp1(x + self.dropout(attn_out))
        hidden = self.dropout(self.activation(self.conv1(x.transpose(-1, 1))))
        hidden = self.dropout(self.conv2(hidden).transpose(-1, 1))
        seasonal, _ = self.decomp2(x + hidden)
        return seasonal, attn
class Encoder(nn.Module):
    """
    Autoformer encoder: a stack of attention layers, optionally interleaved
    with conv distillation layers, plus an optional final norm.
    """

    def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
        super().__init__()
        self.attn_layers = nn.ModuleList(attn_layers)
        self.conv_layers = None if conv_layers is None else nn.ModuleList(conv_layers)
        self.norm = norm_layer

    def forward(self, x, attn_mask=None):
        attns = []
        if self.conv_layers is None:
            for attn_layer in self.attn_layers:
                x, attn = attn_layer(x, attn_mask=attn_mask)
                attns.append(attn)
        else:
            # attention/conv pairs, then the final attention layer on its own (no mask)
            for attn_layer, conv_layer in zip(self.attn_layers, self.conv_layers):
                x, attn = attn_layer(x, attn_mask=attn_mask)
                x = conv_layer(x)
                attns.append(attn)
            x, attn = self.attn_layers[-1](x)
            attns.append(attn)
        if self.norm is not None:
            x = self.norm(x)
        return x, attns
class DecoderLayer(nn.Module):
    """
    Autoformer decoder layer with the progressive decomposition architecture:
    each sub-block (self-attention, cross-attention, feed-forward) is followed by a
    series decomposition; the extracted trends are accumulated and projected to c_out.
    """

    def __init__(
        self, self_attention, cross_attention, d_model, c_out, d_ff=None, moving_avg=25, dropout=0.1, activation="relu"
    ):
        super(DecoderLayer, self).__init__()
        d_ff = d_ff or 4 * d_model
        self.self_attention = self_attention
        self.cross_attention = cross_attention
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1, bias=False)
        # one decomposition after each sub-block
        self.decomp1 = SeriesDecomposeBlock(moving_avg)
        self.decomp2 = SeriesDecomposeBlock(moving_avg)
        self.decomp3 = SeriesDecomposeBlock(moving_avg)
        self.dropout = nn.Dropout(dropout)
        # projects the accumulated trend from d_model channels down to c_out
        self.projection = nn.Conv1d(
            in_channels=d_model,
            out_channels=c_out,
            kernel_size=3,
            stride=1,
            padding=1,
            padding_mode="circular",
            bias=False,
        )
        self.activation = F.relu if activation == "relu" else F.gelu

    def forward(self, x, cross, x_mask=None, cross_mask=None):
        """Return (seasonal output, residual trend projected to c_out channels)."""
        x = x + self.dropout(self.self_attention(x, x, x, attn_mask=x_mask)[0])
        x, trend1 = self.decomp1(x)
        x = x + self.dropout(self.cross_attention(x, cross, cross, attn_mask=cross_mask)[0])
        x, trend2 = self.decomp2(x)
        y = x
        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))
        y = self.dropout(self.conv2(y).transpose(-1, 1))
        x, trend3 = self.decomp3(x + y)

        # accumulate the trend pieces extracted at each stage, then project channels
        residual_trend = trend1 + trend2 + trend3
        residual_trend = self.projection(residual_trend.permute(0, 2, 1)).transpose(1, 2)
        return x, residual_trend
class Decoder(nn.Module):
    """
    Autoformer decoder: runs the layer stack on the seasonal stream while
    accumulating each layer's residual trend into ``trend``.
    """

    def __init__(self, layers, norm_layer=None, projection=None):
        super().__init__()
        self.layers = nn.ModuleList(layers)
        self.norm = norm_layer
        self.projection = projection

    def forward(self, x, cross, x_mask=None, cross_mask=None, trend=None):
        seasonal = x
        for layer in self.layers:
            seasonal, residual_trend = layer(seasonal, cross, x_mask=x_mask, cross_mask=cross_mask)
            trend = trend + residual_trend
        if self.norm is not None:
            seasonal = self.norm(seasonal)
        if self.projection is not None:
            seasonal = self.projection(seasonal)
        return seasonal, trend
import logging
import warnings
from typing import List, Tuple
import numpy as np
import pandas as pd
import statsmodels.api as sm
from merlion.models.automl.seasonality import SeasonalityModel
from merlion.models.forecast.base import ForecasterExogBase, ForecasterExogConfig
from merlion.transform.resample import TemporalResample
from merlion.utils.time_series import UnivariateTimeSeries, to_pd_datetime, to_timestamp
logger = logging.getLogger(__name__)
class SarimaConfig(ForecasterExogConfig):
    """
    Config class for `Sarima` (Seasonal AutoRegressive Integrated Moving Average).
    """

    _default_transform = TemporalResample(granularity=None)

    def __init__(self, order: List[int] = (4, 1, 2), seasonal_order: List[int] = (2, 0, 1, 24), **kwargs):
        """
        :param order: The (p, d, q) order of an ARIMA(p, d, q) process: d is the
            integration order, while p and q are the AR and MA orders (all lags up
            to those orders are included).
        :param seasonal_order: The (P, D, Q, S) order of the seasonal ARIMA
            process, where S is the length of the seasonality cycle (e.g. S=24 for
            24 hours on hourly granularity) and P, D, Q are as for ARIMA.
        """
        super().__init__(**kwargs)
        self.order = order
        self.seasonal_order = seasonal_order
class Sarima(ForecasterExogBase, SeasonalityModel):
    """
    Implementation of the classic statistical model SARIMA (Seasonal
    AutoRegressive Integrated Moving Average) for forecasting.
    """

    config_class = SarimaConfig

    def __init__(self, config: SarimaConfig):
        super().__init__(config)
        # Fitted statsmodels SARIMAX results object (set by ``_train_with_exog``)
        self.model = None
        # Last observed target value, used as a fallback forecast on failure/NaN
        self._last_val = None

    @property
    def require_even_sampling(self) -> bool:
        return False

    @property
    def _default_train_config(self):
        return dict(enforce_stationarity=False, enforce_invertibility=False)

    @property
    def order(self) -> Tuple[int, int, int]:
        """
        :return: the order (p, d, q) of the model, where p is the AR order,
            d is the integration order, and q is the MA order.
        """
        return self.config.order

    @property
    def seasonal_order(self) -> Tuple[int, int, int, int]:
        """
        :return: the seasonal order (P, D, Q, S) for the seasonal ARIMA
            process, where P is the AR order, D is the integration order,
            Q is the MA order, and S is the length of the seasonality cycle.
        """
        return self.config.seasonal_order

    @property
    def _max_lookback(self) -> int:
        # History needed to warm-start the model on a new prefix series. With any
        # MA terms we return 0, which keeps the whole series (iloc[-0:] == iloc[0:]).
        if self.model is None:
            return 0
        orders = self.model.model_orders
        if orders["reduced_ma"] > 0:
            return 0
        return 2 * orders["reduced_ar"] + 1

    def _train_with_exog(
        self, train_data: pd.DataFrame, train_config=None, exog_data: pd.DataFrame = None
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """Fit SARIMAX on the target column; return (in-sample prediction, stderr) frames."""
        # train model
        name = self.target_name
        train_data = train_data[name]
        times = train_data.index
        train_config = train_config or {}
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            model = sm.tsa.SARIMAX(
                train_data, exog=exog_data, order=self.order, seasonal_order=self.seasonal_order, **train_config
            )
            self.model = model.fit(disp=0)

        # Bug fix: use positional ``.iloc[-1]``. ``train_data[-1]`` relied on the
        # integer-label fallback, which was removed in pandas 2.0 and raises a
        # KeyError on a DatetimeIndex.
        self._last_val = train_data.iloc[-1]

        # In-sample one-step-ahead fitted values, with a constant stderr derived
        # from the fitted innovation variance sigma^2
        yhat = (train_data.values - self.model.resid).tolist()
        err = [np.sqrt(self.model.params["sigma2"])] * len(train_data)
        return pd.DataFrame(yhat, index=times, columns=[name]), pd.DataFrame(err, index=times, columns=[f"{name}_err"])

    def _forecast_with_exog(
        self,
        time_stamps: List[int],
        time_series_prev: pd.DataFrame = None,
        return_prev=False,
        exog_data: pd.DataFrame = None,
        exog_data_prev: pd.DataFrame = None,
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """Forecast at ``time_stamps``, optionally conditioning the model state on ``time_series_prev``."""
        # If there is a time_series_prev, use it to set the SARIMA model's state, and then obtain its forecast
        if time_series_prev is None:
            last_val = self._last_val
            model = self.model
        else:
            val_prev = time_series_prev.iloc[-self._max_lookback :, self.target_seq_index]
            # Bug fix: positional ``.iloc[-1]`` (see _train_with_exog)
            last_val = val_prev.iloc[-1]
            exog_data_prev = None if exog_data_prev is None else exog_data_prev.loc[val_prev.index]
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                model = self.model.apply(val_prev, exog=exog_data_prev, validate_specification=False)

        try:
            forecast_result = model.get_forecast(len(time_stamps), exog=exog_data)
            pred = np.asarray(forecast_result.predicted_mean)
            err = np.asarray(forecast_result.se_mean)
            assert len(pred) == len(
                time_stamps
            ), f"Expected SARIMA to return forecast of length {len(time_stamps)}, but got {len(pred)} instead."
        except Exception as e:
            # Fall back to a flat forecast at the last observed value
            logger.warning(f"Caught {type(e).__name__}: {str(e)}")
            pred = np.full(len(time_stamps), last_val)
            err = np.zeros(len(time_stamps))

        if time_series_prev is not None and return_prev:
            # Prepend in-sample predictions over time_series_prev to the forecast
            m = len(time_series_prev) - len(val_prev)
            params = dict(zip(model.param_names, model.params))
            err_prev = np.concatenate((np.zeros(m), np.full(len(val_prev), np.sqrt(params["sigma2"]))))
            pred = np.concatenate((time_series_prev.values[:m, self.target_seq_index], val_prev - model.resid, pred))
            err = np.concatenate((err_prev, err))
            time_stamps = np.concatenate((to_timestamp(time_series_prev.index), time_stamps))

        # Check for NaN's
        if any(np.isnan(pred)):
            logger.warning("Trained SARIMA model producing NaN forecast. Using last training point as the prediction.")
            pred[np.isnan(pred)] = last_val
        if any(np.isnan(err)):
            err[np.isnan(err)] = 0

        # Return the forecast & its standard error
        name = self.target_name
        pred = pd.DataFrame(pred, index=to_pd_datetime(time_stamps), columns=[name])
        err = pd.DataFrame(err, index=to_pd_datetime(time_stamps), columns=[f"{name}_err"])
        return pred, err

    def set_seasonality(self, theta, train_data: UnivariateTimeSeries):
        """Set the seasonal cycle length S (from `SeasonalityModel`); forces S=1 on constant data."""
        # Make sure seasonality is a positive int, and set it to 1 if the train data is constant
        theta = 1 if np.max(train_data) == np.min(train_data) else max(1, int(theta))
        # NOTE(review): assumes seasonal_order is a tuple (the default is); a
        # list-valued config would raise on tuple concatenation here.
        self.config.seasonal_order = self.seasonal_order[:-1] + (theta,)
import logging
from math import floor
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from merlion.utils.time_series import TimeSeries, UnivariateTimeSeries, assert_equal_timedeltas
from merlion.utils.istat import ExponentialMovingAverage, RecencyWeightedVariance
from merlion.utils.resample import to_pd_datetime, to_timestamp
from merlion.transform.moving_average import LagTransform
from merlion.transform.resample import TemporalResample
from merlion.models.forecast.base import ForecasterBase, ForecasterConfig
logger = logging.getLogger(__name__)
class MSESConfig(ForecasterConfig):
    """
    Configuration class for an MSES forecasting model.
    """

    _default_transform = TemporalResample(trainable_granularity=True)

    def __init__(
        self,
        max_forecast_steps: int,
        max_backstep: int = None,
        recency_weight: float = 0.5,
        accel_weight: float = 1.0,
        optimize_acc: bool = True,
        eta: float = 0.0,
        rho: float = 0.0,
        phi: float = 2.0,
        inflation: float = 1.0,
        **kwargs,
    ):
        r"""
        Letting ``w`` be the recency weight, ``B`` the maximum backstep, ``x_t`` the last seen data point,
        and ``l_s,t`` the series of losses for scale ``s``.

        .. math::
            \begin{align*}
            \hat{x}_{t+h} & = \sum_{b=0}^B p_{b} \cdot (x_{t-b} + v_{b+h,t} + a_{b+h,t}) \\
            \space \\
            \text{where} \space\space & v_{b+h,t} = \text{EMA}_w(\Delta_{b+h} x_t) \\
            & a_{b+h,t} = \text{EMA}_w(\Delta_{b+h}^2 x_t) \\
            \text{and} \space\space & p_b = \sigma(z)_b \space\space \\
            \text{if} & \space\space z_b = (b+h)^\phi \cdot \text{EMA}_w(l_{b+h,t}) \cdot \text{RWSE}_w(l_{b+h,t})\\
            \end{align*}

        :param max_forecast_steps: Maximum number of steps to forecast ahead (forwarded
            to the base `ForecasterConfig`).
        :param max_backstep: Max backstep to use in forecasting. If we train with x(0),...,x(t),
            Then, the b-th model MSES uses will forecast x(t+h) by anchoring at x(t-b) and
            predicting xhat(t+h) = x(t-b) + delta_hat(b+h).
        :param recency_weight: The recency weight parameter to use when estimating delta_hat.
        :param accel_weight: The weight to scale the acceleration by when computing delta_hat.
            Specifically, delta_hat(b+h) = velocity(b+h) + accel_weight * acceleration(b+h).
        :param optimize_acc: If True, the acceleration correction will only be used at scales
            ranging from 1,...(max_backstep+max_forecast_steps)/2.
        :param eta: The parameter used to control the rate at which recency_weight gets
            tuned when online updates are made to the model and losses can be computed.
        :param rho: The parameter that determines what fraction of the overall error is due to
            velocity error, while the rest is due to the complement. The error at any scale
            will be determined as ``rho * velocity_error + (1-rho) * loss_error``.
        :param phi: The parameter used to exponentially inflate the magnitude of loss error at
            different scales. Loss error for scale ``s`` will be increased by a factor of ``phi ** s``.
        :param inflation: The inflation exponent to use when computing the distribution
            p(b|h) over the models when forecasting at horizon h according to standard
            errors of the estimated velocities over the models; inflation=1 is equivalent
            to using the softmax function.
        """
        super().__init__(max_forecast_steps=max_forecast_steps, **kwargs)
        # rho is a convex mixing fraction; phi must inflate (>= 1)
        assert 0.0 <= rho <= 1.0
        assert 1.0 <= phi
        # by default, allow anchoring as far back as we forecast forward
        self.max_backstep = max_forecast_steps if max_backstep is None else max_backstep
        self.recency_weight = recency_weight
        self.accel_weight = accel_weight
        self.optimize_acc = optimize_acc
        self.eta = eta
        self.rho = rho
        self.phi = phi
        self.inflation = inflation

    @property
    def max_scale(self):
        """Largest delta scale the model estimates: max_backstep + max_forecast_steps."""
        return self.max_backstep + self.max_forecast_steps

    @property
    def backsteps(self):
        """All backsteps b = 0, 1, ..., max_backstep."""
        return list(range(self.max_backstep + 1))
class MSESTrainConfig(object):
"""
MSES training configuration.
"""
def __init__(
self,
incremental: bool = True,
process_losses: bool = True,
tune_recency_weights: bool = False,
init_batch_sz: int = 2,
train_cadence: int = None,
):
"""
:param incremental: If True, train the MSES model incrementally with the initial
training data at the given ``train_cadence``. This allows MSES to return a
forecast for the training data.
:param: If True, track the losses encountered during incremental initial training.
:tune_recency_weights: If True, tune recency weights during incremental initial
training.
:param init_batch_sz: The size of the inital training batch for MSES. This is
necessary because MSES cannot predict the past, but needs to start with some
data. This should be very small. 2 is the minimum, and is recommended because
2 will result in the most representative train forecast.
:param train_cadence: The frequency at which the training forecasts will be generated
during incremental training.
"""
assert init_batch_sz >= 2
self.incremental = incremental
self.process_losses = process_losses
self.tune_recency_weights = tune_recency_weights
self.init_batch_sz = init_batch_sz
self.train_cadence = train_cadence
class MSES(ForecasterBase):
    r"""
    Multi-scale Exponential Smoother (MSES) is a forecasting algorithm modeled heavily
    after classical mechanical concepts, namely, velocity and acceleration.

    Having seen data points of a time series up to time t, MSES forecasts x(t+h) by
    anchoring at a value b steps back from the last known value, x(t-b), and estimating the
    delta between x(t-b) and x(t+h). The delta over these b+h timesteps, delta(b+h), also known
    as the delta at scale b+h, is predicted by estimating the velocity over these timesteps
    as well as the change in the velocity, acceleration. Specifically,

        xhat(t+h) = x(t-b) + velocity_hat(b+h) + acceleration_hat(b+h)

    This estimation is done for each b, known as a backstep, from 0, which anchors at x(t),
    1,... up to a maximum backstep configurable by the user. The algorithm then takes the
    separate forecasts of x(t+h), indexed by which backstep was used, xhat_b(t+h), and determines
    a final forecast: p(b|h) dot xhat_b, where p(b|h) is a distribution over the xhat_b's that is
    determined according to the lowest standard errors of the recency-weighted velocity estimates.

    Letting ``w`` be the recency weight, ``B`` the maximum backstep, ``x_t`` the last seen data point,
    and ``l_s,t`` the series of losses for scale ``s``.

    .. math::
        \begin{align*}
        \hat{x}_{t+h} & = \sum_{b=0}^B p_{b} \cdot (x_{t-b} + v_{b+h,t} + a_{b+h,t}) \\
        \space \\
        \text{where} \space\space & v_{b+h,t} = \text{EMA}_w(\Delta_{b+h} x_t) \\
        & a_{b+h,t} = \text{EMA}_w(\Delta_{b+h}^2 x_t) \\
        \text{and} \space\space & p_b = \sigma(z)_b \space\space \\
        \text{if} & \space\space z_b = (b+h)^\phi \cdot \text{EMA}_w(l_{b+h,t}) \cdot \text{RWSE}_w(l_{b+h,t})\\
        \end{align*}
    """

    config_class = MSESConfig

    def __init__(self, config: MSESConfig):
        super().__init__(config)
        # A single DeltaEstimator maintains velocity/acceleration/loss statistics for
        # every scale in 1, ..., max_backstep + max_forecast_steps.
        self.delta_estimator = DeltaEstimator(
            max_scale=self.config.max_scale,
            recency_weight=self.config.recency_weight,
            accel_weight=self.config.accel_weight,
            optimize_acc=self.config.optimize_acc,
            eta=self.config.eta,
            phi=self.config.phi,
        )

    @property
    def require_even_sampling(self) -> bool:
        return True

    @property
    def _pandas_train(self):
        return False

    @property
    def rho(self):
        # fraction of the model-weighting error attributed to velocity error
        return self.config.rho

    @property
    def backsteps(self):
        return self.config.backsteps

    @property
    def max_horizon(self):
        return self.max_forecast_steps * self.timedelta

    @property
    def _default_train_config(self):
        return MSESTrainConfig()

    def _train(self, train_data: TimeSeries, train_config: MSESTrainConfig = None):
        """
        Trains MSES on ``train_data``. If ``train_config.incremental``, training proceeds
        in batches so the model can also return a forecast for the training data.
        """
        if isinstance(train_config, dict):
            train_config = MSESTrainConfig(**train_config)
        name = self.target_name
        train_data = train_data.univariates[name]

        if not train_config.incremental:
            self.delta_estimator.train(train_data)
            return None, None

        # train on initial batch
        b = train_config.init_batch_sz
        init_train_data, train_data = train_data[:b], train_data[b:]
        self.delta_estimator.train(init_train_data)
        self.last_train_time = init_train_data.tf

        # use initial batch as train forecast (with zero error)
        init_train_forecast = init_train_data.to_ts()
        init_train_err = UnivariateTimeSeries(
            time_stamps=init_train_data.index, name=f"{init_train_data.name}_err", values=[0] * len(init_train_data)
        ).to_ts()

        # train incrementally, forecasting at most max_horizon at a time
        h = train_config.train_cadence
        h = h * self.timedelta if h is not None else None
        h = min(h, self.max_horizon) if h is not None else self.max_horizon
        train_forecast, train_err = self._incremental_train(
            train_data=train_data,
            train_cadence=h,
            process_losses=train_config.process_losses,
            tune_recency_weights=train_config.tune_recency_weights,
        )

        train_forecast = init_train_forecast + train_forecast
        train_err = init_train_err + train_err
        return train_forecast, train_err

    def _incremental_train(self, train_data, train_cadence, process_losses, tune_recency_weights):
        """
        Trains on ``train_data`` one window (of duration ``train_cadence``) at a time,
        forecasting each window before training on it.
        """
        t, tf = train_data.t0, train_data.tf
        train_forecast, train_err = [], []
        if train_cadence is None:
            train_cadence = self.max_horizon
        all_t = train_data.time_stamps
        while t <= tf:
            i = np.searchsorted(all_t, t)
            if i + 1 < len(all_t):
                # advance by at least one observed timestamp, even if the cadence is shorter
                t_next = max(to_timestamp(to_pd_datetime(t) + train_cadence), all_t[i + 1])
            else:
                # nudge past the final timestamp so the half-open window below includes it
                t_next = all_t[-1] + 0.001
            train_batch = train_data.window(t, t_next, include_tf=False)
            if len(train_batch) > 0:
                # forecast & process losses
                if process_losses:
                    scale_losses, (forecast, err) = self._compute_losses(train_batch, return_forecast=True)
                    self.delta_estimator.process_losses(scale_losses, tune_recency_weights)
                else:
                    forecast, err = self.forecast(train_batch.time_stamps)
                # store forecast results
                train_forecast.append(forecast)
                train_err.append(err)
                # train on batch
                self.delta_estimator.train(train_batch)
                self.last_train_time = train_batch.tf
            # increment time
            t = t_next
        train_forecast, train_err = [sum(v[1:], v[0]) for v in (train_forecast, train_err)]
        return train_forecast, train_err

    def update(
        self, new_data: pd.DataFrame, tune_recency_weights: bool = True, train_cadence=None
    ) -> Tuple[TimeSeries, TimeSeries]:
        """
        Updates the MSES model with new data that has been acquired since the model's initial training.

        :param new_data: New data that has occurred since the last training time.
        :param tune_recency_weights: If True, the model will first forecast the values at the
            new_data's timestamps, calculate the associated losses, and use these losses
            to make updates to the recency weight.
        :param train_cadence: The frequency at which the training forecasts will be generated
            during incremental training.
        """
        name = self.target_name
        if len(new_data) == 0:
            return (
                UnivariateTimeSeries.empty(name=name).to_ts(),
                UnivariateTimeSeries.empty(name=f"{name}_err").to_ts(),
            )
        new_data = TimeSeries.from_pd(new_data).univariates[name]
        assert_equal_timedeltas(new_data, self.timedelta, self.timedelta_offset)

        # drop any datapoints at or before the time we last trained on
        next_train_time = self.last_train_time + self.timedelta
        if to_pd_datetime(new_data.t0) > next_train_time:
            logger.warning(
                f"Updating the model with new data requires the "
                f"new data to start at or before time "
                f"{to_pd_datetime(next_train_time)}, which is the time "
                f"directly after the last train time. Got data starting "
                f"at {to_pd_datetime(new_data.t0)} instead."
            )
        _, new_data = new_data.bisect(next_train_time, t_in_left=False)
        if new_data.is_empty():
            return (
                UnivariateTimeSeries.empty(name=name).to_ts(),
                UnivariateTimeSeries.empty(name=f"{name}_err").to_ts(),
            )
        return self._incremental_train(
            train_data=new_data,
            train_cadence=train_cadence,
            process_losses=True,
            tune_recency_weights=tune_recency_weights,
        )

    def _compute_losses(
        self, data: UnivariateTimeSeries, return_forecast: bool = False
    ) -> Union[Dict[int, List[float]], Tuple[Dict[int, List[float]], Tuple[TimeSeries, TimeSeries]]]:
        """
        Computes forecast losses at every point possible in data for every backstep, and
        then associates the losses with the relevant scale.

        :param data: Data to forecast and compute losses for. The first timestamp in
            data must be the timestamp directly after the last train time.
        :param return_forecast: If True, also return the ``(forecast, stderr)`` implied
            by marginalizing the per-backstep predictions.
        :return: A hash map mapping each scale to a list of losses.
        """
        # forecast at every scale possible for every point possible in data
        forecastable_data = data[: self.max_forecast_steps]
        xhat_hb = [self.xhat_h(h) for h in range(1, len(forecastable_data) + 1)]
        xtrue_h = forecastable_data.values
        losses_hb = np.array(xhat_hb, dtype=float) - np.expand_dims(xtrue_h, 1)

        losses = dict()
        for i, j in np.ndindex(losses_hb.shape):
            # row i corresponds to horizon h = i + 1; column j to backstep b = j,
            # so the loss belongs to scale b + h = i + j + 1
            scale = i + j + 1
            if not np.isnan(losses_hb[i, j]):
                if scale in losses:
                    losses[scale] += [losses_hb[i, j]]
                else:
                    losses[scale] = [losses_hb[i, j]]
        if not return_forecast:
            return losses

        # generate forecast
        name = self.target_name
        forecast = [self.marginalize_xhat_h(i + 1, xhat_h) for i, xhat_h in enumerate(xhat_hb)]
        xhat, neg_err, pos_err = [[f[i] for f in forecast] for i in (0, 1, 2)]
        err = UnivariateTimeSeries(
            time_stamps=forecastable_data.time_stamps,
            name=f"{name}_err",
            values=(np.abs(pos_err) + np.abs(neg_err)) / 2,
        ).to_ts()
        xhat = UnivariateTimeSeries(time_stamps=forecastable_data.time_stamps, values=xhat, name=name).to_ts()
        return losses, (xhat, err)

    def _forecast(
        self, time_stamps: List[int], time_series_prev: pd.DataFrame = None, return_prev=False
    ) -> Tuple[pd.DataFrame, Union[None, Tuple[pd.DataFrame, pd.DataFrame]]]:
        # any observed data should first be used to update the model
        if time_series_prev is not None and len(time_series_prev) > 0:
            self.update(time_series_prev)

        # forecast
        forecast = [self.marginalize_xhat_h(h, self.xhat_h(h)) for h in range(1, len(time_stamps) + 1)]
        xhat, neg_err, pos_err = [[f[i] for f in forecast] for i in (0, 1, 2)]
        if return_prev and time_series_prev is not None:
            prev = time_series_prev.iloc[:, self.target_seq_index]
            xhat = prev.values.tolist() + xhat
            neg_err = np.concatenate((np.zeros(len(time_series_prev)), neg_err))
            pos_err = np.concatenate((np.zeros(len(time_series_prev)), pos_err))
            time_stamps = np.concatenate((to_timestamp(prev.index), time_stamps))

        # convert to dataframes
        name = self.target_name
        index = to_pd_datetime(time_stamps)
        xhat = pd.DataFrame(xhat, index=index, columns=[name])
        neg_err = pd.DataFrame(np.abs(neg_err), index=index, columns=[f"{name}_neg_err"])
        pos_err = pd.DataFrame(np.abs(pos_err), index=index, columns=[f"{name}_pos_err"])
        return xhat, (neg_err, pos_err)

    def _forecast_hb(self, horizon: int, backstep: int) -> Optional[float]:
        """
        Returns the forecast at input horizon using input backstep, or None if we don't
        have enough data to anchor that far back.
        """
        x = self.delta_estimator.x
        scale = backstep + horizon
        delta_hat = self.delta_estimator.delta_hat(scale)
        if len(x) < backstep + 1 or delta_hat is None:
            return None
        return x[-(backstep + 1)] + delta_hat

    def xhat_h(self, horizon: int) -> List[Optional[float]]:
        """
        Returns the forecasts for the input horizon at every backstep.
        """
        return [self._forecast_hb(horizon, backstep) for backstep in self.backsteps]

    def marginalize_xhat_h(self, horizon: int, xhat_h: List[Optional[float]]):
        """
        Given a list of forecasted values produced by delta estimators at
        different backsteps, compute a weighted average of these values. The
        weights are assigned based on the standard errors of the velocities,
        where the b'th estimate will be given more weight if its velocity has a
        lower standard error relative to the other estimates.

        :param horizon: the horizon at which we want to predict
        :param xhat_h: the forecasted values at this horizon, using each of
            the possible backsteps
        """
        assert len(xhat_h) == len(self.backsteps)
        if all(x is None for x in xhat_h):
            # BUG FIX: the previous chained assignment (t = self.last_train_time = ...)
            # also overwrote self.last_train_time as a side effect, corrupting model state
            # and making the message below report identical values for both times.
            t = self.last_train_time + horizon * self.timedelta
            raise RuntimeError(
                f"Not enough training data to forecast at horizon {horizon} "
                f"(estimated time {pd.to_datetime(t, unit='s')}, last train "
                f"time is {pd.to_datetime(self.last_train_time, unit='s')})"
            )

        # Get the non None xhat's & their corresponding std errs
        xhat_h, neg_err_h, pos_err_h, vel_errs, loss_errs = np.asarray(
            [
                (
                    x,
                    self.delta_estimator.neg_err(b + horizon),
                    self.delta_estimator.pos_err(b + horizon),
                    self.delta_estimator.vel_err(b + horizon),
                    self.delta_estimator.loss_err(b + horizon),
                )
                for x, b in zip(xhat_h, self.backsteps)
                if x is not None
            ],
            dtype=float,
        ).T

        # combine velocity & loss errors according to rho
        if self.rho == 1.0:
            q = vel_errs
        elif self.rho == 0.0:
            q = loss_errs
        else:
            # if one error type is uniformly infinite, fall back entirely to the other
            if (vel_errs == np.inf).all():
                vel_errs.fill(0)
            if (loss_errs == np.inf).all():
                loss_errs.fill(0)
            q = self.rho * vel_errs + (1 - self.rho) * loss_errs
        if (q == np.inf).all():
            q = np.ones(len(q))

        # Do a softmax to get probabilities
        q = np.exp(-(q - q.min()) * self.config.inflation)
        q = q / q.sum()

        # compute estimate with lower and upper bounds
        xhat, neg_err, pos_err = [np.sum(q * v).item() for v in (xhat_h, neg_err_h, pos_err_h)]
        return xhat, neg_err, pos_err
class DeltaStats:
    """
    A wrapper around the statistics used to estimate deltas at a given scale.
    """

    def __init__(self, scale: int, recency_weight: float):
        """
        :param scale: The scale associated with the statistics.
        :param recency_weight: The recency weight parameter that the incremental
            velocity, acceleration and standard error statistics should use.
        """
        # incremental first & second differences at this scale
        self.velocity = ExponentialMovingAverage(recency_weight, value=0, n=1)
        self.acceleration = ExponentialMovingAverage(recency_weight, value=0, n=1)
        # recency-weighted loss statistics: absolute loss & signed one-sided errors
        self.loss = ExponentialMovingAverage(recency_weight, value=1, n=1)
        self.pos_err = ExponentialMovingAverage(recency_weight, value=1, n=1)
        self.neg_err = ExponentialMovingAverage(recency_weight, value=-1, n=1)
        # recency-weighted variances, used to derive standard errors
        self.vel_var = RecencyWeightedVariance(recency_weight, ex_value=0, ex2_value=0, n=1)
        self.loss_var = RecencyWeightedVariance(recency_weight, ex_value=1, ex2_value=1, n=1)
        self.scale = scale
        self.recency_weight = recency_weight

    @property
    def lag(self):
        # transform which computes the lag-`scale` difference of a time series
        return LagTransform(self.scale)

    def update_velocity(self, vels: UnivariateTimeSeries):
        self.velocity.add_batch(vels.values)
        self.vel_var.add_batch(vels.values)

    def update_acceleration(self, accs: UnivariateTimeSeries):
        self.acceleration.add_batch(accs.values)

    def update_loss(self, losses: Union[List[float], UnivariateTimeSeries]):
        if isinstance(losses, UnivariateTimeSeries):
            losses = losses.values
        # update the signed one-sided errors; zero losses update neither side
        for loss in losses:
            if loss > 0:
                self.pos_err.add(loss)
            elif loss < 0:
                self.neg_err.add(loss)
        # update the absolute loss statistics
        losses = np.abs(losses).tolist()
        self.loss.add_batch(losses)
        self.loss_var.add_batch(losses)

    def tune(self, losses: List[float], eta: float):
        """
        Tunes the recency weight according to recent forecast losses.

        :param losses: List of recent losses.
        :param eta: Constant by which to scale the update to the recency weight.
            A bigger eta means more aggressive updates to the recency_weight.
        """
        # don't tune statistics that have never been updated (n is still the initial 1)
        if self.velocity.n == 1:
            return
        # only include acceleration once it has actually received updates
        tune_stats = [self.velocity, self.vel_var] + [self.acceleration] * (self.acceleration.n > 1)
        for loss in losses:
            nerr = np.tanh(eta * loss)
            # move the recency weight toward 1 on positive error, toward 0 otherwise
            for stat in tune_stats:
                gap = 1.0 - stat.recency_weight if nerr > 0 else stat.recency_weight
                stat.recency_weight += eta * gap * nerr
class DeltaEstimator:
    """
    Class for estimating the delta for MSES.
    """

    def __init__(
        self,
        max_scale: int,
        recency_weight: float,
        accel_weight: float,
        optimize_acc: bool,
        eta: float,
        phi: float,
        data: UnivariateTimeSeries = None,
        stats: Dict[int, DeltaStats] = None,
    ):
        """
        :param max_scale: Delta Estimator can estimate delta over multiple scales, or
            time steps, ranging from 1,2,...,max_scale.
        :param recency_weight: The recency weight parameter to use when estimating delta_hat.
        :param accel_weight: The weight to scale the acceleration by when computing delta_hat.
            Specifically, delta_hat(b+h) = velocity(b+h) + accel_weight * acceleration(b+h).
        :param optimize_acc: If True, the acceleration correction will only be used at scales
            ranging from 1,...,max_scale/2.
        :param eta: The parameter used to control the rate at which recency_weight gets
            tuned when online updates are made to the model and losses can be computed.
        :param phi: The exponent used to inflate the magnitude of loss error at different
            scales; the loss error at scale ``s`` is scaled by ``s ** phi``.
        :param data: The data to initialize the delta estimator with.
        :param stats: Dictionary mapping scales to DeltaStats objects to be used for delta
            estimation.
        """
        # NOTE: stats & recency_weight must be assigned before max_scale, since the
        # max_scale setter reads both; data comes last since its setter reads max_scale.
        self.stats = dict() if stats is None else stats
        self.recency_weight = recency_weight
        self.max_scale = max_scale
        self.accel_weight = accel_weight
        self.optimize_acc = optimize_acc
        self.eta = eta
        self.phi = phi
        self.data = UnivariateTimeSeries.empty() if data is None else data

    @property
    def max_scale(self):
        return self._max_scale

    @property
    def acc_max_scale(self):
        # largest scale at which the acceleration correction is applied
        return floor(self.max_scale / 2) if self.optimize_acc else self.max_scale

    @max_scale.setter
    def max_scale(self, scale: int):
        self._max_scale = scale
        # drop stats for scales that are no longer needed, and create fresh stats for
        # any newly needed scale. (The loop variable is `s`, not `scale`, to avoid
        # shadowing the setter's parameter.)
        self.stats = {s: ds for s, ds in self.stats.items() if s <= scale}
        for s in range(1, self.max_scale + 1):
            if s not in self.stats:
                self.stats[s] = DeltaStats(s, self.recency_weight)

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, data: UnivariateTimeSeries):
        """
        Only keeps data necessary for future updates

        let previous seen and used data be ...,x(t-1), x(t).
        If an incoming batch of data x(t+1),...,x(t+B) arrives, we need
        to compute both v(t+1),...,v(t+B) and a(t+1),...,a(t+B).

        v(t+1),...,v(t+B) requires x(t+1-scale),...,x(t+B)
        a(t+1),...,a(t+B) requires v(t+1-scale),...,v(t+B) requires x(t+1-2*scale),...,x(t+B)

        From the data already seen we need to retain x(t+1-2*scale),...,x(t)
        which are the last 2*scale points.

        :param data: time series to retain for future updates.
        """
        self._data = data[-(2 * self.max_scale + 1) :]

    @property
    def x(self):
        return self.data.values

    def __setstate__(self, state):
        # backwards compatibility: older serializations stored `_data` as a pd.Series
        for name, value in state.items():
            if name == "_data" and isinstance(value, pd.Series):
                setattr(self, name, UnivariateTimeSeries.from_pd(value))
            else:
                setattr(self, name, value)

    def train(self, new_data: UnivariateTimeSeries):
        """
        Updates the delta statistics: velocity, acceleration and velocity
        standard error at each scale using new data.

        :param new_data: new datapoints in the time series.
        """
        needed_data = self.data.concat(new_data)
        for scale, stat in self.stats.items():
            # a lag-`scale` difference needs at least scale + 1 points
            if len(needed_data) < scale + 1:
                continue
            # compute and update velocity
            vels = stat.lag(needed_data.to_ts())
            bs = min(len(new_data), len(vels))
            stat.update_velocity(vels[-bs:].univariates[vels.names[0]])
            # compute and update acceleration
            if len(needed_data) >= 2 * scale + 1 and scale <= self.acc_max_scale:
                accs = stat.lag(vels)
                bs = min(len(new_data), len(accs))
                stat.update_acceleration(accs[-bs:].univariates[accs.names[0]])
        # update data to retain for future updates
        self.data = needed_data

    def process_losses(self, scale_losses: Dict[int, List[float]], tune_recency_weights: bool = False):
        """
        Uses recent forecast errors to improve the delta estimator. This is done by
        updating the loss statistics and, optionally, the recency_weight that is used
        by delta stats at particular scales.

        :param scale_losses: A dictionary mapping a scale to a list of forecasting errors
            that are associated with that scale.
        :param tune_recency_weights: If True, also tune the recency weight using the losses.
        """
        for scale, losses in scale_losses.items():
            stat = self.stats.get(scale)
            if stat is None:
                # robustness fix: skip scales this estimator doesn't track instead of
                # raising AttributeError on None
                continue
            stat.update_loss(losses)
            if tune_recency_weights:
                stat.tune(losses, self.eta)

    def velocity(self, scale: int) -> float:
        stat = self.stats.get(scale)
        return 0 if stat is None else stat.velocity.value

    def acceleration(self, scale: int) -> float:
        stat = self.stats.get(scale)
        return 0 if stat is None else stat.acceleration.value

    def vel_err(self, scale: int) -> float:
        stat = self.stats.get(scale)
        return np.inf if stat is None else stat.vel_var.sd

    def pos_err(self, scale: int) -> float:
        stat = self.stats.get(scale)
        return 1 if stat is None else stat.pos_err.value

    def neg_err(self, scale: int) -> float:
        stat = self.stats.get(scale)
        return 1 if stat is None else stat.neg_err.value

    def loss_err(self, scale: int) -> float:
        stat = self.stats.get(scale)
        if stat is None or stat.loss.value is None:
            return np.inf
        # loss error is inflated by scale ** phi (see MSESConfig docs)
        return (scale**self.phi) * stat.loss.value * stat.loss_var.se

    def delta_hat(self, scale: int) -> float:
        """delta_hat(scale) = velocity(scale) + accel_weight * acceleration(scale)"""
        return self.velocity(scale) + self.accel_weight * self.acceleration(scale)
import copy
import logging
import os
from typing import Iterable, List, Tuple, Union
import warnings
import numpy as np
import pandas as pd
import prophet
import prophet.serialize
from merlion.models.automl.seasonality import SeasonalityModel
from merlion.models.forecast.base import ForecasterExogBase, ForecasterExogConfig
from merlion.utils import TimeSeries, UnivariateTimeSeries, to_pd_datetime, to_timestamp
logger = logging.getLogger(__name__)
class _suppress_stdout_stderr(object):
"""
A context manager for doing a "deep suppression" of stdout and stderr in
Python, i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
This will not suppress raised exceptions, since exceptions are printed
to stderr just before a script exits, and after the context manager has
exited (at least, I think that is why it lets exceptions through).
Source: https://github.com/facebook/prophet/issues/223#issuecomment-326455744
"""
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = [os.dup(1), os.dup(2)]
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
# Close the null files
for fd in self.null_fds + self.save_fds:
os.close(fd)
class ProphetConfig(ForecasterExogConfig):
    """
    Configuration class for Facebook's `Prophet` model, as described by
    `Taylor & Letham, 2017 <https://peerj.com/preprints/3190/>`__.
    """

    def __init__(
        self,
        max_forecast_steps: int = None,
        target_seq_index: int = None,
        yearly_seasonality: Union[bool, int] = "auto",
        weekly_seasonality: Union[bool, int] = "auto",
        daily_seasonality: Union[bool, int] = "auto",
        seasonality_mode="additive",
        holidays=None,
        uncertainty_samples: int = 100,
        **kwargs,
    ):
        """
        :param max_forecast_steps: Max # of steps we would like to forecast for.
        :param target_seq_index: The index of the univariate (amongst all univariates
            in a general multivariate time series) whose value we would like to forecast.
        :param yearly_seasonality: If bool, whether to enable yearly seasonality. By
            default, it is activated if there are >= 2 years of history, but deactivated
            otherwise. If int, this is the number of Fourier series components used to
            model the seasonality (default = 10).
        :param weekly_seasonality: If bool, whether to enable weekly seasonality. By
            default, it is activated if there are >= 2 weeks of history, but deactivated
            otherwise. If int, this is the number of Fourier series components used to
            model the seasonality (default = 3).
        :param daily_seasonality: If bool, whether to enable daily seasonality. By
            default, it is activated if there are >= 2 days of history, but deactivated
            otherwise. If int, this is the number of Fourier series components used to
            model the seasonality (default = 4).
        :param seasonality_mode: 'additive' (default) or 'multiplicative'.
        :param holidays: pd.DataFrame with columns holiday (string) and ds (date type)
            and optionally columns lower_window and upper_window which specify a range
            of days around the date to be included as holidays. lower_window=-2 will
            include 2 days prior to the date as holidays. Also optionally can have a
            column prior_scale specifying the prior scale for that holiday. Can also be
            a dict corresponding to the desired pd.DataFrame.
        :param uncertainty_samples: The number of posterior samples to draw in order to
            calibrate the anomaly scores.
        """
        super().__init__(max_forecast_steps=max_forecast_steps, target_seq_index=target_seq_index, **kwargs)
        # per-component seasonality switches / Fourier orders
        self.yearly_seasonality = yearly_seasonality
        self.weekly_seasonality = weekly_seasonality
        self.daily_seasonality = daily_seasonality
        # global seasonality behavior & holiday effects
        self.seasonality_mode = seasonality_mode
        self.holidays = holidays
        # posterior sampling used for uncertainty calibration
        self.uncertainty_samples = uncertainty_samples
class Prophet(ForecasterExogBase, SeasonalityModel):
    """
    Facebook's model for time series forecasting. See docs for `ProphetConfig`
    and `Taylor & Letham, 2017 <https://peerj.com/preprints/3190/>`__ for more details.
    """

    config_class = ProphetConfig

    def __init__(self, config: ProphetConfig):
        super().__init__(config)
        # Underlying prophet model, configured directly from the config properties below.
        self.model = prophet.Prophet(
            yearly_seasonality=self.yearly_seasonality,
            weekly_seasonality=self.weekly_seasonality,
            daily_seasonality=self.daily_seasonality,
            seasonality_mode=self.seasonality_mode,
            uncertainty_samples=self.uncertainty_samples,
            holidays=None if self.holidays is None else pd.DataFrame(self.holidays),
        )

    @property
    def require_even_sampling(self) -> bool:
        # prophet accepts unevenly sampled time series
        return False

    def __getstate__(self):
        """Serialize the prophet model to JSON when possible; deepcopy otherwise."""
        try:
            model = prophet.serialize.model_to_json(self.model)
        except ValueError:  # prophet.serialize only works for fitted models, so deepcopy as a backup
            model = copy.deepcopy(self.model)
        return {k: model if k == "model" else copy.deepcopy(v) for k, v in self.__dict__.items()}

    def __setstate__(self, state):
        """Restore state, rebuilding the prophet model from JSON if it was serialized that way."""
        if "model" in state:
            model = state["model"]
            if isinstance(model, str):
                # copy the state dict so we don't mutate the caller's object
                state = copy.copy(state)
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    state["model"] = prophet.serialize.model_from_json(model)
        super().__setstate__(state)

    @property
    def yearly_seasonality(self):
        return self.config.yearly_seasonality

    @property
    def weekly_seasonality(self):
        return self.config.weekly_seasonality

    @property
    def daily_seasonality(self):
        return self.config.daily_seasonality

    @property
    def add_seasonality(self):
        # NOTE(review): ProphetConfig in this file never sets an `add_seasonality`
        # attribute, so accessing this property appears to raise AttributeError —
        # confirm whether the config is expected to carry this field.
        return self.config.add_seasonality

    @property
    def seasonality_mode(self):
        return self.config.seasonality_mode

    @property
    def holidays(self):
        return self.config.holidays

    @property
    def uncertainty_samples(self):
        return self.config.uncertainty_samples

    def set_seasonality(self, theta, train_data: UnivariateTimeSeries):
        """
        Adds extra seasonalities (periods given in multiples of the sampling interval)
        to the underlying prophet model.
        """
        theta = [theta] if not isinstance(theta, Iterable) else theta
        # sampling interval, inferred from the first two timestamps
        dt = train_data.index[1] - train_data.index[0]
        for p in theta:
            if p > 1:
                # convert the period from number of samples to days, as prophet expects
                period = p * dt.total_seconds() / 86400
                logger.debug(f"Add seasonality {str(p)} ({p * dt})")
                self.model.add_seasonality(name=f"extra_season_{p}", period=period, fourier_order=p)

    def _add_exog_data(self, data: pd.DataFrame, exog_data: pd.DataFrame):
        """
        Builds the dataframe format prophet expects: a "ds" timestamp column, a "y"
        target column, and one column per exogenous regressor.
        """
        df = pd.DataFrame(data[self.target_name].rename("y"))
        if exog_data is not None:
            df = df.join(exog_data, how="outer")
        df.index.rename("ds", inplace=True)
        df.reset_index(inplace=True)
        return df

    def _train_with_exog(
        self, train_data: pd.DataFrame, train_config=None, exog_data: pd.DataFrame = None
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """Fits prophet (with optional exogenous regressors) and returns the train forecast & stderr."""
        if exog_data is not None:
            for col in exog_data.columns:
                self.model.add_regressor(col)
        df = self._add_exog_data(train_data, exog_data)
        with _suppress_stdout_stderr():
            self.model.fit(df)

        # Get & return prediction & errors for train data.
        # sigma computation based on https://github.com/facebook/prophet/issues/549#issuecomment-435482584
        # Temporarily disable posterior sampling so predict() returns a fast MAP estimate.
        self.model.uncertainty_samples = 0
        forecast = self.model.predict(df)["yhat"].values.tolist()
        sigma = (self.model.params["sigma_obs"] * self.model.y_scale).item()
        self.model.uncertainty_samples = self.uncertainty_samples
        yhat = pd.DataFrame(forecast, index=df.ds, columns=[self.target_name])
        err = pd.DataFrame(sigma, index=df.ds, columns=[f"{self.target_name}_err"])
        return yhat, err

    def _forecast_with_exog(
        self,
        time_stamps: List[int],
        time_series_prev: pd.DataFrame = None,
        return_prev=False,
        exog_data: pd.DataFrame = None,
        exog_data_prev: pd.DataFrame = None,
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """Forecasts at the given timestamps, optionally conditioning on recent data."""
        # Construct data frame for prophet
        time_stamps = to_pd_datetime(time_stamps)
        df = self._add_exog_data(data=pd.DataFrame({self.target_name: np.nan}, index=time_stamps), exog_data=exog_data)
        if time_series_prev is not None:
            past = self._add_exog_data(time_series_prev, exog_data_prev)
            df = pd.concat((past, df))

        # Determine the right set of timestamps to use
        if return_prev and time_series_prev is not None:
            time_stamps = df["ds"]

        # Get MAP estimate from prophet (posterior sampling temporarily disabled for speed)
        self.model.uncertainty_samples = 0
        yhat = self.model.predict(df)["yhat"].values
        self.model.uncertainty_samples = self.uncertainty_samples

        # Get posterior samples for uncertainty estimation
        resid_samples = self.model.predictive_samples(df)["yhat"] - np.expand_dims(yhat, -1)

        # Return the MAP estimate & stderr, restricted to the requested timestamps
        yhat = yhat[-len(time_stamps) :]
        resid_samples = resid_samples[-len(time_stamps) :]
        name = self.target_name
        yhat = pd.DataFrame(yhat, index=time_stamps, columns=[name])
        err = pd.DataFrame(np.std(resid_samples, axis=-1), index=time_stamps, columns=[f"{name}_err"])
        return yhat, err
import logging
from lightgbm import LGBMRegressor
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.multioutput import MultiOutputRegressor
from merlion.models.forecast.sklearn_base import SKLearnForecaster, SKLearnForecasterConfig
logger = logging.getLogger(__name__)
class _TreeEnsembleForecasterConfig(SKLearnForecasterConfig):
    """
    Shared configuration for the bagging tree-ensemble forecaster models.
    """

    def __init__(self, n_estimators: int = 100, max_depth: int = None, random_state: int = None, **kwargs):
        """
        :param n_estimators: number of base estimators for the tree ensemble
        :param max_depth: max depth of base estimators
        :param random_state: random seed for bagging
        """
        super().__init__(**kwargs)
        # ensemble size & per-tree depth limit
        self.n_estimators = n_estimators
        self.max_depth = max_depth
        # seed controlling the randomized sub-sampling
        self.random_state = random_state
class RandomForestForecasterConfig(_TreeEnsembleForecasterConfig):
    """
    Config class for `RandomForestForecaster`.
    """

    def __init__(self, min_samples_split: int = 2, **kwargs):
        """
        :param min_samples_split: min split for tree leaves
        """
        super().__init__(**kwargs)
        # minimum number of samples required to split an internal tree node
        self.min_samples_split = min_samples_split
class RandomForestForecaster(SKLearnForecaster):
    """
    Random forest regressor for time series forecasting.

    A random forest is a meta estimator that fits a number of classifying decision
    trees on various sub-samples of the dataset, averaging their predictions to
    improve accuracy and control over-fitting.
    """

    config_class = RandomForestForecasterConfig

    def __init__(self, config: RandomForestForecasterConfig):
        super().__init__(config)
        cfg = self.config
        self.model = RandomForestRegressor(
            n_estimators=cfg.n_estimators,
            max_depth=cfg.max_depth,
            min_samples_split=cfg.min_samples_split,
            random_state=cfg.random_state,
        )
class ExtraTreesForecasterConfig(_TreeEnsembleForecasterConfig):
    """
    Config class for `ExtraTreesForecaster`.
    """

    def __init__(self, min_samples_split: int = 2, **kwargs):
        """
        :param min_samples_split: min split for tree leaves
        """
        super().__init__(**kwargs)
        # minimum number of samples required to split an internal tree node
        self.min_samples_split = min_samples_split
class ExtraTreesForecaster(SKLearnForecaster):
    """
    Extra-trees regressor for time series forecasting.

    Extra Trees is a meta estimator that fits a number of randomized decision trees
    (a.k.a. extra-trees) on various sub-samples of the dataset, averaging their
    predictions to improve accuracy and control over-fitting.
    """

    config_class = ExtraTreesForecasterConfig

    def __init__(self, config: ExtraTreesForecasterConfig):
        super().__init__(config)
        cfg = self.config
        self.model = ExtraTreesRegressor(
            n_estimators=cfg.n_estimators,
            max_depth=cfg.max_depth,
            min_samples_split=cfg.min_samples_split,
            random_state=cfg.random_state,
        )
class LGBMForecasterConfig(_TreeEnsembleForecasterConfig):
    """
    Config class for `LGBMForecaster`.
    """

    def __init__(self, learning_rate: float = 0.1, n_jobs: int = -1, **kwargs):
        """
        :param learning_rate: learning rate for boosting
        :param n_jobs: num of threading, -1 or 0 indicates device default, positive int indicates num of threads
        """
        super().__init__(**kwargs)
        # boosting step size & LGBM-internal thread count
        self.learning_rate = learning_rate
        self.n_jobs = n_jobs
class LGBMForecaster(SKLearnForecaster):
    """
    Light gradient boosting (LGBM) regressor for time series forecasting
    LightGBM is a light weight and fast gradient boosting framework that uses tree based learning algorithms, for more
    details, please refer to the document https://lightgbm.readthedocs.io/en/latest/Features.html
    """

    config_class = LGBMForecasterConfig

    def __init__(self, config: LGBMForecasterConfig):
        """
        :param config: model configuration carrying the boosting hyperparameters
        """
        super().__init__(config)
        # LGBMRegressor predicts a single output, so it is wrapped in a
        # MultiOutputRegressor for multivariate targets. Threading is controlled by
        # the inner model's n_jobs; the wrapper uses n_jobs=1, presumably to avoid
        # nested parallelism — TODO confirm.
        self.model = MultiOutputRegressor(
            LGBMRegressor(
                learning_rate=self.config.learning_rate,
                n_estimators=self.config.n_estimators,
                max_depth=self.config.max_depth,
                random_state=self.config.random_state,
                n_jobs=self.config.n_jobs,
            ),
            n_jobs=1,
        ) | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/merlion/models/forecast/trees.py | 0.903631 | 0.357259 | trees.py | pypi |
import copy
import logging
import math
import numpy as np
import pandas as pd
from scipy.stats import norm
from typing import List, Optional, Tuple, Union
from abc import abstractmethod
try:
import torch
import torch.nn as nn
import torch.nn.functional as F
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
from merlion.models.base import NormalizingConfig
from merlion.models.deep_base import TorchModel
from merlion.models.forecast.deep_base import DeepForecasterConfig, DeepForecaster
from merlion.models.utils.nn_modules import FullAttention, AttentionLayer, DataEmbedding, ConvLayer
from merlion.models.utils.nn_modules.enc_dec_transformer import Encoder, EncoderLayer, Decoder, DecoderLayer
from merlion.utils.misc import initializer
logger = logging.getLogger(__name__)
class TransformerConfig(DeepForecasterConfig, NormalizingConfig):
    """
    Transformer for time series forecasting.
    Code adapted from https://github.com/thuml/Autoformer.
    """

    # NOTE: @initializer auto-assigns every constructor argument as an attribute of
    # this config; the model later reads e.g. ``config.factor`` and ``config.model_dim``
    # even though they are never assigned explicitly below.
    @initializer
    def __init__(
        self,
        n_past,
        max_forecast_steps: int = None,
        encoder_input_size: int = None,
        decoder_input_size: int = None,
        num_encoder_layers: int = 2,
        num_decoder_layers: int = 1,
        start_token_len: int = 0,
        factor: int = 3,
        model_dim: int = 512,
        embed: str = "timeF",
        dropout: float = 0.05,
        activation: str = "gelu",
        n_heads: int = 8,
        fcn_dim: int = 2048,
        distil: bool = True,
        **kwargs
    ):
        """
        :param n_past: # of past steps used for forecasting future.
        :param max_forecast_steps: Max # of steps we would like to forecast for.
        :param encoder_input_size: Input size of encoder. If ``encoder_input_size = None``,
            then the model will automatically use ``config.dim``, which is the dimension of the input data.
        :param decoder_input_size: Input size of decoder. If ``decoder_input_size = None``,
            then the model will automatically use ``config.dim``, which is the dimension of the input data.
        :param num_encoder_layers: Number of encoder layers.
        :param num_decoder_layers: Number of decoder layers.
        :param start_token_len: Length of start token for deep transformer encoder-decoder based models.
            The start token is similar to the special tokens for NLP models (e.g., bos, sep, eos tokens).
        :param factor: Attention factor.
        :param model_dim: Dimension of the model.
        :param embed: Time feature encoding type, options include ``timeF``, ``fixed`` and ``learned``.
        :param dropout: dropout rate.
        :param activation: Activation function, can be ``gelu``, ``relu``, ``sigmoid``, etc.
        :param n_heads: Number of heads of the model.
        :param fcn_dim: Hidden dimension of the MLP layer in the model.
        :param distil: whether to use distilling in the encoder of the model.
        """
        # Only n_past / max_forecast_steps are forwarded explicitly; the rest are
        # captured by @initializer.
        super().__init__(n_past=n_past, max_forecast_steps=max_forecast_steps, **kwargs)
class TransformerModel(TorchModel):
    """
    Implementaion of Transformer deep torch model.

    A standard encoder-decoder transformer: the encoder attends over the past
    window, and the decoder attends over a zero-initialized future window
    (optionally prefixed with a "start token" slice of the past) to produce the
    forecast.
    """

    def __init__(self, config: TransformerConfig):
        super().__init__(config)
        if config.dim is not None:
            # Default encoder/decoder input sizes to the data dimension when unspecified.
            config.encoder_input_size = config.dim if config.encoder_input_size is None else config.encoder_input_size
            config.decoder_input_size = (
                config.encoder_input_size if config.decoder_input_size is None else config.decoder_input_size
            )
        # The decoder projects back to one output per input channel.
        config.c_out = config.encoder_input_size
        self.n_past = config.n_past
        self.start_token_len = config.start_token_len
        self.max_forecast_steps = config.max_forecast_steps
        self.enc_embedding = DataEmbedding(
            config.encoder_input_size, config.model_dim, config.embed, config.ts_encoding, config.dropout
        )
        self.dec_embedding = DataEmbedding(
            config.decoder_input_size, config.model_dim, config.embed, config.ts_encoding, config.dropout
        )
        # Encoder
        self.encoder = Encoder(
            [
                EncoderLayer(
                    AttentionLayer(
                        FullAttention(False, config.factor, attention_dropout=config.dropout, output_attention=False),
                        config.model_dim,
                        config.n_heads,
                    ),
                    config.model_dim,
                    config.fcn_dim,
                    dropout=config.dropout,
                    activation=config.activation,
                )
                for l in range(config.num_encoder_layers)
            ],
            norm_layer=torch.nn.LayerNorm(config.model_dim),
        )
        # Decoder: masked (causal) self-attention followed by cross-attention over
        # the encoder output.
        self.decoder = Decoder(
            [
                DecoderLayer(
                    AttentionLayer(
                        FullAttention(True, config.factor, attention_dropout=config.dropout, output_attention=False),
                        config.model_dim,
                        config.n_heads,
                    ),
                    AttentionLayer(
                        FullAttention(False, config.factor, attention_dropout=config.dropout, output_attention=False),
                        config.model_dim,
                        config.n_heads,
                    ),
                    config.model_dim,
                    config.fcn_dim,
                    dropout=config.dropout,
                    activation=config.activation,
                )
                for l in range(config.num_decoder_layers)
            ],
            norm_layer=torch.nn.LayerNorm(config.model_dim),
            projection=nn.Linear(config.model_dim, config.c_out, bias=True),
        )

    def forward(
        self,
        past,
        past_timestamp,
        future_timestamp,
        enc_self_mask=None,
        dec_self_mask=None,
        dec_enc_mask=None,
        **kwargs
    ):
        """
        :param past: past window tensor; assumed shape (batch, n_past, encoder_input_size) — TODO confirm
        :param past_timestamp: time-feature encodings for the past window
        :param future_timestamp: time-feature encodings for the forecast horizon
        :return: forecast over the last ``max_forecast_steps`` decoder positions; a single
            channel if ``target_seq_index`` is set, else all channels.
        """
        config = self.config
        # The decoder input is the last start_token_len steps of the past, followed
        # by zeros for the steps to be forecasted.
        start_token = past[:, (past.shape[1] - self.start_token_len) :]
        dec_inp = torch.zeros(
            past.shape[0], self.max_forecast_steps, config.decoder_input_size, dtype=torch.float, device=self.device
        )
        dec_inp = torch.cat([start_token, dec_inp], dim=1)
        # Prepend the matching start-token timestamps so dec_inp & its time features align.
        future_timestamp = torch.cat(
            [past_timestamp[:, (past_timestamp.shape[1] - self.start_token_len) :], future_timestamp], dim=1
        )
        enc_out = self.enc_embedding(past, past_timestamp)
        enc_out, attns = self.encoder(enc_out, attn_mask=enc_self_mask)
        dec_out = self.dec_embedding(dec_inp, future_timestamp)
        dec_out = self.decoder(dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask)
        if self.config.target_seq_index is not None:
            # NOTE(review): returns channel 0 regardless of target_seq_index — presumably
            # the target univariate is moved to the first channel upstream; confirm
            # against the deep forecaster's data preparation.
            return dec_out[:, -self.max_forecast_steps :, :1]
        else:
            return dec_out[:, -self.max_forecast_steps :, :]
class TransformerForecaster(DeepForecaster):
    """
    Implementaion of Transformer deep forecaster

    Thin wrapper binding `TransformerConfig` and `TransformerModel` into the
    `DeepForecaster` training/inference framework.
    """

    config_class = TransformerConfig
    deep_model_class = TransformerModel

    def __init__(self, config: TransformerConfig):
        super().__init__(config) | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/merlion/models/forecast/transformer.py | 0.91782 | 0.344058 | transformer.py | pypi |
import logging
from typing import List, Tuple
import numpy as np
import pandas as pd
from statsmodels.tsa.api import VAR as sm_VAR
from statsmodels.tsa.arima.model import ARIMA as sm_ARIMA
from merlion.models.forecast.base import ForecasterExogBase, ForecasterExogConfig
from merlion.transform.resample import TemporalResample
from merlion.utils.time_series import to_pd_datetime
logger = logging.getLogger(__name__)
class VectorARConfig(ForecasterExogConfig):
    """
    Config object for `VectorAR` forecaster.
    """

    _default_transform = TemporalResample()
    """
    By default, temporally resample the input, since AR/VAR models require an evenly-spaced series.
    """

    def __init__(self, maxlags: int = None, target_seq_index: int = None, **kwargs):
        """
        :param maxlags: Max # of lags for AR. If ``None``, a value is chosen
            automatically at training time based on the training data length.
        :param target_seq_index: The index of the univariate (amongst all
            univariates in a general multivariate time series) whose value we
            would like to forecast.
        """
        super().__init__(target_seq_index=target_seq_index, **kwargs)
        self.maxlags = maxlags
class VectorAR(ForecasterExogBase):
    """
    Vector AutoRegressive model for multivariate time series forecasting.

    Univariate data is handled as an AR(maxlags) model (via statsmodels ARIMA);
    multivariate data uses a statsmodels VAR model.
    """

    config_class = VectorARConfig

    def __init__(self, config: VectorARConfig):
        super().__init__(config)
        # Fitted statsmodels model; set by _train_with_exog().
        self.model = None
        # Last `maxlags` rows of the (transformed) training data, kept so that
        # multivariate forecasts can be initialized when no time_series_prev is given.
        self._pd_train_data = None

    @property
    def require_even_sampling(self) -> bool:
        # AR/VAR assume an evenly-spaced series.
        return True

    @property
    def maxlags(self) -> int:
        # Max # of lags for the autoregression.
        return self.config.maxlags

    def _train_with_exog(
        self, train_data: pd.DataFrame, train_config=None, exog_data: pd.DataFrame = None
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
        # Default lag order: ~10% of the training length (capped at 20), but at
        # least the forecast horizon.
        if self.maxlags is None:
            self.config.maxlags = max(min(20, len(train_data) // 10), self.max_forecast_steps or 1)

        # train model
        if self.dim == 1:
            train_data = train_data.iloc[:, 0]
            # Univariate: AR(maxlags) as an ARIMA(p, 0, 0), fit via Yule-Walker.
            self.model = sm_ARIMA(train_data, exog=exog_data, order=(self.maxlags, 0, 0))
            self.model = self.model.fit(method="yule_walker", cov_type="oim")
        else:
            self.model = sm_VAR(train_data, exog=exog_data).fit(self.maxlags)

        i = self.target_seq_index
        # In-sample prediction for the target = observed value minus model residual.
        resid = self.model.resid
        pred = train_data - resid if self.dim == 1 else (train_data - resid).iloc[:, i]
        nanpred = pred.isna()
        if nanpred.any():
            # Early residuals can be NaN (no lags available); fall back to observations.
            pred[nanpred] = train_data.loc[nanpred, self.target_name]
        if self.dim == 1:
            # Constant stderr from the fitted innovation variance sigma2.
            pred_err = [np.sqrt(self.model.params["sigma2"]).item()] * len(pred)
        else:
            self._pd_train_data = train_data.iloc[-self.maxlags :]
            pred_err = [self.model.cov_ybar()[i, i].item()] * len(pred)

        pred = pd.DataFrame(pred, index=train_data.index, columns=[self.target_name])
        pred_err = pd.DataFrame(pred_err, index=train_data.index, columns=[f"{self.target_name}_err"])
        return pred, pred_err

    def _forecast_with_exog(
        self,
        time_stamps: List[int],
        time_series_prev: pd.DataFrame = None,
        return_prev=False,
        exog_data: pd.DataFrame = None,
        exog_data_prev: pd.DataFrame = None,
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
        if time_series_prev is not None:
            # The model needs at least maxlags rows of history to condition on.
            assert (
                len(time_series_prev) >= self.maxlags
            ), f"time_series_prev has length of {len(time_series_prev)}, which is shorter than the model's maxlags"
            assert not return_prev, "VectorAR.forecast() does not support return_prev=True"

        n = len(time_stamps)
        # Condition on the provided history, or the tail of the training data.
        prev = self._pd_train_data if time_series_prev is None else time_series_prev.iloc[-self.maxlags :]
        exog_data_prev = None if exog_data_prev is None else exog_data_prev.loc[prev.index]
        if self.dim == 1:
            if time_series_prev is None:
                model = self.model
            else:
                # Re-apply the fitted parameters to the new history without refitting.
                model = self.model.apply(prev, exog=exog_data_prev, validate_specification=False)
            forecast_result = model.get_forecast(steps=n, exog=exog_data)
            yhat = forecast_result.predicted_mean
            err = forecast_result.se_mean
        else:
            # Temporarily swap the VAR model's exog to the history's exog, forecast,
            # then restore it.
            old_exog = self.model.exog
            exog = None if exog_data is None else exog_data.values
            self.model.exog = old_exog if exog_data_prev is None else exog_data_prev.values
            yhat = self.model.forecast(prev.values, exog_future=exog, steps=n)[:, self.target_seq_index]
            err = np.sqrt(self.model.forecast_cov(n)[:, self.target_seq_index, self.target_seq_index])
            self.model.exog = old_exog

        name = self.target_name
        forecast = pd.DataFrame(np.asarray(yhat), index=to_pd_datetime(time_stamps), columns=[name])
        err = pd.DataFrame(np.asarray(err), index=forecast.index, columns=[f"{name}_err"])
        return forecast, err | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/merlion/models/forecast/vector_ar.py | 0.874848 | 0.52342 | vector_ar.py | pypi |
import copy
import logging
import math
import numpy as np
import pandas as pd
from scipy.stats import norm
from typing import List, Optional, Tuple, Union
from abc import abstractmethod
try:
import torch
import torch.nn as nn
import torch.nn.functional as F
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
from merlion.models.base import NormalizingConfig
from merlion.models.deep_base import TorchModel
from merlion.models.forecast.deep_base import DeepForecasterConfig, DeepForecaster
from merlion.models.utils.nn_modules import ETSEmbedding
from merlion.models.utils.nn_modules.enc_dec_etsformer import EncoderLayer, Encoder, DecoderLayer, Decoder
from merlion.utils.misc import initializer
logger = logging.getLogger(__name__)
class ETSformerConfig(DeepForecasterConfig, NormalizingConfig):
    """
    ETSformer: Exponential Smoothing Transformers for Time-series Forecasting: https://arxiv.org/abs/2202.01381
    Code adapted from https://github.com/salesforce/ETSformer.
    """

    # NOTE: @initializer auto-assigns every constructor argument as an attribute of
    # this config; the model later reads e.g. ``config.top_K`` and ``config.sigma``
    # even though they are never assigned explicitly below.
    @initializer
    def __init__(
        self,
        n_past,
        max_forecast_steps: int = None,
        encoder_input_size: int = None,
        decoder_input_size: int = None,
        num_encoder_layers: int = 2,
        num_decoder_layers: int = 2,
        model_dim: int = 512,
        dropout: float = 0.2,
        n_heads: int = 8,
        fcn_dim: int = 2048,
        top_K: int = 1,  # Top-K Fourier bases
        sigma=0.2,
        **kwargs
    ):
        """
        :param n_past: # of past steps used for forecasting future.
        :param max_forecast_steps: Max # of steps we would like to forecast for.
        :param encoder_input_size: Input size of encoder. If ``encoder_input_size = None``,
            then the model will automatically use ``config.dim``, which is the dimension of the input data.
        :param decoder_input_size: Input size of decoder. If ``decoder_input_size = None``,
            then the model will automatically use ``config.dim``, which is the dimension of the input data.
        :param num_encoder_layers: Number of encoder layers.
        :param num_decoder_layers: Number of decoder layers.
        :param model_dim: Dimension of the model.
        :param dropout: dropout rate.
        :param n_heads: Number of heads of the model.
        :param fcn_dim: Hidden dimension of the MLP layer in the model.
        :param top_K: Top-K Frequent Fourier basis.
        :param sigma: Standard derivation for ETS input data transform.
        """
        super().__init__(n_past=n_past, max_forecast_steps=max_forecast_steps, **kwargs)
class ETSformerModel(TorchModel):
    """
    Implementation of the ETSformer deep torch model.

    The encoder decomposes the past window into level, growth, and seasonality
    components via exponential-smoothing / frequency attention; the decoder
    extrapolates growth and seasonality over the horizon, and the forecast is
    ``level + growth + season``.
    """

    def __init__(self, config: ETSformerConfig):
        """
        :param config: model configuration; ``num_encoder_layers`` must equal
            ``num_decoder_layers``, since growth/season components are paired per layer.
        """
        super().__init__(config)
        assert (
            config.num_encoder_layers == config.num_decoder_layers
        ), "The number of encoder and decoder layers must be equal!"
        if config.dim is not None:
            # Default encoder/decoder input sizes to the data dimension when unspecified.
            config.encoder_input_size = config.dim if config.encoder_input_size is None else config.encoder_input_size
            config.decoder_input_size = (
                config.encoder_input_size if config.decoder_input_size is None else config.decoder_input_size
            )
        # The decoder emits one output per input channel.
        config.c_out = config.encoder_input_size
        self.n_past = config.n_past
        self.max_forecast_steps = config.max_forecast_steps

        self.enc_embedding = ETSEmbedding(config.encoder_input_size, config.model_dim, dropout=config.dropout)

        self.encoder = Encoder(
            [
                EncoderLayer(
                    config.model_dim,
                    config.n_heads,
                    config.c_out,
                    config.n_past,
                    config.max_forecast_steps,
                    config.top_K,
                    dim_feedforward=config.fcn_dim,
                    dropout=config.dropout,
                    output_attention=False,
                )
                for _ in range(config.num_encoder_layers)
            ]
        )

        # Decoder
        self.decoder = Decoder(
            [
                DecoderLayer(
                    config.model_dim,
                    config.n_heads,
                    config.c_out,
                    config.max_forecast_steps,
                    dropout=config.dropout,
                    output_attention=False,
                )
                for _ in range(config.num_decoder_layers)
            ],
        )

    def forward(
        self,
        past,
        past_timestamp,
        future_timestamp,
        enc_self_mask=None,
        dec_self_mask=None,
        dec_enc_mask=None,
        attention=False,
        **kwargs
    ):
        """
        :param past: past window tensor; assumed shape (batch, n_past, encoder_input_size) — TODO confirm
        :param attention: if True, also return aggregated season & growth attention maps.
        :return: forecast over ``max_forecast_steps`` steps (single channel when
            ``target_seq_index`` is set), plus attention maps if requested.
        """
        with torch.no_grad():
            # Data augmentation (jitter/scale/shift) is applied in training mode only,
            # and is excluded from the autograd graph.
            if self.training:
                past = self.transform(past)
        res = self.enc_embedding(past)
        level, growths, seasons, season_attns, growth_attns = self.encoder(res, past, attn_mask=enc_self_mask)

        growth, season, growth_dampings = self.decoder(growths, seasons)
        # Forecast = last level + extrapolated growth + extrapolated season.
        preds = level[:, -1:] + growth + season

        # maybe remove later
        if attention:
            decoder_growth_attns = []
            for growth_attn, growth_damping in zip(growth_attns, growth_dampings):
                decoder_growth_attns.append(torch.einsum("bth,oh->bhot", [growth_attn.squeeze(-1), growth_damping]))

            # Fixed: this branch previously referenced the undefined attribute
            # ``self.pred_len`` (the class only defines ``max_forecast_steps``) and
            # called ``reduce`` from the un-imported einops package. Use
            # max_forecast_steps and torch-native means over the layer (dim 0) and
            # head (dim 2) axes of the stacked (l, b, d, o, t) tensors instead.
            season_attns = torch.stack(season_attns, dim=0)[:, :, -self.max_forecast_steps :]
            season_attns = season_attns.mean(dim=(0, 2))
            decoder_growth_attns = torch.stack(decoder_growth_attns, dim=0)[:, :, -self.max_forecast_steps :]
            decoder_growth_attns = decoder_growth_attns.mean(dim=(0, 2))
            return preds, season_attns, decoder_growth_attns

        if self.config.target_seq_index is not None:
            # NOTE(review): returns channel 0 — assumes the target univariate is moved
            # to the first channel upstream; confirm against the data preparation.
            return preds[:, :, :1]
        else:
            return preds

    @torch.no_grad()
    def transform(self, x):
        """Training-time data augmentation: random scale, shift, then jitter."""
        return self.jitter(self.shift(self.scale(x)))

    def jitter(self, x):
        # Additive per-element Gaussian noise with std = config.sigma.
        return x + (torch.randn(x.shape).to(x.device) * self.config.sigma)

    def scale(self, x):
        # Multiplicative per-channel Gaussian noise centered at 1.
        return x * (torch.randn(x.size(-1)).to(x.device) * self.config.sigma + 1)

    def shift(self, x):
        # Additive per-channel Gaussian offset.
        return x + (torch.randn(x.size(-1)).to(x.device) * self.config.sigma)
class ETSformerForecaster(DeepForecaster):
    """
    Implementaion of ETSformer deep forecaster.

    Thin wrapper binding `ETSformerConfig` and `ETSformerModel` into the
    `DeepForecaster` training/inference framework.
    """

    config_class = ETSformerConfig
    deep_model_class = ETSformerModel

    def __init__(self, config: ETSformerConfig):
        super().__init__(config) | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/merlion/models/forecast/etsformer.py | 0.874118 | 0.376566 | etsformer.py | pypi |
from abc import abstractmethod
import copy
import logging
from typing import List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from scipy.stats import norm
from merlion.models.base import Config, ModelBase
from merlion.plot import Figure
from merlion.transform.base import TransformBase
from merlion.transform.factory import TransformFactory
from merlion.transform.normalize import MeanVarNormalize
from merlion.utils.time_series import to_pd_datetime, to_timestamp, TimeSeries, AggregationPolicy, MissingValuePolicy
logger = logging.getLogger(__name__)
class ForecasterConfig(Config):
    """
    Config object used to define a forecaster model.
    """

    max_forecast_steps: Optional[int] = None
    target_seq_index: Optional[int] = None
    invert_transform: Optional[bool] = None

    def __init__(self, max_forecast_steps: int = None, target_seq_index: int = None, invert_transform=None, **kwargs):
        """
        :param max_forecast_steps: Max # of steps we would like to forecast for. Required for some models like `MSES`.
        :param target_seq_index: The index of the univariate (amongst all univariates in a general multivariate time
            series) whose value we would like to forecast.
        :param invert_transform: Whether to automatically invert the ``transform`` before returning a forecast.
            By default, we will invert the transform for all base forecasters if it supports a proper inversion, but
            we will not invert it for forecaster-based anomaly detectors or transforms without proper inversions.
        """
        # Imported locally, presumably to avoid a circular import with the anomaly
        # detector module — TODO confirm.
        from merlion.models.anomaly.base import DetectorConfig

        super().__init__(**kwargs)
        if invert_transform is None:
            # Default: invert only proper inversions, and never for anomaly detectors.
            invert_transform = self.transform.proper_inversion and not isinstance(self, DetectorConfig)
        self.max_forecast_steps = max_forecast_steps
        self.target_seq_index = target_seq_index
        self.invert_transform = invert_transform
class ForecasterBase(ModelBase):
"""
Base class for a forecaster model.
.. note::
If your model depends on an evenly spaced time series, make sure to
1. Call `ForecasterBase.train_pre_process` in `ForecasterBase.train`
2. Call `ForecasterBase.resample_time_stamps` at the start of
`ForecasterBase.forecast` to get a set of resampled time stamps, and
call ``time_series.align(reference=time_stamps)`` to align the forecast
with the original time stamps.
"""
config_class = ForecasterConfig
target_name = None
"""
The name of the target univariate to forecast.
"""
def __init__(self, config: ForecasterConfig):
super().__init__(config)
self.target_name = None
self.exog_dim = None
    @property
    def max_forecast_steps(self):
        """
        :return: the max # of steps the model can forecast for, or ``None`` if unrestricted.
        """
        return self.config.max_forecast_steps
    @property
    def target_seq_index(self) -> int:
        """
        :return: the index of the univariate (amongst all univariates in a
            general multivariate time series) whose value we would like to forecast.
            May be ``None`` for models which support multivariate output.
        """
        return self.config.target_seq_index
    @property
    def invert_transform(self):
        """
        :return: Whether to automatically invert the ``transform`` before returning a forecast.
            Always False when the transform's inversion is the identity, since inverting
            would then be a no-op.
        """
        return self.config.invert_transform and not self.transform.identity_inversion
    @property
    def require_univariate(self) -> bool:
        """
        All forecasters can work on multivariate data, since they only forecast a single target univariate.

        :return: False
        """
        return False
    @property
    def support_multivariate_output(self) -> bool:
        """
        Indicating whether the forecasting model can forecast multivariate output.
        Subclasses which can forecast all univariates at once should override this.

        :return: False
        """
        return False
    def resample_time_stamps(self, time_stamps: Union[int, List[int]], time_series_prev: TimeSeries = None):
        """
        Converts a desired forecast horizon into a concrete list of timestamps, resampled
        to the model's training granularity where the model requires even sampling.
        The model must already be trained, so ``self.timedelta`` & ``self.last_train_time`` are set.

        :param time_stamps: either the number of future steps (``int``) or an explicit
            list of timestamps to forecast for.
        :param time_series_prev: if given & non-empty, forecasts start after its final
            timestamp instead of after the end of the training data.
        :return: the (possibly resampled) timestamps, as a list of Unix timestamps.
        """
        assert self.timedelta is not None and self.last_train_time is not None, (
            "train() must be called before you can call forecast(). "
            "If you have already called train(), make sure it sets "
            "self.timedelta and self.last_train_time appropriately."
        )

        # Determine timedelta & initial time of forecast
        dt, offset = self.timedelta, self.timedelta_offset
        if time_series_prev is not None and not time_series_prev.is_empty():
            t0 = to_pd_datetime(time_series_prev.tf)
        else:
            t0 = self.last_train_time

        # Handle the case where time_stamps is an integer
        if isinstance(time_stamps, (int, float)):
            n = int(time_stamps)
            assert self.max_forecast_steps is None or n <= self.max_forecast_steps
            resampled = pd.date_range(start=t0, periods=n + 1, freq=dt) + offset
            # Drop t0 itself (or the extra final point) so exactly n steps remain.
            resampled = resampled[1:] if resampled[0] == t0 else resampled[:-1]
            time_stamps = to_timestamp(resampled)
        elif not self.require_even_sampling:
            # Models tolerant of uneven sampling forecast at the requested stamps directly.
            resampled = to_pd_datetime(time_stamps)
        # Handle the cases where we don't have a max_forecast_steps
        elif self.max_forecast_steps is None:
            tf = to_pd_datetime(time_stamps[-1])
            resampled = pd.date_range(start=t0, end=tf + 2 * dt, freq=dt) + offset
            if resampled[0] == t0:
                resampled = resampled[1:]
            if len(resampled) > 1 and resampled[-2] >= tf:
                resampled = resampled[:-1]
        # Handle the case where we do have a max_forecast_steps
        else:
            resampled = pd.date_range(start=t0, periods=self.max_forecast_steps + 1, freq=dt) + offset
            resampled = resampled[1:] if resampled[0] == t0 else resampled[:-1]
            # Keep only the steps needed to cover the last requested timestamp.
            resampled = resampled[: 1 + sum(resampled < to_pd_datetime(time_stamps[-1]))]
        tf = resampled[-1]
        assert to_pd_datetime(time_stamps[0]) >= t0 and to_pd_datetime(time_stamps[-1]) <= tf, (
            f"Expected `time_stamps` to be between {t0} and {tf}, but `time_stamps` ranges "
            f"from {to_pd_datetime(time_stamps[0])} to {to_pd_datetime(time_stamps[-1])}"
        )

        return to_timestamp(resampled).tolist()
    def train_pre_process(
        self, train_data: TimeSeries, exog_data: TimeSeries = None, return_exog=None
    ) -> Union[TimeSeries, Tuple[TimeSeries, Union[TimeSeries, None]]]:
        """
        Applies the base pre-processing (transform, dimension bookkeeping), validates or
        infers ``target_seq_index``, records the target univariate's name, and trains the
        exogenous-data transform when exogenous data is supported & provided.

        :param train_data: the training time series.
        :param exog_data: optional exogenous regressors sampled like ``train_data``.
        :param return_exog: whether to also return the transformed exogenous data;
            defaults to ``exog_data is not None``.
        :return: the transformed training data, plus the transformed exogenous data
            if ``return_exog``.
        """
        train_data = super().train_pre_process(train_data)
        if self.dim == 1:
            # Univariate data: the (only) univariate is necessarily the target.
            self.config.target_seq_index = 0
        elif self.target_seq_index is None and not self.support_multivariate_output:
            raise RuntimeError(
                f"Attempting to use a forecaster that does not support multivariate outputs "
                f"on a {train_data.dim}-variable "
                f"time series, but didn't specify a `target_seq_index` "
                f"indicating which univariate is the target."
            )
        assert self.support_multivariate_output or (0 <= self.target_seq_index < train_data.dim), (
            f"Expected `support_multivariate_output = True`,"
            f"or `target_seq_index` to be between 0 and {train_data.dim}"
            f"(the dimension of the transformed data), but got {self.target_seq_index} "
        )
        # With multivariate output & no target index, the "target" is all univariates.
        if self.support_multivariate_output and self.target_seq_index is None:
            self.target_name = str(train_data.names)
        else:
            self.target_name = train_data.names[self.target_seq_index]

        # Handle exogenous data
        if return_exog is None:
            return_exog = exog_data is not None
        if not self.supports_exog:
            # Silently drop exogenous data (with a warning) for unsupported models.
            if exog_data is not None:
                exog_data = None
                logger.warning(f"Exogenous regressors are not supported for model {type(self).__name__}")
        if exog_data is not None:
            self.exog_dim = exog_data.dim
            self.config.exog_transform.train(exog_data)
        else:
            self.exog_dim = None
        if return_exog and exog_data is not None:
            exog_data, _ = self.transform_exog_data(exog_data=exog_data, time_stamps=train_data.time_stamps)
        return (train_data, exog_data) if return_exog else train_data
    def train(
        self, train_data: TimeSeries, train_config=None, exog_data: TimeSeries = None
    ) -> Tuple[TimeSeries, Optional[TimeSeries]]:
        """
        Trains the forecaster on the input time series.

        :param train_data: a `TimeSeries` of metric values to train the model.
        :param train_config: Additional training configs, if needed. Only required for some models.
        :param exog_data: A time series of exogenous variables, sampled at the same time stamps as ``train_data``.
            Exogenous variables are known a priori, and they are independent of the variable being forecasted.
            Only supported for models which inherit from `ForecasterExogBase`.

        :return: the model's prediction on ``train_data``, in the same format as
            if you called `ForecasterBase.forecast` on the time stamps of ``train_data``
        """
        if train_config is None:
            # Copy the default so per-call mutation can't leak into the class default.
            train_config = copy.deepcopy(self._default_train_config)
        train_data, exog_data = self.train_pre_process(train_data, exog_data=exog_data, return_exog=True)
        # Some models implement _train() against pandas DataFrames instead of TimeSeries.
        if self._pandas_train:
            train_data = train_data.to_pd()
            exog_data = None if exog_data is None else exog_data.to_pd()
        if exog_data is None:
            train_result = self._train(train_data=train_data, train_config=train_config)
        else:
            train_result = self._train_with_exog(train_data=train_data, train_config=train_config, exog_data=exog_data)
        return self.train_post_process(train_result)
    def train_post_process(
        self, train_result: Tuple[Union[TimeSeries, pd.DataFrame], Optional[Union[TimeSeries, pd.DataFrame]]]
    ) -> Tuple[TimeSeries, TimeSeries]:
        """
        Converts the train result (forecast & stderr for training data) into TimeSeries objects, and inverts the
        model's transform if desired.

        :param train_result: ``(forecast, stderr)`` as returned by ``_train()``.
        """
        return self._process_forecast(*train_result)
    def transform_exog_data(
        self,
        exog_data: TimeSeries,
        time_stamps: Union[List[int], pd.DatetimeIndex],
        time_series_prev: TimeSeries = None,
    ) -> Union[Tuple[TimeSeries, TimeSeries], Tuple[TimeSeries, None], Tuple[None, None]]:
        """
        Base implementation for models without exogenous-regressor support: warns if
        exogenous data was provided, and returns ``(None, None)``. Models inheriting
        from `ForecasterExogBase` override this with a real transformation.
        """
        if exog_data is not None:
            logger.warning(f"Exogenous regressors are not supported for model {type(self).__name__}")
        return None, None
    @abstractmethod
    def _train(self, train_data: pd.DataFrame, train_config=None) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
        """
        Model-specific training logic. Subclasses must return the in-sample forecast
        of the target univariate and (optionally) its standard error.
        """
        raise NotImplementedError
    def _train_with_exog(
        self, train_data: pd.DataFrame, train_config=None, exog_data: pd.DataFrame = None
    ) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
        """
        Default implementation ignores ``exog_data`` and delegates to ``_train()``.
        Models with real exogenous-regressor support override this.
        """
        return self._train(train_data=train_data, train_config=train_config)
    def forecast(
        self,
        time_stamps: Union[int, List[int]],
        time_series_prev: TimeSeries = None,
        exog_data: TimeSeries = None,
        return_iqr: bool = False,
        return_prev: bool = False,
    ) -> Union[Tuple[TimeSeries, Optional[TimeSeries]], Tuple[TimeSeries, TimeSeries, TimeSeries]]:
        """
        Returns the model's forecast on the timestamps given. If ``self.transform`` is specified in the config, the
        forecast is a forecast of transformed values by default. To invert the transform and forecast the actual
        values of the time series, specify ``invert_transform = True`` when specifying the config.

        :param time_stamps: Either a ``list`` of timestamps we wish to forecast for, or the number of steps (``int``)
            we wish to forecast for.
        :param time_series_prev: a time series immediately preceding ``time_series``. If given, we use it to initialize
            the forecaster's state. Otherwise, we assume that ``time_series`` immediately follows the training data.
        :param exog_data: A time series of exogenous variables. Exogenous variables are known a priori, and they are
            independent of the variable being forecasted. ``exog_data`` must include data for all of ``time_stamps``;
            if ``time_series_prev`` is given, it must include data for all of ``time_series_prev.time_stamps`` as well.
            Optional. Only supported for models which inherit from `ForecasterExogBase`.
        :param return_iqr: whether to return the inter-quartile range for the forecast.
            Only supported for models which return error bars.
        :param return_prev: whether to return the forecast for ``time_series_prev`` (and its stderr or IQR if relevant),
            in addition to the forecast for ``time_stamps``. Only used if ``time_series_prev`` is provided.

        :return: ``(forecast, stderr)`` if ``return_iqr`` is false, ``(forecast, lb, ub)`` otherwise.

            - ``forecast``: the forecast for the timestamps given
            - ``stderr``: the standard error of each forecast value. May be ``None``.
            - ``lb``: 25th percentile of forecast values for each timestamp
            - ``ub``: 75th percentile of forecast values for each timestamp
        """
        # Determine the time stamps to forecast for, and resample them if needed.
        # orig_t retains the caller's requested stamps so the output can be re-aligned.
        orig_t = None if isinstance(time_stamps, (int, float)) else time_stamps
        time_stamps = self.resample_time_stamps(time_stamps, time_series_prev)
        if return_prev and time_series_prev is not None:
            if orig_t is None:
                orig_t = time_series_prev.time_stamps + time_stamps
            else:
                orig_t = time_series_prev.time_stamps + to_timestamp(orig_t).tolist()

        # Transform time_series_prev if it is given. Save the inversion state first,
        # since transforming/inverting below mutates it; it is restored before returning.
        old_inversion_state = self.transform.inversion_state
        if time_series_prev is None:
            time_series_prev_df = None
        else:
            time_series_prev = self.transform(time_series_prev)
            assert time_series_prev.dim == self.dim, (
                f"time_series_prev has dimension of {time_series_prev.dim} that is different from "
                f"training data dimension of {self.dim} for the model"
            )
            time_series_prev_df = time_series_prev.to_pd()

        # Make the prediction
        exog_data, exog_data_prev = self.transform_exog_data(
            exog_data, time_stamps=time_stamps, time_series_prev=time_series_prev
        )
        if exog_data is None:
            forecast, err = self._forecast(
                time_stamps=time_stamps, time_series_prev=time_series_prev_df, return_prev=return_prev
            )
        else:
            forecast, err = self._forecast_with_exog(
                time_stamps=time_stamps,
                time_series_prev=time_series_prev_df,
                return_prev=return_prev,
                exog_data=exog_data.to_pd(),
                exog_data_prev=None if exog_data_prev is None else exog_data_prev.to_pd(),
            )

        # Format the return values and reset the transform's inversion state
        if self.invert_transform and time_series_prev is None:
            # Inversion needs some preceding context; use the training data.
            time_series_prev = self.transform(self.train_data)
        if time_series_prev is not None and self.target_seq_index is not None:
            # Keep only the target univariate of the preceding context.
            time_series_prev = pd.DataFrame(time_series_prev.univariates[time_series_prev.names[self.target_seq_index]])
        ret = self._process_forecast(forecast, err, time_series_prev, return_prev=return_prev, return_iqr=return_iqr)
        self.transform.inversion_state = old_inversion_state
        # Align all returned series back onto the caller's requested time stamps.
        return tuple(None if x is None else x.align(reference=orig_t) for x in ret)
    def _process_forecast(self, forecast, err, time_series_prev=None, return_prev=False, return_iqr=False):
        """
        Post-processes a raw model forecast: optionally prepends the previous context,
        pads & normalizes the error bars, inverts the transform if configured, and
        returns either ``(forecast, stderr)`` or ``(forecast, lb, ub)`` as `TimeSeries`.

        :param forecast: the raw forecast (`TimeSeries` or DataFrame).
        :param err: the raw error bars; a single series, or a ``(lower, upper)`` tuple.
        :param time_series_prev: preceding context (target univariate only).
        :param return_prev: whether ``time_series_prev`` should be prepended to the output.
        :param return_iqr: whether to return the 25th/75th-percentile bounds instead of stderr.
        """
        forecast = forecast.to_pd() if isinstance(forecast, TimeSeries) else forecast
        if return_prev and time_series_prev is not None:
            forecast = pd.concat((time_series_prev, forecast))

        # Obtain negative & positive error bars which are appropriately padded
        if err is not None:
            err = (err,) if not isinstance(err, tuple) else err
            assert isinstance(err, tuple) and len(err) in (1, 2)
            assert all(isinstance(e, (pd.DataFrame, TimeSeries)) for e in err)
            new_err = []
            for e in err:
                e = e.to_pd() if isinstance(e, TimeSeries) else e
                # Pad with zeros at the front if the error bars are shorter than the
                # forecast (e.g. no error estimate for the prepended context).
                n, d = len(forecast) - len(e), e.shape[1]
                if n > 0:
                    e = pd.concat((pd.DataFrame(np.zeros((n, d)), index=forecast.index[:n], columns=e.columns), e))
                e.columns = [f"{c}_err" for c in forecast.columns]
                new_err.append(e.abs())
            # A single error series serves as both negative & positive bar.
            e_neg, e_pos = new_err if len(new_err) == 2 else (new_err[0], new_err[0])
        else:
            e_neg = e_pos = None

        # Compute upper/lower bounds for the (potentially inverted) forecast.
        # Only do this if returning the IQR or inverting the transform.
        if (return_iqr or self.invert_transform) and e_neg is not None and e_pos is not None:
            # IQR uses the normal 25th/75th percentiles; otherwise +/- 1 stderr.
            lb = TimeSeries.from_pd((forecast + e_neg.values * (norm.ppf(0.25) if return_iqr else -1)))
            ub = TimeSeries.from_pd((forecast + e_pos.values * (norm.ppf(0.75) if return_iqr else 1)))
            if self.invert_transform:
                lb = self.transform.invert(lb, retain_inversion_state=True)
                ub = self.transform.invert(ub, retain_inversion_state=True)
        else:
            lb = ub = None

        # Convert the forecast to TimeSeries and invert the transform on it if desired
        forecast = TimeSeries.from_pd(forecast)
        if self.invert_transform:
            forecast = self.transform.invert(forecast, retain_inversion_state=True)

        # Return the IQR if desired
        if return_iqr:
            if lb is None or ub is None:
                logger.warning("Model returned err = None, so returning IQR = (None, None)")
            else:
                lb, ub = lb.rename(lambda c: f"{c}_lower"), ub.rename(lambda c: f"{c}_upper")
            return forecast, lb, ub

        # Otherwise, either compute the stderr from the upper/lower bounds (if relevant), or just use the error
        if lb is not None and ub is not None:
            err = TimeSeries.from_pd((ub.to_pd() - lb.to_pd().values).rename(columns=lambda c: f"{c}_err").abs() / 2)
        elif e_neg is not None and e_pos is not None:
            err = TimeSeries.from_pd(e_pos if e_neg is e_pos else (e_neg + e_pos) / 2)
        else:
            err = None
        return forecast, err
@abstractmethod
def _forecast(
    self, time_stamps: List[int], time_series_prev: pd.DataFrame = None, return_prev=False
) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
    """
    Model-specific forecast implementation. Subclasses must override this; the public
    ``forecast`` wrapper handles transforms, exogenous data, and inversion.

    :param time_stamps: a list of timestamps to forecast for.
    :param time_series_prev: data immediately preceding ``time_stamps``, as a ``pd.DataFrame``. Optional.
    :param return_prev: whether to also return the model's fit for ``time_series_prev``.
    :return: ``(forecast, stderr)`` as ``pd.DataFrame``s; ``stderr`` may be ``None`` if the
        model does not support error bars.
    """
    raise NotImplementedError
def _forecast_with_exog(
    self,
    time_stamps: List[int],
    time_series_prev: pd.DataFrame = None,
    return_prev=False,
    exog_data: pd.DataFrame = None,
    exog_data_prev: pd.DataFrame = None,
) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
    """
    Exogenous-aware forecast hook. This base implementation simply ignores
    ``exog_data`` / ``exog_data_prev`` and delegates to ``_forecast``; models which
    actually support exogenous regressors override this method.
    """
    return self._forecast(time_stamps=time_stamps, time_series_prev=time_series_prev, return_prev=return_prev)
def batch_forecast(
    self,
    time_stamps_list: List[List[int]],
    time_series_prev_list: List[TimeSeries],
    return_iqr: bool = False,
    return_prev: bool = False,
) -> Tuple[
    Union[
        Tuple[List[TimeSeries], List[Optional[TimeSeries]]],
        Tuple[List[TimeSeries], List[TimeSeries], List[TimeSeries]],
    ]
]:
    """
    Forecast on a batch of timestamp lists by invoking ``forecast`` once per entry.

    :param time_stamps_list: a list of lists of timestamps we wish to forecast for
    :param time_series_prev_list: a list of TimeSeries immediately preceding the time stamps
        in ``time_stamps_list``. May be ``None``, in which case no context is used.
    :param return_iqr: whether to return the inter-quartile range for the forecast.
        Only supported by models which can return error bars.
    :param return_prev: whether to return the forecast for ``time_series_prev`` (and its stderr or IQR if relevant),
        in addition to the forecast for ``time_stamps``. Only used if ``time_series_prev`` is provided.
    :return: ``(forecast, forecast_stderr)`` if ``return_iqr`` is false,
        ``(forecast, forecast_lb, forecast_ub)`` otherwise, where each element collects
        the corresponding output across all input series.

        - ``forecast``: the forecast for the timestamps given
        - ``forecast_stderr``: the standard error of each forecast value. May be ``None``.
        - ``forecast_lb``: 25th percentile of forecast values for each timestamp
        - ``forecast_ub``: 75th percentile of forecast values for each timestamp
    """
    if time_series_prev_list is None:
        time_series_prev_list = [None] * len(time_stamps_list)
    per_series_outputs = [
        self.forecast(
            time_stamps=time_stamps,
            time_series_prev=time_series_prev,
            return_iqr=return_iqr,
            return_prev=return_prev,
        )
        for time_stamps, time_series_prev in zip(time_stamps_list, time_series_prev_list)
    ]
    # Transpose [(f1, e1), (f2, e2), ...] into ((f1, f2, ...), (e1, e2, ...)).
    return tuple(zip(*per_series_outputs))
def get_figure(
    self,
    *,
    time_series: TimeSeries = None,
    time_stamps: List[int] = None,
    time_series_prev: TimeSeries = None,
    exog_data: TimeSeries = None,
    plot_forecast_uncertainty=False,
    plot_time_series_prev=False,
) -> Figure:
    """
    :param time_series: the time series over whose timestamps we wish to make a forecast. Exactly one of
        ``time_series`` or ``time_stamps`` should be provided.
    :param time_stamps: Either a ``list`` of timestamps we wish to forecast for, or the number of steps (``int``)
        we wish to forecast for. Exactly one of ``time_series`` or ``time_stamps`` should be provided.
    :param time_series_prev: a time series immediately preceding ``time_series``. If given, we use it to initialize
        the forecaster's state. Otherwise, we assume that ``time_series`` immediately follows the training data.
    :param exog_data: A time series of exogenous variables. Exogenous variables are known a priori, and they are
        independent of the variable being forecasted. ``exog_data`` must include data for all of ``time_stamps``;
        if ``time_series_prev`` is given, it must include data for all of ``time_series_prev.time_stamps`` as well.
        Optional. Only supported for models which inherit from `ForecasterExogBase`.
    :param plot_forecast_uncertainty: whether to plot uncertainty estimates (the inter-quartile range) for forecast
        values. Not supported for all models.
    :param plot_time_series_prev: whether to plot ``time_series_prev`` (and the model's fit for it).
        Only used if ``time_series_prev`` is given.
    :return: a `Figure` of the model's forecast.
    """
    assert not (
        time_series is None and time_stamps is None
    ), "Must provide at least one of time_series or time_stamps"
    if time_stamps is None:
        # Derive timestamps (and ground truth y) from the given series. If the transform will be
        # inverted, plot against the raw series; otherwise plot against the transformed series so
        # that y is on the same scale as the model's forecast.
        if self.invert_transform:
            time_stamps = time_series.time_stamps
            y = time_series.univariates[time_series.names[self.target_seq_index]]
        else:
            transformed_ts = self.transform(time_series)
            time_stamps = transformed_ts.time_stamps
            y = transformed_ts.univariates[transformed_ts.names[self.target_seq_index]]
    else:
        y = None

    # Get forecast + IQR bounds if plotting uncertainty
    if plot_forecast_uncertainty:
        yhat, lb, ub = self.forecast(
            time_stamps, time_series_prev, exog_data=exog_data, return_iqr=True, return_prev=plot_time_series_prev
        )
        # Reduce each TimeSeries to its (single) univariate; lb/ub may be None for models without error bars.
        yhat, lb, ub = [None if x is None else x.univariates[x.names[0]] for x in [yhat, lb, ub]]
    # Just get the forecast otherwise
    else:
        lb, ub = None, None
        # The stderr is not plotted here, so discard it (was previously bound to an unused local).
        yhat, _ = self.forecast(
            time_stamps, time_series_prev, exog_data=exog_data, return_iqr=False, return_prev=plot_time_series_prev
        )
        yhat = yhat.univariates[yhat.names[0]]

    # Set up all the parameters needed to make a figure
    if time_series_prev is not None and plot_time_series_prev:
        if not self.invert_transform:
            time_series_prev = self.transform(time_series_prev)
        time_series_prev = time_series_prev.univariates[time_series_prev.names[self.target_seq_index]]
        # Since return_prev=plot_time_series_prev, yhat covers prev + future; split at n_prev.
        n_prev = len(time_series_prev)
        yhat_prev, yhat = yhat[:n_prev], yhat[n_prev:]
        if lb is not None and ub is not None:
            lb_prev, lb = lb[:n_prev], lb[n_prev:]
            ub_prev, ub = ub[:n_prev], ub[n_prev:]
        else:
            lb_prev = ub_prev = None
    else:
        time_series_prev = None
        yhat_prev = lb_prev = ub_prev = None

    # Create the figure
    return Figure(
        y=y,
        yhat=yhat,
        yhat_lb=lb,
        yhat_ub=ub,
        y_prev=time_series_prev,
        yhat_prev=yhat_prev,
        yhat_prev_lb=lb_prev,
        yhat_prev_ub=ub_prev,
    )
def plot_forecast(
    self,
    *,
    time_series: TimeSeries = None,
    time_stamps: List[int] = None,
    time_series_prev: TimeSeries = None,
    exog_data: TimeSeries = None,
    plot_forecast_uncertainty=False,
    plot_time_series_prev=False,
    figsize=(1000, 600),
    ax=None,
):
    """
    Renders the model's forecast with matplotlib. Optionally overlays the forecast's
    uncertainty (inter-quartile range) and the past values of the series, both true
    and predicted.

    :param time_series: the time series over whose timestamps we wish to make a forecast. Exactly one of
        ``time_series`` or ``time_stamps`` should be provided.
    :param time_stamps: Either a ``list`` of timestamps we wish to forecast for, or the number of steps (``int``)
        we wish to forecast for. Exactly one of ``time_series`` or ``time_stamps`` should be provided.
    :param time_series_prev: a time series immediately preceding ``time_series``, used to initialize the
        forecaster's state. If omitted, ``time_series`` is assumed to immediately follow the training data.
    :param exog_data: A time series of exogenous variables (known a priori, independent of the forecasted
        variable). Must cover all of ``time_stamps`` and, when given, ``time_series_prev.time_stamps``.
        Optional; only supported for models inheriting from `ForecasterExogBase`.
    :param plot_forecast_uncertainty: whether to plot the inter-quartile range of forecast values.
        Not supported for all models.
    :param plot_time_series_prev: whether to plot ``time_series_prev`` (and the model's fit for it).
        Only used if ``time_series_prev`` is given.
    :param figsize: figure size in pixels
    :param ax: matplotlib axis to add this plot to
    :return: (fig, ax): matplotlib figure & axes the figure was plotted on
    """
    # Build the Figure object first, then delegate the actual drawing to it.
    figure = self.get_figure(
        time_series=time_series,
        time_stamps=time_stamps,
        time_series_prev=time_series_prev,
        exog_data=exog_data,
        plot_forecast_uncertainty=plot_forecast_uncertainty,
        plot_time_series_prev=plot_time_series_prev,
    )
    return figure.plot(
        title=f"{type(self).__name__}: Forecast of {self.target_name}",
        metric_name=self.target_name,
        figsize=figsize,
        ax=ax,
    )
def plot_forecast_plotly(
    self,
    *,
    time_series: TimeSeries = None,
    time_stamps: List[int] = None,
    time_series_prev: TimeSeries = None,
    exog_data: TimeSeries = None,
    plot_forecast_uncertainty=False,
    plot_time_series_prev=False,
    figsize=(1000, 600),
):
    """
    Renders the model's forecast with plotly. Optionally overlays the forecast's
    uncertainty (inter-quartile range) and the past values of the series, both true
    and predicted.

    :param time_series: the time series over whose timestamps we wish to make a forecast. Exactly one of
        ``time_series`` or ``time_stamps`` should be provided.
    :param time_stamps: Either a ``list`` of timestamps we wish to forecast for, or the number of steps (``int``)
        we wish to forecast for. Exactly one of ``time_series`` or ``time_stamps`` should be provided.
    :param time_series_prev: a time series immediately preceding ``time_series``, used to initialize the
        forecaster's state. If omitted, ``time_series`` is assumed to immediately follow the training data.
    :param exog_data: A time series of exogenous variables (known a priori, independent of the forecasted
        variable). Must cover all of ``time_stamps`` and, when given, ``time_series_prev.time_stamps``.
        Optional; only supported for models inheriting from `ForecasterExogBase`.
    :param plot_forecast_uncertainty: whether to plot the inter-quartile range of forecast values.
        Not supported for all models.
    :param plot_time_series_prev: whether to plot ``time_series_prev`` (and the model's fit for it).
        Only used if ``time_series_prev`` is given.
    :param figsize: figure size in pixels
    """
    # Build the Figure object first, then delegate the actual drawing to it.
    figure = self.get_figure(
        time_series=time_series,
        time_stamps=time_stamps,
        time_series_prev=time_series_prev,
        exog_data=exog_data,
        plot_forecast_uncertainty=plot_forecast_uncertainty,
        plot_time_series_prev=plot_time_series_prev,
    )
    return figure.plot_plotly(
        title=f"{type(self).__name__}: Forecast of {self.target_name}",
        metric_name=self.target_name,
        figsize=figsize,
    )
class ForecasterExogConfig(ForecasterConfig):
    """
    Config object for a forecaster which supports exogenous variables.
    """

    _default_exog_transform = MeanVarNormalize()
    # Transform applied to exogenous data; populated in __init__.
    exog_transform: TransformBase = None

    def __init__(
        self,
        exog_transform: TransformBase = None,
        exog_aggregation_policy: Union[AggregationPolicy, str] = "Mean",
        exog_missing_value_policy: Union[MissingValuePolicy, str] = "ZFill",
        **kwargs,
    ):
        """
        :param exog_transform: The pre-processing transform for exogenous data. Note: resampling is handled separately.
        :param exog_aggregation_policy: The policy to use for aggregating values in exogenous data,
            to ensure it is sampled at the same timestamps as the endogenous data.
        :param exog_missing_value_policy: The policy to use for imputing missing values in exogenous data,
            to ensure it is sampled at the same timestamps as the endogenous data.
        """
        super().__init__(**kwargs)
        if exog_transform is None:
            # Deep copy so distinct config instances never share a (stateful) transform object.
            self.exog_transform = copy.deepcopy(self._default_exog_transform)
        elif isinstance(exog_transform, dict):
            self.exog_transform = TransformFactory.create(**exog_transform)
        else:
            self.exog_transform = exog_transform
        self.exog_aggregation_policy = exog_aggregation_policy
        self.exog_missing_value_policy = exog_missing_value_policy

    @property
    def exog_aggregation_policy(self) -> AggregationPolicy:
        """The policy used to aggregate exogenous values when resampling."""
        return self._exog_aggregation_policy

    @exog_aggregation_policy.setter
    def exog_aggregation_policy(self, agg: Union[AggregationPolicy, str]):
        # Accept either an AggregationPolicy member or its name; validate strings explicitly
        # so the caller gets a clear error instead of a raw enum KeyError.
        if isinstance(agg, str):
            valid = set(AggregationPolicy.__members__.keys())
            if agg not in valid:
                # Fixed message: was "is not a aggregation policy" (missing "valid", bad grammar,
                # inconsistent with the missing-value-policy error below).
                raise KeyError(f"{agg} is not a valid aggregation policy. Valid aggregation policies are: {valid}")
            agg = AggregationPolicy[agg]
        self._exog_aggregation_policy = agg

    @property
    def exog_missing_value_policy(self) -> MissingValuePolicy:
        """The policy used to impute missing values in exogenous data when resampling."""
        return self._exog_missing_value_policy

    @exog_missing_value_policy.setter
    def exog_missing_value_policy(self, mv: Union[MissingValuePolicy, str]):
        # Accept either a MissingValuePolicy member or its name, with explicit validation.
        if isinstance(mv, str):
            valid = set(MissingValuePolicy.__members__.keys())
            if mv not in valid:
                raise KeyError(f"{mv} is not a valid missing value policy. Valid missing value policies are: {valid}")
            mv = MissingValuePolicy[mv]
        self._exog_missing_value_policy = mv
class ForecasterExogBase(ForecasterBase):
    """
    Base class for a forecaster model which supports exogenous variables. Exogenous variables are known a priori, and
    they are independent of the variable being forecasted.
    """

    @property
    def supports_exog(self):
        """Whether this model supports exogenous data (always ``True`` for this class)."""
        return True

    @property
    def exog_transform(self):
        """The transform applied to exogenous data before resampling (from the config)."""
        return self.config.exog_transform

    @property
    def exog_aggregation_policy(self):
        """The aggregation policy used when resampling exogenous data (from the config)."""
        return self.config.exog_aggregation_policy

    @property
    def exog_missing_value_policy(self):
        """The missing-value imputation policy used when resampling exogenous data (from the config)."""
        return self.config.exog_missing_value_policy

    def transform_exog_data(
        self,
        exog_data: TimeSeries,
        time_stamps: Union[List[int], pd.DatetimeIndex],
        time_series_prev: TimeSeries = None,
    ) -> Union[Tuple[TimeSeries, TimeSeries], Tuple[TimeSeries, None], Tuple[None, None]]:
        """
        Transforms & resamples exogenous data and splits it into two subsets:
        one with the same timestamps as ``time_series_prev`` (``None`` if ``time_series_prev`` is ``None``),
        and one with the timestamps ``time_stamps``.

        :param exog_data: The exogenous data of interest.
        :param time_stamps: The timestamps of interest (either the timestamps of data, or the timestamps at which
            we want to obtain a forecast)
        :param time_series_prev: The timestamps of a time series preceding ``time_stamps`` as context. Optional.
        :return: ``(exog_data, exog_data_prev)``, where ``exog_data`` has been resampled to match the ``time_stamps``
            and ``exog_data_prev`` has been resampled to match ``time_series_prev.time_stamps``.
        """
        # Validate that the presence & dimension of exog_data matches what the model was trained with.
        if exog_data is None:
            if self.exog_dim is not None:
                raise ValueError(f"Trained with {self.exog_dim}-dim exogenous data, but received none.")
            return None, None
        if self.exog_dim is None:
            raise ValueError("Trained without exogenous data, but received exogenous data.")
        if self.exog_dim != exog_data.dim:
            raise ValueError(f"Trained with {self.exog_dim}-dim exogenous data, but received {exog_data.dim}-dim.")

        # Transform & resample
        exog_data = self.exog_transform(exog_data)
        if time_series_prev is not None:
            # Align to the union of prev + future timestamps, then split at the first forecast
            # timestamp. t_in_left=False so time_stamps[0] itself lands in the future split.
            t = time_series_prev.time_stamps + to_timestamp(time_stamps).tolist()
            exog_data = exog_data.align(
                reference=t,
                aggregation_policy=self.exog_aggregation_policy,
                missing_value_policy=self.exog_missing_value_policy,
            )
            exog_data_prev, exog_data = exog_data.bisect(time_stamps[0], t_in_left=False)
        else:
            exog_data_prev = None
            exog_data = exog_data.align(
                reference=time_stamps,
                aggregation_policy=self.exog_aggregation_policy,
                missing_value_policy=self.exog_missing_value_policy,
            )
        return exog_data, exog_data_prev

    @abstractmethod
    def _train_with_exog(
        self, train_data: pd.DataFrame, train_config=None, exog_data: pd.DataFrame = None
    ) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
        """Model-specific training which may use exogenous data. Subclasses must implement this."""
        raise NotImplementedError

    def _train(self, train_data: pd.DataFrame, train_config=None) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
        # Training without exogenous data just delegates with exog_data=None.
        return self._train_with_exog(train_data=train_data, train_config=train_config, exog_data=None)

    @abstractmethod
    def _forecast_with_exog(
        self,
        time_stamps: List[int],
        time_series_prev: pd.DataFrame = None,
        return_prev=False,
        exog_data: pd.DataFrame = None,
        exog_data_prev: pd.DataFrame = None,
    ) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
        """Model-specific forecasting which may use exogenous data. Subclasses must implement this."""
        raise NotImplementedError

    def _forecast(
        self, time_stamps: List[int], time_series_prev: pd.DataFrame = None, return_prev=False
    ) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
        # Forecasting without exogenous data just delegates with exog_data=None.
        return self._forecast_with_exog(
            time_stamps=time_stamps,
            time_series_prev=time_series_prev,
            return_prev=return_prev,
            exog_data=None,
            exog_data_prev=None,
        )
import copy
import logging
import numpy as np
import pandas as pd
from scipy.stats import norm
from typing import List, Optional, Tuple, Union
try:
import torch
import torch.nn as nn
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
from merlion.models.deep_base import DeepConfig, DeepModelBase
from merlion.models.forecast.base import ForecasterBase, ForecasterConfig
from merlion.models.utils.rolling_window_dataset import RollingWindowDataset
from merlion.models.utils.time_features import get_time_features
from merlion.models.utils.early_stopping import EarlyStopping
from merlion.transform.base import TransformBase, Identity
from merlion.transform.factory import TransformFactory
from merlion.utils.misc import initializer, ProgressBar
from merlion.utils.time_series import to_pd_datetime, to_timestamp, TimeSeries, AggregationPolicy, MissingValuePolicy
logger = logging.getLogger(__name__)
class DeepForecasterConfig(DeepConfig, ForecasterConfig):
    """
    Configuration object for forecasters backed by a deep neural network.
    """

    def __init__(self, n_past: int, **kwargs):
        """
        :param n_past: # of past steps used for forecasting future.
        """
        super().__init__(**kwargs)
        self.n_past = n_past
class DeepForecaster(DeepModelBase, ForecasterBase):
    """
    Base class for a deep forecaster model
    """

    config_class = DeepForecasterConfig

    def __init__(self, config: DeepForecasterConfig):
        super().__init__(config)

    def _get_np_loss_and_prediction(self, eval_dataset: RollingWindowDataset):
        """
        Get numpy prediction and loss with evaluation mode for a given dataset or data

        :param eval_dataset: Evaluation dataset
        :return: The numpy prediction of the model and the average loss for the given dataset.
        """
        self.deep_model.eval()
        all_preds = []
        total_loss = []
        for batch in eval_dataset:
            with torch.no_grad():
                loss, outputs, _ = self._get_batch_model_loss_and_outputs(self._convert_batch_to_tensors(batch))
                all_preds.append(outputs.detach().cpu().numpy())
                total_loss.append(loss.item())
        preds = np.concatenate(all_preds, axis=0)
        return preds, np.average(total_loss)

    @property
    def support_multivariate_output(self) -> bool:
        """
        Deep models support multivariate output by default.
        """
        return True

    def _convert_batch_to_tensors(self, batch):
        """
        Convert a numpy ``(past, past_timestamp, future, future_timestamp)`` batch to float
        tensors on the deep model's device. ``future`` may be ``None`` at inference time,
        in which case it is passed through unchanged.
        """
        device = self.deep_model.device
        past, past_timestamp, future, future_timestamp = batch
        past = torch.tensor(past, dtype=torch.float, device=device)
        future = future if future is None else torch.tensor(future, dtype=torch.float, device=device)
        past_timestamp = torch.tensor(past_timestamp, dtype=torch.float, device=device)
        future_timestamp = torch.tensor(future_timestamp, dtype=torch.float, device=device)
        return past, past_timestamp, future, future_timestamp

    def _train(self, train_data: pd.DataFrame, train_config=None) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
        """
        Trains the deep model on ``train_data`` with optional early stopping, then returns the
        model's one-step-ahead fit on the training data (and ``None`` for the stderr).
        Note: the return annotation was previously ``pd.DataFrame`` even though a 2-tuple is returned.
        """
        config = self.config
        # creating model before the training
        self._create_model()

        total_dataset = RollingWindowDataset(
            train_data,
            n_past=config.n_past,
            n_future=config.max_forecast_steps,
            batch_size=config.batch_size,
            target_seq_index=None,  # must be None here; target_seq_index is applied later during loss computation
            ts_encoding=config.ts_encoding,
            valid_fraction=config.valid_fraction,
            flatten=False,
            shuffle=True,
            validation=False,
        )
        train_steps = len(total_dataset)
        logger.info(f"Training steps each epoch: {train_steps}")

        bar = ProgressBar(total=config.num_epochs)
        early_stopping = EarlyStopping(patience=config.early_stop_patience) if config.early_stop_patience else None

        # start training
        for epoch in range(config.num_epochs):
            train_loss = []
            self.deep_model.train()
            # Reseed the dataset shuffling each epoch so batches differ between epochs.
            total_dataset.seed = epoch + 1
            for batch in total_dataset:
                self.optimizer.zero_grad()
                loss, _, _ = self._get_batch_model_loss_and_outputs(self._convert_batch_to_tensors(batch))
                train_loss.append(loss.item())
                loss.backward()
                if config.clip_gradient is not None:
                    # Fixed: was torch.nn.utils.clip_grad_norm(self.model.parameters(), ...).
                    # `self.model` does not exist on this class (the network is `self.deep_model`),
                    # and `clip_grad_norm` is the deprecated non-inplace variant.
                    torch.nn.utils.clip_grad_norm_(self.deep_model.parameters(), config.clip_gradient)
                self.optimizer.step()
            train_loss = np.average(train_loss)

            # Evaluate on the held-out fraction; toggle the validation flag around the call.
            total_dataset.validation = True
            _, val_loss = self._get_np_loss_and_prediction(total_dataset)
            total_dataset.validation = False

            if bar is not None:
                bar.print(
                    epoch + 1, prefix="", suffix=f"Train Loss: {train_loss: .4f}, Validation Loss: {val_loss: .4f}"
                )

            if early_stopping is not None:
                early_stopping(val_loss, self.deep_model)
                if early_stopping.early_stop:
                    logger.info(f"Early stopping with {config.early_stop_patience} patience")
                    break

        if early_stopping is not None:
            early_stopping.load_best_model(self.deep_model)
            logger.info(f"Load the best model with validation loss: {early_stopping.val_loss_min: .4f}")
        logger.info("End of the training loop")

        # get predictions over the full (unshuffled) training dataset
        total_dataset.shuffle = False
        total_dataset.validation = None
        pred, _ = self._get_np_loss_and_prediction(total_dataset)

        # since the model predicts multiple steps, we concatenate all the first steps together
        columns = train_data.columns if self.target_seq_index is None else [self.target_name]
        column_index = train_data.index[config.n_past : (len(train_data) - config.max_forecast_steps + 1)]
        return pd.DataFrame(pred[:, 0], index=column_index, columns=columns), None

    def _get_batch_model_loss_and_outputs(self, batch):
        """
        For loss calculation and output prediction

        :param batch: a batch contains `(past, past_timestamp, future, future_timestamp)` used for calculating
            loss and model outputs
        :return: calculated loss, deep model outputs and targeted ground truth future
        """
        past, past_timestamp, future, future_timestamp = batch
        model_output = self.deep_model(past, past_timestamp, future_timestamp)

        # Inference mode: no ground truth, so no loss can be computed.
        if future is None:
            return None, model_output, None

        # Restrict the ground truth to the target variable for univariate-target training.
        if self.target_seq_index is not None:
            future = future[:, :, self.target_seq_index : self.target_seq_index + 1]

        loss = self.loss_fn(model_output, future)
        return loss, model_output, future

    @property
    def require_even_sampling(self) -> bool:
        return False

    def _forecast(
        self, time_stamps: List[int], time_series_prev: pd.DataFrame = None, return_prev=False
    ) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
        # Default to the last n_past rows of the (transformed) training data as context.
        if time_series_prev is None:
            time_series_prev = self.transform(self.train_data).to_pd().iloc[-self.config.n_past :]

        # convert to vector feature
        prev_timestamp = get_time_features(time_series_prev.index, self.config.ts_encoding)
        future_timestamp = get_time_features(to_pd_datetime(time_stamps), self.config.ts_encoding)

        # add a batch dimension of size 1
        past = np.expand_dims(time_series_prev.values, 0)
        past_timestamp = np.expand_dims(prev_timestamp, 0)
        future_timestamp = np.expand_dims(future_timestamp, 0)

        self.deep_model.eval()
        batch = (past, past_timestamp, None, future_timestamp)
        _, model_output, _ = self._get_batch_model_loss_and_outputs(self._convert_batch_to_tensors(batch))
        # NOTE(review): squeeze() drops ALL singleton dims, which also collapses the length axis
        # when forecasting a single step — presumably never hit in practice; verify if 1-step
        # forecasts are supported.
        preds = model_output.detach().cpu().numpy().squeeze()

        columns = time_series_prev.columns if self.target_seq_index is None else [self.target_name]
        pd_pred = pd.DataFrame(preds, index=to_pd_datetime(time_stamps), columns=columns)
        return pd_pred, None
import copy
import logging
import math
import numpy as np
import pandas as pd
from scipy.stats import norm
from typing import List, Optional, Tuple, Union
from abc import abstractmethod
try:
import torch
import torch.nn as nn
import torch.nn.functional as F
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
from merlion.models.base import NormalizingConfig
from merlion.models.deep_base import TorchModel
from merlion.models.forecast.deep_base import DeepForecasterConfig, DeepForecaster
from merlion.models.utils.nn_modules import (
AutoCorrelation,
AutoCorrelationLayer,
SeriesDecomposeBlock,
SeasonalLayernorm,
DataEmbeddingWoPos,
)
from merlion.models.utils.nn_modules.enc_dec_autoformer import Encoder, Decoder, EncoderLayer, DecoderLayer
from merlion.utils.misc import initializer
logger = logging.getLogger(__name__)
class AutoformerConfig(DeepForecasterConfig, NormalizingConfig):
    """
    Decomposition Transformers with Auto-Correlation for Long-Term Series Forecasting: https://arxiv.org/abs/2106.13008.
    Code adapted from https://github.com/thuml/Autoformer.
    """

    # @initializer auto-assigns every constructor argument as an attribute of the same name on
    # self (e.g. config.moving_avg, config.model_dim are read later by AutoformerModel), so no
    # explicit assignments are needed in the body below.
    @initializer
    def __init__(
        self,
        n_past,
        max_forecast_steps: int = None,
        moving_avg: int = 25,
        encoder_input_size: int = None,
        decoder_input_size: int = None,
        num_encoder_layers: int = 2,
        num_decoder_layers: int = 1,
        start_token_len: int = 0,
        factor: int = 3,
        model_dim: int = 512,
        embed: str = "timeF",
        dropout: float = 0.05,
        activation: str = "gelu",
        n_heads: int = 8,
        fcn_dim: int = 2048,
        **kwargs
    ):
        """
        :param n_past: # of past steps used for forecasting future.
        :param max_forecast_steps: Max # of steps we would like to forecast for.
        :param moving_avg: Window size of moving average for Autoformer.
        :param encoder_input_size: Input size of encoder. If ``encoder_input_size = None``,
            then the model will automatically use ``config.dim``, which is the dimension of the input data.
        :param decoder_input_size: Input size of decoder. If ``decoder_input_size = None``,
            then the model will automatically use ``config.dim``, which is the dimension of the input data.
        :param num_encoder_layers: Number of encoder layers.
        :param num_decoder_layers: Number of decoder layers.
        :param start_token_len: Length of start token for deep transformer encoder-decoder based models.
            The start token is similar to the special tokens for NLP models (e.g., bos, sep, eos tokens).
        :param factor: Attention factor.
        :param model_dim: Dimension of the model.
        :param embed: Time feature encoding type, options include ``timeF``, ``fixed`` and ``learned``.
        :param dropout: dropout rate.
        :param activation: Activation function, can be ``gelu``, ``relu``, ``sigmoid``, etc.
        :param n_heads: Number of heads of the model.
        :param fcn_dim: Hidden dimension of the MLP layer in the model.
        """
        super().__init__(n_past=n_past, max_forecast_steps=max_forecast_steps, **kwargs)
class AutoformerModel(TorchModel):
    """
    Implementation of Autoformer deep torch model.
    """

    def __init__(self, config: AutoformerConfig):
        super().__init__(config)
        # Resolve encoder/decoder input sizes from the data dimension when not given explicitly;
        # the model's output dimension (c_out) always matches the encoder input size.
        if config.dim is not None:
            config.encoder_input_size = config.dim if config.encoder_input_size is None else config.encoder_input_size
        config.decoder_input_size = (
            config.encoder_input_size if config.decoder_input_size is None else config.decoder_input_size
        )
        config.c_out = config.encoder_input_size

        self.n_past = config.n_past
        self.start_token_len = config.start_token_len
        self.max_forecast_steps = config.max_forecast_steps

        # Trend/seasonal series decomposition via moving average.
        kernel_size = config.moving_avg
        self.decomp = SeriesDecomposeBlock(kernel_size)

        # Embedding
        # The series-wise connection inherently contains the sequential information.
        # Thus, we can discard the position embedding of transformers.
        self.enc_embedding = DataEmbeddingWoPos(
            config.encoder_input_size, config.model_dim, config.embed, config.ts_encoding, config.dropout
        )
        self.dec_embedding = DataEmbeddingWoPos(
            config.decoder_input_size, config.model_dim, config.embed, config.ts_encoding, config.dropout
        )

        # Encoder: stacked auto-correlation layers with seasonal layer norm.
        self.encoder = Encoder(
            [
                EncoderLayer(
                    AutoCorrelationLayer(
                        AutoCorrelation(False, config.factor, attention_dropout=config.dropout, output_attention=False),
                        config.model_dim,
                        config.n_heads,
                    ),
                    config.model_dim,
                    config.fcn_dim,
                    moving_avg=config.moving_avg,
                    dropout=config.dropout,
                    activation=config.activation,
                )
                for l in range(config.num_encoder_layers)
            ],
            norm_layer=SeasonalLayernorm(config.model_dim),
        )
        # Decoder: self auto-correlation (masked) + cross auto-correlation over encoder outputs,
        # with a final linear projection back to the data dimension (c_out).
        self.decoder = Decoder(
            [
                DecoderLayer(
                    AutoCorrelationLayer(
                        AutoCorrelation(True, config.factor, attention_dropout=config.dropout, output_attention=False),
                        config.model_dim,
                        config.n_heads,
                    ),
                    AutoCorrelationLayer(
                        AutoCorrelation(False, config.factor, attention_dropout=config.dropout, output_attention=False),
                        config.model_dim,
                        config.n_heads,
                    ),
                    config.model_dim,
                    config.c_out,
                    config.fcn_dim,
                    moving_avg=config.moving_avg,
                    dropout=config.dropout,
                    activation=config.activation,
                )
                for l in range(config.num_decoder_layers)
            ],
            norm_layer=SeasonalLayernorm(config.model_dim),
            projection=nn.Linear(config.model_dim, config.c_out, bias=True),
        )

    def forward(
        self,
        past,
        past_timestamp,
        future_timestamp,
        enc_self_mask=None,
        dec_self_mask=None,
        dec_enc_mask=None,
        **kwargs
    ):
        config = self.config
        # Prepend the last `start_token_len` past time features to the future time features,
        # so the decoder sees the start token's time context as well.
        future_timestamp = torch.cat(
            [past_timestamp[:, (past_timestamp.shape[1] - self.start_token_len) :], future_timestamp], dim=1
        )

        # decomp init: initialize the future trend with the mean of the past window, and the
        # future seasonal component with zeros.
        mean = torch.mean(past, dim=1).unsqueeze(1).repeat(1, self.max_forecast_steps, 1)
        zeros = torch.zeros(
            [past.shape[0], self.max_forecast_steps, past.shape[2]], dtype=torch.float, device=self.device
        )
        seasonal_init, trend_init = self.decomp(past)
        # decoder input: last `start_token_len` steps of the decomposed past + the initialized future.
        trend_init = torch.cat([trend_init[:, (trend_init.shape[1] - self.start_token_len) :, :], mean], dim=1)
        seasonal_init = torch.cat(
            [seasonal_init[:, (seasonal_init.shape[1] - self.start_token_len) :, :], zeros], dim=1
        )
        # enc
        enc_out = self.enc_embedding(past, past_timestamp)
        enc_out, attns = self.encoder(enc_out, attn_mask=enc_self_mask)
        # dec: the decoder refines the seasonal part and accumulates the trend part separately.
        dec_out = self.dec_embedding(seasonal_init, future_timestamp)
        seasonal_part, trend_part = self.decoder(
            dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask, trend=trend_init
        )
        # final: recombine trend + seasonal into the forecast.
        dec_out = trend_part + seasonal_part

        if self.config.target_seq_index is not None:
            # NOTE(review): slices channel 0 rather than target_seq_index — presumably the
            # transform pipeline places the target in the first column; verify against the
            # loss computation in DeepForecaster._get_batch_model_loss_and_outputs.
            return dec_out[:, -self.max_forecast_steps :, :1]
        else:
            return dec_out[:, -self.max_forecast_steps :, :]  # [B, L, D]
class AutoformerForecaster(DeepForecaster):
    """
    Implementation of Autoformer deep forecaster.
    """

    # Hooks used by the DeepForecaster machinery to build the config & the torch network.
    config_class = AutoformerConfig
    deep_model_class = AutoformerModel

    def __init__(self, config: AutoformerConfig):
        super().__init__(config)
import copy
import logging
import math
import numpy as np
import pandas as pd
from scipy.stats import norm
from typing import List, Optional, Tuple, Union
from abc import abstractmethod
try:
import torch
import torch.nn as nn
import torch.nn.functional as F
except ImportError as e:
err = (
"Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or "
"`pip install `salesforce-merlion[all]`"
)
raise ImportError(str(e) + ". " + err)
from merlion.utils.misc import initializer
from merlion.models.base import NormalizingConfig
from merlion.models.deep_base import TorchModel
from merlion.models.forecast.deep_base import DeepForecasterConfig, DeepForecaster
from merlion.models.utils.nn_modules import ProbAttention, AttentionLayer, DataEmbedding, ConvLayer
from merlion.models.utils.nn_modules.enc_dec_transformer import (
Decoder,
DecoderLayer,
Encoder,
EncoderLayer,
)
logger = logging.getLogger(__name__)
class InformerConfig(DeepForecasterConfig, NormalizingConfig):
    """
    Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting: https://arxiv.org/abs/2012.07436
    Code adapted from https://github.com/thuml/Autoformer.
    """

    # @initializer auto-assigns every constructor argument as an attribute of the same name on
    # self (e.g. config.distil, config.model_dim are read later by InformerModel), so no
    # explicit assignments are needed in the body below.
    @initializer
    def __init__(
        self,
        n_past,
        max_forecast_steps: int = None,
        encoder_input_size: int = None,
        decoder_input_size: int = None,
        num_encoder_layers: int = 2,
        num_decoder_layers: int = 1,
        start_token_len: int = 0,
        factor: int = 3,
        model_dim: int = 512,
        embed: str = "timeF",
        dropout: float = 0.05,
        activation: str = "gelu",
        n_heads: int = 8,
        fcn_dim: int = 2048,
        distil: bool = True,
        **kwargs
    ):
        """
        :param n_past: # of past steps used for forecasting future.
        :param max_forecast_steps: Max # of steps we would like to forecast for.
        :param encoder_input_size: Input size of encoder. If ``encoder_input_size = None``,
            then the model will automatically use ``config.dim``, which is the dimension of the input data.
        :param decoder_input_size: Input size of decoder. If ``decoder_input_size = None``,
            then the model will automatically use ``config.dim``, which is the dimension of the input data.
        :param num_encoder_layers: Number of encoder layers.
        :param num_decoder_layers: Number of decoder layers.
        :param start_token_len: Length of start token for deep transformer encoder-decoder based models.
            The start token is similar to the special tokens for NLP models (e.g., bos, sep, eos tokens).
        :param factor: Attention factor.
        :param model_dim: Dimension of the model.
        :param embed: Time feature encoding type, options include ``timeF``, ``fixed`` and ``learned``.
        :param dropout: dropout rate.
        :param activation: Activation function, can be ``gelu``, ``relu``, ``sigmoid``, etc.
        :param n_heads: Number of heads of the model.
        :param fcn_dim: Hidden dimension of the MLP layer in the model.
        :param distil: whether to use distilling in the encoder of the model.
        """
        super().__init__(n_past=n_past, max_forecast_steps=max_forecast_steps, **kwargs)
class InformerModel(TorchModel):
    """
    Implementation of the Informer deep torch model (https://arxiv.org/abs/2012.07436).

    A ProbSparse-attention encoder-decoder: the encoder embeds the observed history; the
    decoder is primed with a "start token" slice of the history followed by zero
    placeholders for the forecast horizon; a final linear projection maps back to the
    output dimension ``c_out``.
    """

    def __init__(self, config: InformerConfig):
        super().__init__(config)
        # Resolve encoder/decoder input sizes from the data dimension when not given explicitly.
        if config.dim is not None:
            config.encoder_input_size = config.dim if config.encoder_input_size is None else config.encoder_input_size
            config.decoder_input_size = (
                config.encoder_input_size if config.decoder_input_size is None else config.decoder_input_size
            )
        # The model predicts all input channels: output dim mirrors the encoder input size.
        config.c_out = config.encoder_input_size
        self.n_past = config.n_past
        self.start_token_len = config.start_token_len
        self.max_forecast_steps = config.max_forecast_steps
        # Value + time-feature embeddings for the encoder and decoder inputs.
        self.enc_embedding = DataEmbedding(
            config.encoder_input_size, config.model_dim, config.embed, config.ts_encoding, config.dropout
        )
        self.dec_embedding = DataEmbedding(
            config.decoder_input_size, config.model_dim, config.embed, config.ts_encoding, config.dropout
        )
        # Encoder: stacked ProbSparse self-attention layers, optionally interleaved with
        # distilling conv layers (one fewer conv layer than attention layers).
        self.encoder = Encoder(
            [
                EncoderLayer(
                    AttentionLayer(
                        ProbAttention(False, config.factor, attention_dropout=config.dropout, output_attention=False),
                        config.model_dim,
                        config.n_heads,
                    ),
                    config.model_dim,
                    config.fcn_dim,
                    dropout=config.dropout,
                    activation=config.activation,
                )
                for l in range(config.num_encoder_layers)
            ],
            [ConvLayer(config.model_dim) for l in range(config.num_encoder_layers - 1)] if config.distil else None,
            norm_layer=torch.nn.LayerNorm(config.model_dim),
        )
        # Decoder: masked (causal) ProbSparse self-attention, then cross-attention over the
        # encoder output, with a linear projection back to ``c_out`` channels.
        self.decoder = Decoder(
            [
                DecoderLayer(
                    AttentionLayer(
                        ProbAttention(True, config.factor, attention_dropout=config.dropout, output_attention=False),
                        config.model_dim,
                        config.n_heads,
                    ),
                    AttentionLayer(
                        ProbAttention(False, config.factor, attention_dropout=config.dropout, output_attention=False),
                        config.model_dim,
                        config.n_heads,
                    ),
                    config.model_dim,
                    config.fcn_dim,
                    dropout=config.dropout,
                    activation=config.activation,
                )
                for l in range(config.num_decoder_layers)
            ],
            norm_layer=torch.nn.LayerNorm(config.model_dim),
            projection=nn.Linear(config.model_dim, config.c_out, bias=True),
        )
        self.config = config

    def forward(
        self,
        past,
        past_timestamp,
        future_timestamp,
        enc_self_mask=None,
        dec_self_mask=None,
        dec_enc_mask=None,
        **kwargs
    ):
        """
        :param past: observed history; assumed shape ``(batch, n_past, encoder_input_size)``
            -- TODO confirm against the caller.
        :param past_timestamp: time features aligned with ``past``.
        :param future_timestamp: time features for the forecast horizon.
        :param enc_self_mask: optional encoder self-attention mask.
        :param dec_self_mask: optional decoder self-attention mask.
        :param dec_enc_mask: optional decoder-encoder cross-attention mask.
        :return: predictions for the final ``max_forecast_steps`` steps.
        """
        config = self.config
        # Decoder input = last `start_token_len` steps of the history, then zeros as
        # placeholders for the horizon to be predicted.
        start_token = past[:, past.shape[1] - self.start_token_len :]
        dec_inp = torch.zeros(
            past.shape[0], self.max_forecast_steps, config.decoder_input_size, dtype=torch.float, device=self.device
        )
        dec_inp = torch.cat([start_token, dec_inp], dim=1)
        # Prepend the start token's time features so they stay aligned with dec_inp.
        future_timestamp = torch.cat(
            [past_timestamp[:, (past_timestamp.shape[1] - self.start_token_len) :], future_timestamp], dim=1
        )
        enc_out = self.enc_embedding(past, past_timestamp)
        enc_out, attns = self.encoder(enc_out, attn_mask=enc_self_mask)
        dec_out = self.dec_embedding(dec_inp, future_timestamp)
        dec_out = self.decoder(dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask)
        if self.config.target_seq_index is not None:
            # NOTE(review): returns column 0 rather than column ``target_seq_index``; this
            # assumes the target variable was moved to the first column upstream -- confirm,
            # otherwise this should slice the target column explicitly.
            return dec_out[:, -self.max_forecast_steps :, :1]
        else:
            return dec_out[:, -self.max_forecast_steps :, :]
class InformerForecaster(DeepForecaster):
    """
    Implementation of the Informer deep forecaster.

    Wires the `InformerModel` architecture and its `InformerConfig` into the generic
    `DeepForecaster` training & inference loop.
    """

    # Class hooks consumed by the DeepForecaster machinery.
    config_class = InformerConfig
    deep_model_class = InformerModel

    def __init__(self, config: InformerConfig):
        super().__init__(config)
import logging
import traceback
from typing import List
import pandas as pd
from merlion.evaluate.anomaly import TSADMetric, TSADEvaluator, TSADEvaluatorConfig
from merlion.models.anomaly.base import DetectorBase, DetectorConfig
from merlion.models.ensemble.base import EnsembleConfig, EnsembleTrainConfig, EnsembleBase
from merlion.models.ensemble.combine import Mean
from merlion.post_process.threshold import AggregateAlarms
from merlion.utils import TimeSeries
logger = logging.getLogger(__name__)
class DetectorEnsembleConfig(DetectorConfig, EnsembleConfig):
    """
    Configuration for an ensemble of anomaly detectors.
    """

    _default_combiner = Mean(abs_score=True)

    def __init__(self, enable_calibrator=False, **kwargs):
        """
        :param enable_calibrator: Whether to calibrate the ensemble-level anomaly score.
            Disabled (``False``) by default.
        :param kwargs: Any additional kwargs for `EnsembleConfig` or `DetectorConfig`.
        """
        super().__init__(enable_calibrator=enable_calibrator, **kwargs)

    @property
    def per_model_threshold(self):
        """
        :return: whether each individual model's own thresholding rule is applied before
            its output is combined. Only done when performing model selection.
        """
        # Imported locally to avoid a circular import with the combine module.
        from merlion.models.ensemble.combine import ModelSelector

        is_selector = isinstance(self.combiner, ModelSelector)
        return is_selector and not self.enable_threshold

    @property
    def _default_threshold(self):
        # No ensemble-level threshold when each model thresholds its own scores.
        return None if self.per_model_threshold else AggregateAlarms(alm_threshold=3.0, abs_score=True)
class DetectorEnsembleTrainConfig(EnsembleTrainConfig):
    """
    Training configuration for an ensemble of anomaly detectors.
    """

    def __init__(self, valid_frac=0.0, per_model_train_configs=None, per_model_post_rule_train_configs=None):
        """
        :param valid_frac: fraction of the training data reserved for validation.
        :param per_model_train_configs: optional list with one train config per model.
            ``None`` uses every model's default; a ``None`` entry uses that model's default.
        :param per_model_post_rule_train_configs: optional list with one post-rule train
            config per model, with the same ``None`` semantics as above.
        """
        super().__init__(valid_frac=valid_frac, per_model_train_configs=per_model_train_configs)
        self.per_model_post_rule_train_configs = per_model_post_rule_train_configs
class DetectorEnsemble(EnsembleBase, DetectorBase):
    """
    Class representing an ensemble of multiple anomaly detection models.
    """

    models: List[DetectorBase]
    config_class = DetectorEnsembleConfig

    def __init__(self, config: DetectorEnsembleConfig = None, models: List[DetectorBase] = None):
        super().__init__(config=config, models=models)
        for model in self.models:
            assert isinstance(model, DetectorBase), (
                f"Expected all models in {type(self).__name__} to be anomaly "
                f"detectors, but got a {type(model).__name__}."
            )
            # Individual models threshold their own scores only when doing model selection.
            model.config.enable_threshold = self.per_model_threshold

    @property
    def require_even_sampling(self) -> bool:
        # Member detectors enforce their own sampling requirements.
        return False

    @property
    def require_univariate(self) -> bool:
        return False

    @property
    def _default_post_rule_train_config(self):
        # Train the ensemble-level threshold to maximize F1 against the labels.
        return dict(metric=TSADMetric.F1, unsup_quantile=None)

    @property
    def _default_train_config(self):
        return DetectorEnsembleTrainConfig()

    @property
    def per_model_threshold(self):
        """
        :return: whether to apply the threshold rule of each individual model
            before aggregating their anomaly scores.
        """
        return self.config.per_model_threshold

    def _train(
        self,
        train_data: TimeSeries,
        train_config: DetectorEnsembleTrainConfig = None,
        anomaly_labels: TimeSeries = None,
    ) -> TimeSeries:
        """
        Trains each anomaly detector in the ensemble unsupervised, and each of
        their post-rules supervised (if labels are given).

        :param train_data: a `TimeSeries` of metric values to train the model.
        :param train_config: `DetectorEnsembleTrainConfig` for ensemble training.
        :param anomaly_labels: a `TimeSeries` indicating which timestamps are anomalous. Optional.
        :return: A `TimeSeries` of the ensemble's anomaly scores on the training data.
        """
        train, valid = self.train_valid_split(train_data, train_config)
        if valid is not None:
            logger.warning("Using a train/validation split to train a DetectorEnsemble is not recommended!")
        # Expand per-model train configs, defaulting to one ``None`` (= model default) per model.
        train_cfgs = train_config.per_model_train_configs
        if train_cfgs is None:
            train_cfgs = [None] * len(self.models)
        # NOTE(review): message below has a duplicated word ("received received") and is
        # missing a space before the count that follows from the next f-string fragment.
        assert len(train_cfgs) == len(self.models), (
            f"You must provide the same number of per-model train configs as models, but received received"
            f"{len(train_cfgs)} train configs for an ensemble with {len(self.models)} models."
        )
        pr_cfgs = train_config.per_model_post_rule_train_configs
        if pr_cfgs is None:
            pr_cfgs = [None] * len(self.models)
        assert len(pr_cfgs) == len(self.models), (
            f"You must provide the same number of per-model post-rule train configs as models, but received "
            f"{len(pr_cfgs)} post-rule train configs for an ensemble with {len(self.models)} models."
        )
        # Train each model individually, with its own train config & post-rule train config
        all_scores = []
        eval_cfg = TSADEvaluatorConfig(retrain_freq=None, cadence=self.get_max_common_horizon(train))
        # TODO: parallelize me
        for i, (model, cfg, pr_cfg) in enumerate(zip(self.models, train_cfgs, pr_cfgs)):
            try:
                train_kwargs = dict(train_config=cfg, anomaly_labels=anomaly_labels, post_rule_train_config=pr_cfg)
                train_scores, valid_scores = TSADEvaluator(model=model, config=eval_cfg).get_predict(
                    train_vals=train, test_vals=valid, train_kwargs=train_kwargs, post_process=True
                )
                # Use validation-period scores for the combiner when a split was made.
                scores = train_scores if valid is None else valid_scores
            except Exception:
                # A failing model is excluded from the ensemble rather than aborting training.
                logger.warning(
                    f"Caught an exception while training model {i + 1}/{len(self.models)} ({type(model).__name__}). "
                    f"Model will not be used. {traceback.format_exc()}"
                )
                self.combiner.set_model_used(i, False)
                scores = None
            all_scores.append(scores)
        # Train combiner on train data if there is no validation data
        if valid is None:
            return self.train_combiner(all_scores, anomaly_labels)
        # Otherwise, train the combiner on the validation data, and re-train the models on the full data.
        # The labels are bisected so only the validation-period labels are used for the combiner.
        self.train_combiner(all_scores, anomaly_labels.bisect(t=valid.time_stamps[0], t_in_left=False)[1])
        all_scores = []
        # TODO: parallelize me
        for i, (model, cfg, pr_cfg, used) in enumerate(zip(self.models, train_cfgs, pr_cfgs, self.models_used)):
            model.reset()
            if used:
                logger.info(f"Re-training model {i+1}/{len(self.models)} ({type(model).__name__}) on full data...")
                train_kwargs = dict(train_config=cfg, anomaly_labels=anomaly_labels, post_rule_train_config=pr_cfg)
                train_scores = model.train(train_data, **train_kwargs)
                train_scores = model.post_rule(train_scores)
            else:
                train_scores = None
            all_scores.append(train_scores)
        return self.combiner(all_scores, anomaly_labels)

    def _get_anomaly_score(self, time_series: pd.DataFrame, time_series_prev: pd.DataFrame = None) -> pd.DataFrame:
        # Combine the (post-processed) anomaly labels of every used model.
        time_series, time_series_prev = TimeSeries.from_pd(time_series), TimeSeries.from_pd(time_series_prev)
        y = [
            model.get_anomaly_label(time_series, time_series_prev)
            for model, used in zip(self.models, self.models_used)
            if used
        ]
        return self.combiner(y, time_series).to_pd()
"""Ensembles of forecasters."""
import logging
import traceback
from typing import List, Optional, Tuple, Union
import pandas as pd
from merlion.evaluate.forecast import ForecastEvaluator, ForecastEvaluatorConfig
from merlion.models.ensemble.base import EnsembleConfig, EnsembleTrainConfig, EnsembleBase
from merlion.models.ensemble.combine import Mean
from merlion.models.forecast.base import ForecasterBase, ForecasterExogConfig, ForecasterExogBase
from merlion.utils.time_series import TimeSeries
logger = logging.getLogger(__name__)
class ForecasterEnsembleConfig(ForecasterExogConfig, EnsembleConfig):
    """
    Config class for an ensemble of forecasters.
    """

    _default_combiner = Mean(abs_score=False)

    def __init__(self, max_forecast_steps=None, target_seq_index=None, verbose=False, **kwargs):
        """
        :param max_forecast_steps: max # of steps to forecast for.
        :param target_seq_index: index of the univariate to forecast; propagated to every model.
        :param verbose: whether to log verbosely.
        """
        self.verbose = verbose
        # super() is deliberately given target_seq_index=None; the real value is applied
        # below via the property setter, after the models exist.
        super().__init__(max_forecast_steps=max_forecast_steps, target_seq_index=None, **kwargs)
        # Override the target_seq_index of all individual models after everything has been initialized
        # FIXME: doesn't work if models have heterogeneous transforms which change the dim of the input time series
        self.target_seq_index = target_seq_index
        if self.models is not None:
            assert all(model.target_seq_index == self.target_seq_index for model in self.models)

    @property
    def target_seq_index(self):
        return self._target_seq_index

    @target_seq_index.setter
    def target_seq_index(self, target_seq_index):
        # Setter both infers a missing index from the models and pushes the chosen index
        # down into every model's config so they all forecast the same univariate.
        if self.models is not None:
            # Get the target_seq_index from the models if None is given
            if target_seq_index is None:
                non_none_idxs = [m.target_seq_index for m in self.models if m.target_seq_index is not None]
                if len(non_none_idxs) > 0:
                    target_seq_index = non_none_idxs[0]
                assert all(m.target_seq_index in [None, target_seq_index] for m in self.models), (
                    f"Attempted to infer target_seq_index from the individual models in the ensemble, but "
                    f"not all models have the same target_seq_index. Got {[m.target_seq_index for m in self.models]}"
                )
            # Only override the target_seq_index from the models if there is one
            if target_seq_index is not None:
                for model in self.models:
                    model.config.target_seq_index = target_seq_index
        # Save the ensemble-level target_seq_index as a private variable
        self._target_seq_index = target_seq_index
class ForecasterEnsemble(EnsembleBase, ForecasterExogBase):
    """
    Class representing an ensemble of multiple forecasting models.
    """

    models: List[ForecasterBase]
    config_class = ForecasterEnsembleConfig

    @property
    def _default_train_config(self):
        # By default, hold out 20% of the training data to fit the combiner.
        return EnsembleTrainConfig(valid_frac=0.2)

    @property
    def require_even_sampling(self) -> bool:
        # Member forecasters enforce their own sampling requirements.
        return False

    def __init__(self, config: ForecasterEnsembleConfig = None, models: List[ForecasterBase] = None):
        super().__init__(config=config, models=models)
        for model in self.models:
            assert isinstance(
                model, ForecasterBase
            ), f"Expected all models in {type(self).__name__} to be forecasters, but got a {type(model).__name__}."
            # Each model must return forecasts in the original (untransformed) domain,
            # so the outputs of heterogeneous models are directly comparable.
            model.config.invert_transform = True

    def train_pre_process(
        self, train_data: TimeSeries, exog_data: TimeSeries = None, return_exog=None
    ) -> Union[TimeSeries, Tuple[TimeSeries, Union[TimeSeries, None]]]:
        # Infer the ensemble-level target_seq_index from the models; they must all agree.
        idxs = [model.target_seq_index for model in self.models]
        if any(i is not None for i in idxs):
            self.config.target_seq_index = [i for i in idxs if i is not None][0]
            assert all(i in [None, self.target_seq_index] for i in idxs), (
                f"All individual forecasters must have the same target_seq_index "
                f"to be used in a ForecasterEnsemble, but got the following "
                f"target_seq_idx values: {idxs}"
            )
        return super().train_pre_process(train_data=train_data, exog_data=exog_data, return_exog=return_exog)

    def resample_time_stamps(self, time_stamps: Union[int, List[int]], time_series_prev: TimeSeries = None):
        # No ensemble-level resampling; each member model resamples for itself.
        return time_stamps

    def train_combiner(self, all_model_outs: List[TimeSeries], target: TimeSeries, **kwargs) -> TimeSeries:
        # Combine on the target univariate only.
        return super().train_combiner(all_model_outs, target, target_seq_index=self.target_seq_index, **kwargs)

    def _train_with_exog(
        self, train_data: TimeSeries, train_config: EnsembleTrainConfig = None, exog_data: TimeSeries = None
    ) -> Tuple[Optional[TimeSeries], None]:
        """
        Trains each forecaster on the train split, fits the combiner (on the validation
        split when one is used), then re-trains the used models on the full data.
        """
        train, valid = self.train_valid_split(train_data, train_config)
        per_model_train_configs = train_config.per_model_train_configs
        if per_model_train_configs is None:
            per_model_train_configs = [None] * len(self.models)
        # NOTE(review): duplicated word ("received received") in the message below.
        assert len(per_model_train_configs) == len(self.models), (
            f"You must provide the same number of per-model train configs "
            f"as models, but received received {len(per_model_train_configs)} "
            f"train configs for an ensemble with {len(self.models)} models"
        )
        # Train individual models on the training data
        preds, errs = [], []
        eval_cfg = ForecastEvaluatorConfig(retrain_freq=None, horizon=self.get_max_common_horizon(train))
        # TODO: parallelize me
        for i, (model, cfg) in enumerate(zip(self.models, per_model_train_configs)):
            logger.info(f"Training & evaluating model {i+1}/{len(self.models)} ({type(model).__name__})...")
            try:
                train_kwargs = dict(train_config=cfg)
                (train_pred, train_err), pred = ForecastEvaluator(model=model, config=eval_cfg).get_predict(
                    train_vals=train, test_vals=valid, exog_data=exog_data, train_kwargs=train_kwargs
                )
                # Use validation-period predictions for the combiner when a split was made.
                preds.append(train_pred if valid is None else pred)
                errs.append(train_err if valid is None else None)
            except Exception:
                # A failing model is excluded from the ensemble rather than aborting training.
                logger.warning(
                    f"Caught an exception while training model {i+1}/{len(self.models)} ({type(model).__name__}). "
                    f"Model will not be used. {traceback.format_exc()}"
                )
                self.combiner.set_model_used(i, False)
                preds.append(None)
                errs.append(None)
        # Train the combiner on the train data if we didn't use validation data.
        if valid is None:
            pred = self.train_combiner(preds, train_data)
            err = None if any(e is None for e in errs) else self.combiner(errs, train_data)
            return pred, err
        # Otherwise, train the combiner on the validation data, and re-train the models on the full data
        self.train_combiner(preds, valid)
        full_preds, full_errs = [], []
        # TODO: parallelize me
        for i, (model, used, cfg) in enumerate(zip(self.models, self.models_used, per_model_train_configs)):
            model.reset()
            if used:
                logger.info(f"Re-training model {i+1}/{len(self.models)} ({type(model).__name__}) on full data...")
                pred, err = model.train(train_data, train_config=cfg, exog_data=exog_data)
            else:
                pred, err = None, None
            full_preds.append(pred)
            full_errs.append(err)
        # Only report a combined error estimate if every used model produced one.
        if any(used and e is None for used, e in zip(self.models_used, full_errs)):
            err = None
        else:
            err = self.combiner(full_errs, train_data)
        return self.combiner(full_preds, train_data), err

    def _forecast_with_exog(
        self,
        time_stamps: List[int],
        time_series_prev: pd.DataFrame = None,
        return_prev=False,
        exog_data: pd.DataFrame = None,
        exog_data_prev: pd.DataFrame = None,
    ) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
        """
        Forecasts with every used model and combines the results.
        ``err`` is ``None`` unless every used model reports a standard error.
        """
        preds, errs = [], []
        time_series_prev = TimeSeries.from_pd(time_series_prev)
        if exog_data is not None:
            # Member models expect one exogenous series covering both the history and the horizon.
            exog_data = pd.concat((exog_data_prev, exog_data)) if exog_data_prev is not None else exog_data
            exog_data = TimeSeries.from_pd(exog_data)
        for model, used in zip(self.models, self.models_used):
            if used:
                pred, err = model.forecast(
                    time_stamps=time_stamps,
                    time_series_prev=time_series_prev,
                    exog_data=exog_data,
                    return_prev=return_prev,
                )
                preds.append(pred)
                errs.append(err)
        pred = self.combiner(preds, None).to_pd()
        err = None if any(e is None for e in errs) else self.combiner(errs, None).to_pd()
        return pred, err
import copy
import logging
from typing import Any, Dict, List, Tuple, Union
import numpy as np
import pandas as pd
from merlion.models.base import ModelBase, Config
from merlion.models.ensemble.combine import CombinerBase, CombinerFactory, Mean
from merlion.models.factory import ModelFactory
from merlion.utils import TimeSeries
from merlion.utils.misc import AutodocABCMeta
from merlion.utils.resample import to_offset
logger = logging.getLogger(__name__)
class EnsembleConfig(Config):
    """
    An ensemble config contains each individual model in the ensemble, as well as the Combiner object
    to combine those models' outputs. The rationale behind placing the model objects in the EnsembleConfig
    (rather than in the Ensemble itself) is discussed in more detail in the documentation for `LayeredModel`.
    """

    _default_combiner = Mean(abs_score=False)
    models: List[ModelBase]

    def __init__(self, models: List[Union[ModelBase, Dict]] = None, combiner: CombinerBase = None, **kwargs):
        """
        :param models: A list of models or dicts representing them.
        :param combiner: The `CombinerBase` object to combine the outputs of the models in the ensemble.
        :param kwargs: Any additional kwargs for `Config`
        """
        super().__init__(**kwargs)
        if combiner is None:
            # Copy the class-level default so the shared instance is never mutated.
            self.combiner = copy.deepcopy(self._default_combiner)
        elif isinstance(combiner, dict):
            self.combiner = CombinerFactory.create(**combiner)
        else:
            self.combiner = combiner
        if models is not None:
            # Instantiate dict specs; deep-copy concrete models so the config owns its instances.
            models = [ModelFactory.create(**m) if isinstance(m, dict) else copy.deepcopy(m) for m in models]
        self.models = models

    def to_dict(self, _skipped_keys=None):
        """Serialize the config, representing each model by its class name + its own config dict."""
        _skipped_keys = _skipped_keys if _skipped_keys is not None else set()
        config_dict = super().to_dict(_skipped_keys.union({"models"}))
        if "models" not in _skipped_keys:
            if self.models is None:
                models = None
            else:
                models = [
                    None if m is None else dict(name=type(m).__name__, **m.config.to_dict(_skipped_keys))
                    for m in self.models
                ]
            config_dict["models"] = models
        return config_dict

    def __copy__(self):
        # Shallow copy: serialize everything except the models, then share the model objects.
        config_dict = super().to_dict(_skipped_keys={"models"})
        config_dict["models"] = self.models
        return self.from_dict(config_dict)

    def __deepcopy__(self, memodict=None):
        # FIX: default was a mutable ``{}`` shared across calls; ``None`` is the safe default.
        # ``memodict`` is accepted for protocol compatibility but intentionally not consulted,
        # since the models are deep-copied explicitly below.
        copied = copy.copy(self)
        copied.models = copy.deepcopy(self.models)
        return copied
class EnsembleTrainConfig:
    """
    Config object describing how to train an ensemble.
    """

    def __init__(self, valid_frac, per_model_train_configs=None):
        """
        :param valid_frac: fraction of training data to use for validation. Must lie in ``[0, 1)``.
        :param per_model_train_configs: list of train configs to use for
            individual models, one per model. ``None`` means that you use
            the default for all models. Specifying ``None`` for an individual
            model means that you use the default for that model.
        :raises ValueError: if ``valid_frac`` is outside ``[0, 1)``.
        """
        # FIX: validate with a real exception rather than ``assert``, which is silently
        # stripped when Python runs with the -O flag.
        if not 0 <= valid_frac < 1:
            raise ValueError(f"Expected 0 <= valid_frac < 1, but got {valid_frac}")
        self.valid_frac = valid_frac
        self.per_model_train_configs = per_model_train_configs
class EnsembleBase(ModelBase, metaclass=AutodocABCMeta):
    """
    An abstract class representing an ensemble of multiple models.
    """

    config_class = EnsembleConfig

    def __init__(self, config: EnsembleConfig = None, models: List[ModelBase] = None):
        """
        :param config: The ensemble's config
        :param models: The models in the ensemble. Only provide this argument if you did not specify ``config.models``.
        """
        msg = f"Expected exactly one of `config.models` or `models` when creating a {type(self).__name__}."
        if config is None and models is None:
            raise RuntimeError(f"{msg} Received neither.")
        elif config is not None and models is not None:
            # Both arguments are tolerated only if the config doesn't already carry models.
            if config.models is None:
                config.models = models
            else:
                raise RuntimeError(f"{msg} Received both.")
        elif config is None:
            config = self.config_class(models=models)
        super().__init__(config=config)

    @property
    def models(self):
        # The models live on the config; see EnsembleConfig's docstring for the rationale.
        return self.config.models

    @property
    def combiner(self) -> CombinerBase:
        """
        :return: the object used to combine model outputs.
        """
        return self.config.combiner

    @property
    def _default_train_config(self):
        return EnsembleTrainConfig(valid_frac=0.0)

    def reset(self):
        # Reset every member model as well as the combiner's trained state.
        for model in self.models:
            model.reset()
        self.combiner.reset()

    @property
    def models_used(self):
        # Before the combiner is trained (n_models is None), assume every model is used.
        if self.combiner.n_models is not None:
            return self.combiner.models_used
        else:
            return [True] * len(self.models)

    @property
    def _pandas_train(self):
        # Ensembles train on `TimeSeries` objects, not raw pandas DataFrames.
        return False

    def train_valid_split(
        self, transformed_train_data: TimeSeries, train_config: EnsembleTrainConfig
    ) -> Tuple[TimeSeries, Union[TimeSeries, None]]:
        """
        Chronologically split the training data into train & validation portions.
        Returns ``(train, None)`` when no validation data is needed.
        """
        valid_frac = train_config.valid_frac
        if valid_frac == 0 or not self.combiner.requires_training:
            return transformed_train_data, None
        t0 = transformed_train_data.t0
        tf = transformed_train_data.tf
        return transformed_train_data.bisect(t0 + (tf - t0) * (1 - valid_frac))

    def get_max_common_horizon(self, train_data=None):
        """
        :return: the largest forecast horizon (as a pandas offset) supported by every model
            that has a finite maximum horizon, or ``None`` if no model does.
        """
        horizons = []
        for model in self.models:
            dt = getattr(model, "timedelta", None)
            n = getattr(model, "max_forecast_steps", None)
            # An untrained model may not know its timedelta yet; try pre-processing to resolve it.
            if train_data is not None and n is not None and dt is None:
                # NOTE(review): bare ``except`` swallows every exception (even
                # KeyboardInterrupt); ``except Exception`` would be safer.
                try:
                    model.train_pre_process(train_data)
                except:
                    continue
                dt = getattr(model, "timedelta", None)
                n = getattr(model, "max_forecast_steps", None)
            if dt is not None and n is not None:
                horizons.append(to_offset(dt * n))
        if all(h is None for h in horizons):
            return None
        # NOTE(review): the argmin runs over the non-None horizons but indexes the full
        # list -- a latent bug if ``horizons`` can ever contain None entries. Presumably
        # ``to_offset`` never returns None here; confirm.
        i = np.argmin([pd.to_datetime(0) + h for h in horizons if h is not None])
        return horizons[i]

    def train_combiner(self, all_model_outs: List[TimeSeries], target: TimeSeries, **kwargs) -> TimeSeries:
        """
        Trains the combiner on the given model outputs & target, and logs which models are used.
        """
        combined = self.combiner.train(all_model_outs, target, **kwargs)
        if not any(self.models_used):
            raise RuntimeError("None of the individual models in the ensemble is used! Check logs for errors.")
        used = [f"#{i+1} ({type(m).__name__})" for i, (m, u) in enumerate(zip(self.models, self.models_used)) if u]
        logger.info(f"Models used (of {len(self.models)}): {', '.join(used)}")
        return combined

    def __getstate__(self):
        # Serialize each member model's state individually.
        state = super().__getstate__()
        if self.models is None:
            state["models"] = None
        else:
            state["models"] = [None if model is None else model.__getstate__() for model in self.models]
        return state

    def __setstate__(self, state):
        # Restore the member models' states in place, validating that shapes match.
        if "models" in state:
            model_states = state.pop("models")
            if self.models is None and model_states is not None:
                raise ValueError(f"`{type(self).__name__}.models` is None, but received a non-None `models` state.")
            elif self.models is None or model_states is None:
                self.config.models = None
            else:
                for i, (model, model_state) in enumerate(zip(self.models, model_states)):
                    if model is None and model_state is not None:
                        raise ValueError(f"One of the Ensemble models is None, but received a non-None model state.")
                    elif model is None or model_state is None:
                        self.models[i] = None
                    else:
                        model.__setstate__(model_state)
        super().__setstate__(state)

    def save(self, dirname: str, save_only_used_models=False, **save_config):
        """
        Saves the ensemble of models.

        :param dirname: directory to save the ensemble to
        :param save_only_used_models: whether to save only the models that are actually used by the ensemble.
        :param save_config: additional save config arguments
        """
        super().save(dirname=dirname, save_only_used_models=save_only_used_models, **save_config)

    def _save_state(
        self, state_dict: Dict[str, Any], filename: str = None, save_only_used_models=False, **save_config
    ) -> Dict[str, Any]:
        """
        Saves the model's state to the specified file, or just modifies the state_dict as needed.

        :param state_dict: The state dict to save.
        :param filename: The name of the file to save the model to.
        :param save_only_used_models: whether to save only the models that are actually used by the ensemble.
        :param save_config: additional configurations (if needed)
        :return: The state dict to save.
        """
        state_dict.pop("config", None)  # don't save the model's config in binary
        if self.models is not None:
            model_states = []
            for model, model_state, model_used in zip(self.models, state_dict["models"], self.models_used):
                if save_only_used_models and not model_used:
                    # Unused models become None placeholders, preserving list alignment.
                    model_states.append(None)
                else:
                    model_states.append(
                        model._save_state(model_state, None, save_only_used_models=save_only_used_models, **save_config)
                    )
            state_dict["models"] = model_states
        return super()._save_state(state_dict, filename, **save_config)

    def to_bytes(self, save_only_used_models=False, **save_config):
        """
        Converts the entire model state and configuration to a single byte object.

        :param save_only_used_models: whether to save only the models that are actually used by the ensemble.
        :param save_config: additional configurations (if needed)
        """
        return super().to_bytes(save_only_used_models=save_only_used_models, **save_config)
from abc import abstractmethod
from collections import OrderedDict
import copy
import logging
from typing import List, Optional, Union
import numpy as np
from merlion.evaluate.anomaly import TSADMetric
from merlion.evaluate.forecast import ForecastMetric
from merlion.utils import UnivariateTimeSeries, TimeSeries
from merlion.utils.misc import AutodocABCMeta
logger = logging.getLogger(__name__)
def _align_outputs(all_model_outs: List[TimeSeries], target: TimeSeries) -> List[Optional[TimeSeries]]:
    """
    Align every non-``None`` model output onto a common set of time stamps.

    When ``target`` is given, the reference stamps come from ``target`` restricted to the
    span covered by the model outputs; otherwise they are the union of all output stamps.
    ``None`` outputs are passed through unchanged.
    """
    non_none = [out for out in all_model_outs if out is not None]
    if not non_none:
        return [None] * len(all_model_outs)
    if target is None:
        # Union of every output's index, sorted & de-duplicated.
        time_stamps = np.unique(np.concatenate([out.to_pd().index for out in non_none]))
    else:
        # Restrict the target's index to the interval actually covered by the outputs.
        starts = [min(v.index[0] for v in out.univariates) for out in non_none]
        ends = [max(v.index[-1] for v in out.univariates) for out in non_none]
        time_stamps = target.to_pd()[min(starts) : max(ends)].index
    return [out.align(reference=time_stamps) if out is not None else None for out in all_model_outs]
class CombinerBase(metaclass=AutodocABCMeta):
    """
    Abstract base class for combining the outputs of multiple models. Subclasses
    should implement the abstract method ``_combine_univariates``. All combiners
    are callable objects.

    .. automethod:: __call__
    """

    def __init__(self, abs_score=False):
        """
        :param abs_score: whether to take the absolute value of the model
            outputs. Useful for anomaly detection.
        """
        self.abs_score = abs_score
        # Number of models this combiner was trained with; None until train() is called.
        self.n_models = None
        # Manual overrides of which models are used, keyed by model index.
        self._override_models_used = {}

    def reset(self):
        self._override_models_used = {}

    @property
    def requires_training(self):
        # Simple combiners need no training; model-selection subclasses override this.
        return False

    def to_dict(self, _skipped_keys=None):
        skipped_keys = set() if _skipped_keys is None else _skipped_keys
        state = {k: copy.deepcopy(v) for k, v in self.__dict__.items() if k not in skipped_keys}
        state["name"] = type(self).__name__
        return state

    @classmethod
    def from_dict(cls, state):
        state = copy.copy(state)
        state.pop("name", None)
        # n_models & overrides are restored after construction; they aren't __init__ args.
        n_models = state.pop("n_models", None)
        override_models_used = state.pop("_override_models_used", {})
        ret = cls(**state)
        ret.n_models = n_models
        # JSON round-trips turn int keys into strings, so coerce them back to ints.
        ret._override_models_used = {int(k): v for k, v in override_models_used.items()}
        return ret

    def __copy__(self):
        return self.from_dict(self.to_dict())

    def __deepcopy__(self, memodict={}):
        # NOTE(review): mutable default argument; harmless only because memodict is unused.
        # ``memodict=None`` would be the cleaner signature.
        return self.__copy__()

    @abstractmethod
    def _combine_univariates(self, univariates: List[UnivariateTimeSeries]):
        raise NotImplementedError

    def set_model_used(self, i: int, used: bool):
        # Manually force model i to be used / unused (e.g. after its training failed).
        self._override_models_used[i] = used

    def get_model_used(self, i: int):
        return self.models_used[i] if self.n_models is not None else self._override_models_used.get(i, True)

    @property
    def models_used(self) -> List[bool]:
        """
        :return: which models are actually used to make predictions.
        """
        assert self.n_models is not None, "Combiner must be trained to determine which models are used"
        return [self._override_models_used.get(i, used) for i, used in enumerate(self._models_used)]

    @property
    def _models_used(self) -> List[bool]:
        # Default: every model is used. Model-selection subclasses override this.
        return [True] * self.n_models

    def train(self, all_model_outs: List[TimeSeries], target: TimeSeries = None, **kwargs) -> TimeSeries:
        """
        Trains the model combination rule.

        :param all_model_outs: a list of time series, with each time series
            representing the output of a single model.
        :param target: a target time series (e.g. labels)
        :return: a single time series of combined model outputs on this training data.
        """
        self.n_models = len(all_model_outs)
        return self(all_model_outs, target, _check_dim=False)

    def __call__(self, all_model_outs: List[TimeSeries], target: TimeSeries, _check_dim=True) -> TimeSeries:
        """
        Applies the model combination rule to combine multiple model outputs.

        :param all_model_outs: a list of time series, with each time series
            representing the output of a single model.
        :param target: a target time series (e.g. labels)
        :return: a single time series of combined model outputs on this training data.
        """
        # If each model produced a list of time series (one per target segment), reduce
        # each model's list (and the target list) to a single concatenated series first.
        if isinstance(target, list):
            new_all_model_outs = []
            for i, out in enumerate(all_model_outs):
                if out is None:
                    new_all_model_outs.append(out)
                else:
                    assert isinstance(out, list) and len(out) == len(target), (
                        f"If target is a list of time series, each model output should be a "
                        f"list with the same length, but target has length {len(target)}, "
                        f"while model output {i} is a {type(out).__name__} of length {len(out)}"
                    )
                    new_all_model_outs.append(sum(out[1:], out[0]))
            target = sum(target[1:], target[0])
            all_model_outs = new_all_model_outs
        # All non-None outputs must share the same dimensionality.
        js = [j for j, out in enumerate(all_model_outs) if out is not None]
        assert len(js) > 0, "`all_model_outs` cannot all be `None`"
        j = js[0]
        assert all(out.dim == all_model_outs[j].dim for out in all_model_outs if out is not None)
        if self.n_models is None:
            self.n_models = len(all_model_outs)
        # Accept either the full list of model outputs (filtered down to the used models
        # here) or a pre-filtered list containing only the used models' outputs.
        models_used = self.models_used
        if len(all_model_outs) == self.n_models:
            j = 0
            all_model_outs = [x for x, used in zip(all_model_outs, models_used) if used]
        elif len(all_model_outs) != sum(models_used):
            raise RuntimeError(
                f"Expected either {self.n_models} or {sum(models_used)} "
                f"model outputs, but got {len(all_model_outs)} model outputs "
                f"instead."
            )
        # Align everything to a common set of time stamps, then combine dimension by dimension.
        all_model_outs = _align_outputs(all_model_outs, target)
        if all(out is None for out in all_model_outs):
            return None
        combined = OrderedDict()
        for i in range(all_model_outs[j].dim):
            name = all_model_outs[j].names[i]
            all_i = [None if ts is None else ts.univariates[ts.names[i]] for ts in all_model_outs]
            combined[name] = self._combine_univariates(all_i)
        return TimeSeries(combined)
class Mean(CombinerBase):
    """
    Combines multiple models by averaging their predictions.
    """

    @property
    def weights(self) -> np.ndarray:
        """
        :return: uniform weights over all the models currently in use.
        """
        n_used = sum(self.models_used)
        return np.full(shape=n_used, fill_value=1 / n_used)

    def _combine_univariates(self, univariates: List[UnivariateTimeSeries]) -> UnivariateTimeSeries:
        """
        Takes the weighted mean of the given univariates (``None`` entries are
        models which produced no output, and are skipped).
        """
        # Drop missing model outputs and re-normalize the weights over the rest.
        present = [u for u in univariates if u is not None]
        w = np.asarray([wi for wi, u in zip(self.weights, univariates) if u is not None])
        w = w / w.sum()
        reference = present[0]
        if self.abs_score and sum(self.models_used) > 1:
            # Average the magnitudes, then restore the elementwise majority sign.
            sign = np.median(np.sign([u.np_values for u in present]), axis=0)
            sign[sign == 0] = -1
            vals = sign * np.dot(w, [np.abs(u.np_values) for u in present])
        else:
            vals = np.dot(w, [u.np_values for u in present])
        return UnivariateTimeSeries(reference.time_stamps, vals, reference.name)
class Median(CombinerBase):
    """
    Combines multiple models by taking their median prediction.
    """

    def _combine_univariates(self, univariates: List[UnivariateTimeSeries]) -> UnivariateTimeSeries:
        """
        Takes the elementwise median of the given univariates (``None`` entries
        are models which produced no output, and are skipped).
        """
        present = [u for u in univariates if u is not None]
        reference = present[0]
        if self.abs_score and sum(self.models_used) > 1:
            # Median of magnitudes, re-signed by the elementwise majority sign.
            sign = np.median(np.sign([u.np_values for u in present]), axis=0)
            sign[sign == 0] = -1
            vals = sign * np.median([np.abs(u.np_values) for u in present], axis=0)
        else:
            vals = np.median([u.np_values for u in present], axis=0)
        return UnivariateTimeSeries(reference.time_stamps, vals, reference.name)
class Max(CombinerBase):
    """
    Combines multiple models by taking their max prediction.
    """

    def _combine_univariates(self, univariates: List[UnivariateTimeSeries]) -> UnivariateTimeSeries:
        """
        Takes the elementwise max of the given univariates (``None`` entries are
        models which produced no output, and are skipped).
        """
        non_none = [var for var in univariates if var is not None]
        v = non_none[0]
        if self.abs_score and sum(self.models_used) > 1:
            signs = np.median(np.sign([var.np_values for var in non_none]), axis=0)
            signs[signs == 0] = -1
            # BUG FIX: this branch previously took the *median* of the absolute
            # scores (copy-paste from the Median combiner). The Max combiner
            # should take the elementwise max of the magnitudes, consistent with
            # the class docstring and the non-abs_score branch below.
            new_vals = signs * np.max([np.abs(var.np_values) for var in non_none], axis=0)
        else:
            new_vals = np.max([var.np_values for var in non_none], axis=0)
        return UnivariateTimeSeries(v.time_stamps, new_vals, v.name)
class ModelSelector(Mean):
    """
    Takes the mean of the best models, where the models are ranked according to
    the value of an evaluation metric.
    """

    def __init__(self, metric: Union[str, TSADMetric, ForecastMetric], abs_score=False):
        """
        :param metric: the evaluation metric to use
        :param abs_score: whether to take the absolute value of the model
            outputs. Useful for anomaly detection.
        """
        super().__init__(abs_score=abs_score)
        # The metric may be given as "<EnumClassName>.<MemberName>", e.g.
        # "TSADMetric.F1" — the format produced by to_dict() below.
        if isinstance(metric, str):
            metric_cls, name = metric.split(".", maxsplit=1)
            metric_cls = {c.__name__: c for c in [ForecastMetric, TSADMetric]}[metric_cls]
            metric = metric_cls[name]
        self.metric = metric
        # Per-model metric values; populated by train() (or restored by from_dict()).
        self.metric_values = None

    @property
    def invert(self):
        """
        :return: ``True`` when a smaller metric value is better, i.e. the model
            ranking must be inverted.
        """
        if isinstance(self.metric, ForecastMetric):
            return True
        if self.metric is TSADMetric.MeanTimeToDetect:
            return True
        return False

    @property
    def requires_training(self):
        # Metric values needed for ranking only exist after train().
        return True

    def to_dict(self, _skipped_keys=None):
        # Serialize the metric as "<EnumClassName>.<MemberName>", the format
        # that __init__ knows how to parse back.
        skipped_keys = set() if _skipped_keys is None else _skipped_keys
        state = super().to_dict(skipped_keys.union({"metric"}))
        state["metric"] = f"{type(self.metric).__name__}.{self.metric.name}"
        return state

    @classmethod
    def from_dict(cls, state):
        # Extract the metric values from the state (to set manually later)
        metric_values = state.pop("metric_values", None)
        ret = super().from_dict(state)
        ret.metric_values = metric_values
        return ret

    @property
    def _models_used(self) -> List[bool]:
        assert self.n_models is not None, "Combiner must be trained to determine which models are used"
        # Best metric value among the models not manually overridden to "unused".
        used_metric_values = [v for i, v in enumerate(self.metric_values) if self._override_models_used.get(i, True)]
        val = np.min(used_metric_values) if self.invert else np.max(used_metric_values)
        # NOTE(review): the comparison is against the *full* metric_values list,
        # so an overridden-out model whose value ties with the best would also be
        # marked used — confirm this is intended.
        return (np.asarray(self.metric_values) == val).tolist()

    def train(self, all_model_outs: List[TimeSeries], target: TimeSeries = None, **kwargs) -> TimeSeries:
        """
        Evaluates ``self.metric`` on each model's output against ``target`` to
        rank the models, then returns the combined output via ``__call__``.

        :param all_model_outs: one output per model — a time series, or a list of
            time series when ``target`` is a list.
        :param target: the ground truth to evaluate each model output against.
        :return: a single time series of combined model outputs on this training data.
        """
        metric_values = []
        self.n_models = len(all_model_outs)
        for i, model_out in enumerate(all_model_outs):
            if not self._override_models_used.get(i, True):
                metric_values.append(np.inf if self.invert else -np.inf)  # worst-possible value
            elif target is None and self.metric_values is None:
                # No ground truth and no stored values — presumably a neutral
                # placeholder so every model ties; confirm against callers.
                metric_values.append(1)
            elif target is not None and not isinstance(target, list):
                metric_values.append(self.metric.value(ground_truth=target, predict=model_out, **kwargs))
            elif isinstance(target, list):
                assert isinstance(model_out, list) and len(model_out) == len(target), (
                    f"If target is a list of time series, each model output should be a "
                    f"list with the same length, but target has length {len(target)}, "
                    f"while model output {i} is a {type(model_out).__name__} of length "
                    f"{len(model_out)}"
                )
                # Average the metric over the per-series evaluations.
                vals = [self.metric.value(ground_truth=y, predict=yhat, **kwargs) for y, yhat in zip(target, model_out)]
                metric_values.append(np.mean(vals))
        # When target is None but stored metric values exist, no branch above
        # appends, so the list stays short and the stored values are kept.
        if len(metric_values) == len(all_model_outs):
            self.metric_values = metric_values
        return self(all_model_outs, target)
class MetricWeightedMean(ModelSelector):
    """
    Computes a weighted average of the model outputs, with each model's weight
    proportional to its metric value (or the inverse thereof).
    """

    @property
    def _models_used(self) -> List[bool]:
        # Unlike ModelSelector, do not restrict to the best model(s):
        # fall back to the base-class notion of which models are used.
        return CombinerBase._models_used.fget(self)

    @property
    def weights(self) -> np.ndarray:
        """
        :return: normalized weights proportional to the metric values (inverted
            when a smaller metric value is better).
        """
        raw = np.asarray(self.metric_values)
        if self.invert:
            raw = 1 / raw
        return raw / raw.sum()
class CombinerFactory(object):
    """
    Factory object for creating combiner objects.
    """

    @classmethod
    def create(cls, name: str, **kwargs) -> CombinerBase:
        """
        :param name: the class name of the combiner to create.
        :param kwargs: keyword arguments used to initialize the combiner
            (passed to the combiner class's ``from_dict``).
        :return: the new combiner object.
        :raises KeyError: if ``name`` is not a known combiner class name.
        """
        # Renamed the comprehension variable (was `cls`, shadowing the
        # classmethod's own parameter) and restored the final line, which was
        # corrupted by an extraction artifact in the original source.
        alias = {c.__name__: c for c in [Mean, Median, Max, ModelSelector, MetricWeightedMean]}
        combiner_class = alias[name]
        return combiner_class.from_dict(kwargs)
from enum import Enum
from functools import partial
import logging
import math
import re
from typing import Iterable, Sequence, Union
import numpy as np
import pandas as pd
from pandas.tseries.frequencies import to_offset as pd_to_offset
import scipy.stats
logger = logging.getLogger(__name__)
class AlignPolicy(Enum):
    """Policies for aligning multiple univariate time series."""

    # NOTE(review): the member semantics are implemented by alignment logic
    # elsewhere (not in this module). The names suggest: resample to the union
    # (OuterJoin) / intersection (InnerJoin) of timestamps, to a fixed reference
    # index, or to a fixed sampling granularity — confirm against the
    # TimeSeries.align implementation.
    OuterJoin = 0
    InnerJoin = 1
    FixedReference = 2
    FixedGranularity = 3
class AggregationPolicy(Enum):
    """
    Aggregation policies. Values are partial functions for
    pandas.core.resample.Resampler methods.
    """

    # The ``partial`` wrapper keeps the Enum from treating each lambda as a
    # method of the class; calling ``policy.value(df, ...)`` dispatches to the
    # corresponding pandas aggregation method.
    Mean = partial(lambda df, *args, **kwargs: df.mean(*args, **kwargs))
    Sum = partial(lambda df, *args, **kwargs: df.sum(*args, **kwargs))
    Median = partial(lambda df, *args, **kwargs: df.median(*args, **kwargs))
    First = partial(lambda df, *args, **kwargs: df.first(*args, **kwargs))
    Last = partial(lambda df, *args, **kwargs: df.last(*args, **kwargs))
    Min = partial(lambda df, *args, **kwargs: df.min(*args, **kwargs))
    Max = partial(lambda df, *args, **kwargs: df.max(*args, **kwargs))
class MissingValuePolicy(Enum):
    """
    Missing value imputation policies. Values are partial functions for ``pd.Series`` methods.
    """

    # The ``partial`` wrapper keeps the Enum from treating each lambda as a
    # method of the class; extra keywords bound by ``partial`` (e.g. ``method``)
    # are forwarded to the underlying pandas call.
    FFill = partial(lambda df, *args, **kwargs: df.ffill(*args, **kwargs))
    """Fill gap with the first value before the gap."""
    BFill = partial(lambda df, *args, **kwargs: df.bfill(*args, **kwargs))
    """Fill gap with the first value after the gap."""
    Nearest = partial(lambda df, *args, **kwargs: df.interpolate(*args, **kwargs), method="nearest")
    """Replace missing value with the value closest to it."""
    Interpolate = partial(lambda df, *args, **kwargs: df.interpolate(*args, **kwargs), method="time")
    """Fill in missing values by linear interpolation."""
    ZFill = partial(lambda df, *args, **kwargs: df.replace(*args, **kwargs), to_replace=np.nan, value=0)
    """Replace missing values with zeros."""
def to_pd_datetime(timestamp):
    """
    Converts a timestamp (or list/iterable of timestamps) to pandas Datetime,
    truncated at the millisecond.
    """
    # Already a DatetimeIndex: return it unchanged.
    if isinstance(timestamp, pd.DatetimeIndex):
        return timestamp
    # A single Unix timestamp, in units of seconds.
    if isinstance(timestamp, (int, float)):
        return pd.to_datetime(int(timestamp * 1000), unit="ms")
    # An iterable of numeric Unix timestamps, or a numeric numpy array
    # (numpy integer dtypes are not Python ints, hence the second check).
    numeric_iter = isinstance(timestamp, Iterable) and all(isinstance(t, (int, float)) for t in timestamp)
    numeric_array = isinstance(timestamp, np.ndarray) and timestamp.dtype in [int, np.float32, np.float64]
    if numeric_iter or numeric_array:
        timestamp = pd.to_datetime(np.asarray(timestamp).astype(float) * 1000, unit="ms")
    return pd.to_datetime(timestamp)
def to_offset(dt):
    """
    Converts a time gap to a ``pd.Timedelta`` if possible, otherwise a ``pd.DateOffset``.
    """
    if dt is None:
        return None
    if isinstance(dt, (int, float)):
        dt = pd.to_timedelta(dt, unit="s")
    elif isinstance(dt, str):
        # These suffixes denote calendar-based (or ambiguous) granularities such
        # as months/quarters/years, which have no fixed Timedelta — leave them
        # for pd.DateOffset below.
        ambiguous_suffixes = ("M", "m", "MS", "Q", "Y", "y")
        if not any(re.match(r"\d+" + suffix, dt) for suffix in ambiguous_suffixes):
            try:
                dt = pd.to_timedelta(dt)
            except ValueError:
                pass
    return dt if isinstance(dt, pd.Timedelta) else pd_to_offset(dt)
def to_timestamp(t):
    """
    Converts a datetime (or sequence of datetimes) to a Unix timestamp, in seconds.
    """
    # Numbers — scalar or iterable of Python numbers — are assumed to already
    # be Unix timestamps.
    already_numeric = isinstance(t, (int, float)) or (
        isinstance(t, Iterable) and all(isinstance(ti, (int, float)) for ti in t)
    )
    if already_numeric:
        return np.asarray(t)
    # Numeric numpy arrays (whose elements are not Python ints/floats) are also
    # already Unix timestamps.
    if isinstance(t, np.ndarray) and t.dtype in [int, np.float32, np.float64]:
        return t
    # Convert datetimes to milliseconds since the epoch, then to seconds.
    return np.asarray(t).astype("datetime64[ms]").astype(float) / 1000
def granularity_str_to_seconds(granularity: Union[str, float, int, None]) -> Union[float, None]:
    """
    Converts a string/float/int granularity (representing a timedelta) to the
    number of seconds it represents, truncated at the millisecond.
    """
    if granularity is None:
        return None
    if isinstance(granularity, (float, int)):
        # Numeric granularities are already in units of seconds.
        millis = np.floor(granularity * 1000)
    else:
        # Strings are parsed by pandas. NOTE: calendar-based offsets (e.g.
        # months) have no fixed ``nanos`` and will raise here.
        millis = np.floor(pd_to_offset(granularity).nanos / 1e6)
    return (millis / 1000).item()
def get_date_offset(time_stamps: pd.DatetimeIndex, reference: pd.DatetimeIndex) -> pd.DateOffset:
    """
    Returns the date offset one must add to ``time_stamps`` so its last timestamp
    aligns with that of ``reference``.
    """
    # Compare the two final timestamps, component by component.
    last, target = time_stamps[-1], reference[-1]
    return pd.DateOffset(
        months=12 * (target.year - last.year) + (target.month - last.month),
        days=target.day - last.day,
        hours=target.hour - last.hour,
        minutes=target.minute - last.minute,
        seconds=target.second - last.second,
        microseconds=target.microsecond - last.microsecond,
    )
def infer_granularity(time_stamps, return_offset=False):
    """
    Infers the granularity of a list of time stamps.

    :param time_stamps: a sequence of timestamps (any format accepted by
        ``to_pd_datetime``). Must contain at least 2 entries.
    :param return_offset: whether to also return the `pd.DateOffset` needed to
        align a ``pd.date_range`` at the inferred frequency with the timestamps.
    :return: the inferred frequency, or ``(freq, offset)`` if ``return_offset``.
    :raises ValueError: if fewer than 2 timestamps are given.
    """
    # See if pandas can infer the granularity on its own
    orig_t = to_pd_datetime(time_stamps)
    if len(orig_t) > 2:
        freq = pd.infer_freq(orig_t)
    elif len(orig_t) == 2:
        freq = orig_t[1] - orig_t[0]
    else:
        raise ValueError("Need at least 2 timestamps to infer a granularity.")
    offset = pd.to_timedelta(0)
    if freq is not None:
        freq = pd_to_offset(freq)
        return (freq, offset) if return_offset else freq

    # Otherwise, start with the most commonly occurring timedelta.
    # NOTE(review): scipy.stats.mode(...)[0].item() relies on the pre-1.11
    # return shape of scipy.stats.mode — confirm against the pinned scipy version.
    dt = pd.to_timedelta(scipy.stats.mode(orig_t[1:] - orig_t[:-1], axis=None)[0].item())

    # Check if the data could be sampled at a k-monthly granularity: candidate
    # k's are those for which k months (28 to 31 days each) could span the modal
    # timedelta; try both month-start ("MS") and month-end ("M") anchoring.
    candidate_freqs = [dt]
    for k in range(math.ceil(dt / pd.Timedelta(days=31)), math.ceil(dt / pd.Timedelta(days=28))):
        candidate_freqs.extend([pd_to_offset(f"{k}MS"), pd_to_offset(f"{k}M")])

    # Pick the sampling frequency which has the most overlap with the actual timestamps
    freq2idx = {f: pd.date_range(start=orig_t[0], end=orig_t[-1], freq=f) for f in candidate_freqs}
    freq2offset = {f: get_date_offset(time_stamps=freq2idx[f], reference=orig_t) for f in candidate_freqs}
    freq = sorted(freq2idx.keys(), key=lambda f: len((freq2idx[f] + freq2offset[f]).intersection(orig_t)))[-1]
    return (freq, freq2offset[freq]) if return_offset else freq
def reindex_df(
    df: Union[pd.Series, pd.DataFrame], reference: Sequence[Union[int, float]], missing_value_policy: MissingValuePolicy
):
    """
    Reindexes a Datetime-indexed dataframe ``df`` to have the same time stamps
    as a reference sequence of timestamps. Imputes missing values with the given
    `MissingValuePolicy`.

    :param df: the time-indexed ``pd.Series`` or ``pd.DataFrame`` to reindex.
    :param reference: the reference sequence of timestamps.
    :param missing_value_policy: the `MissingValuePolicy` used to impute values
        at reference timestamps not present in ``df``.
    :return: ``df`` reindexed at exactly the reference timestamps.
    """
    reference = to_pd_datetime(reference)
    # Reindex over the union of the timestamps first, so the imputation policy
    # can use every observed value; restrict to the reference index at the end.
    all_times = np.unique(np.concatenate((reference.values, df.index.values)))
    df = df.reindex(index=all_times)
    # ffill()/bfill() clean up any leading/trailing NaN's the policy leaves behind.
    df = missing_value_policy.value(df).ffill().bfill()
    # FIX: the final line of the original was corrupted by an extraction
    # artifact; restored the intended return statement.
    return df.loc[reference]
from bisect import bisect_left, bisect_right
import itertools
import logging
from typing import Any, Callable, Dict, Iterable, Mapping, Sequence, Tuple, Union
import warnings
import numpy as np
import pandas as pd
from merlion.utils.misc import ValIterOrderedDict
from merlion.utils.resample import (
AggregationPolicy,
AlignPolicy,
MissingValuePolicy,
get_date_offset,
infer_granularity,
reindex_df,
to_pd_datetime,
to_timestamp,
to_offset,
)
logger = logging.getLogger(__name__)
_time_col_name = "time"
class UnivariateTimeSeries(pd.Series):
    """
    Please read the `tutorial <tutorials/TimeSeries>` before reading this API doc.
    This class is a time-indexed ``pd.Series`` which represents a univariate
    time series. For the most part, it supports all the same features as
    ``pd.Series``, with the following key differences to iteration and indexing:

    1. Iterating over a `UnivariateTimeSeries` is implemented as

       .. code-block:: python

          for timestamp, value in univariate:
              # do stuff...

       where ``timestamp`` is a Unix timestamp, and ``value`` is the
       corresponding time series value.
    2. Integer index: ``u[i]`` yields the tuple ``(u.time_stamps[i], u.values[i])``
    3. Slice index: ``u[i:j:k]`` yields a new
       ``UnivariateTimeSeries(u.time_stamps[i:j:k], u.values[i:j:k])``

    The class also supports the following additional features:

    1. ``univariate.time_stamps`` returns the list of Unix timestamps, and
       ``univariate.values`` returns the list of the time series values. You
       may access the ``pd.DatetimeIndex`` directly with ``univariate.index``
       (or its ``np.ndarray`` representation with ``univariate.np_time_stamps``),
       and the ``np.ndarray`` of values with ``univariate.np_values``.
    2. ``univariate.concat(other)`` will concatenate the UnivariateTimeSeries
       ``other`` to the right end of ``univariate``.
    3. ``left, right = univariate.bisect(t)`` will split the univariate at the
       given timestamp ``t``.
    4. ``window = univariate.window(t0, tf)`` will return the subset of the time
       series occurring between timestamps ``t0`` (inclusive) and ``tf``
       (non-inclusive)
    5. ``series = univariate.to_pd()`` will convert the `UnivariateTimeSeries`
       into a regular ``pd.Series`` (for compatibility).
    6. ``univariate = UnivariateTimeSeries.from_pd(series)`` uses a time-indexed
       ``pd.Series`` to create a `UnivariateTimeSeries` object directly.

    .. document special functions
    .. automethod:: __getitem__
    .. automethod:: __iter__
    """

    def __init__(
        self,
        time_stamps: Union[None, Sequence[Union[int, float]]],
        values: Sequence[float],
        name: str = None,
        freq="1h",
    ):
        """
        :param time_stamps: a sequence of Unix timestamps. You may specify
            ``None`` if you only have ``values`` with no specific time stamps.
        :param values: a sequence of univariate values, where ``values[i]``
            occurs at time ``time_stamps[i]``
        :param name: the name of the univariate time series
        :param freq: if ``time_stamps`` is not provided, the univariate is
            assumed to be sampled at frequency ``freq``. ``freq`` may be a
            string (e.g. ``"1h"``), timedelta, or ``int``/``float`` (in units
            of seconds).
        """
        is_pd = isinstance(values, pd.Series)
        if name is None and is_pd:
            name = values.name
        if is_pd and isinstance(values.index, pd.DatetimeIndex):
            # Already a time-indexed series: adopt it directly.
            super().__init__(values, name=name)
        elif is_pd and values.index.dtype == "O":
            # Object-dtype index (e.g. strings): parse it into datetimes.
            super().__init__(values.values, name=name, index=pd.to_datetime(values.index))
        else:
            if time_stamps is None:
                # No timestamps given: synthesize an index at frequency `freq`.
                freq = to_offset(freq)
                if is_pd and values.index.dtype in ("int64", "float64"):
                    index = pd.to_datetime(0) + freq * values.index
                else:
                    index = pd.date_range(start=0, periods=len(values), freq=freq)
            else:
                index = to_pd_datetime(time_stamps)
            super().__init__(np.asarray(values), index=index, name=name, dtype=float)
            if len(self) >= 3 and self.index.freq is None:
                self.index.freq = pd.infer_freq(self.index)
        self.index.name = _time_col_name

    @property
    def np_time_stamps(self):
        """
        :rtype: np.ndarray
        :return: the ``numpy`` representation of this time series's Unix timestamps
        """
        return to_timestamp(self.index.values)

    @property
    def np_values(self):
        """
        :rtype: np.ndarray
        :return: the ``numpy`` representation of this time series's values
        """
        return super().values

    @property
    def time_stamps(self):
        """
        :rtype: List[float]
        :return: the list of Unix timestamps for the time series
        """
        return self.np_time_stamps.tolist()

    @property
    def values(self):
        """
        :rtype: List[float]
        :return: the list of values for the time series.
        """
        return self.np_values.tolist()

    @property
    def t0(self):
        """
        :rtype: float
        :return: the first timestamp in the univariate time series.
        """
        return self.np_time_stamps[0]

    @property
    def tf(self):
        """
        :rtype: float
        :return: the final timestamp in the univariate time series.
        """
        return self.np_time_stamps[-1]

    def is_empty(self):
        """
        :rtype: bool
        :return: True if the univariate is empty, False if not.
        """
        return len(self) == 0

    def __iter__(self):
        """
        The i'th item in the iterator is the tuple ``(self.time_stamps[i], self.values[i])``.
        """
        return itertools.starmap(lambda t, x: (t.item(), x.item()), zip(self.np_time_stamps, self.np_values))

    def __getitem__(self, i: Union[int, slice]):
        """
        :param i: integer index or slice
        :rtype: Union[Tuple[float, float], UnivariateTimeSeries]
        :return: ``(self.time_stamps[i], self.values[i])`` if ``i`` is
            an integer. ``UnivariateTimeSeries(self.time_series[i], self.values[i])``
            if ``i`` is a slice.
        """
        if isinstance(i, int):
            return self.np_time_stamps[i].item(), self.np_values[i].item()
        elif isinstance(i, slice):
            return UnivariateTimeSeries.from_pd(self.iloc[i])
        else:
            raise KeyError(
                f"Indexing a `UnivariateTimeSeries` with key {i} of "
                f"type {type(i).__name__} is not supported. Try "
                f"using loc[] or iloc[] for more complicated "
                f"indexing."
            )

    def __eq__(self, other):
        # NOTE(review): assumes `other` is a univariate of the same length —
        # mismatched lengths may raise from the elementwise comparison.
        return self.time_stamps == other.time_stamps and (self.np_values == other.np_values).all()

    def copy(self, deep=True):
        """
        Copies the `UnivariateTimeSeries`. Simply a wrapper around the
        ``pd.Series.copy()`` method.
        """
        return UnivariateTimeSeries.from_pd(super().copy(deep=deep))

    def concat(self, other):
        """
        Concatenates the `UnivariateTimeSeries` ``other`` to the right of this one.

        :param UnivariateTimeSeries other: another `UnivariateTimeSeries`
        :rtype: UnivariateTimeSeries
        :return: concatenated univariate time series
        """
        return UnivariateTimeSeries.from_pd(pd.concat((self, other)), name=self.name)

    def bisect(self, t: float, t_in_left: bool = False):
        """
        Splits the time series at the point where the given timestamp occurs.

        :param t: a Unix timestamp or datetime object. Everything before time
            ``t`` is in the left split, and everything after time ``t`` is in
            the right split.
        :param t_in_left: if ``True``, ``t`` is in the left split. Otherwise,
            ``t`` is in the right split.
        :rtype: Tuple[UnivariateTimeSeries, UnivariateTimeSeries]
        :return: the left and right splits of the time series.
        """
        t = to_pd_datetime(t)
        if t_in_left:
            i = bisect_right(self.index, t)
        else:
            i = bisect_left(self.index, t)
        return self[:i], self[i:]

    def window(self, t0: float, tf: float, include_tf: bool = False):
        """
        :param t0: The timestamp/datetime at the start of the window (inclusive)
        :param tf: The timestamp/datetime at the end of the window (inclusive
            if ``include_tf`` is ``True``, non-inclusive otherwise)
        :param include_tf: Whether to include ``tf`` in the window.
        :rtype: UnivariateTimeSeries
        :return: The subset of the time series occurring between timestamps
            ``t0`` (inclusive) and ``tf`` (included if ``include_tf`` is
            ``True``, excluded otherwise).
        """
        times = self.index
        t0, tf = to_pd_datetime(t0), to_pd_datetime(tf)
        i_0 = bisect_left(times, t0)
        i_f = bisect_right(times, tf) if include_tf else bisect_left(times, tf)
        return self[i_0:i_f]

    def to_dict(self) -> Dict[float, float]:
        """
        :return: A dictionary representing the data points in the time series.
        """
        return dict(zip(self.time_stamps, self.values))

    @classmethod
    def from_dict(cls, obj: Dict[float, float], name=None):
        """
        :param obj: A dictionary of timestamp - value pairs
        :param name: the name to assign the output
        :rtype: UnivariateTimeSeries
        :return: the `UnivariateTimeSeries` represented by series.
        """
        time_stamps, values = [], []
        for point in sorted(obj.items(), key=lambda p: p[0]):
            time_stamps.append(point[0])
            values.append(point[1])
        return cls(time_stamps, values, name)

    def to_pd(self) -> pd.Series:
        """
        :return: A pandas Series representing the time series, indexed by time.
        """
        return pd.Series(self.np_values, index=self.index, name=self.name)

    @classmethod
    def from_pd(cls, series: Union[pd.Series, pd.DataFrame], name=None, freq="1h"):
        """
        :param series: a ``pd.Series``. If it has a``pd.DatetimeIndex``, we will use that index for the timestamps.
            Otherwise, we will create one at the specified frequency.
        :param name: the name to assign the output
        :param freq: if ``series`` is not indexed by time, this is the frequency at which we will assume it is sampled.
        :rtype: UnivariateTimeSeries
        :return: the `UnivariateTimeSeries` represented by series.
        """
        if series is None:
            return None
        if isinstance(series, TimeSeries) and series.dim == 1:
            series = list(series.univariates)[0]
        if isinstance(series, UnivariateTimeSeries):
            if name is not None:
                series.name = name
            return series
        if isinstance(series, pd.DataFrame) and series.shape[1] == 1:
            series = series.iloc[:, 0]
        return cls(time_stamps=None, values=series.astype(float), name=name, freq=freq)

    def to_ts(self, name=None):
        """
        :param name: a name to assign the univariate when converting it to a
            time series. Can override the existing name.
        :rtype: TimeSeries
        :return: A `TimeSeries` representing this univariate time series.
        """
        if self.name is None and name is None:
            return TimeSeries([self])
        else:
            # BUG FIX: an explicitly-given `name` now overrides `self.name`.
            # Previously the precedence was inverted (`self.name` always won),
            # contradicting the documented "Can override the existing name".
            name = self.name if name is None else name
            return TimeSeries({name: self})

    @classmethod
    def empty(cls, name=None):
        """
        :rtype: `UnivariateTimeSeries`
        :return: A Merlion `UnivariateTimeSeries` that has empty timestamps and values.
        """
        return cls([], [], name)
class TimeSeries:
"""
Please read the `tutorial <tutorials/TimeSeries>` before reading this API doc.
This class represents a general multivariate time series as a wrapper around
a number of (optionally named) `UnivariateTimeSeries`. A `TimeSeries` object
is initialized as ``time_series = TimeSeries(univariates)``, where
``univariates`` is either a list of `UnivariateTimeSeries`, or a dictionary
mapping string names to their corresponding `UnivariateTimeSeries` objects.
Because the individual ``univariates`` need not be sampled at the same times, an
important concept for `TimeSeries` is *alignment*. We say that a `TimeSeries`
is *aligned* if all of its univariates have observations sampled at the exact
set set of times.
One may access the `UnivariateTimeSeries` comprising this `TimeSeries` in four ways:
1. Iterate over the individual univariates using
.. code-block:: python
for var in time_series.univariates:
# do stuff with each UnivariateTimeSeries var
2. Access an individual `UnivariateTimeSeries` by name as
``time_series.univariates[name]``. If you supplied unnamed univariates to
the constructor (i.e. using a list), the name of a univariate will just
be its index in that list.
3. Get the list of each univariate's name with ``time_series.names``.
4. Iterate over named univariates as
.. code-block:: python
for name, var in time_series.items():
# do stuff
Note that this is equivalent to iterating over
``zip(time_series.names, time_series.univariates)``.
This class supports the following additional features as well:
1. Interoperability with ``pandas``
- ``df = time_series.to_pd()`` yields a time-indexed ``pd.DataFrame``,
where each column (with the appropriate name) corresponds to a
variable. Missing values are ``NaN``.
- ``time_series = TimeSeries.from_pd(df)`` takes a time-indexed
``pd.DataFrame`` and returns a corresponding `TimeSeries` object
(missing values are handled appropriately). The order of
``time_series.univariates`` is the order of ``df.keys()``.
2. Automated alignment: ``aligned = time_series.align()`` resamples each of
``time_series.univariates`` so that they all have the same timestamps.
By default, this is done by taking the union of all timestamps present
in any individual univariate time series, and imputing missing values
via interpolation. See the method documentation for details on how you
may configure the alignment policy.
3. Transparent indexing and iteration for `TimeSeries` which have all
univariates aligned (i.e. they all have the same timestamps)
- Get the length and shape of the time series (equal to the number of
observations in each individual univariate). Note that if the time
series is not aligned, we will return the length/shape of an equivalent
``pandas`` dataframe and emit a warning.
- Index ``time_series[i] = (times[i], (x1[i], ..., xn[i]))``
(assuming ``time_series`` has ``n`` aligned univariates with timestamps
``times``, and ``xk = time_series.univariates[k-1].values``). Slice
returns a `TimeSeries` object and works as one would expect.
- Assuming ``time_series`` has ``n`` variables, you may iterate with
.. code-block:: python
for t_i, (x1_i, ..., xn_i) in time_series:
# do stuff
Notably, this lets you call ``times, val_vectors = zip(*time_series)``
4. Time-based queries for any time series
- Get the two sub `TimeSeries` before and after a timestamp ``t`` via
``left, right = time_series.bisect(t)``
- Get the sub `TimeSeries` between timestamps ``t0`` (inclusive) and
``tf`` (non-inclusive) via ``window = time_series.window(t0, tf)``
5. Concatenation: two `TimeSeries` may be concatenated (in time) as
``time_series = time_series_1 + time_series_2``.
.. document special functions
.. automethod:: __getitem__
.. automethod:: __iter__
"""
def __init__(
    self,
    univariates: Union[Mapping[Any, UnivariateTimeSeries], Iterable[UnivariateTimeSeries]],
    *,
    freq: str = "1h",
    check_aligned=True,
):
    """
    :param univariates: a mapping of names to `UnivariateTimeSeries`, or an
        iterable of (optionally named) `UnivariateTimeSeries`.
    :param freq: NOTE(review): accepted but not referenced in this body —
        confirm whether it is consumed elsewhere.
    :param check_aligned: whether to check (and warn) if the univariates are
        sampled at different sets of timestamps.
    :raises TypeError: if ``univariates`` is neither a mapping nor an iterable.
    """
    # Type/length checking of univariates
    if isinstance(univariates, Mapping):
        univariates = ValIterOrderedDict((str(k), v) for k, v in univariates.items())
        assert all(isinstance(var, UnivariateTimeSeries) for var in univariates.values())
    elif isinstance(univariates, Iterable):
        univariates = list(univariates)
        assert all(isinstance(var, UnivariateTimeSeries) for var in univariates)
        names = [str(var.name) for var in univariates]
        if len(set(names)) == len(names):
            # NOTE(review): names are already str()-converted above, so
            # `name is None` can never be true here (a None name arrives as the
            # string "None"); presumably the intent was to replace unnamed
            # univariates with their positional index — confirm.
            names = [str(i) if name is None else name for i, name in enumerate(names)]
            univariates = ValIterOrderedDict(zip(names, univariates))
        else:
            # Duplicate names: fall back to positional indices as names.
            univariates = ValIterOrderedDict((str(i), v) for i, v in enumerate(univariates))
    else:
        raise TypeError(
            "Expected univariates to be either a `Sequence[UnivariateTimeSeries]` or a "
            "`Mapping[Hashable, UnivariateTimeSeries]`."
        )
    assert len(univariates) > 0

    # Assign all the individual univariate series the appropriate names
    for name, var in univariates.items():
        var.name = name

    # Set self.univariates and check if they are perfectly aligned
    self.univariates = univariates
    if check_aligned and self.dim > 1:
        t = self.univariates[self.names[0]].time_stamps
        self._is_aligned = all(self.univariates[name].time_stamps == t for name in self.names[1:])
    else:
        self._is_aligned = len(univariates) <= 1

    # Raise a warning if the univariates are too mis-aligned, i.e. their start
    # (or end) times differ by more than 10% of the shortest covered span.
    if check_aligned and not self.is_aligned:
        all_t0 = [var.index[0] for var in univariates if len(var) > 0]
        all_tf = [var.index[-1] for var in univariates if len(var) > 0]
        min_elapsed = min(tf - t0 for t0, tf in zip(all_t0, all_tf))
        min_t0, max_t0 = min(all_t0), max(all_t0)
        min_tf, max_tf = min(all_tf), max(all_tf)
        if max_t0 - min_t0 > 0.1 * min_elapsed:
            logger.warning(
                f"The earliest univariate starts at {min_t0}, but the "
                f"latest univariate starts at {max_t0}, a difference of "
                f"{max_t0 - min_t0}. This is more than 10% of the length "
                f"of the shortest univariate ({min_elapsed}). You may "
                f"want to check that the univariates cover the same "
                f"window of time.",
                stack_info=True,
            )
        if max_tf - min_tf > 0.1 * min_elapsed:
            logger.warning(
                f"The earliest univariate ends at {min_tf}, but the "
                f"latest univariate ends at {max_tf}, a difference of "
                f"{max_tf - min_tf}. This is more than 10% of the length "
                f"of the shortest univariate ({min_elapsed}). You may "
                f"want to check that the univariates cover the same "
                f"window of time.",
                stack_info=True,
            )
@property
def names(self):
    """:return: The list of the names of the univariates."""
    # Note: ValIterOrderedDict iterates over *values*, so .keys() is required.
    return [*self.univariates.keys()]
def items(self):
    """:return: Iterator over ``(name, univariate)`` tuples."""
    mapping = self.univariates
    return mapping.items()
@property
def dim(self) -> int:
    """
    :return: The dimension of the time series, i.e. the number of variables.
    """
    n_vars = len(self.univariates)
    return n_vars
def rename(self, mapper: Union[Iterable[str], Mapping[str, str], Callable[[str], str]]):
    """
    :param mapper: Dict-like or function transformations to apply to the
        univariate names. Can also be an iterable of new univariate names.
    :return: the time series with renamed univariates.
    """
    # Normalize the mapper into an explicit list of new names.
    if isinstance(mapper, Callable):
        new_names = [mapper(old) for old in self.names]
    elif isinstance(mapper, Mapping):
        new_names = [mapper.get(old, old) for old in self.names]
    else:
        new_names = mapper
    # ValIterOrderedDict iterates values, so this pairs each new name with its univariate.
    renamed = ValIterOrderedDict(zip(new_names, self.univariates))
    return self.__class__(renamed)
@property
def is_aligned(self) -> bool:
    """
    :return: Whether all individual variable time series are sampled at the
        same time stamps, i.e. they are aligned.
    """
    aligned = self._is_aligned
    return aligned
@property
def index(self):
    """
    :return: a ``pd.DatetimeIndex`` over the (sorted, de-duplicated) union of
        the univariates' timestamps.
    """
    stamps = self.np_time_stamps
    return to_pd_datetime(stamps)
@property
def np_time_stamps(self):
    """
    :rtype: np.ndarray
    :return: the ``numpy`` representation of this time series's Unix timestamps
    """
    # Sorted union (duplicates removed) of every univariate's timestamps.
    per_var = [var.np_time_stamps for var in self.univariates]
    return np.unique(np.concatenate(per_var))
@property
def time_stamps(self):
    """
    :rtype: List[float]
    :return: the list of Unix timestamps for the time series
    """
    stamps = self.np_time_stamps
    return stamps.tolist()
@property
def t0(self) -> float:
    """
    :rtype: float
    :return: the first timestamp in the time series.
    """
    return min([var.t0 for var in self.univariates])
@property
def tf(self) -> float:
    """
    :rtype: float
    :return: the final timestamp in the time series.
    """
    return max([var.tf for var in self.univariates])
@staticmethod
def _txs_to_vec(txs):
    """
    Collapses ``[(t, x_1), ..., (t, x_k)]`` — one (time, value) pair per
    univariate at a shared index — into a single ``(t, (x_1, ..., x_k))`` tuple.
    """
    t = txs[0][0]
    values = tuple(x for _, x in txs)
    return t, values
def __iter__(self):
    """
    Only supported if all individual variable time series are sampled at the
    same time stamps. The i'th item of the iterator is the tuple
    ``(time_stamps[i], tuple(var.values[i] for var in self.univariates))``.
    """
    if not self.is_aligned:
        raise RuntimeError(
            "The univariates comprising this time series are not aligned "
            "(they have different time stamps), but alignment is required "
            "to iterate over the time series."
        )
    # zip(*...) walks all the univariates in lockstep, one index at a time.
    return (self._txs_to_vec(txs) for txs in zip(*self.univariates))
def __getitem__(self, i: Union[int, slice]):
    """
    Only supported if all individual variable time series are sampled at the
    same time stamps.

    :param i: integer index or slice.
    :rtype: Union[Tuple[float, Tuple[float]], TimeSeries]
    :return: ``(time_stamps[i], tuple(var.values[i] for var in self.univariates))``
        if ``i`` is an integer; ``TimeSeries([var[i] for var in self.univariates])``
        if ``i`` is a slice.
    """
    if not self.is_aligned:
        raise RuntimeError(
            "The univariates comprising this time series are not aligned "
            "(they have different time stamps), but alignment is required "
            "to index into the time series."
        )
    if isinstance(i, slice):
        # A slice of an aligned series is aligned by construction, so bypass
        # the (potentially) expensive alignment check.
        univariates = ValIterOrderedDict([(k, v[i]) for k, v in self.items()])
        ret = TimeSeries(univariates, check_aligned=False)
        ret._is_aligned = True
        return ret
    if isinstance(i, int):
        return self._txs_to_vec([var[i] for var in self.univariates])
    raise KeyError(
        f"Indexing a `TimeSeries` with key {i} of type "
        f"{type(i).__name__} not supported. Perhaps you "
        f"meant to index into `time_series.univariates`, "
        f"rather than `time_series`?"
    )
def is_empty(self) -> bool:
    """
    :return: whether the time series is empty (no univariate has any data)
    """
    # Empty iff no variable contains a single observation.
    return not any(len(var) > 0 for var in self.univariates)
def squeeze(self) -> UnivariateTimeSeries:
    """
    :return: the sole `UnivariateTimeSeries` if this time series is univariate;
        otherwise this `TimeSeries` itself.
    """
    if self.dim != 1:
        return self
    # Univariate case: unwrap the single variable by name.
    return self.univariates[self.names[0]]
def __len__(self):
"""
:return: the number of observations in the time series
"""
if not self.is_aligned:
warning = (
"The univariates comprising this time series are not aligned "
"(they have different time stamps). The length returned is "
"equal to the length of the _union_ of all time stamps present "
"in any of the univariates."
)
warnings.warn(warning)
logger.warning(warning)
return len(self.to_pd())
return len(self.univariates[self.names[0]])
@property
def shape(self) -> Tuple[int, int]:
    """
    :return: the shape of this time series, i.e. ``(self.dim, len(self))``
    """
    num_vars = self.dim
    num_obs = len(self)
    return num_vars, num_obs
def __add__(self, other):
"""
Concatenates the `TimeSeries` ``other`` to the right of this one.
:param TimeSeries other:
:rtype: TimeSeries
:return: concatenated time series
"""
return self.concat(other, axis=0)
def concat(self, other, axis=0):
    """
    Concatenates the `TimeSeries` ``other`` on the time axis if ``axis = 0`` or the variable axis if ``axis = 1``.

    :param TimeSeries other: the time series to concatenate with this one.
    :param int axis: 0 to concatenate along the time axis (both series must have the same variable names,
        in the same order); 1 to concatenate along the variable axis (variable names should be disjoint).

    :rtype: TimeSeries
    :return: concatenated time series
    """
    assert axis in [0, 1]
    if axis == 0:
        assert self.dim == other.dim, (
            f"Cannot concatenate a {self.dim}-dimensional time series with a {other.dim}-dimensional "
            f"time series on the time axis."
        )
        assert self.names == other.names, (
            f"Cannot concatenate time series on the time axis if they have two different sets of "
            f"variable names, {self.names} and {other.names}."
        )
        # Concatenate each pair of same-named univariates end-to-end.
        univariates = ValIterOrderedDict(
            [(name, ts0.concat(ts1)) for (name, ts0), ts1 in zip(self.items(), other.univariates)]
        )
        ret = TimeSeries(univariates, check_aligned=False)
        ret._is_aligned = self.is_aligned and other.is_aligned
        return ret
    else:
        # BUG FIX: ``other.items()`` must be unpacked with ``*`` so that we iterate over the
        # (name, var) pairs of BOTH series; previously the un-starred items view itself was
        # appended as a single element, raising a ValueError during tuple unpacking.
        univariates = ValIterOrderedDict([(name, var.copy()) for name, var in [*self.items(), *other.items()]])
        ret = TimeSeries(univariates, check_aligned=False)
        # The result is aligned only if both inputs are aligned on identical time stamps.
        ret._is_aligned = self.is_aligned and other.is_aligned and self.time_stamps == other.time_stamps
        return ret
def __eq__(self, other):
if self.dim != other.dim:
return False
return all(u == v for u, v in zip(self.univariates, other.univariates))
def __repr__(self):
return repr(self.to_pd())
def bisect(self, t: float, t_in_left: bool = False):
    """
    Splits the time series at the point where the given timestamp ``t`` occurs.

    :param t: a Unix timestamp or datetime object. Everything before time ``t`` is in the left split,
        and everything after time ``t`` is in the right split.
    :param t_in_left: if ``True``, ``t`` is in the left split. Otherwise, ``t`` is in the right split.

    :rtype: Tuple[TimeSeries, TimeSeries]
    :return: the left and right splits of the time series.
    """
    # Bisect each univariate independently, preserving variable names & order.
    left_vars, right_vars = ValIterOrderedDict(), ValIterOrderedDict()
    for name, var in self.items():
        left_vars[name], right_vars[name] = var.bisect(t, t_in_left)
    if not self.is_aligned:
        return TimeSeries(left_vars), TimeSeries(right_vars)
    # Bisecting an aligned series yields aligned splits, so skip the
    # (potentially expensive) alignment check on construction.
    left = TimeSeries(left_vars, check_aligned=False)
    right = TimeSeries(right_vars, check_aligned=False)
    left._is_aligned = True
    right._is_aligned = True
    return left, right
def window(self, t0: float, tf: float, include_tf: bool = False):
    """
    :param t0: The timestamp/datetime at the start of the window (inclusive)
    :param tf: The timestamp/datetime at the end of the window (inclusive
        if ``include_tf`` is ``True``, non-inclusive otherwise)
    :param include_tf: Whether to include ``tf`` in the window.

    :return: The subset of the time series occurring between timestamps ``t0`` (inclusive) and ``tf``
        (included if ``include_tf`` is ``True``, excluded otherwise).
    :rtype: `TimeSeries`
    """
    # Window each univariate independently & rebuild the multivariate series.
    windowed = ValIterOrderedDict()
    for name, var in self.items():
        windowed[name] = var.window(t0, tf, include_tf)
    return TimeSeries(windowed)
def to_pd(self) -> pd.DataFrame:
    """
    :return: A pandas DataFrame (indexed by time) which represents this time
        series. Each variable corresponds to a column of the DataFrame.
        Timestamps which are present for one variable but not another, are
        represented with NaN.
    """
    # Build the union of all (de-duplicated) univariate time indexes.
    t = pd.DatetimeIndex([])
    univariates = [(name, var.to_pd()[~var.index.duplicated()]) for name, var in self.items()]
    for _, var in univariates:
        t = t.union(var.index)
    t = t.sort_values()
    t.name = _time_col_name
    if len(t) >= 3:
        # Attach an inferred frequency when there are enough points to infer one.
        # NOTE(review): assigning to `freq` assumes the inferred freq is consistent
        # with the index — confirm against the supported pandas versions.
        t.freq = pd.infer_freq(t)
    # Start from an all-NaN frame & fill in each variable's values at its own
    # time stamps, leaving NaN where a variable has no observation.
    df = pd.DataFrame(np.full((len(t), len(univariates)), np.nan), index=t, columns=self.names)
    for name, var in univariates:
        df.loc[var.index, name] = var[~var.index.duplicated()]
    return df
def to_csv(self, file_name, **kwargs):
    """
    Writes the time series to a CSV file at ``file_name``. Keyword arguments
    are forwarded to ``pandas.DataFrame.to_csv``.
    """
    df = self.to_pd()
    df.to_csv(file_name, **kwargs)
@classmethod
def from_pd(cls, df: Union[pd.Series, pd.DataFrame, np.ndarray], check_times=True, drop_nan=True, freq="1h"):
    """
    :param df: A ``pandas.DataFrame`` with a ``DatetimeIndex``. Each column corresponds to a different variable of
        the time series, and the key of column (in sorted order) give the relative order of those variables in
        ``self.univariates``. Missing values should be represented with ``NaN``. May also be a ``pandas.Series``
        for single-variable time series.
    :param check_times: whether to check that all times in the index are unique (up to the millisecond) and sorted.
    :param drop_nan: whether to drop all ``NaN`` entries before creating the time series. Specifying ``False`` is
        useful if you wish to impute the values on your own.
    :param freq: if ``df`` is not indexed by time, this is the frequency at which we will assume it is sampled.

    :rtype: TimeSeries
    :return: the `TimeSeries` object corresponding to ``df``.
    """
    # Dispatch on the input type: pass-throughs, Series, raw ndarray, then DataFrame.
    if df is None:
        return None
    elif isinstance(df, TimeSeries):
        return df
    elif isinstance(df, UnivariateTimeSeries):
        return cls([df])
    elif isinstance(df, pd.Series):
        if drop_nan:
            df = df[~df.isna()]
        return cls({df.name: UnivariateTimeSeries.from_pd(df)})
    elif isinstance(df, np.ndarray):
        # Treat each column of the (possibly 1-d) array as a variable, sampled at ``freq``.
        arr = df.reshape(len(df), -1).T
        ret = cls([UnivariateTimeSeries(time_stamps=None, values=v, freq=freq) for v in arr], check_aligned=False)
        ret._is_aligned = True
        return ret
    elif not isinstance(df, pd.DataFrame):
        df = pd.DataFrame(df)

    # Time series is not aligned iff there are missing values
    aligned = df.shape[1] == 1 or not df.isna().any().any()

    # Check for a string-type index
    if df.index.dtype == "O":
        df = df.copy()
        df.index = pd.to_datetime(df.index)

    # Make sure there are no time duplicates (by milliseconds) if desired
    dt_index = isinstance(df.index, pd.DatetimeIndex)
    if check_times:
        if not df.index.is_unique:
            df = df[~df.index.duplicated()]
        if not df.index.is_monotonic_increasing:
            df = df.sort_index()
        if dt_index:
            # Round times down to millisecond resolution & de-duplicate at that granularity.
            times = df.index.values.astype("datetime64[ms]").astype(np.int64)
            df = df.reindex(pd.to_datetime(np.unique(times), unit="ms"), method="bfill")
    elif not aligned and not dt_index and df.index.dtype not in ("int64", "float64"):
        raise RuntimeError(
            f"We only support instantiating time series from a "
            f"``pd.DataFrame`` with missing values when the data frame is "
            f"indexed by time, int, or float. This dataframe's index is of "
            f"type {type(df.index).__name__}"
        )

    if drop_nan and not aligned:
        # Drop NaN entries per-column, so each univariate keeps its own time stamps.
        ret = cls(
            ValIterOrderedDict(
                [(k, UnivariateTimeSeries.from_pd(ser[~ser.isna()], freq=freq)) for k, ser in df.items()]
            ),
            check_aligned=False,
        )
    else:
        ret = cls(
            ValIterOrderedDict([(k, UnivariateTimeSeries.from_pd(ser, freq=freq)) for k, ser in df.items()]),
            check_aligned=False,
        )
    ret._is_aligned = aligned
    return ret
@classmethod
def from_ts_list(cls, ts_list, *, check_aligned=True):
    """
    :param Iterable[TimeSeries] ts_list: iterable of time series we wish to form a multivariate time series with
    :param bool check_aligned: whether to check if the output time series is aligned
    :rtype: TimeSeries
    :return: A multivariate `TimeSeries` created from all the time series in the inputs.
    """
    ts_list = list(ts_list)
    name_sets = [set(ts.names) for ts in ts_list]
    # Keep the original variable names only when no name appears in more than
    # one of the input series; otherwise fall back to positional naming.
    names_disjoint = all(not (a & b) for a, b in itertools.combinations(name_sets, 2))
    if names_disjoint:
        univariates = ValIterOrderedDict(itertools.chain.from_iterable(ts.items() for ts in ts_list))
    else:
        univariates = list(itertools.chain.from_iterable(ts.univariates for ts in ts_list))
    return cls(univariates, check_aligned=check_aligned)
def align(
    self,
    *,
    reference: Sequence[Union[int, float]] = None,
    granularity: Union[str, int, float] = None,
    origin: int = None,
    remove_non_overlapping=True,
    alignment_policy: AlignPolicy = None,
    aggregation_policy: AggregationPolicy = AggregationPolicy.Mean,
    missing_value_policy: MissingValuePolicy = MissingValuePolicy.Interpolate,
):
    """
    Aligns all the univariates comprising this multivariate time series so that they all have the same time stamps.

    :param reference: A specific set of timestamps we want the resampled time series to contain. Required if
        ``alignment_policy`` is `AlignPolicy.FixedReference`. Overrides other alignment policies if specified.
    :param granularity: The granularity (in seconds) of the resampled time time series. Defaults to the GCD time
        difference between adjacent elements of ``time_series`` (otherwise). Ignored if ``reference`` is given or
        ``alignment_policy`` is `AlignPolicy.FixedReference`. Overrides other alignment policies if specified.
    :param origin: The first timestamp of the resampled time series. Only used if the alignment policy is
        `AlignPolicy.FixedGranularity`.
    :param remove_non_overlapping: If ``True``, we will only keep the portions of the univariates that overlap with
        each other. For example, if we have 3 univariates which span timestamps [0, 3600], [60, 3660], and
        [30, 3540], we will only keep timestamps in the range [60, 3540]. If ``False``, we will keep all timestamps
        produced by the resampling.
    :param alignment_policy: The policy we want to use to align the time series.

        - `AlignPolicy.FixedReference` aligns each single-variable time
          series to ``reference``, a user-specified sequence of timestamps.
        - `AlignPolicy.FixedGranularity` resamples each single-variable time
          series at the same granularity, aggregating windows and imputing
          missing values as desired.
        - `AlignPolicy.OuterJoin` returns a time series with the union of
          all timestamps present in any single-variable time series.
        - `AlignPolicy.InnerJoin` returns a time series with the intersection
          of all timestamps present in all single-variable time series.
    :param aggregation_policy: The policy used to aggregate windows of adjacent observations when downsampling.
    :param missing_value_policy: The policy used to impute missing values created when upsampling.

    :rtype: TimeSeries
    :return: The resampled multivariate time series.
    """
    # Empty series: nothing to align; warn if the caller expected resampling.
    if self.is_empty():
        if reference is not None or granularity is not None:
            logger.warning(
                "Attempting to align an empty time series to a set of reference time stamps or a "
                "fixed granularity. Doing nothing."
            )
        return TimeSeries.from_pd(self.to_pd())

    # Priority order: explicit reference > explicit granularity > alignment_policy.
    if reference is not None or alignment_policy is AlignPolicy.FixedReference:
        if reference is None:
            raise RuntimeError("`reference` is required when using `alignment_policy` FixedReference.")

        if alignment_policy not in [None, AlignPolicy.FixedReference]:
            logger.warning(
                f"TimeSeries.align() received alignment policy "
                f"{alignment_policy.name}, but a reference sequence of "
                f"timestamps was also provided. `reference` is higher "
                f"priority than `alignment_policy`, so we are using "
                f"alignment policy FixedReference."
            )

        if granularity is not None:
            logger.warning(
                "TimeSeries.align() received a granularity at which to "
                "resample the time series, but a reference sequence of "
                "timestamps was also provided. `reference` is higher "
                "priority than `granularity`, so we are using alignment "
                "policy FixedReference, not FixedGranularity."
            )

        # Align each univariate time series to the reference timestamps
        df = reindex_df(self.to_pd(), reference, missing_value_policy)
        return TimeSeries.from_pd(df, check_times=False)

    elif granularity is not None or alignment_policy is AlignPolicy.FixedGranularity:
        if alignment_policy not in [None, AlignPolicy.FixedGranularity]:
            logger.warning(
                f"TimeSeries.align() received alignment policy "
                f"{alignment_policy.name}, but a desired granularity at "
                f"which to resample the time series was also received. "
                f"`granularity` is higher priority than `alignment_policy`, "
                f"so we are using alignment policy FixedGranularity."
            )

        # Get the granularity in seconds, if one is specified and the granularity is a fixed number of seconds.
        # Otherwise, infer the granularity. If we have a non-fixed granularity, record that fact.
        fixed_granularity = True
        if granularity is None:
            granularity = infer_granularity(self.time_stamps)
        granularity = to_offset(granularity)
        if isinstance(granularity, pd.DateOffset):
            try:
                # Offsets with no fixed nanosecond duration (e.g. months) raise here.
                granularity.nanos
            except ValueError:
                fixed_granularity = False

        # Remove non-overlapping portions of univariates if desired
        df = self.to_pd()
        if remove_non_overlapping:
            t0 = max(v.index[0] for v in self.univariates if len(v) > 0)
            tf = min(v.index[-1] for v in self.univariates if len(v) > 0)
            df = df[t0:tf]

        # Resample at the desired granularity, setting the origin as needed
        if origin is None and isinstance(granularity, pd.Timedelta):
            # Choose the origin so the final resampled point lands on the last timestamp.
            elapsed = df.index[-1] - df.index[0]
            origin = df.index[0] + elapsed % granularity
        direction = None if not fixed_granularity else "right"
        new_df = df.resample(granularity, origin=to_pd_datetime(origin), label=direction, closed=direction)

        # Apply aggregation & missing value imputation policies
        new_df = aggregation_policy.value(new_df)
        if missing_value_policy is MissingValuePolicy.Interpolate and not fixed_granularity:
            new_df = new_df.interpolate()
        else:
            new_df = missing_value_policy.value(new_df)

        # Add the date offset only if we're resampling to a non-fixed granularity
        if not fixed_granularity:
            new_df.index += get_date_offset(time_stamps=new_df.index, reference=df.index)

        # Do any forward-filling/back-filling to cover all the indices
        return TimeSeries.from_pd(new_df[df.index[0] : df.index[-1]].ffill().bfill(), check_times=False)

    elif alignment_policy in [None, AlignPolicy.OuterJoin]:
        # Outer join is the union of all timestamps appearing in any of the
        # univariate time series. We just need to apply the missing value
        # policy to self.to_pd() (and bfill()/ffill() to take care of any
        # additional missing values at the start/end), and then return
        # from_pd().
        df = missing_value_policy.value(self.to_pd())
        if remove_non_overlapping:
            t0 = max(v.index[0] for v in self.univariates if len(v) > 0)
            tf = min(v.index[-1] for v in self.univariates if len(v) > 0)
            df = df[t0:tf]
        else:
            df = df.ffill().bfill()
        return TimeSeries.from_pd(df, check_times=False)

    elif alignment_policy is AlignPolicy.InnerJoin:
        # Inner join is the intersection of all the timestamps appearing in
        # all of the univariate time series. Just get the indexes of the
        # univariate sub time series where all variables are present.
        # TODO: add a resampling step instead of just indexing?
        ts = [set(var.np_time_stamps) for var in self.univariates]
        t = ts[0]
        for tprime in ts[1:]:
            t = t.intersection(tprime)
        if len(t) == 0:
            raise RuntimeError(
                "No time stamps are shared between all variables! Try again with a different alignment policy."
            )
        t = to_pd_datetime(sorted(t))
        return TimeSeries.from_pd(self.to_pd().loc[t], check_times=False)

    else:
        raise RuntimeError(f"Alignment policy {alignment_policy.name} not supported")
def assert_equal_timedeltas(time_series: UnivariateTimeSeries, granularity, offset=None):
    """
    Checks that all time deltas in the time series are equal, either to each
    other, or a pre-specified timedelta (in seconds).

    :param time_series: univariate time series whose index is checked.
    :param granularity: the expected pandas frequency of the index.
    :param offset: optional timedelta added to the expected date range; defaults to 0.
    """
    # With <= 2 points there is at most one delta, so there is nothing to compare.
    if len(time_series) <= 2:
        return
    index = time_series.index
    offset = pd.to_timedelta(0) if offset is None else offset
    # Reconstruct the ideal index at the given granularity & compare it against
    # the tail of the actual index, tolerating < 2ms of deviation.
    expected = pd.date_range(start=index[0], end=index[-1], freq=granularity) + offset
    deviation = expected - time_series.index[-len(expected) :]
    max_deviation = np.abs(deviation.total_seconds().values).max()
    assert max_deviation < 2e-3, f"Data must have the same time difference between each element of the time series" | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/merlion/utils/time_series.py | 0.914305 | 0.707417 | time_series.py | pypi |
from abc import abstractmethod
from typing import List
from math import sqrt
import numpy as np
class IStat:
    """
    Abstract base class for statistics that can be computed incrementally,
    with emphasis on recency-weighted variants. Subclasses implement ``add``
    and ``drop`` to update the statistic one sample at a time; the batch
    helpers simply apply them over iterables.
    """

    def __init__(self, value: float = None, n: int = 0):
        """
        :param value: Initial value of the statistic. Defaults to None.
        :param n: Initial sample size. Defaults to 0.
        """
        if n > 0:
            # A nonempty initial sample must come with an initial value.
            assert value is not None
        self.value = value
        self.n = n

    @property
    def n(self):
        return self._n

    @n.setter
    def n(self, n: int):
        assert n >= 0
        self._n = n

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value: float):
        self._value = value

    @abstractmethod
    def add(self, x):
        """
        Add a new value to update the statistic.

        :param x: new value to add to the sample.
        """
        raise NotImplementedError

    @abstractmethod
    def drop(self, x):
        """
        Drop a value to update the statistic.

        :param x: value to drop from the sample.
        """
        raise NotImplementedError

    def add_batch(self, batch: List[float]):
        """
        Add a batch of new values to update the statistic.

        :param batch: new values to add to the sample.
        """
        for sample in batch:
            self.add(sample)

    def drop_batch(self, batch: List[float]):
        """
        Drop a batch of values to update the statistic.

        :param batch: values to drop from the sample.
        """
        for sample in batch:
            self.drop(sample)
class Mean(IStat):
    """
    Incrementally computes the arithmetic mean of a stream of numbers by
    maintaining a running sum alongside the inherited sample count.
    """

    def __init__(self, value: float = None, n: int = 0):
        super().__init__(value=value, n=n)
        # Running sum of all samples; None until the first sample arrives.
        self.sum = n * value if n > 0 else None

    @IStat.value.getter
    def value(self):
        # The mean is the running sum divided by the sample count.
        return None if self.sum is None else self.sum / self.n

    def add(self, x):
        assert isinstance(x, (int, float))
        self.n += 1
        # The very first sample initializes the running sum.
        (self._add_first if self.n == 1 else self._add)(x)

    def _add_first(self, x):
        self.sum = x

    def _add(self, x):
        self.sum += float(x)

    def drop(self, x):
        assert isinstance(x, (int, float))
        if self.n == 0:
            # Nothing to drop from an empty sample.
            return
        self.n -= 1
        if self.n == 0:
            self.sum = None
        else:
            self.sum -= float(x)
class Variance(IStat):
    """
    Class for incrementally computing the variance of a series of numbers.
    """

    # Subclasses may override this to change how the two moments are tracked
    # (e.g. RecencyWeightedVariance uses an exponential moving average).
    mean_class = Mean

    def __init__(self, ex_value: float = None, ex2_value: float = None, n: int = 0, ddof: int = 1):
        """
        :param ex_value: Initial value of the first moment (mean).
        :param ex2_value: Initial value of the second moment.
        :param n: Initial sample size.
        :param ddof: The delta degrees of freedom to use when correcting
            the estimate of the variance.

        .. math::
            \\text{Var}(x_i) = \\text{E}(x_i^2) - \\text{E}(x_i)^2
        """
        if ex_value is not None and ex2_value is not None:
            super().__init__(value=ex2_value - ex_value**2, n=n)
        else:
            super().__init__()
        # Track the first & second moments as (possibly recency-weighted) means.
        self.ex = self.mean_class(value=ex_value, n=n)
        self.ex2 = self.mean_class(value=ex2_value, n=n)
        self.ddof = ddof

    def add(self, x):
        self.n += 1
        self.ex.add(x)
        self.ex2.add(x**2)

    def drop(self, x):
        if self.n == 0:
            return
        self.n -= 1
        self.ex.drop(x)
        self.ex2.drop(x**2)

    @property
    def true_value(self):
        # Uncorrected variance E[x^2] - E[x]^2, clamped at 0 to guard against
        # small negative values caused by floating point error.
        if self.ex2.value is None or self.ex.value is None:
            return None
        return max(0, self.ex2.value - self.ex.value**2)

    @property
    def corrected_value(self):
        # Bessel-style correction by n / (n - ddof); infinite when the sample
        # is too small for the requested degrees of freedom.
        if self.true_value is None:
            return None
        elif self.n - self.ddof <= 0:
            return np.inf
        return (self.n / (self.n - self.ddof)) * self.true_value

    @IStat.value.getter
    def value(self):
        # The small epsilon keeps the returned variance strictly positive.
        if self.corrected_value is None:
            return None
        return self.corrected_value + 1e-16

    @property
    def sd(self):
        # Corrected standard deviation, with the same positivity epsilon.
        if self.true_value is None:
            return None
        return sqrt(self.corrected_value) + 1e-16

    @property
    def se(self):
        # Standard error of the mean: sd / sqrt(n).
        if self.sd is None:
            return None
        return self.sd / sqrt(self.n)
class ExponentialMovingAverage(Mean):
    """
    Class for incrementally computing the exponential moving average of a series of numbers.
    """

    def __init__(self, recency_weight: float = 0.1, **kwargs):
        """
        :param recency_weight: Recency weight to use when updating the
            exponential moving average.

        Letting ``w`` be the recency weight,

        .. math::
            \\begin{align*}
            \\text{EMA}_w(x_0) & = x_0 \\\\
            \\text{EMA}_w(x_t) & = w \\cdot x_t + (1-w) \\cdot \\text{EMA}_w(x_{t-1})
            \\end{align*}
        """
        super().__init__(**kwargs)
        self.recency_weight = recency_weight

    @property
    def recency_weight(self):
        return self._recency_weight

    @recency_weight.setter
    def recency_weight(self, weight: float):
        # Weight must be in (0, 1]; 1 means only the newest sample counts.
        assert 0.0 < weight <= 1.0
        self._recency_weight = weight

    @IStat.value.getter
    def value(self):
        # Unlike `Mean`, the EMA is stored directly rather than derived from a running sum.
        return self._value

    def _add_first(self, x):
        # If an initial value was supplied at construction, treat the first
        # observed sample as a regular EMA update instead of overwriting it.
        if self.value is None:
            self.value = x
        else:
            self._add(x)

    def _add(self, x):
        # EMA recurrence: w * x + (1 - w) * previous value.
        self.value = (1 - self.recency_weight) * self.value + self.recency_weight * x

    def drop(self, x):
        """
        Exponential Moving Average does not support dropping values
        """
        pass
class RecencyWeightedVariance(Variance):
    """
    Class for incrementally computing the recency-weighted variance of a series of numbers.
    """

    # Both moments are tracked with exponential moving averages instead of plain means.
    mean_class = ExponentialMovingAverage

    def __init__(self, recency_weight: float, **kwargs):
        """
        :param recency_weight: Recency weight to use when updating the
            recency weighted variance.

        Letting ``w`` be the recency weight,

        .. math::
            \\text{RWV}_w(x_t) = \\text{EMA}_w({x^2_t}) - \\text{EMA}_w(x_t)^2
        """
        super().__init__(**kwargs)
        self.recency_weight = recency_weight

    @property
    def recency_weight(self):
        return self._recency_weight

    @recency_weight.setter
    def recency_weight(self, weight: float):
        assert 0.0 < weight <= 1.0
        self._recency_weight = weight
        # Keep the moment estimators' recency weights in sync with this object's.
        self.ex.recency_weight = weight
        self.ex2.recency_weight = weight

    def drop(self, x):
        """
        Recency Weighted Variance does not support dropping values
        """
        pass | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/merlion/utils/istat.py | 0.95511 | 0.663798 | istat.py | pypi |
from collections import OrderedDict
from typing import List
import numpy as np
import pandas as pd
from merlion.utils.time_series import TimeSeries, to_pd_datetime
def minT_reconciliation(
    forecasts: List[TimeSeries], errs: List[TimeSeries], sum_matrix: np.ndarray, n_leaves: int
) -> List[TimeSeries]:
    """
    Computes the minimum trace reconciliation for hierarchical time series, as described by
    `Wickramasuriya et al. 2018 <https://robjhyndman.com/papers/mint.pdf>`__. This algorithm assumes that
    we have a number of time series aggregated at various levels (the aggregation tree is described by ``sum_matrix``),
    and we obtain independent forecasts at each level of the hierarchy. Minimum trace reconciliation finds the optimal
    way to adjust (reconcile) the forecasts to reduce the variance of the estimation.

    :param forecasts: forecast for each aggregation level of the hierarchy
    :param errs: standard errors of forecasts for each level of the hierarchy. While not strictly necessary,
        reconciliation performs better if all forecasts are accompanied by uncertainty estimates.
    :param sum_matrix: matrix describing how the hierarchy is aggregated
    :param n_leaves: the number of leaf forecasts (i.e. the number of forecasts at the most dis-aggregated level
        of the hierarchy). We assume that the leaf forecasts are last in the lists ``forecasts`` & ``errs``,
        and that ``sum_matrix`` reflects this fact.

    :return: reconciled forecasts for each aggregation level of the hierarchy
    """
    m = len(forecasts)
    n = n_leaves
    assert len(errs) == m > n
    assert all(yhat.dim == 1 for yhat in forecasts)
    assert sum_matrix.shape == (m, n), f"Expected sum_matrix to have shape ({m}, {n}) got {sum_matrix.shape}"
    # The last n rows of sum_matrix must be the identity (leaves map to themselves).
    assert (sum_matrix[-n:] == np.eye(n)).all()

    # Convert forecasts to a single aligned multivariate time series
    names = [yhat.names[0] for yhat in forecasts]
    forecasts = OrderedDict((i, yhat.univariates[yhat.names[0]]) for i, yhat in enumerate(forecasts))
    forecasts = TimeSeries(univariates=forecasts).align()
    t_ref = forecasts.time_stamps
    H = len(forecasts)

    # Matrix of stderrs (if any) at each prediction horizon. shape is [m, H].
    # If no stderrs are given, we the estimation error is proportional to the number of leaf nodes being combined.
    coefs = sum_matrix.sum(axis=1)
    if all(e is None for e in errs):
        # FIXME: This heuristic can be improved if training errors are given.
        # However, the model code should probably be responsible for this, not the reconciliation code.
        Wh = [np.diag(coefs) for _ in range(H)]
    else:
        coefs = coefs.reshape(-1, 1)
        # Squared standard errors per level & horizon; NaN where no error estimate was given.
        errs = np.asarray(
            [np.full(H, np.nan) if e is None else e.align(reference=t_ref).to_pd().values.flatten() ** 2 for e in errs]
        )  # [m, H]
        # Replace NaN's w/ the mean of non-NaN stderrs & create diagonal error matrices
        nan_errs = np.isnan(errs[:, 0])
        if nan_errs.any():
            errs[nan_errs] = np.nanmean(errs / coefs, axis=0) * coefs[nan_errs]
        Wh = [np.diag(errs[:, h]) for h in range(H)]

    # Create other supplementary matrices
    # J selects the leaf entries; U encodes the aggregation constraints.
    J = np.zeros((n, m))
    J[:, -n:] = np.eye(n)
    U = np.zeros((m - n, m))
    U[:, : m - n] = np.eye(m - n)
    U[:, m - n :] = -sum_matrix[:-n]

    # Compute projection matrices to compute coherent leaf forecasts
    Ph = []
    for W in Wh:
        inv = np.linalg.inv(U @ W @ U.T)
        P = J - ((J @ W) @ U.T) @ (inv @ U)
        Ph.append(P)

    # Compute reconciled forecasts
    # Project each horizon's forecasts onto coherent leaves & re-aggregate.
    reconciled = []
    for (t, yhat_h), P in zip(forecasts, Ph):
        reconciled.append(sum_matrix @ (P @ yhat_h))
    reconciled = pd.DataFrame(np.asarray(reconciled), index=to_pd_datetime(t_ref))

    return [u.to_ts(name=name) for u, name in zip(TimeSeries.from_pd(reconciled).univariates, names)] | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/merlion/utils/hts.py | 0.910722 | 0.885532 | hts.py | pypi |
from collections import OrderedDict
import inspect
from typing import List, Union
import pandas as pd
from merlion.utils.misc import combine_signatures, parse_basic_docstring
from merlion.utils.time_series import TimeSeries
def df_to_time_series(
    df: pd.DataFrame, time_col: str = None, timestamp_unit="s", data_cols: Union[str, List[str]] = None
) -> "TimeSeries":
    """
    Converts a general ``pandas.DataFrame`` to a `TimeSeries` object.

    :param df: the dataframe to process
    :param time_col: the name of the column specifying time. If ``None`` is specified, the existing index
        is used if it is a ``DatetimeIndex``. Otherwise, the first column is used.
    :param timestamp_unit: if the time column is in Unix timestamps, this is the unit of the timestamp.
    :param data_cols: the columns representing the actual data values of interest.

    :raises KeyError: if ``time_col`` or any member of ``data_cols`` is not a column of ``df``.
    """
    # Set up the time index
    if not isinstance(df.index, pd.DatetimeIndex):
        if time_col is None:
            time_col = df.columns[0]
        elif time_col not in df.columns:
            raise KeyError(f"Expected `time_col` to be in {df.columns}. Got {time_col}.")
        # String (object) time columns are parsed as dates; numeric ones are Unix timestamps.
        df[time_col] = pd.to_datetime(df[time_col], unit=None if df[time_col].dtype == "O" else timestamp_unit)
        df = df.set_index(time_col)
    df = df.sort_index()

    # Get only the desired columns from the dataframe
    if data_cols is not None:
        data_cols = [data_cols] if not isinstance(data_cols, (list, tuple)) else data_cols
        if not all(c in df.columns for c in data_cols):
            # BUG FIX: was ``df.colums`` (a typo), which raised AttributeError instead of the
            # intended KeyError with a helpful message.
            raise KeyError(f"Expected each of `data_cols` to be in {df.columns}. Got {data_cols}.")
        df = df[data_cols]

    # Convert the dataframe to a time series & return it
    return TimeSeries.from_pd(df)
def data_io_decorator(func):
    """
    Decorator to standardize docstrings for data I/O functions.

    Wraps ``func`` so that its docstring and signature are merged with those of
    `df_to_time_series`: the wrapper's docstring combines both prefixes, both
    parameter lists, and the wrapped function's suffix, while its signature
    gains `df_to_time_series`'s parameters (minus ``df`` and var-args).
    """

    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    # Parse the docstrings of the base df_to_time_series function & decorated function.
    prefix, suffix, params = parse_basic_docstring(func.__doc__)
    base_prefix, base_suffix, base_params = parse_basic_docstring(df_to_time_series.__doc__)

    # Combine the prefixes. Base prefix starts after the first line break.
    # i_lb is the index of the second blank line, i.e. where the base summary ends.
    i_lb = [i for i, line in enumerate(base_prefix) if line == ""][1]
    prefix = ("\n".join(prefix) if any([line != "" for line in prefix]) else "") + "\n".join(base_prefix[i_lb:])

    # The base docstring has no suffix, so just use the function's
    suffix = "\n".join(suffix) if any([line != "" for line in suffix]) else ""

    # Combine the parameter lists
    # Base parameter docs are only added when the wrapped function doesn't document them itself.
    for param, docstring_lines in base_params.items():
        if param not in params:
            params[param] = "\n".join(docstring_lines).rstrip("\n")

    # Combine the signatures, but remove some parameters that are specific to the original (as well as kwargs).
    new_sig_params = []
    sig = combine_signatures(inspect.signature(func), inspect.signature(df_to_time_series))
    for param in sig.parameters.values():
        # Stop at *args/**kwargs so the wrapper exposes only named parameters.
        if param.kind in {inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD}:
            break
        if param.name not in ["df"]:
            new_sig_params.append(param)
    sig = sig.replace(parameters=new_sig_params)

    # Update the signature and docstring of the wrapper we are returning. Use only the params in the new signature.
    wrapper.__signature__ = sig
    params = OrderedDict((p, params[p]) for p in sig.parameters if p in params)
    wrapper.__doc__ = (prefix or "") + "\n" + "\n".join(params.values()) + "\n\n" + (suffix or "")
    return wrapper
@data_io_decorator
def csv_to_time_series(file_name: str, **kwargs) -> TimeSeries:
    """
    Reads a CSV file and converts it to a `TimeSeries` object.

    :param file_name: path of the CSV file to read.
    """
    # The decorator merges df_to_time_series's parameter docs & signature into this wrapper,
    # so **kwargs accepts time_col / timestamp_unit / data_cols.
    return df_to_time_series(pd.read_csv(file_name), **kwargs) | /salesforce-merlion-2.0.2.tar.gz/salesforce-merlion-2.0.2/merlion/utils/data_io.py | 0.915587 | 0.615868 | data_io.py | pypi |
import numpy as np
import pandas as pd
from operator import add
from functools import reduce
from typing import Callable, List, Union
from merlion.utils.time_series import UnivariateTimeSeries, TimeSeries
class TimeSeriesGenerator:
    """
    An abstract base class for generating synthetic time series data.
    Generates a 1-dimensional grid x(0), x(1), ..., x(n-1), where x(i) = x0 + i * step.
    Then generates a time series y(0), y(1), ..., y(n-1), where y(i) = f(x(i)) + noise.
    """

    def __init__(
        self,
        f: Callable[[float], float],
        n: int,
        x0: float = 0.0,
        step: float = 1.0,
        scale: float = 1.0,
        noise: Callable[[], float] = np.random.normal,
        distort: Callable[[float, float], float] = add,
        name: str = None,
        t0: str = "1970 00:00:00",
        tdelta: str = "5min",
    ):
        """
        :param f: The underlying generating function mapping grid points to values.
        :param n: The number of points to be generated.
        :param x0: The initial value to use to form that 1-dimensional grid that
            will be used to compute the synthetic values.
        :param step: The step size to use when forming the 1-dimensional grid.
        :param scale: A scalar to use to either inflate or deflate the synthetic data.
        :param noise: A function that generates a random value when called.
        :param distort: A function mapping two real numbers to one real number which will
            be used to inject noise into the time series.
        :param name: The name to assign the univariate that will be generated.
        :param t0: Initial timestamp to use when wrapping the generated values into a
            TimeSeries object.
        :param tdelta: the time delta to use when wrapping the generated values into a
            TimeSeries object.
        """
        # BUG FIX: assertion messages previously read "postive".
        assert step > 0, f"step must be a positive real number but is {step}."
        assert scale > 0, f"scale must be a positive real number but is {scale}."
        self.f = f
        self.n = n
        self.x0 = x0
        self.step = step
        self.scale = scale
        self.noise = noise
        self.distort = distort
        self.name = name
        self.t0 = t0
        self.tdelta = tdelta

    @property
    def n(self):
        return self._n

    @n.setter
    def n(self, n: int):
        self._n = n
        self._update_steps()

    @property
    def x0(self):
        return self._x0

    @x0.setter
    def x0(self, x: float):
        self._x0 = x
        self._update_steps()

    @property
    def step(self):
        return self._step

    @step.setter
    def step(self, step: float):
        self._step = step
        self._update_steps()

    def _update_steps(self):
        """
        Updates the x-steps that are used to generate the time series
        based on the current values of `n`, `x0`, and `step`.
        """
        # Only recompute once all three grid attributes have been initialized
        # (the setters are first invoked mid-__init__).
        if all(hasattr(self, attr) for attr in ("_n", "_x0", "_step")):
            self.steps = [self.x0 + self.step * x for x in range(self.n)]

    def y(self, x: float):
        # A single synthetic observation: scaled, noise-distorted f(x).
        return self.scale * self.distort(self.f(x), self.noise())

    def generate(self, return_ts=True) -> "Union[List[float], TimeSeries]":
        """
        Generates synthetic time series data according and returns it as a list or as a
        TimeSeries object.

        :param return_ts: if ``True``, wrap the values in a `TimeSeries` indexed from
            ``t0`` at frequency ``tdelta``; otherwise return the raw list of values.
        """
        vals = self._generate()
        if return_ts:
            assert self.t0 is not None and self.tdelta is not None
            times = pd.date_range(self.t0, periods=self.n, freq=self.tdelta)
            return UnivariateTimeSeries(times, vals, self.name).to_ts()
        return vals

    def _generate(self):
        return [self.y(x) for x in self.steps]
class GeneratorComposer(TimeSeriesGenerator):
    """
    A class for generating synthetic time series by composing
    other TimeSeriesGenerator's.
    """

    def __init__(self, generators: List[TimeSeriesGenerator], per_generator_noise: bool = False, **kwargs):
        """
        :param generators: A list of other time series generators to compose.
        :param per_generator_noise: True if noise should be injected by each generator
            during composition. i.e., if we have two generators with generating functions
            f and g. If per_generator_noise = True, y = f(g+noise)+noise. Otherwise,
            y = f(g) + noise.
        """
        # The composed generating function is installed by the ``generators``
        # setter below; start from the identity.
        kwargs["f"] = lambda x: x
        super().__init__(**kwargs)
        # Must be assigned before ``generators``, since its setter reads it.
        self.per_generator_noise = per_generator_noise
        self.generators = generators

    @property
    def generators(self):
        return self._generators

    @generators.setter
    def generators(self, generators: List[TimeSeriesGenerator]):
        """
        Sets the generators for the GeneratorSequence.

        :param generators: The list of generators to set. Note that generators'
            attributes related to forming `steps` will not be relevant.
        """
        if self.per_generator_noise:
            # Each generator injects its own noise; silence the composer's.
            self.noise = lambda: 0
        else:
            # Noise is injected once by the composer; silence each part.
            for generator in generators:
                generator.noise = lambda: 0
        # BUG FIX: the original setter never stored the generators, so the
        # ``generators`` property raised AttributeError when accessed.
        self._generators = generators
        # Composed generating function: f_1 o f_2 o ... o f_k.
        self.f = reduce(lambda f, g: lambda x: f(g(x)), [g.f for g in generators], lambda x: x)
class GeneratorConcatenator(GeneratorComposer):
    """
    A class for generating synthetic time series data that undergoes
    fundamental changes to its behavior at certain points in time.
    For example, with this class one could generate a time series that begins
    as linear and then becomes stationary.

    For example, let f = 0 for 3 steps 0,1,2 and g = 2 * x for the next three
    steps 3,4,5. generate() returns:

        - [0, 0, 0, 6, 8, 10] if string_outputs is False
        - [0, 0, 0, 2, 4, 6] if string_outputs is True.
    """

    def __init__(self, string_outputs: bool = True, **kwargs):
        """
        :param string_outputs: If True, ensure that the end and beginning of each
            pair of consecutive time series are connected. For example, Let there be
            two generating functions f, and g belonging to consecutive generators. If
            True, adjust g by a constant c such that f(x) = g(x) at the last point x
            that f uses to generate its series.
        """
        # ``f`` and ``n`` are derived from the child generators by the
        # ``generators`` setter, so placeholders are handed to the parent.
        kwargs["f"] = None
        kwargs["n"] = 1
        # Must be assigned before super().__init__, which sets ``generators``.
        self.string_outputs = string_outputs
        super().__init__(**kwargs)

    @GeneratorComposer.generators.setter
    def generators(self, generators: List[TimeSeriesGenerator]):
        """
        Sets the generators for the GeneratorSequence.

        :param generators: The list of generators to set. Note that the
            individual generators `step` and `x0` attributes will be overriden
            by the `step` and `x0` belonging to the GeneratorSequence.
        """
        for i, generator in enumerate(generators):
            if not self.per_generator_noise:
                # Noise/distortion are applied uniformly by this object.
                generator.noise = self.noise
                generator.distort = self.distort
            # Lay the generators' x-grids end to end on a shared step size.
            if i == 0:
                generator.x0 = self.x0
                self.n = generator.n
            elif i > 0:
                generator.x0 = self.x0 + self.n * self.step
                self.n += generator.n
            generator.step = self.step
        self._generators = generators

    def y(self, x: float):
        """
        A Generator Sequence has no method `y`.
        """
        pass

    def _generate(self) -> Union[List[float], TimeSeries]:
        """
        Generates the time series by concatenating the time series
        generated by each generator.
        """
        result = []
        for generator in self.generators:
            y = generator.generate(return_ts=False)
            if self.string_outputs and result:
                # Shift this segment by a constant so it joins continuously
                # with the previous segment at the boundary point.
                y = np.asarray(y) + result[-1] - generator.f(generator.x0 - self.step)
                y = y.tolist()
            result += y
        return result
import sys
import logging
import importlib
from merlion.models.factory import ModelFactory
from merlion.evaluate.anomaly import TSADMetric
from merlion.utils.time_series import TimeSeries
from merlion.plot import MTSFigure, plot_anoms_plotly
from merlion.dashboard.models.utils import ModelMixin, DataMixin
from merlion.dashboard.utils.log import DashLogger
# Module-level log handler that streams log records to the dashboard UI.
dash_logger = DashLogger(stream=sys.stdout)
class AnomalyModel(ModelMixin, DataMixin):
    """Dashboard backend for training and evaluating Merlion anomaly detectors."""

    # Detectors applicable to univariate time series.
    univariate_algorithms = [
        "DefaultDetector",
        "ArimaDetector",
        "DynamicBaseline",
        "IsolationForest",
        "ETSDetector",
        "MSESDetector",
        "ProphetDetector",
        "RandomCutForest",
        "SarimaDetector",
        "WindStats",
        "SpectralResidual",
        "ZMS",
        "DeepPointAnomalyDetector",
    ]
    # Detectors applicable to multivariate time series.
    multivariate_algorithms = ["IsolationForest", "AutoEncoder", "VAE", "DAGMM", "LSTMED"]
    # Supported post-processing threshold rules.
    thresholds = ["Threshold", "AggregateAlarms"]

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
        # Stream log records to the dashboard's log console.
        self.logger.addHandler(dash_logger)

    @staticmethod
    def get_available_algorithms(num_input_metrics):
        """Return the algorithm names applicable to a series with ``num_input_metrics`` variables."""
        if num_input_metrics <= 0:
            return []
        elif num_input_metrics == 1:
            return AnomalyModel.univariate_algorithms
        else:
            return AnomalyModel.multivariate_algorithms

    @staticmethod
    def get_available_thresholds():
        """Return the names of the supported threshold rules."""
        return AnomalyModel.thresholds

    @staticmethod
    def get_threshold_info(threshold):
        """Return the constructor parameter spec for the named threshold rule class."""
        module = importlib.import_module("merlion.post_process.threshold")
        model_class = getattr(module, threshold)
        param_info = AnomalyModel._param_info(model_class.__init__)
        # assumes every threshold class takes an `alm_threshold` parameter — TODO confirm
        if not param_info["alm_threshold"]["default"]:
            param_info["alm_threshold"]["default"] = 3.0
        return param_info

    @staticmethod
    def _compute_metrics(labels, predictions):
        """Compute precision/recall/F1 and mean time-to-detect against ``labels``."""
        metrics = {}
        for metric in [TSADMetric.Precision, TSADMetric.Recall, TSADMetric.F1, TSADMetric.MeanTimeToDetect]:
            m = metric.value(ground_truth=labels, predict=predictions)
            # MeanTimeToDetect is a duration, not a float, so stringify it.
            metrics[metric.name] = round(m, 5) if metric.name != "MeanTimeToDetect" else str(m)
        return metrics

    @staticmethod
    def _plot_anomalies(model, ts, scores, labels=None):
        """Build a plotly figure of ``ts`` with anomaly ``scores`` (and optional labels) overlaid."""
        title = f"{type(model).__name__}: Anomalies in Time Series"
        fig = MTSFigure(y=ts, y_prev=None, anom=scores)
        return plot_anoms_plotly(fig=fig.plot_plotly(title=title), anomaly_labels=labels)

    @staticmethod
    def _check(df, columns, label_column, is_train):
        """
        Validate that ``columns`` and ``label_column`` exist in ``df``.

        Column names coming from the UI are strings; when a name is missing,
        retry it as an integer column label before failing.
        """
        kind = "train" if is_train else "test"
        if label_column and label_column not in df:
            label_column = int(label_column)
            assert label_column in df, f"The label column {label_column} is not in the {kind} time series."
        for i in range(len(columns)):
            if columns[i] not in df:
                columns[i] = int(columns[i])
            assert columns[i] in df, f"The variable {columns[i]} is not in the time {kind} series."
        return columns, label_column

    def train(self, algorithm, train_df, test_df, columns, label_column, params, threshold_params, set_progress):
        """
        Train ``algorithm`` on ``train_df`` and evaluate it on ``test_df``.

        :param set_progress: Callback taking a ``(current, total)`` tuple of strings.
        :return: (trained model, train metrics or None, test metrics or None, plotly figure)
        """
        columns, label_column = AnomalyModel._check(train_df, columns, label_column, is_train=True)
        columns, label_column = AnomalyModel._check(test_df, columns, label_column, is_train=False)
        if threshold_params is not None:
            thres_class, thres_params = threshold_params
            module = importlib.import_module("merlion.post_process.threshold")
            model_class = getattr(module, thres_class)
            # NOTE(review): mutates the caller-supplied ``params`` dict.
            params["threshold"] = model_class(**thres_params)
        model_class = ModelFactory.get_model_class(algorithm)
        model = model_class(model_class.config_class(**params))

        train_ts, train_labels = TimeSeries.from_pd(train_df[columns]), None
        test_ts, test_labels = TimeSeries.from_pd(test_df[columns]), None
        if label_column is not None and label_column != "":
            train_labels = TimeSeries.from_pd(train_df[label_column])
            test_labels = TimeSeries.from_pd(test_df[label_column])

        self.logger.info(f"Training the anomaly detector: {algorithm}...")
        set_progress(("2", "10"))
        scores = model.train(train_data=train_ts)
        set_progress(("6", "10"))

        self.logger.info("Computing training performance metrics...")
        train_pred = model.post_rule(scores) if model.post_rule is not None else scores
        train_metrics = AnomalyModel._compute_metrics(train_labels, train_pred) if train_labels is not None else None
        set_progress(("7", "10"))

        self.logger.info("Getting test-time results...")
        test_pred = model.get_anomaly_label(test_ts)
        test_metrics = AnomalyModel._compute_metrics(test_labels, test_pred) if test_labels is not None else None
        set_progress(("9", "10"))

        self.logger.info("Plotting anomaly scores...")
        figure = AnomalyModel._plot_anomalies(model, test_ts, test_pred, test_labels)
        self.logger.info("Finished.")
        set_progress(("10", "10"))
        return model, train_metrics, test_metrics, figure

    def test(self, model, df, columns, label_column, threshold_params, set_progress):
        """
        Run a trained ``model`` on ``df``, optionally overriding its threshold rule.

        :return: (test metrics or None, plotly figure)
        """
        columns, label_column = AnomalyModel._check(df, columns, label_column, is_train=False)
        threshold = None
        if threshold_params is not None:
            thres_class, thres_params = threshold_params
            module = importlib.import_module("merlion.post_process.threshold")
            model_class = getattr(module, thres_class)
            threshold = model_class(**thres_params)
        if threshold is not None:
            model.threshold = threshold

        self.logger.info("Detecting anomalies...")
        set_progress(("2", "10"))
        test_ts, label_ts = TimeSeries.from_pd(df[columns]), None
        if label_column is not None and label_column != "":
            label_ts = TimeSeries.from_pd(df[[label_column]])
        predictions = model.get_anomaly_label(time_series=test_ts)
        set_progress(("7", "10"))

        self.logger.info("Computing test performance metrics...")
        metrics = AnomalyModel._compute_metrics(label_ts, predictions) if label_ts is not None else None
        set_progress(("8", "10"))

        self.logger.info("Plotting anomaly labels...")
        figure = AnomalyModel._plot_anomalies(model, test_ts, predictions, label_ts)
        self.logger.info("Finished.")
        set_progress(("10", "10"))
        return metrics, figure
import logging
import sys
import pandas as pd
from merlion.models.factory import ModelFactory
from merlion.evaluate.forecast import ForecastEvaluator, ForecastMetric
from merlion.utils.time_series import TimeSeries
from merlion.dashboard.models.utils import ModelMixin, DataMixin
from merlion.dashboard.utils.log import DashLogger
# Module-level log handler that streams log records to the dashboard UI.
dash_logger = DashLogger(stream=sys.stdout)
class ForecastModel(ModelMixin, DataMixin):
    """Dashboard backend for training and evaluating Merlion forecasters."""

    # Forecasting algorithms selectable from the dashboard.
    algorithms = [
        "DefaultForecaster",
        "Arima",
        "LGBMForecaster",
        "ETS",
        "AutoETS",
        "Prophet",
        "AutoProphet",
        "Sarima",
        "VectorAR",
        "RandomForestForecaster",
        "ExtraTreesForecaster",
    ]

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
        # Stream log records to the dashboard's log console.
        self.logger.addHandler(dash_logger)

    @staticmethod
    def get_available_algorithms():
        """Return the names of the supported forecasting algorithms."""
        return ForecastModel.algorithms

    @staticmethod
    def _compute_metrics(evaluator, ts, predictions):
        """Evaluate ``predictions`` against ``ts`` on a fixed set of forecasting metrics."""
        return {
            m: round(evaluator.evaluate(ground_truth=ts, predict=predictions, metric=ForecastMetric[m]), 5)
            for m in ["MAE", "MARRE", "RMSE", "sMAPE", "RMSPE"]
        }

    def train(self, algorithm, train_df, test_df, target_column, feature_columns, exog_columns, params, set_progress):
        """
        Train ``algorithm`` to forecast ``target_column`` and evaluate it on ``test_df``.

        :param set_progress: Callback taking a ``(current, total)`` tuple of strings.
        :return: (trained model, train metrics, test metrics, plotly figure)
        """
        if target_column not in train_df:
            target_column = int(target_column)
        assert target_column in train_df, f"The target variable {target_column} is not in the time series."
        # Column names from the UI are strings; retry missing names as ints.
        # NOTE(review): a single non-numeric missing name clears the whole
        # list — presumably intentional best-effort behavior; confirm.
        try:
            feature_columns = [int(c) if c not in train_df else c for c in feature_columns]
        except ValueError:
            feature_columns = []
        try:
            exog_columns = [int(c) if c not in train_df else c for c in exog_columns]
        except ValueError:
            exog_columns = []
        for exog_column in exog_columns:
            assert exog_column in train_df, f"Exogenous variable {exog_column} is not in the time series."

        # Re-arrange dataframe so that the target column is first, and exogenous columns are last
        columns = [target_column] + feature_columns + exog_columns
        train_df = train_df.loc[:, columns]
        test_df = test_df.loc[:, columns]

        # Get the target_seq_index & initialize the model
        params["target_seq_index"] = columns.index(target_column)
        model_class = ModelFactory.get_model_class(algorithm)
        model = model_class(model_class.config_class(**params))

        # Handle exogenous regressors if they are supported by the model
        if model.supports_exog and len(exog_columns) > 0:
            exog_ts = TimeSeries.from_pd(pd.concat((train_df.loc[:, exog_columns], test_df.loc[:, exog_columns])))
            train_df = train_df.loc[:, [target_column] + feature_columns]
            test_df = test_df.loc[:, [target_column] + feature_columns]
        else:
            exog_ts = None

        self.logger.info(f"Training the forecasting model: {algorithm}...")
        set_progress(("2", "10"))
        train_ts = TimeSeries.from_pd(train_df)
        predictions = model.train(train_ts, exog_data=exog_ts)
        if isinstance(predictions, tuple):
            # Some models return (forecast, stderr); keep only the forecast.
            predictions = predictions[0]

        self.logger.info("Computing training performance metrics...")
        set_progress(("6", "10"))
        evaluator = ForecastEvaluator(model, config=ForecastEvaluator.config_class())
        train_metrics = ForecastModel._compute_metrics(evaluator, train_ts, predictions)
        set_progress(("7", "10"))

        test_ts = TimeSeries.from_pd(test_df)
        if "max_forecast_steps" in params and params["max_forecast_steps"] is not None:
            # Truncate the test split to the model's forecast horizon.
            n = min(len(test_ts) - 1, int(params["max_forecast_steps"]))
            test_ts, _ = test_ts.bisect(t=test_ts.time_stamps[n])

        self.logger.info("Computing test performance metrics...")
        test_pred, test_err = model.forecast(time_stamps=test_ts.time_stamps, exog_data=exog_ts)
        test_metrics = ForecastModel._compute_metrics(evaluator, test_ts, test_pred)
        set_progress(("8", "10"))

        self.logger.info("Plotting forecasting results...")
        figure = model.plot_forecast_plotly(
            time_series=test_ts, time_series_prev=train_ts, exog_data=exog_ts, plot_forecast_uncertainty=True
        )
        figure.update_layout(width=None, height=500)
        self.logger.info("Finished.")
        set_progress(("10", "10"))
        return model, train_metrics, test_metrics, figure
from collections import OrderedDict
from enum import Enum
import inspect
import json
import os
import numpy as np
import pandas as pd
from merlion.models.factory import ModelFactory
class DataMixin:
    """Mixin providing CSV loading for the dashboard model classes."""

    def load_data(self, file_path, nrows=None):
        """
        Load a CSV file whose first column is the time index.

        :param file_path: Path of the CSV file to read.
        :param nrows: Optional row limit; when None, the whole file is read
            (and a progress message is logged via ``self.logger``).
        :return: A DataFrame indexed by timestamps. Integer index values are
            interpreted as milliseconds since the Unix epoch.
        """
        if nrows is None:
            self.logger.info("Loading the time series...")
        df = pd.read_csv(file_path, nrows=nrows)
        # Remember the raw dtype of the index column before it is consumed.
        index_type = df.dtypes[df.columns[0]]
        df = df.set_index(df.columns[0])
        df.index = pd.to_datetime(df.index.values, unit="ms" if index_type in [np.int32, np.int64] else None)
        return df
class ModelMixin:
    """Mixin with utilities for inspecting, configuring and (de)serializing models."""

    @staticmethod
    def get_parameter_info(algorithm):
        """Return the config-constructor parameter spec for the named algorithm."""
        model_class = ModelFactory.get_model_class(algorithm)
        param_info = ModelMixin._param_info(model_class.config_class.__init__)
        if "max_forecast_steps" in param_info:
            # Supply a sensible default when the config leaves it unset.
            if param_info["max_forecast_steps"]["default"] == "":
                param_info["max_forecast_steps"]["default"] = 100
        return param_info

    @staticmethod
    def _param_info(function):
        """
        Build an OrderedDict mapping parameter name -> {"type", "default"}.

        The type is taken from the default value when it has a supported type,
        otherwise from the annotation. ``self`` and ``target_seq_index`` are
        skipped; parameters with neither a usable default nor a usable
        annotation are omitted entirely.
        """

        def is_enum(t):
            return isinstance(t, type) and issubclass(t, Enum)

        def is_valid_type(t):
            return t in [int, float, str, bool, list, tuple, dict] or is_enum(t)

        param_info = OrderedDict()
        signature = inspect.signature(function).parameters
        for name, param in signature.items():
            if name in ["self", "target_seq_index"]:
                continue
            value = param.default
            if value == param.empty:
                value = ""
            if is_valid_type(type(param.default)):
                value = value.name if isinstance(value, Enum) else value
                param_info[name] = {"type": type(param.default), "default": value}
            elif is_valid_type(param.annotation):
                value = value.name if isinstance(value, Enum) else value
                param_info[name] = {"type": param.annotation, "default": value}
        return param_info

    @staticmethod
    def parse_parameters(param_info, params):
        """
        Convert string-valued form inputs into typed kwargs per ``param_info``.

        :param param_info: Spec as produced by ``_param_info``.
        :param params: Mapping of parameter name to its string value from the UI.
        """
        for key in params.keys():
            assert key in param_info, f"{key} is not in `param_info`."
        kwargs = {}
        for name, value in params.items():
            info = param_info[name]
            value_type = info["type"]
            if value.lower() in ["none", "null"]:
                kwargs[name] = None
            elif value_type in [int, float, str]:
                kwargs[name] = value_type(value)
            elif issubclass(value_type, Enum):
                valid_enum_values = value_type.__members__.keys()
                assert value in valid_enum_values, f"The value of {name} should be in {valid_enum_values}"
                kwargs[name] = value_type[value]
            elif value_type == bool:
                assert value.lower() in ["true", "false"], f"The value of {name} should be either True or False."
                kwargs[name] = value.lower() == "true"
            elif info["type"] in [list, tuple, dict]:
                # Normalize whitespace and tuple syntax so json can parse it.
                value = value.replace(" ", "").replace("\t", "")
                value = value.replace("(", "[").replace(")", "]").replace(",]", "]")
                kwargs[name] = json.loads(value)
        return kwargs

    @staticmethod
    def save_model(directory, model, algorithm):
        """Save ``model`` under ``directory/algorithm`` (no-op if model is None)."""
        if model is None:
            return
        d = os.path.join(directory, algorithm)
        if not os.path.exists(d):
            os.makedirs(d)
        model.save(d)

    @staticmethod
    def load_model(directory, algorithm):
        """Load a previously saved model of type ``algorithm`` from ``directory``."""
        d = os.path.join(directory, algorithm)
        model_class = ModelFactory.get_model_class(algorithm)
        return model_class.load(d)
import plotly
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from dash import dash_table, dcc
from merlion.dashboard.settings import *
def data_table(df, n=1000, page_size=10):
    """Render at most the first ``n`` rows of ``df`` as a paginated Dash table."""
    if df is not None:
        df = df.head(n)
        columns = [{"name": "Index", "id": "Index"}] + [{"name": c, "id": c} for c in df.columns]
        data = []
        for i in range(df.shape[0]):
            d = {c: v for c, v in zip(df.columns, df.values[i])}
            d.update({"Index": df.index[i]})
            data.append(d)
        table = dash_table.DataTable(
            id="table",
            columns=columns,
            data=data,
            style_cell_conditional=[{"textAlign": "center"}],
            style_table={"overflowX": "scroll"},
            editable=False,
            column_selectable="single",
            page_action="native",
            page_size=page_size,
            page_current=0,
            style_header=dict(backgroundColor=TABLE_HEADER_COLOR),
            style_data=dict(backgroundColor=TABLE_DATA_COLOR),
        )
        return table
    else:
        # No data yet: render an empty placeholder table.
        return dash_table.DataTable()
def plot_timeseries(ts, figure_height=500):
    """Plot each column of the DataFrame ``ts`` as a line trace with a date range selector."""
    traces = []
    color_list = plotly.colors.qualitative.Dark24
    for i, col in enumerate(ts.columns):
        v = ts[col]
        if v.dtype in ["int", "float", "bool"]:
            v = v.astype(float)
        # Cycle through the palette when there are more columns than colors.
        color = color_list[i % len(color_list)]
        traces.append(go.Scatter(name=col, x=v.index, y=v.values.flatten(), mode="lines", line=dict(color=color)))
    layout = dict(
        showlegend=True,
        xaxis=dict(
            title="Time",
            type="date",
            rangeselector=dict(
                buttons=list(
                    [
                        dict(count=7, label="1w", step="day", stepmode="backward"),
                        dict(count=1, label="1m", step="month", stepmode="backward"),
                        dict(count=6, label="6m", step="month", stepmode="backward"),
                        dict(count=1, label="1y", step="year", stepmode="backward"),
                        dict(step="all"),
                    ]
                )
            ),
        ),
    )
    fig = make_subplots(figure=go.Figure(layout=layout))
    fig.update_yaxes(title_text="Time Series")
    for trace in traces:
        fig.add_trace(trace)
    fig.update_layout(
        height=figure_height,
        xaxis_rangeselector_font_color="white",
        xaxis_rangeselector_activecolor="#0176D3",
        xaxis_rangeselector_bgcolor="#1B96FF",
        xaxis_rangeselector_font_family="Salesforce Sans",
    )
    return dcc.Graph(figure=fig)
from dash import dcc
from dash import html, dash_table
from merlion.dashboard.pages.utils import create_modal, create_empty_figure
from merlion.dashboard.settings import *
def create_stats_table(data_stats=None):
    """Render the global ("@global") statistics of ``data_stats`` as a two-column table."""
    if data_stats is None or len(data_stats) == 0:
        # Empty placeholder row so the table renders before data is loaded.
        data = [{"Stats": "", "Value": ""}]
    else:
        data = [{"Stats": key, "Value": value} for key, value in data_stats["@global"].items()]
    table = dash_table.DataTable(
        id="data-stats",
        data=data,
        columns=[{"id": "Stats", "name": "Stats"}, {"id": "Value", "name": "Value"}],
        editable=False,
        style_header_conditional=[{"textAlign": "center", "font-family": "Salesforce Sans"}],
        style_cell_conditional=[{"textAlign": "center", "font-family": "Salesforce Sans"}],
        style_header=dict(backgroundColor=TABLE_HEADER_COLOR, color="white"),
        style_data=dict(backgroundColor=TABLE_DATA_COLOR),
    )
    return table
def create_metric_stats_table(metric_stats=None, column=None):
    """Render the per-variable statistics of ``metric_stats[column]`` as a two-column table."""
    if metric_stats is None or len(metric_stats) == 0 or column not in metric_stats:
        # Empty placeholder row so the table renders before data is loaded.
        data = [{"Stats": "", "Value": ""}]
    else:
        data = [{"Stats": key, "Value": value} for key, value in metric_stats[column].items()]
    table = dash_table.DataTable(
        id="metric-stats",
        data=data,
        columns=[{"id": "Stats", "name": "Stats"}, {"id": "Value", "name": "Value"}],
        editable=False,
        style_header_conditional=[{"textAlign": "center", "font-family": "Salesforce Sans"}],
        style_cell_conditional=[{"textAlign": "center", "font-family": "Salesforce Sans"}],
        style_header=dict(backgroundColor=TABLE_HEADER_COLOR, color="white"),
        style_data=dict(backgroundColor=TABLE_DATA_COLOR),
    )
    return table
def create_control_panel() -> html.Div:
    """Build the left-hand control panel of the data-exploration page."""
    return html.Div(
        id="control-card",
        children=[
            html.Br(),
            html.P(id="label", children="Upload Time Series Data File"),
            dcc.Upload(
                id="upload-data",
                children=html.Div(
                    children=[
                        html.Img(src="../assets/upload.svg"),
                        html.Div(id="select-a-file", children=[html.P("Select a file"), html.P(" or drag it here.")]),
                    ]
                ),
                style={
                    "height": "50px",
                    "lineHeight": "50px",
                    "borderWidth": "1px",
                    "borderStyle": "dashed",
                    "borderRadius": "5px",
                    "textAlign": "center",
                    "margin": "5px",
                },
                multiple=True,
            ),
            html.Br(),
            html.P("Select Data File"),
            dcc.Dropdown(id="select-file", options=[], style={"width": "100%"}),
            html.Br(),
            html.P("Overall Stats"),
            # NOTE(review): the "Overall Stats" container holds the per-metric
            # placeholder table and the "Per-Variable Stats" container holds
            # the global one — presumably callbacks replace these children,
            # but confirm the pairing is intentional.
            html.Div(id="data-stats-table", children=[create_metric_stats_table()]),
            html.Br(),
            html.P("Per-Variable Stats"),
            html.Div(
                id="select-column-parent",
                children=[dcc.Dropdown(id="select-column", options=[], style={"width": "100%"})],
            ),
            html.Br(),
            html.Div(id="metric-stats-table", children=[create_stats_table()]),
            html.Br(),
            html.Div(
                children=[
                    html.Button(id="data-btn", children="Load", n_clicks=0),
                    html.Button(id="data-cancel-btn", children="Cancel", style={"margin-left": "15px"}),
                ],
                style={"textAlign": "center"},
            ),
            html.Br(),
            html.P("Download Trained Model"),
            html.Div(
                id="data-download-parent",
                children=[dcc.Dropdown(id="data-download", options=[], style={"width": "100%"})],
            ),
            html.Br(),
            html.Div(
                children=[
                    html.Button(id="data-download-btn", children="Download", n_clicks=0),
                    dcc.Download(id="download-data"),
                ],
                style={"textAlign": "center"},
            ),
            html.Br(),
            create_modal(
                modal_id="data-exception-modal",
                header="An Exception Occurred",
                content="An exception occurred. Please click OK to continue.",
                content_id="data-exception-modal-content",
                button_id="data-exception-modal-close",
            ),
            create_modal(
                modal_id="data-download-exception-modal",
                header="An Exception Occurred",
                content="An exception occurred. Please click OK to continue.",
                content_id="data-download-exception-modal-content",
                button_id="data-download-exception-modal-close",
            ),
        ],
    )
def create_right_column() -> html.Div:
    """Build the right-hand column (plots and samples) of the data-exploration page."""
    return html.Div(
        id="right-column-data",
        children=[
            # NOTE(review): both child Divs share id "result_table_card" —
            # duplicate ids are invalid in Dash; likely only used for CSS.
            html.Div(
                id="result_table_card",
                children=[
                    html.B("Time Series Plots"),
                    html.Hr(),
                    html.Div(id="data-plots", children=[create_empty_figure()]),
                ],
            ),
            html.Div(
                id="result_table_card", children=[html.B("Time Series Samples"), html.Hr(), html.Div(id="data-table")]
            ),
        ],
    )
def create_data_layout() -> html.Div:
    """Assemble the full layout of the data-exploration page."""
    return html.Div(
        id="data_views",
        children=[
            # Left column
            html.Div(id="left-column-data", className="three columns", children=[create_control_panel()]),
            # Right column
            html.Div(className="nine columns", children=create_right_column()),
        ],
    )
import dash_bootstrap_components as dbc
from dash import dcc
from dash import html
from merlion.dashboard.pages.utils import create_modal, create_param_table, create_metric_table, create_empty_figure
def create_control_panel() -> html.Div:
    """Build the left-hand control panel of the anomaly-detection page."""
    return html.Div(
        id="control-card",
        children=[
            html.Br(),
            html.P("Select Training Data File"),
            html.Div(
                id="anomaly-select-file-parent",
                children=[
                    dbc.RadioItems(
                        id="anomaly-file-radio",
                        options=[
                            {"label": "Single data file", "value": "single"},
                            {"label": "Separate train/test files", "value": "separate"},
                        ],
                        value="single",
                        inline=True,
                    ),
                    dcc.Dropdown(id="anomaly-select-file", options=[], style={"width": "100%"}),
                ],
            ),
            # Shown only in "single file" mode: train/test split percentage.
            dbc.Collapse(
                html.Div(
                    id="control-card",
                    children=[
                        html.Br(),
                        html.P("Training Data Percentage"),
                        dcc.Slider(
                            id="anomaly-training-slider",
                            min=5,
                            max=95,
                            step=1,
                            marks={t * 10: str(t * 10) for t in range(1, 10)},
                            value=50,
                        ),
                    ],
                ),
                id="anomaly-slider-collapse",
                is_open=True,
            ),
            # Shown only in "separate files" mode: test-file selector.
            dbc.Collapse(
                html.Div(
                    id="control-card",
                    children=[
                        html.Br(),
                        html.P("Select Test Data File"),
                        html.Div(
                            id="anomaly-select-test-file-parent",
                            children=[dcc.Dropdown(id="anomaly-select-test-file", options=[], style={"width": "100%"})],
                        ),
                    ],
                ),
                id="anomaly-test-file-collapse",
                is_open=False,
            ),
            html.Br(),
            html.P("Select Feature Column(s)"),
            html.Div(
                id="anomaly-select-features-parent",
                children=[dcc.Dropdown(id="anomaly-select-features", options=[], multi=True, style={"width": "100%"})],
            ),
            html.Br(),
            html.P("Select Label Column for Evaluation (Optional)"),
            html.Div(
                id="anomaly-select-label-parent",
                children=[dcc.Dropdown(id="anomaly-select-label", options=[], style={"width": "100%"})],
            ),
            html.Br(),
            html.P("Select Anomaly Detection Algorithm"),
            html.Div(
                id="anomaly-select-algorithm-parent",
                children=[dcc.Dropdown(id="anomaly-select-algorithm", options=[], style={"width": "100%"})],
            ),
            html.Br(),
            html.P("Algorithm Setting"),
            html.Div(id="anomaly-param-table", children=[create_param_table()]),
            html.Progress(id="anomaly-progressbar", style={"width": "100%"}),
            html.Br(),
            html.Div(
                children=[
                    html.Button(id="anomaly-train-btn", children="Train", n_clicks=0),
                    html.Button(id="anomaly-cancel-btn", children="Cancel", style={"margin-left": "15px"}),
                ],
                style={"textAlign": "center"},
            ),
            html.Br(),
            html.P("Threshold Setting"),
            html.Div(
                id="anomaly-select-threshold-parent",
                children=[dcc.Dropdown(id="anomaly-select-threshold", options=[], style={"width": "100%"})],
            ),
            html.Br(),
            html.Div(id="anomaly-threshold-param-table", children=[create_param_table(height=80)]),
            html.Div(
                children=[html.Button(id="anomaly-test-btn", children="Update Threshold", n_clicks=0)],
                style={"textAlign": "center"},
            ),
            html.Br(),
            create_modal(
                modal_id="anomaly-exception-modal",
                header="An Exception Occurred",
                content="An exception occurred. Please click OK to continue.",
                content_id="anomaly-exception-modal-content",
                button_id="anomaly-exception-modal-close",
            ),
        ],
    )
def create_right_column() -> html.Div:
    """Build the right-hand column (results and metrics) of the anomaly-detection page."""
    return html.Div(
        id="right-column-data",
        children=[
            # NOTE(review): the three sibling Divs share id "result_table_card" —
            # duplicate ids are invalid in Dash; likely only used for CSS.
            html.Div(
                id="result_table_card",
                children=[
                    html.B("Anomaly Detection Results"),
                    html.Hr(),
                    html.Div(id="anomaly-plots", children=[create_empty_figure()]),
                ],
            ),
            html.Div(
                id="result_table_card",
                children=[
                    html.B("Testing Metrics"),
                    html.Hr(),
                    html.Div(id="anomaly-test-metrics", children=[create_metric_table()]),
                ],
            ),
            html.Div(
                id="result_table_card",
                children=[
                    html.B("Training Metrics"),
                    html.Hr(),
                    html.Div(id="anomaly-training-metrics", children=[create_metric_table()]),
                ],
            ),
        ],
    )
def create_anomaly_layout() -> html.Div:
    """Assemble the full layout of the anomaly-detection page."""
    return html.Div(
        id="anomaly_views",
        children=[
            # Left column
            html.Div(id="left-column-data", className="three columns", children=[create_control_panel()]),
            # Right column
            html.Div(className="nine columns", children=create_right_column()),
        ],
    )
import dash_bootstrap_components as dbc
from dash import dcc
from dash import html
from merlion.dashboard.pages.utils import create_modal, create_param_table, create_metric_table, create_empty_figure
def create_control_panel() -> html.Div:
    """Build the left-hand control panel of the forecasting page."""
    return html.Div(
        id="control-card",
        children=[
            html.Br(),
            html.P("Select Training Data File"),
            html.Div(
                id="forecasting-select-file-parent",
                children=[
                    dbc.RadioItems(
                        id="forecasting-file-radio",
                        options=[
                            {"label": "Single data file", "value": "single"},
                            {"label": "Separate train/test files", "value": "separate"},
                        ],
                        value="single",
                        inline=True,
                    ),
                    dcc.Dropdown(id="forecasting-select-file", options=[], style={"width": "100%"}),
                ],
            ),
            # Shown only in "single file" mode: train/test split percentage.
            dbc.Collapse(
                html.Div(
                    id="control-card",
                    children=[
                        html.Br(),
                        html.P("Training Data Percentage"),
                        dcc.Slider(
                            id="forecasting-training-slider",
                            min=5,
                            max=95,
                            step=1,
                            marks={t * 10: str(t * 10) for t in range(1, 10)},
                            value=80,
                        ),
                    ],
                ),
                id="forecasting-slider-collapse",
                is_open=True,
            ),
            # Shown only in "separate files" mode: test-file selector.
            dbc.Collapse(
                html.Div(
                    id="control-card",
                    children=[
                        html.Br(),
                        html.P("Select Test Data File"),
                        html.Div(
                            id="forecasting-select-test-file-parent",
                            children=[
                                dcc.Dropdown(id="forecasting-select-test-file", options=[], style={"width": "100%"})
                            ],
                        ),
                    ],
                ),
                id="forecasting-test-file-collapse",
                is_open=False,
            ),
            html.Br(),
            html.P("Select Target Column"),
            html.Div(
                id="forecasting-select-target-parent",
                children=[dcc.Dropdown(id="forecasting-select-target", options=[], style={"width": "100%"})],
            ),
            html.Br(),
            html.P("Select Other Features (Optional)"),
            html.Div(
                id="forecasting-select-features-parent",
                children=[
                    dcc.Dropdown(id="forecasting-select-features", options=[], multi=True, style={"width": "100%"})
                ],
            ),
            html.Br(),
            html.P("Select Exogenous Variables (Optional; Known A Priori)"),
            html.Div(
                id="forecasting-select-exog-parent",
                children=[dcc.Dropdown(id="forecasting-select-exog", options=[], multi=True, style={"width": "100%"})],
            ),
            html.Br(),
            html.P("Select Forecasting Algorithm"),
            html.Div(
                id="forecasting-select-algorithm-parent",
                children=[dcc.Dropdown(id="forecasting-select-algorithm", options=[], style={"width": "100%"})],
            ),
            html.Br(),
            html.P("Algorithm Setting"),
            html.Div(id="forecasting-param-table", children=[create_param_table()]),
            html.Progress(id="forecasting-progressbar", style={"width": "100%", "color": "#1AB9FF"}),
            html.Br(),
            html.Div(
                children=[
                    html.Button(id="forecasting-train-btn", children="Train", n_clicks=0),
                    html.Button(id="forecasting-cancel-btn", children="Cancel", style={"margin-left": "15px"}),
                ],
                style={"textAlign": "center"},
            ),
            html.Br(),
            create_modal(
                modal_id="forecasting-exception-modal",
                header="An Exception Occurred",
                content="An exception occurred. Please click OK to continue.",
                content_id="forecasting-exception-modal-content",
                button_id="forecasting-exception-modal-close",
            ),
        ],
    )
def create_right_column() -> html.Div:
    """Build the right-hand column (results and metrics) of the forecasting page."""
    return html.Div(
        id="right-column-data",
        children=[
            # NOTE(review): the three sibling Divs share id "result_table_card" —
            # duplicate ids are invalid in Dash; likely only used for CSS.
            html.Div(
                id="result_table_card",
                children=[
                    html.B("Forecasting Results"),
                    html.Hr(),
                    html.Div(id="forecasting-plots", children=[create_empty_figure()]),
                ],
            ),
            html.Div(
                id="result_table_card",
                children=[
                    html.B("Testing Metrics"),
                    html.Hr(),
                    html.Div(id="forecasting-test-metrics", children=[create_metric_table()]),
                ],
            ),
            html.Div(
                id="result_table_card",
                children=[
                    html.B("Training Metrics"),
                    html.Hr(),
                    html.Div(id="forecasting-training-metrics", children=[create_metric_table()]),
                ],
            ),
        ],
    )
def create_forecasting_layout() -> html.Div:
    """Assemble the full layout of the forecasting page."""
    return html.Div(
        id="forecasting_views",
        children=[
            # Left column
            html.Div(id="left-column-data", className="three columns", children=[create_control_panel()]),
            # Right column
            html.Div(className="nine columns", children=create_right_column()),
        ],
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.